aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
Diffstat (limited to 'drivers')
-rw-r--r--drivers/Kconfig2
-rw-r--r--drivers/acpi/Kconfig36
-rw-r--r--drivers/acpi/Makefile5
-rw-r--r--drivers/acpi/acpi_lpss.c98
-rw-r--r--drivers/acpi/acpica/acglobal.h1
-rw-r--r--drivers/acpi/acpica/aclocal.h4
-rw-r--r--drivers/acpi/acpica/evgpe.c9
-rw-r--r--drivers/acpi/acpica/hwgpe.c53
-rw-r--r--drivers/acpi/acpica/utresrc.c2
-rw-r--r--drivers/acpi/acpica/utxface.c4
-rw-r--r--drivers/acpi/acpica/utxfinit.c11
-rw-r--r--drivers/acpi/apei/ghes.c20
-rw-r--r--drivers/acpi/battery.c4
-rw-r--r--drivers/acpi/device_pm.c10
-rw-r--r--drivers/acpi/internal.h6
-rw-r--r--drivers/acpi/osl.c8
-rw-r--r--drivers/acpi/pci_irq.c2
-rw-r--r--drivers/acpi/pmic/intel_pmic.c354
-rw-r--r--drivers/acpi/pmic/intel_pmic.h25
-rw-r--r--drivers/acpi/pmic/intel_pmic_crc.c211
-rw-r--r--drivers/acpi/pmic/intel_pmic_xpower.c268
-rw-r--r--drivers/acpi/processor_idle.c20
-rw-r--r--drivers/acpi/property.c551
-rw-r--r--drivers/acpi/scan.c214
-rw-r--r--drivers/acpi/sleep.c2
-rw-r--r--drivers/acpi/tables.c68
-rw-r--r--drivers/acpi/utils.c5
-rw-r--r--drivers/acpi/video.c18
-rw-r--r--drivers/amba/Kconfig14
-rw-r--r--drivers/amba/bus.c2
-rw-r--r--drivers/ata/ahci.c4
-rw-r--r--drivers/ata/libata-scsi.c19
-rw-r--r--drivers/ata/sata_fsl.c2
-rw-r--r--drivers/ata/sata_nv.c2
-rw-r--r--drivers/atm/solos-pci.c2
-rw-r--r--drivers/base/Kconfig8
-rw-r--r--drivers/base/Makefile2
-rw-r--r--drivers/base/power/clock_ops.c150
-rw-r--r--drivers/base/power/domain.c147
-rw-r--r--drivers/base/power/domain_governor.c11
-rw-r--r--drivers/base/power/opp.c196
-rw-r--r--drivers/base/power/power.h56
-rw-r--r--drivers/base/power/qos.c5
-rw-r--r--drivers/base/power/runtime.c76
-rw-r--r--drivers/base/power/sysfs.c19
-rw-r--r--drivers/base/property.c431
-rw-r--r--drivers/base/regmap/Kconfig5
-rw-r--r--drivers/base/regmap/Makefile1
-rw-r--r--drivers/base/regmap/regcache-flat.c2
-rw-r--r--drivers/base/regmap/regcache-lzo.c2
-rw-r--r--drivers/base/regmap/regcache-rbtree.c4
-rw-r--r--drivers/base/regmap/regcache.c63
-rw-r--r--drivers/base/regmap/regmap-ac97.c114
-rw-r--r--drivers/block/drbd/drbd_debugfs.c6
-rw-r--r--drivers/block/nvme-scsi.c4
-rw-r--r--drivers/block/virtio_blk.c74
-rw-r--r--drivers/bus/Kconfig2
-rw-r--r--drivers/bus/arm-cci.c552
-rw-r--r--drivers/bus/brcmstb_gisb.c167
-rw-r--r--drivers/bus/mvebu-mbus.c180
-rw-r--r--drivers/bus/omap_l3_noc.c63
-rw-r--r--drivers/bus/omap_l3_smx.c26
-rw-r--r--drivers/char/hw_random/exynos-rng.c2
-rw-r--r--drivers/char/mem.c13
-rw-r--r--drivers/char/virtio_console.c39
-rw-r--r--drivers/clk/at91/clk-usb.c35
-rw-r--r--drivers/clk/clk-divider.c18
-rw-r--r--drivers/clk/clk-s2mps11.c24
-rw-r--r--drivers/clk/mvebu/common.c32
-rw-r--r--drivers/clk/pxa/clk-pxa27x.c4
-rw-r--r--drivers/clk/qcom/mmcc-apq8084.c2
-rw-r--r--drivers/clk/rockchip/clk.c4
-rw-r--r--drivers/clk/samsung/clk-exynos5440.c29
-rw-r--r--drivers/clk/tegra/clk-divider.c13
-rw-r--r--drivers/clk/tegra/clk-tegra114.c7
-rw-r--r--drivers/clk/tegra/clk-tegra124.c7
-rw-r--r--drivers/clk/tegra/clk-tegra20.c8
-rw-r--r--drivers/clk/tegra/clk-tegra30.c7
-rw-r--r--drivers/clk/tegra/clk.h2
-rw-r--r--drivers/clk/ti/dpll.c15
-rw-r--r--drivers/clk/versatile/Makefile1
-rw-r--r--drivers/clk/versatile/clk-vexpress-osc.c7
-rw-r--r--drivers/clk/versatile/clk-vexpress.c86
-rw-r--r--drivers/clocksource/Kconfig1
-rw-r--r--drivers/clocksource/Makefile1
-rw-r--r--drivers/clocksource/arm_arch_timer.c13
-rw-r--r--drivers/clocksource/time-armada-370-xp.c55
-rw-r--r--drivers/clocksource/timer-atmel-pit.c32
-rw-r--r--drivers/clocksource/timer-integrator-ap.c210
-rw-r--r--drivers/clocksource/timer-marco.c23
-rw-r--r--drivers/cpufreq/Kconfig57
-rw-r--r--drivers/cpufreq/Kconfig.arm8
-rw-r--r--drivers/cpufreq/Makefile4
-rw-r--r--drivers/cpufreq/arm_big_little.c7
-rw-r--r--drivers/cpufreq/arm_big_little.h5
-rw-r--r--drivers/cpufreq/arm_big_little_dt.c1
-rw-r--r--drivers/cpufreq/cpufreq-dt.c66
-rw-r--r--drivers/cpufreq/cpufreq.c53
-rw-r--r--drivers/cpufreq/exynos5440-cpufreq.c5
-rw-r--r--drivers/cpufreq/imx6q-cpufreq.c11
-rw-r--r--drivers/cpufreq/intel_pstate.c146
-rw-r--r--drivers/cpufreq/ls1x-cpufreq.c223
-rw-r--r--drivers/cpufreq/pcc-cpufreq.c7
-rw-r--r--drivers/cpuidle/cpuidle-arm64.c14
-rw-r--r--drivers/cpuidle/cpuidle-at91.c1
-rw-r--r--drivers/cpuidle/cpuidle-big_little.c6
-rw-r--r--drivers/cpuidle/cpuidle-calxeda.c1
-rw-r--r--drivers/cpuidle/cpuidle-cps.c7
-rw-r--r--drivers/cpuidle/cpuidle-exynos.c1
-rw-r--r--drivers/cpuidle/cpuidle-kirkwood.c1
-rw-r--r--drivers/cpuidle/cpuidle-mvebu-v7.c8
-rw-r--r--drivers/cpuidle/cpuidle-powernv.c6
-rw-r--r--drivers/cpuidle/cpuidle-pseries.c3
-rw-r--r--drivers/cpuidle/cpuidle-ux500.c3
-rw-r--r--drivers/cpuidle/cpuidle-zynq.c1
-rw-r--r--drivers/cpuidle/driver.c1
-rw-r--r--drivers/cpuidle/dt_idle_states.c12
-rw-r--r--drivers/cpuidle/governors/ladder.c2
-rw-r--r--drivers/cpuidle/governors/menu.c2
-rw-r--r--drivers/dma/dw/core.c11
-rw-r--r--drivers/dma/nbpfaxi.c2
-rw-r--r--drivers/dma/ste_dma40.c2
-rw-r--r--drivers/dma/tegra20-apb-dma.c2
-rw-r--r--drivers/edac/Kconfig16
-rw-r--r--drivers/edac/Makefile2
-rw-r--r--drivers/edac/amd64_edac.c260
-rw-r--r--drivers/edac/amd64_edac.h15
-rw-r--r--drivers/edac/edac_mc.c40
-rw-r--r--drivers/edac/edac_mc_sysfs.c2
-rw-r--r--drivers/edac/edac_pci_sysfs.c5
-rw-r--r--drivers/edac/ghes_edac.c4
-rw-r--r--drivers/edac/i3000_edac.c3
-rw-r--r--drivers/edac/i3200_edac.c3
-rw-r--r--drivers/edac/i82443bxgx_edac.c3
-rw-r--r--drivers/edac/mce_amd.c47
-rw-r--r--drivers/edac/mce_amd.h3
-rw-r--r--drivers/edac/mce_amd_inj.c293
-rw-r--r--drivers/edac/mv64x60_edac.c8
-rw-r--r--drivers/edac/ppc4xx_edac.c4
-rw-r--r--drivers/edac/sb_edac.c208
-rw-r--r--drivers/edac/x38_edac.c3
-rw-r--r--drivers/firmware/dmi_scan.c79
-rw-r--r--drivers/firmware/efi/Makefile2
-rw-r--r--drivers/firmware/efi/cper.c2
-rw-r--r--drivers/firmware/efi/efi.c4
-rw-r--r--drivers/firmware/efi/libstub/arm-stub.c11
-rw-r--r--drivers/gpio/Kconfig12
-rw-r--r--drivers/gpio/Makefile1
-rw-r--r--drivers/gpio/devres.c32
-rw-r--r--drivers/gpio/gpio-dln2.c553
-rw-r--r--drivers/gpio/gpio-omap.c8
-rw-r--r--drivers/gpio/gpio-sch.c293
-rw-r--r--drivers/gpio/gpio-tc3589x.c2
-rw-r--r--drivers/gpio/gpio-tz1090.c2
-rw-r--r--drivers/gpio/gpio-zynq.c2
-rw-r--r--drivers/gpio/gpiolib-acpi.c179
-rw-r--r--drivers/gpio/gpiolib.c85
-rw-r--r--drivers/gpio/gpiolib.h7
-rw-r--r--drivers/gpu/drm/armada/armada_gem.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimc.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gsc.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_rotator.c2
-rw-r--r--drivers/gpu/drm/i915/intel_display.c6
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c1
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c22
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nvc0.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c92
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.h4
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c19
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c17
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c7
-rw-r--r--drivers/gpu/ipu-v3/ipu-csi.c66
-rw-r--r--drivers/hid/hid-sensor-hub.c8
-rw-r--r--drivers/hid/i2c-hid/i2c-hid.c2
-rw-r--r--drivers/hsi/controllers/omap_ssi.c2
-rw-r--r--drivers/hsi/controllers/omap_ssi_port.c2
-rw-r--r--drivers/hwmon/Kconfig28
-rw-r--r--drivers/hwmon/Makefile1
-rw-r--r--drivers/hwmon/g762.c6
-rw-r--r--drivers/hwmon/gpio-fan.c18
-rw-r--r--drivers/hwmon/ibmpowernv.c78
-rw-r--r--drivers/hwmon/iio_hwmon.c7
-rw-r--r--drivers/hwmon/ina2xx.c26
-rw-r--r--drivers/hwmon/lm75.c12
-rw-r--r--drivers/hwmon/lm95234.c91
-rw-r--r--drivers/hwmon/lm95245.c41
-rw-r--r--drivers/hwmon/nct6775.c77
-rw-r--r--drivers/hwmon/nct7802.c860
-rw-r--r--drivers/hwmon/pmbus/Kconfig11
-rw-r--r--drivers/hwmon/pmbus/ltc2978.c39
-rw-r--r--drivers/hwmon/pmbus/pmbus.h30
-rw-r--r--drivers/hwmon/pmbus/pmbus_core.c118
-rw-r--r--drivers/hwmon/tmp401.c42
-rw-r--r--drivers/i2c/busses/Kconfig10
-rw-r--r--drivers/i2c/busses/Makefile1
-rw-r--r--drivers/i2c/busses/i2c-cadence.c11
-rw-r--r--drivers/i2c/busses/i2c-davinci.c8
-rw-r--r--drivers/i2c/busses/i2c-designware-core.c2
-rw-r--r--drivers/i2c/busses/i2c-dln2.c262
-rw-r--r--drivers/i2c/busses/i2c-hix5hd2.c2
-rw-r--r--drivers/i2c/busses/i2c-nomadik.c2
-rw-r--r--drivers/i2c/busses/i2c-omap.c12
-rw-r--r--drivers/i2c/i2c-core.c1
-rw-r--r--drivers/idle/intel_idle.c108
-rw-r--r--drivers/iio/accel/bmc150-accel.c44
-rw-r--r--drivers/iio/accel/kxcjk-1013.c6
-rw-r--r--drivers/iio/adc/Kconfig8
-rw-r--r--drivers/iio/adc/Makefile1
-rw-r--r--drivers/iio/adc/axp288_adc.c261
-rw-r--r--drivers/iio/adc/men_z188_adc.c1
-rw-r--r--drivers/iio/gyro/bmg160.c57
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c3
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c1100
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.h75
-rw-r--r--drivers/input/evdev.c2
-rw-r--r--drivers/input/joystick/xpad.c16
-rw-r--r--drivers/input/keyboard/gpio_keys_polled.c112
-rw-r--r--drivers/input/keyboard/samsung-keypad.c2
-rw-r--r--drivers/input/mouse/elantech.c10
-rw-r--r--drivers/input/mouse/synaptics.c4
-rw-r--r--drivers/input/touchscreen/cyttsp4_core.c2
-rw-r--r--drivers/iommu/Kconfig12
-rw-r--r--drivers/iommu/amd_iommu.c1
-rw-r--r--drivers/iommu/arm-smmu.c1
-rw-r--r--drivers/iommu/exynos-iommu.c1
-rw-r--r--drivers/iommu/intel-iommu.c1
-rw-r--r--drivers/iommu/iommu.c50
-rw-r--r--drivers/iommu/ipmmu-vmsa.c1
-rw-r--r--drivers/iommu/irq_remapping.c8
-rw-r--r--drivers/iommu/msm_iommu.c1
-rw-r--r--drivers/iommu/omap-iommu.c1
-rw-r--r--drivers/iommu/shmobile-iommu.c1
-rw-r--r--drivers/iommu/tegra-smmu.c1609
-rw-r--r--drivers/irqchip/Kconfig7
-rw-r--r--drivers/irqchip/Makefile4
-rw-r--r--drivers/irqchip/irq-armada-370-xp.c70
-rw-r--r--drivers/irqchip/irq-atmel-aic-common.c32
-rw-r--r--drivers/irqchip/irq-atmel-aic-common.h2
-rw-r--r--drivers/irqchip/irq-atmel-aic.c66
-rw-r--r--drivers/irqchip/irq-atmel-aic5.c65
-rw-r--r--drivers/irqchip/irq-bcm7120-l2.c178
-rw-r--r--drivers/irqchip/irq-brcmstb-l2.c45
-rw-r--r--drivers/irqchip/irq-dw-apb-ictl.c32
-rw-r--r--drivers/irqchip/irq-gic.c2
-rw-r--r--drivers/irqchip/irq-hip04.c3
-rw-r--r--drivers/irqchip/irq-sunxi-nmi.c4
-rw-r--r--drivers/irqchip/irq-tb10x.c4
-rw-r--r--drivers/leds/leds-gpio.c150
-rw-r--r--drivers/lguest/lguest_device.c17
-rw-r--r--drivers/mailbox/Kconfig12
-rw-r--r--drivers/mailbox/Makefile2
-rw-r--r--drivers/mailbox/mailbox.c19
-rw-r--r--drivers/mailbox/mailbox.h14
-rw-r--r--drivers/mailbox/omap-mailbox.c346
-rw-r--r--drivers/mailbox/pcc.c403
-rw-r--r--drivers/md/dm-bio-prison.c186
-rw-r--r--drivers/md/dm-bio-prison.h28
-rw-r--r--drivers/md/dm-bufio.c226
-rw-r--r--drivers/md/dm-cache-block-types.h11
-rw-r--r--drivers/md/dm-cache-metadata.c34
-rw-r--r--drivers/md/dm-cache-metadata.h6
-rw-r--r--drivers/md/dm-cache-policy-mq.c82
-rw-r--r--drivers/md/dm-cache-target.c378
-rw-r--r--drivers/md/dm-crypt.c2
-rw-r--r--drivers/md/dm-ioctl.c5
-rw-r--r--drivers/md/dm-stats.c2
-rw-r--r--drivers/md/dm-table.c36
-rw-r--r--drivers/md/dm-thin-metadata.c35
-rw-r--r--drivers/md/dm-thin-metadata.h9
-rw-r--r--drivers/md/dm-thin.c760
-rw-r--r--drivers/md/dm.c273
-rw-r--r--drivers/md/dm.h10
-rw-r--r--drivers/md/persistent-data/dm-array.c4
-rw-r--r--drivers/md/persistent-data/dm-space-map-metadata.c8
-rw-r--r--drivers/md/persistent-data/dm-transaction-manager.c77
-rw-r--r--drivers/md/persistent-data/dm-transaction-manager.h7
-rw-r--r--drivers/media/common/cx2341x.c29
-rw-r--r--drivers/media/common/saa7146/saa7146_core.c2
-rw-r--r--drivers/media/common/siano/smsir.c3
-rw-r--r--drivers/media/common/tveeprom.c36
-rw-r--r--drivers/media/dvb-core/dvb-usb-ids.h1
-rw-r--r--drivers/media/dvb-core/dvb_net.c4
-rw-r--r--drivers/media/dvb-frontends/Kconfig5
-rw-r--r--drivers/media/dvb-frontends/af9033.c140
-rw-r--r--drivers/media/dvb-frontends/af9033_priv.h11
-rw-r--r--drivers/media/dvb-frontends/au8522_dig.c117
-rw-r--r--drivers/media/dvb-frontends/cx22700.c3
-rw-r--r--drivers/media/dvb-frontends/cx24110.c50
-rw-r--r--drivers/media/dvb-frontends/cx24117.c2
-rw-r--r--drivers/media/dvb-frontends/dib7000p.c9
-rw-r--r--drivers/media/dvb-frontends/drx39xyj/drxj.c3
-rw-r--r--drivers/media/dvb-frontends/drxk_hard.c9
-rw-r--r--drivers/media/dvb-frontends/m88ds3103.c267
-rw-r--r--drivers/media/dvb-frontends/m88ds3103_priv.h181
-rw-r--r--drivers/media/dvb-frontends/mn88472.h38
-rw-r--r--drivers/media/dvb-frontends/mn88473.h38
-rw-r--r--drivers/media/dvb-frontends/rtl2832.c60
-rw-r--r--drivers/media/dvb-frontends/rtl2832.h11
-rw-r--r--drivers/media/dvb-frontends/rtl2832_sdr.c8
-rw-r--r--drivers/media/dvb-frontends/si2168.c75
-rw-r--r--drivers/media/dvb-frontends/si2168.h4
-rw-r--r--drivers/media/dvb-frontends/si2168_priv.h1
-rw-r--r--drivers/media/dvb-frontends/sp2.c21
-rw-r--r--drivers/media/dvb-frontends/stb0899_drv.c2
-rw-r--r--drivers/media/dvb-frontends/stv090x.c196
-rw-r--r--drivers/media/dvb-frontends/stv090x.h44
-rw-r--r--drivers/media/firewire/firedtv-ci.c3
-rw-r--r--drivers/media/firewire/firedtv.h2
-rw-r--r--drivers/media/i2c/adv7170.c16
-rw-r--r--drivers/media/i2c/adv7175.c16
-rw-r--r--drivers/media/i2c/adv7180.c6
-rw-r--r--drivers/media/i2c/adv7183.c6
-rw-r--r--drivers/media/i2c/adv7511.c229
-rw-r--r--drivers/media/i2c/adv7604.c109
-rw-r--r--drivers/media/i2c/adv7842.c40
-rw-r--r--drivers/media/i2c/ak881x.c8
-rw-r--r--drivers/media/i2c/cx25840/cx25840-core.c14
-rw-r--r--drivers/media/i2c/cx25840/cx25840-firmware.c11
-rw-r--r--drivers/media/i2c/ir-kbd-i2c.c3
-rw-r--r--drivers/media/i2c/m5mols/m5mols_core.c6
-rw-r--r--drivers/media/i2c/ml86v7667.c6
-rw-r--r--drivers/media/i2c/mt9m032.c6
-rw-r--r--drivers/media/i2c/mt9p031.c8
-rw-r--r--drivers/media/i2c/mt9t001.c8
-rw-r--r--drivers/media/i2c/mt9v011.c6
-rw-r--r--drivers/media/i2c/mt9v032.c12
-rw-r--r--drivers/media/i2c/noon010pc30.c12
-rw-r--r--drivers/media/i2c/ov7670.c16
-rw-r--r--drivers/media/i2c/ov9650.c10
-rw-r--r--drivers/media/i2c/s5c73m3/s5c73m3.h6
-rw-r--r--drivers/media/i2c/s5k4ecgx.c4
-rw-r--r--drivers/media/i2c/s5k5baf.c14
-rw-r--r--drivers/media/i2c/s5k6a3.c2
-rw-r--r--drivers/media/i2c/s5k6aa.c8
-rw-r--r--drivers/media/i2c/saa6752hs.c6
-rw-r--r--drivers/media/i2c/saa7115.c2
-rw-r--r--drivers/media/i2c/saa717x.c2
-rw-r--r--drivers/media/i2c/smiapp-pll.c280
-rw-r--r--drivers/media/i2c/smiapp-pll.h21
-rw-r--r--drivers/media/i2c/smiapp/smiapp-core.c261
-rw-r--r--drivers/media/i2c/smiapp/smiapp.h8
-rw-r--r--drivers/media/i2c/soc_camera/imx074.c8
-rw-r--r--drivers/media/i2c/soc_camera/mt9m001.c14
-rw-r--r--drivers/media/i2c/soc_camera/mt9m111.c70
-rw-r--r--drivers/media/i2c/soc_camera/mt9t031.c10
-rw-r--r--drivers/media/i2c/soc_camera/mt9t112.c22
-rw-r--r--drivers/media/i2c/soc_camera/mt9v022.c26
-rw-r--r--drivers/media/i2c/soc_camera/ov2640.c54
-rw-r--r--drivers/media/i2c/soc_camera/ov5642.c8
-rw-r--r--drivers/media/i2c/soc_camera/ov6650.c58
-rw-r--r--drivers/media/i2c/soc_camera/ov772x.c20
-rw-r--r--drivers/media/i2c/soc_camera/ov9640.c40
-rw-r--r--drivers/media/i2c/soc_camera/ov9740.c12
-rw-r--r--drivers/media/i2c/soc_camera/rj54n1cb0c.c54
-rw-r--r--drivers/media/i2c/soc_camera/tw9910.c10
-rw-r--r--drivers/media/i2c/sr030pc30.c14
-rw-r--r--drivers/media/i2c/tvp514x.c12
-rw-r--r--drivers/media/i2c/tvp5150.c6
-rw-r--r--drivers/media/i2c/tvp7002.c10
-rw-r--r--drivers/media/i2c/vs6624.c18
-rw-r--r--drivers/media/media-entity.c13
-rw-r--r--drivers/media/pci/Kconfig1
-rw-r--r--drivers/media/pci/Makefile3
-rw-r--r--drivers/media/pci/bt8xx/bttv-cards.c6
-rw-r--r--drivers/media/pci/bt8xx/bttv-risc.c12
-rw-r--r--drivers/media/pci/cx18/cx18-av-core.c18
-rw-r--r--drivers/media/pci/cx18/cx18-cards.h3
-rw-r--r--drivers/media/pci/cx18/cx18-controls.c2
-rw-r--r--drivers/media/pci/cx18/cx18-driver.h1
-rw-r--r--drivers/media/pci/cx18/cx18-ioctl.c9
-rw-r--r--drivers/media/pci/cx18/cx18-streams.c9
-rw-r--r--drivers/media/pci/cx23885/cx23885-417.c4
-rw-r--r--drivers/media/pci/cx23885/cx23885-cards.c131
-rw-r--r--drivers/media/pci/cx23885/cx23885-core.c21
-rw-r--r--drivers/media/pci/cx23885/cx23885-dvb.c691
-rw-r--r--drivers/media/pci/cx23885/cx23885-input.c31
-rw-r--r--drivers/media/pci/cx23885/cx23885-vbi.c10
-rw-r--r--drivers/media/pci/cx23885/cx23885-video.c12
-rw-r--r--drivers/media/pci/cx23885/cx23885.h8
-rw-r--r--drivers/media/pci/cx25821/cx25821-core.c12
-rw-r--r--drivers/media/pci/cx88/Kconfig5
-rw-r--r--drivers/media/pci/cx88/Makefile1
-rw-r--r--drivers/media/pci/cx88/cx88-alsa.c112
-rw-r--r--drivers/media/pci/cx88/cx88-blackbird.c565
-rw-r--r--drivers/media/pci/cx88/cx88-cards.c71
-rw-r--r--drivers/media/pci/cx88/cx88-core.c119
-rw-r--r--drivers/media/pci/cx88/cx88-dvb.c158
-rw-r--r--drivers/media/pci/cx88/cx88-mpeg.c159
-rw-r--r--drivers/media/pci/cx88/cx88-vbi.c216
-rw-r--r--drivers/media/pci/cx88/cx88-video.c871
-rw-r--r--drivers/media/pci/cx88/cx88.h104
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-core.c3
-rw-r--r--drivers/media/pci/ivtv/ivtv-controls.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-ioctl.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-udma.c2
-rw-r--r--drivers/media/pci/meye/meye.c3
-rw-r--r--drivers/media/pci/pt1/pt1.c13
-rw-r--r--drivers/media/pci/pt3/pt3.c75
-rw-r--r--drivers/media/pci/saa7134/saa7134-core.c18
-rw-r--r--drivers/media/pci/saa7134/saa7134-empress.c5
-rw-r--r--drivers/media/pci/saa7134/saa7134-ts.c17
-rw-r--r--drivers/media/pci/saa7134/saa7134-vbi.c16
-rw-r--r--drivers/media/pci/saa7134/saa7134-video.c16
-rw-r--r--drivers/media/pci/saa7134/saa7134.h2
-rw-r--r--drivers/media/pci/saa7164/saa7164-buffer.c4
-rw-r--r--drivers/media/pci/saa7164/saa7164-bus.c101
-rw-r--r--drivers/media/pci/saa7164/saa7164-core.c13
-rw-r--r--drivers/media/pci/saa7164/saa7164-fw.c6
-rw-r--r--drivers/media/pci/saa7164/saa7164-types.h4
-rw-r--r--drivers/media/pci/saa7164/saa7164.h4
-rw-r--r--drivers/media/pci/smipcie/Kconfig17
-rw-r--r--drivers/media/pci/smipcie/Makefile6
-rw-r--r--drivers/media/pci/smipcie/smipcie.c1099
-rw-r--r--drivers/media/pci/smipcie/smipcie.h299
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-core.c10
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c88
-rw-r--r--drivers/media/pci/solo6x10/solo6x10.h1
-rw-r--r--drivers/media/pci/tw68/tw68-core.c15
-rw-r--r--drivers/media/pci/tw68/tw68-video.c9
-rw-r--r--drivers/media/pci/tw68/tw68.h1
-rw-r--r--drivers/media/pci/zoran/zoran_driver.c5
-rw-r--r--drivers/media/pci/zoran/zoran_procfs.c4
-rw-r--r--drivers/media/platform/Kconfig4
-rw-r--r--drivers/media/platform/Makefile2
-rw-r--r--drivers/media/platform/blackfin/bfin_capture.c34
-rw-r--r--drivers/media/platform/coda/Makefile2
-rw-r--r--drivers/media/platform/coda/coda-bit.c322
-rw-r--r--drivers/media/platform/coda/coda-common.c611
-rw-r--r--drivers/media/platform/coda/coda-jpeg.c238
-rw-r--r--drivers/media/platform/coda/coda.h24
-rw-r--r--drivers/media/platform/coda/coda_regs.h7
-rw-r--r--drivers/media/platform/davinci/vpbe.c21
-rw-r--r--drivers/media/platform/davinci/vpbe_display.c617
-rw-r--r--drivers/media/platform/davinci/vpfe_capture.c8
-rw-r--r--drivers/media/platform/davinci/vpif_capture.c2
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-core.c23
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-core.h2
-rw-r--r--drivers/media/platform/exynos4-is/fimc-capture.c2
-rw-r--r--drivers/media/platform/exynos4-is/fimc-core.c20
-rw-r--r--drivers/media/platform/exynos4-is/fimc-core.h4
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is-i2c.c2
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is.c10
-rw-r--r--drivers/media/platform/exynos4-is/fimc-isp.c16
-rw-r--r--drivers/media/platform/exynos4-is/fimc-lite-reg.c26
-rw-r--r--drivers/media/platform/exynos4-is/fimc-lite.c16
-rw-r--r--drivers/media/platform/exynos4-is/fimc-reg.c14
-rw-r--r--drivers/media/platform/exynos4-is/mipi-csis.c16
-rw-r--r--drivers/media/platform/fsl-viu.c3
-rw-r--r--drivers/media/platform/marvell-ccic/mcam-core.c85
-rw-r--r--drivers/media/platform/marvell-ccic/mcam-core.h3
-rw-r--r--drivers/media/platform/mx2_emmaprp.c9
-rw-r--r--drivers/media/platform/omap/Kconfig3
-rw-r--r--drivers/media/platform/omap/omap_vout.c11
-rw-r--r--drivers/media/platform/omap3isp/ispccdc.c112
-rw-r--r--drivers/media/platform/omap3isp/ispccp2.c18
-rw-r--r--drivers/media/platform/omap3isp/ispcsi2.c42
-rw-r--r--drivers/media/platform/omap3isp/isppreview.c60
-rw-r--r--drivers/media/platform/omap3isp/ispresizer.c19
-rw-r--r--drivers/media/platform/omap3isp/ispvideo.c95
-rw-r--r--drivers/media/platform/omap3isp/ispvideo.h10
-rw-r--r--drivers/media/platform/s3c-camif/camif-capture.c10
-rw-r--r--drivers/media/platform/s3c-camif/camif-core.c3
-rw-r--r--drivers/media/platform/s3c-camif/camif-regs.c8
-rw-r--r--drivers/media/platform/s5p-g2d/g2d.c10
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-core.c13
-rw-r--r--drivers/media/platform/s5p-mfc/regs-mfc-v6.h1
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc.c51
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_common.h4
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c122
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_dec.c12
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_enc.c65
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c13
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c32
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_pm.c10
-rw-r--r--drivers/media/platform/s5p-tv/hdmi_drv.c2
-rw-r--r--drivers/media/platform/s5p-tv/sdo_drv.c2
-rw-r--r--drivers/media/platform/sh_vou.c11
-rw-r--r--drivers/media/platform/soc_camera/atmel-isi.c22
-rw-r--r--drivers/media/platform/soc_camera/mx2_camera.c26
-rw-r--r--drivers/media/platform/soc_camera/mx3_camera.c6
-rw-r--r--drivers/media/platform/soc_camera/omap1_camera.c36
-rw-r--r--drivers/media/platform/soc_camera/pxa_camera.c16
-rw-r--r--drivers/media/platform/soc_camera/rcar_vin.c14
-rw-r--r--drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c20
-rw-r--r--drivers/media/platform/soc_camera/sh_mobile_csi2.c38
-rw-r--r--drivers/media/platform/soc_camera/soc_camera.c2
-rw-r--r--drivers/media/platform/soc_camera/soc_camera_platform.c2
-rw-r--r--drivers/media/platform/soc_camera/soc_mediabus.c78
-rw-r--r--drivers/media/platform/ti-vpe/csc.c10
-rw-r--r--drivers/media/platform/ti-vpe/sc.c10
-rw-r--r--drivers/media/platform/via-camera.c12
-rw-r--r--drivers/media/platform/vim2m.c (renamed from drivers/media/platform/mem2mem_testdev.c)222
-rw-r--r--drivers/media/platform/vino.c6
-rw-r--r--drivers/media/platform/vivid/vivid-core.c21
-rw-r--r--drivers/media/platform/vivid/vivid-core.h16
-rw-r--r--drivers/media/platform/vivid/vivid-ctrls.c165
-rw-r--r--drivers/media/platform/vivid/vivid-sdr-cap.c4
-rw-r--r--drivers/media/platform/vivid/vivid-tpg-colors.c704
-rw-r--r--drivers/media/platform/vivid/vivid-tpg-colors.h4
-rw-r--r--drivers/media/platform/vivid/vivid-tpg.c327
-rw-r--r--drivers/media/platform/vivid/vivid-tpg.h38
-rw-r--r--drivers/media/platform/vivid/vivid-vbi-cap.c4
-rw-r--r--drivers/media/platform/vivid/vivid-vbi-out.c4
-rw-r--r--drivers/media/platform/vivid/vivid-vid-cap.c38
-rw-r--r--drivers/media/platform/vivid/vivid-vid-common.c4
-rw-r--r--drivers/media/platform/vivid/vivid-vid-out.c29
-rw-r--r--drivers/media/platform/vsp1/vsp1_bru.c14
-rw-r--r--drivers/media/platform/vsp1/vsp1_hsit.c12
-rw-r--r--drivers/media/platform/vsp1/vsp1_lif.c10
-rw-r--r--drivers/media/platform/vsp1/vsp1_lut.c14
-rw-r--r--drivers/media/platform/vsp1/vsp1_rwpf.c10
-rw-r--r--drivers/media/platform/vsp1/vsp1_sru.c12
-rw-r--r--drivers/media/platform/vsp1/vsp1_uds.c10
-rw-r--r--drivers/media/platform/vsp1/vsp1_video.c42
-rw-r--r--drivers/media/radio/radio-wl1273.c4
-rw-r--r--drivers/media/radio/si4713/radio-platform-si4713.c28
-rw-r--r--drivers/media/radio/si4713/si4713.c164
-rw-r--r--drivers/media/radio/si4713/si4713.h15
-rw-r--r--drivers/media/radio/wl128x/fmdrv_v4l2.c4
-rw-r--r--drivers/media/rc/Kconfig26
-rw-r--r--drivers/media/rc/Makefile2
-rw-r--r--drivers/media/rc/igorplugusb.c261
-rw-r--r--drivers/media/rc/img-ir/Kconfig1
-rw-r--r--drivers/media/rc/img-ir/img-ir-core.c1
-rw-r--r--drivers/media/rc/img-ir/img-ir-hw.c28
-rw-r--r--drivers/media/rc/img-ir/img-ir-hw.h6
-rw-r--r--drivers/media/rc/ir-lirc-codec.c12
-rw-r--r--drivers/media/rc/ir-rc6-decoder.c2
-rw-r--r--drivers/media/rc/lirc_dev.c3
-rw-r--r--drivers/media/rc/meson-ir.c216
-rw-r--r--drivers/media/rc/rc-main.c8
-rw-r--r--drivers/media/rc/redrat3.c4
-rw-r--r--drivers/media/tuners/Kconfig8
-rw-r--r--drivers/media/tuners/Makefile1
-rw-r--r--drivers/media/tuners/m88rs6000t.c744
-rw-r--r--drivers/media/tuners/m88rs6000t.h29
-rw-r--r--drivers/media/tuners/m88ts2022.c2
-rw-r--r--drivers/media/tuners/mxl5007t.c30
-rw-r--r--drivers/media/tuners/r820t.c12
-rw-r--r--drivers/media/tuners/si2157.c44
-rw-r--r--drivers/media/tuners/si2157.h2
-rw-r--r--drivers/media/tuners/si2157_priv.h8
-rw-r--r--drivers/media/tuners/tda18271-common.c2
-rw-r--r--drivers/media/tuners/xc5000.c17
-rw-r--r--drivers/media/tuners/xc5000.h1
-rw-r--r--drivers/media/usb/au0828/au0828-cards.c5
-rw-r--r--drivers/media/usb/au0828/au0828-core.c8
-rw-r--r--drivers/media/usb/au0828/au0828-dvb.c2
-rw-r--r--drivers/media/usb/au0828/au0828-input.c14
-rw-r--r--drivers/media/usb/cx231xx/Kconfig1
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-417.c59
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-audio.c97
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-avcore.c331
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-cards.c257
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-core.c165
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-dvb.c159
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-i2c.c132
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-input.c8
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-pcb-cfg.c47
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-vbi.c48
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-video.c89
-rw-r--r--drivers/media/usb/cx231xx/cx231xx.h41
-rw-r--r--drivers/media/usb/dvb-usb-v2/Kconfig3
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9035.c1
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvbsky.c438
-rw-r--r--drivers/media/usb/dvb-usb-v2/lmedm04.c22
-rw-r--r--drivers/media/usb/dvb-usb-v2/rtl28xxu.c231
-rw-r--r--drivers/media/usb/dvb-usb-v2/rtl28xxu.h7
-rw-r--r--drivers/media/usb/dvb-usb/Kconfig1
-rw-r--r--drivers/media/usb/dvb-usb/af9005.c3
-rw-r--r--drivers/media/usb/dvb-usb/cxusb.c230
-rw-r--r--drivers/media/usb/dvb-usb/cxusb.h4
-rw-r--r--drivers/media/usb/dvb-usb/technisat-usb2.c5
-rw-r--r--drivers/media/usb/em28xx/em28xx-audio.c19
-rw-r--r--drivers/media/usb/em28xx/em28xx-camera.c7
-rw-r--r--drivers/media/usb/em28xx/em28xx-cards.c71
-rw-r--r--drivers/media/usb/em28xx/em28xx-core.c41
-rw-r--r--drivers/media/usb/em28xx/em28xx-dvb.c95
-rw-r--r--drivers/media/usb/em28xx/em28xx-i2c.c6
-rw-r--r--drivers/media/usb/em28xx/em28xx-input.c17
-rw-r--r--drivers/media/usb/em28xx/em28xx-reg.h3
-rw-r--r--drivers/media/usb/em28xx/em28xx-v4l.h1
-rw-r--r--drivers/media/usb/em28xx/em28xx-vbi.c1
-rw-r--r--drivers/media/usb/em28xx/em28xx-video.c98
-rw-r--r--drivers/media/usb/em28xx/em28xx.h27
-rw-r--r--drivers/media/usb/go7007/go7007-v4l2.c2
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-context.c2
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-hdw.c2
-rw-r--r--drivers/media/usb/s2255/s2255drv.c27
-rw-r--r--drivers/media/usb/stk1160/stk1160-v4l.c2
-rw-r--r--drivers/media/usb/usbvision/usbvision-video.c3
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c51
-rw-r--r--drivers/media/usb/uvc/uvc_queue.c161
-rw-r--r--drivers/media/usb/uvc/uvc_v4l2.c1009
-rw-r--r--drivers/media/usb/uvc/uvc_video.c23
-rw-r--r--drivers/media/usb/uvc/uvcvideo.h12
-rw-r--r--drivers/media/v4l2-core/v4l2-common.c125
-rw-r--r--drivers/media/v4l2-core/v4l2-compat-ioctl32.c10
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls.c87
-rw-r--r--drivers/media/v4l2-core/v4l2-dev.c34
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c15
-rw-r--r--drivers/media/v4l2-core/videobuf-core.c6
-rw-r--r--drivers/media/v4l2-core/videobuf2-core.c49
-rw-r--r--drivers/media/v4l2-core/videobuf2-dma-contig.c71
-rw-r--r--drivers/media/v4l2-core/videobuf2-dma-sg.c425
-rw-r--r--drivers/media/v4l2-core/videobuf2-vmalloc.c194
-rw-r--r--drivers/memory/Kconfig20
-rw-r--r--drivers/memory/Makefile4
-rw-r--r--drivers/memory/omap-gpmc.c2092
-rw-r--r--drivers/memory/tegra/Kconfig7
-rw-r--r--drivers/memory/tegra/Makefile7
-rw-r--r--drivers/memory/tegra/mc.c301
-rw-r--r--drivers/memory/tegra/mc.h40
-rw-r--r--drivers/memory/tegra/tegra114.c948
-rw-r--r--drivers/memory/tegra/tegra124.c995
-rw-r--r--drivers/memory/tegra/tegra30.c970
-rw-r--r--drivers/memory/tegra30-mc.c378
-rw-r--r--drivers/message/fusion/mptsas.c1
-rw-r--r--drivers/message/fusion/mptscsih.c22
-rw-r--r--drivers/message/fusion/mptscsih.h3
-rw-r--r--drivers/mfd/Kconfig25
-rw-r--r--drivers/mfd/Makefile4
-rw-r--r--drivers/mfd/ab8500-gpadc.c2
-rw-r--r--drivers/mfd/ab8500-sysctrl.c57
-rw-r--r--drivers/mfd/arizona-core.c4
-rw-r--r--drivers/mfd/arizona-spi.c2
-rw-r--r--drivers/mfd/atmel-hlcdc.c122
-rw-r--r--drivers/mfd/axp20x.c364
-rw-r--r--drivers/mfd/da9063-core.c4
-rw-r--r--drivers/mfd/db8500-prcmu.c22
-rw-r--r--drivers/mfd/dln2.c781
-rw-r--r--drivers/mfd/intel_soc_pmic_crc.c3
-rw-r--r--drivers/mfd/lpc_sch.c6
-rw-r--r--drivers/mfd/max14577.c2
-rw-r--r--drivers/mfd/max77693.c31
-rw-r--r--drivers/mfd/mfd-core.c8
-rw-r--r--drivers/mfd/rts5227.c6
-rw-r--r--drivers/mfd/rts5249.c4
-rw-r--r--drivers/mfd/rtsx_gops.c37
-rw-r--r--drivers/mfd/rtsx_pcr.h3
-rw-r--r--drivers/mfd/rtsx_usb.c4
-rw-r--r--drivers/mfd/sec-core.c39
-rw-r--r--drivers/mfd/sec-irq.c23
-rw-r--r--drivers/mfd/syscon.c96
-rw-r--r--drivers/mfd/t7l66xb.c14
-rw-r--r--drivers/mfd/tc3589x.c9
-rw-r--r--drivers/mfd/tc6387xb.c10
-rw-r--r--drivers/mfd/tc6393xb.c23
-rw-r--r--drivers/mfd/tps65090.c62
-rw-r--r--drivers/mfd/tps65217.c2
-rw-r--r--drivers/mfd/twl4030-power.c3
-rw-r--r--drivers/mfd/viperboard.c5
-rw-r--r--drivers/mfd/wm5102-tables.c6
-rw-r--r--drivers/mfd/wm5110-tables.c20
-rw-r--r--drivers/mfd/wm8350-core.c2
-rw-r--r--drivers/mfd/wm8994-core.c2
-rw-r--r--drivers/mfd/wm8997-tables.c6
-rw-r--r--drivers/misc/Kconfig2
-rw-r--r--drivers/misc/apds990x.c4
-rw-r--r--drivers/misc/bh1770glc.c2
-rw-r--r--drivers/misc/eeprom/at25.c34
-rw-r--r--drivers/misc/eeprom/eeprom_93cx6.c62
-rw-r--r--drivers/misc/enclosure.c44
-rw-r--r--drivers/misc/genwqe/card_dev.c2
-rw-r--r--drivers/misc/lis3lv02d/lis3lv02d_i2c.c4
-rw-r--r--drivers/misc/mei/mei_dev.h4
-rw-r--r--drivers/misc/mei/pci-me.c8
-rw-r--r--drivers/misc/mei/pci-txe.c8
-rw-r--r--drivers/misc/mic/card/mic_virtio.c14
-rw-r--r--drivers/misc/vexpress-syscfg.c60
-rw-r--r--drivers/mmc/card/block.c83
-rw-r--r--drivers/mmc/card/mmc_test.c28
-rw-r--r--drivers/mmc/card/queue.c16
-rw-r--r--drivers/mmc/core/bus.c59
-rw-r--r--drivers/mmc/core/core.c111
-rw-r--r--drivers/mmc/core/core.h1
-rw-r--r--drivers/mmc/core/debugfs.c9
-rw-r--r--drivers/mmc/core/mmc.c184
-rw-r--r--drivers/mmc/core/mmc_ops.c130
-rw-r--r--drivers/mmc/core/mmc_ops.h2
-rw-r--r--drivers/mmc/core/sdio.c10
-rw-r--r--drivers/mmc/core/sdio_bus.c16
-rw-r--r--drivers/mmc/host/Kconfig7
-rw-r--r--drivers/mmc/host/Makefile1
-rw-r--r--drivers/mmc/host/atmel-mci.c207
-rw-r--r--drivers/mmc/host/dw_mmc-exynos.c91
-rw-r--r--drivers/mmc/host/dw_mmc-pltfm.c6
-rw-r--r--drivers/mmc/host/dw_mmc-rockchip.c13
-rw-r--r--drivers/mmc/host/dw_mmc.c346
-rw-r--r--drivers/mmc/host/dw_mmc.h15
-rw-r--r--drivers/mmc/host/mmci.c9
-rw-r--r--drivers/mmc/host/msm_sdcc.c6
-rw-r--r--drivers/mmc/host/mvsdio.c7
-rw-r--r--drivers/mmc/host/mxcmmc.c25
-rw-r--r--drivers/mmc/host/mxs-mmc.c26
-rw-r--r--drivers/mmc/host/omap_hsmmc.c286
-rw-r--r--drivers/mmc/host/sdhci-acpi.c54
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c82
-rw-r--r--drivers/mmc/host/sdhci-msm.c50
-rw-r--r--drivers/mmc/host/sdhci-of-arasan.c5
-rw-r--r--drivers/mmc/host/sdhci-pci-o2micro.c2
-rw-r--r--drivers/mmc/host/sdhci-pci.c74
-rw-r--r--drivers/mmc/host/sdhci-pxav2.c15
-rw-r--r--drivers/mmc/host/sdhci-pxav3.c62
-rw-r--r--drivers/mmc/host/sdhci-s3c.c14
-rw-r--r--drivers/mmc/host/sdhci.c280
-rw-r--r--drivers/mmc/host/sdhci.h48
-rw-r--r--drivers/mmc/host/sh_mobile_sdhi.c2
-rw-r--r--drivers/mmc/host/sunxi-mmc.c1
-rw-r--r--drivers/mmc/host/tmio_mmc.c2
-rw-r--r--drivers/mmc/host/toshsd.c717
-rw-r--r--drivers/mmc/host/toshsd.h176
-rw-r--r--drivers/net/bonding/bond_netlink.c7
-rw-r--r--drivers/net/dsa/bcm_sf2.c58
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c3
-rw-r--r--drivers/net/ethernet/cadence/Kconfig2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c8
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c5
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c2
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c29
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c2
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c96
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.h5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c27
-rw-r--r--drivers/net/macvtap.c68
-rw-r--r--drivers/net/tun.c172
-rw-r--r--drivers/net/virtio_net.c161
-rw-r--r--drivers/net/vxlan.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw.h2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac80211.c12
-rw-r--r--drivers/net/wireless/rtlwifi/pci.c20
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8821ae/hw.c5
-rw-r--r--drivers/net/xen-netback/xenbus.c15
-rw-r--r--drivers/net/xen-netfront.c5
-rw-r--r--drivers/nubus/nubus.c4
-rw-r--r--drivers/of/base.c33
-rw-r--r--drivers/of/fdt.c2
-rw-r--r--drivers/of/of_pci.c8
-rw-r--r--drivers/pci/Kconfig6
-rw-r--r--drivers/pci/host/Kconfig13
-rw-r--r--drivers/pci/host/Makefile1
-rw-r--r--drivers/pci/host/pci-dra7xx.c6
-rw-r--r--drivers/pci/host/pci-exynos.c18
-rw-r--r--drivers/pci/host/pci-host-generic.c126
-rw-r--r--drivers/pci/host/pci-imx6.c4
-rw-r--r--drivers/pci/host/pci-keystone-dw.c8
-rw-r--r--drivers/pci/host/pci-keystone.c5
-rw-r--r--drivers/pci/host/pci-keystone.h2
-rw-r--r--drivers/pci/host/pci-layerscape.c179
-rw-r--r--drivers/pci/host/pci-mvebu.c15
-rw-r--r--drivers/pci/host/pci-tegra.c63
-rw-r--r--drivers/pci/host/pcie-designware.c33
-rw-r--r--drivers/pci/host/pcie-designware.h2
-rw-r--r--drivers/pci/host/pcie-rcar.c33
-rw-r--r--drivers/pci/host/pcie-spear13xx.c13
-rw-r--r--drivers/pci/host/pcie-xilinx.c37
-rw-r--r--drivers/pci/hotplug/ibmphp_res.c5
-rw-r--r--drivers/pci/iov.c11
-rw-r--r--drivers/pci/msi.c420
-rw-r--r--drivers/pci/pci-acpi.c3
-rw-r--r--drivers/pci/pci-driver.c20
-rw-r--r--drivers/pci/pci-sysfs.c31
-rw-r--r--drivers/pci/pci.c21
-rw-r--r--drivers/pci/pci.h6
-rw-r--r--drivers/pci/pcie/Kconfig2
-rw-r--r--drivers/pci/probe.c80
-rw-r--r--drivers/pci/search.c3
-rw-r--r--drivers/pci/xen-pcifront.c13
-rw-r--r--drivers/pinctrl/Kconfig13
-rw-r--r--drivers/pinctrl/Makefile2
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.c72
-rw-r--r--drivers/pinctrl/freescale/pinctrl-mxs.c25
-rw-r--r--drivers/pinctrl/intel/Kconfig27
-rw-r--r--drivers/pinctrl/intel/Makefile4
-rw-r--r--drivers/pinctrl/intel/pinctrl-baytrail.c (renamed from drivers/pinctrl/pinctrl-baytrail.c)7
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c1519
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-abx500.c33
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c4
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik.c18
-rw-r--r--drivers/pinctrl/pinconf-generic.c71
-rw-r--r--drivers/pinctrl/pinctrl-at91.c17
-rw-r--r--drivers/pinctrl/pinctrl-at91.h72
-rw-r--r--drivers/pinctrl/pinctrl-bcm281xx.c5
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c116
-rw-r--r--drivers/pinctrl/pinctrl-st.c2
-rw-r--r--drivers/pinctrl/pinctrl-tb10x.c8
-rw-r--r--drivers/pinctrl/pinctrl-tegra-xusb.c19
-rw-r--r--drivers/pinctrl/qcom/Kconfig13
-rw-r--r--drivers/pinctrl/qcom/Makefile2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-gpio.c933
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-mpp.c949
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.c376
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.h3
-rw-r--r--drivers/pinctrl/samsung/pinctrl-s3c24xx.c30
-rw-r--r--drivers/pinctrl/samsung/pinctrl-s3c64xx.c31
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.c131
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.h82
-rw-r--r--drivers/pinctrl/sunxi/Kconfig4
-rw-r--r--drivers/pinctrl/sunxi/Makefile1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c749
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.h1
-rw-r--r--drivers/power/reset/Kconfig9
-rw-r--r--drivers/power/reset/Makefile1
-rw-r--r--drivers/power/reset/at91-reset.c4
-rw-r--r--drivers/power/reset/imx-snvs-poweroff.c66
-rw-r--r--drivers/powercap/Kconfig2
-rw-r--r--drivers/powercap/intel_rapl.c264
-rw-r--r--drivers/regulator/88pm8607.c3
-rw-r--r--drivers/regulator/Kconfig20
-rw-r--r--drivers/regulator/Makefile1
-rw-r--r--drivers/regulator/act8865-regulator.c31
-rw-r--r--drivers/regulator/anatop-regulator.c18
-rw-r--r--drivers/regulator/arizona-ldo1.c10
-rw-r--r--drivers/regulator/arizona-micsupp.c8
-rw-r--r--drivers/regulator/core.c23
-rw-r--r--drivers/regulator/da9052-regulator.c3
-rw-r--r--drivers/regulator/da9063-regulator.c19
-rw-r--r--drivers/regulator/da9210-regulator.c2
-rw-r--r--drivers/regulator/dummy.c2
-rw-r--r--drivers/regulator/fan53555.c17
-rw-r--r--drivers/regulator/fixed.c24
-rw-r--r--drivers/regulator/gpio-regulator.c115
-rw-r--r--drivers/regulator/max77686.c172
-rw-r--r--drivers/regulator/max77693.c2
-rw-r--r--drivers/regulator/max77802.c254
-rw-r--r--drivers/regulator/max8952.c4
-rw-r--r--drivers/regulator/max8973-regulator.c3
-rw-r--r--drivers/regulator/max8997.c3
-rw-r--r--drivers/regulator/max8998.c5
-rw-r--r--drivers/regulator/mc13xxx-regulator-core.c3
-rw-r--r--drivers/regulator/of_regulator.c87
-rw-r--r--drivers/regulator/pwm-regulator.c3
-rw-r--r--drivers/regulator/qcom_rpm-regulator.c28
-rw-r--r--drivers/regulator/rk808-regulator.c57
-rw-r--r--drivers/regulator/rn5t618-regulator.c51
-rw-r--r--drivers/regulator/rt5033-regulator.c123
-rw-r--r--drivers/regulator/s2mpa01.c2
-rw-r--r--drivers/regulator/s2mps11.c105
-rw-r--r--drivers/regulator/s5m8767.c4
-rw-r--r--drivers/regulator/sky81452-regulator.c46
-rw-r--r--drivers/regulator/stw481x-vmmc.c3
-rw-r--r--drivers/regulator/ti-abb-regulator.c3
-rw-r--r--drivers/regulator/tps51632-regulator.c43
-rw-r--r--drivers/regulator/tps62360-regulator.c31
-rw-r--r--drivers/regulator/tps65090-regulator.c4
-rw-r--r--drivers/regulator/tps65218-regulator.c3
-rw-r--r--drivers/regulator/twl-regulator.c3
-rw-r--r--drivers/regulator/vexpress.c3
-rw-r--r--drivers/regulator/wm8994-regulator.c6
-rw-r--r--drivers/remoteproc/omap_remoteproc.c51
-rw-r--r--drivers/remoteproc/remoteproc_virtio.c11
-rw-r--r--drivers/reset/Makefile1
-rw-r--r--drivers/reset/core.c15
-rw-r--r--drivers/reset/reset-berlin.c131
-rw-r--r--drivers/reset/reset-socfpga.c15
-rw-r--r--drivers/reset/sti/Kconfig4
-rw-r--r--drivers/reset/sti/Makefile1
-rw-r--r--drivers/reset/sti/reset-stih407.c158
-rw-r--r--drivers/rtc/Kconfig38
-rw-r--r--drivers/rtc/class.c30
-rw-r--r--drivers/rtc/interface.c21
-rw-r--r--drivers/rtc/rtc-ab8500.c2
-rw-r--r--drivers/rtc/rtc-at91sam9.c138
-rw-r--r--drivers/rtc/rtc-ds1307.c127
-rw-r--r--drivers/rtc/rtc-ds1374.c285
-rw-r--r--drivers/rtc/rtc-isl12057.c83
-rw-r--r--drivers/rtc/rtc-lib.c38
-rw-r--r--drivers/rtc/rtc-omap.c547
-rw-r--r--drivers/rtc/rtc-pcf8563.c55
-rw-r--r--drivers/rtc/rtc-sirfsoc.c66
-rw-r--r--drivers/rtc/rtc-snvs.c39
-rw-r--r--drivers/s390/char/hmcdrv_dev.c16
-rw-r--r--drivers/s390/kvm/kvm_virtio.c11
-rw-r--r--drivers/s390/kvm/virtio_ccw.c203
-rw-r--r--drivers/s390/scsi/zfcp_aux.c6
-rw-r--r--drivers/s390/scsi/zfcp_ccw.c32
-rw-r--r--drivers/s390/scsi/zfcp_def.h3
-rw-r--r--drivers/s390/scsi/zfcp_erp.c7
-rw-r--r--drivers/s390/scsi/zfcp_ext.h1
-rw-r--r--drivers/s390/scsi/zfcp_fc.c52
-rw-r--r--drivers/s390/scsi/zfcp_fc.h14
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c3
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c26
-rw-r--r--drivers/s390/scsi/zfcp_sysfs.c66
-rw-r--r--drivers/scsi/3w-9xxx.c15
-rw-r--r--drivers/scsi/3w-sas.c15
-rw-r--r--drivers/scsi/3w-xxxx.c15
-rw-r--r--drivers/scsi/53c700.c51
-rw-r--r--drivers/scsi/BusLogic.c4
-rw-r--r--drivers/scsi/Kconfig18
-rw-r--r--drivers/scsi/Makefile3
-rw-r--r--drivers/scsi/NCR5380.c310
-rw-r--r--drivers/scsi/NCR5380.h78
-rw-r--r--drivers/scsi/aacraid/aachba.c4
-rw-r--r--drivers/scsi/aacraid/linit.c20
-rw-r--r--drivers/scsi/advansys.c11
-rw-r--r--drivers/scsi/aha152x.c994
-rw-r--r--drivers/scsi/aha1740.c2
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_osm.c19
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm.c23
-rw-r--r--drivers/scsi/aic94xx/aic94xx.h2
-rw-r--r--drivers/scsi/aic94xx/aic94xx_hwi.c3
-rw-r--r--drivers/scsi/aic94xx/aic94xx_init.c13
-rw-r--r--drivers/scsi/aic94xx/aic94xx_task.c13
-rw-r--r--drivers/scsi/am53c974.c586
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c9
-rw-r--r--drivers/scsi/arm/acornscsi.c12
-rw-r--r--drivers/scsi/arm/cumana_1.c18
-rw-r--r--drivers/scsi/arm/fas216.c39
-rw-r--r--drivers/scsi/arm/oak.c23
-rw-r--r--drivers/scsi/atari_NCR5380.c986
-rw-r--r--drivers/scsi/atari_scsi.c673
-rw-r--r--drivers/scsi/atari_scsi.h60
-rw-r--r--drivers/scsi/be2iscsi/be_main.c4
-rw-r--r--drivers/scsi/bfa/bfad_debugfs.c30
-rw-r--r--drivers/scsi/bfa/bfad_im.c18
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c16
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_io.c18
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c3
-rw-r--r--drivers/scsi/ch.c36
-rw-r--r--drivers/scsi/constants.c599
-rw-r--r--drivers/scsi/csiostor/csio_scsi.c37
-rw-r--r--drivers/scsi/cxgbi/cxgb3i/cxgb3i.c3
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c3
-rw-r--r--drivers/scsi/device_handler/scsi_dh.c55
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c82
-rw-r--r--drivers/scsi/device_handler/scsi_dh_emc.c86
-rw-r--r--drivers/scsi/device_handler/scsi_dh_hp_sw.c81
-rw-r--r--drivers/scsi/device_handler/scsi_dh_rdac.c82
-rw-r--r--drivers/scsi/dmx3191d.c31
-rw-r--r--drivers/scsi/dpt_i2o.c4
-rw-r--r--drivers/scsi/dtc.c85
-rw-r--r--drivers/scsi/dtc.h26
-rw-r--r--drivers/scsi/eata.c8
-rw-r--r--drivers/scsi/esas2r/esas2r.h5
-rw-r--r--drivers/scsi/esas2r/esas2r_ioctl.c22
-rw-r--r--drivers/scsi/esas2r/esas2r_main.c64
-rw-r--r--drivers/scsi/esp_scsi.c428
-rw-r--r--drivers/scsi/esp_scsi.h22
-rw-r--r--drivers/scsi/fcoe/fcoe.c6
-rw-r--r--drivers/scsi/fnic/fnic.h2
-rw-r--r--drivers/scsi/fnic/fnic_fcs.c5
-rw-r--r--drivers/scsi/fnic/fnic_main.c23
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c43
-rw-r--r--drivers/scsi/fnic/fnic_trace.c4
-rw-r--r--drivers/scsi/g_NCR5380.c224
-rw-r--r--drivers/scsi/g_NCR5380.h26
-rw-r--r--drivers/scsi/gdth.c5
-rw-r--r--drivers/scsi/hosts.c5
-rw-r--r--drivers/scsi/hpsa.c509
-rw-r--r--drivers/scsi/hpsa.h33
-rw-r--r--drivers/scsi/hpsa_cmd.h34
-rw-r--r--drivers/scsi/hptiop.c8
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c58
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c11
-rw-r--r--drivers/scsi/ipr.c175
-rw-r--r--drivers/scsi/ipr.h2
-rw-r--r--drivers/scsi/ips.c2
-rw-r--r--drivers/scsi/isci/init.c4
-rw-r--r--drivers/scsi/isci/task.c147
-rw-r--r--drivers/scsi/isci/task.h1
-rw-r--r--drivers/scsi/iscsi_tcp.c3
-rw-r--r--drivers/scsi/libfc/fc_fcp.c52
-rw-r--r--drivers/scsi/libiscsi.c19
-rw-r--r--drivers/scsi/libsas/sas_ata.c9
-rw-r--r--drivers/scsi/libsas/sas_expander.c2
-rw-r--r--drivers/scsi/libsas/sas_init.c21
-rw-r--r--drivers/scsi/libsas/sas_internal.h2
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c223
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c8
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c158
-rw-r--r--drivers/scsi/mac_scsi.c552
-rw-r--r--drivers/scsi/mac_scsi.h74
-rw-r--r--drivers/scsi/megaraid.c8
-rw-r--r--drivers/scsi/megaraid/megaraid_mbox.c23
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h23
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c164
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fp.c33
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c112
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.h18
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_scsih.c82
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c84
-rw-r--r--drivers/scsi/mvsas/mv_init.c24
-rw-r--r--drivers/scsi/mvsas/mv_sas.c109
-rw-r--r--drivers/scsi/mvsas/mv_sas.h10
-rw-r--r--drivers/scsi/ncr53c8xx.c5
-rw-r--r--drivers/scsi/osd/osd_uld.c2
-rw-r--r--drivers/scsi/osst.c29
-rw-r--r--drivers/scsi/pas16.c106
-rw-r--r--drivers/scsi/pas16.h21
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c4
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.c22
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.h3
-rw-r--r--drivers/scsi/pmcraid.c94
-rw-r--r--drivers/scsi/ps3rom.c4
-rw-r--r--drivers/scsi/qla1280.c5
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c92
-rw-r--r--drivers/scsi/qla2xxx/qla_mr.c13
-rw-r--r--drivers/scsi/qla2xxx/qla_nx2.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c83
-rw-r--r--drivers/scsi/qla4xxx/ql4_iocb.c10
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c35
-rw-r--r--drivers/scsi/scsi.c211
-rw-r--r--drivers/scsi/scsi_debug.c1995
-rw-r--r--drivers/scsi/scsi_devinfo.c1
-rw-r--r--drivers/scsi/scsi_error.c211
-rw-r--r--drivers/scsi/scsi_ioctl.c74
-rw-r--r--drivers/scsi/scsi_lib.c165
-rw-r--r--drivers/scsi/scsi_logging.h1
-rw-r--r--drivers/scsi/scsi_priv.h2
-rw-r--r--drivers/scsi/scsi_scan.c27
-rw-r--r--drivers/scsi/scsi_sysfs.c18
-rw-r--r--drivers/scsi/scsi_trace.c2
-rw-r--r--drivers/scsi/scsi_transport_spi.c23
-rw-r--r--drivers/scsi/scsicam.c4
-rw-r--r--drivers/scsi/sd.c114
-rw-r--r--drivers/scsi/sd.h6
-rw-r--r--drivers/scsi/ses.c2
-rw-r--r--drivers/scsi/sg.c62
-rw-r--r--drivers/scsi/sr.c19
-rw-r--r--drivers/scsi/sr.h3
-rw-r--r--drivers/scsi/sr_ioctl.c10
-rw-r--r--drivers/scsi/st.c47
-rw-r--r--drivers/scsi/stex.c30
-rw-r--r--drivers/scsi/storvsc_drv.c6
-rw-r--r--drivers/scsi/sun3_NCR5380.c2933
-rw-r--r--drivers/scsi/sun3_scsi.c512
-rw-r--r--drivers/scsi/sun3_scsi.h84
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_glue.c4
-rw-r--r--drivers/scsi/t128.c83
-rw-r--r--drivers/scsi/t128.h23
-rw-r--r--drivers/scsi/tmscsim.c2620
-rw-r--r--drivers/scsi/tmscsim.h551
-rw-r--r--drivers/scsi/u14-34f.c10
-rw-r--r--drivers/scsi/ufs/ufshcd-pltfrm.c15
-rw-r--r--drivers/scsi/ufs/ufshcd.c143
-rw-r--r--drivers/scsi/ufs/ufshcd.h2
-rw-r--r--drivers/scsi/virtio_scsi.c90
-rw-r--r--drivers/scsi/vmw_pvscsi.c32
-rw-r--r--drivers/scsi/wd7000.c1
-rw-r--r--drivers/scsi/wd719x.c996
-rw-r--r--drivers/scsi/wd719x.h249
-rw-r--r--drivers/sh/pm_runtime.c4
-rw-r--r--drivers/soc/ti/knav_qmss.h8
-rw-r--r--drivers/soc/ti/knav_qmss_queue.c13
-rw-r--r--drivers/soc/versatile/Kconfig9
-rw-r--r--drivers/soc/versatile/Makefile1
-rw-r--r--drivers/soc/versatile/soc-integrator.c155
-rw-r--r--drivers/spi/Kconfig17
-rw-r--r--drivers/spi/Makefile2
-rw-r--r--drivers/spi/spi-atmel.c115
-rw-r--r--drivers/spi/spi-cadence.c33
-rw-r--r--drivers/spi/spi-dw-mid.c114
-rw-r--r--drivers/spi/spi-dw.c6
-rw-r--r--drivers/spi/spi-dw.h2
-rw-r--r--drivers/spi/spi-fsl-cpm.c5
-rw-r--r--drivers/spi/spi-fsl-dspi.c3
-rw-r--r--drivers/spi/spi-fsl-espi.c59
-rw-r--r--drivers/spi/spi-fsl-lib.c59
-rw-r--r--drivers/spi/spi-fsl-lib.h10
-rw-r--r--drivers/spi/spi-fsl-spi.c17
-rw-r--r--drivers/spi/spi-gpio.c37
-rw-r--r--drivers/spi/spi-img-spfi.c746
-rw-r--r--drivers/spi/spi-meson-spifc.c462
-rw-r--r--drivers/spi/spi-mxs.c12
-rw-r--r--drivers/spi/spi-pl022.c2
-rw-r--r--drivers/spi/spi-pxa2xx-pci.c8
-rw-r--r--drivers/spi/spi-pxa2xx.c304
-rw-r--r--drivers/spi/spi-pxa2xx.h16
-rw-r--r--drivers/spi/spi-rockchip.c2
-rw-r--r--drivers/spi/spi-s3c64xx.c41
-rw-r--r--drivers/spi/spi-sirf.c16
-rw-r--r--drivers/spi/spi-txx9.c3
-rw-r--r--drivers/spi/spi.c8
-rw-r--r--drivers/spi/spidev.c16
-rw-r--r--drivers/staging/iio/light/isl29028.c3
-rw-r--r--drivers/staging/lustre/lustre/libcfs/tracefile.c4
-rw-r--r--drivers/staging/lustre/lustre/llite/dcache.c21
-rw-r--r--drivers/staging/lustre/lustre/llite/dir.c9
-rw-r--r--drivers/staging/lustre/lustre/llite/file.c158
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_internal.h10
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_lib.c8
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_mmap.c12
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_nfs.c8
-rw-r--r--drivers/staging/lustre/lustre/llite/lloop.c7
-rw-r--r--drivers/staging/lustre/lustre/llite/namei.c388
-rw-r--r--drivers/staging/lustre/lustre/llite/statahead.c33
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_io.c4
-rw-r--r--drivers/staging/lustre/lustre/llite/xattr.c9
-rw-r--r--drivers/staging/media/Kconfig4
-rw-r--r--drivers/staging/media/Makefile4
-rw-r--r--drivers/staging/media/bcm2048/radio-bcm2048.c7
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipe.c18
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c26
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipeif.c100
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_isif.c90
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_resizer.c98
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c18
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_video.c8
-rw-r--r--drivers/staging/media/dt3155v4l/dt3155v4l.c5
-rw-r--r--drivers/staging/media/lirc/Kconfig6
-rw-r--r--drivers/staging/media/lirc/Makefile1
-rw-r--r--drivers/staging/media/lirc/lirc_igorplugusb.c508
-rw-r--r--drivers/staging/media/lirc/lirc_imon.c10
-rw-r--r--drivers/staging/media/lirc/lirc_sasem.c10
-rw-r--r--drivers/staging/media/lirc/lirc_zilog.c8
-rw-r--r--drivers/staging/media/mn88472/Kconfig7
-rw-r--r--drivers/staging/media/mn88472/Makefile5
-rw-r--r--drivers/staging/media/mn88472/TODO21
-rw-r--r--drivers/staging/media/mn88472/mn88472.c523
-rw-r--r--drivers/staging/media/mn88472/mn88472_priv.h36
-rw-r--r--drivers/staging/media/mn88473/Kconfig7
-rw-r--r--drivers/staging/media/mn88473/Makefile5
-rw-r--r--drivers/staging/media/mn88473/TODO21
-rw-r--r--drivers/staging/media/mn88473/mn88473.c464
-rw-r--r--drivers/staging/media/mn88473/mn88473_priv.h36
-rw-r--r--drivers/staging/media/omap24xx/Kconfig35
-rw-r--r--drivers/staging/media/omap24xx/Makefile5
-rw-r--r--drivers/staging/media/omap24xx/omap24xxcam-dma.c598
-rw-r--r--drivers/staging/media/omap24xx/omap24xxcam.c1882
-rw-r--r--drivers/staging/media/omap24xx/omap24xxcam.h596
-rw-r--r--drivers/staging/media/omap24xx/tcm825x.c938
-rw-r--r--drivers/staging/media/omap24xx/tcm825x.h200
-rw-r--r--drivers/staging/media/omap24xx/v4l2-int-device.c164
-rw-r--r--drivers/staging/media/omap24xx/v4l2-int-device.h305
-rw-r--r--drivers/staging/media/omap4iss/iss_csi2.c62
-rw-r--r--drivers/staging/media/omap4iss/iss_ipipe.c16
-rw-r--r--drivers/staging/media/omap4iss/iss_ipipeif.c28
-rw-r--r--drivers/staging/media/omap4iss/iss_resizer.c26
-rw-r--r--drivers/staging/media/omap4iss/iss_video.c78
-rw-r--r--drivers/staging/media/omap4iss/iss_video.h10
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_cmd.c22
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme_ext.c12
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_wlan_util.c2
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_intf.c1
-rw-r--r--drivers/target/loopback/tcm_loop.c75
-rw-r--r--drivers/target/target_core_alua.c2
-rw-r--r--drivers/target/target_core_pr.c2
-rw-r--r--drivers/target/target_core_sbc.c2
-rw-r--r--drivers/thermal/cpu_cooling.c37
-rw-r--r--drivers/thermal/samsung/exynos_thermal_common.c9
-rw-r--r--drivers/thermal/st/st_thermal.c3
-rw-r--r--drivers/tty/n_tty.c17
-rw-r--r--drivers/tty/serial/Kconfig4
-rw-r--r--drivers/tty/serial/of_serial.c27
-rw-r--r--drivers/usb/core/driver.c6
-rw-r--r--drivers/usb/core/hcd-pci.c11
-rw-r--r--drivers/usb/core/hcd.c12
-rw-r--r--drivers/usb/core/hub.c6
-rw-r--r--drivers/usb/core/port.c4
-rw-r--r--drivers/usb/core/quirks.c3
-rw-r--r--drivers/usb/core/sysfs.c13
-rw-r--r--drivers/usb/core/usb.c4
-rw-r--r--drivers/usb/core/usb.h23
-rw-r--r--drivers/usb/dwc3/ep0.c8
-rw-r--r--drivers/usb/gadget/legacy/tcm_usb_gadget.c2
-rw-r--r--drivers/usb/host/ehci-pci.c2
-rw-r--r--drivers/usb/host/sl811-hcd.c5
-rw-r--r--drivers/usb/host/u132-hcd.c3
-rw-r--r--drivers/usb/host/xhci-hub.c7
-rw-r--r--drivers/usb/host/xhci-pci.c2
-rw-r--r--drivers/usb/host/xhci-plat.c10
-rw-r--r--drivers/usb/host/xhci-ring.c43
-rw-r--r--drivers/usb/host/xhci.c136
-rw-r--r--drivers/usb/host/xhci.h2
-rw-r--r--drivers/usb/phy/phy-msm-usb.c2
-rw-r--r--drivers/usb/serial/cp210x.c1
-rw-r--r--drivers/usb/serial/ftdi_sio.c33
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h39
-rw-r--r--drivers/usb/serial/keyspan.c97
-rw-r--r--drivers/usb/serial/ssu100.c11
-rw-r--r--drivers/usb/storage/debug.c12
-rw-r--r--drivers/usb/storage/uas.c7
-rw-r--r--drivers/usb/storage/unusual_uas.h7
-rw-r--r--drivers/vfio/pci/vfio_pci_intrs.c2
-rw-r--r--drivers/vhost/net.c31
-rw-r--r--drivers/vhost/scsi.c22
-rw-r--r--drivers/vhost/vhost.c93
-rw-r--r--drivers/vhost/vhost.h41
-rw-r--r--drivers/video/backlight/Kconfig1
-rw-r--r--drivers/video/backlight/lp855x_bl.c55
-rw-r--r--drivers/video/backlight/pwm_bl.c5
-rw-r--r--drivers/video/console/fbcon.c2
-rw-r--r--drivers/video/fbdev/Kconfig19
-rw-r--r--drivers/video/fbdev/amba-clcd.c1
-rw-r--r--drivers/video/fbdev/arkfb.c2
-rw-r--r--drivers/video/fbdev/gbefb.c3
-rw-r--r--drivers/video/fbdev/mmp/core.c6
-rw-r--r--drivers/video/fbdev/mmp/hw/mmp_ctrl.c3
-rw-r--r--drivers/video/fbdev/mx3fb.c3
-rw-r--r--drivers/video/fbdev/mxsfb.c19
-rw-r--r--drivers/video/fbdev/omap2/displays-new/connector-dvi.c9
-rw-r--r--drivers/video/fbdev/omap2/displays-new/connector-hdmi.c99
-rw-r--r--drivers/video/fbdev/omap2/displays-new/encoder-tfp410.c1
-rw-r--r--drivers/video/fbdev/omap2/displays-new/encoder-tpd12s015.c57
-rw-r--r--drivers/video/fbdev/omap2/dss/Kconfig7
-rw-r--r--drivers/video/fbdev/omap2/dss/Makefile2
-rw-r--r--drivers/video/fbdev/omap2/dss/dispc.c20
-rw-r--r--drivers/video/fbdev/omap2/dss/dpi.c327
-rw-r--r--drivers/video/fbdev/omap2/dss/dsi.c659
-rw-r--r--drivers/video/fbdev/omap2/dss/dss-of.c58
-rw-r--r--drivers/video/fbdev/omap2/dss/dss.c123
-rw-r--r--drivers/video/fbdev/omap2/dss/dss.h225
-rw-r--r--drivers/video/fbdev/omap2/dss/dss_features.c42
-rw-r--r--drivers/video/fbdev/omap2/dss/dss_features.h12
-rw-r--r--drivers/video/fbdev/omap2/dss/hdmi.h71
-rw-r--r--drivers/video/fbdev/omap2/dss/hdmi4.c338
-rw-r--r--drivers/video/fbdev/omap2/dss/hdmi4_core.c14
-rw-r--r--drivers/video/fbdev/omap2/dss/hdmi4_core.h4
-rw-r--r--drivers/video/fbdev/omap2/dss/hdmi5.c338
-rw-r--r--drivers/video/fbdev/omap2/dss/hdmi5_core.c11
-rw-r--r--drivers/video/fbdev/omap2/dss/hdmi5_core.h2
-rw-r--r--drivers/video/fbdev/omap2/dss/hdmi_common.c2
-rw-r--r--drivers/video/fbdev/omap2/dss/hdmi_phy.c31
-rw-r--r--drivers/video/fbdev/omap2/dss/hdmi_pll.c313
-rw-r--r--drivers/video/fbdev/omap2/dss/hdmi_wp.c16
-rw-r--r--drivers/video/fbdev/omap2/dss/output.c19
-rw-r--r--drivers/video/fbdev/omap2/dss/pll.c378
-rw-r--r--drivers/video/fbdev/omap2/dss/sdi.c2
-rw-r--r--drivers/video/fbdev/s3fb.c2
-rw-r--r--drivers/video/fbdev/sh_mobile_lcdcfb.c3
-rw-r--r--drivers/video/fbdev/simplefb.c162
-rw-r--r--drivers/video/fbdev/sis/sis_main.c14
-rw-r--r--drivers/video/fbdev/sm501fb.c1
-rw-r--r--drivers/video/fbdev/smscufx.c6
-rw-r--r--drivers/video/fbdev/udlfb.c9
-rw-r--r--drivers/video/fbdev/uvesafb.c6
-rw-r--r--drivers/video/fbdev/vermilion/vermilion.c6
-rw-r--r--drivers/video/fbdev/via/viafbdev.c3
-rw-r--r--drivers/video/fbdev/vt8623fb.c2
-rw-r--r--drivers/virtio/Makefile1
-rw-r--r--drivers/virtio/virtio.c102
-rw-r--r--drivers/virtio/virtio_mmio.c17
-rw-r--r--drivers/virtio/virtio_pci.c802
-rw-r--r--drivers/virtio/virtio_pci_common.c464
-rw-r--r--drivers/virtio/virtio_pci_common.h136
-rw-r--r--drivers/virtio/virtio_pci_legacy.c326
-rw-r--r--drivers/virtio/virtio_ring.c109
-rw-r--r--drivers/w1/masters/omap_hdq.c7
-rw-r--r--drivers/watchdog/Kconfig4
-rw-r--r--drivers/watchdog/orion_wdt.c42
-rw-r--r--drivers/watchdog/s3c2410_wdt.c2
-rw-r--r--drivers/xen/efi.c1
-rw-r--r--drivers/xen/xen-scsiback.c4
1253 files changed, 58562 insertions, 38307 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 1a693d3f9d51..af02a8a8ec4a 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -1,5 +1,7 @@
1menu "Device Drivers" 1menu "Device Drivers"
2 2
3source "drivers/amba/Kconfig"
4
3source "drivers/base/Kconfig" 5source "drivers/base/Kconfig"
4 6
5source "drivers/bus/Kconfig" 7source "drivers/bus/Kconfig"
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index b23fe37f67c0..8951cefb0a96 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -360,15 +360,14 @@ config ACPI_BGRT
360config ACPI_REDUCED_HARDWARE_ONLY 360config ACPI_REDUCED_HARDWARE_ONLY
361 bool "Hardware-reduced ACPI support only" if EXPERT 361 bool "Hardware-reduced ACPI support only" if EXPERT
362 def_bool n 362 def_bool n
363 depends on ACPI
364 help 363 help
365 This config item changes the way the ACPI code is built. When this 364 This config item changes the way the ACPI code is built. When this
366 option is selected, the kernel will use a specialized version of 365 option is selected, the kernel will use a specialized version of
367 ACPICA that ONLY supports the ACPI "reduced hardware" mode. The 366 ACPICA that ONLY supports the ACPI "reduced hardware" mode. The
368 resulting kernel will be smaller but it will also be restricted to 367 resulting kernel will be smaller but it will also be restricted to
369 running in ACPI reduced hardware mode ONLY. 368 running in ACPI reduced hardware mode ONLY.
370 369
371 If you are unsure what to do, do not enable this option. 370 If you are unsure what to do, do not enable this option.
372 371
373source "drivers/acpi/apei/Kconfig" 372source "drivers/acpi/apei/Kconfig"
374 373
@@ -394,4 +393,27 @@ config ACPI_EXTLOG
394 driver adds support for that functionality with corresponding 393 driver adds support for that functionality with corresponding
395 tracepoint which carries that information to userspace. 394 tracepoint which carries that information to userspace.
396 395
396menuconfig PMIC_OPREGION
397 bool "PMIC (Power Management Integrated Circuit) operation region support"
398 help
399 Select this option to enable support for ACPI operation
400 region of the PMIC chip. The operation region can be used
401 to control power rails and sensor reading/writing on the
402 PMIC chip.
403
404if PMIC_OPREGION
405config CRC_PMIC_OPREGION
406 bool "ACPI operation region support for CrystalCove PMIC"
407 depends on INTEL_SOC_PMIC
408 help
409 This config adds ACPI operation region support for CrystalCove PMIC.
410
411config XPOWER_PMIC_OPREGION
412 bool "ACPI operation region support for XPower AXP288 PMIC"
413 depends on AXP288_ADC = y
414 help
415 This config adds ACPI operation region support for XPower AXP288 PMIC.
416
417endif
418
397endif # ACPI 419endif # ACPI
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index c3b2fcb729f3..f74317cc1ca9 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -47,6 +47,7 @@ acpi-y += int340x_thermal.o
47acpi-y += power.o 47acpi-y += power.o
48acpi-y += event.o 48acpi-y += event.o
49acpi-y += sysfs.o 49acpi-y += sysfs.o
50acpi-y += property.o
50acpi-$(CONFIG_X86) += acpi_cmos_rtc.o 51acpi-$(CONFIG_X86) += acpi_cmos_rtc.o
51acpi-$(CONFIG_DEBUG_FS) += debugfs.o 52acpi-$(CONFIG_DEBUG_FS) += debugfs.o
52acpi-$(CONFIG_ACPI_NUMA) += numa.o 53acpi-$(CONFIG_ACPI_NUMA) += numa.o
@@ -87,3 +88,7 @@ obj-$(CONFIG_ACPI_PROCESSOR_AGGREGATOR) += acpi_pad.o
87obj-$(CONFIG_ACPI_APEI) += apei/ 88obj-$(CONFIG_ACPI_APEI) += apei/
88 89
89obj-$(CONFIG_ACPI_EXTLOG) += acpi_extlog.o 90obj-$(CONFIG_ACPI_EXTLOG) += acpi_extlog.o
91
92obj-$(CONFIG_PMIC_OPREGION) += pmic/intel_pmic.o
93obj-$(CONFIG_CRC_PMIC_OPREGION) += pmic/intel_pmic_crc.o
94obj-$(CONFIG_XPOWER_PMIC_OPREGION) += pmic/intel_pmic_xpower.o
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 93d160661f4c..4f3febf8a589 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * ACPI support for Intel Lynxpoint LPSS. 2 * ACPI support for Intel Lynxpoint LPSS.
3 * 3 *
4 * Copyright (C) 2013, Intel Corporation 4 * Copyright (C) 2013, 2014, Intel Corporation
5 * Authors: Mika Westerberg <mika.westerberg@linux.intel.com> 5 * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
6 * Rafael J. Wysocki <rafael.j.wysocki@intel.com> 6 * Rafael J. Wysocki <rafael.j.wysocki@intel.com>
7 * 7 *
@@ -60,6 +60,8 @@ ACPI_MODULE_NAME("acpi_lpss");
60#define LPSS_CLK_DIVIDER BIT(2) 60#define LPSS_CLK_DIVIDER BIT(2)
61#define LPSS_LTR BIT(3) 61#define LPSS_LTR BIT(3)
62#define LPSS_SAVE_CTX BIT(4) 62#define LPSS_SAVE_CTX BIT(4)
63#define LPSS_DEV_PROXY BIT(5)
64#define LPSS_PROXY_REQ BIT(6)
63 65
64struct lpss_private_data; 66struct lpss_private_data;
65 67
@@ -70,8 +72,10 @@ struct lpss_device_desc {
70 void (*setup)(struct lpss_private_data *pdata); 72 void (*setup)(struct lpss_private_data *pdata);
71}; 73};
72 74
75static struct device *proxy_device;
76
73static struct lpss_device_desc lpss_dma_desc = { 77static struct lpss_device_desc lpss_dma_desc = {
74 .flags = LPSS_CLK, 78 .flags = LPSS_CLK | LPSS_PROXY_REQ,
75}; 79};
76 80
77struct lpss_private_data { 81struct lpss_private_data {
@@ -146,22 +150,24 @@ static struct lpss_device_desc byt_pwm_dev_desc = {
146}; 150};
147 151
148static struct lpss_device_desc byt_uart_dev_desc = { 152static struct lpss_device_desc byt_uart_dev_desc = {
149 .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX, 153 .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX |
154 LPSS_DEV_PROXY,
150 .prv_offset = 0x800, 155 .prv_offset = 0x800,
151 .setup = lpss_uart_setup, 156 .setup = lpss_uart_setup,
152}; 157};
153 158
154static struct lpss_device_desc byt_spi_dev_desc = { 159static struct lpss_device_desc byt_spi_dev_desc = {
155 .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX, 160 .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX |
161 LPSS_DEV_PROXY,
156 .prv_offset = 0x400, 162 .prv_offset = 0x400,
157}; 163};
158 164
159static struct lpss_device_desc byt_sdio_dev_desc = { 165static struct lpss_device_desc byt_sdio_dev_desc = {
160 .flags = LPSS_CLK, 166 .flags = LPSS_CLK | LPSS_DEV_PROXY,
161}; 167};
162 168
163static struct lpss_device_desc byt_i2c_dev_desc = { 169static struct lpss_device_desc byt_i2c_dev_desc = {
164 .flags = LPSS_CLK | LPSS_SAVE_CTX, 170 .flags = LPSS_CLK | LPSS_SAVE_CTX | LPSS_DEV_PROXY,
165 .prv_offset = 0x800, 171 .prv_offset = 0x800,
166 .setup = byt_i2c_setup, 172 .setup = byt_i2c_setup,
167}; 173};
@@ -368,6 +374,8 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
368 adev->driver_data = pdata; 374 adev->driver_data = pdata;
369 pdev = acpi_create_platform_device(adev); 375 pdev = acpi_create_platform_device(adev);
370 if (!IS_ERR_OR_NULL(pdev)) { 376 if (!IS_ERR_OR_NULL(pdev)) {
377 if (!proxy_device && dev_desc->flags & LPSS_DEV_PROXY)
378 proxy_device = &pdev->dev;
371 return 1; 379 return 1;
372 } 380 }
373 381
@@ -499,14 +507,15 @@ static void acpi_lpss_set_ltr(struct device *dev, s32 val)
499/** 507/**
500 * acpi_lpss_save_ctx() - Save the private registers of LPSS device 508 * acpi_lpss_save_ctx() - Save the private registers of LPSS device
501 * @dev: LPSS device 509 * @dev: LPSS device
510 * @pdata: pointer to the private data of the LPSS device
502 * 511 *
503 * Most LPSS devices have private registers which may loose their context when 512 * Most LPSS devices have private registers which may loose their context when
504 * the device is powered down. acpi_lpss_save_ctx() saves those registers into 513 * the device is powered down. acpi_lpss_save_ctx() saves those registers into
505 * prv_reg_ctx array. 514 * prv_reg_ctx array.
506 */ 515 */
507static void acpi_lpss_save_ctx(struct device *dev) 516static void acpi_lpss_save_ctx(struct device *dev,
517 struct lpss_private_data *pdata)
508{ 518{
509 struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
510 unsigned int i; 519 unsigned int i;
511 520
512 for (i = 0; i < LPSS_PRV_REG_COUNT; i++) { 521 for (i = 0; i < LPSS_PRV_REG_COUNT; i++) {
@@ -521,12 +530,13 @@ static void acpi_lpss_save_ctx(struct device *dev)
521/** 530/**
522 * acpi_lpss_restore_ctx() - Restore the private registers of LPSS device 531 * acpi_lpss_restore_ctx() - Restore the private registers of LPSS device
523 * @dev: LPSS device 532 * @dev: LPSS device
533 * @pdata: pointer to the private data of the LPSS device
524 * 534 *
525 * Restores the registers that were previously stored with acpi_lpss_save_ctx(). 535 * Restores the registers that were previously stored with acpi_lpss_save_ctx().
526 */ 536 */
527static void acpi_lpss_restore_ctx(struct device *dev) 537static void acpi_lpss_restore_ctx(struct device *dev,
538 struct lpss_private_data *pdata)
528{ 539{
529 struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
530 unsigned int i; 540 unsigned int i;
531 541
532 /* 542 /*
@@ -549,54 +559,82 @@ static void acpi_lpss_restore_ctx(struct device *dev)
549#ifdef CONFIG_PM_SLEEP 559#ifdef CONFIG_PM_SLEEP
550static int acpi_lpss_suspend_late(struct device *dev) 560static int acpi_lpss_suspend_late(struct device *dev)
551{ 561{
552 int ret = pm_generic_suspend_late(dev); 562 struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
563 int ret;
553 564
565 ret = pm_generic_suspend_late(dev);
554 if (ret) 566 if (ret)
555 return ret; 567 return ret;
556 568
557 acpi_lpss_save_ctx(dev); 569 if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
570 acpi_lpss_save_ctx(dev, pdata);
571
558 return acpi_dev_suspend_late(dev); 572 return acpi_dev_suspend_late(dev);
559} 573}
560 574
561static int acpi_lpss_resume_early(struct device *dev) 575static int acpi_lpss_resume_early(struct device *dev)
562{ 576{
563 int ret = acpi_dev_resume_early(dev); 577 struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
578 int ret;
564 579
580 ret = acpi_dev_resume_early(dev);
565 if (ret) 581 if (ret)
566 return ret; 582 return ret;
567 583
568 acpi_lpss_restore_ctx(dev); 584 if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
585 acpi_lpss_restore_ctx(dev, pdata);
586
569 return pm_generic_resume_early(dev); 587 return pm_generic_resume_early(dev);
570} 588}
571#endif /* CONFIG_PM_SLEEP */ 589#endif /* CONFIG_PM_SLEEP */
572 590
573#ifdef CONFIG_PM_RUNTIME
574static int acpi_lpss_runtime_suspend(struct device *dev) 591static int acpi_lpss_runtime_suspend(struct device *dev)
575{ 592{
576 int ret = pm_generic_runtime_suspend(dev); 593 struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
594 int ret;
595
596 ret = pm_generic_runtime_suspend(dev);
597 if (ret)
598 return ret;
577 599
600 if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
601 acpi_lpss_save_ctx(dev, pdata);
602
603 ret = acpi_dev_runtime_suspend(dev);
578 if (ret) 604 if (ret)
579 return ret; 605 return ret;
580 606
581 acpi_lpss_save_ctx(dev); 607 if (pdata->dev_desc->flags & LPSS_PROXY_REQ && proxy_device)
582 return acpi_dev_runtime_suspend(dev); 608 return pm_runtime_put_sync_suspend(proxy_device);
609
610 return 0;
583} 611}
584 612
585static int acpi_lpss_runtime_resume(struct device *dev) 613static int acpi_lpss_runtime_resume(struct device *dev)
586{ 614{
587 int ret = acpi_dev_runtime_resume(dev); 615 struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
616 int ret;
617
618 if (pdata->dev_desc->flags & LPSS_PROXY_REQ && proxy_device) {
619 ret = pm_runtime_get_sync(proxy_device);
620 if (ret)
621 return ret;
622 }
588 623
624 ret = acpi_dev_runtime_resume(dev);
589 if (ret) 625 if (ret)
590 return ret; 626 return ret;
591 627
592 acpi_lpss_restore_ctx(dev); 628 if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
629 acpi_lpss_restore_ctx(dev, pdata);
630
593 return pm_generic_runtime_resume(dev); 631 return pm_generic_runtime_resume(dev);
594} 632}
595#endif /* CONFIG_PM_RUNTIME */
596#endif /* CONFIG_PM */ 633#endif /* CONFIG_PM */
597 634
598static struct dev_pm_domain acpi_lpss_pm_domain = { 635static struct dev_pm_domain acpi_lpss_pm_domain = {
599 .ops = { 636 .ops = {
637#ifdef CONFIG_PM
600#ifdef CONFIG_PM_SLEEP 638#ifdef CONFIG_PM_SLEEP
601 .prepare = acpi_subsys_prepare, 639 .prepare = acpi_subsys_prepare,
602 .complete = acpi_subsys_complete, 640 .complete = acpi_subsys_complete,
@@ -608,7 +646,6 @@ static struct dev_pm_domain acpi_lpss_pm_domain = {
608 .poweroff_late = acpi_lpss_suspend_late, 646 .poweroff_late = acpi_lpss_suspend_late,
609 .restore_early = acpi_lpss_resume_early, 647 .restore_early = acpi_lpss_resume_early,
610#endif 648#endif
611#ifdef CONFIG_PM_RUNTIME
612 .runtime_suspend = acpi_lpss_runtime_suspend, 649 .runtime_suspend = acpi_lpss_runtime_suspend,
613 .runtime_resume = acpi_lpss_runtime_resume, 650 .runtime_resume = acpi_lpss_runtime_resume,
614#endif 651#endif
@@ -631,30 +668,27 @@ static int acpi_lpss_platform_notify(struct notifier_block *nb,
631 return 0; 668 return 0;
632 669
633 pdata = acpi_driver_data(adev); 670 pdata = acpi_driver_data(adev);
634 if (!pdata || !pdata->mmio_base) 671 if (!pdata)
635 return 0; 672 return 0;
636 673
637 if (pdata->mmio_size < pdata->dev_desc->prv_offset + LPSS_LTR_SIZE) { 674 if (pdata->mmio_base &&
675 pdata->mmio_size < pdata->dev_desc->prv_offset + LPSS_LTR_SIZE) {
638 dev_err(&pdev->dev, "MMIO size insufficient to access LTR\n"); 676 dev_err(&pdev->dev, "MMIO size insufficient to access LTR\n");
639 return 0; 677 return 0;
640 } 678 }
641 679
642 switch (action) { 680 switch (action) {
643 case BUS_NOTIFY_BOUND_DRIVER:
644 if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
645 pdev->dev.pm_domain = &acpi_lpss_pm_domain;
646 break;
647 case BUS_NOTIFY_UNBOUND_DRIVER:
648 if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
649 pdev->dev.pm_domain = NULL;
650 break;
651 case BUS_NOTIFY_ADD_DEVICE: 681 case BUS_NOTIFY_ADD_DEVICE:
682 pdev->dev.pm_domain = &acpi_lpss_pm_domain;
652 if (pdata->dev_desc->flags & LPSS_LTR) 683 if (pdata->dev_desc->flags & LPSS_LTR)
653 return sysfs_create_group(&pdev->dev.kobj, 684 return sysfs_create_group(&pdev->dev.kobj,
654 &lpss_attr_group); 685 &lpss_attr_group);
686 break;
655 case BUS_NOTIFY_DEL_DEVICE: 687 case BUS_NOTIFY_DEL_DEVICE:
656 if (pdata->dev_desc->flags & LPSS_LTR) 688 if (pdata->dev_desc->flags & LPSS_LTR)
657 sysfs_remove_group(&pdev->dev.kobj, &lpss_attr_group); 689 sysfs_remove_group(&pdev->dev.kobj, &lpss_attr_group);
690 pdev->dev.pm_domain = NULL;
691 break;
658 default: 692 default:
659 break; 693 break;
660 } 694 }
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index ebf02cc10a43..7f60582d0c8c 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -305,6 +305,7 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_db_output_flags, ACPI_DB_CONSOLE_OUTPUT);
305 305
306ACPI_INIT_GLOBAL(u8, acpi_gbl_no_resource_disassembly, FALSE); 306ACPI_INIT_GLOBAL(u8, acpi_gbl_no_resource_disassembly, FALSE);
307ACPI_INIT_GLOBAL(u8, acpi_gbl_ignore_noop_operator, FALSE); 307ACPI_INIT_GLOBAL(u8, acpi_gbl_ignore_noop_operator, FALSE);
308ACPI_INIT_GLOBAL(u8, acpi_gbl_cstyle_disassembly, TRUE);
308 309
309ACPI_GLOBAL(u8, acpi_gbl_db_opt_disasm); 310ACPI_GLOBAL(u8, acpi_gbl_db_opt_disasm);
310ACPI_GLOBAL(u8, acpi_gbl_db_opt_verbose); 311ACPI_GLOBAL(u8, acpi_gbl_db_opt_verbose);
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index c00e7e41ad75..680d23bbae7c 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -454,6 +454,7 @@ struct acpi_gpe_register_info {
454 u16 base_gpe_number; /* Base GPE number for this register */ 454 u16 base_gpe_number; /* Base GPE number for this register */
455 u8 enable_for_wake; /* GPEs to keep enabled when sleeping */ 455 u8 enable_for_wake; /* GPEs to keep enabled when sleeping */
456 u8 enable_for_run; /* GPEs to keep enabled when running */ 456 u8 enable_for_run; /* GPEs to keep enabled when running */
457 u8 enable_mask; /* Current mask of enabled GPEs */
457}; 458};
458 459
459/* 460/*
@@ -722,6 +723,7 @@ union acpi_parse_value {
722 ACPI_DISASM_ONLY_MEMBERS (\ 723 ACPI_DISASM_ONLY_MEMBERS (\
723 u8 disasm_flags; /* Used during AML disassembly */\ 724 u8 disasm_flags; /* Used during AML disassembly */\
724 u8 disasm_opcode; /* Subtype used for disassembly */\ 725 u8 disasm_opcode; /* Subtype used for disassembly */\
726 char *operator_symbol;/* Used for C-style operator name strings */\
725 char aml_op_name[16]) /* Op name (debug only) */ 727 char aml_op_name[16]) /* Op name (debug only) */
726 728
727/* Flags for disasm_flags field above */ 729/* Flags for disasm_flags field above */
@@ -827,6 +829,8 @@ struct acpi_parse_state {
827#define ACPI_PARSEOP_EMPTY_TERMLIST 0x04 829#define ACPI_PARSEOP_EMPTY_TERMLIST 0x04
828#define ACPI_PARSEOP_PREDEF_CHECKED 0x08 830#define ACPI_PARSEOP_PREDEF_CHECKED 0x08
829#define ACPI_PARSEOP_SPECIAL 0x10 831#define ACPI_PARSEOP_SPECIAL 0x10
832#define ACPI_PARSEOP_COMPOUND 0x20
833#define ACPI_PARSEOP_ASSIGNMENT 0x40
830 834
831/***************************************************************************** 835/*****************************************************************************
832 * 836 *
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index 2095dfb72bcb..aa70154cf4fa 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -134,7 +134,7 @@ acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
134 134
135 /* Enable the requested GPE */ 135 /* Enable the requested GPE */
136 136
137 status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE); 137 status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE_SAVE);
138 return_ACPI_STATUS(status); 138 return_ACPI_STATUS(status);
139} 139}
140 140
@@ -213,7 +213,7 @@ acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info)
213 if (ACPI_SUCCESS(status)) { 213 if (ACPI_SUCCESS(status)) {
214 status = 214 status =
215 acpi_hw_low_set_gpe(gpe_event_info, 215 acpi_hw_low_set_gpe(gpe_event_info,
216 ACPI_GPE_DISABLE); 216 ACPI_GPE_DISABLE_SAVE);
217 } 217 }
218 218
219 if (ACPI_FAILURE(status)) { 219 if (ACPI_FAILURE(status)) {
@@ -616,8 +616,11 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
616static void ACPI_SYSTEM_XFACE acpi_ev_asynch_enable_gpe(void *context) 616static void ACPI_SYSTEM_XFACE acpi_ev_asynch_enable_gpe(void *context)
617{ 617{
618 struct acpi_gpe_event_info *gpe_event_info = context; 618 struct acpi_gpe_event_info *gpe_event_info = context;
619 acpi_cpu_flags flags;
619 620
621 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
620 (void)acpi_ev_finish_gpe(gpe_event_info); 622 (void)acpi_ev_finish_gpe(gpe_event_info);
623 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
621 624
622 ACPI_FREE(gpe_event_info); 625 ACPI_FREE(gpe_event_info);
623 return; 626 return;
@@ -655,7 +658,7 @@ acpi_status acpi_ev_finish_gpe(struct acpi_gpe_event_info * gpe_event_info)
655 658
656 /* 659 /*
657 * Enable this GPE, conditionally. This means that the GPE will 660 * Enable this GPE, conditionally. This means that the GPE will
658 * only be physically enabled if the enable_for_run bit is set 661 * only be physically enabled if the enable_mask bit is set
659 * in the event_info. 662 * in the event_info.
660 */ 663 */
661 (void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_CONDITIONAL_ENABLE); 664 (void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_CONDITIONAL_ENABLE);
diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
index 48ac7b7b59cd..494027f5c067 100644
--- a/drivers/acpi/acpica/hwgpe.c
+++ b/drivers/acpi/acpica/hwgpe.c
@@ -115,12 +115,12 @@ acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u32 action)
115 /* Set or clear just the bit that corresponds to this GPE */ 115 /* Set or clear just the bit that corresponds to this GPE */
116 116
117 register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info); 117 register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info);
118 switch (action) { 118 switch (action & ~ACPI_GPE_SAVE_MASK) {
119 case ACPI_GPE_CONDITIONAL_ENABLE: 119 case ACPI_GPE_CONDITIONAL_ENABLE:
120 120
121 /* Only enable if the enable_for_run bit is set */ 121 /* Only enable if the corresponding enable_mask bit is set */
122 122
123 if (!(register_bit & gpe_register_info->enable_for_run)) { 123 if (!(register_bit & gpe_register_info->enable_mask)) {
124 return (AE_BAD_PARAMETER); 124 return (AE_BAD_PARAMETER);
125 } 125 }
126 126
@@ -145,6 +145,9 @@ acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u32 action)
145 /* Write the updated enable mask */ 145 /* Write the updated enable mask */
146 146
147 status = acpi_hw_write(enable_mask, &gpe_register_info->enable_address); 147 status = acpi_hw_write(enable_mask, &gpe_register_info->enable_address);
148 if (ACPI_SUCCESS(status) && (action & ACPI_GPE_SAVE_MASK)) {
149 gpe_register_info->enable_mask = enable_mask;
150 }
148 return (status); 151 return (status);
149} 152}
150 153
@@ -262,6 +265,32 @@ acpi_hw_get_gpe_status(struct acpi_gpe_event_info * gpe_event_info,
262 265
263/****************************************************************************** 266/******************************************************************************
264 * 267 *
268 * FUNCTION: acpi_hw_gpe_enable_write
269 *
270 * PARAMETERS: enable_mask - Bit mask to write to the GPE register
271 * gpe_register_info - Gpe Register info
272 *
273 * RETURN: Status
274 *
275 * DESCRIPTION: Write the enable mask byte to the given GPE register.
276 *
277 ******************************************************************************/
278
279static acpi_status
280acpi_hw_gpe_enable_write(u8 enable_mask,
281 struct acpi_gpe_register_info *gpe_register_info)
282{
283 acpi_status status;
284
285 status = acpi_hw_write(enable_mask, &gpe_register_info->enable_address);
286 if (ACPI_SUCCESS(status)) {
287 gpe_register_info->enable_mask = enable_mask;
288 }
289 return (status);
290}
291
292/******************************************************************************
293 *
265 * FUNCTION: acpi_hw_disable_gpe_block 294 * FUNCTION: acpi_hw_disable_gpe_block
266 * 295 *
267 * PARAMETERS: gpe_xrupt_info - GPE Interrupt info 296 * PARAMETERS: gpe_xrupt_info - GPE Interrupt info
@@ -287,8 +316,8 @@ acpi_hw_disable_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
287 /* Disable all GPEs in this register */ 316 /* Disable all GPEs in this register */
288 317
289 status = 318 status =
290 acpi_hw_write(0x00, 319 acpi_hw_gpe_enable_write(0x00,
291 &gpe_block->register_info[i].enable_address); 320 &gpe_block->register_info[i]);
292 if (ACPI_FAILURE(status)) { 321 if (ACPI_FAILURE(status)) {
293 return (status); 322 return (status);
294 } 323 }
@@ -355,21 +384,23 @@ acpi_hw_enable_runtime_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
355{ 384{
356 u32 i; 385 u32 i;
357 acpi_status status; 386 acpi_status status;
387 struct acpi_gpe_register_info *gpe_register_info;
358 388
359 /* NOTE: assumes that all GPEs are currently disabled */ 389 /* NOTE: assumes that all GPEs are currently disabled */
360 390
361 /* Examine each GPE Register within the block */ 391 /* Examine each GPE Register within the block */
362 392
363 for (i = 0; i < gpe_block->register_count; i++) { 393 for (i = 0; i < gpe_block->register_count; i++) {
364 if (!gpe_block->register_info[i].enable_for_run) { 394 gpe_register_info = &gpe_block->register_info[i];
395 if (!gpe_register_info->enable_for_run) {
365 continue; 396 continue;
366 } 397 }
367 398
368 /* Enable all "runtime" GPEs in this register */ 399 /* Enable all "runtime" GPEs in this register */
369 400
370 status = 401 status =
371 acpi_hw_write(gpe_block->register_info[i].enable_for_run, 402 acpi_hw_gpe_enable_write(gpe_register_info->enable_for_run,
372 &gpe_block->register_info[i].enable_address); 403 gpe_register_info);
373 if (ACPI_FAILURE(status)) { 404 if (ACPI_FAILURE(status)) {
374 return (status); 405 return (status);
375 } 406 }
@@ -399,10 +430,12 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
399{ 430{
400 u32 i; 431 u32 i;
401 acpi_status status; 432 acpi_status status;
433 struct acpi_gpe_register_info *gpe_register_info;
402 434
403 /* Examine each GPE Register within the block */ 435 /* Examine each GPE Register within the block */
404 436
405 for (i = 0; i < gpe_block->register_count; i++) { 437 for (i = 0; i < gpe_block->register_count; i++) {
438 gpe_register_info = &gpe_block->register_info[i];
406 439
407 /* 440 /*
408 * Enable all "wake" GPEs in this register and disable the 441 * Enable all "wake" GPEs in this register and disable the
@@ -410,8 +443,8 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
410 */ 443 */
411 444
412 status = 445 status =
413 acpi_hw_write(gpe_block->register_info[i].enable_for_wake, 446 acpi_hw_gpe_enable_write(gpe_register_info->enable_for_wake,
414 &gpe_block->register_info[i].enable_address); 447 gpe_register_info);
415 if (ACPI_FAILURE(status)) { 448 if (ACPI_FAILURE(status)) {
416 return (status); 449 return (status);
417 } 450 }
diff --git a/drivers/acpi/acpica/utresrc.c b/drivers/acpi/acpica/utresrc.c
index 5cd017c7ac0e..bc1ff820c7dd 100644
--- a/drivers/acpi/acpica/utresrc.c
+++ b/drivers/acpi/acpica/utresrc.c
@@ -263,7 +263,7 @@ const char *acpi_gbl_bpb_decode[] = {
263/* UART serial bus stop bits */ 263/* UART serial bus stop bits */
264 264
265const char *acpi_gbl_sb_decode[] = { 265const char *acpi_gbl_sb_decode[] = {
266 "StopBitsNone", 266 "StopBitsZero",
267 "StopBitsOne", 267 "StopBitsOne",
268 "StopBitsOnePlusHalf", 268 "StopBitsOnePlusHalf",
269 "StopBitsTwo" 269 "StopBitsTwo"
diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c
index 502a8492dc83..49c873c68756 100644
--- a/drivers/acpi/acpica/utxface.c
+++ b/drivers/acpi/acpica/utxface.c
@@ -531,7 +531,9 @@ acpi_decode_pld_buffer(u8 *in_buffer,
531 ACPI_MOVE_32_TO_32(&dword, &buffer[0]); 531 ACPI_MOVE_32_TO_32(&dword, &buffer[0]);
532 pld_info->revision = ACPI_PLD_GET_REVISION(&dword); 532 pld_info->revision = ACPI_PLD_GET_REVISION(&dword);
533 pld_info->ignore_color = ACPI_PLD_GET_IGNORE_COLOR(&dword); 533 pld_info->ignore_color = ACPI_PLD_GET_IGNORE_COLOR(&dword);
534 pld_info->color = ACPI_PLD_GET_COLOR(&dword); 534 pld_info->red = ACPI_PLD_GET_RED(&dword);
535 pld_info->green = ACPI_PLD_GET_GREEN(&dword);
536 pld_info->blue = ACPI_PLD_GET_BLUE(&dword);
535 537
536 /* Second 32-bit DWord */ 538 /* Second 32-bit DWord */
537 539
diff --git a/drivers/acpi/acpica/utxfinit.c b/drivers/acpi/acpica/utxfinit.c
index 13380d818462..b1fd6886e439 100644
--- a/drivers/acpi/acpica/utxfinit.c
+++ b/drivers/acpi/acpica/utxfinit.c
@@ -53,6 +53,9 @@
53#define _COMPONENT ACPI_UTILITIES 53#define _COMPONENT ACPI_UTILITIES
54ACPI_MODULE_NAME("utxfinit") 54ACPI_MODULE_NAME("utxfinit")
55 55
56/* For acpi_exec only */
57void ae_do_object_overrides(void);
58
56/******************************************************************************* 59/*******************************************************************************
57 * 60 *
58 * FUNCTION: acpi_initialize_subsystem 61 * FUNCTION: acpi_initialize_subsystem
@@ -65,6 +68,7 @@ ACPI_MODULE_NAME("utxfinit")
65 * called, so any early initialization belongs here. 68 * called, so any early initialization belongs here.
66 * 69 *
67 ******************************************************************************/ 70 ******************************************************************************/
71
68acpi_status __init acpi_initialize_subsystem(void) 72acpi_status __init acpi_initialize_subsystem(void)
69{ 73{
70 acpi_status status; 74 acpi_status status;
@@ -275,6 +279,13 @@ acpi_status __init acpi_initialize_objects(u32 flags)
275 return_ACPI_STATUS(status); 279 return_ACPI_STATUS(status);
276 } 280 }
277 } 281 }
282#ifdef ACPI_EXEC_APP
283 /*
284 * This call implements the "initialization file" option for acpi_exec.
285 * This is the precise point that we want to perform the overrides.
286 */
287 ae_do_object_overrides();
288#endif
278 289
279 /* 290 /*
280 * Execute any module-level code that was detected during the table load 291 * Execute any module-level code that was detected during the table load
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index fc5f780bb61d..1b6aa514848f 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -128,7 +128,7 @@ static DEFINE_SPINLOCK(ghes_ioremap_lock_irq);
128static struct gen_pool *ghes_estatus_pool; 128static struct gen_pool *ghes_estatus_pool;
129static unsigned long ghes_estatus_pool_size_request; 129static unsigned long ghes_estatus_pool_size_request;
130 130
131struct ghes_estatus_cache *ghes_estatus_caches[GHES_ESTATUS_CACHES_SIZE]; 131static struct ghes_estatus_cache *ghes_estatus_caches[GHES_ESTATUS_CACHES_SIZE];
132static atomic_t ghes_estatus_cache_alloced; 132static atomic_t ghes_estatus_cache_alloced;
133 133
134static int ghes_ioremap_init(void) 134static int ghes_ioremap_init(void)
@@ -738,20 +738,6 @@ static LIST_HEAD(ghes_nmi);
738 738
739static int ghes_panic_timeout __read_mostly = 30; 739static int ghes_panic_timeout __read_mostly = 30;
740 740
741static struct llist_node *llist_nodes_reverse(struct llist_node *llnode)
742{
743 struct llist_node *next, *tail = NULL;
744
745 while (llnode) {
746 next = llnode->next;
747 llnode->next = tail;
748 tail = llnode;
749 llnode = next;
750 }
751
752 return tail;
753}
754
755static void ghes_proc_in_irq(struct irq_work *irq_work) 741static void ghes_proc_in_irq(struct irq_work *irq_work)
756{ 742{
757 struct llist_node *llnode, *next; 743 struct llist_node *llnode, *next;
@@ -765,7 +751,7 @@ static void ghes_proc_in_irq(struct irq_work *irq_work)
765 * Because the time order of estatus in list is reversed, 751 * Because the time order of estatus in list is reversed,
766 * revert it back to proper order. 752 * revert it back to proper order.
767 */ 753 */
768 llnode = llist_nodes_reverse(llnode); 754 llnode = llist_reverse_order(llnode);
769 while (llnode) { 755 while (llnode) {
770 next = llnode->next; 756 next = llnode->next;
771 estatus_node = llist_entry(llnode, struct ghes_estatus_node, 757 estatus_node = llist_entry(llnode, struct ghes_estatus_node,
@@ -798,7 +784,7 @@ static void ghes_print_queued_estatus(void)
798 * Because the time order of estatus in list is reversed, 784 * Because the time order of estatus in list is reversed,
799 * revert it back to proper order. 785 * revert it back to proper order.
800 */ 786 */
801 llnode = llist_nodes_reverse(llnode); 787 llnode = llist_reverse_order(llnode);
802 while (llnode) { 788 while (llnode) {
803 estatus_node = llist_entry(llnode, struct ghes_estatus_node, 789 estatus_node = llist_entry(llnode, struct ghes_estatus_node,
804 llnode); 790 llnode);
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 8ec8a89a20ab..d98ba4355819 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -1180,6 +1180,10 @@ static int acpi_battery_add(struct acpi_device *device)
1180 1180
1181 if (!device) 1181 if (!device)
1182 return -EINVAL; 1182 return -EINVAL;
1183
1184 if (device->dep_unmet)
1185 return -EPROBE_DEFER;
1186
1183 battery = kzalloc(sizeof(struct acpi_battery), GFP_KERNEL); 1187 battery = kzalloc(sizeof(struct acpi_battery), GFP_KERNEL);
1184 if (!battery) 1188 if (!battery)
1185 return -ENOMEM; 1189 return -ENOMEM;
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 7db193160766..897640188acd 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -201,7 +201,7 @@ int acpi_device_set_power(struct acpi_device *device, int state)
201 * Transition Power 201 * Transition Power
202 * ---------------- 202 * ----------------
203 * In accordance with the ACPI specification first apply power (via 203 * In accordance with the ACPI specification first apply power (via
204 * power resources) and then evalute _PSx. 204 * power resources) and then evaluate _PSx.
205 */ 205 */
206 if (device->power.flags.power_resources) { 206 if (device->power.flags.power_resources) {
207 result = acpi_power_transition(device, state); 207 result = acpi_power_transition(device, state);
@@ -692,7 +692,6 @@ static int acpi_device_wakeup(struct acpi_device *adev, u32 target_state,
692 return 0; 692 return 0;
693} 693}
694 694
695#ifdef CONFIG_PM_RUNTIME
696/** 695/**
697 * acpi_pm_device_run_wake - Enable/disable remote wakeup for given device. 696 * acpi_pm_device_run_wake - Enable/disable remote wakeup for given device.
698 * @dev: Device to enable/disable the platform to wake up. 697 * @dev: Device to enable/disable the platform to wake up.
@@ -714,7 +713,6 @@ int acpi_pm_device_run_wake(struct device *phys_dev, bool enable)
714 return acpi_device_wakeup(adev, ACPI_STATE_S0, enable); 713 return acpi_device_wakeup(adev, ACPI_STATE_S0, enable);
715} 714}
716EXPORT_SYMBOL(acpi_pm_device_run_wake); 715EXPORT_SYMBOL(acpi_pm_device_run_wake);
717#endif /* CONFIG_PM_RUNTIME */
718 716
719#ifdef CONFIG_PM_SLEEP 717#ifdef CONFIG_PM_SLEEP
720/** 718/**
@@ -773,7 +771,6 @@ static int acpi_dev_pm_full_power(struct acpi_device *adev)
773 acpi_device_set_power(adev, ACPI_STATE_D0) : 0; 771 acpi_device_set_power(adev, ACPI_STATE_D0) : 0;
774} 772}
775 773
776#ifdef CONFIG_PM_RUNTIME
777/** 774/**
778 * acpi_dev_runtime_suspend - Put device into a low-power state using ACPI. 775 * acpi_dev_runtime_suspend - Put device into a low-power state using ACPI.
779 * @dev: Device to put into a low-power state. 776 * @dev: Device to put into a low-power state.
@@ -855,7 +852,6 @@ int acpi_subsys_runtime_resume(struct device *dev)
855 return ret ? ret : pm_generic_runtime_resume(dev); 852 return ret ? ret : pm_generic_runtime_resume(dev);
856} 853}
857EXPORT_SYMBOL_GPL(acpi_subsys_runtime_resume); 854EXPORT_SYMBOL_GPL(acpi_subsys_runtime_resume);
858#endif /* CONFIG_PM_RUNTIME */
859 855
860#ifdef CONFIG_PM_SLEEP 856#ifdef CONFIG_PM_SLEEP
861/** 857/**
@@ -1023,10 +1019,9 @@ EXPORT_SYMBOL_GPL(acpi_subsys_freeze);
1023 1019
1024static struct dev_pm_domain acpi_general_pm_domain = { 1020static struct dev_pm_domain acpi_general_pm_domain = {
1025 .ops = { 1021 .ops = {
1026#ifdef CONFIG_PM_RUNTIME 1022#ifdef CONFIG_PM
1027 .runtime_suspend = acpi_subsys_runtime_suspend, 1023 .runtime_suspend = acpi_subsys_runtime_suspend,
1028 .runtime_resume = acpi_subsys_runtime_resume, 1024 .runtime_resume = acpi_subsys_runtime_resume,
1029#endif
1030#ifdef CONFIG_PM_SLEEP 1025#ifdef CONFIG_PM_SLEEP
1031 .prepare = acpi_subsys_prepare, 1026 .prepare = acpi_subsys_prepare,
1032 .complete = acpi_subsys_complete, 1027 .complete = acpi_subsys_complete,
@@ -1038,6 +1033,7 @@ static struct dev_pm_domain acpi_general_pm_domain = {
1038 .poweroff_late = acpi_subsys_suspend_late, 1033 .poweroff_late = acpi_subsys_suspend_late,
1039 .restore_early = acpi_subsys_resume_early, 1034 .restore_early = acpi_subsys_resume_early,
1040#endif 1035#endif
1036#endif
1041 }, 1037 },
1042}; 1038};
1043 1039
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 447f6d679b29..163e82f536fa 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -173,4 +173,10 @@ static inline void suspend_nvs_restore(void) {}
173bool acpi_osi_is_win8(void); 173bool acpi_osi_is_win8(void);
174#endif 174#endif
175 175
176/*--------------------------------------------------------------------------
177 Device properties
178 -------------------------------------------------------------------------- */
179void acpi_init_properties(struct acpi_device *adev);
180void acpi_free_properties(struct acpi_device *adev);
181
176#endif /* _ACPI_INTERNAL_H_ */ 182#endif /* _ACPI_INTERNAL_H_ */
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 9964f70be98d..f9eeae871593 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -436,7 +436,7 @@ static void acpi_os_drop_map_ref(struct acpi_ioremap *map)
436static void acpi_os_map_cleanup(struct acpi_ioremap *map) 436static void acpi_os_map_cleanup(struct acpi_ioremap *map)
437{ 437{
438 if (!map->refcount) { 438 if (!map->refcount) {
439 synchronize_rcu(); 439 synchronize_rcu_expedited();
440 acpi_unmap(map->phys, map->virt); 440 acpi_unmap(map->phys, map->virt);
441 kfree(map); 441 kfree(map);
442 } 442 }
@@ -1188,6 +1188,12 @@ EXPORT_SYMBOL(acpi_os_execute);
1188 1188
1189void acpi_os_wait_events_complete(void) 1189void acpi_os_wait_events_complete(void)
1190{ 1190{
1191 /*
1192 * Make sure the GPE handler or the fixed event handler is not used
1193 * on another CPU after removal.
1194 */
1195 if (acpi_irq_handler)
1196 synchronize_hardirq(acpi_gbl_FADT.sci_interrupt);
1191 flush_workqueue(kacpid_wq); 1197 flush_workqueue(kacpid_wq);
1192 flush_workqueue(kacpi_notify_wq); 1198 flush_workqueue(kacpi_notify_wq);
1193} 1199}
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index 6e6b80eb0bba..7cc4e33179f9 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -484,7 +484,7 @@ void acpi_pci_irq_disable(struct pci_dev *dev)
484 /* Keep IOAPIC pin configuration when suspending */ 484 /* Keep IOAPIC pin configuration when suspending */
485 if (dev->dev.power.is_prepared) 485 if (dev->dev.power.is_prepared)
486 return; 486 return;
487#ifdef CONFIG_PM_RUNTIME 487#ifdef CONFIG_PM
488 if (dev->dev.power.runtime_status == RPM_SUSPENDING) 488 if (dev->dev.power.runtime_status == RPM_SUSPENDING)
489 return; 489 return;
490#endif 490#endif
diff --git a/drivers/acpi/pmic/intel_pmic.c b/drivers/acpi/pmic/intel_pmic.c
new file mode 100644
index 000000000000..a732e5d7e322
--- /dev/null
+++ b/drivers/acpi/pmic/intel_pmic.c
@@ -0,0 +1,354 @@
1/*
2 * intel_pmic.c - Intel PMIC operation region driver
3 *
4 * Copyright (C) 2014 Intel Corporation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License version
8 * 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#include <linux/module.h>
17#include <linux/acpi.h>
18#include <linux/regmap.h>
19#include "intel_pmic.h"
20
21#define PMIC_POWER_OPREGION_ID 0x8d
22#define PMIC_THERMAL_OPREGION_ID 0x8c
23
24struct acpi_lpat {
25 int temp;
26 int raw;
27};
28
29struct intel_pmic_opregion {
30 struct mutex lock;
31 struct acpi_lpat *lpat;
32 int lpat_count;
33 struct regmap *regmap;
34 struct intel_pmic_opregion_data *data;
35};
36
37static int pmic_get_reg_bit(int address, struct pmic_table *table,
38 int count, int *reg, int *bit)
39{
40 int i;
41
42 for (i = 0; i < count; i++) {
43 if (table[i].address == address) {
44 *reg = table[i].reg;
45 if (bit)
46 *bit = table[i].bit;
47 return 0;
48 }
49 }
50 return -ENOENT;
51}
52
53/**
54 * raw_to_temp(): Return temperature from raw value through LPAT table
55 *
56 * @lpat: the temperature_raw mapping table
57 * @count: the count of the above mapping table
58 * @raw: the raw value, used as a key to get the temerature from the
59 * above mapping table
60 *
61 * A positive value will be returned on success, a negative errno will
62 * be returned in error cases.
63 */
64static int raw_to_temp(struct acpi_lpat *lpat, int count, int raw)
65{
66 int i, delta_temp, delta_raw, temp;
67
68 for (i = 0; i < count - 1; i++) {
69 if ((raw >= lpat[i].raw && raw <= lpat[i+1].raw) ||
70 (raw <= lpat[i].raw && raw >= lpat[i+1].raw))
71 break;
72 }
73
74 if (i == count - 1)
75 return -ENOENT;
76
77 delta_temp = lpat[i+1].temp - lpat[i].temp;
78 delta_raw = lpat[i+1].raw - lpat[i].raw;
79 temp = lpat[i].temp + (raw - lpat[i].raw) * delta_temp / delta_raw;
80
81 return temp;
82}
83
84/**
85 * temp_to_raw(): Return raw value from temperature through LPAT table
86 *
87 * @lpat: the temperature_raw mapping table
88 * @count: the count of the above mapping table
89 * @temp: the temperature, used as a key to get the raw value from the
90 * above mapping table
91 *
92 * A positive value will be returned on success, a negative errno will
93 * be returned in error cases.
94 */
95static int temp_to_raw(struct acpi_lpat *lpat, int count, int temp)
96{
97 int i, delta_temp, delta_raw, raw;
98
99 for (i = 0; i < count - 1; i++) {
100 if (temp >= lpat[i].temp && temp <= lpat[i+1].temp)
101 break;
102 }
103
104 if (i == count - 1)
105 return -ENOENT;
106
107 delta_temp = lpat[i+1].temp - lpat[i].temp;
108 delta_raw = lpat[i+1].raw - lpat[i].raw;
109 raw = lpat[i].raw + (temp - lpat[i].temp) * delta_raw / delta_temp;
110
111 return raw;
112}
113
114static void pmic_thermal_lpat(struct intel_pmic_opregion *opregion,
115 acpi_handle handle, struct device *dev)
116{
117 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
118 union acpi_object *obj_p, *obj_e;
119 int *lpat, i;
120 acpi_status status;
121
122 status = acpi_evaluate_object(handle, "LPAT", NULL, &buffer);
123 if (ACPI_FAILURE(status))
124 return;
125
126 obj_p = (union acpi_object *)buffer.pointer;
127 if (!obj_p || (obj_p->type != ACPI_TYPE_PACKAGE) ||
128 (obj_p->package.count % 2) || (obj_p->package.count < 4))
129 goto out;
130
131 lpat = devm_kmalloc(dev, sizeof(int) * obj_p->package.count,
132 GFP_KERNEL);
133 if (!lpat)
134 goto out;
135
136 for (i = 0; i < obj_p->package.count; i++) {
137 obj_e = &obj_p->package.elements[i];
138 if (obj_e->type != ACPI_TYPE_INTEGER) {
139 devm_kfree(dev, lpat);
140 goto out;
141 }
142 lpat[i] = (s64)obj_e->integer.value;
143 }
144
145 opregion->lpat = (struct acpi_lpat *)lpat;
146 opregion->lpat_count = obj_p->package.count / 2;
147
148out:
149 kfree(buffer.pointer);
150}
151
152static acpi_status intel_pmic_power_handler(u32 function,
153 acpi_physical_address address, u32 bits, u64 *value64,
154 void *handler_context, void *region_context)
155{
156 struct intel_pmic_opregion *opregion = region_context;
157 struct regmap *regmap = opregion->regmap;
158 struct intel_pmic_opregion_data *d = opregion->data;
159 int reg, bit, result;
160
161 if (bits != 32 || !value64)
162 return AE_BAD_PARAMETER;
163
164 if (function == ACPI_WRITE && !(*value64 == 0 || *value64 == 1))
165 return AE_BAD_PARAMETER;
166
167 result = pmic_get_reg_bit(address, d->power_table,
168 d->power_table_count, &reg, &bit);
169 if (result == -ENOENT)
170 return AE_BAD_PARAMETER;
171
172 mutex_lock(&opregion->lock);
173
174 result = function == ACPI_READ ?
175 d->get_power(regmap, reg, bit, value64) :
176 d->update_power(regmap, reg, bit, *value64 == 1);
177
178 mutex_unlock(&opregion->lock);
179
180 return result ? AE_ERROR : AE_OK;
181}
182
183static int pmic_read_temp(struct intel_pmic_opregion *opregion,
184 int reg, u64 *value)
185{
186 int raw_temp, temp;
187
188 if (!opregion->data->get_raw_temp)
189 return -ENXIO;
190
191 raw_temp = opregion->data->get_raw_temp(opregion->regmap, reg);
192 if (raw_temp < 0)
193 return raw_temp;
194
195 if (!opregion->lpat) {
196 *value = raw_temp;
197 return 0;
198 }
199
200 temp = raw_to_temp(opregion->lpat, opregion->lpat_count, raw_temp);
201 if (temp < 0)
202 return temp;
203
204 *value = temp;
205 return 0;
206}
207
208static int pmic_thermal_temp(struct intel_pmic_opregion *opregion, int reg,
209 u32 function, u64 *value)
210{
211 return function == ACPI_READ ?
212 pmic_read_temp(opregion, reg, value) : -EINVAL;
213}
214
215static int pmic_thermal_aux(struct intel_pmic_opregion *opregion, int reg,
216 u32 function, u64 *value)
217{
218 int raw_temp;
219
220 if (function == ACPI_READ)
221 return pmic_read_temp(opregion, reg, value);
222
223 if (!opregion->data->update_aux)
224 return -ENXIO;
225
226 if (opregion->lpat) {
227 raw_temp = temp_to_raw(opregion->lpat, opregion->lpat_count,
228 *value);
229 if (raw_temp < 0)
230 return raw_temp;
231 } else {
232 raw_temp = *value;
233 }
234
235 return opregion->data->update_aux(opregion->regmap, reg, raw_temp);
236}
237
238static int pmic_thermal_pen(struct intel_pmic_opregion *opregion, int reg,
239 u32 function, u64 *value)
240{
241 struct intel_pmic_opregion_data *d = opregion->data;
242 struct regmap *regmap = opregion->regmap;
243
244 if (!d->get_policy || !d->update_policy)
245 return -ENXIO;
246
247 if (function == ACPI_READ)
248 return d->get_policy(regmap, reg, value);
249
250 if (*value != 0 && *value != 1)
251 return -EINVAL;
252
253 return d->update_policy(regmap, reg, *value);
254}
255
256static bool pmic_thermal_is_temp(int address)
257{
258 return (address <= 0x3c) && !(address % 12);
259}
260
261static bool pmic_thermal_is_aux(int address)
262{
263 return (address >= 4 && address <= 0x40 && !((address - 4) % 12)) ||
264 (address >= 8 && address <= 0x44 && !((address - 8) % 12));
265}
266
267static bool pmic_thermal_is_pen(int address)
268{
269 return address >= 0x48 && address <= 0x5c;
270}
271
272static acpi_status intel_pmic_thermal_handler(u32 function,
273 acpi_physical_address address, u32 bits, u64 *value64,
274 void *handler_context, void *region_context)
275{
276 struct intel_pmic_opregion *opregion = region_context;
277 struct intel_pmic_opregion_data *d = opregion->data;
278 int reg, result;
279
280 if (bits != 32 || !value64)
281 return AE_BAD_PARAMETER;
282
283 result = pmic_get_reg_bit(address, d->thermal_table,
284 d->thermal_table_count, &reg, NULL);
285 if (result == -ENOENT)
286 return AE_BAD_PARAMETER;
287
288 mutex_lock(&opregion->lock);
289
290 if (pmic_thermal_is_temp(address))
291 result = pmic_thermal_temp(opregion, reg, function, value64);
292 else if (pmic_thermal_is_aux(address))
293 result = pmic_thermal_aux(opregion, reg, function, value64);
294 else if (pmic_thermal_is_pen(address))
295 result = pmic_thermal_pen(opregion, reg, function, value64);
296 else
297 result = -EINVAL;
298
299 mutex_unlock(&opregion->lock);
300
301 if (result < 0) {
302 if (result == -EINVAL)
303 return AE_BAD_PARAMETER;
304 else
305 return AE_ERROR;
306 }
307
308 return AE_OK;
309}
310
311int intel_pmic_install_opregion_handler(struct device *dev, acpi_handle handle,
312 struct regmap *regmap,
313 struct intel_pmic_opregion_data *d)
314{
315 acpi_status status;
316 struct intel_pmic_opregion *opregion;
317
318 if (!dev || !regmap || !d)
319 return -EINVAL;
320
321 if (!handle)
322 return -ENODEV;
323
324 opregion = devm_kzalloc(dev, sizeof(*opregion), GFP_KERNEL);
325 if (!opregion)
326 return -ENOMEM;
327
328 mutex_init(&opregion->lock);
329 opregion->regmap = regmap;
330 pmic_thermal_lpat(opregion, handle, dev);
331
332 status = acpi_install_address_space_handler(handle,
333 PMIC_POWER_OPREGION_ID,
334 intel_pmic_power_handler,
335 NULL, opregion);
336 if (ACPI_FAILURE(status))
337 return -ENODEV;
338
339 status = acpi_install_address_space_handler(handle,
340 PMIC_THERMAL_OPREGION_ID,
341 intel_pmic_thermal_handler,
342 NULL, opregion);
343 if (ACPI_FAILURE(status)) {
344 acpi_remove_address_space_handler(handle, PMIC_POWER_OPREGION_ID,
345 intel_pmic_power_handler);
346 return -ENODEV;
347 }
348
349 opregion->data = d;
350 return 0;
351}
352EXPORT_SYMBOL_GPL(intel_pmic_install_opregion_handler);
353
354MODULE_LICENSE("GPL");
diff --git a/drivers/acpi/pmic/intel_pmic.h b/drivers/acpi/pmic/intel_pmic.h
new file mode 100644
index 000000000000..d4e90af8f0dd
--- /dev/null
+++ b/drivers/acpi/pmic/intel_pmic.h
@@ -0,0 +1,25 @@
1#ifndef __INTEL_PMIC_H
2#define __INTEL_PMIC_H
3
4struct pmic_table {
5 int address; /* operation region address */
6 int reg; /* corresponding thermal register */
7 int bit; /* control bit for power */
8};
9
10struct intel_pmic_opregion_data {
11 int (*get_power)(struct regmap *r, int reg, int bit, u64 *value);
12 int (*update_power)(struct regmap *r, int reg, int bit, bool on);
13 int (*get_raw_temp)(struct regmap *r, int reg);
14 int (*update_aux)(struct regmap *r, int reg, int raw_temp);
15 int (*get_policy)(struct regmap *r, int reg, u64 *value);
16 int (*update_policy)(struct regmap *r, int reg, int enable);
17 struct pmic_table *power_table;
18 int power_table_count;
19 struct pmic_table *thermal_table;
20 int thermal_table_count;
21};
22
23int intel_pmic_install_opregion_handler(struct device *dev, acpi_handle handle, struct regmap *regmap, struct intel_pmic_opregion_data *d);
24
25#endif
diff --git a/drivers/acpi/pmic/intel_pmic_crc.c b/drivers/acpi/pmic/intel_pmic_crc.c
new file mode 100644
index 000000000000..ef7d8ff95abe
--- /dev/null
+++ b/drivers/acpi/pmic/intel_pmic_crc.c
@@ -0,0 +1,211 @@
1/*
2 * intel_pmic_crc.c - Intel CrystalCove PMIC operation region driver
3 *
4 * Copyright (C) 2014 Intel Corporation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License version
8 * 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#include <linux/module.h>
17#include <linux/acpi.h>
18#include <linux/mfd/intel_soc_pmic.h>
19#include <linux/regmap.h>
20#include <linux/platform_device.h>
21#include "intel_pmic.h"
22
23#define PWR_SOURCE_SELECT BIT(1)
24
25#define PMIC_A0LOCK_REG 0xc5
26
27static struct pmic_table power_table[] = {
28 {
29 .address = 0x24,
30 .reg = 0x66,
31 .bit = 0x00,
32 },
33 {
34 .address = 0x48,
35 .reg = 0x5d,
36 .bit = 0x00,
37 },
38};
39
40static struct pmic_table thermal_table[] = {
41 {
42 .address = 0x00,
43 .reg = 0x75
44 },
45 {
46 .address = 0x04,
47 .reg = 0x95
48 },
49 {
50 .address = 0x08,
51 .reg = 0x97
52 },
53 {
54 .address = 0x0c,
55 .reg = 0x77
56 },
57 {
58 .address = 0x10,
59 .reg = 0x9a
60 },
61 {
62 .address = 0x14,
63 .reg = 0x9c
64 },
65 {
66 .address = 0x18,
67 .reg = 0x79
68 },
69 {
70 .address = 0x1c,
71 .reg = 0x9f
72 },
73 {
74 .address = 0x20,
75 .reg = 0xa1
76 },
77 {
78 .address = 0x48,
79 .reg = 0x94
80 },
81 {
82 .address = 0x4c,
83 .reg = 0x99
84 },
85 {
86 .address = 0x50,
87 .reg = 0x9e
88 },
89};
90
91static int intel_crc_pmic_get_power(struct regmap *regmap, int reg,
92 int bit, u64 *value)
93{
94 int data;
95
96 if (regmap_read(regmap, reg, &data))
97 return -EIO;
98
99 *value = (data & PWR_SOURCE_SELECT) && (data & BIT(bit)) ? 1 : 0;
100 return 0;
101}
102
103static int intel_crc_pmic_update_power(struct regmap *regmap, int reg,
104 int bit, bool on)
105{
106 int data;
107
108 if (regmap_read(regmap, reg, &data))
109 return -EIO;
110
111 if (on) {
112 data |= PWR_SOURCE_SELECT | BIT(bit);
113 } else {
114 data &= ~BIT(bit);
115 data |= PWR_SOURCE_SELECT;
116 }
117
118 if (regmap_write(regmap, reg, data))
119 return -EIO;
120 return 0;
121}
122
123static int intel_crc_pmic_get_raw_temp(struct regmap *regmap, int reg)
124{
125 int temp_l, temp_h;
126
127 /*
128 * Raw temperature value is 10bits: 8bits in reg
129 * and 2bits in reg-1: bit0,1
130 */
131 if (regmap_read(regmap, reg, &temp_l) ||
132 regmap_read(regmap, reg - 1, &temp_h))
133 return -EIO;
134
135 return temp_l | (temp_h & 0x3) << 8;
136}
137
138static int intel_crc_pmic_update_aux(struct regmap *regmap, int reg, int raw)
139{
140 return regmap_write(regmap, reg, raw) ||
141 regmap_update_bits(regmap, reg - 1, 0x3, raw >> 8) ? -EIO : 0;
142}
143
144static int intel_crc_pmic_get_policy(struct regmap *regmap, int reg, u64 *value)
145{
146 int pen;
147
148 if (regmap_read(regmap, reg, &pen))
149 return -EIO;
150 *value = pen >> 7;
151 return 0;
152}
153
154static int intel_crc_pmic_update_policy(struct regmap *regmap,
155 int reg, int enable)
156{
157 int alert0;
158
159 /* Update to policy enable bit requires unlocking a0lock */
160 if (regmap_read(regmap, PMIC_A0LOCK_REG, &alert0))
161 return -EIO;
162
163 if (regmap_update_bits(regmap, PMIC_A0LOCK_REG, 0x01, 0))
164 return -EIO;
165
166 if (regmap_update_bits(regmap, reg, 0x80, enable << 7))
167 return -EIO;
168
169 /* restore alert0 */
170 if (regmap_write(regmap, PMIC_A0LOCK_REG, alert0))
171 return -EIO;
172
173 return 0;
174}
175
176static struct intel_pmic_opregion_data intel_crc_pmic_opregion_data = {
177 .get_power = intel_crc_pmic_get_power,
178 .update_power = intel_crc_pmic_update_power,
179 .get_raw_temp = intel_crc_pmic_get_raw_temp,
180 .update_aux = intel_crc_pmic_update_aux,
181 .get_policy = intel_crc_pmic_get_policy,
182 .update_policy = intel_crc_pmic_update_policy,
183 .power_table = power_table,
184 .power_table_count= ARRAY_SIZE(power_table),
185 .thermal_table = thermal_table,
186 .thermal_table_count = ARRAY_SIZE(thermal_table),
187};
188
189static int intel_crc_pmic_opregion_probe(struct platform_device *pdev)
190{
191 struct intel_soc_pmic *pmic = dev_get_drvdata(pdev->dev.parent);
192 return intel_pmic_install_opregion_handler(&pdev->dev,
193 ACPI_HANDLE(pdev->dev.parent), pmic->regmap,
194 &intel_crc_pmic_opregion_data);
195}
196
197static struct platform_driver intel_crc_pmic_opregion_driver = {
198 .probe = intel_crc_pmic_opregion_probe,
199 .driver = {
200 .name = "crystal_cove_pmic",
201 },
202};
203
204static int __init intel_crc_pmic_opregion_driver_init(void)
205{
206 return platform_driver_register(&intel_crc_pmic_opregion_driver);
207}
208module_init(intel_crc_pmic_opregion_driver_init);
209
210MODULE_DESCRIPTION("CrystalCove ACPI opration region driver");
211MODULE_LICENSE("GPL");
diff --git a/drivers/acpi/pmic/intel_pmic_xpower.c b/drivers/acpi/pmic/intel_pmic_xpower.c
new file mode 100644
index 000000000000..6a082d4de12c
--- /dev/null
+++ b/drivers/acpi/pmic/intel_pmic_xpower.c
@@ -0,0 +1,268 @@
1/*
2 * intel_pmic_xpower.c - XPower AXP288 PMIC operation region driver
3 *
4 * Copyright (C) 2014 Intel Corporation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License version
8 * 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#include <linux/module.h>
17#include <linux/acpi.h>
18#include <linux/mfd/axp20x.h>
19#include <linux/regmap.h>
20#include <linux/platform_device.h>
21#include <linux/iio/consumer.h>
22#include "intel_pmic.h"
23
24#define XPOWER_GPADC_LOW 0x5b
25
26static struct pmic_table power_table[] = {
27 {
28 .address = 0x00,
29 .reg = 0x13,
30 .bit = 0x05,
31 },
32 {
33 .address = 0x04,
34 .reg = 0x13,
35 .bit = 0x06,
36 },
37 {
38 .address = 0x08,
39 .reg = 0x13,
40 .bit = 0x07,
41 },
42 {
43 .address = 0x0c,
44 .reg = 0x12,
45 .bit = 0x03,
46 },
47 {
48 .address = 0x10,
49 .reg = 0x12,
50 .bit = 0x04,
51 },
52 {
53 .address = 0x14,
54 .reg = 0x12,
55 .bit = 0x05,
56 },
57 {
58 .address = 0x18,
59 .reg = 0x12,
60 .bit = 0x06,
61 },
62 {
63 .address = 0x1c,
64 .reg = 0x12,
65 .bit = 0x00,
66 },
67 {
68 .address = 0x20,
69 .reg = 0x12,
70 .bit = 0x01,
71 },
72 {
73 .address = 0x24,
74 .reg = 0x12,
75 .bit = 0x02,
76 },
77 {
78 .address = 0x28,
79 .reg = 0x13,
80 .bit = 0x02,
81 },
82 {
83 .address = 0x2c,
84 .reg = 0x13,
85 .bit = 0x03,
86 },
87 {
88 .address = 0x30,
89 .reg = 0x13,
90 .bit = 0x04,
91 },
92 {
93 .address = 0x38,
94 .reg = 0x10,
95 .bit = 0x03,
96 },
97 {
98 .address = 0x3c,
99 .reg = 0x10,
100 .bit = 0x06,
101 },
102 {
103 .address = 0x40,
104 .reg = 0x10,
105 .bit = 0x05,
106 },
107 {
108 .address = 0x44,
109 .reg = 0x10,
110 .bit = 0x04,
111 },
112 {
113 .address = 0x48,
114 .reg = 0x10,
115 .bit = 0x01,
116 },
117 {
118 .address = 0x4c,
119 .reg = 0x10,
120 .bit = 0x00
121 },
122};
123
124/* TMP0 - TMP5 are the same, all from GPADC */
125static struct pmic_table thermal_table[] = {
126 {
127 .address = 0x00,
128 .reg = XPOWER_GPADC_LOW
129 },
130 {
131 .address = 0x0c,
132 .reg = XPOWER_GPADC_LOW
133 },
134 {
135 .address = 0x18,
136 .reg = XPOWER_GPADC_LOW
137 },
138 {
139 .address = 0x24,
140 .reg = XPOWER_GPADC_LOW
141 },
142 {
143 .address = 0x30,
144 .reg = XPOWER_GPADC_LOW
145 },
146 {
147 .address = 0x3c,
148 .reg = XPOWER_GPADC_LOW
149 },
150};
151
152static int intel_xpower_pmic_get_power(struct regmap *regmap, int reg,
153 int bit, u64 *value)
154{
155 int data;
156
157 if (regmap_read(regmap, reg, &data))
158 return -EIO;
159
160 *value = (data & BIT(bit)) ? 1 : 0;
161 return 0;
162}
163
164static int intel_xpower_pmic_update_power(struct regmap *regmap, int reg,
165 int bit, bool on)
166{
167 int data;
168
169 if (regmap_read(regmap, reg, &data))
170 return -EIO;
171
172 if (on)
173 data |= BIT(bit);
174 else
175 data &= ~BIT(bit);
176
177 if (regmap_write(regmap, reg, data))
178 return -EIO;
179
180 return 0;
181}
182
183/**
184 * intel_xpower_pmic_get_raw_temp(): Get raw temperature reading from the PMIC
185 *
186 * @regmap: regmap of the PMIC device
187 * @reg: register to get the reading
188 *
189 * We could get the sensor value by manipulating the HW regs here, but since
190 * the axp288 IIO driver may also access the same regs at the same time, the
191 * APIs provided by IIO subsystem are used here instead to avoid problems. As
192 * a result, the two passed in params are of no actual use.
193 *
194 * Return a positive value on success, errno on failure.
195 */
196static int intel_xpower_pmic_get_raw_temp(struct regmap *regmap, int reg)
197{
198 struct iio_channel *gpadc_chan;
199 int ret, val;
200
201 gpadc_chan = iio_channel_get(NULL, "axp288-system-temp");
202 if (IS_ERR_OR_NULL(gpadc_chan))
203 return -EACCES;
204
205 ret = iio_read_channel_raw(gpadc_chan, &val);
206 if (ret < 0)
207 val = ret;
208
209 iio_channel_release(gpadc_chan);
210 return val;
211}
212
213static struct intel_pmic_opregion_data intel_xpower_pmic_opregion_data = {
214 .get_power = intel_xpower_pmic_get_power,
215 .update_power = intel_xpower_pmic_update_power,
216 .get_raw_temp = intel_xpower_pmic_get_raw_temp,
217 .power_table = power_table,
218 .power_table_count = ARRAY_SIZE(power_table),
219 .thermal_table = thermal_table,
220 .thermal_table_count = ARRAY_SIZE(thermal_table),
221};
222
223static acpi_status intel_xpower_pmic_gpio_handler(u32 function,
224 acpi_physical_address address, u32 bit_width, u64 *value,
225 void *handler_context, void *region_context)
226{
227 return AE_OK;
228}
229
230static int intel_xpower_pmic_opregion_probe(struct platform_device *pdev)
231{
232 struct device *parent = pdev->dev.parent;
233 struct axp20x_dev *axp20x = dev_get_drvdata(parent);
234 acpi_status status;
235 int result;
236
237 status = acpi_install_address_space_handler(ACPI_HANDLE(parent),
238 ACPI_ADR_SPACE_GPIO, intel_xpower_pmic_gpio_handler,
239 NULL, NULL);
240 if (ACPI_FAILURE(status))
241 return -ENODEV;
242
243 result = intel_pmic_install_opregion_handler(&pdev->dev,
244 ACPI_HANDLE(parent), axp20x->regmap,
245 &intel_xpower_pmic_opregion_data);
246 if (result)
247 acpi_remove_address_space_handler(ACPI_HANDLE(parent),
248 ACPI_ADR_SPACE_GPIO,
249 intel_xpower_pmic_gpio_handler);
250
251 return result;
252}
253
254static struct platform_driver intel_xpower_pmic_opregion_driver = {
255 .probe = intel_xpower_pmic_opregion_probe,
256 .driver = {
257 .name = "axp288_pmic_acpi",
258 },
259};
260
261static int __init intel_xpower_pmic_opregion_driver_init(void)
262{
263 return platform_driver_register(&intel_xpower_pmic_opregion_driver);
264}
265module_init(intel_xpower_pmic_opregion_driver_init);
266
267MODULE_DESCRIPTION("XPower AXP288 ACPI operation region driver");
268MODULE_LICENSE("GPL");
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 17f9ec501972..499536504698 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -334,10 +334,10 @@ static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
334 334
335static int acpi_processor_get_power_info_cst(struct acpi_processor *pr) 335static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
336{ 336{
337 acpi_status status = 0; 337 acpi_status status;
338 u64 count; 338 u64 count;
339 int current_count; 339 int current_count;
340 int i; 340 int i, ret = 0;
341 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 341 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
342 union acpi_object *cst; 342 union acpi_object *cst;
343 343
@@ -358,7 +358,7 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
358 /* There must be at least 2 elements */ 358 /* There must be at least 2 elements */
359 if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) { 359 if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
360 printk(KERN_ERR PREFIX "not enough elements in _CST\n"); 360 printk(KERN_ERR PREFIX "not enough elements in _CST\n");
361 status = -EFAULT; 361 ret = -EFAULT;
362 goto end; 362 goto end;
363 } 363 }
364 364
@@ -367,7 +367,7 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
367 /* Validate number of power states. */ 367 /* Validate number of power states. */
368 if (count < 1 || count != cst->package.count - 1) { 368 if (count < 1 || count != cst->package.count - 1) {
369 printk(KERN_ERR PREFIX "count given by _CST is not valid\n"); 369 printk(KERN_ERR PREFIX "count given by _CST is not valid\n");
370 status = -EFAULT; 370 ret = -EFAULT;
371 goto end; 371 goto end;
372 } 372 }
373 373
@@ -489,12 +489,12 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
489 489
490 /* Validate number of power states discovered */ 490 /* Validate number of power states discovered */
491 if (current_count < 2) 491 if (current_count < 2)
492 status = -EFAULT; 492 ret = -EFAULT;
493 493
494 end: 494 end:
495 kfree(buffer.pointer); 495 kfree(buffer.pointer);
496 496
497 return status; 497 return ret;
498} 498}
499 499
500static void acpi_processor_power_verify_c3(struct acpi_processor *pr, 500static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
@@ -985,8 +985,8 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
985 state->flags = 0; 985 state->flags = 0;
986 switch (cx->type) { 986 switch (cx->type) {
987 case ACPI_STATE_C1: 987 case ACPI_STATE_C1:
988 if (cx->entry_method == ACPI_CSTATE_FFH) 988 if (cx->entry_method != ACPI_CSTATE_FFH)
989 state->flags |= CPUIDLE_FLAG_TIME_VALID; 989 state->flags |= CPUIDLE_FLAG_TIME_INVALID;
990 990
991 state->enter = acpi_idle_enter_c1; 991 state->enter = acpi_idle_enter_c1;
992 state->enter_dead = acpi_idle_play_dead; 992 state->enter_dead = acpi_idle_play_dead;
@@ -994,14 +994,12 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
994 break; 994 break;
995 995
996 case ACPI_STATE_C2: 996 case ACPI_STATE_C2:
997 state->flags |= CPUIDLE_FLAG_TIME_VALID;
998 state->enter = acpi_idle_enter_simple; 997 state->enter = acpi_idle_enter_simple;
999 state->enter_dead = acpi_idle_play_dead; 998 state->enter_dead = acpi_idle_play_dead;
1000 drv->safe_state_index = count; 999 drv->safe_state_index = count;
1001 break; 1000 break;
1002 1001
1003 case ACPI_STATE_C3: 1002 case ACPI_STATE_C3:
1004 state->flags |= CPUIDLE_FLAG_TIME_VALID;
1005 state->enter = pr->flags.bm_check ? 1003 state->enter = pr->flags.bm_check ?
1006 acpi_idle_enter_bm : 1004 acpi_idle_enter_bm :
1007 acpi_idle_enter_simple; 1005 acpi_idle_enter_simple;
@@ -1111,7 +1109,7 @@ static int acpi_processor_registered;
1111 1109
1112int acpi_processor_power_init(struct acpi_processor *pr) 1110int acpi_processor_power_init(struct acpi_processor *pr)
1113{ 1111{
1114 acpi_status status = 0; 1112 acpi_status status;
1115 int retval; 1113 int retval;
1116 struct cpuidle_device *dev; 1114 struct cpuidle_device *dev;
1117 static int first_run; 1115 static int first_run;
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
new file mode 100644
index 000000000000..0d083736e25b
--- /dev/null
+++ b/drivers/acpi/property.c
@@ -0,0 +1,551 @@
1/*
2 * ACPI device specific properties support.
3 *
4 * Copyright (C) 2014, Intel Corporation
5 * All rights reserved.
6 *
7 * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
8 * Darren Hart <dvhart@linux.intel.com>
9 * Rafael J. Wysocki <rafael.j.wysocki@intel.com>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 */
15
16#include <linux/acpi.h>
17#include <linux/device.h>
18#include <linux/export.h>
19
20#include "internal.h"
21
22/* ACPI _DSD device properties UUID: daffd814-6eba-4d8c-8a91-bc9bbf4aa301 */
23static const u8 prp_uuid[16] = {
24 0x14, 0xd8, 0xff, 0xda, 0xba, 0x6e, 0x8c, 0x4d,
25 0x8a, 0x91, 0xbc, 0x9b, 0xbf, 0x4a, 0xa3, 0x01
26};
27
28static bool acpi_property_value_ok(const union acpi_object *value)
29{
30 int j;
31
32 /*
33 * The value must be an integer, a string, a reference, or a package
34 * whose every element must be an integer, a string, or a reference.
35 */
36 switch (value->type) {
37 case ACPI_TYPE_INTEGER:
38 case ACPI_TYPE_STRING:
39 case ACPI_TYPE_LOCAL_REFERENCE:
40 return true;
41
42 case ACPI_TYPE_PACKAGE:
43 for (j = 0; j < value->package.count; j++)
44 switch (value->package.elements[j].type) {
45 case ACPI_TYPE_INTEGER:
46 case ACPI_TYPE_STRING:
47 case ACPI_TYPE_LOCAL_REFERENCE:
48 continue;
49
50 default:
51 return false;
52 }
53
54 return true;
55 }
56 return false;
57}
58
59static bool acpi_properties_format_valid(const union acpi_object *properties)
60{
61 int i;
62
63 for (i = 0; i < properties->package.count; i++) {
64 const union acpi_object *property;
65
66 property = &properties->package.elements[i];
67 /*
68 * Only two elements allowed, the first one must be a string and
69 * the second one has to satisfy certain conditions.
70 */
71 if (property->package.count != 2
72 || property->package.elements[0].type != ACPI_TYPE_STRING
73 || !acpi_property_value_ok(&property->package.elements[1]))
74 return false;
75 }
76 return true;
77}
78
79static void acpi_init_of_compatible(struct acpi_device *adev)
80{
81 const union acpi_object *of_compatible;
82 struct acpi_hardware_id *hwid;
83 bool acpi_of = false;
84 int ret;
85
86 /*
87 * Check if the special PRP0001 ACPI ID is present and in that
88 * case we fill in Device Tree compatible properties for this
89 * device.
90 */
91 list_for_each_entry(hwid, &adev->pnp.ids, list) {
92 if (!strcmp(hwid->id, "PRP0001")) {
93 acpi_of = true;
94 break;
95 }
96 }
97
98 if (!acpi_of)
99 return;
100
101 ret = acpi_dev_get_property_array(adev, "compatible", ACPI_TYPE_STRING,
102 &of_compatible);
103 if (ret) {
104 ret = acpi_dev_get_property(adev, "compatible",
105 ACPI_TYPE_STRING, &of_compatible);
106 if (ret) {
107 acpi_handle_warn(adev->handle,
108 "PRP0001 requires compatible property\n");
109 return;
110 }
111 }
112 adev->data.of_compatible = of_compatible;
113}
114
115void acpi_init_properties(struct acpi_device *adev)
116{
117 struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
118 const union acpi_object *desc;
119 acpi_status status;
120 int i;
121
122 status = acpi_evaluate_object_typed(adev->handle, "_DSD", NULL, &buf,
123 ACPI_TYPE_PACKAGE);
124 if (ACPI_FAILURE(status))
125 return;
126
127 desc = buf.pointer;
128 if (desc->package.count % 2)
129 goto fail;
130
131 /* Look for the device properties UUID. */
132 for (i = 0; i < desc->package.count; i += 2) {
133 const union acpi_object *uuid, *properties;
134
135 uuid = &desc->package.elements[i];
136 properties = &desc->package.elements[i + 1];
137
138 /*
139 * The first element must be a UUID and the second one must be
140 * a package.
141 */
142 if (uuid->type != ACPI_TYPE_BUFFER || uuid->buffer.length != 16
143 || properties->type != ACPI_TYPE_PACKAGE)
144 break;
145
146 if (memcmp(uuid->buffer.pointer, prp_uuid, sizeof(prp_uuid)))
147 continue;
148
149 /*
150 * We found the matching UUID. Now validate the format of the
151 * package immediately following it.
152 */
153 if (!acpi_properties_format_valid(properties))
154 break;
155
156 adev->data.pointer = buf.pointer;
157 adev->data.properties = properties;
158
159 acpi_init_of_compatible(adev);
160 return;
161 }
162
163 fail:
164 dev_warn(&adev->dev, "Returned _DSD data is not valid, skipping\n");
165 ACPI_FREE(buf.pointer);
166}
167
168void acpi_free_properties(struct acpi_device *adev)
169{
170 ACPI_FREE((void *)adev->data.pointer);
171 adev->data.of_compatible = NULL;
172 adev->data.pointer = NULL;
173 adev->data.properties = NULL;
174}
175
176/**
177 * acpi_dev_get_property - return an ACPI property with given name
178 * @adev: ACPI device to get property
179 * @name: Name of the property
180 * @type: Expected property type
181 * @obj: Location to store the property value (if not %NULL)
182 *
183 * Look up a property with @name and store a pointer to the resulting ACPI
184 * object at the location pointed to by @obj if found.
185 *
186 * Callers must not attempt to free the returned objects. These objects will be
187 * freed by the ACPI core automatically during the removal of @adev.
188 *
189 * Return: %0 if property with @name has been found (success),
190 * %-EINVAL if the arguments are invalid,
191 * %-ENODATA if the property doesn't exist,
192 * %-EPROTO if the property value type doesn't match @type.
193 */
194int acpi_dev_get_property(struct acpi_device *adev, const char *name,
195 acpi_object_type type, const union acpi_object **obj)
196{
197 const union acpi_object *properties;
198 int i;
199
200 if (!adev || !name)
201 return -EINVAL;
202
203 if (!adev->data.pointer || !adev->data.properties)
204 return -ENODATA;
205
206 properties = adev->data.properties;
207 for (i = 0; i < properties->package.count; i++) {
208 const union acpi_object *propname, *propvalue;
209 const union acpi_object *property;
210
211 property = &properties->package.elements[i];
212
213 propname = &property->package.elements[0];
214 propvalue = &property->package.elements[1];
215
216 if (!strcmp(name, propname->string.pointer)) {
217 if (type != ACPI_TYPE_ANY && propvalue->type != type)
218 return -EPROTO;
219 else if (obj)
220 *obj = propvalue;
221
222 return 0;
223 }
224 }
225 return -ENODATA;
226}
227EXPORT_SYMBOL_GPL(acpi_dev_get_property);
228
229/**
230 * acpi_dev_get_property_array - return an ACPI array property with given name
231 * @adev: ACPI device to get property
232 * @name: Name of the property
233 * @type: Expected type of array elements
234 * @obj: Location to store a pointer to the property value (if not NULL)
235 *
236 * Look up an array property with @name and store a pointer to the resulting
237 * ACPI object at the location pointed to by @obj if found.
238 *
239 * Callers must not attempt to free the returned objects. Those objects will be
240 * freed by the ACPI core automatically during the removal of @adev.
241 *
242 * Return: %0 if array property (package) with @name has been found (success),
243 * %-EINVAL if the arguments are invalid,
244 * %-ENODATA if the property doesn't exist,
245 * %-EPROTO if the property is not a package or the type of its elements
246 * doesn't match @type.
247 */
248int acpi_dev_get_property_array(struct acpi_device *adev, const char *name,
249 acpi_object_type type,
250 const union acpi_object **obj)
251{
252 const union acpi_object *prop;
253 int ret, i;
254
255 ret = acpi_dev_get_property(adev, name, ACPI_TYPE_PACKAGE, &prop);
256 if (ret)
257 return ret;
258
259 if (type != ACPI_TYPE_ANY) {
260 /* Check that all elements are of correct type. */
261 for (i = 0; i < prop->package.count; i++)
262 if (prop->package.elements[i].type != type)
263 return -EPROTO;
264 }
265 if (obj)
266 *obj = prop;
267
268 return 0;
269}
270EXPORT_SYMBOL_GPL(acpi_dev_get_property_array);
271
/**
 * acpi_dev_get_property_reference - returns handle to the referenced object
 * @adev: ACPI device to get property
 * @name: Name of the property
 * @index: Index of the reference to return
 * @args: Location to store the returned reference with optional arguments
 *
 * Find property with @name, verify that it is a package containing at least
 * one object reference and if so, store the ACPI device object pointer to the
 * target object in @args->adev.  If the reference includes arguments, store
 * them in the @args->args[] array.
 *
 * If there's more than one reference in the property value package, @index is
 * used to select the one to return.
 *
 * Return: %0 on success, negative error code on failure.
 */
int acpi_dev_get_property_reference(struct acpi_device *adev,
				    const char *name, size_t index,
				    struct acpi_reference_args *args)
{
	const union acpi_object *element, *end;
	const union acpi_object *obj;
	struct acpi_device *device;
	int ret, idx = 0;

	ret = acpi_dev_get_property(adev, name, ACPI_TYPE_ANY, &obj);
	if (ret)
		return ret;

	/*
	 * The simplest case is when the value is a single reference.  Just
	 * return that reference then.
	 */
	if (obj->type == ACPI_TYPE_LOCAL_REFERENCE) {
		/* A bare reference can only satisfy @index == 0. */
		if (index)
			return -EINVAL;

		ret = acpi_bus_get_device(obj->reference.handle, &device);
		if (ret)
			return ret;

		args->adev = device;
		args->nargs = 0;
		return 0;
	}

	/*
	 * If it is not a single reference, then it is a package of
	 * references followed by number of ints as follows:
	 *
	 *  Package () { REF, INT, REF, INT, INT }
	 *
	 * The index argument is then used to determine which reference
	 * the caller wants (along with the arguments).
	 */
	if (obj->type != ACPI_TYPE_PACKAGE || index >= obj->package.count)
		return -EPROTO;

	element = obj->package.elements;
	end = element + obj->package.count;

	/* Walk REF [INT...] groups until the @index-th reference is found. */
	while (element < end) {
		u32 nargs, i;

		/* Every group must begin with a reference element. */
		if (element->type != ACPI_TYPE_LOCAL_REFERENCE)
			return -EPROTO;

		ret = acpi_bus_get_device(element->reference.handle, &device);
		if (ret)
			return -ENODEV;

		element++;
		nargs = 0;

		/* assume following integer elements are all args */
		for (i = 0; element + i < end; i++) {
			int type = element[i].type;

			if (type == ACPI_TYPE_INTEGER)
				nargs++;
			else if (type == ACPI_TYPE_LOCAL_REFERENCE)
				break;	/* next group starts here */
			else
				return -EPROTO;
		}

		/* idx counts the references seen; stop at the requested one. */
		if (idx++ == index) {
			args->adev = device;
			args->nargs = nargs;
			for (i = 0; i < nargs; i++)
				args->args[i] = element[i].integer.value;

			return 0;
		}

		/* Skip over this reference's argument integers. */
		element += nargs;
	}

	return -EPROTO;
}
EXPORT_SYMBOL_GPL(acpi_dev_get_property_reference);
374
375int acpi_dev_prop_get(struct acpi_device *adev, const char *propname,
376 void **valptr)
377{
378 return acpi_dev_get_property(adev, propname, ACPI_TYPE_ANY,
379 (const union acpi_object **)valptr);
380}
381
382int acpi_dev_prop_read_single(struct acpi_device *adev, const char *propname,
383 enum dev_prop_type proptype, void *val)
384{
385 const union acpi_object *obj;
386 int ret;
387
388 if (!val)
389 return -EINVAL;
390
391 if (proptype >= DEV_PROP_U8 && proptype <= DEV_PROP_U64) {
392 ret = acpi_dev_get_property(adev, propname, ACPI_TYPE_INTEGER, &obj);
393 if (ret)
394 return ret;
395
396 switch (proptype) {
397 case DEV_PROP_U8:
398 if (obj->integer.value > U8_MAX)
399 return -EOVERFLOW;
400 *(u8 *)val = obj->integer.value;
401 break;
402 case DEV_PROP_U16:
403 if (obj->integer.value > U16_MAX)
404 return -EOVERFLOW;
405 *(u16 *)val = obj->integer.value;
406 break;
407 case DEV_PROP_U32:
408 if (obj->integer.value > U32_MAX)
409 return -EOVERFLOW;
410 *(u32 *)val = obj->integer.value;
411 break;
412 default:
413 *(u64 *)val = obj->integer.value;
414 break;
415 }
416 } else if (proptype == DEV_PROP_STRING) {
417 ret = acpi_dev_get_property(adev, propname, ACPI_TYPE_STRING, &obj);
418 if (ret)
419 return ret;
420
421 *(char **)val = obj->string.pointer;
422 } else {
423 ret = -EINVAL;
424 }
425 return ret;
426}
427
428static int acpi_copy_property_array_u8(const union acpi_object *items, u8 *val,
429 size_t nval)
430{
431 int i;
432
433 for (i = 0; i < nval; i++) {
434 if (items[i].type != ACPI_TYPE_INTEGER)
435 return -EPROTO;
436 if (items[i].integer.value > U8_MAX)
437 return -EOVERFLOW;
438
439 val[i] = items[i].integer.value;
440 }
441 return 0;
442}
443
444static int acpi_copy_property_array_u16(const union acpi_object *items,
445 u16 *val, size_t nval)
446{
447 int i;
448
449 for (i = 0; i < nval; i++) {
450 if (items[i].type != ACPI_TYPE_INTEGER)
451 return -EPROTO;
452 if (items[i].integer.value > U16_MAX)
453 return -EOVERFLOW;
454
455 val[i] = items[i].integer.value;
456 }
457 return 0;
458}
459
460static int acpi_copy_property_array_u32(const union acpi_object *items,
461 u32 *val, size_t nval)
462{
463 int i;
464
465 for (i = 0; i < nval; i++) {
466 if (items[i].type != ACPI_TYPE_INTEGER)
467 return -EPROTO;
468 if (items[i].integer.value > U32_MAX)
469 return -EOVERFLOW;
470
471 val[i] = items[i].integer.value;
472 }
473 return 0;
474}
475
476static int acpi_copy_property_array_u64(const union acpi_object *items,
477 u64 *val, size_t nval)
478{
479 int i;
480
481 for (i = 0; i < nval; i++) {
482 if (items[i].type != ACPI_TYPE_INTEGER)
483 return -EPROTO;
484
485 val[i] = items[i].integer.value;
486 }
487 return 0;
488}
489
490static int acpi_copy_property_array_string(const union acpi_object *items,
491 char **val, size_t nval)
492{
493 int i;
494
495 for (i = 0; i < nval; i++) {
496 if (items[i].type != ACPI_TYPE_STRING)
497 return -EPROTO;
498
499 val[i] = items[i].string.pointer;
500 }
501 return 0;
502}
503
504int acpi_dev_prop_read(struct acpi_device *adev, const char *propname,
505 enum dev_prop_type proptype, void *val, size_t nval)
506{
507 const union acpi_object *obj;
508 const union acpi_object *items;
509 int ret;
510
511 if (val && nval == 1) {
512 ret = acpi_dev_prop_read_single(adev, propname, proptype, val);
513 if (!ret)
514 return ret;
515 }
516
517 ret = acpi_dev_get_property_array(adev, propname, ACPI_TYPE_ANY, &obj);
518 if (ret)
519 return ret;
520
521 if (!val)
522 return obj->package.count;
523 else if (nval <= 0)
524 return -EINVAL;
525
526 if (nval > obj->package.count)
527 return -EOVERFLOW;
528
529 items = obj->package.elements;
530 switch (proptype) {
531 case DEV_PROP_U8:
532 ret = acpi_copy_property_array_u8(items, (u8 *)val, nval);
533 break;
534 case DEV_PROP_U16:
535 ret = acpi_copy_property_array_u16(items, (u16 *)val, nval);
536 break;
537 case DEV_PROP_U32:
538 ret = acpi_copy_property_array_u32(items, (u32 *)val, nval);
539 break;
540 case DEV_PROP_U64:
541 ret = acpi_copy_property_array_u64(items, (u64 *)val, nval);
542 break;
543 case DEV_PROP_STRING:
544 ret = acpi_copy_property_array_string(items, (char **)val, nval);
545 break;
546 default:
547 ret = -EINVAL;
548 break;
549 }
550 return ret;
551}
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 0476e90b2091..1b1cf558d3d3 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -36,6 +36,8 @@ bool acpi_force_hot_remove;
36 36
37static const char *dummy_hid = "device"; 37static const char *dummy_hid = "device";
38 38
39static LIST_HEAD(acpi_dep_list);
40static DEFINE_MUTEX(acpi_dep_list_lock);
39static LIST_HEAD(acpi_bus_id_list); 41static LIST_HEAD(acpi_bus_id_list);
40static DEFINE_MUTEX(acpi_scan_lock); 42static DEFINE_MUTEX(acpi_scan_lock);
41static LIST_HEAD(acpi_scan_handlers_list); 43static LIST_HEAD(acpi_scan_handlers_list);
@@ -43,6 +45,12 @@ DEFINE_MUTEX(acpi_device_lock);
43LIST_HEAD(acpi_wakeup_device_list); 45LIST_HEAD(acpi_wakeup_device_list);
44static DEFINE_MUTEX(acpi_hp_context_lock); 46static DEFINE_MUTEX(acpi_hp_context_lock);
45 47
48struct acpi_dep_data {
49 struct list_head node;
50 acpi_handle master;
51 acpi_handle slave;
52};
53
46struct acpi_device_bus_id{ 54struct acpi_device_bus_id{
47 char bus_id[15]; 55 char bus_id[15];
48 unsigned int instance_no; 56 unsigned int instance_no;
@@ -124,17 +132,56 @@ static int create_modalias(struct acpi_device *acpi_dev, char *modalias,
124 if (list_empty(&acpi_dev->pnp.ids)) 132 if (list_empty(&acpi_dev->pnp.ids))
125 return 0; 133 return 0;
126 134
127 len = snprintf(modalias, size, "acpi:"); 135 /*
128 size -= len; 136 * If the device has PRP0001 we expose DT compatible modalias
129 137 * instead in form of of:NnameTCcompatible.
130 list_for_each_entry(id, &acpi_dev->pnp.ids, list) { 138 */
131 count = snprintf(&modalias[len], size, "%s:", id->id); 139 if (acpi_dev->data.of_compatible) {
132 if (count < 0) 140 struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
133 return -EINVAL; 141 const union acpi_object *of_compatible, *obj;
134 if (count >= size) 142 int i, nval;
135 return -ENOMEM; 143 char *c;
136 len += count; 144
137 size -= count; 145 acpi_get_name(acpi_dev->handle, ACPI_SINGLE_NAME, &buf);
146 /* DT strings are all in lower case */
147 for (c = buf.pointer; *c != '\0'; c++)
148 *c = tolower(*c);
149
150 len = snprintf(modalias, size, "of:N%sT", (char *)buf.pointer);
151 ACPI_FREE(buf.pointer);
152
153 of_compatible = acpi_dev->data.of_compatible;
154 if (of_compatible->type == ACPI_TYPE_PACKAGE) {
155 nval = of_compatible->package.count;
156 obj = of_compatible->package.elements;
157 } else { /* Must be ACPI_TYPE_STRING. */
158 nval = 1;
159 obj = of_compatible;
160 }
161 for (i = 0; i < nval; i++, obj++) {
162 count = snprintf(&modalias[len], size, "C%s",
163 obj->string.pointer);
164 if (count < 0)
165 return -EINVAL;
166 if (count >= size)
167 return -ENOMEM;
168
169 len += count;
170 size -= count;
171 }
172 } else {
173 len = snprintf(modalias, size, "acpi:");
174 size -= len;
175
176 list_for_each_entry(id, &acpi_dev->pnp.ids, list) {
177 count = snprintf(&modalias[len], size, "%s:", id->id);
178 if (count < 0)
179 return -EINVAL;
180 if (count >= size)
181 return -ENOMEM;
182 len += count;
183 size -= count;
184 }
138 } 185 }
139 186
140 modalias[len] = '\0'; 187 modalias[len] = '\0';
@@ -902,6 +949,51 @@ int acpi_match_device_ids(struct acpi_device *device,
902} 949}
903EXPORT_SYMBOL(acpi_match_device_ids); 950EXPORT_SYMBOL(acpi_match_device_ids);
904 951
952/* Performs match against special "PRP0001" shoehorn ACPI ID */
953static bool acpi_of_driver_match_device(struct device *dev,
954 const struct device_driver *drv)
955{
956 const union acpi_object *of_compatible, *obj;
957 struct acpi_device *adev;
958 int i, nval;
959
960 adev = ACPI_COMPANION(dev);
961 if (!adev)
962 return false;
963
964 of_compatible = adev->data.of_compatible;
965 if (!drv->of_match_table || !of_compatible)
966 return false;
967
968 if (of_compatible->type == ACPI_TYPE_PACKAGE) {
969 nval = of_compatible->package.count;
970 obj = of_compatible->package.elements;
971 } else { /* Must be ACPI_TYPE_STRING. */
972 nval = 1;
973 obj = of_compatible;
974 }
975 /* Now we can look for the driver DT compatible strings */
976 for (i = 0; i < nval; i++, obj++) {
977 const struct of_device_id *id;
978
979 for (id = drv->of_match_table; id->compatible[0]; id++)
980 if (!strcasecmp(obj->string.pointer, id->compatible))
981 return true;
982 }
983
984 return false;
985}
986
987bool acpi_driver_match_device(struct device *dev,
988 const struct device_driver *drv)
989{
990 if (!drv->acpi_match_table)
991 return acpi_of_driver_match_device(dev, drv);
992
993 return !!acpi_match_device(drv->acpi_match_table, dev);
994}
995EXPORT_SYMBOL_GPL(acpi_driver_match_device);
996
905static void acpi_free_power_resources_lists(struct acpi_device *device) 997static void acpi_free_power_resources_lists(struct acpi_device *device)
906{ 998{
907 int i; 999 int i;
@@ -922,6 +1014,7 @@ static void acpi_device_release(struct device *dev)
922{ 1014{
923 struct acpi_device *acpi_dev = to_acpi_device(dev); 1015 struct acpi_device *acpi_dev = to_acpi_device(dev);
924 1016
1017 acpi_free_properties(acpi_dev);
925 acpi_free_pnp_ids(&acpi_dev->pnp); 1018 acpi_free_pnp_ids(&acpi_dev->pnp);
926 acpi_free_power_resources_lists(acpi_dev); 1019 acpi_free_power_resources_lists(acpi_dev);
927 kfree(acpi_dev); 1020 kfree(acpi_dev);
@@ -1304,6 +1397,26 @@ int acpi_device_add(struct acpi_device *device,
1304 return result; 1397 return result;
1305} 1398}
1306 1399
1400struct acpi_device *acpi_get_next_child(struct device *dev,
1401 struct acpi_device *child)
1402{
1403 struct acpi_device *adev = ACPI_COMPANION(dev);
1404 struct list_head *head, *next;
1405
1406 if (!adev)
1407 return NULL;
1408
1409 head = &adev->children;
1410 if (list_empty(head))
1411 return NULL;
1412
1413 if (!child)
1414 return list_first_entry(head, struct acpi_device, node);
1415
1416 next = child->node.next;
1417 return next == head ? NULL : list_entry(next, struct acpi_device, node);
1418}
1419
1307/* -------------------------------------------------------------------------- 1420/* --------------------------------------------------------------------------
1308 Driver Management 1421 Driver Management
1309 -------------------------------------------------------------------------- */ 1422 -------------------------------------------------------------------------- */
@@ -1923,9 +2036,11 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
1923 device->device_type = type; 2036 device->device_type = type;
1924 device->handle = handle; 2037 device->handle = handle;
1925 device->parent = acpi_bus_get_parent(handle); 2038 device->parent = acpi_bus_get_parent(handle);
2039 device->fwnode.type = FWNODE_ACPI;
1926 acpi_set_device_status(device, sta); 2040 acpi_set_device_status(device, sta);
1927 acpi_device_get_busid(device); 2041 acpi_device_get_busid(device);
1928 acpi_set_pnp_ids(handle, &device->pnp, type); 2042 acpi_set_pnp_ids(handle, &device->pnp, type);
2043 acpi_init_properties(device);
1929 acpi_bus_get_flags(device); 2044 acpi_bus_get_flags(device);
1930 device->flags.match_driver = false; 2045 device->flags.match_driver = false;
1931 device->flags.initialized = true; 2046 device->flags.initialized = true;
@@ -2086,6 +2201,59 @@ static void acpi_scan_init_hotplug(struct acpi_device *adev)
2086 } 2201 }
2087} 2202}
2088 2203
2204static void acpi_device_dep_initialize(struct acpi_device *adev)
2205{
2206 struct acpi_dep_data *dep;
2207 struct acpi_handle_list dep_devices;
2208 acpi_status status;
2209 int i;
2210
2211 if (!acpi_has_method(adev->handle, "_DEP"))
2212 return;
2213
2214 status = acpi_evaluate_reference(adev->handle, "_DEP", NULL,
2215 &dep_devices);
2216 if (ACPI_FAILURE(status)) {
2217 dev_err(&adev->dev, "Failed to evaluate _DEP.\n");
2218 return;
2219 }
2220
2221 for (i = 0; i < dep_devices.count; i++) {
2222 struct acpi_device_info *info;
2223 int skip;
2224
2225 status = acpi_get_object_info(dep_devices.handles[i], &info);
2226 if (ACPI_FAILURE(status)) {
2227 dev_err(&adev->dev, "Error reading device info\n");
2228 continue;
2229 }
2230
2231 /*
2232 * Skip the dependency of Windows System Power
2233 * Management Controller
2234 */
2235 skip = info->valid & ACPI_VALID_HID &&
2236 !strcmp(info->hardware_id.string, "INT3396");
2237
2238 kfree(info);
2239
2240 if (skip)
2241 continue;
2242
2243 dep = kzalloc(sizeof(struct acpi_dep_data), GFP_KERNEL);
2244 if (!dep)
2245 return;
2246
2247 dep->master = dep_devices.handles[i];
2248 dep->slave = adev->handle;
2249 adev->dep_unmet++;
2250
2251 mutex_lock(&acpi_dep_list_lock);
2252 list_add_tail(&dep->node , &acpi_dep_list);
2253 mutex_unlock(&acpi_dep_list_lock);
2254 }
2255}
2256
2089static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl_not_used, 2257static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl_not_used,
2090 void *not_used, void **return_value) 2258 void *not_used, void **return_value)
2091{ 2259{
@@ -2112,6 +2280,7 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl_not_used,
2112 return AE_CTRL_DEPTH; 2280 return AE_CTRL_DEPTH;
2113 2281
2114 acpi_scan_init_hotplug(device); 2282 acpi_scan_init_hotplug(device);
2283 acpi_device_dep_initialize(device);
2115 2284
2116 out: 2285 out:
2117 if (!*return_value) 2286 if (!*return_value)
@@ -2232,6 +2401,29 @@ static void acpi_bus_attach(struct acpi_device *device)
2232 device->handler->hotplug.notify_online(device); 2401 device->handler->hotplug.notify_online(device);
2233} 2402}
2234 2403
2404void acpi_walk_dep_device_list(acpi_handle handle)
2405{
2406 struct acpi_dep_data *dep, *tmp;
2407 struct acpi_device *adev;
2408
2409 mutex_lock(&acpi_dep_list_lock);
2410 list_for_each_entry_safe(dep, tmp, &acpi_dep_list, node) {
2411 if (dep->master == handle) {
2412 acpi_bus_get_device(dep->slave, &adev);
2413 if (!adev)
2414 continue;
2415
2416 adev->dep_unmet--;
2417 if (!adev->dep_unmet)
2418 acpi_bus_attach(adev);
2419 list_del(&dep->node);
2420 kfree(dep);
2421 }
2422 }
2423 mutex_unlock(&acpi_dep_list_lock);
2424}
2425EXPORT_SYMBOL_GPL(acpi_walk_dep_device_list);
2426
2235/** 2427/**
2236 * acpi_bus_scan - Add ACPI device node objects in a given namespace scope. 2428 * acpi_bus_scan - Add ACPI device node objects in a given namespace scope.
2237 * @handle: Root of the namespace scope to scan. 2429 * @handle: Root of the namespace scope to scan.
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 05a31b573fc3..8aa9254a387f 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -630,6 +630,7 @@ static int acpi_freeze_begin(void)
630static int acpi_freeze_prepare(void) 630static int acpi_freeze_prepare(void)
631{ 631{
632 acpi_enable_all_wakeup_gpes(); 632 acpi_enable_all_wakeup_gpes();
633 acpi_os_wait_events_complete();
633 enable_irq_wake(acpi_gbl_FADT.sci_interrupt); 634 enable_irq_wake(acpi_gbl_FADT.sci_interrupt);
634 return 0; 635 return 0;
635} 636}
@@ -825,6 +826,7 @@ static void acpi_power_off_prepare(void)
825 /* Prepare to power off the system */ 826 /* Prepare to power off the system */
826 acpi_sleep_prepare(ACPI_STATE_S5); 827 acpi_sleep_prepare(ACPI_STATE_S5);
827 acpi_disable_all_gpes(); 828 acpi_disable_all_gpes();
829 acpi_os_wait_events_complete();
828} 830}
829 831
830static void acpi_power_off(void) 832static void acpi_power_off(void)
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index 6d5a6cda0734..93b81523a2fe 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -190,30 +190,24 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
190 } 190 }
191} 191}
192 192
193
194int __init 193int __init
195acpi_table_parse_entries(char *id, 194acpi_parse_entries(char *id, unsigned long table_size,
196 unsigned long table_size, 195 acpi_tbl_entry_handler handler,
197 int entry_id, 196 struct acpi_table_header *table_header,
198 acpi_tbl_entry_handler handler, 197 int entry_id, unsigned int max_entries)
199 unsigned int max_entries)
200{ 198{
201 struct acpi_table_header *table_header = NULL;
202 struct acpi_subtable_header *entry; 199 struct acpi_subtable_header *entry;
203 unsigned int count = 0; 200 int count = 0;
204 unsigned long table_end; 201 unsigned long table_end;
205 acpi_size tbl_size;
206 202
207 if (acpi_disabled) 203 if (acpi_disabled)
208 return -ENODEV; 204 return -ENODEV;
209 205
210 if (!handler) 206 if (!id || !handler)
211 return -EINVAL; 207 return -EINVAL;
212 208
213 if (strncmp(id, ACPI_SIG_MADT, 4) == 0) 209 if (!table_size)
214 acpi_get_table_with_size(id, acpi_apic_instance, &table_header, &tbl_size); 210 return -EINVAL;
215 else
216 acpi_get_table_with_size(id, 0, &table_header, &tbl_size);
217 211
218 if (!table_header) { 212 if (!table_header) {
219 pr_warn("%4.4s not present\n", id); 213 pr_warn("%4.4s not present\n", id);
@@ -230,9 +224,12 @@ acpi_table_parse_entries(char *id,
230 while (((unsigned long)entry) + sizeof(struct acpi_subtable_header) < 224 while (((unsigned long)entry) + sizeof(struct acpi_subtable_header) <
231 table_end) { 225 table_end) {
232 if (entry->type == entry_id 226 if (entry->type == entry_id
233 && (!max_entries || count++ < max_entries)) 227 && (!max_entries || count < max_entries)) {
234 if (handler(entry, table_end)) 228 if (handler(entry, table_end))
235 goto err; 229 return -EINVAL;
230
231 count++;
232 }
236 233
237 /* 234 /*
238 * If entry->length is 0, break from this loop to avoid 235 * If entry->length is 0, break from this loop to avoid
@@ -240,22 +237,53 @@ acpi_table_parse_entries(char *id,
240 */ 237 */
241 if (entry->length == 0) { 238 if (entry->length == 0) {
242 pr_err("[%4.4s:0x%02x] Invalid zero length\n", id, entry_id); 239 pr_err("[%4.4s:0x%02x] Invalid zero length\n", id, entry_id);
243 goto err; 240 return -EINVAL;
244 } 241 }
245 242
246 entry = (struct acpi_subtable_header *) 243 entry = (struct acpi_subtable_header *)
247 ((unsigned long)entry + entry->length); 244 ((unsigned long)entry + entry->length);
248 } 245 }
246
249 if (max_entries && count > max_entries) { 247 if (max_entries && count > max_entries) {
250 pr_warn("[%4.4s:0x%02x] ignored %i entries of %i found\n", 248 pr_warn("[%4.4s:0x%02x] ignored %i entries of %i found\n",
251 id, entry_id, count - max_entries, count); 249 id, entry_id, count - max_entries, count);
252 } 250 }
253 251
254 early_acpi_os_unmap_memory((char *)table_header, tbl_size);
255 return count; 252 return count;
256err: 253}
254
255int __init
256acpi_table_parse_entries(char *id,
257 unsigned long table_size,
258 int entry_id,
259 acpi_tbl_entry_handler handler,
260 unsigned int max_entries)
261{
262 struct acpi_table_header *table_header = NULL;
263 acpi_size tbl_size;
264 int count;
265 u32 instance = 0;
266
267 if (acpi_disabled)
268 return -ENODEV;
269
270 if (!id || !handler)
271 return -EINVAL;
272
273 if (!strncmp(id, ACPI_SIG_MADT, 4))
274 instance = acpi_apic_instance;
275
276 acpi_get_table_with_size(id, instance, &table_header, &tbl_size);
277 if (!table_header) {
278 pr_warn("%4.4s not present\n", id);
279 return -ENODEV;
280 }
281
282 count = acpi_parse_entries(id, table_size, handler, table_header,
283 entry_id, max_entries);
284
257 early_acpi_os_unmap_memory((char *)table_header, tbl_size); 285 early_acpi_os_unmap_memory((char *)table_header, tbl_size);
258 return -EINVAL; 286 return count;
259} 287}
260 288
261int __init 289int __init
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 371ac12d25b1..dd8ff63ee2b4 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -136,8 +136,7 @@ acpi_extract_package(union acpi_object *package,
136 break; 136 break;
137 case 'B': 137 case 'B':
138 size_required += 138 size_required +=
139 sizeof(u8 *) + 139 sizeof(u8 *) + element->buffer.length;
140 (element->buffer.length * sizeof(u8));
141 tail_offset += sizeof(u8 *); 140 tail_offset += sizeof(u8 *);
142 break; 141 break;
143 default: 142 default:
@@ -255,7 +254,7 @@ acpi_extract_package(union acpi_object *package,
255 memcpy(tail, element->buffer.pointer, 254 memcpy(tail, element->buffer.pointer,
256 element->buffer.length); 255 element->buffer.length);
257 head += sizeof(u8 *); 256 head += sizeof(u8 *);
258 tail += element->buffer.length * sizeof(u8); 257 tail += element->buffer.length;
259 break; 258 break;
260 default: 259 default:
261 /* Should never get here */ 260 /* Should never get here */
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 807a88a0f394..185a57d13723 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -1164,7 +1164,8 @@ static bool acpi_video_device_in_dod(struct acpi_video_device *device)
1164 return true; 1164 return true;
1165 1165
1166 for (i = 0; i < video->attached_count; i++) { 1166 for (i = 0; i < video->attached_count; i++) {
1167 if (video->attached_array[i].bind_info == device) 1167 if ((video->attached_array[i].value.int_val & 0xfff) ==
1168 (device->device_id & 0xfff))
1168 return true; 1169 return true;
1169 } 1170 }
1170 1171
@@ -1680,6 +1681,19 @@ static void acpi_video_dev_register_backlight(struct acpi_video_device *device)
1680 printk(KERN_ERR PREFIX "Create sysfs link\n"); 1681 printk(KERN_ERR PREFIX "Create sysfs link\n");
1681} 1682}
1682 1683
1684static void acpi_video_run_bcl_for_osi(struct acpi_video_bus *video)
1685{
1686 struct acpi_video_device *dev;
1687 union acpi_object *levels;
1688
1689 mutex_lock(&video->device_list_lock);
1690 list_for_each_entry(dev, &video->video_device_list, entry) {
1691 if (!acpi_video_device_lcd_query_levels(dev, &levels))
1692 kfree(levels);
1693 }
1694 mutex_unlock(&video->device_list_lock);
1695}
1696
1683static int acpi_video_bus_register_backlight(struct acpi_video_bus *video) 1697static int acpi_video_bus_register_backlight(struct acpi_video_bus *video)
1684{ 1698{
1685 struct acpi_video_device *dev; 1699 struct acpi_video_device *dev;
@@ -1687,6 +1701,8 @@ static int acpi_video_bus_register_backlight(struct acpi_video_bus *video)
1687 if (video->backlight_registered) 1701 if (video->backlight_registered)
1688 return 0; 1702 return 0;
1689 1703
1704 acpi_video_run_bcl_for_osi(video);
1705
1690 if (!acpi_video_verify_backlight_support()) 1706 if (!acpi_video_verify_backlight_support())
1691 return 0; 1707 return 0;
1692 1708
diff --git a/drivers/amba/Kconfig b/drivers/amba/Kconfig
new file mode 100644
index 000000000000..4a5c9d279059
--- /dev/null
+++ b/drivers/amba/Kconfig
@@ -0,0 +1,14 @@
1config ARM_AMBA
2 bool
3
4if ARM_AMBA
5
6config TEGRA_AHB
7 bool "Enable AHB driver for NVIDIA Tegra SoCs"
8 default y if ARCH_TEGRA
9 help
10 Adds AHB configuration functionality for NVIDIA Tegra SoCs,
11 which controls AHB bus master arbitration and some performance
12 parameters (priority, prefetch size).
13
14endif
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index 47bbdc1b5be3..973a3332a85f 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -124,7 +124,7 @@ static const struct dev_pm_ops amba_pm = {
124 .thaw = pm_generic_thaw, 124 .thaw = pm_generic_thaw,
125 .poweroff = pm_generic_poweroff, 125 .poweroff = pm_generic_poweroff,
126 .restore = pm_generic_restore, 126 .restore = pm_generic_restore,
127 SET_PM_RUNTIME_PM_OPS( 127 SET_RUNTIME_PM_OPS(
128 amba_pm_runtime_suspend, 128 amba_pm_runtime_suspend,
129 amba_pm_runtime_resume, 129 amba_pm_runtime_resume,
130 NULL 130 NULL
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index e45f83789809..49f1e6890587 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -321,6 +321,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
321 { PCI_VDEVICE(INTEL, 0x8c87), board_ahci }, /* 9 Series RAID */ 321 { PCI_VDEVICE(INTEL, 0x8c87), board_ahci }, /* 9 Series RAID */
322 { PCI_VDEVICE(INTEL, 0x8c8e), board_ahci }, /* 9 Series RAID */ 322 { PCI_VDEVICE(INTEL, 0x8c8e), board_ahci }, /* 9 Series RAID */
323 { PCI_VDEVICE(INTEL, 0x8c8f), board_ahci }, /* 9 Series RAID */ 323 { PCI_VDEVICE(INTEL, 0x8c8f), board_ahci }, /* 9 Series RAID */
324 { PCI_VDEVICE(INTEL, 0x9d03), board_ahci }, /* Sunrise Point-LP AHCI */
325 { PCI_VDEVICE(INTEL, 0x9d05), board_ahci }, /* Sunrise Point-LP RAID */
326 { PCI_VDEVICE(INTEL, 0x9d07), board_ahci }, /* Sunrise Point-LP RAID */
324 { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H AHCI */ 327 { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H AHCI */
325 { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H RAID */ 328 { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H RAID */
326 { PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */ 329 { PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */
@@ -492,6 +495,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
492 * enabled. https://bugzilla.kernel.org/show_bug.cgi?id=60731 495 * enabled. https://bugzilla.kernel.org/show_bug.cgi?id=60731
493 */ 496 */
494 { PCI_VDEVICE(SAMSUNG, 0x1600), board_ahci_nomsi }, 497 { PCI_VDEVICE(SAMSUNG, 0x1600), board_ahci_nomsi },
498 { PCI_VDEVICE(SAMSUNG, 0xa800), board_ahci_nomsi },
495 499
496 /* Enmotus */ 500 /* Enmotus */
497 { PCI_DEVICE(0x1c44, 0x8000), board_ahci }, 501 { PCI_DEVICE(0x1c44, 0x8000), board_ahci },
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 0586f66d70fa..dd45c6a03e5d 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1164,7 +1164,7 @@ static int ata_scsi_dev_config(struct scsi_device *sdev,
1164 1164
1165 depth = min(sdev->host->can_queue, ata_id_queue_depth(dev->id)); 1165 depth = min(sdev->host->can_queue, ata_id_queue_depth(dev->id));
1166 depth = min(ATA_MAX_QUEUE - 1, depth); 1166 depth = min(ATA_MAX_QUEUE - 1, depth);
1167 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth); 1167 scsi_change_queue_depth(sdev, depth);
1168 } 1168 }
1169 1169
1170 blk_queue_flush_queueable(q, false); 1170 blk_queue_flush_queueable(q, false);
@@ -1243,21 +1243,17 @@ void ata_scsi_slave_destroy(struct scsi_device *sdev)
1243 * @ap: ATA port to which the device change the queue depth 1243 * @ap: ATA port to which the device change the queue depth
1244 * @sdev: SCSI device to configure queue depth for 1244 * @sdev: SCSI device to configure queue depth for
1245 * @queue_depth: new queue depth 1245 * @queue_depth: new queue depth
1246 * @reason: calling context
1247 * 1246 *
1248 * libsas and libata have different approaches for associating a sdev to 1247 * libsas and libata have different approaches for associating a sdev to
1249 * its ata_port. 1248 * its ata_port.
1250 * 1249 *
1251 */ 1250 */
1252int __ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev, 1251int __ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev,
1253 int queue_depth, int reason) 1252 int queue_depth)
1254{ 1253{
1255 struct ata_device *dev; 1254 struct ata_device *dev;
1256 unsigned long flags; 1255 unsigned long flags;
1257 1256
1258 if (reason != SCSI_QDEPTH_DEFAULT)
1259 return -EOPNOTSUPP;
1260
1261 if (queue_depth < 1 || queue_depth == sdev->queue_depth) 1257 if (queue_depth < 1 || queue_depth == sdev->queue_depth)
1262 return sdev->queue_depth; 1258 return sdev->queue_depth;
1263 1259
@@ -1282,15 +1278,13 @@ int __ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev,
1282 if (sdev->queue_depth == queue_depth) 1278 if (sdev->queue_depth == queue_depth)
1283 return -EINVAL; 1279 return -EINVAL;
1284 1280
1285 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, queue_depth); 1281 return scsi_change_queue_depth(sdev, queue_depth);
1286 return queue_depth;
1287} 1282}
1288 1283
1289/** 1284/**
1290 * ata_scsi_change_queue_depth - SCSI callback for queue depth config 1285 * ata_scsi_change_queue_depth - SCSI callback for queue depth config
1291 * @sdev: SCSI device to configure queue depth for 1286 * @sdev: SCSI device to configure queue depth for
1292 * @queue_depth: new queue depth 1287 * @queue_depth: new queue depth
1293 * @reason: calling context
1294 * 1288 *
1295 * This is libata standard hostt->change_queue_depth callback. 1289 * This is libata standard hostt->change_queue_depth callback.
1296 * SCSI will call into this callback when user tries to set queue 1290 * SCSI will call into this callback when user tries to set queue
@@ -1302,12 +1296,11 @@ int __ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev,
1302 * RETURNS: 1296 * RETURNS:
1303 * Newly configured queue depth. 1297 * Newly configured queue depth.
1304 */ 1298 */
1305int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth, 1299int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth)
1306 int reason)
1307{ 1300{
1308 struct ata_port *ap = ata_shost_to_port(sdev->host); 1301 struct ata_port *ap = ata_shost_to_port(sdev->host);
1309 1302
1310 return __ata_change_queue_depth(ap, sdev, queue_depth, reason); 1303 return __ata_change_queue_depth(ap, sdev, queue_depth);
1311} 1304}
1312 1305
1313/** 1306/**
@@ -3570,7 +3563,7 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd)
3570 ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap); 3563 ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
3571 break; 3564 break;
3572 3565
3573 case SERVICE_ACTION_IN: 3566 case SERVICE_ACTION_IN_16:
3574 if ((scsicmd[1] & 0x1f) == SAI_READ_CAPACITY_16) 3567 if ((scsicmd[1] & 0x1f) == SAI_READ_CAPACITY_16)
3575 ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap); 3568 ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
3576 else 3569 else
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 07bc7e4dbd04..65071591b143 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -1488,7 +1488,7 @@ static int sata_fsl_probe(struct platform_device *ofdev)
1488 host_priv->csr_base = csr_base; 1488 host_priv->csr_base = csr_base;
1489 1489
1490 irq = irq_of_parse_and_map(ofdev->dev.of_node, 0); 1490 irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
1491 if (irq < 0) { 1491 if (!irq) {
1492 dev_err(&ofdev->dev, "invalid irq from platform\n"); 1492 dev_err(&ofdev->dev, "invalid irq from platform\n");
1493 goto error_exit_with_cleanup; 1493 goto error_exit_with_cleanup;
1494 } 1494 }
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index cdf99fac139a..1db6f5ce5e89 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -1951,7 +1951,7 @@ static int nv_swncq_slave_config(struct scsi_device *sdev)
1951 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num)); 1951 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
1952 1952
1953 if (strncmp(model_num, "Maxtor", 6) == 0) { 1953 if (strncmp(model_num, "Maxtor", 6) == 0) {
1954 ata_scsi_change_queue_depth(sdev, 1, SCSI_QDEPTH_DEFAULT); 1954 ata_scsi_change_queue_depth(sdev, 1);
1955 ata_dev_notice(dev, "Disabling SWNCQ mode (depth %x)\n", 1955 ata_dev_notice(dev, "Disabling SWNCQ mode (depth %x)\n",
1956 sdev->queue_depth); 1956 sdev->queue_depth);
1957 } 1957 }
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index 7652e8dc188f..21b0bc6a9c96 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -1225,11 +1225,13 @@ static int fpga_probe(struct pci_dev *dev, const struct pci_device_id *id)
1225 card->config_regs = pci_iomap(dev, 0, CONFIG_RAM_SIZE); 1225 card->config_regs = pci_iomap(dev, 0, CONFIG_RAM_SIZE);
1226 if (!card->config_regs) { 1226 if (!card->config_regs) {
1227 dev_warn(&dev->dev, "Failed to ioremap config registers\n"); 1227 dev_warn(&dev->dev, "Failed to ioremap config registers\n");
1228 err = -ENOMEM;
1228 goto out_release_regions; 1229 goto out_release_regions;
1229 } 1230 }
1230 card->buffers = pci_iomap(dev, 1, DATA_RAM_SIZE); 1231 card->buffers = pci_iomap(dev, 1, DATA_RAM_SIZE);
1231 if (!card->buffers) { 1232 if (!card->buffers) {
1232 dev_warn(&dev->dev, "Failed to ioremap data buffers\n"); 1233 dev_warn(&dev->dev, "Failed to ioremap data buffers\n");
1234 err = -ENOMEM;
1233 goto out_unmap_config; 1235 goto out_unmap_config;
1234 } 1236 }
1235 1237
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index df04227d00cf..98504ec99c7d 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -267,18 +267,24 @@ comment "Default contiguous memory area size:"
267config CMA_SIZE_MBYTES 267config CMA_SIZE_MBYTES
268 int "Size in Mega Bytes" 268 int "Size in Mega Bytes"
269 depends on !CMA_SIZE_SEL_PERCENTAGE 269 depends on !CMA_SIZE_SEL_PERCENTAGE
270 default 0 if X86
270 default 16 271 default 16
271 help 272 help
272 Defines the size (in MiB) of the default memory area for Contiguous 273 Defines the size (in MiB) of the default memory area for Contiguous
273 Memory Allocator. 274 Memory Allocator. If the size of 0 is selected, CMA is disabled by
275 default, but it can be enabled by passing cma=size[MG] to the kernel.
276
274 277
275config CMA_SIZE_PERCENTAGE 278config CMA_SIZE_PERCENTAGE
276 int "Percentage of total memory" 279 int "Percentage of total memory"
277 depends on !CMA_SIZE_SEL_MBYTES 280 depends on !CMA_SIZE_SEL_MBYTES
281 default 0 if X86
278 default 10 282 default 10
279 help 283 help
280 Defines the size of the default memory area for Contiguous Memory 284 Defines the size of the default memory area for Contiguous Memory
281 Allocator as a percentage of the total memory in the system. 285 Allocator as a percentage of the total memory in the system.
286 If 0 percent is selected, CMA is disabled by default, but it can be
287 enabled by passing cma=size[MG] to the kernel.
282 288
283choice 289choice
284 prompt "Selected region size" 290 prompt "Selected region size"
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 6922cd6850a2..53c3fe1aeb29 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -4,7 +4,7 @@ obj-y := component.o core.o bus.o dd.o syscore.o \
4 driver.o class.o platform.o \ 4 driver.o class.o platform.o \
5 cpu.o firmware.o init.o map.o devres.o \ 5 cpu.o firmware.o init.o map.o devres.o \
6 attribute_container.o transport_class.o \ 6 attribute_container.o transport_class.o \
7 topology.o container.o 7 topology.o container.o property.o
8obj-$(CONFIG_DEVTMPFS) += devtmpfs.o 8obj-$(CONFIG_DEVTMPFS) += devtmpfs.o
9obj-$(CONFIG_DMA_CMA) += dma-contiguous.o 9obj-$(CONFIG_DMA_CMA) += dma-contiguous.o
10obj-y += power/ 10obj-y += power/
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index 78369305e069..d626576a4f75 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -12,6 +12,7 @@
12#include <linux/pm.h> 12#include <linux/pm.h>
13#include <linux/pm_clock.h> 13#include <linux/pm_clock.h>
14#include <linux/clk.h> 14#include <linux/clk.h>
15#include <linux/clkdev.h>
15#include <linux/slab.h> 16#include <linux/slab.h>
16#include <linux/err.h> 17#include <linux/err.h>
17 18
@@ -34,14 +35,20 @@ struct pm_clock_entry {
34/** 35/**
35 * pm_clk_enable - Enable a clock, reporting any errors 36 * pm_clk_enable - Enable a clock, reporting any errors
36 * @dev: The device for the given clock 37 * @dev: The device for the given clock
37 * @clk: The clock being enabled. 38 * @ce: PM clock entry corresponding to the clock.
38 */ 39 */
39static inline int __pm_clk_enable(struct device *dev, struct clk *clk) 40static inline int __pm_clk_enable(struct device *dev, struct pm_clock_entry *ce)
40{ 41{
41 int ret = clk_enable(clk); 42 int ret;
42 if (ret) 43
43 dev_err(dev, "%s: failed to enable clk %p, error %d\n", 44 if (ce->status < PCE_STATUS_ERROR) {
44 __func__, clk, ret); 45 ret = clk_enable(ce->clk);
46 if (!ret)
47 ce->status = PCE_STATUS_ENABLED;
48 else
49 dev_err(dev, "%s: failed to enable clk %p, error %d\n",
50 __func__, ce->clk, ret);
51 }
45 52
46 return ret; 53 return ret;
47} 54}
@@ -53,7 +60,8 @@ static inline int __pm_clk_enable(struct device *dev, struct clk *clk)
53 */ 60 */
54static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce) 61static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce)
55{ 62{
56 ce->clk = clk_get(dev, ce->con_id); 63 if (!ce->clk)
64 ce->clk = clk_get(dev, ce->con_id);
57 if (IS_ERR(ce->clk)) { 65 if (IS_ERR(ce->clk)) {
58 ce->status = PCE_STATUS_ERROR; 66 ce->status = PCE_STATUS_ERROR;
59 } else { 67 } else {
@@ -63,15 +71,8 @@ static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce)
63 } 71 }
64} 72}
65 73
66/** 74static int __pm_clk_add(struct device *dev, const char *con_id,
67 * pm_clk_add - Start using a device clock for power management. 75 struct clk *clk)
68 * @dev: Device whose clock is going to be used for power management.
69 * @con_id: Connection ID of the clock.
70 *
71 * Add the clock represented by @con_id to the list of clocks used for
72 * the power management of @dev.
73 */
74int pm_clk_add(struct device *dev, const char *con_id)
75{ 76{
76 struct pm_subsys_data *psd = dev_to_psd(dev); 77 struct pm_subsys_data *psd = dev_to_psd(dev);
77 struct pm_clock_entry *ce; 78 struct pm_clock_entry *ce;
@@ -93,6 +94,12 @@ int pm_clk_add(struct device *dev, const char *con_id)
93 kfree(ce); 94 kfree(ce);
94 return -ENOMEM; 95 return -ENOMEM;
95 } 96 }
97 } else {
98 if (IS_ERR(ce->clk) || !__clk_get(clk)) {
99 kfree(ce);
100 return -ENOENT;
101 }
102 ce->clk = clk;
96 } 103 }
97 104
98 pm_clk_acquire(dev, ce); 105 pm_clk_acquire(dev, ce);
@@ -104,6 +111,32 @@ int pm_clk_add(struct device *dev, const char *con_id)
104} 111}
105 112
106/** 113/**
114 * pm_clk_add - Start using a device clock for power management.
115 * @dev: Device whose clock is going to be used for power management.
116 * @con_id: Connection ID of the clock.
117 *
118 * Add the clock represented by @con_id to the list of clocks used for
119 * the power management of @dev.
120 */
121int pm_clk_add(struct device *dev, const char *con_id)
122{
123 return __pm_clk_add(dev, con_id, NULL);
124}
125
126/**
127 * pm_clk_add_clk - Start using a device clock for power management.
128 * @dev: Device whose clock is going to be used for power management.
129 * @clk: Clock pointer
130 *
131 * Add the clock to the list of clocks used for the power management of @dev.
132 * It will increment refcount on clock pointer, use clk_put() on it when done.
133 */
134int pm_clk_add_clk(struct device *dev, struct clk *clk)
135{
136 return __pm_clk_add(dev, NULL, clk);
137}
138
139/**
107 * __pm_clk_remove - Destroy PM clock entry. 140 * __pm_clk_remove - Destroy PM clock entry.
108 * @ce: PM clock entry to destroy. 141 * @ce: PM clock entry to destroy.
109 */ 142 */
@@ -223,10 +256,6 @@ void pm_clk_destroy(struct device *dev)
223 } 256 }
224} 257}
225 258
226#endif /* CONFIG_PM */
227
228#ifdef CONFIG_PM_RUNTIME
229
230/** 259/**
231 * pm_clk_suspend - Disable clocks in a device's PM clock list. 260 * pm_clk_suspend - Disable clocks in a device's PM clock list.
232 * @dev: Device to disable the clocks for. 261 * @dev: Device to disable the clocks for.
@@ -266,7 +295,6 @@ int pm_clk_resume(struct device *dev)
266 struct pm_subsys_data *psd = dev_to_psd(dev); 295 struct pm_subsys_data *psd = dev_to_psd(dev);
267 struct pm_clock_entry *ce; 296 struct pm_clock_entry *ce;
268 unsigned long flags; 297 unsigned long flags;
269 int ret;
270 298
271 dev_dbg(dev, "%s()\n", __func__); 299 dev_dbg(dev, "%s()\n", __func__);
272 300
@@ -275,13 +303,8 @@ int pm_clk_resume(struct device *dev)
275 303
276 spin_lock_irqsave(&psd->lock, flags); 304 spin_lock_irqsave(&psd->lock, flags);
277 305
278 list_for_each_entry(ce, &psd->clock_list, node) { 306 list_for_each_entry(ce, &psd->clock_list, node)
279 if (ce->status < PCE_STATUS_ERROR) { 307 __pm_clk_enable(dev, ce);
280 ret = __pm_clk_enable(dev, ce->clk);
281 if (!ret)
282 ce->status = PCE_STATUS_ENABLED;
283 }
284 }
285 308
286 spin_unlock_irqrestore(&psd->lock, flags); 309 spin_unlock_irqrestore(&psd->lock, flags);
287 310
@@ -346,74 +369,7 @@ static int pm_clk_notify(struct notifier_block *nb,
346 return 0; 369 return 0;
347} 370}
348 371
349#else /* !CONFIG_PM_RUNTIME */ 372#else /* !CONFIG_PM */
350
351#ifdef CONFIG_PM
352
353/**
354 * pm_clk_suspend - Disable clocks in a device's PM clock list.
355 * @dev: Device to disable the clocks for.
356 */
357int pm_clk_suspend(struct device *dev)
358{
359 struct pm_subsys_data *psd = dev_to_psd(dev);
360 struct pm_clock_entry *ce;
361 unsigned long flags;
362
363 dev_dbg(dev, "%s()\n", __func__);
364
365 /* If there is no driver, the clocks are already disabled. */
366 if (!psd || !dev->driver)
367 return 0;
368
369 spin_lock_irqsave(&psd->lock, flags);
370
371 list_for_each_entry_reverse(ce, &psd->clock_list, node) {
372 if (ce->status < PCE_STATUS_ERROR) {
373 if (ce->status == PCE_STATUS_ENABLED)
374 clk_disable(ce->clk);
375 ce->status = PCE_STATUS_ACQUIRED;
376 }
377 }
378
379 spin_unlock_irqrestore(&psd->lock, flags);
380
381 return 0;
382}
383
384/**
385 * pm_clk_resume - Enable clocks in a device's PM clock list.
386 * @dev: Device to enable the clocks for.
387 */
388int pm_clk_resume(struct device *dev)
389{
390 struct pm_subsys_data *psd = dev_to_psd(dev);
391 struct pm_clock_entry *ce;
392 unsigned long flags;
393 int ret;
394
395 dev_dbg(dev, "%s()\n", __func__);
396
397 /* If there is no driver, the clocks should remain disabled. */
398 if (!psd || !dev->driver)
399 return 0;
400
401 spin_lock_irqsave(&psd->lock, flags);
402
403 list_for_each_entry(ce, &psd->clock_list, node) {
404 if (ce->status < PCE_STATUS_ERROR) {
405 ret = __pm_clk_enable(dev, ce->clk);
406 if (!ret)
407 ce->status = PCE_STATUS_ENABLED;
408 }
409 }
410
411 spin_unlock_irqrestore(&psd->lock, flags);
412
413 return 0;
414}
415
416#endif /* CONFIG_PM */
417 373
418/** 374/**
419 * enable_clock - Enable a device clock. 375 * enable_clock - Enable a device clock.
@@ -493,7 +449,7 @@ static int pm_clk_notify(struct notifier_block *nb,
493 return 0; 449 return 0;
494} 450}
495 451
496#endif /* !CONFIG_PM_RUNTIME */ 452#endif /* !CONFIG_PM */
497 453
498/** 454/**
499 * pm_clk_add_notifier - Add bus type notifier for power management clocks. 455 * pm_clk_add_notifier - Add bus type notifier for power management clocks.
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index fb83d4acd400..6a103a35ea9b 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -12,6 +12,7 @@
12#include <linux/pm_runtime.h> 12#include <linux/pm_runtime.h>
13#include <linux/pm_domain.h> 13#include <linux/pm_domain.h>
14#include <linux/pm_qos.h> 14#include <linux/pm_qos.h>
15#include <linux/pm_clock.h>
15#include <linux/slab.h> 16#include <linux/slab.h>
16#include <linux/err.h> 17#include <linux/err.h>
17#include <linux/sched.h> 18#include <linux/sched.h>
@@ -151,6 +152,59 @@ static void genpd_recalc_cpu_exit_latency(struct generic_pm_domain *genpd)
151 genpd->cpuidle_data->idle_state->exit_latency = usecs64; 152 genpd->cpuidle_data->idle_state->exit_latency = usecs64;
152} 153}
153 154
155static int genpd_power_on(struct generic_pm_domain *genpd)
156{
157 ktime_t time_start;
158 s64 elapsed_ns;
159 int ret;
160
161 if (!genpd->power_on)
162 return 0;
163
164 time_start = ktime_get();
165 ret = genpd->power_on(genpd);
166 if (ret)
167 return ret;
168
169 elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
170 if (elapsed_ns <= genpd->power_on_latency_ns)
171 return ret;
172
173 genpd->power_on_latency_ns = elapsed_ns;
174 genpd->max_off_time_changed = true;
175 genpd_recalc_cpu_exit_latency(genpd);
176 pr_warn("%s: Power-%s latency exceeded, new value %lld ns\n",
177 genpd->name, "on", elapsed_ns);
178
179 return ret;
180}
181
182static int genpd_power_off(struct generic_pm_domain *genpd)
183{
184 ktime_t time_start;
185 s64 elapsed_ns;
186 int ret;
187
188 if (!genpd->power_off)
189 return 0;
190
191 time_start = ktime_get();
192 ret = genpd->power_off(genpd);
193 if (ret == -EBUSY)
194 return ret;
195
196 elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
197 if (elapsed_ns <= genpd->power_off_latency_ns)
198 return ret;
199
200 genpd->power_off_latency_ns = elapsed_ns;
201 genpd->max_off_time_changed = true;
202 pr_warn("%s: Power-%s latency exceeded, new value %lld ns\n",
203 genpd->name, "off", elapsed_ns);
204
205 return ret;
206}
207
154/** 208/**
155 * __pm_genpd_poweron - Restore power to a given PM domain and its masters. 209 * __pm_genpd_poweron - Restore power to a given PM domain and its masters.
156 * @genpd: PM domain to power up. 210 * @genpd: PM domain to power up.
@@ -222,25 +276,9 @@ static int __pm_genpd_poweron(struct generic_pm_domain *genpd)
222 } 276 }
223 } 277 }
224 278
225 if (genpd->power_on) { 279 ret = genpd_power_on(genpd);
226 ktime_t time_start = ktime_get(); 280 if (ret)
227 s64 elapsed_ns; 281 goto err;
228
229 ret = genpd->power_on(genpd);
230 if (ret)
231 goto err;
232
233 elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
234 if (elapsed_ns > genpd->power_on_latency_ns) {
235 genpd->power_on_latency_ns = elapsed_ns;
236 genpd->max_off_time_changed = true;
237 genpd_recalc_cpu_exit_latency(genpd);
238 if (genpd->name)
239 pr_warning("%s: Power-on latency exceeded, "
240 "new value %lld ns\n", genpd->name,
241 elapsed_ns);
242 }
243 }
244 282
245 out: 283 out:
246 genpd_set_active(genpd); 284 genpd_set_active(genpd);
@@ -280,8 +318,6 @@ int pm_genpd_name_poweron(const char *domain_name)
280 return genpd ? pm_genpd_poweron(genpd) : -EINVAL; 318 return genpd ? pm_genpd_poweron(genpd) : -EINVAL;
281} 319}
282 320
283#ifdef CONFIG_PM_RUNTIME
284
285static int genpd_start_dev_no_timing(struct generic_pm_domain *genpd, 321static int genpd_start_dev_no_timing(struct generic_pm_domain *genpd,
286 struct device *dev) 322 struct device *dev)
287{ 323{
@@ -544,16 +580,11 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
544 } 580 }
545 581
546 if (genpd->power_off) { 582 if (genpd->power_off) {
547 ktime_t time_start;
548 s64 elapsed_ns;
549
550 if (atomic_read(&genpd->sd_count) > 0) { 583 if (atomic_read(&genpd->sd_count) > 0) {
551 ret = -EBUSY; 584 ret = -EBUSY;
552 goto out; 585 goto out;
553 } 586 }
554 587
555 time_start = ktime_get();
556
557 /* 588 /*
558 * If sd_count > 0 at this point, one of the subdomains hasn't 589 * If sd_count > 0 at this point, one of the subdomains hasn't
559 * managed to call pm_genpd_poweron() for the master yet after 590 * managed to call pm_genpd_poweron() for the master yet after
@@ -562,21 +593,11 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
562 * the pm_genpd_poweron() restore power for us (this shouldn't 593 * the pm_genpd_poweron() restore power for us (this shouldn't
563 * happen very often). 594 * happen very often).
564 */ 595 */
565 ret = genpd->power_off(genpd); 596 ret = genpd_power_off(genpd);
566 if (ret == -EBUSY) { 597 if (ret == -EBUSY) {
567 genpd_set_active(genpd); 598 genpd_set_active(genpd);
568 goto out; 599 goto out;
569 } 600 }
570
571 elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
572 if (elapsed_ns > genpd->power_off_latency_ns) {
573 genpd->power_off_latency_ns = elapsed_ns;
574 genpd->max_off_time_changed = true;
575 if (genpd->name)
576 pr_warning("%s: Power-off latency exceeded, "
577 "new value %lld ns\n", genpd->name,
578 elapsed_ns);
579 }
580 } 601 }
581 602
582 genpd->status = GPD_STATE_POWER_OFF; 603 genpd->status = GPD_STATE_POWER_OFF;
@@ -755,33 +776,15 @@ static int __init genpd_poweroff_unused(void)
755} 776}
756late_initcall(genpd_poweroff_unused); 777late_initcall(genpd_poweroff_unused);
757 778
758#else
759
760static inline int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
761 unsigned long val, void *ptr)
762{
763 return NOTIFY_DONE;
764}
765
766static inline void
767genpd_queue_power_off_work(struct generic_pm_domain *genpd) {}
768
769static inline void genpd_power_off_work_fn(struct work_struct *work) {}
770
771#define pm_genpd_runtime_suspend NULL
772#define pm_genpd_runtime_resume NULL
773
774#endif /* CONFIG_PM_RUNTIME */
775
776#ifdef CONFIG_PM_SLEEP 779#ifdef CONFIG_PM_SLEEP
777 780
778/** 781/**
779 * pm_genpd_present - Check if the given PM domain has been initialized. 782 * pm_genpd_present - Check if the given PM domain has been initialized.
780 * @genpd: PM domain to check. 783 * @genpd: PM domain to check.
781 */ 784 */
782static bool pm_genpd_present(struct generic_pm_domain *genpd) 785static bool pm_genpd_present(const struct generic_pm_domain *genpd)
783{ 786{
784 struct generic_pm_domain *gpd; 787 const struct generic_pm_domain *gpd;
785 788
786 if (IS_ERR_OR_NULL(genpd)) 789 if (IS_ERR_OR_NULL(genpd))
787 return false; 790 return false;
@@ -822,8 +825,7 @@ static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
822 || atomic_read(&genpd->sd_count) > 0) 825 || atomic_read(&genpd->sd_count) > 0)
823 return; 826 return;
824 827
825 if (genpd->power_off) 828 genpd_power_off(genpd);
826 genpd->power_off(genpd);
827 829
828 genpd->status = GPD_STATE_POWER_OFF; 830 genpd->status = GPD_STATE_POWER_OFF;
829 831
@@ -854,8 +856,7 @@ static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd)
854 genpd_sd_counter_inc(link->master); 856 genpd_sd_counter_inc(link->master);
855 } 857 }
856 858
857 if (genpd->power_on) 859 genpd_power_on(genpd);
858 genpd->power_on(genpd);
859 860
860 genpd->status = GPD_STATE_ACTIVE; 861 genpd->status = GPD_STATE_ACTIVE;
861} 862}
@@ -1277,8 +1278,7 @@ static int pm_genpd_restore_noirq(struct device *dev)
1277 * If the domain was off before the hibernation, make 1278 * If the domain was off before the hibernation, make
1278 * sure it will be off going forward. 1279 * sure it will be off going forward.
1279 */ 1280 */
1280 if (genpd->power_off) 1281 genpd_power_off(genpd);
1281 genpd->power_off(genpd);
1282 1282
1283 return 0; 1283 return 0;
1284 } 1284 }
@@ -1364,7 +1364,7 @@ void pm_genpd_syscore_poweron(struct device *dev)
1364} 1364}
1365EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron); 1365EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron);
1366 1366
1367#else 1367#else /* !CONFIG_PM_SLEEP */
1368 1368
1369#define pm_genpd_prepare NULL 1369#define pm_genpd_prepare NULL
1370#define pm_genpd_suspend NULL 1370#define pm_genpd_suspend NULL
@@ -1929,6 +1929,12 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
1929 genpd->domain.ops.complete = pm_genpd_complete; 1929 genpd->domain.ops.complete = pm_genpd_complete;
1930 genpd->dev_ops.save_state = pm_genpd_default_save_state; 1930 genpd->dev_ops.save_state = pm_genpd_default_save_state;
1931 genpd->dev_ops.restore_state = pm_genpd_default_restore_state; 1931 genpd->dev_ops.restore_state = pm_genpd_default_restore_state;
1932
1933 if (genpd->flags & GENPD_FLAG_PM_CLK) {
1934 genpd->dev_ops.stop = pm_clk_suspend;
1935 genpd->dev_ops.start = pm_clk_resume;
1936 }
1937
1932 mutex_lock(&gpd_list_lock); 1938 mutex_lock(&gpd_list_lock);
1933 list_add(&genpd->gpd_list_node, &gpd_list); 1939 list_add(&genpd->gpd_list_node, &gpd_list);
1934 mutex_unlock(&gpd_list_lock); 1940 mutex_unlock(&gpd_list_lock);
@@ -2216,11 +2222,12 @@ int genpd_dev_pm_attach(struct device *dev)
2216 } 2222 }
2217 2223
2218 dev->pm_domain->detach = genpd_dev_pm_detach; 2224 dev->pm_domain->detach = genpd_dev_pm_detach;
2225 pm_genpd_poweron(pd);
2219 2226
2220 return 0; 2227 return 0;
2221} 2228}
2222EXPORT_SYMBOL_GPL(genpd_dev_pm_attach); 2229EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
2223#endif 2230#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
2224 2231
2225 2232
2226/*** debugfs support ***/ 2233/*** debugfs support ***/
@@ -2236,10 +2243,8 @@ static struct dentry *pm_genpd_debugfs_dir;
2236 2243
2237/* 2244/*
2238 * TODO: This function is a slightly modified version of rtpm_status_show 2245 * TODO: This function is a slightly modified version of rtpm_status_show
2239 * from sysfs.c, but dependencies between PM_GENERIC_DOMAINS and PM_RUNTIME 2246 * from sysfs.c, so generalize it.
2240 * are too loose to generalize it.
2241 */ 2247 */
2242#ifdef CONFIG_PM_RUNTIME
2243static void rtpm_status_str(struct seq_file *s, struct device *dev) 2248static void rtpm_status_str(struct seq_file *s, struct device *dev)
2244{ 2249{
2245 static const char * const status_lookup[] = { 2250 static const char * const status_lookup[] = {
@@ -2261,12 +2266,6 @@ static void rtpm_status_str(struct seq_file *s, struct device *dev)
2261 2266
2262 seq_puts(s, p); 2267 seq_puts(s, p);
2263} 2268}
2264#else
2265static void rtpm_status_str(struct seq_file *s, struct device *dev)
2266{
2267 seq_puts(s, "active");
2268}
2269#endif
2270 2269
2271static int pm_genpd_summary_one(struct seq_file *s, 2270static int pm_genpd_summary_one(struct seq_file *s,
2272 struct generic_pm_domain *gpd) 2271 struct generic_pm_domain *gpd)
diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c
index d88a62e104d4..2a4154a09e4d 100644
--- a/drivers/base/power/domain_governor.c
+++ b/drivers/base/power/domain_governor.c
@@ -11,8 +11,6 @@
11#include <linux/pm_qos.h> 11#include <linux/pm_qos.h>
12#include <linux/hrtimer.h> 12#include <linux/hrtimer.h>
13 13
14#ifdef CONFIG_PM_RUNTIME
15
16static int dev_update_qos_constraint(struct device *dev, void *data) 14static int dev_update_qos_constraint(struct device *dev, void *data)
17{ 15{
18 s64 *constraint_ns_p = data; 16 s64 *constraint_ns_p = data;
@@ -227,15 +225,6 @@ static bool always_on_power_down_ok(struct dev_pm_domain *domain)
227 return false; 225 return false;
228} 226}
229 227
230#else /* !CONFIG_PM_RUNTIME */
231
232static inline bool default_stop_ok(struct device *dev) { return false; }
233
234#define default_power_down_ok NULL
235#define always_on_power_down_ok NULL
236
237#endif /* !CONFIG_PM_RUNTIME */
238
239struct dev_power_governor simple_qos_governor = { 228struct dev_power_governor simple_qos_governor = {
240 .stop_ok = default_stop_ok, 229 .stop_ok = default_stop_ok,
241 .power_down_ok = default_power_down_ok, 230 .power_down_ok = default_power_down_ok,
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index 89ced955fafa..2d195f3a1998 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -49,11 +49,12 @@
49 * are protected by the dev_opp_list_lock for integrity. 49 * are protected by the dev_opp_list_lock for integrity.
50 * IMPORTANT: the opp nodes should be maintained in increasing 50 * IMPORTANT: the opp nodes should be maintained in increasing
51 * order. 51 * order.
52 * @dynamic: not-created from static DT entries.
52 * @available: true/false - marks if this OPP as available or not 53 * @available: true/false - marks if this OPP as available or not
53 * @rate: Frequency in hertz 54 * @rate: Frequency in hertz
54 * @u_volt: Nominal voltage in microvolts corresponding to this OPP 55 * @u_volt: Nominal voltage in microvolts corresponding to this OPP
55 * @dev_opp: points back to the device_opp struct this opp belongs to 56 * @dev_opp: points back to the device_opp struct this opp belongs to
56 * @head: RCU callback head used for deferred freeing 57 * @rcu_head: RCU callback head used for deferred freeing
57 * 58 *
58 * This structure stores the OPP information for a given device. 59 * This structure stores the OPP information for a given device.
59 */ 60 */
@@ -61,11 +62,12 @@ struct dev_pm_opp {
61 struct list_head node; 62 struct list_head node;
62 63
63 bool available; 64 bool available;
65 bool dynamic;
64 unsigned long rate; 66 unsigned long rate;
65 unsigned long u_volt; 67 unsigned long u_volt;
66 68
67 struct device_opp *dev_opp; 69 struct device_opp *dev_opp;
68 struct rcu_head head; 70 struct rcu_head rcu_head;
69}; 71};
70 72
71/** 73/**
@@ -76,7 +78,8 @@ struct dev_pm_opp {
76 * RCU usage: nodes are not modified in the list of device_opp, 78 * RCU usage: nodes are not modified in the list of device_opp,
77 * however addition is possible and is secured by dev_opp_list_lock 79 * however addition is possible and is secured by dev_opp_list_lock
78 * @dev: device pointer 80 * @dev: device pointer
79 * @head: notifier head to notify the OPP availability changes. 81 * @srcu_head: notifier head to notify the OPP availability changes.
82 * @rcu_head: RCU callback head used for deferred freeing
80 * @opp_list: list of opps 83 * @opp_list: list of opps
81 * 84 *
82 * This is an internal data structure maintaining the link to opps attached to 85 * This is an internal data structure maintaining the link to opps attached to
@@ -87,7 +90,8 @@ struct device_opp {
87 struct list_head node; 90 struct list_head node;
88 91
89 struct device *dev; 92 struct device *dev;
90 struct srcu_notifier_head head; 93 struct srcu_notifier_head srcu_head;
94 struct rcu_head rcu_head;
91 struct list_head opp_list; 95 struct list_head opp_list;
92}; 96};
93 97
@@ -378,30 +382,8 @@ struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
378} 382}
379EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor); 383EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
380 384
381/** 385static int dev_pm_opp_add_dynamic(struct device *dev, unsigned long freq,
382 * dev_pm_opp_add() - Add an OPP table from a table definitions 386 unsigned long u_volt, bool dynamic)
383 * @dev: device for which we do this operation
384 * @freq: Frequency in Hz for this OPP
385 * @u_volt: Voltage in uVolts for this OPP
386 *
387 * This function adds an opp definition to the opp list and returns status.
388 * The opp is made available by default and it can be controlled using
389 * dev_pm_opp_enable/disable functions.
390 *
391 * Locking: The internal device_opp and opp structures are RCU protected.
392 * Hence this function internally uses RCU updater strategy with mutex locks
393 * to keep the integrity of the internal data structures. Callers should ensure
394 * that this function is *NOT* called under RCU protection or in contexts where
395 * mutex cannot be locked.
396 *
397 * Return:
398 * 0: On success OR
399 * Duplicate OPPs (both freq and volt are same) and opp->available
400 * -EEXIST: Freq are same and volt are different OR
401 * Duplicate OPPs (both freq and volt are same) and !opp->available
402 * -ENOMEM: Memory allocation failure
403 */
404int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
405{ 387{
406 struct device_opp *dev_opp = NULL; 388 struct device_opp *dev_opp = NULL;
407 struct dev_pm_opp *opp, *new_opp; 389 struct dev_pm_opp *opp, *new_opp;
@@ -417,6 +399,13 @@ int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
417 /* Hold our list modification lock here */ 399 /* Hold our list modification lock here */
418 mutex_lock(&dev_opp_list_lock); 400 mutex_lock(&dev_opp_list_lock);
419 401
402 /* populate the opp table */
403 new_opp->dev_opp = dev_opp;
404 new_opp->rate = freq;
405 new_opp->u_volt = u_volt;
406 new_opp->available = true;
407 new_opp->dynamic = dynamic;
408
420 /* Check for existing list for 'dev' */ 409 /* Check for existing list for 'dev' */
421 dev_opp = find_device_opp(dev); 410 dev_opp = find_device_opp(dev);
422 if (IS_ERR(dev_opp)) { 411 if (IS_ERR(dev_opp)) {
@@ -436,19 +425,15 @@ int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
436 } 425 }
437 426
438 dev_opp->dev = dev; 427 dev_opp->dev = dev;
439 srcu_init_notifier_head(&dev_opp->head); 428 srcu_init_notifier_head(&dev_opp->srcu_head);
440 INIT_LIST_HEAD(&dev_opp->opp_list); 429 INIT_LIST_HEAD(&dev_opp->opp_list);
441 430
442 /* Secure the device list modification */ 431 /* Secure the device list modification */
443 list_add_rcu(&dev_opp->node, &dev_opp_list); 432 list_add_rcu(&dev_opp->node, &dev_opp_list);
433 head = &dev_opp->opp_list;
434 goto list_add;
444 } 435 }
445 436
446 /* populate the opp table */
447 new_opp->dev_opp = dev_opp;
448 new_opp->rate = freq;
449 new_opp->u_volt = u_volt;
450 new_opp->available = true;
451
452 /* 437 /*
453 * Insert new OPP in order of increasing frequency 438 * Insert new OPP in order of increasing frequency
454 * and discard if already present 439 * and discard if already present
@@ -474,6 +459,7 @@ int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
474 return ret; 459 return ret;
475 } 460 }
476 461
462list_add:
477 list_add_rcu(&new_opp->node, head); 463 list_add_rcu(&new_opp->node, head);
478 mutex_unlock(&dev_opp_list_lock); 464 mutex_unlock(&dev_opp_list_lock);
479 465
@@ -481,11 +467,109 @@ int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
481 * Notify the changes in the availability of the operable 467 * Notify the changes in the availability of the operable
482 * frequency/voltage list. 468 * frequency/voltage list.
483 */ 469 */
484 srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_ADD, new_opp); 470 srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADD, new_opp);
485 return 0; 471 return 0;
486} 472}
473
474/**
475 * dev_pm_opp_add() - Add an OPP table from a table definitions
476 * @dev: device for which we do this operation
477 * @freq: Frequency in Hz for this OPP
478 * @u_volt: Voltage in uVolts for this OPP
479 *
480 * This function adds an opp definition to the opp list and returns status.
481 * The opp is made available by default and it can be controlled using
482 * dev_pm_opp_enable/disable functions.
483 *
484 * Locking: The internal device_opp and opp structures are RCU protected.
485 * Hence this function internally uses RCU updater strategy with mutex locks
486 * to keep the integrity of the internal data structures. Callers should ensure
487 * that this function is *NOT* called under RCU protection or in contexts where
488 * mutex cannot be locked.
489 *
490 * Return:
491 * 0: On success OR
492 * Duplicate OPPs (both freq and volt are same) and opp->available
493 * -EEXIST: Freq are same and volt are different OR
494 * Duplicate OPPs (both freq and volt are same) and !opp->available
495 * -ENOMEM: Memory allocation failure
496 */
497int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
498{
499 return dev_pm_opp_add_dynamic(dev, freq, u_volt, true);
500}
487EXPORT_SYMBOL_GPL(dev_pm_opp_add); 501EXPORT_SYMBOL_GPL(dev_pm_opp_add);
488 502
503static void kfree_opp_rcu(struct rcu_head *head)
504{
505 struct dev_pm_opp *opp = container_of(head, struct dev_pm_opp, rcu_head);
506
507 kfree_rcu(opp, rcu_head);
508}
509
510static void kfree_device_rcu(struct rcu_head *head)
511{
512 struct device_opp *device_opp = container_of(head, struct device_opp, rcu_head);
513
514 kfree(device_opp);
515}
516
517void __dev_pm_opp_remove(struct device_opp *dev_opp, struct dev_pm_opp *opp)
518{
519 /*
520 * Notify the changes in the availability of the operable
521 * frequency/voltage list.
522 */
523 srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_REMOVE, opp);
524 list_del_rcu(&opp->node);
525 call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, kfree_opp_rcu);
526
527 if (list_empty(&dev_opp->opp_list)) {
528 list_del_rcu(&dev_opp->node);
529 call_srcu(&dev_opp->srcu_head.srcu, &dev_opp->rcu_head,
530 kfree_device_rcu);
531 }
532}
533
534/**
535 * dev_pm_opp_remove() - Remove an OPP from OPP list
536 * @dev: device for which we do this operation
537 * @freq: OPP to remove with matching 'freq'
538 *
539 * This function removes an opp from the opp list.
540 */
541void dev_pm_opp_remove(struct device *dev, unsigned long freq)
542{
543 struct dev_pm_opp *opp;
544 struct device_opp *dev_opp;
545 bool found = false;
546
547 /* Hold our list modification lock here */
548 mutex_lock(&dev_opp_list_lock);
549
550 dev_opp = find_device_opp(dev);
551 if (IS_ERR(dev_opp))
552 goto unlock;
553
554 list_for_each_entry(opp, &dev_opp->opp_list, node) {
555 if (opp->rate == freq) {
556 found = true;
557 break;
558 }
559 }
560
561 if (!found) {
562 dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n",
563 __func__, freq);
564 goto unlock;
565 }
566
567 __dev_pm_opp_remove(dev_opp, opp);
568unlock:
569 mutex_unlock(&dev_opp_list_lock);
570}
571EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
572
489/** 573/**
490 * opp_set_availability() - helper to set the availability of an opp 574 * opp_set_availability() - helper to set the availability of an opp
491 * @dev: device for which we do this operation 575 * @dev: device for which we do this operation
@@ -557,14 +641,14 @@ static int opp_set_availability(struct device *dev, unsigned long freq,
557 641
558 list_replace_rcu(&opp->node, &new_opp->node); 642 list_replace_rcu(&opp->node, &new_opp->node);
559 mutex_unlock(&dev_opp_list_lock); 643 mutex_unlock(&dev_opp_list_lock);
560 kfree_rcu(opp, head); 644 call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, kfree_opp_rcu);
561 645
562 /* Notify the change of the OPP availability */ 646 /* Notify the change of the OPP availability */
563 if (availability_req) 647 if (availability_req)
564 srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_ENABLE, 648 srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ENABLE,
565 new_opp); 649 new_opp);
566 else 650 else
567 srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_DISABLE, 651 srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_DISABLE,
568 new_opp); 652 new_opp);
569 653
570 return 0; 654 return 0;
@@ -629,7 +713,7 @@ struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev)
629 if (IS_ERR(dev_opp)) 713 if (IS_ERR(dev_opp))
630 return ERR_CAST(dev_opp); /* matching type */ 714 return ERR_CAST(dev_opp); /* matching type */
631 715
632 return &dev_opp->head; 716 return &dev_opp->srcu_head;
633} 717}
634 718
635#ifdef CONFIG_OF 719#ifdef CONFIG_OF
@@ -666,7 +750,7 @@ int of_init_opp_table(struct device *dev)
666 unsigned long freq = be32_to_cpup(val++) * 1000; 750 unsigned long freq = be32_to_cpup(val++) * 1000;
667 unsigned long volt = be32_to_cpup(val++); 751 unsigned long volt = be32_to_cpup(val++);
668 752
669 if (dev_pm_opp_add(dev, freq, volt)) 753 if (dev_pm_opp_add_dynamic(dev, freq, volt, false))
670 dev_warn(dev, "%s: Failed to add OPP %ld\n", 754 dev_warn(dev, "%s: Failed to add OPP %ld\n",
671 __func__, freq); 755 __func__, freq);
672 nr -= 2; 756 nr -= 2;
@@ -675,4 +759,34 @@ int of_init_opp_table(struct device *dev)
675 return 0; 759 return 0;
676} 760}
677EXPORT_SYMBOL_GPL(of_init_opp_table); 761EXPORT_SYMBOL_GPL(of_init_opp_table);
762
763/**
764 * of_free_opp_table() - Free OPP table entries created from static DT entries
765 * @dev: device pointer used to lookup device OPPs.
766 *
767 * Free OPPs created using static entries present in DT.
768 */
769void of_free_opp_table(struct device *dev)
770{
771 struct device_opp *dev_opp = find_device_opp(dev);
772 struct dev_pm_opp *opp, *tmp;
773
774 /* Check for existing list for 'dev' */
775 dev_opp = find_device_opp(dev);
776 if (WARN(IS_ERR(dev_opp), "%s: dev_opp: %ld\n", dev_name(dev),
777 PTR_ERR(dev_opp)))
778 return;
779
780 /* Hold our list modification lock here */
781 mutex_lock(&dev_opp_list_lock);
782
783 /* Free static OPPs */
784 list_for_each_entry_safe(opp, tmp, &dev_opp->opp_list, node) {
785 if (!opp->dynamic)
786 __dev_pm_opp_remove(dev_opp, opp);
787 }
788
789 mutex_unlock(&dev_opp_list_lock);
790}
791EXPORT_SYMBOL_GPL(of_free_opp_table);
678#endif 792#endif
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index a21223d95926..b6b8a273c5da 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -9,7 +9,7 @@ static inline void device_pm_init_common(struct device *dev)
9 } 9 }
10} 10}
11 11
12#ifdef CONFIG_PM_RUNTIME 12#ifdef CONFIG_PM
13 13
14static inline void pm_runtime_early_init(struct device *dev) 14static inline void pm_runtime_early_init(struct device *dev)
15{ 15{
@@ -20,7 +20,21 @@ static inline void pm_runtime_early_init(struct device *dev)
20extern void pm_runtime_init(struct device *dev); 20extern void pm_runtime_init(struct device *dev);
21extern void pm_runtime_remove(struct device *dev); 21extern void pm_runtime_remove(struct device *dev);
22 22
23#else /* !CONFIG_PM_RUNTIME */ 23/*
24 * sysfs.c
25 */
26
27extern int dpm_sysfs_add(struct device *dev);
28extern void dpm_sysfs_remove(struct device *dev);
29extern void rpm_sysfs_remove(struct device *dev);
30extern int wakeup_sysfs_add(struct device *dev);
31extern void wakeup_sysfs_remove(struct device *dev);
32extern int pm_qos_sysfs_add_resume_latency(struct device *dev);
33extern void pm_qos_sysfs_remove_resume_latency(struct device *dev);
34extern int pm_qos_sysfs_add_flags(struct device *dev);
35extern void pm_qos_sysfs_remove_flags(struct device *dev);
36
37#else /* CONFIG_PM */
24 38
25static inline void pm_runtime_early_init(struct device *dev) 39static inline void pm_runtime_early_init(struct device *dev)
26{ 40{
@@ -30,7 +44,15 @@ static inline void pm_runtime_early_init(struct device *dev)
30static inline void pm_runtime_init(struct device *dev) {} 44static inline void pm_runtime_init(struct device *dev) {}
31static inline void pm_runtime_remove(struct device *dev) {} 45static inline void pm_runtime_remove(struct device *dev) {}
32 46
33#endif /* !CONFIG_PM_RUNTIME */ 47static inline int dpm_sysfs_add(struct device *dev) { return 0; }
48static inline void dpm_sysfs_remove(struct device *dev) {}
49static inline void rpm_sysfs_remove(struct device *dev) {}
50static inline int wakeup_sysfs_add(struct device *dev) { return 0; }
51static inline void wakeup_sysfs_remove(struct device *dev) {}
52static inline int pm_qos_sysfs_add(struct device *dev) { return 0; }
53static inline void pm_qos_sysfs_remove(struct device *dev) {}
54
55#endif
34 56
35#ifdef CONFIG_PM_SLEEP 57#ifdef CONFIG_PM_SLEEP
36 58
@@ -77,31 +99,3 @@ static inline void device_pm_init(struct device *dev)
77 device_pm_sleep_init(dev); 99 device_pm_sleep_init(dev);
78 pm_runtime_init(dev); 100 pm_runtime_init(dev);
79} 101}
80
81#ifdef CONFIG_PM
82
83/*
84 * sysfs.c
85 */
86
87extern int dpm_sysfs_add(struct device *dev);
88extern void dpm_sysfs_remove(struct device *dev);
89extern void rpm_sysfs_remove(struct device *dev);
90extern int wakeup_sysfs_add(struct device *dev);
91extern void wakeup_sysfs_remove(struct device *dev);
92extern int pm_qos_sysfs_add_resume_latency(struct device *dev);
93extern void pm_qos_sysfs_remove_resume_latency(struct device *dev);
94extern int pm_qos_sysfs_add_flags(struct device *dev);
95extern void pm_qos_sysfs_remove_flags(struct device *dev);
96
97#else /* CONFIG_PM */
98
99static inline int dpm_sysfs_add(struct device *dev) { return 0; }
100static inline void dpm_sysfs_remove(struct device *dev) {}
101static inline void rpm_sysfs_remove(struct device *dev) {}
102static inline int wakeup_sysfs_add(struct device *dev) { return 0; }
103static inline void wakeup_sysfs_remove(struct device *dev) {}
104static inline int pm_qos_sysfs_add(struct device *dev) { return 0; }
105static inline void pm_qos_sysfs_remove(struct device *dev) {}
106
107#endif
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 36b9eb4862cb..a8fe4c1a8d07 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -599,7 +599,6 @@ int dev_pm_qos_add_ancestor_request(struct device *dev,
599} 599}
600EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request); 600EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
601 601
602#ifdef CONFIG_PM_RUNTIME
603static void __dev_pm_qos_drop_user_request(struct device *dev, 602static void __dev_pm_qos_drop_user_request(struct device *dev,
604 enum dev_pm_qos_req_type type) 603 enum dev_pm_qos_req_type type)
605{ 604{
@@ -880,7 +879,3 @@ int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
880 mutex_unlock(&dev_pm_qos_mtx); 879 mutex_unlock(&dev_pm_qos_mtx);
881 return ret; 880 return ret;
882} 881}
883#else /* !CONFIG_PM_RUNTIME */
884static void __dev_pm_qos_hide_latency_limit(struct device *dev) {}
885static void __dev_pm_qos_hide_flags(struct device *dev) {}
886#endif /* CONFIG_PM_RUNTIME */
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 67c7938e430b..5070c4fe8542 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -13,43 +13,38 @@
13#include <trace/events/rpm.h> 13#include <trace/events/rpm.h>
14#include "power.h" 14#include "power.h"
15 15
16#define RPM_GET_CALLBACK(dev, cb) \ 16typedef int (*pm_callback_t)(struct device *);
17({ \
18 int (*__rpm_cb)(struct device *__d); \
19 \
20 if (dev->pm_domain) \
21 __rpm_cb = dev->pm_domain->ops.cb; \
22 else if (dev->type && dev->type->pm) \
23 __rpm_cb = dev->type->pm->cb; \
24 else if (dev->class && dev->class->pm) \
25 __rpm_cb = dev->class->pm->cb; \
26 else if (dev->bus && dev->bus->pm) \
27 __rpm_cb = dev->bus->pm->cb; \
28 else \
29 __rpm_cb = NULL; \
30 \
31 if (!__rpm_cb && dev->driver && dev->driver->pm) \
32 __rpm_cb = dev->driver->pm->cb; \
33 \
34 __rpm_cb; \
35})
36
37static int (*rpm_get_suspend_cb(struct device *dev))(struct device *)
38{
39 return RPM_GET_CALLBACK(dev, runtime_suspend);
40}
41 17
42static int (*rpm_get_resume_cb(struct device *dev))(struct device *) 18static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
43{ 19{
44 return RPM_GET_CALLBACK(dev, runtime_resume); 20 pm_callback_t cb;
45} 21 const struct dev_pm_ops *ops;
22
23 if (dev->pm_domain)
24 ops = &dev->pm_domain->ops;
25 else if (dev->type && dev->type->pm)
26 ops = dev->type->pm;
27 else if (dev->class && dev->class->pm)
28 ops = dev->class->pm;
29 else if (dev->bus && dev->bus->pm)
30 ops = dev->bus->pm;
31 else
32 ops = NULL;
46 33
47#ifdef CONFIG_PM_RUNTIME 34 if (ops)
48static int (*rpm_get_idle_cb(struct device *dev))(struct device *) 35 cb = *(pm_callback_t *)((void *)ops + cb_offset);
49{ 36 else
50 return RPM_GET_CALLBACK(dev, runtime_idle); 37 cb = NULL;
38
39 if (!cb && dev->driver && dev->driver->pm)
40 cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset);
41
42 return cb;
51} 43}
52 44
45#define RPM_GET_CALLBACK(dev, callback) \
46 __rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))
47
53static int rpm_resume(struct device *dev, int rpmflags); 48static int rpm_resume(struct device *dev, int rpmflags);
54static int rpm_suspend(struct device *dev, int rpmflags); 49static int rpm_suspend(struct device *dev, int rpmflags);
55 50
@@ -347,7 +342,7 @@ static int rpm_idle(struct device *dev, int rpmflags)
347 342
348 dev->power.idle_notification = true; 343 dev->power.idle_notification = true;
349 344
350 callback = rpm_get_idle_cb(dev); 345 callback = RPM_GET_CALLBACK(dev, runtime_idle);
351 346
352 if (callback) 347 if (callback)
353 retval = __rpm_callback(callback, dev); 348 retval = __rpm_callback(callback, dev);
@@ -517,7 +512,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
517 512
518 __update_runtime_status(dev, RPM_SUSPENDING); 513 __update_runtime_status(dev, RPM_SUSPENDING);
519 514
520 callback = rpm_get_suspend_cb(dev); 515 callback = RPM_GET_CALLBACK(dev, runtime_suspend);
521 516
522 retval = rpm_callback(callback, dev); 517 retval = rpm_callback(callback, dev);
523 if (retval) 518 if (retval)
@@ -737,7 +732,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
737 732
738 __update_runtime_status(dev, RPM_RESUMING); 733 __update_runtime_status(dev, RPM_RESUMING);
739 734
740 callback = rpm_get_resume_cb(dev); 735 callback = RPM_GET_CALLBACK(dev, runtime_resume);
741 736
742 retval = rpm_callback(callback, dev); 737 retval = rpm_callback(callback, dev);
743 if (retval) { 738 if (retval) {
@@ -1402,7 +1397,6 @@ void pm_runtime_remove(struct device *dev)
1402 if (dev->power.irq_safe && dev->parent) 1397 if (dev->power.irq_safe && dev->parent)
1403 pm_runtime_put(dev->parent); 1398 pm_runtime_put(dev->parent);
1404} 1399}
1405#endif
1406 1400
1407/** 1401/**
1408 * pm_runtime_force_suspend - Force a device into suspend state if needed. 1402 * pm_runtime_force_suspend - Force a device into suspend state if needed.
@@ -1422,16 +1416,10 @@ int pm_runtime_force_suspend(struct device *dev)
1422 int ret = 0; 1416 int ret = 0;
1423 1417
1424 pm_runtime_disable(dev); 1418 pm_runtime_disable(dev);
1425
1426 /*
1427 * Note that pm_runtime_status_suspended() returns false while
1428 * !CONFIG_PM_RUNTIME, which means the device will be put into low
1429 * power state.
1430 */
1431 if (pm_runtime_status_suspended(dev)) 1419 if (pm_runtime_status_suspended(dev))
1432 return 0; 1420 return 0;
1433 1421
1434 callback = rpm_get_suspend_cb(dev); 1422 callback = RPM_GET_CALLBACK(dev, runtime_suspend);
1435 1423
1436 if (!callback) { 1424 if (!callback) {
1437 ret = -ENOSYS; 1425 ret = -ENOSYS;
@@ -1467,7 +1455,7 @@ int pm_runtime_force_resume(struct device *dev)
1467 int (*callback)(struct device *); 1455 int (*callback)(struct device *);
1468 int ret = 0; 1456 int ret = 0;
1469 1457
1470 callback = rpm_get_resume_cb(dev); 1458 callback = RPM_GET_CALLBACK(dev, runtime_resume);
1471 1459
1472 if (!callback) { 1460 if (!callback) {
1473 ret = -ENOSYS; 1461 ret = -ENOSYS;
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index a9d26ed11bf4..d2be3f9c211c 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -95,7 +95,6 @@
95const char power_group_name[] = "power"; 95const char power_group_name[] = "power";
96EXPORT_SYMBOL_GPL(power_group_name); 96EXPORT_SYMBOL_GPL(power_group_name);
97 97
98#ifdef CONFIG_PM_RUNTIME
99static const char ctrl_auto[] = "auto"; 98static const char ctrl_auto[] = "auto";
100static const char ctrl_on[] = "on"; 99static const char ctrl_on[] = "on";
101 100
@@ -330,7 +329,6 @@ static ssize_t pm_qos_remote_wakeup_store(struct device *dev,
330 329
331static DEVICE_ATTR(pm_qos_remote_wakeup, 0644, 330static DEVICE_ATTR(pm_qos_remote_wakeup, 0644,
332 pm_qos_remote_wakeup_show, pm_qos_remote_wakeup_store); 331 pm_qos_remote_wakeup_show, pm_qos_remote_wakeup_store);
333#endif /* CONFIG_PM_RUNTIME */
334 332
335#ifdef CONFIG_PM_SLEEP 333#ifdef CONFIG_PM_SLEEP
336static const char _enabled[] = "enabled"; 334static const char _enabled[] = "enabled";
@@ -531,8 +529,6 @@ static DEVICE_ATTR(wakeup_prevent_sleep_time_ms, 0444,
531#endif /* CONFIG_PM_SLEEP */ 529#endif /* CONFIG_PM_SLEEP */
532 530
533#ifdef CONFIG_PM_ADVANCED_DEBUG 531#ifdef CONFIG_PM_ADVANCED_DEBUG
534#ifdef CONFIG_PM_RUNTIME
535
536static ssize_t rtpm_usagecount_show(struct device *dev, 532static ssize_t rtpm_usagecount_show(struct device *dev,
537 struct device_attribute *attr, char *buf) 533 struct device_attribute *attr, char *buf)
538{ 534{
@@ -562,10 +558,7 @@ static DEVICE_ATTR(runtime_usage, 0444, rtpm_usagecount_show, NULL);
562static DEVICE_ATTR(runtime_active_kids, 0444, rtpm_children_show, NULL); 558static DEVICE_ATTR(runtime_active_kids, 0444, rtpm_children_show, NULL);
563static DEVICE_ATTR(runtime_enabled, 0444, rtpm_enabled_show, NULL); 559static DEVICE_ATTR(runtime_enabled, 0444, rtpm_enabled_show, NULL);
564 560
565#endif
566
567#ifdef CONFIG_PM_SLEEP 561#ifdef CONFIG_PM_SLEEP
568
569static ssize_t async_show(struct device *dev, struct device_attribute *attr, 562static ssize_t async_show(struct device *dev, struct device_attribute *attr,
570 char *buf) 563 char *buf)
571{ 564{
@@ -595,7 +588,7 @@ static ssize_t async_store(struct device *dev, struct device_attribute *attr,
595 588
596static DEVICE_ATTR(async, 0644, async_show, async_store); 589static DEVICE_ATTR(async, 0644, async_show, async_store);
597 590
598#endif 591#endif /* CONFIG_PM_SLEEP */
599#endif /* CONFIG_PM_ADVANCED_DEBUG */ 592#endif /* CONFIG_PM_ADVANCED_DEBUG */
600 593
601static struct attribute *power_attrs[] = { 594static struct attribute *power_attrs[] = {
@@ -603,12 +596,10 @@ static struct attribute *power_attrs[] = {
603#ifdef CONFIG_PM_SLEEP 596#ifdef CONFIG_PM_SLEEP
604 &dev_attr_async.attr, 597 &dev_attr_async.attr,
605#endif 598#endif
606#ifdef CONFIG_PM_RUNTIME
607 &dev_attr_runtime_status.attr, 599 &dev_attr_runtime_status.attr,
608 &dev_attr_runtime_usage.attr, 600 &dev_attr_runtime_usage.attr,
609 &dev_attr_runtime_active_kids.attr, 601 &dev_attr_runtime_active_kids.attr,
610 &dev_attr_runtime_enabled.attr, 602 &dev_attr_runtime_enabled.attr,
611#endif
612#endif /* CONFIG_PM_ADVANCED_DEBUG */ 603#endif /* CONFIG_PM_ADVANCED_DEBUG */
613 NULL, 604 NULL,
614}; 605};
@@ -640,7 +631,6 @@ static struct attribute_group pm_wakeup_attr_group = {
640}; 631};
641 632
642static struct attribute *runtime_attrs[] = { 633static struct attribute *runtime_attrs[] = {
643#ifdef CONFIG_PM_RUNTIME
644#ifndef CONFIG_PM_ADVANCED_DEBUG 634#ifndef CONFIG_PM_ADVANCED_DEBUG
645 &dev_attr_runtime_status.attr, 635 &dev_attr_runtime_status.attr,
646#endif 636#endif
@@ -648,7 +638,6 @@ static struct attribute *runtime_attrs[] = {
648 &dev_attr_runtime_suspended_time.attr, 638 &dev_attr_runtime_suspended_time.attr,
649 &dev_attr_runtime_active_time.attr, 639 &dev_attr_runtime_active_time.attr,
650 &dev_attr_autosuspend_delay_ms.attr, 640 &dev_attr_autosuspend_delay_ms.attr,
651#endif /* CONFIG_PM_RUNTIME */
652 NULL, 641 NULL,
653}; 642};
654static struct attribute_group pm_runtime_attr_group = { 643static struct attribute_group pm_runtime_attr_group = {
@@ -657,9 +646,7 @@ static struct attribute_group pm_runtime_attr_group = {
657}; 646};
658 647
659static struct attribute *pm_qos_resume_latency_attrs[] = { 648static struct attribute *pm_qos_resume_latency_attrs[] = {
660#ifdef CONFIG_PM_RUNTIME
661 &dev_attr_pm_qos_resume_latency_us.attr, 649 &dev_attr_pm_qos_resume_latency_us.attr,
662#endif /* CONFIG_PM_RUNTIME */
663 NULL, 650 NULL,
664}; 651};
665static struct attribute_group pm_qos_resume_latency_attr_group = { 652static struct attribute_group pm_qos_resume_latency_attr_group = {
@@ -668,9 +655,7 @@ static struct attribute_group pm_qos_resume_latency_attr_group = {
668}; 655};
669 656
670static struct attribute *pm_qos_latency_tolerance_attrs[] = { 657static struct attribute *pm_qos_latency_tolerance_attrs[] = {
671#ifdef CONFIG_PM_RUNTIME
672 &dev_attr_pm_qos_latency_tolerance_us.attr, 658 &dev_attr_pm_qos_latency_tolerance_us.attr,
673#endif /* CONFIG_PM_RUNTIME */
674 NULL, 659 NULL,
675}; 660};
676static struct attribute_group pm_qos_latency_tolerance_attr_group = { 661static struct attribute_group pm_qos_latency_tolerance_attr_group = {
@@ -679,10 +664,8 @@ static struct attribute_group pm_qos_latency_tolerance_attr_group = {
679}; 664};
680 665
681static struct attribute *pm_qos_flags_attrs[] = { 666static struct attribute *pm_qos_flags_attrs[] = {
682#ifdef CONFIG_PM_RUNTIME
683 &dev_attr_pm_qos_no_power_off.attr, 667 &dev_attr_pm_qos_no_power_off.attr,
684 &dev_attr_pm_qos_remote_wakeup.attr, 668 &dev_attr_pm_qos_remote_wakeup.attr,
685#endif /* CONFIG_PM_RUNTIME */
686 NULL, 669 NULL,
687}; 670};
688static struct attribute_group pm_qos_flags_attr_group = { 671static struct attribute_group pm_qos_flags_attr_group = {
diff --git a/drivers/base/property.c b/drivers/base/property.c
new file mode 100644
index 000000000000..c45845874d4f
--- /dev/null
+++ b/drivers/base/property.c
@@ -0,0 +1,431 @@
1/*
2 * property.c - Unified device property interface.
3 *
4 * Copyright (C) 2014, Intel Corporation
5 * Authors: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
6 * Mika Westerberg <mika.westerberg@linux.intel.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/property.h>
14#include <linux/export.h>
15#include <linux/acpi.h>
16#include <linux/of.h>
17
18/**
19 * device_property_present - check if a property of a device is present
20 * @dev: Device whose property is being checked
21 * @propname: Name of the property
22 *
23 * Check if property @propname is present in the device firmware description.
24 */
25bool device_property_present(struct device *dev, const char *propname)
26{
27 if (IS_ENABLED(CONFIG_OF) && dev->of_node)
28 return of_property_read_bool(dev->of_node, propname);
29
30 return !acpi_dev_prop_get(ACPI_COMPANION(dev), propname, NULL);
31}
32EXPORT_SYMBOL_GPL(device_property_present);
33
34/**
35 * fwnode_property_present - check if a property of a firmware node is present
36 * @fwnode: Firmware node whose property to check
37 * @propname: Name of the property
38 */
39bool fwnode_property_present(struct fwnode_handle *fwnode, const char *propname)
40{
41 if (is_of_node(fwnode))
42 return of_property_read_bool(of_node(fwnode), propname);
43 else if (is_acpi_node(fwnode))
44 return !acpi_dev_prop_get(acpi_node(fwnode), propname, NULL);
45
46 return false;
47}
48EXPORT_SYMBOL_GPL(fwnode_property_present);
49
50#define OF_DEV_PROP_READ_ARRAY(node, propname, type, val, nval) \
51 (val) ? of_property_read_##type##_array((node), (propname), (val), (nval)) \
52 : of_property_count_elems_of_size((node), (propname), sizeof(type))
53
54#define DEV_PROP_READ_ARRAY(_dev_, _propname_, _type_, _proptype_, _val_, _nval_) \
55 IS_ENABLED(CONFIG_OF) && _dev_->of_node ? \
56 (OF_DEV_PROP_READ_ARRAY(_dev_->of_node, _propname_, _type_, \
57 _val_, _nval_)) : \
58 acpi_dev_prop_read(ACPI_COMPANION(_dev_), _propname_, \
59 _proptype_, _val_, _nval_)
60
61/**
62 * device_property_read_u8_array - return a u8 array property of a device
63 * @dev: Device to get the property of
64 * @propname: Name of the property
65 * @val: The values are stored here
66 * @nval: Size of the @val array
67 *
68 * Function reads an array of u8 properties with @propname from the device
69 * firmware description and stores them to @val if found.
70 *
71 * Return: %0 if the property was found (success),
72 * %-EINVAL if given arguments are not valid,
73 * %-ENODATA if the property does not have a value,
74 * %-EPROTO if the property is not an array of numbers,
75 * %-EOVERFLOW if the size of the property is not as expected.
76 */
77int device_property_read_u8_array(struct device *dev, const char *propname,
78 u8 *val, size_t nval)
79{
80 return DEV_PROP_READ_ARRAY(dev, propname, u8, DEV_PROP_U8, val, nval);
81}
82EXPORT_SYMBOL_GPL(device_property_read_u8_array);
83
84/**
85 * device_property_read_u16_array - return a u16 array property of a device
86 * @dev: Device to get the property of
87 * @propname: Name of the property
88 * @val: The values are stored here
89 * @nval: Size of the @val array
90 *
91 * Function reads an array of u16 properties with @propname from the device
92 * firmware description and stores them to @val if found.
93 *
94 * Return: %0 if the property was found (success),
95 * %-EINVAL if given arguments are not valid,
96 * %-ENODATA if the property does not have a value,
97 * %-EPROTO if the property is not an array of numbers,
98 * %-EOVERFLOW if the size of the property is not as expected.
99 */
100int device_property_read_u16_array(struct device *dev, const char *propname,
101 u16 *val, size_t nval)
102{
103 return DEV_PROP_READ_ARRAY(dev, propname, u16, DEV_PROP_U16, val, nval);
104}
105EXPORT_SYMBOL_GPL(device_property_read_u16_array);
106
107/**
108 * device_property_read_u32_array - return a u32 array property of a device
109 * @dev: Device to get the property of
110 * @propname: Name of the property
111 * @val: The values are stored here
112 * @nval: Size of the @val array
113 *
114 * Function reads an array of u32 properties with @propname from the device
115 * firmware description and stores them to @val if found.
116 *
117 * Return: %0 if the property was found (success),
118 * %-EINVAL if given arguments are not valid,
119 * %-ENODATA if the property does not have a value,
120 * %-EPROTO if the property is not an array of numbers,
121 * %-EOVERFLOW if the size of the property is not as expected.
122 */
123int device_property_read_u32_array(struct device *dev, const char *propname,
124 u32 *val, size_t nval)
125{
126 return DEV_PROP_READ_ARRAY(dev, propname, u32, DEV_PROP_U32, val, nval);
127}
128EXPORT_SYMBOL_GPL(device_property_read_u32_array);
129
130/**
131 * device_property_read_u64_array - return a u64 array property of a device
132 * @dev: Device to get the property of
133 * @propname: Name of the property
134 * @val: The values are stored here
135 * @nval: Size of the @val array
136 *
137 * Function reads an array of u64 properties with @propname from the device
138 * firmware description and stores them to @val if found.
139 *
140 * Return: %0 if the property was found (success),
141 * %-EINVAL if given arguments are not valid,
142 * %-ENODATA if the property does not have a value,
143 * %-EPROTO if the property is not an array of numbers,
144 * %-EOVERFLOW if the size of the property is not as expected.
145 */
146int device_property_read_u64_array(struct device *dev, const char *propname,
147 u64 *val, size_t nval)
148{
149 return DEV_PROP_READ_ARRAY(dev, propname, u64, DEV_PROP_U64, val, nval);
150}
151EXPORT_SYMBOL_GPL(device_property_read_u64_array);
152
153/**
154 * device_property_read_string_array - return a string array property of device
155 * @dev: Device to get the property of
156 * @propname: Name of the property
157 * @val: The values are stored here
158 * @nval: Size of the @val array
159 *
160 * Function reads an array of string properties with @propname from the device
161 * firmware description and stores them to @val if found.
162 *
163 * Return: %0 if the property was found (success),
164 * %-EINVAL if given arguments are not valid,
165 * %-ENODATA if the property does not have a value,
166 * %-EPROTO or %-EILSEQ if the property is not an array of strings,
167 * %-EOVERFLOW if the size of the property is not as expected.
168 */
169int device_property_read_string_array(struct device *dev, const char *propname,
170 const char **val, size_t nval)
171{
172 return IS_ENABLED(CONFIG_OF) && dev->of_node ?
173 of_property_read_string_array(dev->of_node, propname, val, nval) :
174 acpi_dev_prop_read(ACPI_COMPANION(dev), propname,
175 DEV_PROP_STRING, val, nval);
176}
177EXPORT_SYMBOL_GPL(device_property_read_string_array);
178
179/**
180 * device_property_read_string - return a string property of a device
181 * @dev: Device to get the property of
182 * @propname: Name of the property
183 * @val: The value is stored here
184 *
185 * Function reads property @propname from the device firmware description and
186 * stores the value into @val if found. The value is checked to be a string.
187 *
188 * Return: %0 if the property was found (success),
189 * %-EINVAL if given arguments are not valid,
190 * %-ENODATA if the property does not have a value,
191 * %-EPROTO or %-EILSEQ if the property type is not a string.
192 */
193int device_property_read_string(struct device *dev, const char *propname,
194 const char **val)
195{
196 return IS_ENABLED(CONFIG_OF) && dev->of_node ?
197 of_property_read_string(dev->of_node, propname, val) :
198 acpi_dev_prop_read(ACPI_COMPANION(dev), propname,
199 DEV_PROP_STRING, val, 1);
200}
201EXPORT_SYMBOL_GPL(device_property_read_string);
202
/*
 * Dispatch an array-property read to whichever firmware interface backs
 * @_fwnode_: OF nodes go through OF_DEV_PROP_READ_ARRAY() (which picks the
 * typed of_property_read_*_array() helper from @_type_), ACPI nodes go
 * through acpi_dev_prop_read() with the DEV_PROP_* tag in @_proptype_.
 * Evaluates to 0 on success, a negative errno on failure, or -ENXIO when the
 * node belongs to neither interface. Implemented as a GNU statement
 * expression so it can be used as the body of the typed wrappers below.
 */
#define FWNODE_PROP_READ_ARRAY(_fwnode_, _propname_, _type_, _proptype_, _val_, _nval_) \
({ \
	int _ret_; \
	if (is_of_node(_fwnode_)) \
		_ret_ = OF_DEV_PROP_READ_ARRAY(of_node(_fwnode_), _propname_, \
					       _type_, _val_, _nval_); \
	else if (is_acpi_node(_fwnode_)) \
		_ret_ = acpi_dev_prop_read(acpi_node(_fwnode_), _propname_, \
					   _proptype_, _val_, _nval_); \
	else \
		_ret_ = -ENXIO; \
	_ret_; \
})
216
217/**
218 * fwnode_property_read_u8_array - return a u8 array property of firmware node
219 * @fwnode: Firmware node to get the property of
220 * @propname: Name of the property
221 * @val: The values are stored here
222 * @nval: Size of the @val array
223 *
224 * Read an array of u8 properties with @propname from @fwnode and stores them to
225 * @val if found.
226 *
227 * Return: %0 if the property was found (success),
228 * %-EINVAL if given arguments are not valid,
229 * %-ENODATA if the property does not have a value,
230 * %-EPROTO if the property is not an array of numbers,
231 * %-EOVERFLOW if the size of the property is not as expected,
232 * %-ENXIO if no suitable firmware interface is present.
233 */
234int fwnode_property_read_u8_array(struct fwnode_handle *fwnode,
235 const char *propname, u8 *val, size_t nval)
236{
237 return FWNODE_PROP_READ_ARRAY(fwnode, propname, u8, DEV_PROP_U8,
238 val, nval);
239}
240EXPORT_SYMBOL_GPL(fwnode_property_read_u8_array);
241
242/**
243 * fwnode_property_read_u16_array - return a u16 array property of firmware node
244 * @fwnode: Firmware node to get the property of
245 * @propname: Name of the property
246 * @val: The values are stored here
247 * @nval: Size of the @val array
248 *
249 * Read an array of u16 properties with @propname from @fwnode and store them to
250 * @val if found.
251 *
252 * Return: %0 if the property was found (success),
253 * %-EINVAL if given arguments are not valid,
254 * %-ENODATA if the property does not have a value,
255 * %-EPROTO if the property is not an array of numbers,
256 * %-EOVERFLOW if the size of the property is not as expected,
257 * %-ENXIO if no suitable firmware interface is present.
258 */
259int fwnode_property_read_u16_array(struct fwnode_handle *fwnode,
260 const char *propname, u16 *val, size_t nval)
261{
262 return FWNODE_PROP_READ_ARRAY(fwnode, propname, u16, DEV_PROP_U16,
263 val, nval);
264}
265EXPORT_SYMBOL_GPL(fwnode_property_read_u16_array);
266
267/**
268 * fwnode_property_read_u32_array - return a u32 array property of firmware node
269 * @fwnode: Firmware node to get the property of
270 * @propname: Name of the property
271 * @val: The values are stored here
272 * @nval: Size of the @val array
273 *
274 * Read an array of u32 properties with @propname from @fwnode store them to
275 * @val if found.
276 *
277 * Return: %0 if the property was found (success),
278 * %-EINVAL if given arguments are not valid,
279 * %-ENODATA if the property does not have a value,
280 * %-EPROTO if the property is not an array of numbers,
281 * %-EOVERFLOW if the size of the property is not as expected,
282 * %-ENXIO if no suitable firmware interface is present.
283 */
284int fwnode_property_read_u32_array(struct fwnode_handle *fwnode,
285 const char *propname, u32 *val, size_t nval)
286{
287 return FWNODE_PROP_READ_ARRAY(fwnode, propname, u32, DEV_PROP_U32,
288 val, nval);
289}
290EXPORT_SYMBOL_GPL(fwnode_property_read_u32_array);
291
292/**
293 * fwnode_property_read_u64_array - return a u64 array property firmware node
294 * @fwnode: Firmware node to get the property of
295 * @propname: Name of the property
296 * @val: The values are stored here
297 * @nval: Size of the @val array
298 *
299 * Read an array of u64 properties with @propname from @fwnode and store them to
300 * @val if found.
301 *
302 * Return: %0 if the property was found (success),
303 * %-EINVAL if given arguments are not valid,
304 * %-ENODATA if the property does not have a value,
305 * %-EPROTO if the property is not an array of numbers,
306 * %-EOVERFLOW if the size of the property is not as expected,
307 * %-ENXIO if no suitable firmware interface is present.
308 */
309int fwnode_property_read_u64_array(struct fwnode_handle *fwnode,
310 const char *propname, u64 *val, size_t nval)
311{
312 return FWNODE_PROP_READ_ARRAY(fwnode, propname, u64, DEV_PROP_U64,
313 val, nval);
314}
315EXPORT_SYMBOL_GPL(fwnode_property_read_u64_array);
316
317/**
318 * fwnode_property_read_string_array - return string array property of a node
319 * @fwnode: Firmware node to get the property of
320 * @propname: Name of the property
321 * @val: The values are stored here
322 * @nval: Size of the @val array
323 *
324 * Read an string list property @propname from the given firmware node and store
325 * them to @val if found.
326 *
327 * Return: %0 if the property was found (success),
328 * %-EINVAL if given arguments are not valid,
329 * %-ENODATA if the property does not have a value,
330 * %-EPROTO if the property is not an array of strings,
331 * %-EOVERFLOW if the size of the property is not as expected,
332 * %-ENXIO if no suitable firmware interface is present.
333 */
334int fwnode_property_read_string_array(struct fwnode_handle *fwnode,
335 const char *propname, const char **val,
336 size_t nval)
337{
338 if (is_of_node(fwnode))
339 return of_property_read_string_array(of_node(fwnode), propname,
340 val, nval);
341 else if (is_acpi_node(fwnode))
342 return acpi_dev_prop_read(acpi_node(fwnode), propname,
343 DEV_PROP_STRING, val, nval);
344
345 return -ENXIO;
346}
347EXPORT_SYMBOL_GPL(fwnode_property_read_string_array);
348
349/**
350 * fwnode_property_read_string - return a string property of a firmware node
351 * @fwnode: Firmware node to get the property of
352 * @propname: Name of the property
353 * @val: The value is stored here
354 *
355 * Read property @propname from the given firmware node and store the value into
356 * @val if found. The value is checked to be a string.
357 *
358 * Return: %0 if the property was found (success),
359 * %-EINVAL if given arguments are not valid,
360 * %-ENODATA if the property does not have a value,
361 * %-EPROTO or %-EILSEQ if the property is not a string,
362 * %-ENXIO if no suitable firmware interface is present.
363 */
364int fwnode_property_read_string(struct fwnode_handle *fwnode,
365 const char *propname, const char **val)
366{
367 if (is_of_node(fwnode))
368 return of_property_read_string(of_node(fwnode),propname, val);
369 else if (is_acpi_node(fwnode))
370 return acpi_dev_prop_read(acpi_node(fwnode), propname,
371 DEV_PROP_STRING, val, 1);
372
373 return -ENXIO;
374}
375EXPORT_SYMBOL_GPL(fwnode_property_read_string);
376
377/**
378 * device_get_next_child_node - Return the next child node handle for a device
379 * @dev: Device to find the next child node for.
380 * @child: Handle to one of the device's child nodes or a null handle.
381 */
382struct fwnode_handle *device_get_next_child_node(struct device *dev,
383 struct fwnode_handle *child)
384{
385 if (IS_ENABLED(CONFIG_OF) && dev->of_node) {
386 struct device_node *node;
387
388 node = of_get_next_available_child(dev->of_node, of_node(child));
389 if (node)
390 return &node->fwnode;
391 } else if (IS_ENABLED(CONFIG_ACPI)) {
392 struct acpi_device *node;
393
394 node = acpi_get_next_child(dev, acpi_node(child));
395 if (node)
396 return acpi_fwnode_handle(node);
397 }
398 return NULL;
399}
400EXPORT_SYMBOL_GPL(device_get_next_child_node);
401
/**
 * fwnode_handle_put - Drop reference to a device node
 * @fwnode: Pointer to the device node to drop the reference to.
 *
 * This has to be used when terminating device_for_each_child_node() iteration
 * with break or return to prevent stale device node references from being left
 * behind.
 */
void fwnode_handle_put(struct fwnode_handle *fwnode)
{
	/* Only OF nodes carry a refcount here; other node types are a no-op. */
	if (is_of_node(fwnode))
		of_node_put(of_node(fwnode));
}
EXPORT_SYMBOL_GPL(fwnode_handle_put);
416
417/**
418 * device_get_child_node_count - return the number of child nodes for device
419 * @dev: Device to cound the child nodes for
420 */
421unsigned int device_get_child_node_count(struct device *dev)
422{
423 struct fwnode_handle *child;
424 unsigned int count = 0;
425
426 device_for_each_child_node(dev, child)
427 count++;
428
429 return count;
430}
431EXPORT_SYMBOL_GPL(device_get_child_node_count);
diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig
index 8a3f51f7b1b9..db9d00c36a3e 100644
--- a/drivers/base/regmap/Kconfig
+++ b/drivers/base/regmap/Kconfig
@@ -3,12 +3,15 @@
3# subsystems should select the appropriate symbols. 3# subsystems should select the appropriate symbols.
4 4
5config REGMAP 5config REGMAP
6 default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_MMIO || REGMAP_IRQ) 6 default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_AC97 || REGMAP_MMIO || REGMAP_IRQ)
7 select LZO_COMPRESS 7 select LZO_COMPRESS
8 select LZO_DECOMPRESS 8 select LZO_DECOMPRESS
9 select IRQ_DOMAIN if REGMAP_IRQ 9 select IRQ_DOMAIN if REGMAP_IRQ
10 bool 10 bool
11 11
12config REGMAP_AC97
13 tristate
14
12config REGMAP_I2C 15config REGMAP_I2C
13 tristate 16 tristate
14 depends on I2C 17 depends on I2C
diff --git a/drivers/base/regmap/Makefile b/drivers/base/regmap/Makefile
index a7c670b4123a..0a533653ef3b 100644
--- a/drivers/base/regmap/Makefile
+++ b/drivers/base/regmap/Makefile
@@ -1,6 +1,7 @@
1obj-$(CONFIG_REGMAP) += regmap.o regcache.o 1obj-$(CONFIG_REGMAP) += regmap.o regcache.o
2obj-$(CONFIG_REGMAP) += regcache-rbtree.o regcache-lzo.o regcache-flat.o 2obj-$(CONFIG_REGMAP) += regcache-rbtree.o regcache-lzo.o regcache-flat.o
3obj-$(CONFIG_DEBUG_FS) += regmap-debugfs.o 3obj-$(CONFIG_DEBUG_FS) += regmap-debugfs.o
4obj-$(CONFIG_REGMAP_AC97) += regmap-ac97.o
4obj-$(CONFIG_REGMAP_I2C) += regmap-i2c.o 5obj-$(CONFIG_REGMAP_I2C) += regmap-i2c.o
5obj-$(CONFIG_REGMAP_SPI) += regmap-spi.o 6obj-$(CONFIG_REGMAP_SPI) += regmap-spi.o
6obj-$(CONFIG_REGMAP_SPMI) += regmap-spmi.o 7obj-$(CONFIG_REGMAP_SPMI) += regmap-spmi.o
diff --git a/drivers/base/regmap/regcache-flat.c b/drivers/base/regmap/regcache-flat.c
index d9762e41959b..0246f44ded74 100644
--- a/drivers/base/regmap/regcache-flat.c
+++ b/drivers/base/regmap/regcache-flat.c
@@ -10,9 +10,9 @@
10 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
11 */ 11 */
12 12
13#include <linux/slab.h>
14#include <linux/device.h> 13#include <linux/device.h>
15#include <linux/seq_file.h> 14#include <linux/seq_file.h>
15#include <linux/slab.h>
16 16
17#include "internal.h" 17#include "internal.h"
18 18
diff --git a/drivers/base/regmap/regcache-lzo.c b/drivers/base/regmap/regcache-lzo.c
index e210a6d1406a..2d53f6f138e1 100644
--- a/drivers/base/regmap/regcache-lzo.c
+++ b/drivers/base/regmap/regcache-lzo.c
@@ -10,9 +10,9 @@
10 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
11 */ 11 */
12 12
13#include <linux/slab.h>
14#include <linux/device.h> 13#include <linux/device.h>
15#include <linux/lzo.h> 14#include <linux/lzo.h>
15#include <linux/slab.h>
16 16
17#include "internal.h" 17#include "internal.h"
18 18
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index f3e8fe0cc650..d453a2c98ad0 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -10,11 +10,11 @@
10 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
11 */ 11 */
12 12
13#include <linux/slab.h>
14#include <linux/device.h>
15#include <linux/debugfs.h> 13#include <linux/debugfs.h>
14#include <linux/device.h>
16#include <linux/rbtree.h> 15#include <linux/rbtree.h>
17#include <linux/seq_file.h> 16#include <linux/seq_file.h>
17#include <linux/slab.h>
18 18
19#include "internal.h" 19#include "internal.h"
20 20
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index f1280dc356d0..f373c35f9e1d 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -10,12 +10,12 @@
10 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
11 */ 11 */
12 12
13#include <linux/slab.h>
14#include <linux/export.h>
15#include <linux/device.h>
16#include <trace/events/regmap.h>
17#include <linux/bsearch.h> 13#include <linux/bsearch.h>
14#include <linux/device.h>
15#include <linux/export.h>
16#include <linux/slab.h>
18#include <linux/sort.h> 17#include <linux/sort.h>
18#include <trace/events/regmap.h>
19 19
20#include "internal.h" 20#include "internal.h"
21 21
@@ -36,6 +36,23 @@ static int regcache_hw_init(struct regmap *map)
36 if (!map->num_reg_defaults_raw) 36 if (!map->num_reg_defaults_raw)
37 return -EINVAL; 37 return -EINVAL;
38 38
39 /* calculate the size of reg_defaults */
40 for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++)
41 if (!regmap_volatile(map, i * map->reg_stride))
42 count++;
43
44 /* all registers are volatile, so just bypass */
45 if (!count) {
46 map->cache_bypass = true;
47 return 0;
48 }
49
50 map->num_reg_defaults = count;
51 map->reg_defaults = kmalloc_array(count, sizeof(struct reg_default),
52 GFP_KERNEL);
53 if (!map->reg_defaults)
54 return -ENOMEM;
55
39 if (!map->reg_defaults_raw) { 56 if (!map->reg_defaults_raw) {
40 u32 cache_bypass = map->cache_bypass; 57 u32 cache_bypass = map->cache_bypass;
41 dev_warn(map->dev, "No cache defaults, reading back from HW\n"); 58 dev_warn(map->dev, "No cache defaults, reading back from HW\n");
@@ -43,40 +60,25 @@ static int regcache_hw_init(struct regmap *map)
43 /* Bypass the cache access till data read from HW*/ 60 /* Bypass the cache access till data read from HW*/
44 map->cache_bypass = 1; 61 map->cache_bypass = 1;
45 tmp_buf = kmalloc(map->cache_size_raw, GFP_KERNEL); 62 tmp_buf = kmalloc(map->cache_size_raw, GFP_KERNEL);
46 if (!tmp_buf) 63 if (!tmp_buf) {
47 return -EINVAL; 64 ret = -ENOMEM;
65 goto err_free;
66 }
48 ret = regmap_raw_read(map, 0, tmp_buf, 67 ret = regmap_raw_read(map, 0, tmp_buf,
49 map->num_reg_defaults_raw); 68 map->num_reg_defaults_raw);
50 map->cache_bypass = cache_bypass; 69 map->cache_bypass = cache_bypass;
51 if (ret < 0) { 70 if (ret < 0)
52 kfree(tmp_buf); 71 goto err_cache_free;
53 return ret; 72
54 }
55 map->reg_defaults_raw = tmp_buf; 73 map->reg_defaults_raw = tmp_buf;
56 map->cache_free = 1; 74 map->cache_free = 1;
57 } 75 }
58 76
59 /* calculate the size of reg_defaults */
60 for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++) {
61 val = regcache_get_val(map, map->reg_defaults_raw, i);
62 if (regmap_volatile(map, i * map->reg_stride))
63 continue;
64 count++;
65 }
66
67 map->reg_defaults = kmalloc(count * sizeof(struct reg_default),
68 GFP_KERNEL);
69 if (!map->reg_defaults) {
70 ret = -ENOMEM;
71 goto err_free;
72 }
73
74 /* fill the reg_defaults */ 77 /* fill the reg_defaults */
75 map->num_reg_defaults = count;
76 for (i = 0, j = 0; i < map->num_reg_defaults_raw; i++) { 78 for (i = 0, j = 0; i < map->num_reg_defaults_raw; i++) {
77 val = regcache_get_val(map, map->reg_defaults_raw, i);
78 if (regmap_volatile(map, i * map->reg_stride)) 79 if (regmap_volatile(map, i * map->reg_stride))
79 continue; 80 continue;
81 val = regcache_get_val(map, map->reg_defaults_raw, i);
80 map->reg_defaults[j].reg = i * map->reg_stride; 82 map->reg_defaults[j].reg = i * map->reg_stride;
81 map->reg_defaults[j].def = val; 83 map->reg_defaults[j].def = val;
82 j++; 84 j++;
@@ -84,9 +86,10 @@ static int regcache_hw_init(struct regmap *map)
84 86
85 return 0; 87 return 0;
86 88
89err_cache_free:
90 kfree(tmp_buf);
87err_free: 91err_free:
88 if (map->cache_free) 92 kfree(map->reg_defaults);
89 kfree(map->reg_defaults_raw);
90 93
91 return ret; 94 return ret;
92} 95}
@@ -150,6 +153,8 @@ int regcache_init(struct regmap *map, const struct regmap_config *config)
150 ret = regcache_hw_init(map); 153 ret = regcache_hw_init(map);
151 if (ret < 0) 154 if (ret < 0)
152 return ret; 155 return ret;
156 if (map->cache_bypass)
157 return 0;
153 } 158 }
154 159
155 if (!map->max_register) 160 if (!map->max_register)
diff --git a/drivers/base/regmap/regmap-ac97.c b/drivers/base/regmap/regmap-ac97.c
new file mode 100644
index 000000000000..e4c45d2299c1
--- /dev/null
+++ b/drivers/base/regmap/regmap-ac97.c
@@ -0,0 +1,114 @@
1/*
2 * Register map access API - AC'97 support
3 *
4 * Copyright 2013 Linaro Ltd. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#include <linux/clk.h>
20#include <linux/err.h>
21#include <linux/init.h>
22#include <linux/io.h>
23#include <linux/module.h>
24#include <linux/regmap.h>
25#include <linux/slab.h>
26
27#include <sound/ac97_codec.h>
28
29bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg)
30{
31 switch (reg) {
32 case AC97_RESET:
33 case AC97_POWERDOWN:
34 case AC97_INT_PAGING:
35 case AC97_EXTENDED_ID:
36 case AC97_EXTENDED_STATUS:
37 case AC97_EXTENDED_MID:
38 case AC97_EXTENDED_MSTATUS:
39 case AC97_GPIO_STATUS:
40 case AC97_MISC_AFE:
41 case AC97_VENDOR_ID1:
42 case AC97_VENDOR_ID2:
43 case AC97_CODEC_CLASS_REV:
44 case AC97_PCI_SVID:
45 case AC97_PCI_SID:
46 case AC97_FUNC_SELECT:
47 case AC97_FUNC_INFO:
48 case AC97_SENSE_INFO:
49 return true;
50 default:
51 return false;
52 }
53}
54EXPORT_SYMBOL_GPL(regmap_ac97_default_volatile);
55
56static int regmap_ac97_reg_read(void *context, unsigned int reg,
57 unsigned int *val)
58{
59 struct snd_ac97 *ac97 = context;
60
61 *val = ac97->bus->ops->read(ac97, reg);
62
63 return 0;
64}
65
66static int regmap_ac97_reg_write(void *context, unsigned int reg,
67 unsigned int val)
68{
69 struct snd_ac97 *ac97 = context;
70
71 ac97->bus->ops->write(ac97, reg, val);
72
73 return 0;
74}
75
76static const struct regmap_bus ac97_regmap_bus = {
77 .reg_write = regmap_ac97_reg_write,
78 .reg_read = regmap_ac97_reg_read,
79};
80
81/**
82 * regmap_init_ac97(): Initialise AC'97 register map
83 *
84 * @ac97: Device that will be interacted with
85 * @config: Configuration for register map
86 *
87 * The return value will be an ERR_PTR() on error or a valid pointer to
88 * a struct regmap.
89 */
90struct regmap *regmap_init_ac97(struct snd_ac97 *ac97,
91 const struct regmap_config *config)
92{
93 return regmap_init(&ac97->dev, &ac97_regmap_bus, ac97, config);
94}
95EXPORT_SYMBOL_GPL(regmap_init_ac97);
96
97/**
98 * devm_regmap_init_ac97(): Initialise AC'97 register map
99 *
100 * @ac97: Device that will be interacted with
101 * @config: Configuration for register map
102 *
103 * The return value will be an ERR_PTR() on error or a valid pointer
104 * to a struct regmap. The regmap will be automatically freed by the
105 * device management code.
106 */
107struct regmap *devm_regmap_init_ac97(struct snd_ac97 *ac97,
108 const struct regmap_config *config)
109{
110 return devm_regmap_init(&ac97->dev, &ac97_regmap_bus, ac97, config);
111}
112EXPORT_SYMBOL_GPL(devm_regmap_init_ac97);
113
114MODULE_LICENSE("GPL v2");
diff --git a/drivers/block/drbd/drbd_debugfs.c b/drivers/block/drbd/drbd_debugfs.c
index 900d4d3272d1..9a950022ff88 100644
--- a/drivers/block/drbd/drbd_debugfs.c
+++ b/drivers/block/drbd/drbd_debugfs.c
@@ -419,7 +419,7 @@ static int in_flight_summary_show(struct seq_file *m, void *pos)
419 return 0; 419 return 0;
420} 420}
421 421
422/* simple_positive(file->f_dentry) respectively debugfs_positive(), 422/* simple_positive(file->f_path.dentry) respectively debugfs_positive(),
423 * but neither is "reachable" from here. 423 * but neither is "reachable" from here.
424 * So we have our own inline version of it above. :-( */ 424 * So we have our own inline version of it above. :-( */
425static inline int debugfs_positive(struct dentry *dentry) 425static inline int debugfs_positive(struct dentry *dentry)
@@ -437,14 +437,14 @@ static int drbd_single_open(struct file *file, int (*show)(struct seq_file *, vo
437 437
438 /* Are we still linked, 438 /* Are we still linked,
439 * or has debugfs_remove() already been called? */ 439 * or has debugfs_remove() already been called? */
440 parent = file->f_dentry->d_parent; 440 parent = file->f_path.dentry->d_parent;
441 /* not sure if this can happen: */ 441 /* not sure if this can happen: */
442 if (!parent || !parent->d_inode) 442 if (!parent || !parent->d_inode)
443 goto out; 443 goto out;
444 /* serialize with d_delete() */ 444 /* serialize with d_delete() */
445 mutex_lock(&parent->d_inode->i_mutex); 445 mutex_lock(&parent->d_inode->i_mutex);
446 /* Make sure the object is still alive */ 446 /* Make sure the object is still alive */
447 if (debugfs_positive(file->f_dentry) 447 if (debugfs_positive(file->f_path.dentry)
448 && kref_get_unless_zero(kref)) 448 && kref_get_unless_zero(kref))
449 ret = 0; 449 ret = 0;
450 mutex_unlock(&parent->d_inode->i_mutex); 450 mutex_unlock(&parent->d_inode->i_mutex);
diff --git a/drivers/block/nvme-scsi.c b/drivers/block/nvme-scsi.c
index a4cd6d691c63..0b4b2775600e 100644
--- a/drivers/block/nvme-scsi.c
+++ b/drivers/block/nvme-scsi.c
@@ -329,7 +329,7 @@ INQUIRY_EVPD_BIT_MASK) ? 1 : 0)
329(GET_U32_FROM_CDB(cdb, READ_CAP_16_CDB_ALLOC_LENGTH_OFFSET)) 329(GET_U32_FROM_CDB(cdb, READ_CAP_16_CDB_ALLOC_LENGTH_OFFSET))
330 330
331#define IS_READ_CAP_16(cdb) \ 331#define IS_READ_CAP_16(cdb) \
332((cdb[0] == SERVICE_ACTION_IN && cdb[1] == SAI_READ_CAPACITY_16) ? 1 : 0) 332((cdb[0] == SERVICE_ACTION_IN_16 && cdb[1] == SAI_READ_CAPACITY_16) ? 1 : 0)
333 333
334/* Request Sense Helper Macros */ 334/* Request Sense Helper Macros */
335#define GET_REQUEST_SENSE_ALLOC_LENGTH(cdb) \ 335#define GET_REQUEST_SENSE_ALLOC_LENGTH(cdb) \
@@ -2947,7 +2947,7 @@ static int nvme_scsi_translate(struct nvme_ns *ns, struct sg_io_hdr *hdr)
2947 case READ_CAPACITY: 2947 case READ_CAPACITY:
2948 retcode = nvme_trans_read_capacity(ns, hdr, cmd); 2948 retcode = nvme_trans_read_capacity(ns, hdr, cmd);
2949 break; 2949 break;
2950 case SERVICE_ACTION_IN: 2950 case SERVICE_ACTION_IN_16:
2951 if (IS_READ_CAP_16(cmd)) 2951 if (IS_READ_CAP_16(cmd))
2952 retcode = nvme_trans_read_capacity(ns, hdr, cmd); 2952 retcode = nvme_trans_read_capacity(ns, hdr, cmd);
2953 else 2953 else
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index c6a27d54ad62..1fb9e09fbbc5 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -80,7 +80,7 @@ static int __virtblk_add_req(struct virtqueue *vq,
80{ 80{
81 struct scatterlist hdr, status, cmd, sense, inhdr, *sgs[6]; 81 struct scatterlist hdr, status, cmd, sense, inhdr, *sgs[6];
82 unsigned int num_out = 0, num_in = 0; 82 unsigned int num_out = 0, num_in = 0;
83 int type = vbr->out_hdr.type & ~VIRTIO_BLK_T_OUT; 83 __virtio32 type = vbr->out_hdr.type & ~cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT);
84 84
85 sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr)); 85 sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
86 sgs[num_out++] = &hdr; 86 sgs[num_out++] = &hdr;
@@ -91,19 +91,19 @@ static int __virtblk_add_req(struct virtqueue *vq,
91 * block, and before the normal inhdr we put the sense data and the 91 * block, and before the normal inhdr we put the sense data and the
92 * inhdr with additional status information. 92 * inhdr with additional status information.
93 */ 93 */
94 if (type == VIRTIO_BLK_T_SCSI_CMD) { 94 if (type == cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_SCSI_CMD)) {
95 sg_init_one(&cmd, vbr->req->cmd, vbr->req->cmd_len); 95 sg_init_one(&cmd, vbr->req->cmd, vbr->req->cmd_len);
96 sgs[num_out++] = &cmd; 96 sgs[num_out++] = &cmd;
97 } 97 }
98 98
99 if (have_data) { 99 if (have_data) {
100 if (vbr->out_hdr.type & VIRTIO_BLK_T_OUT) 100 if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
101 sgs[num_out++] = data_sg; 101 sgs[num_out++] = data_sg;
102 else 102 else
103 sgs[num_out + num_in++] = data_sg; 103 sgs[num_out + num_in++] = data_sg;
104 } 104 }
105 105
106 if (type == VIRTIO_BLK_T_SCSI_CMD) { 106 if (type == cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_SCSI_CMD)) {
107 sg_init_one(&sense, vbr->req->sense, SCSI_SENSE_BUFFERSIZE); 107 sg_init_one(&sense, vbr->req->sense, SCSI_SENSE_BUFFERSIZE);
108 sgs[num_out + num_in++] = &sense; 108 sgs[num_out + num_in++] = &sense;
109 sg_init_one(&inhdr, &vbr->in_hdr, sizeof(vbr->in_hdr)); 109 sg_init_one(&inhdr, &vbr->in_hdr, sizeof(vbr->in_hdr));
@@ -119,12 +119,13 @@ static int __virtblk_add_req(struct virtqueue *vq,
119static inline void virtblk_request_done(struct request *req) 119static inline void virtblk_request_done(struct request *req)
120{ 120{
121 struct virtblk_req *vbr = blk_mq_rq_to_pdu(req); 121 struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
122 struct virtio_blk *vblk = req->q->queuedata;
122 int error = virtblk_result(vbr); 123 int error = virtblk_result(vbr);
123 124
124 if (req->cmd_type == REQ_TYPE_BLOCK_PC) { 125 if (req->cmd_type == REQ_TYPE_BLOCK_PC) {
125 req->resid_len = vbr->in_hdr.residual; 126 req->resid_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.residual);
126 req->sense_len = vbr->in_hdr.sense_len; 127 req->sense_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.sense_len);
127 req->errors = vbr->in_hdr.errors; 128 req->errors = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.errors);
128 } else if (req->cmd_type == REQ_TYPE_SPECIAL) { 129 } else if (req->cmd_type == REQ_TYPE_SPECIAL) {
129 req->errors = (error != 0); 130 req->errors = (error != 0);
130 } 131 }
@@ -173,25 +174,25 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req,
173 174
174 vbr->req = req; 175 vbr->req = req;
175 if (req->cmd_flags & REQ_FLUSH) { 176 if (req->cmd_flags & REQ_FLUSH) {
176 vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH; 177 vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_FLUSH);
177 vbr->out_hdr.sector = 0; 178 vbr->out_hdr.sector = 0;
178 vbr->out_hdr.ioprio = req_get_ioprio(vbr->req); 179 vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req));
179 } else { 180 } else {
180 switch (req->cmd_type) { 181 switch (req->cmd_type) {
181 case REQ_TYPE_FS: 182 case REQ_TYPE_FS:
182 vbr->out_hdr.type = 0; 183 vbr->out_hdr.type = 0;
183 vbr->out_hdr.sector = blk_rq_pos(vbr->req); 184 vbr->out_hdr.sector = cpu_to_virtio64(vblk->vdev, blk_rq_pos(vbr->req));
184 vbr->out_hdr.ioprio = req_get_ioprio(vbr->req); 185 vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req));
185 break; 186 break;
186 case REQ_TYPE_BLOCK_PC: 187 case REQ_TYPE_BLOCK_PC:
187 vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD; 188 vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_SCSI_CMD);
188 vbr->out_hdr.sector = 0; 189 vbr->out_hdr.sector = 0;
189 vbr->out_hdr.ioprio = req_get_ioprio(vbr->req); 190 vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req));
190 break; 191 break;
191 case REQ_TYPE_SPECIAL: 192 case REQ_TYPE_SPECIAL:
192 vbr->out_hdr.type = VIRTIO_BLK_T_GET_ID; 193 vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_GET_ID);
193 vbr->out_hdr.sector = 0; 194 vbr->out_hdr.sector = 0;
194 vbr->out_hdr.ioprio = req_get_ioprio(vbr->req); 195 vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req));
195 break; 196 break;
196 default: 197 default:
197 /* We don't put anything else in the queue. */ 198 /* We don't put anything else in the queue. */
@@ -204,9 +205,9 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req,
204 num = blk_rq_map_sg(hctx->queue, vbr->req, vbr->sg); 205 num = blk_rq_map_sg(hctx->queue, vbr->req, vbr->sg);
205 if (num) { 206 if (num) {
206 if (rq_data_dir(vbr->req) == WRITE) 207 if (rq_data_dir(vbr->req) == WRITE)
207 vbr->out_hdr.type |= VIRTIO_BLK_T_OUT; 208 vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_OUT);
208 else 209 else
209 vbr->out_hdr.type |= VIRTIO_BLK_T_IN; 210 vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_IN);
210 } 211 }
211 212
212 spin_lock_irqsave(&vblk->vqs[qid].lock, flags); 213 spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
@@ -331,7 +332,8 @@ static ssize_t virtblk_serial_show(struct device *dev,
331 332
332 return err; 333 return err;
333} 334}
334DEVICE_ATTR(serial, S_IRUGO, virtblk_serial_show, NULL); 335
336static DEVICE_ATTR(serial, S_IRUGO, virtblk_serial_show, NULL);
335 337
336static void virtblk_config_changed_work(struct work_struct *work) 338static void virtblk_config_changed_work(struct work_struct *work)
337{ 339{
@@ -476,7 +478,8 @@ static int virtblk_get_cache_mode(struct virtio_device *vdev)
476 struct virtio_blk_config, wce, 478 struct virtio_blk_config, wce,
477 &writeback); 479 &writeback);
478 if (err) 480 if (err)
479 writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_WCE); 481 writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_WCE) ||
482 virtio_has_feature(vdev, VIRTIO_F_VERSION_1);
480 483
481 return writeback; 484 return writeback;
482} 485}
@@ -821,25 +824,34 @@ static const struct virtio_device_id id_table[] = {
821 { 0 }, 824 { 0 },
822}; 825};
823 826
824static unsigned int features[] = { 827static unsigned int features_legacy[] = {
825 VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY, 828 VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
826 VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE, VIRTIO_BLK_F_SCSI, 829 VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE, VIRTIO_BLK_F_SCSI,
827 VIRTIO_BLK_F_WCE, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE, 830 VIRTIO_BLK_F_WCE, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
828 VIRTIO_BLK_F_MQ, 831 VIRTIO_BLK_F_MQ,
832}
833;
834static unsigned int features[] = {
835 VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
836 VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
837 VIRTIO_BLK_F_TOPOLOGY,
838 VIRTIO_BLK_F_MQ,
829}; 839};
830 840
831static struct virtio_driver virtio_blk = { 841static struct virtio_driver virtio_blk = {
832 .feature_table = features, 842 .feature_table = features,
833 .feature_table_size = ARRAY_SIZE(features), 843 .feature_table_size = ARRAY_SIZE(features),
834 .driver.name = KBUILD_MODNAME, 844 .feature_table_legacy = features_legacy,
835 .driver.owner = THIS_MODULE, 845 .feature_table_size_legacy = ARRAY_SIZE(features_legacy),
836 .id_table = id_table, 846 .driver.name = KBUILD_MODNAME,
837 .probe = virtblk_probe, 847 .driver.owner = THIS_MODULE,
838 .remove = virtblk_remove, 848 .id_table = id_table,
839 .config_changed = virtblk_config_changed, 849 .probe = virtblk_probe,
850 .remove = virtblk_remove,
851 .config_changed = virtblk_config_changed,
840#ifdef CONFIG_PM_SLEEP 852#ifdef CONFIG_PM_SLEEP
841 .freeze = virtblk_freeze, 853 .freeze = virtblk_freeze,
842 .restore = virtblk_restore, 854 .restore = virtblk_restore,
843#endif 855#endif
844}; 856};
845 857
@@ -871,8 +883,8 @@ out_destroy_workqueue:
871 883
872static void __exit fini(void) 884static void __exit fini(void)
873{ 885{
874 unregister_blkdev(major, "virtblk");
875 unregister_virtio_driver(&virtio_blk); 886 unregister_virtio_driver(&virtio_blk);
887 unregister_blkdev(major, "virtblk");
876 destroy_workqueue(virtblk_wq); 888 destroy_workqueue(virtblk_wq);
877} 889}
878module_init(init); 890module_init(init);
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index 603eb1be4f6a..b99729e36860 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -6,7 +6,7 @@ menu "Bus devices"
6 6
7config BRCMSTB_GISB_ARB 7config BRCMSTB_GISB_ARB
8 bool "Broadcom STB GISB bus arbiter" 8 bool "Broadcom STB GISB bus arbiter"
9 depends on ARM 9 depends on ARM || MIPS
10 help 10 help
11 Driver for the Broadcom Set Top Box System-on-a-chip internal bus 11 Driver for the Broadcom Set Top Box System-on-a-chip internal bus
12 arbiter. This driver provides timeout and target abort error handling 12 arbiter. This driver provides timeout and target abort error handling
diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c
index 7af78df241f2..860da40b78ef 100644
--- a/drivers/bus/arm-cci.c
+++ b/drivers/bus/arm-cci.c
@@ -16,17 +16,17 @@
16 16
17#include <linux/arm-cci.h> 17#include <linux/arm-cci.h>
18#include <linux/io.h> 18#include <linux/io.h>
19#include <linux/interrupt.h>
19#include <linux/module.h> 20#include <linux/module.h>
20#include <linux/of_address.h> 21#include <linux/of_address.h>
21#include <linux/of_irq.h> 22#include <linux/of_irq.h>
22#include <linux/of_platform.h> 23#include <linux/of_platform.h>
24#include <linux/perf_event.h>
23#include <linux/platform_device.h> 25#include <linux/platform_device.h>
24#include <linux/slab.h> 26#include <linux/slab.h>
25#include <linux/spinlock.h> 27#include <linux/spinlock.h>
26 28
27#include <asm/cacheflush.h> 29#include <asm/cacheflush.h>
28#include <asm/irq_regs.h>
29#include <asm/pmu.h>
30#include <asm/smp_plat.h> 30#include <asm/smp_plat.h>
31 31
32#define DRIVER_NAME "CCI-400" 32#define DRIVER_NAME "CCI-400"
@@ -98,6 +98,8 @@ static unsigned long cci_ctrl_phys;
98 98
99#define CCI_PMU_CNTR_BASE(idx) ((idx) * SZ_4K) 99#define CCI_PMU_CNTR_BASE(idx) ((idx) * SZ_4K)
100 100
101#define CCI_PMU_CNTR_MASK ((1ULL << 32) -1)
102
101/* 103/*
102 * Instead of an event id to monitor CCI cycles, a dedicated counter is 104 * Instead of an event id to monitor CCI cycles, a dedicated counter is
103 * provided. Use 0xff to represent CCI cycles and hope that no future revisions 105 * provided. Use 0xff to represent CCI cycles and hope that no future revisions
@@ -170,18 +172,29 @@ static char *const pmu_names[] = {
170 [CCI_REV_R1] = "CCI_400_r1", 172 [CCI_REV_R1] = "CCI_400_r1",
171}; 173};
172 174
173struct cci_pmu_drv_data { 175struct cci_pmu_hw_events {
176 struct perf_event *events[CCI_PMU_MAX_HW_EVENTS];
177 unsigned long used_mask[BITS_TO_LONGS(CCI_PMU_MAX_HW_EVENTS)];
178 raw_spinlock_t pmu_lock;
179};
180
181struct cci_pmu {
174 void __iomem *base; 182 void __iomem *base;
175 struct arm_pmu *cci_pmu; 183 struct pmu pmu;
176 int nr_irqs; 184 int nr_irqs;
177 int irqs[CCI_PMU_MAX_HW_EVENTS]; 185 int irqs[CCI_PMU_MAX_HW_EVENTS];
178 unsigned long active_irqs; 186 unsigned long active_irqs;
179 struct perf_event *events[CCI_PMU_MAX_HW_EVENTS];
180 unsigned long used_mask[BITS_TO_LONGS(CCI_PMU_MAX_HW_EVENTS)];
181 struct pmu_port_event_ranges *port_ranges; 187 struct pmu_port_event_ranges *port_ranges;
182 struct pmu_hw_events hw_events; 188 struct cci_pmu_hw_events hw_events;
189 struct platform_device *plat_device;
190 int num_events;
191 atomic_t active_events;
192 struct mutex reserve_mutex;
193 cpumask_t cpus;
183}; 194};
184static struct cci_pmu_drv_data *pmu; 195static struct cci_pmu *pmu;
196
197#define to_cci_pmu(c) (container_of(c, struct cci_pmu, pmu))
185 198
186static bool is_duplicate_irq(int irq, int *irqs, int nr_irqs) 199static bool is_duplicate_irq(int irq, int *irqs, int nr_irqs)
187{ 200{
@@ -252,7 +265,7 @@ static int pmu_validate_hw_event(u8 hw_event)
252 return -ENOENT; 265 return -ENOENT;
253} 266}
254 267
255static int pmu_is_valid_counter(struct arm_pmu *cci_pmu, int idx) 268static int pmu_is_valid_counter(struct cci_pmu *cci_pmu, int idx)
256{ 269{
257 return CCI_PMU_CYCLE_CNTR_IDX <= idx && 270 return CCI_PMU_CYCLE_CNTR_IDX <= idx &&
258 idx <= CCI_PMU_CNTR_LAST(cci_pmu); 271 idx <= CCI_PMU_CNTR_LAST(cci_pmu);
@@ -293,14 +306,9 @@ static u32 pmu_get_max_counters(void)
293 return n_cnts + 1; 306 return n_cnts + 1;
294} 307}
295 308
296static struct pmu_hw_events *pmu_get_hw_events(void) 309static int pmu_get_event_idx(struct cci_pmu_hw_events *hw, struct perf_event *event)
297{
298 return &pmu->hw_events;
299}
300
301static int pmu_get_event_idx(struct pmu_hw_events *hw, struct perf_event *event)
302{ 310{
303 struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu); 311 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
304 struct hw_perf_event *hw_event = &event->hw; 312 struct hw_perf_event *hw_event = &event->hw;
305 unsigned long cci_event = hw_event->config_base & CCI_PMU_EVENT_MASK; 313 unsigned long cci_event = hw_event->config_base & CCI_PMU_EVENT_MASK;
306 int idx; 314 int idx;
@@ -336,7 +344,7 @@ static int pmu_map_event(struct perf_event *event)
336 return mapping; 344 return mapping;
337} 345}
338 346
339static int pmu_request_irq(struct arm_pmu *cci_pmu, irq_handler_t handler) 347static int pmu_request_irq(struct cci_pmu *cci_pmu, irq_handler_t handler)
340{ 348{
341 int i; 349 int i;
342 struct platform_device *pmu_device = cci_pmu->plat_device; 350 struct platform_device *pmu_device = cci_pmu->plat_device;
@@ -371,17 +379,91 @@ static int pmu_request_irq(struct arm_pmu *cci_pmu, irq_handler_t handler)
371 return 0; 379 return 0;
372} 380}
373 381
382static void pmu_free_irq(struct cci_pmu *cci_pmu)
383{
384 int i;
385
386 for (i = 0; i < pmu->nr_irqs; i++) {
387 if (!test_and_clear_bit(i, &pmu->active_irqs))
388 continue;
389
390 free_irq(pmu->irqs[i], cci_pmu);
391 }
392}
393
394static u32 pmu_read_counter(struct perf_event *event)
395{
396 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
397 struct hw_perf_event *hw_counter = &event->hw;
398 int idx = hw_counter->idx;
399 u32 value;
400
401 if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
402 dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
403 return 0;
404 }
405 value = pmu_read_register(idx, CCI_PMU_CNTR);
406
407 return value;
408}
409
410static void pmu_write_counter(struct perf_event *event, u32 value)
411{
412 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
413 struct hw_perf_event *hw_counter = &event->hw;
414 int idx = hw_counter->idx;
415
416 if (unlikely(!pmu_is_valid_counter(cci_pmu, idx)))
417 dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
418 else
419 pmu_write_register(value, idx, CCI_PMU_CNTR);
420}
421
422static u64 pmu_event_update(struct perf_event *event)
423{
424 struct hw_perf_event *hwc = &event->hw;
425 u64 delta, prev_raw_count, new_raw_count;
426
427 do {
428 prev_raw_count = local64_read(&hwc->prev_count);
429 new_raw_count = pmu_read_counter(event);
430 } while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
431 new_raw_count) != prev_raw_count);
432
433 delta = (new_raw_count - prev_raw_count) & CCI_PMU_CNTR_MASK;
434
435 local64_add(delta, &event->count);
436
437 return new_raw_count;
438}
439
440static void pmu_read(struct perf_event *event)
441{
442 pmu_event_update(event);
443}
444
445void pmu_event_set_period(struct perf_event *event)
446{
447 struct hw_perf_event *hwc = &event->hw;
448 /*
449 * The CCI PMU counters have a period of 2^32. To account for the
450 * possiblity of extreme interrupt latency we program for a period of
451 * half that. Hopefully we can handle the interrupt before another 2^31
452 * events occur and the counter overtakes its previous value.
453 */
454 u64 val = 1ULL << 31;
455 local64_set(&hwc->prev_count, val);
456 pmu_write_counter(event, val);
457}
458
374static irqreturn_t pmu_handle_irq(int irq_num, void *dev) 459static irqreturn_t pmu_handle_irq(int irq_num, void *dev)
375{ 460{
376 unsigned long flags; 461 unsigned long flags;
377 struct arm_pmu *cci_pmu = (struct arm_pmu *)dev; 462 struct cci_pmu *cci_pmu = dev;
378 struct pmu_hw_events *events = cci_pmu->get_hw_events(); 463 struct cci_pmu_hw_events *events = &pmu->hw_events;
379 struct perf_sample_data data;
380 struct pt_regs *regs;
381 int idx, handled = IRQ_NONE; 464 int idx, handled = IRQ_NONE;
382 465
383 raw_spin_lock_irqsave(&events->pmu_lock, flags); 466 raw_spin_lock_irqsave(&events->pmu_lock, flags);
384 regs = get_irq_regs();
385 /* 467 /*
386 * Iterate over counters and update the corresponding perf events. 468 * Iterate over counters and update the corresponding perf events.
387 * This should work regardless of whether we have per-counter overflow 469 * This should work regardless of whether we have per-counter overflow
@@ -403,154 +485,407 @@ static irqreturn_t pmu_handle_irq(int irq_num, void *dev)
403 485
404 pmu_write_register(CCI_PMU_OVRFLW_FLAG, idx, CCI_PMU_OVRFLW); 486 pmu_write_register(CCI_PMU_OVRFLW_FLAG, idx, CCI_PMU_OVRFLW);
405 487
488 pmu_event_update(event);
489 pmu_event_set_period(event);
406 handled = IRQ_HANDLED; 490 handled = IRQ_HANDLED;
407
408 armpmu_event_update(event);
409 perf_sample_data_init(&data, 0, hw_counter->last_period);
410 if (!armpmu_event_set_period(event))
411 continue;
412
413 if (perf_event_overflow(event, &data, regs))
414 cci_pmu->disable(event);
415 } 491 }
416 raw_spin_unlock_irqrestore(&events->pmu_lock, flags); 492 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
417 493
418 return IRQ_RETVAL(handled); 494 return IRQ_RETVAL(handled);
419} 495}
420 496
421static void pmu_free_irq(struct arm_pmu *cci_pmu) 497static int cci_pmu_get_hw(struct cci_pmu *cci_pmu)
422{ 498{
423 int i; 499 int ret = pmu_request_irq(cci_pmu, pmu_handle_irq);
500 if (ret) {
501 pmu_free_irq(cci_pmu);
502 return ret;
503 }
504 return 0;
505}
424 506
425 for (i = 0; i < pmu->nr_irqs; i++) { 507static void cci_pmu_put_hw(struct cci_pmu *cci_pmu)
426 if (!test_and_clear_bit(i, &pmu->active_irqs)) 508{
427 continue; 509 pmu_free_irq(cci_pmu);
510}
428 511
429 free_irq(pmu->irqs[i], cci_pmu); 512static void hw_perf_event_destroy(struct perf_event *event)
513{
514 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
515 atomic_t *active_events = &cci_pmu->active_events;
516 struct mutex *reserve_mutex = &cci_pmu->reserve_mutex;
517
518 if (atomic_dec_and_mutex_lock(active_events, reserve_mutex)) {
519 cci_pmu_put_hw(cci_pmu);
520 mutex_unlock(reserve_mutex);
430 } 521 }
431} 522}
432 523
433static void pmu_enable_event(struct perf_event *event) 524static void cci_pmu_enable(struct pmu *pmu)
434{ 525{
526 struct cci_pmu *cci_pmu = to_cci_pmu(pmu);
527 struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
528 int enabled = bitmap_weight(hw_events->used_mask, cci_pmu->num_events);
435 unsigned long flags; 529 unsigned long flags;
436 struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu); 530 u32 val;
437 struct pmu_hw_events *events = cci_pmu->get_hw_events(); 531
438 struct hw_perf_event *hw_counter = &event->hw; 532 if (!enabled)
439 int idx = hw_counter->idx; 533 return;
534
535 raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);
536
537 /* Enable all the PMU counters. */
538 val = readl_relaxed(cci_ctrl_base + CCI_PMCR) | CCI_PMCR_CEN;
539 writel(val, cci_ctrl_base + CCI_PMCR);
540 raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);
541
542}
543
544static void cci_pmu_disable(struct pmu *pmu)
545{
546 struct cci_pmu *cci_pmu = to_cci_pmu(pmu);
547 struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
548 unsigned long flags;
549 u32 val;
550
551 raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);
552
553 /* Disable all the PMU counters. */
554 val = readl_relaxed(cci_ctrl_base + CCI_PMCR) & ~CCI_PMCR_CEN;
555 writel(val, cci_ctrl_base + CCI_PMCR);
556 raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);
557}
558
559static void cci_pmu_start(struct perf_event *event, int pmu_flags)
560{
561 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
562 struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
563 struct hw_perf_event *hwc = &event->hw;
564 int idx = hwc->idx;
565 unsigned long flags;
566
567 /*
568 * To handle interrupt latency, we always reprogram the period
569 * regardlesss of PERF_EF_RELOAD.
570 */
571 if (pmu_flags & PERF_EF_RELOAD)
572 WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
573
574 hwc->state = 0;
440 575
441 if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) { 576 if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
442 dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx); 577 dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
443 return; 578 return;
444 } 579 }
445 580
446 raw_spin_lock_irqsave(&events->pmu_lock, flags); 581 raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);
447 582
448 /* Configure the event to count, unless you are counting cycles */ 583 /* Configure the event to count, unless you are counting cycles */
449 if (idx != CCI_PMU_CYCLE_CNTR_IDX) 584 if (idx != CCI_PMU_CYCLE_CNTR_IDX)
450 pmu_set_event(idx, hw_counter->config_base); 585 pmu_set_event(idx, hwc->config_base);
451 586
587 pmu_event_set_period(event);
452 pmu_enable_counter(idx); 588 pmu_enable_counter(idx);
453 589
454 raw_spin_unlock_irqrestore(&events->pmu_lock, flags); 590 raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);
455} 591}
456 592
457static void pmu_disable_event(struct perf_event *event) 593static void cci_pmu_stop(struct perf_event *event, int pmu_flags)
458{ 594{
459 struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu); 595 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
460 struct hw_perf_event *hw_counter = &event->hw; 596 struct hw_perf_event *hwc = &event->hw;
461 int idx = hw_counter->idx; 597 int idx = hwc->idx;
598
599 if (hwc->state & PERF_HES_STOPPED)
600 return;
462 601
463 if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) { 602 if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
464 dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx); 603 dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
465 return; 604 return;
466 } 605 }
467 606
607 /*
608 * We always reprogram the counter, so ignore PERF_EF_UPDATE. See
609 * cci_pmu_start()
610 */
468 pmu_disable_counter(idx); 611 pmu_disable_counter(idx);
612 pmu_event_update(event);
613 hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
469} 614}
470 615
471static void pmu_start(struct arm_pmu *cci_pmu) 616static int cci_pmu_add(struct perf_event *event, int flags)
472{ 617{
473 u32 val; 618 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
474 unsigned long flags; 619 struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
475 struct pmu_hw_events *events = cci_pmu->get_hw_events(); 620 struct hw_perf_event *hwc = &event->hw;
621 int idx;
622 int err = 0;
476 623
477 raw_spin_lock_irqsave(&events->pmu_lock, flags); 624 perf_pmu_disable(event->pmu);
478 625
479 /* Enable all the PMU counters. */ 626 /* If we don't have a space for the counter then finish early. */
480 val = readl_relaxed(cci_ctrl_base + CCI_PMCR) | CCI_PMCR_CEN; 627 idx = pmu_get_event_idx(hw_events, event);
481 writel(val, cci_ctrl_base + CCI_PMCR); 628 if (idx < 0) {
629 err = idx;
630 goto out;
631 }
482 632
483 raw_spin_unlock_irqrestore(&events->pmu_lock, flags); 633 event->hw.idx = idx;
634 hw_events->events[idx] = event;
635
636 hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
637 if (flags & PERF_EF_START)
638 cci_pmu_start(event, PERF_EF_RELOAD);
639
640 /* Propagate our changes to the userspace mapping. */
641 perf_event_update_userpage(event);
642
643out:
644 perf_pmu_enable(event->pmu);
645 return err;
484} 646}
485 647
486static void pmu_stop(struct arm_pmu *cci_pmu) 648static void cci_pmu_del(struct perf_event *event, int flags)
487{ 649{
488 u32 val; 650 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
489 unsigned long flags; 651 struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
490 struct pmu_hw_events *events = cci_pmu->get_hw_events(); 652 struct hw_perf_event *hwc = &event->hw;
653 int idx = hwc->idx;
491 654
492 raw_spin_lock_irqsave(&events->pmu_lock, flags); 655 cci_pmu_stop(event, PERF_EF_UPDATE);
656 hw_events->events[idx] = NULL;
657 clear_bit(idx, hw_events->used_mask);
493 658
494 /* Disable all the PMU counters. */ 659 perf_event_update_userpage(event);
495 val = readl_relaxed(cci_ctrl_base + CCI_PMCR) & ~CCI_PMCR_CEN; 660}
496 writel(val, cci_ctrl_base + CCI_PMCR);
497 661
498 raw_spin_unlock_irqrestore(&events->pmu_lock, flags); 662static int
663validate_event(struct cci_pmu_hw_events *hw_events,
664 struct perf_event *event)
665{
666 if (is_software_event(event))
667 return 1;
668
669 if (event->state < PERF_EVENT_STATE_OFF)
670 return 1;
671
672 if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
673 return 1;
674
675 return pmu_get_event_idx(hw_events, event) >= 0;
499} 676}
500 677
501static u32 pmu_read_counter(struct perf_event *event) 678static int
679validate_group(struct perf_event *event)
502{ 680{
503 struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu); 681 struct perf_event *sibling, *leader = event->group_leader;
504 struct hw_perf_event *hw_counter = &event->hw; 682 struct cci_pmu_hw_events fake_pmu = {
505 int idx = hw_counter->idx; 683 /*
506 u32 value; 684 * Initialise the fake PMU. We only need to populate the
685 * used_mask for the purposes of validation.
686 */
687 .used_mask = CPU_BITS_NONE,
688 };
507 689
508 if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) { 690 if (!validate_event(&fake_pmu, leader))
509 dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx); 691 return -EINVAL;
510 return 0; 692
693 list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
694 if (!validate_event(&fake_pmu, sibling))
695 return -EINVAL;
511 } 696 }
512 value = pmu_read_register(idx, CCI_PMU_CNTR);
513 697
514 return value; 698 if (!validate_event(&fake_pmu, event))
699 return -EINVAL;
700
701 return 0;
515} 702}
516 703
517static void pmu_write_counter(struct perf_event *event, u32 value) 704static int
705__hw_perf_event_init(struct perf_event *event)
518{ 706{
519 struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu); 707 struct hw_perf_event *hwc = &event->hw;
520 struct hw_perf_event *hw_counter = &event->hw; 708 int mapping;
521 int idx = hw_counter->idx;
522 709
523 if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) 710 mapping = pmu_map_event(event);
524 dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx); 711
525 else 712 if (mapping < 0) {
526 pmu_write_register(value, idx, CCI_PMU_CNTR); 713 pr_debug("event %x:%llx not supported\n", event->attr.type,
714 event->attr.config);
715 return mapping;
716 }
717
718 /*
719 * We don't assign an index until we actually place the event onto
720 * hardware. Use -1 to signify that we haven't decided where to put it
721 * yet.
722 */
723 hwc->idx = -1;
724 hwc->config_base = 0;
725 hwc->config = 0;
726 hwc->event_base = 0;
727
728 /*
729 * Store the event encoding into the config_base field.
730 */
731 hwc->config_base |= (unsigned long)mapping;
732
733 /*
734 * Limit the sample_period to half of the counter width. That way, the
735 * new counter value is far less likely to overtake the previous one
736 * unless you have some serious IRQ latency issues.
737 */
738 hwc->sample_period = CCI_PMU_CNTR_MASK >> 1;
739 hwc->last_period = hwc->sample_period;
740 local64_set(&hwc->period_left, hwc->sample_period);
741
742 if (event->group_leader != event) {
743 if (validate_group(event) != 0)
744 return -EINVAL;
745 }
746
747 return 0;
748}
749
750static int cci_pmu_event_init(struct perf_event *event)
751{
752 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
753 atomic_t *active_events = &cci_pmu->active_events;
754 int err = 0;
755 int cpu;
756
757 if (event->attr.type != event->pmu->type)
758 return -ENOENT;
759
760 /* Shared by all CPUs, no meaningful state to sample */
761 if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
762 return -EOPNOTSUPP;
763
764 /* We have no filtering of any kind */
765 if (event->attr.exclude_user ||
766 event->attr.exclude_kernel ||
767 event->attr.exclude_hv ||
768 event->attr.exclude_idle ||
769 event->attr.exclude_host ||
770 event->attr.exclude_guest)
771 return -EINVAL;
772
773 /*
774 * Following the example set by other "uncore" PMUs, we accept any CPU
775 * and rewrite its affinity dynamically rather than having perf core
776 * handle cpu == -1 and pid == -1 for this case.
777 *
778 * The perf core will pin online CPUs for the duration of this call and
779 * the event being installed into its context, so the PMU's CPU can't
780 * change under our feet.
781 */
782 cpu = cpumask_first(&cci_pmu->cpus);
783 if (event->cpu < 0 || cpu < 0)
784 return -EINVAL;
785 event->cpu = cpu;
786
787 event->destroy = hw_perf_event_destroy;
788 if (!atomic_inc_not_zero(active_events)) {
789 mutex_lock(&cci_pmu->reserve_mutex);
790 if (atomic_read(active_events) == 0)
791 err = cci_pmu_get_hw(cci_pmu);
792 if (!err)
793 atomic_inc(active_events);
794 mutex_unlock(&cci_pmu->reserve_mutex);
795 }
796 if (err)
797 return err;
798
799 err = __hw_perf_event_init(event);
800 if (err)
801 hw_perf_event_destroy(event);
802
803 return err;
527} 804}
528 805
529static int cci_pmu_init(struct arm_pmu *cci_pmu, struct platform_device *pdev) 806static ssize_t pmu_attr_cpumask_show(struct device *dev,
807 struct device_attribute *attr, char *buf)
530{ 808{
531 *cci_pmu = (struct arm_pmu){ 809 int n = cpulist_scnprintf(buf, PAGE_SIZE - 2, &pmu->cpus);
532 .name = pmu_names[probe_cci_revision()], 810
533 .max_period = (1LLU << 32) - 1, 811 buf[n++] = '\n';
534 .get_hw_events = pmu_get_hw_events, 812 buf[n] = '\0';
535 .get_event_idx = pmu_get_event_idx, 813 return n;
536 .map_event = pmu_map_event, 814}
537 .request_irq = pmu_request_irq, 815
538 .handle_irq = pmu_handle_irq, 816static DEVICE_ATTR(cpumask, S_IRUGO, pmu_attr_cpumask_show, NULL);
539 .free_irq = pmu_free_irq, 817
540 .enable = pmu_enable_event, 818static struct attribute *pmu_attrs[] = {
541 .disable = pmu_disable_event, 819 &dev_attr_cpumask.attr,
542 .start = pmu_start, 820 NULL,
543 .stop = pmu_stop, 821};
544 .read_counter = pmu_read_counter, 822
545 .write_counter = pmu_write_counter, 823static struct attribute_group pmu_attr_group = {
824 .attrs = pmu_attrs,
825};
826
827static const struct attribute_group *pmu_attr_groups[] = {
828 &pmu_attr_group,
829 NULL
830};
831
832static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev)
833{
834 char *name = pmu_names[probe_cci_revision()];
835 cci_pmu->pmu = (struct pmu) {
836 .name = pmu_names[probe_cci_revision()],
837 .task_ctx_nr = perf_invalid_context,
838 .pmu_enable = cci_pmu_enable,
839 .pmu_disable = cci_pmu_disable,
840 .event_init = cci_pmu_event_init,
841 .add = cci_pmu_add,
842 .del = cci_pmu_del,
843 .start = cci_pmu_start,
844 .stop = cci_pmu_stop,
845 .read = pmu_read,
846 .attr_groups = pmu_attr_groups,
546 }; 847 };
547 848
548 cci_pmu->plat_device = pdev; 849 cci_pmu->plat_device = pdev;
549 cci_pmu->num_events = pmu_get_max_counters(); 850 cci_pmu->num_events = pmu_get_max_counters();
550 851
551 return armpmu_register(cci_pmu, -1); 852 return perf_pmu_register(&cci_pmu->pmu, name, -1);
552} 853}
553 854
855static int cci_pmu_cpu_notifier(struct notifier_block *self,
856 unsigned long action, void *hcpu)
857{
858 unsigned int cpu = (long)hcpu;
859 unsigned int target;
860
861 switch (action & ~CPU_TASKS_FROZEN) {
862 case CPU_DOWN_PREPARE:
863 if (!cpumask_test_and_clear_cpu(cpu, &pmu->cpus))
864 break;
865 target = cpumask_any_but(cpu_online_mask, cpu);
866 if (target < 0) // UP, last CPU
867 break;
868 /*
869 * TODO: migrate context once core races on event->ctx have
870 * been fixed.
871 */
872 cpumask_set_cpu(target, &pmu->cpus);
873 default:
874 break;
875 }
876
877 return NOTIFY_OK;
878}
879
880static struct notifier_block cci_pmu_cpu_nb = {
881 .notifier_call = cci_pmu_cpu_notifier,
882 /*
883 * to migrate uncore events, our notifier should be executed
884 * before perf core's notifier.
885 */
886 .priority = CPU_PRI_PERF + 1,
887};
888
554static const struct of_device_id arm_cci_pmu_matches[] = { 889static const struct of_device_id arm_cci_pmu_matches[] = {
555 { 890 {
556 .compatible = "arm,cci-400-pmu", 891 .compatible = "arm,cci-400-pmu",
@@ -604,15 +939,16 @@ static int cci_pmu_probe(struct platform_device *pdev)
604 return -EINVAL; 939 return -EINVAL;
605 } 940 }
606 941
607 pmu->cci_pmu = devm_kzalloc(&pdev->dev, sizeof(*(pmu->cci_pmu)), GFP_KERNEL);
608 if (!pmu->cci_pmu)
609 return -ENOMEM;
610
611 pmu->hw_events.events = pmu->events;
612 pmu->hw_events.used_mask = pmu->used_mask;
613 raw_spin_lock_init(&pmu->hw_events.pmu_lock); 942 raw_spin_lock_init(&pmu->hw_events.pmu_lock);
943 mutex_init(&pmu->reserve_mutex);
944 atomic_set(&pmu->active_events, 0);
945 cpumask_set_cpu(smp_processor_id(), &pmu->cpus);
946
947 ret = register_cpu_notifier(&cci_pmu_cpu_nb);
948 if (ret)
949 return ret;
614 950
615 ret = cci_pmu_init(pmu->cci_pmu, pdev); 951 ret = cci_pmu_init(pmu, pdev);
616 if (ret) 952 if (ret)
617 return ret; 953 return ret;
618 954
diff --git a/drivers/bus/brcmstb_gisb.c b/drivers/bus/brcmstb_gisb.c
index f2cd6a2d40b4..46de8dc39eb4 100644
--- a/drivers/bus/brcmstb_gisb.c
+++ b/drivers/bus/brcmstb_gisb.c
@@ -23,35 +23,103 @@
23#include <linux/list.h> 23#include <linux/list.h>
24#include <linux/of.h> 24#include <linux/of.h>
25#include <linux/bitops.h> 25#include <linux/bitops.h>
26#include <linux/pm.h>
26 27
28#ifdef CONFIG_ARM
27#include <asm/bug.h> 29#include <asm/bug.h>
28#include <asm/signal.h> 30#include <asm/signal.h>
31#endif
29 32
30#define ARB_TIMER 0x008
31#define ARB_ERR_CAP_CLR 0x7e4
32#define ARB_ERR_CAP_CLEAR (1 << 0) 33#define ARB_ERR_CAP_CLEAR (1 << 0)
33#define ARB_ERR_CAP_HI_ADDR 0x7e8
34#define ARB_ERR_CAP_ADDR 0x7ec
35#define ARB_ERR_CAP_DATA 0x7f0
36#define ARB_ERR_CAP_STATUS 0x7f4
37#define ARB_ERR_CAP_STATUS_TIMEOUT (1 << 12) 34#define ARB_ERR_CAP_STATUS_TIMEOUT (1 << 12)
38#define ARB_ERR_CAP_STATUS_TEA (1 << 11) 35#define ARB_ERR_CAP_STATUS_TEA (1 << 11)
39#define ARB_ERR_CAP_STATUS_BS_SHIFT (1 << 2) 36#define ARB_ERR_CAP_STATUS_BS_SHIFT (1 << 2)
40#define ARB_ERR_CAP_STATUS_BS_MASK 0x3c 37#define ARB_ERR_CAP_STATUS_BS_MASK 0x3c
41#define ARB_ERR_CAP_STATUS_WRITE (1 << 1) 38#define ARB_ERR_CAP_STATUS_WRITE (1 << 1)
42#define ARB_ERR_CAP_STATUS_VALID (1 << 0) 39#define ARB_ERR_CAP_STATUS_VALID (1 << 0)
43#define ARB_ERR_CAP_MASTER 0x7f8 40
41enum {
42 ARB_TIMER,
43 ARB_ERR_CAP_CLR,
44 ARB_ERR_CAP_HI_ADDR,
45 ARB_ERR_CAP_ADDR,
46 ARB_ERR_CAP_DATA,
47 ARB_ERR_CAP_STATUS,
48 ARB_ERR_CAP_MASTER,
49};
50
51static const int gisb_offsets_bcm7038[] = {
52 [ARB_TIMER] = 0x00c,
53 [ARB_ERR_CAP_CLR] = 0x0c4,
54 [ARB_ERR_CAP_HI_ADDR] = -1,
55 [ARB_ERR_CAP_ADDR] = 0x0c8,
56 [ARB_ERR_CAP_DATA] = 0x0cc,
57 [ARB_ERR_CAP_STATUS] = 0x0d0,
58 [ARB_ERR_CAP_MASTER] = -1,
59};
60
61static const int gisb_offsets_bcm7400[] = {
62 [ARB_TIMER] = 0x00c,
63 [ARB_ERR_CAP_CLR] = 0x0c8,
64 [ARB_ERR_CAP_HI_ADDR] = -1,
65 [ARB_ERR_CAP_ADDR] = 0x0cc,
66 [ARB_ERR_CAP_DATA] = 0x0d0,
67 [ARB_ERR_CAP_STATUS] = 0x0d4,
68 [ARB_ERR_CAP_MASTER] = 0x0d8,
69};
70
71static const int gisb_offsets_bcm7435[] = {
72 [ARB_TIMER] = 0x00c,
73 [ARB_ERR_CAP_CLR] = 0x168,
74 [ARB_ERR_CAP_HI_ADDR] = -1,
75 [ARB_ERR_CAP_ADDR] = 0x16c,
76 [ARB_ERR_CAP_DATA] = 0x170,
77 [ARB_ERR_CAP_STATUS] = 0x174,
78 [ARB_ERR_CAP_MASTER] = 0x178,
79};
80
81static const int gisb_offsets_bcm7445[] = {
82 [ARB_TIMER] = 0x008,
83 [ARB_ERR_CAP_CLR] = 0x7e4,
84 [ARB_ERR_CAP_HI_ADDR] = 0x7e8,
85 [ARB_ERR_CAP_ADDR] = 0x7ec,
86 [ARB_ERR_CAP_DATA] = 0x7f0,
87 [ARB_ERR_CAP_STATUS] = 0x7f4,
88 [ARB_ERR_CAP_MASTER] = 0x7f8,
89};
44 90
45struct brcmstb_gisb_arb_device { 91struct brcmstb_gisb_arb_device {
46 void __iomem *base; 92 void __iomem *base;
93 const int *gisb_offsets;
47 struct mutex lock; 94 struct mutex lock;
48 struct list_head next; 95 struct list_head next;
49 u32 valid_mask; 96 u32 valid_mask;
50 const char *master_names[sizeof(u32) * BITS_PER_BYTE]; 97 const char *master_names[sizeof(u32) * BITS_PER_BYTE];
98 u32 saved_timeout;
51}; 99};
52 100
53static LIST_HEAD(brcmstb_gisb_arb_device_list); 101static LIST_HEAD(brcmstb_gisb_arb_device_list);
54 102
103static u32 gisb_read(struct brcmstb_gisb_arb_device *gdev, int reg)
104{
105 int offset = gdev->gisb_offsets[reg];
106
107 /* return 1 if the hardware doesn't have ARB_ERR_CAP_MASTER */
108 if (offset == -1)
109 return 1;
110
111 return ioread32(gdev->base + offset);
112}
113
114static void gisb_write(struct brcmstb_gisb_arb_device *gdev, u32 val, int reg)
115{
116 int offset = gdev->gisb_offsets[reg];
117
118 if (offset == -1)
119 return;
120 iowrite32(val, gdev->base + reg);
121}
122
55static ssize_t gisb_arb_get_timeout(struct device *dev, 123static ssize_t gisb_arb_get_timeout(struct device *dev,
56 struct device_attribute *attr, 124 struct device_attribute *attr,
57 char *buf) 125 char *buf)
@@ -61,7 +129,7 @@ static ssize_t gisb_arb_get_timeout(struct device *dev,
61 u32 timeout; 129 u32 timeout;
62 130
63 mutex_lock(&gdev->lock); 131 mutex_lock(&gdev->lock);
64 timeout = ioread32(gdev->base + ARB_TIMER); 132 timeout = gisb_read(gdev, ARB_TIMER);
65 mutex_unlock(&gdev->lock); 133 mutex_unlock(&gdev->lock);
66 134
67 return sprintf(buf, "%d", timeout); 135 return sprintf(buf, "%d", timeout);
@@ -83,7 +151,7 @@ static ssize_t gisb_arb_set_timeout(struct device *dev,
83 return -EINVAL; 151 return -EINVAL;
84 152
85 mutex_lock(&gdev->lock); 153 mutex_lock(&gdev->lock);
86 iowrite32(val, gdev->base + ARB_TIMER); 154 gisb_write(gdev, val, ARB_TIMER);
87 mutex_unlock(&gdev->lock); 155 mutex_unlock(&gdev->lock);
88 156
89 return count; 157 return count;
@@ -110,18 +178,18 @@ static int brcmstb_gisb_arb_decode_addr(struct brcmstb_gisb_arb_device *gdev,
110 const char *m_name; 178 const char *m_name;
111 char m_fmt[11]; 179 char m_fmt[11];
112 180
113 cap_status = ioread32(gdev->base + ARB_ERR_CAP_STATUS); 181 cap_status = gisb_read(gdev, ARB_ERR_CAP_STATUS);
114 182
115 /* Invalid captured address, bail out */ 183 /* Invalid captured address, bail out */
116 if (!(cap_status & ARB_ERR_CAP_STATUS_VALID)) 184 if (!(cap_status & ARB_ERR_CAP_STATUS_VALID))
117 return 1; 185 return 1;
118 186
119 /* Read the address and master */ 187 /* Read the address and master */
120 arb_addr = ioread32(gdev->base + ARB_ERR_CAP_ADDR) & 0xffffffff; 188 arb_addr = gisb_read(gdev, ARB_ERR_CAP_ADDR) & 0xffffffff;
121#if (IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT)) 189#if (IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT))
122 arb_addr |= (u64)ioread32(gdev->base + ARB_ERR_CAP_HI_ADDR) << 32; 190 arb_addr |= (u64)gisb_read(gdev, ARB_ERR_CAP_HI_ADDR) << 32;
123#endif 191#endif
124 master = ioread32(gdev->base + ARB_ERR_CAP_MASTER); 192 master = gisb_read(gdev, ARB_ERR_CAP_MASTER);
125 193
126 m_name = brcmstb_gisb_master_to_str(gdev, master); 194 m_name = brcmstb_gisb_master_to_str(gdev, master);
127 if (!m_name) { 195 if (!m_name) {
@@ -136,11 +204,12 @@ static int brcmstb_gisb_arb_decode_addr(struct brcmstb_gisb_arb_device *gdev,
136 m_name); 204 m_name);
137 205
138 /* clear the GISB error */ 206 /* clear the GISB error */
139 iowrite32(ARB_ERR_CAP_CLEAR, gdev->base + ARB_ERR_CAP_CLR); 207 gisb_write(gdev, ARB_ERR_CAP_CLEAR, ARB_ERR_CAP_CLR);
140 208
141 return 0; 209 return 0;
142} 210}
143 211
212#ifdef CONFIG_ARM
144static int brcmstb_bus_error_handler(unsigned long addr, unsigned int fsr, 213static int brcmstb_bus_error_handler(unsigned long addr, unsigned int fsr,
145 struct pt_regs *regs) 214 struct pt_regs *regs)
146{ 215{
@@ -159,12 +228,7 @@ static int brcmstb_bus_error_handler(unsigned long addr, unsigned int fsr,
159 228
160 return ret; 229 return ret;
161} 230}
162 231#endif
163void __init brcmstb_hook_fault_code(void)
164{
165 hook_fault_code(22, brcmstb_bus_error_handler, SIGBUS, 0,
166 "imprecise external abort");
167}
168 232
169static irqreturn_t brcmstb_gisb_timeout_handler(int irq, void *dev_id) 233static irqreturn_t brcmstb_gisb_timeout_handler(int irq, void *dev_id)
170{ 234{
@@ -192,10 +256,20 @@ static struct attribute_group gisb_arb_sysfs_attr_group = {
192 .attrs = gisb_arb_sysfs_attrs, 256 .attrs = gisb_arb_sysfs_attrs,
193}; 257};
194 258
195static int brcmstb_gisb_arb_probe(struct platform_device *pdev) 259static const struct of_device_id brcmstb_gisb_arb_of_match[] = {
260 { .compatible = "brcm,gisb-arb", .data = gisb_offsets_bcm7445 },
261 { .compatible = "brcm,bcm7445-gisb-arb", .data = gisb_offsets_bcm7445 },
262 { .compatible = "brcm,bcm7435-gisb-arb", .data = gisb_offsets_bcm7435 },
263 { .compatible = "brcm,bcm7400-gisb-arb", .data = gisb_offsets_bcm7400 },
264 { .compatible = "brcm,bcm7038-gisb-arb", .data = gisb_offsets_bcm7038 },
265 { },
266};
267
268static int __init brcmstb_gisb_arb_probe(struct platform_device *pdev)
196{ 269{
197 struct device_node *dn = pdev->dev.of_node; 270 struct device_node *dn = pdev->dev.of_node;
198 struct brcmstb_gisb_arb_device *gdev; 271 struct brcmstb_gisb_arb_device *gdev;
272 const struct of_device_id *of_id;
199 struct resource *r; 273 struct resource *r;
200 int err, timeout_irq, tea_irq; 274 int err, timeout_irq, tea_irq;
201 unsigned int num_masters, j = 0; 275 unsigned int num_masters, j = 0;
@@ -216,6 +290,13 @@ static int brcmstb_gisb_arb_probe(struct platform_device *pdev)
216 if (IS_ERR(gdev->base)) 290 if (IS_ERR(gdev->base))
217 return PTR_ERR(gdev->base); 291 return PTR_ERR(gdev->base);
218 292
293 of_id = of_match_node(brcmstb_gisb_arb_of_match, dn);
294 if (!of_id) {
295 pr_err("failed to look up compatible string\n");
296 return -EINVAL;
297 }
298 gdev->gisb_offsets = of_id->data;
299
219 err = devm_request_irq(&pdev->dev, timeout_irq, 300 err = devm_request_irq(&pdev->dev, timeout_irq,
220 brcmstb_gisb_timeout_handler, 0, pdev->name, 301 brcmstb_gisb_timeout_handler, 0, pdev->name,
221 gdev); 302 gdev);
@@ -261,29 +342,63 @@ static int brcmstb_gisb_arb_probe(struct platform_device *pdev)
261 342
262 list_add_tail(&gdev->next, &brcmstb_gisb_arb_device_list); 343 list_add_tail(&gdev->next, &brcmstb_gisb_arb_device_list);
263 344
345#ifdef CONFIG_ARM
346 hook_fault_code(22, brcmstb_bus_error_handler, SIGBUS, 0,
347 "imprecise external abort");
348#endif
349
264 dev_info(&pdev->dev, "registered mem: %p, irqs: %d, %d\n", 350 dev_info(&pdev->dev, "registered mem: %p, irqs: %d, %d\n",
265 gdev->base, timeout_irq, tea_irq); 351 gdev->base, timeout_irq, tea_irq);
266 352
267 return 0; 353 return 0;
268} 354}
269 355
270static const struct of_device_id brcmstb_gisb_arb_of_match[] = { 356#ifdef CONFIG_PM_SLEEP
271 { .compatible = "brcm,gisb-arb" }, 357static int brcmstb_gisb_arb_suspend(struct device *dev)
272 { }, 358{
359 struct platform_device *pdev = to_platform_device(dev);
360 struct brcmstb_gisb_arb_device *gdev = platform_get_drvdata(pdev);
361
362 gdev->saved_timeout = gisb_read(gdev, ARB_TIMER);
363
364 return 0;
365}
366
367/* Make sure we provide the same timeout value that was configured before, and
368 * do this before the GISB timeout interrupt handler has any chance to run.
369 */
370static int brcmstb_gisb_arb_resume_noirq(struct device *dev)
371{
372 struct platform_device *pdev = to_platform_device(dev);
373 struct brcmstb_gisb_arb_device *gdev = platform_get_drvdata(pdev);
374
375 gisb_write(gdev, gdev->saved_timeout, ARB_TIMER);
376
377 return 0;
378}
379#else
380#define brcmstb_gisb_arb_suspend NULL
381#define brcmstb_gisb_arb_resume_noirq NULL
382#endif
383
384static const struct dev_pm_ops brcmstb_gisb_arb_pm_ops = {
385 .suspend = brcmstb_gisb_arb_suspend,
386 .resume_noirq = brcmstb_gisb_arb_resume_noirq,
273}; 387};
274 388
275static struct platform_driver brcmstb_gisb_arb_driver = { 389static struct platform_driver brcmstb_gisb_arb_driver = {
276 .probe = brcmstb_gisb_arb_probe,
277 .driver = { 390 .driver = {
278 .name = "brcm-gisb-arb", 391 .name = "brcm-gisb-arb",
279 .owner = THIS_MODULE, 392 .owner = THIS_MODULE,
280 .of_match_table = brcmstb_gisb_arb_of_match, 393 .of_match_table = brcmstb_gisb_arb_of_match,
394 .pm = &brcmstb_gisb_arb_pm_ops,
281 }, 395 },
282}; 396};
283 397
284static int __init brcm_gisb_driver_init(void) 398static int __init brcm_gisb_driver_init(void)
285{ 399{
286 return platform_driver_register(&brcmstb_gisb_arb_driver); 400 return platform_driver_probe(&brcmstb_gisb_arb_driver,
401 brcmstb_gisb_arb_probe);
287} 402}
288 403
289module_init(brcm_gisb_driver_init); 404module_init(brcm_gisb_driver_init);
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
index 26c3779d871d..eb7682dc123b 100644
--- a/drivers/bus/mvebu-mbus.c
+++ b/drivers/bus/mvebu-mbus.c
@@ -57,6 +57,7 @@
57#include <linux/of_address.h> 57#include <linux/of_address.h>
58#include <linux/debugfs.h> 58#include <linux/debugfs.h>
59#include <linux/log2.h> 59#include <linux/log2.h>
60#include <linux/syscore_ops.h>
60 61
61/* 62/*
62 * DDR target is the same on all platforms. 63 * DDR target is the same on all platforms.
@@ -94,20 +95,42 @@
94 95
95#define DOVE_DDR_BASE_CS_OFF(n) ((n) << 4) 96#define DOVE_DDR_BASE_CS_OFF(n) ((n) << 4)
96 97
98/* Relative to mbusbridge_base */
99#define MBUS_BRIDGE_CTRL_OFF 0x0
100#define MBUS_BRIDGE_BASE_OFF 0x4
101
102/* Maximum number of windows, for all known platforms */
103#define MBUS_WINS_MAX 20
104
97struct mvebu_mbus_state; 105struct mvebu_mbus_state;
98 106
99struct mvebu_mbus_soc_data { 107struct mvebu_mbus_soc_data {
100 unsigned int num_wins; 108 unsigned int num_wins;
101 unsigned int num_remappable_wins; 109 unsigned int num_remappable_wins;
110 bool has_mbus_bridge;
102 unsigned int (*win_cfg_offset)(const int win); 111 unsigned int (*win_cfg_offset)(const int win);
103 void (*setup_cpu_target)(struct mvebu_mbus_state *s); 112 void (*setup_cpu_target)(struct mvebu_mbus_state *s);
113 int (*save_cpu_target)(struct mvebu_mbus_state *s,
114 u32 *store_addr);
104 int (*show_cpu_target)(struct mvebu_mbus_state *s, 115 int (*show_cpu_target)(struct mvebu_mbus_state *s,
105 struct seq_file *seq, void *v); 116 struct seq_file *seq, void *v);
106}; 117};
107 118
119/*
120 * Used to store the state of one MBus window accross suspend/resume.
121 */
122struct mvebu_mbus_win_data {
123 u32 ctrl;
124 u32 base;
125 u32 remap_lo;
126 u32 remap_hi;
127};
128
108struct mvebu_mbus_state { 129struct mvebu_mbus_state {
109 void __iomem *mbuswins_base; 130 void __iomem *mbuswins_base;
110 void __iomem *sdramwins_base; 131 void __iomem *sdramwins_base;
132 void __iomem *mbusbridge_base;
133 phys_addr_t sdramwins_phys_base;
111 struct dentry *debugfs_root; 134 struct dentry *debugfs_root;
112 struct dentry *debugfs_sdram; 135 struct dentry *debugfs_sdram;
113 struct dentry *debugfs_devs; 136 struct dentry *debugfs_devs;
@@ -115,6 +138,11 @@ struct mvebu_mbus_state {
115 struct resource pcie_io_aperture; 138 struct resource pcie_io_aperture;
116 const struct mvebu_mbus_soc_data *soc; 139 const struct mvebu_mbus_soc_data *soc;
117 int hw_io_coherency; 140 int hw_io_coherency;
141
142 /* Used during suspend/resume */
143 u32 mbus_bridge_ctrl;
144 u32 mbus_bridge_base;
145 struct mvebu_mbus_win_data wins[MBUS_WINS_MAX];
118}; 146};
119 147
120static struct mvebu_mbus_state mbus_state; 148static struct mvebu_mbus_state mbus_state;
@@ -516,6 +544,28 @@ mvebu_mbus_default_setup_cpu_target(struct mvebu_mbus_state *mbus)
516 mvebu_mbus_dram_info.num_cs = cs; 544 mvebu_mbus_dram_info.num_cs = cs;
517} 545}
518 546
547static int
548mvebu_mbus_default_save_cpu_target(struct mvebu_mbus_state *mbus,
549 u32 *store_addr)
550{
551 int i;
552
553 for (i = 0; i < 4; i++) {
554 u32 base = readl(mbus->sdramwins_base + DDR_BASE_CS_OFF(i));
555 u32 size = readl(mbus->sdramwins_base + DDR_SIZE_CS_OFF(i));
556
557 writel(mbus->sdramwins_phys_base + DDR_BASE_CS_OFF(i),
558 store_addr++);
559 writel(base, store_addr++);
560 writel(mbus->sdramwins_phys_base + DDR_SIZE_CS_OFF(i),
561 store_addr++);
562 writel(size, store_addr++);
563 }
564
565 /* We've written 16 words to the store address */
566 return 16;
567}
568
519static void __init 569static void __init
520mvebu_mbus_dove_setup_cpu_target(struct mvebu_mbus_state *mbus) 570mvebu_mbus_dove_setup_cpu_target(struct mvebu_mbus_state *mbus)
521{ 571{
@@ -546,10 +596,35 @@ mvebu_mbus_dove_setup_cpu_target(struct mvebu_mbus_state *mbus)
546 mvebu_mbus_dram_info.num_cs = cs; 596 mvebu_mbus_dram_info.num_cs = cs;
547} 597}
548 598
599static int
600mvebu_mbus_dove_save_cpu_target(struct mvebu_mbus_state *mbus,
601 u32 *store_addr)
602{
603 int i;
604
605 for (i = 0; i < 2; i++) {
606 u32 map = readl(mbus->sdramwins_base + DOVE_DDR_BASE_CS_OFF(i));
607
608 writel(mbus->sdramwins_phys_base + DOVE_DDR_BASE_CS_OFF(i),
609 store_addr++);
610 writel(map, store_addr++);
611 }
612
613 /* We've written 4 words to the store address */
614 return 4;
615}
616
617int mvebu_mbus_save_cpu_target(u32 *store_addr)
618{
619 return mbus_state.soc->save_cpu_target(&mbus_state, store_addr);
620}
621
549static const struct mvebu_mbus_soc_data armada_370_xp_mbus_data = { 622static const struct mvebu_mbus_soc_data armada_370_xp_mbus_data = {
550 .num_wins = 20, 623 .num_wins = 20,
551 .num_remappable_wins = 8, 624 .num_remappable_wins = 8,
625 .has_mbus_bridge = true,
552 .win_cfg_offset = armada_370_xp_mbus_win_offset, 626 .win_cfg_offset = armada_370_xp_mbus_win_offset,
627 .save_cpu_target = mvebu_mbus_default_save_cpu_target,
553 .setup_cpu_target = mvebu_mbus_default_setup_cpu_target, 628 .setup_cpu_target = mvebu_mbus_default_setup_cpu_target,
554 .show_cpu_target = mvebu_sdram_debug_show_orion, 629 .show_cpu_target = mvebu_sdram_debug_show_orion,
555}; 630};
@@ -558,6 +633,7 @@ static const struct mvebu_mbus_soc_data kirkwood_mbus_data = {
558 .num_wins = 8, 633 .num_wins = 8,
559 .num_remappable_wins = 4, 634 .num_remappable_wins = 4,
560 .win_cfg_offset = orion_mbus_win_offset, 635 .win_cfg_offset = orion_mbus_win_offset,
636 .save_cpu_target = mvebu_mbus_default_save_cpu_target,
561 .setup_cpu_target = mvebu_mbus_default_setup_cpu_target, 637 .setup_cpu_target = mvebu_mbus_default_setup_cpu_target,
562 .show_cpu_target = mvebu_sdram_debug_show_orion, 638 .show_cpu_target = mvebu_sdram_debug_show_orion,
563}; 639};
@@ -566,6 +642,7 @@ static const struct mvebu_mbus_soc_data dove_mbus_data = {
566 .num_wins = 8, 642 .num_wins = 8,
567 .num_remappable_wins = 4, 643 .num_remappable_wins = 4,
568 .win_cfg_offset = orion_mbus_win_offset, 644 .win_cfg_offset = orion_mbus_win_offset,
645 .save_cpu_target = mvebu_mbus_dove_save_cpu_target,
569 .setup_cpu_target = mvebu_mbus_dove_setup_cpu_target, 646 .setup_cpu_target = mvebu_mbus_dove_setup_cpu_target,
570 .show_cpu_target = mvebu_sdram_debug_show_dove, 647 .show_cpu_target = mvebu_sdram_debug_show_dove,
571}; 648};
@@ -578,6 +655,7 @@ static const struct mvebu_mbus_soc_data orion5x_4win_mbus_data = {
578 .num_wins = 8, 655 .num_wins = 8,
579 .num_remappable_wins = 4, 656 .num_remappable_wins = 4,
580 .win_cfg_offset = orion_mbus_win_offset, 657 .win_cfg_offset = orion_mbus_win_offset,
658 .save_cpu_target = mvebu_mbus_default_save_cpu_target,
581 .setup_cpu_target = mvebu_mbus_default_setup_cpu_target, 659 .setup_cpu_target = mvebu_mbus_default_setup_cpu_target,
582 .show_cpu_target = mvebu_sdram_debug_show_orion, 660 .show_cpu_target = mvebu_sdram_debug_show_orion,
583}; 661};
@@ -586,6 +664,7 @@ static const struct mvebu_mbus_soc_data orion5x_2win_mbus_data = {
586 .num_wins = 8, 664 .num_wins = 8,
587 .num_remappable_wins = 2, 665 .num_remappable_wins = 2,
588 .win_cfg_offset = orion_mbus_win_offset, 666 .win_cfg_offset = orion_mbus_win_offset,
667 .save_cpu_target = mvebu_mbus_default_save_cpu_target,
589 .setup_cpu_target = mvebu_mbus_default_setup_cpu_target, 668 .setup_cpu_target = mvebu_mbus_default_setup_cpu_target,
590 .show_cpu_target = mvebu_sdram_debug_show_orion, 669 .show_cpu_target = mvebu_sdram_debug_show_orion,
591}; 670};
@@ -594,6 +673,7 @@ static const struct mvebu_mbus_soc_data mv78xx0_mbus_data = {
594 .num_wins = 14, 673 .num_wins = 14,
595 .num_remappable_wins = 8, 674 .num_remappable_wins = 8,
596 .win_cfg_offset = mv78xx0_mbus_win_offset, 675 .win_cfg_offset = mv78xx0_mbus_win_offset,
676 .save_cpu_target = mvebu_mbus_default_save_cpu_target,
597 .setup_cpu_target = mvebu_mbus_default_setup_cpu_target, 677 .setup_cpu_target = mvebu_mbus_default_setup_cpu_target,
598 .show_cpu_target = mvebu_sdram_debug_show_orion, 678 .show_cpu_target = mvebu_sdram_debug_show_orion,
599}; 679};
@@ -698,11 +778,73 @@ static __init int mvebu_mbus_debugfs_init(void)
698} 778}
699fs_initcall(mvebu_mbus_debugfs_init); 779fs_initcall(mvebu_mbus_debugfs_init);
700 780
781static int mvebu_mbus_suspend(void)
782{
783 struct mvebu_mbus_state *s = &mbus_state;
784 int win;
785
786 if (!s->mbusbridge_base)
787 return -ENODEV;
788
789 for (win = 0; win < s->soc->num_wins; win++) {
790 void __iomem *addr = s->mbuswins_base +
791 s->soc->win_cfg_offset(win);
792
793 s->wins[win].base = readl(addr + WIN_BASE_OFF);
794 s->wins[win].ctrl = readl(addr + WIN_CTRL_OFF);
795
796 if (win >= s->soc->num_remappable_wins)
797 continue;
798
799 s->wins[win].remap_lo = readl(addr + WIN_REMAP_LO_OFF);
800 s->wins[win].remap_hi = readl(addr + WIN_REMAP_HI_OFF);
801 }
802
803 s->mbus_bridge_ctrl = readl(s->mbusbridge_base +
804 MBUS_BRIDGE_CTRL_OFF);
805 s->mbus_bridge_base = readl(s->mbusbridge_base +
806 MBUS_BRIDGE_BASE_OFF);
807
808 return 0;
809}
810
811static void mvebu_mbus_resume(void)
812{
813 struct mvebu_mbus_state *s = &mbus_state;
814 int win;
815
816 writel(s->mbus_bridge_ctrl,
817 s->mbusbridge_base + MBUS_BRIDGE_CTRL_OFF);
818 writel(s->mbus_bridge_base,
819 s->mbusbridge_base + MBUS_BRIDGE_BASE_OFF);
820
821 for (win = 0; win < s->soc->num_wins; win++) {
822 void __iomem *addr = s->mbuswins_base +
823 s->soc->win_cfg_offset(win);
824
825 writel(s->wins[win].base, addr + WIN_BASE_OFF);
826 writel(s->wins[win].ctrl, addr + WIN_CTRL_OFF);
827
828 if (win >= s->soc->num_remappable_wins)
829 continue;
830
831 writel(s->wins[win].remap_lo, addr + WIN_REMAP_LO_OFF);
832 writel(s->wins[win].remap_hi, addr + WIN_REMAP_HI_OFF);
833 }
834}
835
836struct syscore_ops mvebu_mbus_syscore_ops = {
837 .suspend = mvebu_mbus_suspend,
838 .resume = mvebu_mbus_resume,
839};
840
701static int __init mvebu_mbus_common_init(struct mvebu_mbus_state *mbus, 841static int __init mvebu_mbus_common_init(struct mvebu_mbus_state *mbus,
702 phys_addr_t mbuswins_phys_base, 842 phys_addr_t mbuswins_phys_base,
703 size_t mbuswins_size, 843 size_t mbuswins_size,
704 phys_addr_t sdramwins_phys_base, 844 phys_addr_t sdramwins_phys_base,
705 size_t sdramwins_size) 845 size_t sdramwins_size,
846 phys_addr_t mbusbridge_phys_base,
847 size_t mbusbridge_size)
706{ 848{
707 int win; 849 int win;
708 850
@@ -716,11 +858,26 @@ static int __init mvebu_mbus_common_init(struct mvebu_mbus_state *mbus,
716 return -ENOMEM; 858 return -ENOMEM;
717 } 859 }
718 860
861 mbus->sdramwins_phys_base = sdramwins_phys_base;
862
863 if (mbusbridge_phys_base) {
864 mbus->mbusbridge_base = ioremap(mbusbridge_phys_base,
865 mbusbridge_size);
866 if (!mbus->mbusbridge_base) {
867 iounmap(mbus->sdramwins_base);
868 iounmap(mbus->mbuswins_base);
869 return -ENOMEM;
870 }
871 } else
872 mbus->mbusbridge_base = NULL;
873
719 for (win = 0; win < mbus->soc->num_wins; win++) 874 for (win = 0; win < mbus->soc->num_wins; win++)
720 mvebu_mbus_disable_window(mbus, win); 875 mvebu_mbus_disable_window(mbus, win);
721 876
722 mbus->soc->setup_cpu_target(mbus); 877 mbus->soc->setup_cpu_target(mbus);
723 878
879 register_syscore_ops(&mvebu_mbus_syscore_ops);
880
724 return 0; 881 return 0;
725} 882}
726 883
@@ -746,7 +903,7 @@ int __init mvebu_mbus_init(const char *soc, phys_addr_t mbuswins_phys_base,
746 mbuswins_phys_base, 903 mbuswins_phys_base,
747 mbuswins_size, 904 mbuswins_size,
748 sdramwins_phys_base, 905 sdramwins_phys_base,
749 sdramwins_size); 906 sdramwins_size, 0, 0);
750} 907}
751 908
752#ifdef CONFIG_OF 909#ifdef CONFIG_OF
@@ -887,7 +1044,7 @@ static void __init mvebu_mbus_get_pcie_resources(struct device_node *np,
887 1044
888int __init mvebu_mbus_dt_init(bool is_coherent) 1045int __init mvebu_mbus_dt_init(bool is_coherent)
889{ 1046{
890 struct resource mbuswins_res, sdramwins_res; 1047 struct resource mbuswins_res, sdramwins_res, mbusbridge_res;
891 struct device_node *np, *controller; 1048 struct device_node *np, *controller;
892 const struct of_device_id *of_id; 1049 const struct of_device_id *of_id;
893 const __be32 *prop; 1050 const __be32 *prop;
@@ -923,6 +1080,19 @@ int __init mvebu_mbus_dt_init(bool is_coherent)
923 return -EINVAL; 1080 return -EINVAL;
924 } 1081 }
925 1082
1083 /*
1084 * Set the resource to 0 so that it can be left unmapped by
1085 * mvebu_mbus_common_init() if the DT doesn't carry the
1086 * necessary information. This is needed to preserve backward
1087 * compatibility.
1088 */
1089 memset(&mbusbridge_res, 0, sizeof(mbusbridge_res));
1090
1091 if (mbus_state.soc->has_mbus_bridge) {
1092 if (of_address_to_resource(controller, 2, &mbusbridge_res))
1093 pr_warn(FW_WARN "deprecated mbus-mvebu Device Tree, suspend/resume will not work\n");
1094 }
1095
926 mbus_state.hw_io_coherency = is_coherent; 1096 mbus_state.hw_io_coherency = is_coherent;
927 1097
928 /* Get optional pcie-{mem,io}-aperture properties */ 1098 /* Get optional pcie-{mem,io}-aperture properties */
@@ -933,7 +1103,9 @@ int __init mvebu_mbus_dt_init(bool is_coherent)
933 mbuswins_res.start, 1103 mbuswins_res.start,
934 resource_size(&mbuswins_res), 1104 resource_size(&mbuswins_res),
935 sdramwins_res.start, 1105 sdramwins_res.start,
936 resource_size(&sdramwins_res)); 1106 resource_size(&sdramwins_res),
1107 mbusbridge_res.start,
1108 resource_size(&mbusbridge_res));
937 if (ret) 1109 if (ret)
938 return ret; 1110 return ret;
939 1111
diff --git a/drivers/bus/omap_l3_noc.c b/drivers/bus/omap_l3_noc.c
index 531ae591783b..17d86595951c 100644
--- a/drivers/bus/omap_l3_noc.c
+++ b/drivers/bus/omap_l3_noc.c
@@ -222,10 +222,14 @@ static irqreturn_t l3_interrupt_handler(int irq, void *_l3)
222 } 222 }
223 223
224 /* Error found so break the for loop */ 224 /* Error found so break the for loop */
225 break; 225 return IRQ_HANDLED;
226 } 226 }
227 } 227 }
228 return IRQ_HANDLED; 228
229 dev_err(l3->dev, "L3 %s IRQ not handled!!\n",
230 inttype ? "debug" : "application");
231
232 return IRQ_NONE;
229} 233}
230 234
231static const struct of_device_id l3_noc_match[] = { 235static const struct of_device_id l3_noc_match[] = {
@@ -296,11 +300,66 @@ static int omap_l3_probe(struct platform_device *pdev)
296 return ret; 300 return ret;
297} 301}
298 302
303#ifdef CONFIG_PM
304
305/**
306 * l3_resume_noirq() - resume function for l3_noc
307 * @dev: pointer to l3_noc device structure
308 *
309 * We only have the resume handler only since we
310 * have already maintained the delta register
311 * configuration as part of configuring the system
312 */
313static int l3_resume_noirq(struct device *dev)
314{
315 struct omap_l3 *l3 = dev_get_drvdata(dev);
316 int i;
317 struct l3_flagmux_data *flag_mux;
318 void __iomem *base, *mask_regx = NULL;
319 u32 mask_val;
320
321 for (i = 0; i < l3->num_modules; i++) {
322 base = l3->l3_base[i];
323 flag_mux = l3->l3_flagmux[i];
324 if (!flag_mux->mask_app_bits && !flag_mux->mask_dbg_bits)
325 continue;
326
327 mask_regx = base + flag_mux->offset + L3_FLAGMUX_MASK0 +
328 (L3_APPLICATION_ERROR << 3);
329 mask_val = readl_relaxed(mask_regx);
330 mask_val &= ~(flag_mux->mask_app_bits);
331
332 writel_relaxed(mask_val, mask_regx);
333 mask_regx = base + flag_mux->offset + L3_FLAGMUX_MASK0 +
334 (L3_DEBUG_ERROR << 3);
335 mask_val = readl_relaxed(mask_regx);
336 mask_val &= ~(flag_mux->mask_dbg_bits);
337
338 writel_relaxed(mask_val, mask_regx);
339 }
340
341 /* Dummy read to force OCP barrier */
342 if (mask_regx)
343 (void)readl(mask_regx);
344
345 return 0;
346}
347
348static const struct dev_pm_ops l3_dev_pm_ops = {
349 .resume_noirq = l3_resume_noirq,
350};
351
352#define L3_DEV_PM_OPS (&l3_dev_pm_ops)
353#else
354#define L3_DEV_PM_OPS NULL
355#endif
356
299static struct platform_driver omap_l3_driver = { 357static struct platform_driver omap_l3_driver = {
300 .probe = omap_l3_probe, 358 .probe = omap_l3_probe,
301 .driver = { 359 .driver = {
302 .name = "omap_l3_noc", 360 .name = "omap_l3_noc",
303 .owner = THIS_MODULE, 361 .owner = THIS_MODULE,
362 .pm = L3_DEV_PM_OPS,
304 .of_match_table = of_match_ptr(l3_noc_match), 363 .of_match_table = of_match_ptr(l3_noc_match),
305 }, 364 },
306}; 365};
diff --git a/drivers/bus/omap_l3_smx.c b/drivers/bus/omap_l3_smx.c
index acc216491b8a..597fdaee7315 100644
--- a/drivers/bus/omap_l3_smx.c
+++ b/drivers/bus/omap_l3_smx.c
@@ -27,6 +27,10 @@
27#include <linux/platform_device.h> 27#include <linux/platform_device.h>
28#include <linux/interrupt.h> 28#include <linux/interrupt.h>
29#include <linux/io.h> 29#include <linux/io.h>
30#include <linux/module.h>
31#include <linux/of.h>
32#include <linux/of_device.h>
33
30#include "omap_l3_smx.h" 34#include "omap_l3_smx.h"
31 35
32static inline u64 omap3_l3_readll(void __iomem *base, u16 reg) 36static inline u64 omap3_l3_readll(void __iomem *base, u16 reg)
@@ -211,7 +215,17 @@ static irqreturn_t omap3_l3_app_irq(int irq, void *_l3)
211 return ret; 215 return ret;
212} 216}
213 217
214static int __init omap3_l3_probe(struct platform_device *pdev) 218#if IS_BUILTIN(CONFIG_OF)
219static const struct of_device_id omap3_l3_match[] = {
220 {
221 .compatible = "ti,omap3-l3-smx",
222 },
223 { },
224};
225MODULE_DEVICE_TABLE(of, omap3_l3_match);
226#endif
227
228static int omap3_l3_probe(struct platform_device *pdev)
215{ 229{
216 struct omap3_l3 *l3; 230 struct omap3_l3 *l3;
217 struct resource *res; 231 struct resource *res;
@@ -265,7 +279,7 @@ err0:
265 return ret; 279 return ret;
266} 280}
267 281
268static int __exit omap3_l3_remove(struct platform_device *pdev) 282static int omap3_l3_remove(struct platform_device *pdev)
269{ 283{
270 struct omap3_l3 *l3 = platform_get_drvdata(pdev); 284 struct omap3_l3 *l3 = platform_get_drvdata(pdev);
271 285
@@ -278,15 +292,17 @@ static int __exit omap3_l3_remove(struct platform_device *pdev)
278} 292}
279 293
280static struct platform_driver omap3_l3_driver = { 294static struct platform_driver omap3_l3_driver = {
281 .remove = __exit_p(omap3_l3_remove), 295 .probe = omap3_l3_probe,
296 .remove = omap3_l3_remove,
282 .driver = { 297 .driver = {
283 .name = "omap_l3_smx", 298 .name = "omap_l3_smx",
299 .of_match_table = of_match_ptr(omap3_l3_match),
284 }, 300 },
285}; 301};
286 302
287static int __init omap3_l3_init(void) 303static int __init omap3_l3_init(void)
288{ 304{
289 return platform_driver_probe(&omap3_l3_driver, omap3_l3_probe); 305 return platform_driver_register(&omap3_l3_driver);
290} 306}
291postcore_initcall_sync(omap3_l3_init); 307postcore_initcall_sync(omap3_l3_init);
292 308
diff --git a/drivers/char/hw_random/exynos-rng.c b/drivers/char/hw_random/exynos-rng.c
index 9f8277cc44b4..993efd7f6c7e 100644
--- a/drivers/char/hw_random/exynos-rng.c
+++ b/drivers/char/hw_random/exynos-rng.c
@@ -143,7 +143,7 @@ static int exynos_rng_remove(struct platform_device *pdev)
143 return 0; 143 return 0;
144} 144}
145 145
146#if defined(CONFIG_PM_SLEEP) || defined(CONFIG_PM_RUNTIME) 146#ifdef CONFIG_PM
147static int exynos_rng_runtime_suspend(struct device *dev) 147static int exynos_rng_runtime_suspend(struct device *dev)
148{ 148{
149 struct platform_device *pdev = to_platform_device(dev); 149 struct platform_device *pdev = to_platform_device(dev);
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 524b707894ef..4c58333b4257 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -84,9 +84,12 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
84} 84}
85#endif 85#endif
86 86
87void __weak unxlate_dev_mem_ptr(unsigned long phys, void *addr) 87#ifndef unxlate_dev_mem_ptr
88#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
89void __weak unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
88{ 90{
89} 91}
92#endif
90 93
91/* 94/*
92 * This funcion reads the *physical* memory. The f_pos points directly to the 95 * This funcion reads the *physical* memory. The f_pos points directly to the
@@ -97,7 +100,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
97{ 100{
98 phys_addr_t p = *ppos; 101 phys_addr_t p = *ppos;
99 ssize_t read, sz; 102 ssize_t read, sz;
100 char *ptr; 103 void *ptr;
101 104
102 if (p != *ppos) 105 if (p != *ppos)
103 return 0; 106 return 0;
@@ -400,7 +403,7 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
400 * uncached, then it must also be accessed uncached 403 * uncached, then it must also be accessed uncached
401 * by the kernel or data corruption may occur 404 * by the kernel or data corruption may occur
402 */ 405 */
403 kbuf = xlate_dev_kmem_ptr((char *)p); 406 kbuf = xlate_dev_kmem_ptr((void *)p);
404 407
405 if (copy_to_user(buf, kbuf, sz)) 408 if (copy_to_user(buf, kbuf, sz))
406 return -EFAULT; 409 return -EFAULT;
@@ -461,7 +464,7 @@ static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
461#endif 464#endif
462 465
463 while (count > 0) { 466 while (count > 0) {
464 char *ptr; 467 void *ptr;
465 468
466 sz = size_inside_page(p, count); 469 sz = size_inside_page(p, count);
467 470
@@ -470,7 +473,7 @@ static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
470 * it must also be accessed uncached by the kernel or data 473 * it must also be accessed uncached by the kernel or data
471 * corruption may occur. 474 * corruption may occur.
472 */ 475 */
473 ptr = xlate_dev_kmem_ptr((char *)p); 476 ptr = xlate_dev_kmem_ptr((void *)p);
474 477
475 copied = copy_from_user(ptr, buf, sz); 478 copied = copy_from_user(ptr, buf, sz);
476 if (copied) { 479 if (copied) {
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index cf7a561fad7c..de03df9dd7c9 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -355,7 +355,7 @@ static inline bool use_multiport(struct ports_device *portdev)
355 */ 355 */
356 if (!portdev->vdev) 356 if (!portdev->vdev)
357 return 0; 357 return 0;
358 return portdev->vdev->features[0] & (1 << VIRTIO_CONSOLE_F_MULTIPORT); 358 return __virtio_test_bit(portdev->vdev, VIRTIO_CONSOLE_F_MULTIPORT);
359} 359}
360 360
361static DEFINE_SPINLOCK(dma_bufs_lock); 361static DEFINE_SPINLOCK(dma_bufs_lock);
@@ -566,9 +566,9 @@ static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id,
566 if (!use_multiport(portdev)) 566 if (!use_multiport(portdev))
567 return 0; 567 return 0;
568 568
569 cpkt.id = port_id; 569 cpkt.id = cpu_to_virtio32(portdev->vdev, port_id);
570 cpkt.event = event; 570 cpkt.event = cpu_to_virtio16(portdev->vdev, event);
571 cpkt.value = value; 571 cpkt.value = cpu_to_virtio16(portdev->vdev, value);
572 572
573 vq = portdev->c_ovq; 573 vq = portdev->c_ovq;
574 574
@@ -669,8 +669,8 @@ done:
669 * Give out the data that's requested from the buffer that we have 669 * Give out the data that's requested from the buffer that we have
670 * queued up. 670 * queued up.
671 */ 671 */
672static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count, 672static ssize_t fill_readbuf(struct port *port, char __user *out_buf,
673 bool to_user) 673 size_t out_count, bool to_user)
674{ 674{
675 struct port_buffer *buf; 675 struct port_buffer *buf;
676 unsigned long flags; 676 unsigned long flags;
@@ -688,7 +688,8 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
688 if (ret) 688 if (ret)
689 return -EFAULT; 689 return -EFAULT;
690 } else { 690 } else {
691 memcpy(out_buf, buf->buf + buf->offset, out_count); 691 memcpy((__force char *)out_buf, buf->buf + buf->offset,
692 out_count);
692 } 693 }
693 694
694 buf->offset += out_count; 695 buf->offset += out_count;
@@ -1162,7 +1163,7 @@ static int get_chars(u32 vtermno, char *buf, int count)
1162 /* If we don't have an input queue yet, we can't get input. */ 1163 /* If we don't have an input queue yet, we can't get input. */
1163 BUG_ON(!port->in_vq); 1164 BUG_ON(!port->in_vq);
1164 1165
1165 return fill_readbuf(port, buf, count, false); 1166 return fill_readbuf(port, (__force char __user *)buf, count, false);
1166} 1167}
1167 1168
1168static void resize_console(struct port *port) 1169static void resize_console(struct port *port)
@@ -1602,7 +1603,8 @@ static void unplug_port(struct port *port)
1602} 1603}
1603 1604
1604/* Any private messages that the Host and Guest want to share */ 1605/* Any private messages that the Host and Guest want to share */
1605static void handle_control_message(struct ports_device *portdev, 1606static void handle_control_message(struct virtio_device *vdev,
1607 struct ports_device *portdev,
1606 struct port_buffer *buf) 1608 struct port_buffer *buf)
1607{ 1609{
1608 struct virtio_console_control *cpkt; 1610 struct virtio_console_control *cpkt;
@@ -1612,15 +1614,16 @@ static void handle_control_message(struct ports_device *portdev,
1612 1614
1613 cpkt = (struct virtio_console_control *)(buf->buf + buf->offset); 1615 cpkt = (struct virtio_console_control *)(buf->buf + buf->offset);
1614 1616
1615 port = find_port_by_id(portdev, cpkt->id); 1617 port = find_port_by_id(portdev, virtio32_to_cpu(vdev, cpkt->id));
1616 if (!port && cpkt->event != VIRTIO_CONSOLE_PORT_ADD) { 1618 if (!port &&
1619 cpkt->event != cpu_to_virtio16(vdev, VIRTIO_CONSOLE_PORT_ADD)) {
1617 /* No valid header at start of buffer. Drop it. */ 1620 /* No valid header at start of buffer. Drop it. */
1618 dev_dbg(&portdev->vdev->dev, 1621 dev_dbg(&portdev->vdev->dev,
1619 "Invalid index %u in control packet\n", cpkt->id); 1622 "Invalid index %u in control packet\n", cpkt->id);
1620 return; 1623 return;
1621 } 1624 }
1622 1625
1623 switch (cpkt->event) { 1626 switch (virtio16_to_cpu(vdev, cpkt->event)) {
1624 case VIRTIO_CONSOLE_PORT_ADD: 1627 case VIRTIO_CONSOLE_PORT_ADD:
1625 if (port) { 1628 if (port) {
1626 dev_dbg(&portdev->vdev->dev, 1629 dev_dbg(&portdev->vdev->dev,
@@ -1628,13 +1631,15 @@ static void handle_control_message(struct ports_device *portdev,
1628 send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1); 1631 send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
1629 break; 1632 break;
1630 } 1633 }
1631 if (cpkt->id >= portdev->config.max_nr_ports) { 1634 if (virtio32_to_cpu(vdev, cpkt->id) >=
1635 portdev->config.max_nr_ports) {
1632 dev_warn(&portdev->vdev->dev, 1636 dev_warn(&portdev->vdev->dev,
1633 "Request for adding port with out-of-bound id %u, max. supported id: %u\n", 1637 "Request for adding port with "
1638 "out-of-bound id %u, max. supported id: %u\n",
1634 cpkt->id, portdev->config.max_nr_ports - 1); 1639 cpkt->id, portdev->config.max_nr_ports - 1);
1635 break; 1640 break;
1636 } 1641 }
1637 add_port(portdev, cpkt->id); 1642 add_port(portdev, virtio32_to_cpu(vdev, cpkt->id));
1638 break; 1643 break;
1639 case VIRTIO_CONSOLE_PORT_REMOVE: 1644 case VIRTIO_CONSOLE_PORT_REMOVE:
1640 unplug_port(port); 1645 unplug_port(port);
@@ -1670,7 +1675,7 @@ static void handle_control_message(struct ports_device *portdev,
1670 break; 1675 break;
1671 } 1676 }
1672 case VIRTIO_CONSOLE_PORT_OPEN: 1677 case VIRTIO_CONSOLE_PORT_OPEN:
1673 port->host_connected = cpkt->value; 1678 port->host_connected = virtio16_to_cpu(vdev, cpkt->value);
1674 wake_up_interruptible(&port->waitqueue); 1679 wake_up_interruptible(&port->waitqueue);
1675 /* 1680 /*
1676 * If the host port got closed and the host had any 1681 * If the host port got closed and the host had any
@@ -1752,7 +1757,7 @@ static void control_work_handler(struct work_struct *work)
1752 buf->len = len; 1757 buf->len = len;
1753 buf->offset = 0; 1758 buf->offset = 0;
1754 1759
1755 handle_control_message(portdev, buf); 1760 handle_control_message(vq->vdev, portdev, buf);
1756 1761
1757 spin_lock(&portdev->c_ivq_lock); 1762 spin_lock(&portdev->c_ivq_lock);
1758 if (add_inbuf(portdev->c_ivq, buf) < 0) { 1763 if (add_inbuf(portdev->c_ivq, buf) < 0) {
diff --git a/drivers/clk/at91/clk-usb.c b/drivers/clk/at91/clk-usb.c
index 24b5b020753a..a23ac0c724f0 100644
--- a/drivers/clk/at91/clk-usb.c
+++ b/drivers/clk/at91/clk-usb.c
@@ -52,29 +52,26 @@ static unsigned long at91sam9x5_clk_usb_recalc_rate(struct clk_hw *hw,
52 52
53 tmp = pmc_read(pmc, AT91_PMC_USB); 53 tmp = pmc_read(pmc, AT91_PMC_USB);
54 usbdiv = (tmp & AT91_PMC_OHCIUSBDIV) >> SAM9X5_USB_DIV_SHIFT; 54 usbdiv = (tmp & AT91_PMC_OHCIUSBDIV) >> SAM9X5_USB_DIV_SHIFT;
55 return parent_rate / (usbdiv + 1); 55
56 return DIV_ROUND_CLOSEST(parent_rate, (usbdiv + 1));
56} 57}
57 58
58static long at91sam9x5_clk_usb_round_rate(struct clk_hw *hw, unsigned long rate, 59static long at91sam9x5_clk_usb_round_rate(struct clk_hw *hw, unsigned long rate,
59 unsigned long *parent_rate) 60 unsigned long *parent_rate)
60{ 61{
61 unsigned long div; 62 unsigned long div;
62 unsigned long bestrate; 63
63 unsigned long tmp; 64 if (!rate)
65 return -EINVAL;
64 66
65 if (rate >= *parent_rate) 67 if (rate >= *parent_rate)
66 return *parent_rate; 68 return *parent_rate;
67 69
68 div = *parent_rate / rate; 70 div = DIV_ROUND_CLOSEST(*parent_rate, rate);
69 if (div >= SAM9X5_USB_MAX_DIV) 71 if (div > SAM9X5_USB_MAX_DIV + 1)
70 return *parent_rate / (SAM9X5_USB_MAX_DIV + 1); 72 div = SAM9X5_USB_MAX_DIV + 1;
71
72 bestrate = *parent_rate / div;
73 tmp = *parent_rate / (div + 1);
74 if (bestrate - rate > rate - tmp)
75 bestrate = tmp;
76 73
77 return bestrate; 74 return DIV_ROUND_CLOSEST(*parent_rate, div);
78} 75}
79 76
80static int at91sam9x5_clk_usb_set_parent(struct clk_hw *hw, u8 index) 77static int at91sam9x5_clk_usb_set_parent(struct clk_hw *hw, u8 index)
@@ -106,9 +103,13 @@ static int at91sam9x5_clk_usb_set_rate(struct clk_hw *hw, unsigned long rate,
106 u32 tmp; 103 u32 tmp;
107 struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw); 104 struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw);
108 struct at91_pmc *pmc = usb->pmc; 105 struct at91_pmc *pmc = usb->pmc;
109 unsigned long div = parent_rate / rate; 106 unsigned long div;
107
108 if (!rate)
109 return -EINVAL;
110 110
111 if (parent_rate % rate || div < 1 || div >= SAM9X5_USB_MAX_DIV) 111 div = DIV_ROUND_CLOSEST(parent_rate, rate);
112 if (div > SAM9X5_USB_MAX_DIV + 1 || !div)
112 return -EINVAL; 113 return -EINVAL;
113 114
114 tmp = pmc_read(pmc, AT91_PMC_USB) & ~AT91_PMC_OHCIUSBDIV; 115 tmp = pmc_read(pmc, AT91_PMC_USB) & ~AT91_PMC_OHCIUSBDIV;
@@ -253,7 +254,7 @@ static long at91rm9200_clk_usb_round_rate(struct clk_hw *hw, unsigned long rate,
253 254
254 tmp_parent_rate = rate * usb->divisors[i]; 255 tmp_parent_rate = rate * usb->divisors[i];
255 tmp_parent_rate = __clk_round_rate(parent, tmp_parent_rate); 256 tmp_parent_rate = __clk_round_rate(parent, tmp_parent_rate);
256 tmprate = tmp_parent_rate / usb->divisors[i]; 257 tmprate = DIV_ROUND_CLOSEST(tmp_parent_rate, usb->divisors[i]);
257 if (tmprate < rate) 258 if (tmprate < rate)
258 tmpdiff = rate - tmprate; 259 tmpdiff = rate - tmprate;
259 else 260 else
@@ -281,10 +282,10 @@ static int at91rm9200_clk_usb_set_rate(struct clk_hw *hw, unsigned long rate,
281 struct at91_pmc *pmc = usb->pmc; 282 struct at91_pmc *pmc = usb->pmc;
282 unsigned long div; 283 unsigned long div;
283 284
284 if (!rate || parent_rate % rate) 285 if (!rate)
285 return -EINVAL; 286 return -EINVAL;
286 287
287 div = parent_rate / rate; 288 div = DIV_ROUND_CLOSEST(parent_rate, rate);
288 289
289 for (i = 0; i < RM9200_USB_DIV_TAB_SIZE; i++) { 290 for (i = 0; i < RM9200_USB_DIV_TAB_SIZE; i++) {
290 if (usb->divisors[i] == div) { 291 if (usb->divisors[i] == div) {
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index 18a9de29df0e..c0a842b335c5 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -263,6 +263,14 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
263 if (!rate) 263 if (!rate)
264 rate = 1; 264 rate = 1;
265 265
266 /* if read only, just return current value */
267 if (divider->flags & CLK_DIVIDER_READ_ONLY) {
268 bestdiv = readl(divider->reg) >> divider->shift;
269 bestdiv &= div_mask(divider);
270 bestdiv = _get_div(divider, bestdiv);
271 return bestdiv;
272 }
273
266 maxdiv = _get_maxdiv(divider); 274 maxdiv = _get_maxdiv(divider);
267 275
268 if (!(__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT)) { 276 if (!(__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT)) {
@@ -361,11 +369,6 @@ const struct clk_ops clk_divider_ops = {
361}; 369};
362EXPORT_SYMBOL_GPL(clk_divider_ops); 370EXPORT_SYMBOL_GPL(clk_divider_ops);
363 371
364const struct clk_ops clk_divider_ro_ops = {
365 .recalc_rate = clk_divider_recalc_rate,
366};
367EXPORT_SYMBOL_GPL(clk_divider_ro_ops);
368
369static struct clk *_register_divider(struct device *dev, const char *name, 372static struct clk *_register_divider(struct device *dev, const char *name,
370 const char *parent_name, unsigned long flags, 373 const char *parent_name, unsigned long flags,
371 void __iomem *reg, u8 shift, u8 width, 374 void __iomem *reg, u8 shift, u8 width,
@@ -391,10 +394,7 @@ static struct clk *_register_divider(struct device *dev, const char *name,
391 } 394 }
392 395
393 init.name = name; 396 init.name = name;
394 if (clk_divider_flags & CLK_DIVIDER_READ_ONLY) 397 init.ops = &clk_divider_ops;
395 init.ops = &clk_divider_ro_ops;
396 else
397 init.ops = &clk_divider_ops;
398 init.flags = flags | CLK_IS_BASIC; 398 init.flags = flags | CLK_IS_BASIC;
399 init.parent_names = (parent_name ? &parent_name: NULL); 399 init.parent_names = (parent_name ? &parent_name: NULL);
400 init.num_parents = (parent_name ? 1 : 0); 400 init.num_parents = (parent_name ? 1 : 0);
diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c
index b7797fb12e12..7bb13af8e214 100644
--- a/drivers/clk/clk-s2mps11.c
+++ b/drivers/clk/clk-s2mps11.c
@@ -23,6 +23,7 @@
23#include <linux/clk-provider.h> 23#include <linux/clk-provider.h>
24#include <linux/platform_device.h> 24#include <linux/platform_device.h>
25#include <linux/mfd/samsung/s2mps11.h> 25#include <linux/mfd/samsung/s2mps11.h>
26#include <linux/mfd/samsung/s2mps13.h>
26#include <linux/mfd/samsung/s2mps14.h> 27#include <linux/mfd/samsung/s2mps14.h>
27#include <linux/mfd/samsung/s5m8767.h> 28#include <linux/mfd/samsung/s5m8767.h>
28#include <linux/mfd/samsung/core.h> 29#include <linux/mfd/samsung/core.h>
@@ -120,6 +121,24 @@ static struct clk_init_data s2mps11_clks_init[S2MPS11_CLKS_NUM] = {
120 }, 121 },
121}; 122};
122 123
124static struct clk_init_data s2mps13_clks_init[S2MPS11_CLKS_NUM] = {
125 [S2MPS11_CLK_AP] = {
126 .name = "s2mps13_ap",
127 .ops = &s2mps11_clk_ops,
128 .flags = CLK_IS_ROOT,
129 },
130 [S2MPS11_CLK_CP] = {
131 .name = "s2mps13_cp",
132 .ops = &s2mps11_clk_ops,
133 .flags = CLK_IS_ROOT,
134 },
135 [S2MPS11_CLK_BT] = {
136 .name = "s2mps13_bt",
137 .ops = &s2mps11_clk_ops,
138 .flags = CLK_IS_ROOT,
139 },
140};
141
123static struct clk_init_data s2mps14_clks_init[S2MPS11_CLKS_NUM] = { 142static struct clk_init_data s2mps14_clks_init[S2MPS11_CLKS_NUM] = {
124 [S2MPS11_CLK_AP] = { 143 [S2MPS11_CLK_AP] = {
125 .name = "s2mps14_ap", 144 .name = "s2mps14_ap",
@@ -184,6 +203,10 @@ static int s2mps11_clk_probe(struct platform_device *pdev)
184 s2mps11_reg = S2MPS11_REG_RTC_CTRL; 203 s2mps11_reg = S2MPS11_REG_RTC_CTRL;
185 clks_init = s2mps11_clks_init; 204 clks_init = s2mps11_clks_init;
186 break; 205 break;
206 case S2MPS13X:
207 s2mps11_reg = S2MPS13_REG_RTCCTRL;
208 clks_init = s2mps13_clks_init;
209 break;
187 case S2MPS14X: 210 case S2MPS14X:
188 s2mps11_reg = S2MPS14_REG_RTCCTRL; 211 s2mps11_reg = S2MPS14_REG_RTCCTRL;
189 clks_init = s2mps14_clks_init; 212 clks_init = s2mps14_clks_init;
@@ -279,6 +302,7 @@ static int s2mps11_clk_remove(struct platform_device *pdev)
279 302
280static const struct platform_device_id s2mps11_clk_id[] = { 303static const struct platform_device_id s2mps11_clk_id[] = {
281 { "s2mps11-clk", S2MPS11X}, 304 { "s2mps11-clk", S2MPS11X},
305 { "s2mps13-clk", S2MPS13X},
282 { "s2mps14-clk", S2MPS14X}, 306 { "s2mps14-clk", S2MPS14X},
283 { "s5m8767-clk", S5M8767X}, 307 { "s5m8767-clk", S5M8767X},
284 { }, 308 { },
diff --git a/drivers/clk/mvebu/common.c b/drivers/clk/mvebu/common.c
index b7fcb469c87a..0d4d1216f2dd 100644
--- a/drivers/clk/mvebu/common.c
+++ b/drivers/clk/mvebu/common.c
@@ -19,6 +19,7 @@
19#include <linux/io.h> 19#include <linux/io.h>
20#include <linux/of.h> 20#include <linux/of.h>
21#include <linux/of_address.h> 21#include <linux/of_address.h>
22#include <linux/syscore_ops.h>
22 23
23#include "common.h" 24#include "common.h"
24 25
@@ -177,14 +178,17 @@ struct clk_gating_ctrl {
177 spinlock_t *lock; 178 spinlock_t *lock;
178 struct clk **gates; 179 struct clk **gates;
179 int num_gates; 180 int num_gates;
181 void __iomem *base;
182 u32 saved_reg;
180}; 183};
181 184
182#define to_clk_gate(_hw) container_of(_hw, struct clk_gate, hw) 185#define to_clk_gate(_hw) container_of(_hw, struct clk_gate, hw)
183 186
187static struct clk_gating_ctrl *ctrl;
188
184static struct clk *clk_gating_get_src( 189static struct clk *clk_gating_get_src(
185 struct of_phandle_args *clkspec, void *data) 190 struct of_phandle_args *clkspec, void *data)
186{ 191{
187 struct clk_gating_ctrl *ctrl = (struct clk_gating_ctrl *)data;
188 int n; 192 int n;
189 193
190 if (clkspec->args_count < 1) 194 if (clkspec->args_count < 1)
@@ -199,15 +203,35 @@ static struct clk *clk_gating_get_src(
199 return ERR_PTR(-ENODEV); 203 return ERR_PTR(-ENODEV);
200} 204}
201 205
206static int mvebu_clk_gating_suspend(void)
207{
208 ctrl->saved_reg = readl(ctrl->base);
209 return 0;
210}
211
212static void mvebu_clk_gating_resume(void)
213{
214 writel(ctrl->saved_reg, ctrl->base);
215}
216
217static struct syscore_ops clk_gate_syscore_ops = {
218 .suspend = mvebu_clk_gating_suspend,
219 .resume = mvebu_clk_gating_resume,
220};
221
202void __init mvebu_clk_gating_setup(struct device_node *np, 222void __init mvebu_clk_gating_setup(struct device_node *np,
203 const struct clk_gating_soc_desc *desc) 223 const struct clk_gating_soc_desc *desc)
204{ 224{
205 struct clk_gating_ctrl *ctrl;
206 struct clk *clk; 225 struct clk *clk;
207 void __iomem *base; 226 void __iomem *base;
208 const char *default_parent = NULL; 227 const char *default_parent = NULL;
209 int n; 228 int n;
210 229
230 if (ctrl) {
231 pr_err("mvebu-clk-gating: cannot instantiate more than one gatable clock device\n");
232 return;
233 }
234
211 base = of_iomap(np, 0); 235 base = of_iomap(np, 0);
212 if (WARN_ON(!base)) 236 if (WARN_ON(!base))
213 return; 237 return;
@@ -225,6 +249,8 @@ void __init mvebu_clk_gating_setup(struct device_node *np,
225 /* lock must already be initialized */ 249 /* lock must already be initialized */
226 ctrl->lock = &ctrl_gating_lock; 250 ctrl->lock = &ctrl_gating_lock;
227 251
252 ctrl->base = base;
253
228 /* Count, allocate, and register clock gates */ 254 /* Count, allocate, and register clock gates */
229 for (n = 0; desc[n].name;) 255 for (n = 0; desc[n].name;)
230 n++; 256 n++;
@@ -246,6 +272,8 @@ void __init mvebu_clk_gating_setup(struct device_node *np,
246 272
247 of_clk_add_provider(np, clk_gating_get_src, ctrl); 273 of_clk_add_provider(np, clk_gating_get_src, ctrl);
248 274
275 register_syscore_ops(&clk_gate_syscore_ops);
276
249 return; 277 return;
250gates_out: 278gates_out:
251 kfree(ctrl); 279 kfree(ctrl);
diff --git a/drivers/clk/pxa/clk-pxa27x.c b/drivers/clk/pxa/clk-pxa27x.c
index b345cc791e5d..88b9fe13fa44 100644
--- a/drivers/clk/pxa/clk-pxa27x.c
+++ b/drivers/clk/pxa/clk-pxa27x.c
@@ -322,7 +322,7 @@ static unsigned long clk_pxa27x_memory_get_rate(struct clk_hw *hw,
322 unsigned long ccsr = CCSR; 322 unsigned long ccsr = CCSR;
323 323
324 osc_forced = ccsr & (1 << CCCR_CPDIS_BIT); 324 osc_forced = ccsr & (1 << CCCR_CPDIS_BIT);
325 a = cccr & CCCR_A_BIT; 325 a = cccr & (1 << CCCR_A_BIT);
326 l = ccsr & CCSR_L_MASK; 326 l = ccsr & CCSR_L_MASK;
327 327
328 if (osc_forced || a) 328 if (osc_forced || a)
@@ -341,7 +341,7 @@ static u8 clk_pxa27x_memory_get_parent(struct clk_hw *hw)
341 unsigned long ccsr = CCSR; 341 unsigned long ccsr = CCSR;
342 342
343 osc_forced = ccsr & (1 << CCCR_CPDIS_BIT); 343 osc_forced = ccsr & (1 << CCCR_CPDIS_BIT);
344 a = cccr & CCCR_A_BIT; 344 a = cccr & (1 << CCCR_A_BIT);
345 if (osc_forced) 345 if (osc_forced)
346 return PXA_MEM_13Mhz; 346 return PXA_MEM_13Mhz;
347 if (a) 347 if (a)
diff --git a/drivers/clk/qcom/mmcc-apq8084.c b/drivers/clk/qcom/mmcc-apq8084.c
index dab988ab8cf1..157139a5c1ca 100644
--- a/drivers/clk/qcom/mmcc-apq8084.c
+++ b/drivers/clk/qcom/mmcc-apq8084.c
@@ -3122,7 +3122,7 @@ static struct clk_regmap *mmcc_apq8084_clocks[] = {
3122 [ESC1_CLK_SRC] = &esc1_clk_src.clkr, 3122 [ESC1_CLK_SRC] = &esc1_clk_src.clkr,
3123 [HDMI_CLK_SRC] = &hdmi_clk_src.clkr, 3123 [HDMI_CLK_SRC] = &hdmi_clk_src.clkr,
3124 [VSYNC_CLK_SRC] = &vsync_clk_src.clkr, 3124 [VSYNC_CLK_SRC] = &vsync_clk_src.clkr,
3125 [RBCPR_CLK_SRC] = &rbcpr_clk_src.clkr, 3125 [MMSS_RBCPR_CLK_SRC] = &rbcpr_clk_src.clkr,
3126 [RBBMTIMER_CLK_SRC] = &rbbmtimer_clk_src.clkr, 3126 [RBBMTIMER_CLK_SRC] = &rbbmtimer_clk_src.clkr,
3127 [MAPLE_CLK_SRC] = &maple_clk_src.clkr, 3127 [MAPLE_CLK_SRC] = &maple_clk_src.clkr,
3128 [VDP_CLK_SRC] = &vdp_clk_src.clkr, 3128 [VDP_CLK_SRC] = &vdp_clk_src.clkr,
diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c
index 1e68bff481b8..880a266f0143 100644
--- a/drivers/clk/rockchip/clk.c
+++ b/drivers/clk/rockchip/clk.c
@@ -90,9 +90,7 @@ static struct clk *rockchip_clk_register_branch(const char *name,
90 div->width = div_width; 90 div->width = div_width;
91 div->lock = lock; 91 div->lock = lock;
92 div->table = div_table; 92 div->table = div_table;
93 div_ops = (div_flags & CLK_DIVIDER_READ_ONLY) 93 div_ops = &clk_divider_ops;
94 ? &clk_divider_ro_ops
95 : &clk_divider_ops;
96 } 94 }
97 95
98 clk = clk_register_composite(NULL, name, parent_names, num_parents, 96 clk = clk_register_composite(NULL, name, parent_names, num_parents,
diff --git a/drivers/clk/samsung/clk-exynos5440.c b/drivers/clk/samsung/clk-exynos5440.c
index 00d1d00a41de..979e81389cdd 100644
--- a/drivers/clk/samsung/clk-exynos5440.c
+++ b/drivers/clk/samsung/clk-exynos5440.c
@@ -15,6 +15,8 @@
15#include <linux/clk-provider.h> 15#include <linux/clk-provider.h>
16#include <linux/of.h> 16#include <linux/of.h>
17#include <linux/of_address.h> 17#include <linux/of_address.h>
18#include <linux/notifier.h>
19#include <linux/reboot.h>
18 20
19#include "clk.h" 21#include "clk.h"
20#include "clk-pll.h" 22#include "clk-pll.h"
@@ -23,6 +25,8 @@
23#define CPU_CLK_STATUS 0xfc 25#define CPU_CLK_STATUS 0xfc
24#define MISC_DOUT1 0x558 26#define MISC_DOUT1 0x558
25 27
28static void __iomem *reg_base;
29
26/* parent clock name list */ 30/* parent clock name list */
27PNAME(mout_armclk_p) = { "cplla", "cpllb" }; 31PNAME(mout_armclk_p) = { "cplla", "cpllb" };
28PNAME(mout_spi_p) = { "div125", "div200" }; 32PNAME(mout_spi_p) = { "div125", "div200" };
@@ -89,10 +93,30 @@ static const struct of_device_id ext_clk_match[] __initconst = {
89 {}, 93 {},
90}; 94};
91 95
96static int exynos5440_clk_restart_notify(struct notifier_block *this,
97 unsigned long code, void *unused)
98{
99 u32 val, status;
100
101 status = readl_relaxed(reg_base + 0xbc);
102 val = readl_relaxed(reg_base + 0xcc);
103 val = (val & 0xffff0000) | (status & 0xffff);
104 writel_relaxed(val, reg_base + 0xcc);
105
106 return NOTIFY_DONE;
107}
108
109/*
110 * Exynos5440 Clock restart notifier, handles restart functionality
111 */
112static struct notifier_block exynos5440_clk_restart_handler = {
113 .notifier_call = exynos5440_clk_restart_notify,
114 .priority = 128,
115};
116
92/* register exynos5440 clocks */ 117/* register exynos5440 clocks */
93static void __init exynos5440_clk_init(struct device_node *np) 118static void __init exynos5440_clk_init(struct device_node *np)
94{ 119{
95 void __iomem *reg_base;
96 struct samsung_clk_provider *ctx; 120 struct samsung_clk_provider *ctx;
97 121
98 reg_base = of_iomap(np, 0); 122 reg_base = of_iomap(np, 0);
@@ -125,6 +149,9 @@ static void __init exynos5440_clk_init(struct device_node *np)
125 149
126 samsung_clk_of_add_provider(np, ctx); 150 samsung_clk_of_add_provider(np, ctx);
127 151
152 if (register_restart_handler(&exynos5440_clk_restart_handler))
153 pr_warn("exynos5440 clock can't register restart handler\n");
154
128 pr_info("Exynos5440: arm_clk = %ldHz\n", _get_rate("arm_clk")); 155 pr_info("Exynos5440: arm_clk = %ldHz\n", _get_rate("arm_clk"));
129 pr_info("exynos5440 clock initialization complete\n"); 156 pr_info("exynos5440 clock initialization complete\n");
130} 157}
diff --git a/drivers/clk/tegra/clk-divider.c b/drivers/clk/tegra/clk-divider.c
index 290f9c1a3749..59a5714dfe18 100644
--- a/drivers/clk/tegra/clk-divider.c
+++ b/drivers/clk/tegra/clk-divider.c
@@ -185,3 +185,16 @@ struct clk *tegra_clk_register_divider(const char *name,
185 185
186 return clk; 186 return clk;
187} 187}
188
189static const struct clk_div_table mc_div_table[] = {
190 { .val = 0, .div = 2 },
191 { .val = 1, .div = 1 },
192 { .val = 0, .div = 0 },
193};
194
195struct clk *tegra_clk_register_mc(const char *name, const char *parent_name,
196 void __iomem *reg, spinlock_t *lock)
197{
198 return clk_register_divider_table(NULL, name, parent_name, 0, reg,
199 16, 1, 0, mc_div_table, lock);
200}
diff --git a/drivers/clk/tegra/clk-tegra114.c b/drivers/clk/tegra/clk-tegra114.c
index f760f31d05c4..0b03d2cf7264 100644
--- a/drivers/clk/tegra/clk-tegra114.c
+++ b/drivers/clk/tegra/clk-tegra114.c
@@ -173,6 +173,7 @@ static DEFINE_SPINLOCK(pll_d_lock);
173static DEFINE_SPINLOCK(pll_d2_lock); 173static DEFINE_SPINLOCK(pll_d2_lock);
174static DEFINE_SPINLOCK(pll_u_lock); 174static DEFINE_SPINLOCK(pll_u_lock);
175static DEFINE_SPINLOCK(pll_re_lock); 175static DEFINE_SPINLOCK(pll_re_lock);
176static DEFINE_SPINLOCK(emc_lock);
176 177
177static struct div_nmp pllxc_nmp = { 178static struct div_nmp pllxc_nmp = {
178 .divm_shift = 0, 179 .divm_shift = 0,
@@ -1228,7 +1229,11 @@ static __init void tegra114_periph_clk_init(void __iomem *clk_base,
1228 ARRAY_SIZE(mux_pllmcp_clkm), 1229 ARRAY_SIZE(mux_pllmcp_clkm),
1229 CLK_SET_RATE_NO_REPARENT, 1230 CLK_SET_RATE_NO_REPARENT,
1230 clk_base + CLK_SOURCE_EMC, 1231 clk_base + CLK_SOURCE_EMC,
1231 29, 3, 0, NULL); 1232 29, 3, 0, &emc_lock);
1233
1234 clk = tegra_clk_register_mc("mc", "emc_mux", clk_base + CLK_SOURCE_EMC,
1235 &emc_lock);
1236 clks[TEGRA114_CLK_MC] = clk;
1232 1237
1233 for (i = 0; i < ARRAY_SIZE(tegra_periph_clk_list); i++) { 1238 for (i = 0; i < ARRAY_SIZE(tegra_periph_clk_list); i++) {
1234 data = &tegra_periph_clk_list[i]; 1239 data = &tegra_periph_clk_list[i];
diff --git a/drivers/clk/tegra/clk-tegra124.c b/drivers/clk/tegra/clk-tegra124.c
index e3a85842ce0c..f5f9baca7bb6 100644
--- a/drivers/clk/tegra/clk-tegra124.c
+++ b/drivers/clk/tegra/clk-tegra124.c
@@ -132,6 +132,7 @@ static DEFINE_SPINLOCK(pll_d2_lock);
132static DEFINE_SPINLOCK(pll_e_lock); 132static DEFINE_SPINLOCK(pll_e_lock);
133static DEFINE_SPINLOCK(pll_re_lock); 133static DEFINE_SPINLOCK(pll_re_lock);
134static DEFINE_SPINLOCK(pll_u_lock); 134static DEFINE_SPINLOCK(pll_u_lock);
135static DEFINE_SPINLOCK(emc_lock);
135 136
136/* possible OSC frequencies in Hz */ 137/* possible OSC frequencies in Hz */
137static unsigned long tegra124_input_freq[] = { 138static unsigned long tegra124_input_freq[] = {
@@ -1127,7 +1128,11 @@ static __init void tegra124_periph_clk_init(void __iomem *clk_base,
1127 clk = clk_register_mux(NULL, "emc_mux", mux_pllmcp_clkm, 1128 clk = clk_register_mux(NULL, "emc_mux", mux_pllmcp_clkm,
1128 ARRAY_SIZE(mux_pllmcp_clkm), 0, 1129 ARRAY_SIZE(mux_pllmcp_clkm), 0,
1129 clk_base + CLK_SOURCE_EMC, 1130 clk_base + CLK_SOURCE_EMC,
1130 29, 3, 0, NULL); 1131 29, 3, 0, &emc_lock);
1132
1133 clk = tegra_clk_register_mc("mc", "emc_mux", clk_base + CLK_SOURCE_EMC,
1134 &emc_lock);
1135 clks[TEGRA124_CLK_MC] = clk;
1131 1136
1132 /* cml0 */ 1137 /* cml0 */
1133 clk = clk_register_gate(NULL, "cml0", "pll_e", 0, clk_base + PLLE_AUX, 1138 clk = clk_register_gate(NULL, "cml0", "pll_e", 0, clk_base + PLLE_AUX,
diff --git a/drivers/clk/tegra/clk-tegra20.c b/drivers/clk/tegra/clk-tegra20.c
index dace2b1b5ae6..41272dcc9e22 100644
--- a/drivers/clk/tegra/clk-tegra20.c
+++ b/drivers/clk/tegra/clk-tegra20.c
@@ -140,6 +140,8 @@ static struct cpu_clk_suspend_context {
140static void __iomem *clk_base; 140static void __iomem *clk_base;
141static void __iomem *pmc_base; 141static void __iomem *pmc_base;
142 142
143static DEFINE_SPINLOCK(emc_lock);
144
143#define TEGRA_INIT_DATA_MUX(_name, _parents, _offset, \ 145#define TEGRA_INIT_DATA_MUX(_name, _parents, _offset, \
144 _clk_num, _gate_flags, _clk_id) \ 146 _clk_num, _gate_flags, _clk_id) \
145 TEGRA_INIT_DATA(_name, NULL, NULL, _parents, _offset, \ 147 TEGRA_INIT_DATA(_name, NULL, NULL, _parents, _offset, \
@@ -819,11 +821,15 @@ static void __init tegra20_periph_clk_init(void)
819 ARRAY_SIZE(mux_pllmcp_clkm), 821 ARRAY_SIZE(mux_pllmcp_clkm),
820 CLK_SET_RATE_NO_REPARENT, 822 CLK_SET_RATE_NO_REPARENT,
821 clk_base + CLK_SOURCE_EMC, 823 clk_base + CLK_SOURCE_EMC,
822 30, 2, 0, NULL); 824 30, 2, 0, &emc_lock);
823 clk = tegra_clk_register_periph_gate("emc", "emc_mux", 0, clk_base, 0, 825 clk = tegra_clk_register_periph_gate("emc", "emc_mux", 0, clk_base, 0,
824 57, periph_clk_enb_refcnt); 826 57, periph_clk_enb_refcnt);
825 clks[TEGRA20_CLK_EMC] = clk; 827 clks[TEGRA20_CLK_EMC] = clk;
826 828
829 clk = tegra_clk_register_mc("mc", "emc_mux", clk_base + CLK_SOURCE_EMC,
830 &emc_lock);
831 clks[TEGRA20_CLK_MC] = clk;
832
827 /* dsi */ 833 /* dsi */
828 clk = tegra_clk_register_periph_gate("dsi", "pll_d", 0, clk_base, 0, 834 clk = tegra_clk_register_periph_gate("dsi", "pll_d", 0, clk_base, 0,
829 48, periph_clk_enb_refcnt); 835 48, periph_clk_enb_refcnt);
diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c
index 5bbacd01094f..4b9d8bd3d0bf 100644
--- a/drivers/clk/tegra/clk-tegra30.c
+++ b/drivers/clk/tegra/clk-tegra30.c
@@ -177,6 +177,7 @@ static unsigned long input_freq;
177 177
178static DEFINE_SPINLOCK(cml_lock); 178static DEFINE_SPINLOCK(cml_lock);
179static DEFINE_SPINLOCK(pll_d_lock); 179static DEFINE_SPINLOCK(pll_d_lock);
180static DEFINE_SPINLOCK(emc_lock);
180 181
181#define TEGRA_INIT_DATA_MUX(_name, _parents, _offset, \ 182#define TEGRA_INIT_DATA_MUX(_name, _parents, _offset, \
182 _clk_num, _gate_flags, _clk_id) \ 183 _clk_num, _gate_flags, _clk_id) \
@@ -1157,11 +1158,15 @@ static void __init tegra30_periph_clk_init(void)
1157 ARRAY_SIZE(mux_pllmcp_clkm), 1158 ARRAY_SIZE(mux_pllmcp_clkm),
1158 CLK_SET_RATE_NO_REPARENT, 1159 CLK_SET_RATE_NO_REPARENT,
1159 clk_base + CLK_SOURCE_EMC, 1160 clk_base + CLK_SOURCE_EMC,
1160 30, 2, 0, NULL); 1161 30, 2, 0, &emc_lock);
1161 clk = tegra_clk_register_periph_gate("emc", "emc_mux", 0, clk_base, 0, 1162 clk = tegra_clk_register_periph_gate("emc", "emc_mux", 0, clk_base, 0,
1162 57, periph_clk_enb_refcnt); 1163 57, periph_clk_enb_refcnt);
1163 clks[TEGRA30_CLK_EMC] = clk; 1164 clks[TEGRA30_CLK_EMC] = clk;
1164 1165
1166 clk = tegra_clk_register_mc("mc", "emc_mux", clk_base + CLK_SOURCE_EMC,
1167 &emc_lock);
1168 clks[TEGRA30_CLK_MC] = clk;
1169
1165 /* cml0 */ 1170 /* cml0 */
1166 clk = clk_register_gate(NULL, "cml0", "pll_e", 0, clk_base + PLLE_AUX, 1171 clk = clk_register_gate(NULL, "cml0", "pll_e", 0, clk_base + PLLE_AUX,
1167 0, 0, &cml_lock); 1172 0, 0, &cml_lock);
diff --git a/drivers/clk/tegra/clk.h b/drivers/clk/tegra/clk.h
index 16ec8d6bb87f..4e458aa8d45c 100644
--- a/drivers/clk/tegra/clk.h
+++ b/drivers/clk/tegra/clk.h
@@ -86,6 +86,8 @@ struct clk *tegra_clk_register_divider(const char *name,
86 const char *parent_name, void __iomem *reg, 86 const char *parent_name, void __iomem *reg,
87 unsigned long flags, u8 clk_divider_flags, u8 shift, u8 width, 87 unsigned long flags, u8 clk_divider_flags, u8 shift, u8 width,
88 u8 frac_width, spinlock_t *lock); 88 u8 frac_width, spinlock_t *lock);
89struct clk *tegra_clk_register_mc(const char *name, const char *parent_name,
90 void __iomem *reg, spinlock_t *lock);
89 91
90/* 92/*
91 * Tegra PLL: 93 * Tegra PLL:
diff --git a/drivers/clk/ti/dpll.c b/drivers/clk/ti/dpll.c
index 79791e1bf282..85ac0dd501de 100644
--- a/drivers/clk/ti/dpll.c
+++ b/drivers/clk/ti/dpll.c
@@ -33,6 +33,9 @@ static const struct clk_ops dpll_m4xen_ck_ops = {
33 .recalc_rate = &omap4_dpll_regm4xen_recalc, 33 .recalc_rate = &omap4_dpll_regm4xen_recalc,
34 .round_rate = &omap4_dpll_regm4xen_round_rate, 34 .round_rate = &omap4_dpll_regm4xen_round_rate,
35 .set_rate = &omap3_noncore_dpll_set_rate, 35 .set_rate = &omap3_noncore_dpll_set_rate,
36 .set_parent = &omap3_noncore_dpll_set_parent,
37 .set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent,
38 .determine_rate = &omap4_dpll_regm4xen_determine_rate,
36 .get_parent = &omap2_init_dpll_parent, 39 .get_parent = &omap2_init_dpll_parent,
37}; 40};
38#else 41#else
@@ -53,6 +56,9 @@ static const struct clk_ops dpll_ck_ops = {
53 .recalc_rate = &omap3_dpll_recalc, 56 .recalc_rate = &omap3_dpll_recalc,
54 .round_rate = &omap2_dpll_round_rate, 57 .round_rate = &omap2_dpll_round_rate,
55 .set_rate = &omap3_noncore_dpll_set_rate, 58 .set_rate = &omap3_noncore_dpll_set_rate,
59 .set_parent = &omap3_noncore_dpll_set_parent,
60 .set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent,
61 .determine_rate = &omap3_noncore_dpll_determine_rate,
56 .get_parent = &omap2_init_dpll_parent, 62 .get_parent = &omap2_init_dpll_parent,
57}; 63};
58 64
@@ -61,6 +67,9 @@ static const struct clk_ops dpll_no_gate_ck_ops = {
61 .get_parent = &omap2_init_dpll_parent, 67 .get_parent = &omap2_init_dpll_parent,
62 .round_rate = &omap2_dpll_round_rate, 68 .round_rate = &omap2_dpll_round_rate,
63 .set_rate = &omap3_noncore_dpll_set_rate, 69 .set_rate = &omap3_noncore_dpll_set_rate,
70 .set_parent = &omap3_noncore_dpll_set_parent,
71 .set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent,
72 .determine_rate = &omap3_noncore_dpll_determine_rate,
64}; 73};
65#else 74#else
66static const struct clk_ops dpll_core_ck_ops = {}; 75static const struct clk_ops dpll_core_ck_ops = {};
@@ -97,6 +106,9 @@ static const struct clk_ops omap3_dpll_ck_ops = {
97 .get_parent = &omap2_init_dpll_parent, 106 .get_parent = &omap2_init_dpll_parent,
98 .recalc_rate = &omap3_dpll_recalc, 107 .recalc_rate = &omap3_dpll_recalc,
99 .set_rate = &omap3_noncore_dpll_set_rate, 108 .set_rate = &omap3_noncore_dpll_set_rate,
109 .set_parent = &omap3_noncore_dpll_set_parent,
110 .set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent,
111 .determine_rate = &omap3_noncore_dpll_determine_rate,
100 .round_rate = &omap2_dpll_round_rate, 112 .round_rate = &omap2_dpll_round_rate,
101}; 113};
102 114
@@ -106,6 +118,9 @@ static const struct clk_ops omap3_dpll_per_ck_ops = {
106 .get_parent = &omap2_init_dpll_parent, 118 .get_parent = &omap2_init_dpll_parent,
107 .recalc_rate = &omap3_dpll_recalc, 119 .recalc_rate = &omap3_dpll_recalc,
108 .set_rate = &omap3_dpll4_set_rate, 120 .set_rate = &omap3_dpll4_set_rate,
121 .set_parent = &omap3_noncore_dpll_set_parent,
122 .set_rate_and_parent = &omap3_dpll4_set_rate_and_parent,
123 .determine_rate = &omap3_noncore_dpll_determine_rate,
109 .round_rate = &omap2_dpll_round_rate, 124 .round_rate = &omap2_dpll_round_rate,
110}; 125};
111#endif 126#endif
diff --git a/drivers/clk/versatile/Makefile b/drivers/clk/versatile/Makefile
index 162e519cb0f9..8ff03744fe98 100644
--- a/drivers/clk/versatile/Makefile
+++ b/drivers/clk/versatile/Makefile
@@ -2,6 +2,5 @@
2obj-$(CONFIG_ICST) += clk-icst.o clk-versatile.o 2obj-$(CONFIG_ICST) += clk-icst.o clk-versatile.o
3obj-$(CONFIG_INTEGRATOR_IMPD1) += clk-impd1.o 3obj-$(CONFIG_INTEGRATOR_IMPD1) += clk-impd1.o
4obj-$(CONFIG_ARCH_REALVIEW) += clk-realview.o 4obj-$(CONFIG_ARCH_REALVIEW) += clk-realview.o
5obj-$(CONFIG_ARCH_VEXPRESS) += clk-vexpress.o
6obj-$(CONFIG_CLK_SP810) += clk-sp810.o 5obj-$(CONFIG_CLK_SP810) += clk-sp810.o
7obj-$(CONFIG_CLK_VEXPRESS_OSC) += clk-vexpress-osc.o 6obj-$(CONFIG_CLK_VEXPRESS_OSC) += clk-vexpress-osc.o
diff --git a/drivers/clk/versatile/clk-vexpress-osc.c b/drivers/clk/versatile/clk-vexpress-osc.c
index 529a59c0fbfa..765f1e0eeeb2 100644
--- a/drivers/clk/versatile/clk-vexpress-osc.c
+++ b/drivers/clk/versatile/clk-vexpress-osc.c
@@ -70,7 +70,6 @@ static struct clk_ops vexpress_osc_ops = {
70 70
71static int vexpress_osc_probe(struct platform_device *pdev) 71static int vexpress_osc_probe(struct platform_device *pdev)
72{ 72{
73 struct clk_lookup *cl = pdev->dev.platform_data; /* Non-DT lookup */
74 struct clk_init_data init; 73 struct clk_init_data init;
75 struct vexpress_osc *osc; 74 struct vexpress_osc *osc;
76 struct clk *clk; 75 struct clk *clk;
@@ -106,12 +105,6 @@ static int vexpress_osc_probe(struct platform_device *pdev)
106 105
107 of_clk_add_provider(pdev->dev.of_node, of_clk_src_simple_get, clk); 106 of_clk_add_provider(pdev->dev.of_node, of_clk_src_simple_get, clk);
108 107
109 /* Only happens for non-DT cases */
110 if (cl) {
111 cl->clk = clk;
112 clkdev_add(cl);
113 }
114
115 dev_dbg(&pdev->dev, "Registered clock '%s'\n", init.name); 108 dev_dbg(&pdev->dev, "Registered clock '%s'\n", init.name);
116 109
117 return 0; 110 return 0;
diff --git a/drivers/clk/versatile/clk-vexpress.c b/drivers/clk/versatile/clk-vexpress.c
deleted file mode 100644
index 2d5e1b4820e0..000000000000
--- a/drivers/clk/versatile/clk-vexpress.c
+++ /dev/null
@@ -1,86 +0,0 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License version 2 as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * Copyright (C) 2012 ARM Limited
12 */
13
14#include <linux/amba/sp810.h>
15#include <linux/clkdev.h>
16#include <linux/clk-provider.h>
17#include <linux/err.h>
18#include <linux/vexpress.h>
19
20static struct clk *vexpress_sp810_timerclken[4];
21static DEFINE_SPINLOCK(vexpress_sp810_lock);
22
23static void __init vexpress_sp810_init(void __iomem *base)
24{
25 int i;
26
27 if (WARN_ON(!base))
28 return;
29
30 for (i = 0; i < ARRAY_SIZE(vexpress_sp810_timerclken); i++) {
31 char name[12];
32 const char *parents[] = {
33 "v2m:refclk32khz", /* REFCLK */
34 "v2m:refclk1mhz" /* TIMCLK */
35 };
36
37 snprintf(name, ARRAY_SIZE(name), "timerclken%d", i);
38
39 vexpress_sp810_timerclken[i] = clk_register_mux(NULL, name,
40 parents, 2, CLK_SET_RATE_NO_REPARENT,
41 base + SCCTRL, SCCTRL_TIMERENnSEL_SHIFT(i), 1,
42 0, &vexpress_sp810_lock);
43
44 if (WARN_ON(IS_ERR(vexpress_sp810_timerclken[i])))
45 break;
46 }
47}
48
49
50static const char * const vexpress_clk_24mhz_periphs[] __initconst = {
51 "mb:uart0", "mb:uart1", "mb:uart2", "mb:uart3",
52 "mb:mmci", "mb:kmi0", "mb:kmi1"
53};
54
55void __init vexpress_clk_init(void __iomem *sp810_base)
56{
57 struct clk *clk;
58 int i;
59
60 clk = clk_register_fixed_rate(NULL, "dummy_apb_pclk", NULL,
61 CLK_IS_ROOT, 0);
62 WARN_ON(clk_register_clkdev(clk, "apb_pclk", NULL));
63
64 clk = clk_register_fixed_rate(NULL, "v2m:clk_24mhz", NULL,
65 CLK_IS_ROOT, 24000000);
66 for (i = 0; i < ARRAY_SIZE(vexpress_clk_24mhz_periphs); i++)
67 WARN_ON(clk_register_clkdev(clk, NULL,
68 vexpress_clk_24mhz_periphs[i]));
69
70 clk = clk_register_fixed_rate(NULL, "v2m:refclk32khz", NULL,
71 CLK_IS_ROOT, 32768);
72 WARN_ON(clk_register_clkdev(clk, NULL, "v2m:wdt"));
73
74 clk = clk_register_fixed_rate(NULL, "v2m:refclk1mhz", NULL,
75 CLK_IS_ROOT, 1000000);
76
77 vexpress_sp810_init(sp810_base);
78
79 for (i = 0; i < ARRAY_SIZE(vexpress_sp810_timerclken); i++)
80 WARN_ON(clk_set_parent(vexpress_sp810_timerclken[i], clk));
81
82 WARN_ON(clk_register_clkdev(vexpress_sp810_timerclken[0],
83 "v2m-timer0", "sp804"));
84 WARN_ON(clk_register_clkdev(vexpress_sp810_timerclken[1],
85 "v2m-timer1", "sp804"));
86}
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 90420600e1eb..f657a48d20eb 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -32,6 +32,7 @@ config ARMADA_370_XP_TIMER
32 32
33config MESON6_TIMER 33config MESON6_TIMER
34 bool 34 bool
35 select CLKSRC_MMIO
35 36
36config ORION_TIMER 37config ORION_TIMER
37 select CLKSRC_OF 38 select CLKSRC_OF
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 756f6f10efa0..fae0435cc23d 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -45,4 +45,5 @@ obj-$(CONFIG_ARM_GLOBAL_TIMER) += arm_global_timer.o
45obj-$(CONFIG_CLKSRC_METAG_GENERIC) += metag_generic.o 45obj-$(CONFIG_CLKSRC_METAG_GENERIC) += metag_generic.o
46obj-$(CONFIG_ARCH_HAS_TICK_BROADCAST) += dummy_timer.o 46obj-$(CONFIG_ARCH_HAS_TICK_BROADCAST) += dummy_timer.o
47obj-$(CONFIG_ARCH_KEYSTONE) += timer-keystone.o 47obj-$(CONFIG_ARCH_KEYSTONE) += timer-keystone.o
48obj-$(CONFIG_ARCH_INTEGRATOR_AP) += timer-integrator-ap.o
48obj-$(CONFIG_CLKSRC_VERSATILE) += versatile.o 49obj-$(CONFIG_CLKSRC_VERSATILE) += versatile.o
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 43005d4d3348..6a79fc4f900c 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -462,7 +462,10 @@ static void __init arch_counter_register(unsigned type)
462 462
463 /* Register the CP15 based counter if we have one */ 463 /* Register the CP15 based counter if we have one */
464 if (type & ARCH_CP15_TIMER) { 464 if (type & ARCH_CP15_TIMER) {
465 arch_timer_read_counter = arch_counter_get_cntvct; 465 if (arch_timer_use_virtual)
466 arch_timer_read_counter = arch_counter_get_cntvct;
467 else
468 arch_timer_read_counter = arch_counter_get_cntpct;
466 } else { 469 } else {
467 arch_timer_read_counter = arch_counter_get_cntvct_mem; 470 arch_timer_read_counter = arch_counter_get_cntvct_mem;
468 471
@@ -702,6 +705,14 @@ static void __init arch_timer_init(struct device_node *np)
702 arch_timer_detect_rate(NULL, np); 705 arch_timer_detect_rate(NULL, np);
703 706
704 /* 707 /*
708 * If we cannot rely on firmware initializing the timer registers then
709 * we should use the physical timers instead.
710 */
711 if (IS_ENABLED(CONFIG_ARM) &&
712 of_property_read_bool(np, "arm,cpu-registers-not-fw-configured"))
713 arch_timer_use_virtual = false;
714
715 /*
705 * If HYP mode is available, we know that the physical timer 716 * If HYP mode is available, we know that the physical timer
706 * has been configured to be accessible from PL1. Use it, so 717 * has been configured to be accessible from PL1. Use it, so
707 * that a guest can use the virtual timer instead. 718 * that a guest can use the virtual timer instead.
diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c
index 0451e62fac7a..0c8c5e337540 100644
--- a/drivers/clocksource/time-armada-370-xp.c
+++ b/drivers/clocksource/time-armada-370-xp.c
@@ -43,6 +43,7 @@
43#include <linux/module.h> 43#include <linux/module.h>
44#include <linux/sched_clock.h> 44#include <linux/sched_clock.h>
45#include <linux/percpu.h> 45#include <linux/percpu.h>
46#include <linux/syscore_ops.h>
46 47
47/* 48/*
48 * Timer block registers. 49 * Timer block registers.
@@ -223,6 +224,28 @@ static struct notifier_block armada_370_xp_timer_cpu_nb = {
223 .notifier_call = armada_370_xp_timer_cpu_notify, 224 .notifier_call = armada_370_xp_timer_cpu_notify,
224}; 225};
225 226
227static u32 timer0_ctrl_reg, timer0_local_ctrl_reg;
228
229static int armada_370_xp_timer_suspend(void)
230{
231 timer0_ctrl_reg = readl(timer_base + TIMER_CTRL_OFF);
232 timer0_local_ctrl_reg = readl(local_base + TIMER_CTRL_OFF);
233 return 0;
234}
235
236static void armada_370_xp_timer_resume(void)
237{
238 writel(0xffffffff, timer_base + TIMER0_VAL_OFF);
239 writel(0xffffffff, timer_base + TIMER0_RELOAD_OFF);
240 writel(timer0_ctrl_reg, timer_base + TIMER_CTRL_OFF);
241 writel(timer0_local_ctrl_reg, local_base + TIMER_CTRL_OFF);
242}
243
244struct syscore_ops armada_370_xp_timer_syscore_ops = {
245 .suspend = armada_370_xp_timer_suspend,
246 .resume = armada_370_xp_timer_resume,
247};
248
226static void __init armada_370_xp_timer_common_init(struct device_node *np) 249static void __init armada_370_xp_timer_common_init(struct device_node *np)
227{ 250{
228 u32 clr = 0, set = 0; 251 u32 clr = 0, set = 0;
@@ -285,6 +308,8 @@ static void __init armada_370_xp_timer_common_init(struct device_node *np)
285 /* Immediately configure the timer on the boot CPU */ 308 /* Immediately configure the timer on the boot CPU */
286 if (!res) 309 if (!res)
287 armada_370_xp_timer_setup(this_cpu_ptr(armada_370_xp_evt)); 310 armada_370_xp_timer_setup(this_cpu_ptr(armada_370_xp_evt));
311
312 register_syscore_ops(&armada_370_xp_timer_syscore_ops);
288} 313}
289 314
290static void __init armada_xp_timer_init(struct device_node *np) 315static void __init armada_xp_timer_init(struct device_node *np)
@@ -293,6 +318,7 @@ static void __init armada_xp_timer_init(struct device_node *np)
293 318
294 /* The 25Mhz fixed clock is mandatory, and must always be available */ 319 /* The 25Mhz fixed clock is mandatory, and must always be available */
295 BUG_ON(IS_ERR(clk)); 320 BUG_ON(IS_ERR(clk));
321 clk_prepare_enable(clk);
296 timer_clk = clk_get_rate(clk); 322 timer_clk = clk_get_rate(clk);
297 323
298 armada_370_xp_timer_common_init(np); 324 armada_370_xp_timer_common_init(np);
@@ -300,11 +326,40 @@ static void __init armada_xp_timer_init(struct device_node *np)
300CLOCKSOURCE_OF_DECLARE(armada_xp, "marvell,armada-xp-timer", 326CLOCKSOURCE_OF_DECLARE(armada_xp, "marvell,armada-xp-timer",
301 armada_xp_timer_init); 327 armada_xp_timer_init);
302 328
329static void __init armada_375_timer_init(struct device_node *np)
330{
331 struct clk *clk;
332
333 clk = of_clk_get_by_name(np, "fixed");
334 if (!IS_ERR(clk)) {
335 clk_prepare_enable(clk);
336 timer_clk = clk_get_rate(clk);
337 } else {
338
339 /*
340 * This fallback is required in order to retain proper
341 * devicetree backwards compatibility.
342 */
343 clk = of_clk_get(np, 0);
344
345 /* Must have at least a clock */
346 BUG_ON(IS_ERR(clk));
347 clk_prepare_enable(clk);
348 timer_clk = clk_get_rate(clk) / TIMER_DIVIDER;
349 timer25Mhz = false;
350 }
351
352 armada_370_xp_timer_common_init(np);
353}
354CLOCKSOURCE_OF_DECLARE(armada_375, "marvell,armada-375-timer",
355 armada_375_timer_init);
356
303static void __init armada_370_timer_init(struct device_node *np) 357static void __init armada_370_timer_init(struct device_node *np)
304{ 358{
305 struct clk *clk = of_clk_get(np, 0); 359 struct clk *clk = of_clk_get(np, 0);
306 360
307 BUG_ON(IS_ERR(clk)); 361 BUG_ON(IS_ERR(clk));
362 clk_prepare_enable(clk);
308 timer_clk = clk_get_rate(clk) / TIMER_DIVIDER; 363 timer_clk = clk_get_rate(clk) / TIMER_DIVIDER;
309 timer25Mhz = false; 364 timer25Mhz = false;
310 365
diff --git a/drivers/clocksource/timer-atmel-pit.c b/drivers/clocksource/timer-atmel-pit.c
index d5289098b3df..b5b4d4585c9a 100644
--- a/drivers/clocksource/timer-atmel-pit.c
+++ b/drivers/clocksource/timer-atmel-pit.c
@@ -262,35 +262,3 @@ static void __init at91sam926x_pit_dt_init(struct device_node *node)
262} 262}
263CLOCKSOURCE_OF_DECLARE(at91sam926x_pit, "atmel,at91sam9260-pit", 263CLOCKSOURCE_OF_DECLARE(at91sam926x_pit, "atmel,at91sam9260-pit",
264 at91sam926x_pit_dt_init); 264 at91sam926x_pit_dt_init);
265
266static void __iomem *pit_base_addr;
267
268void __init at91sam926x_pit_init(int irq)
269{
270 struct pit_data *data;
271
272 data = kzalloc(sizeof(*data), GFP_KERNEL);
273 if (!data)
274 panic(pr_fmt("Unable to allocate memory\n"));
275
276 data->base = pit_base_addr;
277
278 data->mck = clk_get(NULL, "mck");
279 if (IS_ERR(data->mck))
280 panic(pr_fmt("Unable to get mck clk\n"));
281
282 data->irq = irq;
283
284 at91sam926x_pit_common_init(data);
285}
286
287void __init at91sam926x_ioremap_pit(u32 addr)
288{
289 if (of_have_populated_dt())
290 return;
291
292 pit_base_addr = ioremap(addr, 16);
293
294 if (!pit_base_addr)
295 panic(pr_fmt("Impossible to ioremap PIT\n"));
296}
diff --git a/drivers/clocksource/timer-integrator-ap.c b/drivers/clocksource/timer-integrator-ap.c
new file mode 100644
index 000000000000..b9efd30513d5
--- /dev/null
+++ b/drivers/clocksource/timer-integrator-ap.c
@@ -0,0 +1,210 @@
1/*
2 * Integrator/AP timer driver
3 * Copyright (C) 2000-2003 Deep Blue Solutions Ltd
4 * Copyright (c) 2014, Linaro Limited
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#include <linux/clk.h>
22#include <linux/clocksource.h>
23#include <linux/of_irq.h>
24#include <linux/of_address.h>
25#include <linux/of_platform.h>
26#include <linux/clockchips.h>
27#include <linux/interrupt.h>
28#include <linux/sched_clock.h>
29#include <asm/hardware/arm_timer.h>
30
31static void __iomem * sched_clk_base;
32
33static u64 notrace integrator_read_sched_clock(void)
34{
35 return -readl(sched_clk_base + TIMER_VALUE);
36}
37
38static void integrator_clocksource_init(unsigned long inrate,
39 void __iomem *base)
40{
41 u32 ctrl = TIMER_CTRL_ENABLE | TIMER_CTRL_PERIODIC;
42 unsigned long rate = inrate;
43
44 if (rate >= 1500000) {
45 rate /= 16;
46 ctrl |= TIMER_CTRL_DIV16;
47 }
48
49 writel(0xffff, base + TIMER_LOAD);
50 writel(ctrl, base + TIMER_CTRL);
51
52 clocksource_mmio_init(base + TIMER_VALUE, "timer2",
53 rate, 200, 16, clocksource_mmio_readl_down);
54
55 sched_clk_base = base;
56 sched_clock_register(integrator_read_sched_clock, 16, rate);
57}
58
59static unsigned long timer_reload;
60static void __iomem * clkevt_base;
61
62/*
63 * IRQ handler for the timer
64 */
65static irqreturn_t integrator_timer_interrupt(int irq, void *dev_id)
66{
67 struct clock_event_device *evt = dev_id;
68
69 /* clear the interrupt */
70 writel(1, clkevt_base + TIMER_INTCLR);
71
72 evt->event_handler(evt);
73
74 return IRQ_HANDLED;
75}
76
77static void clkevt_set_mode(enum clock_event_mode mode, struct clock_event_device *evt)
78{
79 u32 ctrl = readl(clkevt_base + TIMER_CTRL) & ~TIMER_CTRL_ENABLE;
80
81 /* Disable timer */
82 writel(ctrl, clkevt_base + TIMER_CTRL);
83
84 switch (mode) {
85 case CLOCK_EVT_MODE_PERIODIC:
86 /* Enable the timer and start the periodic tick */
87 writel(timer_reload, clkevt_base + TIMER_LOAD);
88 ctrl |= TIMER_CTRL_PERIODIC | TIMER_CTRL_ENABLE;
89 writel(ctrl, clkevt_base + TIMER_CTRL);
90 break;
91 case CLOCK_EVT_MODE_ONESHOT:
92 /* Leave the timer disabled, .set_next_event will enable it */
93 ctrl &= ~TIMER_CTRL_PERIODIC;
94 writel(ctrl, clkevt_base + TIMER_CTRL);
95 break;
96 case CLOCK_EVT_MODE_UNUSED:
97 case CLOCK_EVT_MODE_SHUTDOWN:
98 case CLOCK_EVT_MODE_RESUME:
99 default:
100 /* Just leave in disabled state */
101 break;
102 }
103
104}
105
106static int clkevt_set_next_event(unsigned long next, struct clock_event_device *evt)
107{
108 unsigned long ctrl = readl(clkevt_base + TIMER_CTRL);
109
110 writel(ctrl & ~TIMER_CTRL_ENABLE, clkevt_base + TIMER_CTRL);
111 writel(next, clkevt_base + TIMER_LOAD);
112 writel(ctrl | TIMER_CTRL_ENABLE, clkevt_base + TIMER_CTRL);
113
114 return 0;
115}
116
117static struct clock_event_device integrator_clockevent = {
118 .name = "timer1",
119 .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
120 .set_mode = clkevt_set_mode,
121 .set_next_event = clkevt_set_next_event,
122 .rating = 300,
123};
124
125static struct irqaction integrator_timer_irq = {
126 .name = "timer",
127 .flags = IRQF_TIMER | IRQF_IRQPOLL,
128 .handler = integrator_timer_interrupt,
129 .dev_id = &integrator_clockevent,
130};
131
132static void integrator_clockevent_init(unsigned long inrate,
133 void __iomem *base, int irq)
134{
135 unsigned long rate = inrate;
136 unsigned int ctrl = 0;
137
138 clkevt_base = base;
139 /* Calculate and program a divisor */
140 if (rate > 0x100000 * HZ) {
141 rate /= 256;
142 ctrl |= TIMER_CTRL_DIV256;
143 } else if (rate > 0x10000 * HZ) {
144 rate /= 16;
145 ctrl |= TIMER_CTRL_DIV16;
146 }
147 timer_reload = rate / HZ;
148 writel(ctrl, clkevt_base + TIMER_CTRL);
149
150 setup_irq(irq, &integrator_timer_irq);
151 clockevents_config_and_register(&integrator_clockevent,
152 rate,
153 1,
154 0xffffU);
155}
156
157static void __init integrator_ap_timer_init_of(struct device_node *node)
158{
159 const char *path;
160 void __iomem *base;
161 int err;
162 int irq;
163 struct clk *clk;
164 unsigned long rate;
165 struct device_node *pri_node;
166 struct device_node *sec_node;
167
168 base = of_io_request_and_map(node, 0, "integrator-timer");
169 if (!base)
170 return;
171
172 clk = of_clk_get(node, 0);
173 if (IS_ERR(clk)) {
174 pr_err("No clock for %s\n", node->name);
175 return;
176 }
177 clk_prepare_enable(clk);
178 rate = clk_get_rate(clk);
179 writel(0, base + TIMER_CTRL);
180
181 err = of_property_read_string(of_aliases,
182 "arm,timer-primary", &path);
183 if (WARN_ON(err))
184 return;
185 pri_node = of_find_node_by_path(path);
186 err = of_property_read_string(of_aliases,
187 "arm,timer-secondary", &path);
188 if (WARN_ON(err))
189 return;
190 sec_node = of_find_node_by_path(path);
191
192 if (node == pri_node) {
193 /* The primary timer lacks IRQ, use as clocksource */
194 integrator_clocksource_init(rate, base);
195 return;
196 }
197
198 if (node == sec_node) {
199 /* The secondary timer will drive the clock event */
200 irq = irq_of_parse_and_map(node, 0);
201 integrator_clockevent_init(rate, base, irq);
202 return;
203 }
204
205 pr_info("Timer @%p unused\n", base);
206 clk_disable_unprepare(clk);
207}
208
209CLOCKSOURCE_OF_DECLARE(integrator_ap_timer, "arm,integrator-timer",
210 integrator_ap_timer_init_of);
diff --git a/drivers/clocksource/timer-marco.c b/drivers/clocksource/timer-marco.c
index caf7a2030461..361a789d4bee 100644
--- a/drivers/clocksource/timer-marco.c
+++ b/drivers/clocksource/timer-marco.c
@@ -20,8 +20,6 @@
20#include <linux/of_address.h> 20#include <linux/of_address.h>
21#include <linux/sched_clock.h> 21#include <linux/sched_clock.h>
22 22
23#define MARCO_CLOCK_FREQ 1000000
24
25#define SIRFSOC_TIMER_32COUNTER_0_CTRL 0x0000 23#define SIRFSOC_TIMER_32COUNTER_0_CTRL 0x0000
26#define SIRFSOC_TIMER_32COUNTER_1_CTRL 0x0004 24#define SIRFSOC_TIMER_32COUNTER_1_CTRL 0x0004
27#define SIRFSOC_TIMER_MATCH_0 0x0018 25#define SIRFSOC_TIMER_MATCH_0 0x0018
@@ -40,6 +38,8 @@
40 38
41#define SIRFSOC_TIMER_REG_CNT 6 39#define SIRFSOC_TIMER_REG_CNT 6
42 40
41static unsigned long marco_timer_rate;
42
43static const u32 sirfsoc_timer_reg_list[SIRFSOC_TIMER_REG_CNT] = { 43static const u32 sirfsoc_timer_reg_list[SIRFSOC_TIMER_REG_CNT] = {
44 SIRFSOC_TIMER_WATCHDOG_EN, 44 SIRFSOC_TIMER_WATCHDOG_EN,
45 SIRFSOC_TIMER_32COUNTER_0_CTRL, 45 SIRFSOC_TIMER_32COUNTER_0_CTRL,
@@ -195,7 +195,7 @@ static int sirfsoc_local_timer_setup(struct clock_event_device *ce)
195 ce->rating = 200; 195 ce->rating = 200;
196 ce->set_mode = sirfsoc_timer_set_mode; 196 ce->set_mode = sirfsoc_timer_set_mode;
197 ce->set_next_event = sirfsoc_timer_set_next_event; 197 ce->set_next_event = sirfsoc_timer_set_next_event;
198 clockevents_calc_mult_shift(ce, MARCO_CLOCK_FREQ, 60); 198 clockevents_calc_mult_shift(ce, marco_timer_rate, 60);
199 ce->max_delta_ns = clockevent_delta2ns(-2, ce); 199 ce->max_delta_ns = clockevent_delta2ns(-2, ce);
200 ce->min_delta_ns = clockevent_delta2ns(2, ce); 200 ce->min_delta_ns = clockevent_delta2ns(2, ce);
201 ce->cpumask = cpumask_of(cpu); 201 ce->cpumask = cpumask_of(cpu);
@@ -257,7 +257,6 @@ static void __init sirfsoc_clockevent_init(void)
257/* initialize the kernel jiffy timer source */ 257/* initialize the kernel jiffy timer source */
258static void __init sirfsoc_marco_timer_init(struct device_node *np) 258static void __init sirfsoc_marco_timer_init(struct device_node *np)
259{ 259{
260 unsigned long rate;
261 u32 timer_div; 260 u32 timer_div;
262 struct clk *clk; 261 struct clk *clk;
263 262
@@ -266,16 +265,12 @@ static void __init sirfsoc_marco_timer_init(struct device_node *np)
266 265
267 BUG_ON(clk_prepare_enable(clk)); 266 BUG_ON(clk_prepare_enable(clk));
268 267
269 rate = clk_get_rate(clk); 268 marco_timer_rate = clk_get_rate(clk);
270
271 BUG_ON(rate < MARCO_CLOCK_FREQ);
272 BUG_ON(rate % MARCO_CLOCK_FREQ);
273 269
274 /* Initialize the timer dividers */ 270 /* timer dividers: 0, not divided */
275 timer_div = rate / MARCO_CLOCK_FREQ - 1; 271 writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_CTRL);
276 writel_relaxed(timer_div << 16, sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_CTRL); 272 writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_32COUNTER_0_CTRL);
277 writel_relaxed(timer_div << 16, sirfsoc_timer_base + SIRFSOC_TIMER_32COUNTER_0_CTRL); 273 writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_32COUNTER_1_CTRL);
278 writel_relaxed(timer_div << 16, sirfsoc_timer_base + SIRFSOC_TIMER_32COUNTER_1_CTRL);
279 274
280 /* Initialize timer counters to 0 */ 275 /* Initialize timer counters to 0 */
281 writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_LOAD_LO); 276 writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_LOAD_LO);
@@ -288,7 +283,7 @@ static void __init sirfsoc_marco_timer_init(struct device_node *np)
288 /* Clear all interrupts */ 283 /* Clear all interrupts */
289 writel_relaxed(0xFFFF, sirfsoc_timer_base + SIRFSOC_TIMER_INTR_STATUS); 284 writel_relaxed(0xFFFF, sirfsoc_timer_base + SIRFSOC_TIMER_INTR_STATUS);
290 285
291 BUG_ON(clocksource_register_hz(&sirfsoc_clocksource, MARCO_CLOCK_FREQ)); 286 BUG_ON(clocksource_register_hz(&sirfsoc_clocksource, marco_timer_rate));
292 287
293 sirfsoc_clockevent_init(); 288 sirfsoc_clockevent_init();
294} 289}
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 3489f8f5fada..29b2ef5a68b9 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -63,7 +63,6 @@ config CPU_FREQ_DEFAULT_GOV_PERFORMANCE
63 63
64config CPU_FREQ_DEFAULT_GOV_POWERSAVE 64config CPU_FREQ_DEFAULT_GOV_POWERSAVE
65 bool "powersave" 65 bool "powersave"
66 depends on EXPERT
67 select CPU_FREQ_GOV_POWERSAVE 66 select CPU_FREQ_GOV_POWERSAVE
68 help 67 help
69 Use the CPUFreq governor 'powersave' as default. This sets 68 Use the CPUFreq governor 'powersave' as default. This sets
@@ -183,6 +182,8 @@ config CPU_FREQ_GOV_CONSERVATIVE
183 182
184 If in doubt, say N. 183 If in doubt, say N.
185 184
185comment "CPU frequency scaling drivers"
186
186config CPUFREQ_DT 187config CPUFREQ_DT
187 tristate "Generic DT based cpufreq driver" 188 tristate "Generic DT based cpufreq driver"
188 depends on HAVE_CLK && OF 189 depends on HAVE_CLK && OF
@@ -196,19 +197,19 @@ config CPUFREQ_DT
196 197
197 If in doubt, say N. 198 If in doubt, say N.
198 199
199menu "x86 CPU frequency scaling drivers" 200if X86
200depends on X86
201source "drivers/cpufreq/Kconfig.x86" 201source "drivers/cpufreq/Kconfig.x86"
202endmenu 202endif
203 203
204menu "ARM CPU frequency scaling drivers" 204if ARM || ARM64
205depends on ARM || ARM64
206source "drivers/cpufreq/Kconfig.arm" 205source "drivers/cpufreq/Kconfig.arm"
207endmenu 206endif
208 207
209menu "AVR32 CPU frequency scaling drivers" 208if PPC32 || PPC64
210depends on AVR32 209source "drivers/cpufreq/Kconfig.powerpc"
210endif
211 211
212if AVR32
212config AVR32_AT32AP_CPUFREQ 213config AVR32_AT32AP_CPUFREQ
213 bool "CPU frequency driver for AT32AP" 214 bool "CPU frequency driver for AT32AP"
214 depends on PLATFORM_AT32AP 215 depends on PLATFORM_AT32AP
@@ -216,12 +217,9 @@ config AVR32_AT32AP_CPUFREQ
216 help 217 help
217 This enables the CPU frequency driver for AT32AP processors. 218 This enables the CPU frequency driver for AT32AP processors.
218 If in doubt, say N. 219 If in doubt, say N.
220endif
219 221
220endmenu 222if IA64
221
222menu "CPUFreq processor drivers"
223depends on IA64
224
225config IA64_ACPI_CPUFREQ 223config IA64_ACPI_CPUFREQ
226 tristate "ACPI Processor P-States driver" 224 tristate "ACPI Processor P-States driver"
227 depends on ACPI_PROCESSOR 225 depends on ACPI_PROCESSOR
@@ -232,12 +230,9 @@ config IA64_ACPI_CPUFREQ
232 For details, take a look at <file:Documentation/cpu-freq/>. 230 For details, take a look at <file:Documentation/cpu-freq/>.
233 231
234 If in doubt, say N. 232 If in doubt, say N.
233endif
235 234
236endmenu 235if MIPS
237
238menu "MIPS CPUFreq processor drivers"
239depends on MIPS
240
241config LOONGSON2_CPUFREQ 236config LOONGSON2_CPUFREQ
242 tristate "Loongson2 CPUFreq Driver" 237 tristate "Loongson2 CPUFreq Driver"
243 help 238 help
@@ -250,15 +245,18 @@ config LOONGSON2_CPUFREQ
250 245
251 If in doubt, say N. 246 If in doubt, say N.
252 247
253endmenu 248config LOONGSON1_CPUFREQ
249 tristate "Loongson1 CPUFreq Driver"
250 help
251 This option adds a CPUFreq driver for loongson1 processors which
252 support software configurable cpu frequency.
254 253
255menu "PowerPC CPU frequency scaling drivers" 254 For details, take a look at <file:Documentation/cpu-freq/>.
256depends on PPC32 || PPC64
257source "drivers/cpufreq/Kconfig.powerpc"
258endmenu
259 255
260menu "SPARC CPU frequency scaling drivers" 256 If in doubt, say N.
261depends on SPARC64 257endif
258
259if SPARC64
262config SPARC_US3_CPUFREQ 260config SPARC_US3_CPUFREQ
263 tristate "UltraSPARC-III CPU Frequency driver" 261 tristate "UltraSPARC-III CPU Frequency driver"
264 help 262 help
@@ -276,10 +274,9 @@ config SPARC_US2E_CPUFREQ
276 For details, take a look at <file:Documentation/cpu-freq>. 274 For details, take a look at <file:Documentation/cpu-freq>.
277 275
278 If in doubt, say N. 276 If in doubt, say N.
279endmenu 277endif
280 278
281menu "SH CPU Frequency scaling" 279if SUPERH
282depends on SUPERH
283config SH_CPU_FREQ 280config SH_CPU_FREQ
284 tristate "SuperH CPU Frequency driver" 281 tristate "SuperH CPU Frequency driver"
285 help 282 help
@@ -293,7 +290,7 @@ config SH_CPU_FREQ
293 For details, take a look at <file:Documentation/cpu-freq>. 290 For details, take a look at <file:Documentation/cpu-freq>.
294 291
295 If unsure, say N. 292 If unsure, say N.
296endmenu 293endif
297 294
298endif 295endif
299endmenu 296endmenu
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 83a75dc84761..0f9a2c3c0e0d 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -247,3 +247,11 @@ config ARM_TEGRA_CPUFREQ
247 default y 247 default y
248 help 248 help
249 This adds the CPUFreq driver support for TEGRA SOCs. 249 This adds the CPUFreq driver support for TEGRA SOCs.
250
251config ARM_PXA2xx_CPUFREQ
252 tristate "Intel PXA2xx CPUfreq driver"
253 depends on PXA27x || PXA25x
254 help
255 This add the CPUFreq driver support for Intel PXA2xx SOCs.
256
257 If in doubt, say N.
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 40c53dc1937e..b3ca7b0b2c33 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -61,8 +61,7 @@ obj-$(CONFIG_ARM_IMX6Q_CPUFREQ) += imx6q-cpufreq.o
61obj-$(CONFIG_ARM_INTEGRATOR) += integrator-cpufreq.o 61obj-$(CONFIG_ARM_INTEGRATOR) += integrator-cpufreq.o
62obj-$(CONFIG_ARM_KIRKWOOD_CPUFREQ) += kirkwood-cpufreq.o 62obj-$(CONFIG_ARM_KIRKWOOD_CPUFREQ) += kirkwood-cpufreq.o
63obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o 63obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o
64obj-$(CONFIG_PXA25x) += pxa2xx-cpufreq.o 64obj-$(CONFIG_ARM_PXA2xx_CPUFREQ) += pxa2xx-cpufreq.o
65obj-$(CONFIG_PXA27x) += pxa2xx-cpufreq.o
66obj-$(CONFIG_PXA3xx) += pxa3xx-cpufreq.o 65obj-$(CONFIG_PXA3xx) += pxa3xx-cpufreq.o
67obj-$(CONFIG_ARM_S3C24XX_CPUFREQ) += s3c24xx-cpufreq.o 66obj-$(CONFIG_ARM_S3C24XX_CPUFREQ) += s3c24xx-cpufreq.o
68obj-$(CONFIG_ARM_S3C24XX_CPUFREQ_DEBUGFS) += s3c24xx-cpufreq-debugfs.o 67obj-$(CONFIG_ARM_S3C24XX_CPUFREQ_DEBUGFS) += s3c24xx-cpufreq-debugfs.o
@@ -98,6 +97,7 @@ obj-$(CONFIG_CRIS_MACH_ARTPEC3) += cris-artpec3-cpufreq.o
98obj-$(CONFIG_ETRAXFS) += cris-etraxfs-cpufreq.o 97obj-$(CONFIG_ETRAXFS) += cris-etraxfs-cpufreq.o
99obj-$(CONFIG_IA64_ACPI_CPUFREQ) += ia64-acpi-cpufreq.o 98obj-$(CONFIG_IA64_ACPI_CPUFREQ) += ia64-acpi-cpufreq.o
100obj-$(CONFIG_LOONGSON2_CPUFREQ) += loongson2_cpufreq.o 99obj-$(CONFIG_LOONGSON2_CPUFREQ) += loongson2_cpufreq.o
100obj-$(CONFIG_LOONGSON1_CPUFREQ) += ls1x-cpufreq.o
101obj-$(CONFIG_SH_CPU_FREQ) += sh-cpufreq.o 101obj-$(CONFIG_SH_CPU_FREQ) += sh-cpufreq.o
102obj-$(CONFIG_SPARC_US2E_CPUFREQ) += sparc-us2e-cpufreq.o 102obj-$(CONFIG_SPARC_US2E_CPUFREQ) += sparc-us2e-cpufreq.o
103obj-$(CONFIG_SPARC_US3_CPUFREQ) += sparc-us3-cpufreq.o 103obj-$(CONFIG_SPARC_US3_CPUFREQ) += sparc-us3-cpufreq.o
diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c
index a46c223c2506..e1a6ba66a7f5 100644
--- a/drivers/cpufreq/arm_big_little.c
+++ b/drivers/cpufreq/arm_big_little.c
@@ -289,6 +289,8 @@ static void _put_cluster_clk_and_freq_table(struct device *cpu_dev)
289 289
290 clk_put(clk[cluster]); 290 clk_put(clk[cluster]);
291 dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]); 291 dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
292 if (arm_bL_ops->free_opp_table)
293 arm_bL_ops->free_opp_table(cpu_dev);
292 dev_dbg(cpu_dev, "%s: cluster: %d\n", __func__, cluster); 294 dev_dbg(cpu_dev, "%s: cluster: %d\n", __func__, cluster);
293} 295}
294 296
@@ -337,7 +339,7 @@ static int _get_cluster_clk_and_freq_table(struct device *cpu_dev)
337 if (ret) { 339 if (ret) {
338 dev_err(cpu_dev, "%s: failed to init cpufreq table, cpu: %d, err: %d\n", 340 dev_err(cpu_dev, "%s: failed to init cpufreq table, cpu: %d, err: %d\n",
339 __func__, cpu_dev->id, ret); 341 __func__, cpu_dev->id, ret);
340 goto out; 342 goto free_opp_table;
341 } 343 }
342 344
343 name[12] = cluster + '0'; 345 name[12] = cluster + '0';
@@ -354,6 +356,9 @@ static int _get_cluster_clk_and_freq_table(struct device *cpu_dev)
354 ret = PTR_ERR(clk[cluster]); 356 ret = PTR_ERR(clk[cluster]);
355 dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]); 357 dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
356 358
359free_opp_table:
360 if (arm_bL_ops->free_opp_table)
361 arm_bL_ops->free_opp_table(cpu_dev);
357out: 362out:
358 dev_err(cpu_dev, "%s: Failed to get data for cluster: %d\n", __func__, 363 dev_err(cpu_dev, "%s: Failed to get data for cluster: %d\n", __func__,
359 cluster); 364 cluster);
diff --git a/drivers/cpufreq/arm_big_little.h b/drivers/cpufreq/arm_big_little.h
index 70f18fc12d4a..a211f7db9d32 100644
--- a/drivers/cpufreq/arm_big_little.h
+++ b/drivers/cpufreq/arm_big_little.h
@@ -25,13 +25,16 @@
25 25
26struct cpufreq_arm_bL_ops { 26struct cpufreq_arm_bL_ops {
27 char name[CPUFREQ_NAME_LEN]; 27 char name[CPUFREQ_NAME_LEN];
28 int (*get_transition_latency)(struct device *cpu_dev);
29 28
30 /* 29 /*
31 * This must set opp table for cpu_dev in a similar way as done by 30 * This must set opp table for cpu_dev in a similar way as done by
32 * of_init_opp_table(). 31 * of_init_opp_table().
33 */ 32 */
34 int (*init_opp_table)(struct device *cpu_dev); 33 int (*init_opp_table)(struct device *cpu_dev);
34
35 /* Optional */
36 int (*get_transition_latency)(struct device *cpu_dev);
37 void (*free_opp_table)(struct device *cpu_dev);
35}; 38};
36 39
37int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops); 40int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops);
diff --git a/drivers/cpufreq/arm_big_little_dt.c b/drivers/cpufreq/arm_big_little_dt.c
index 4550f6976768..ef0b3f1324d5 100644
--- a/drivers/cpufreq/arm_big_little_dt.c
+++ b/drivers/cpufreq/arm_big_little_dt.c
@@ -82,6 +82,7 @@ static struct cpufreq_arm_bL_ops dt_bL_ops = {
82 .name = "dt-bl", 82 .name = "dt-bl",
83 .get_transition_latency = dt_get_transition_latency, 83 .get_transition_latency = dt_get_transition_latency,
84 .init_opp_table = dt_init_opp_table, 84 .init_opp_table = dt_init_opp_table,
85 .free_opp_table = of_free_opp_table,
85}; 86};
86 87
87static int generic_bL_probe(struct platform_device *pdev) 88static int generic_bL_probe(struct platform_device *pdev)
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index f657c571b18e..9bc2720628a4 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -58,6 +58,8 @@ static int set_target(struct cpufreq_policy *policy, unsigned int index)
58 old_freq = clk_get_rate(cpu_clk) / 1000; 58 old_freq = clk_get_rate(cpu_clk) / 1000;
59 59
60 if (!IS_ERR(cpu_reg)) { 60 if (!IS_ERR(cpu_reg)) {
61 unsigned long opp_freq;
62
61 rcu_read_lock(); 63 rcu_read_lock();
62 opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_Hz); 64 opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_Hz);
63 if (IS_ERR(opp)) { 65 if (IS_ERR(opp)) {
@@ -67,13 +69,16 @@ static int set_target(struct cpufreq_policy *policy, unsigned int index)
67 return PTR_ERR(opp); 69 return PTR_ERR(opp);
68 } 70 }
69 volt = dev_pm_opp_get_voltage(opp); 71 volt = dev_pm_opp_get_voltage(opp);
72 opp_freq = dev_pm_opp_get_freq(opp);
70 rcu_read_unlock(); 73 rcu_read_unlock();
71 tol = volt * priv->voltage_tolerance / 100; 74 tol = volt * priv->voltage_tolerance / 100;
72 volt_old = regulator_get_voltage(cpu_reg); 75 volt_old = regulator_get_voltage(cpu_reg);
76 dev_dbg(cpu_dev, "Found OPP: %ld kHz, %ld uV\n",
77 opp_freq / 1000, volt);
73 } 78 }
74 79
75 dev_dbg(cpu_dev, "%u MHz, %ld mV --> %u MHz, %ld mV\n", 80 dev_dbg(cpu_dev, "%u MHz, %ld mV --> %u MHz, %ld mV\n",
76 old_freq / 1000, volt_old ? volt_old / 1000 : -1, 81 old_freq / 1000, (volt_old > 0) ? volt_old / 1000 : -1,
77 new_freq / 1000, volt ? volt / 1000 : -1); 82 new_freq / 1000, volt ? volt / 1000 : -1);
78 83
79 /* scaling up? scale voltage before frequency */ 84 /* scaling up? scale voltage before frequency */
@@ -89,7 +94,7 @@ static int set_target(struct cpufreq_policy *policy, unsigned int index)
89 ret = clk_set_rate(cpu_clk, freq_exact); 94 ret = clk_set_rate(cpu_clk, freq_exact);
90 if (ret) { 95 if (ret) {
91 dev_err(cpu_dev, "failed to set clock rate: %d\n", ret); 96 dev_err(cpu_dev, "failed to set clock rate: %d\n", ret);
92 if (!IS_ERR(cpu_reg)) 97 if (!IS_ERR(cpu_reg) && volt_old > 0)
93 regulator_set_voltage_tol(cpu_reg, volt_old, tol); 98 regulator_set_voltage_tol(cpu_reg, volt_old, tol);
94 return ret; 99 return ret;
95 } 100 }
@@ -181,7 +186,6 @@ static int cpufreq_init(struct cpufreq_policy *policy)
181{ 186{
182 struct cpufreq_dt_platform_data *pd; 187 struct cpufreq_dt_platform_data *pd;
183 struct cpufreq_frequency_table *freq_table; 188 struct cpufreq_frequency_table *freq_table;
184 struct thermal_cooling_device *cdev;
185 struct device_node *np; 189 struct device_node *np;
186 struct private_data *priv; 190 struct private_data *priv;
187 struct device *cpu_dev; 191 struct device *cpu_dev;
@@ -210,7 +214,7 @@ static int cpufreq_init(struct cpufreq_policy *policy)
210 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 214 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
211 if (!priv) { 215 if (!priv) {
212 ret = -ENOMEM; 216 ret = -ENOMEM;
213 goto out_put_node; 217 goto out_free_opp;
214 } 218 }
215 219
216 of_property_read_u32(np, "voltage-tolerance", &priv->voltage_tolerance); 220 of_property_read_u32(np, "voltage-tolerance", &priv->voltage_tolerance);
@@ -264,20 +268,6 @@ static int cpufreq_init(struct cpufreq_policy *policy)
264 goto out_free_priv; 268 goto out_free_priv;
265 } 269 }
266 270
267 /*
268 * For now, just loading the cooling device;
269 * thermal DT code takes care of matching them.
270 */
271 if (of_find_property(np, "#cooling-cells", NULL)) {
272 cdev = of_cpufreq_cooling_register(np, cpu_present_mask);
273 if (IS_ERR(cdev))
274 dev_err(cpu_dev,
275 "running cpufreq without cooling device: %ld\n",
276 PTR_ERR(cdev));
277 else
278 priv->cdev = cdev;
279 }
280
281 priv->cpu_dev = cpu_dev; 271 priv->cpu_dev = cpu_dev;
282 priv->cpu_reg = cpu_reg; 272 priv->cpu_reg = cpu_reg;
283 policy->driver_data = priv; 273 policy->driver_data = priv;
@@ -287,7 +277,7 @@ static int cpufreq_init(struct cpufreq_policy *policy)
287 if (ret) { 277 if (ret) {
288 dev_err(cpu_dev, "%s: invalid frequency table: %d\n", __func__, 278 dev_err(cpu_dev, "%s: invalid frequency table: %d\n", __func__,
289 ret); 279 ret);
290 goto out_cooling_unregister; 280 goto out_free_cpufreq_table;
291 } 281 }
292 282
293 policy->cpuinfo.transition_latency = transition_latency; 283 policy->cpuinfo.transition_latency = transition_latency;
@@ -300,12 +290,12 @@ static int cpufreq_init(struct cpufreq_policy *policy)
300 290
301 return 0; 291 return 0;
302 292
303out_cooling_unregister: 293out_free_cpufreq_table:
304 cpufreq_cooling_unregister(priv->cdev);
305 dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table); 294 dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
306out_free_priv: 295out_free_priv:
307 kfree(priv); 296 kfree(priv);
308out_put_node: 297out_free_opp:
298 of_free_opp_table(cpu_dev);
309 of_node_put(np); 299 of_node_put(np);
310out_put_reg_clk: 300out_put_reg_clk:
311 clk_put(cpu_clk); 301 clk_put(cpu_clk);
@@ -319,8 +309,10 @@ static int cpufreq_exit(struct cpufreq_policy *policy)
319{ 309{
320 struct private_data *priv = policy->driver_data; 310 struct private_data *priv = policy->driver_data;
321 311
322 cpufreq_cooling_unregister(priv->cdev); 312 if (priv->cdev)
313 cpufreq_cooling_unregister(priv->cdev);
323 dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table); 314 dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
315 of_free_opp_table(priv->cpu_dev);
324 clk_put(policy->clk); 316 clk_put(policy->clk);
325 if (!IS_ERR(priv->cpu_reg)) 317 if (!IS_ERR(priv->cpu_reg))
326 regulator_put(priv->cpu_reg); 318 regulator_put(priv->cpu_reg);
@@ -329,6 +321,33 @@ static int cpufreq_exit(struct cpufreq_policy *policy)
329 return 0; 321 return 0;
330} 322}
331 323
324static void cpufreq_ready(struct cpufreq_policy *policy)
325{
326 struct private_data *priv = policy->driver_data;
327 struct device_node *np = of_node_get(priv->cpu_dev->of_node);
328
329 if (WARN_ON(!np))
330 return;
331
332 /*
333 * For now, just loading the cooling device;
334 * thermal DT code takes care of matching them.
335 */
336 if (of_find_property(np, "#cooling-cells", NULL)) {
337 priv->cdev = of_cpufreq_cooling_register(np,
338 policy->related_cpus);
339 if (IS_ERR(priv->cdev)) {
340 dev_err(priv->cpu_dev,
341 "running cpufreq without cooling device: %ld\n",
342 PTR_ERR(priv->cdev));
343
344 priv->cdev = NULL;
345 }
346 }
347
348 of_node_put(np);
349}
350
332static struct cpufreq_driver dt_cpufreq_driver = { 351static struct cpufreq_driver dt_cpufreq_driver = {
333 .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK, 352 .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
334 .verify = cpufreq_generic_frequency_table_verify, 353 .verify = cpufreq_generic_frequency_table_verify,
@@ -336,6 +355,7 @@ static struct cpufreq_driver dt_cpufreq_driver = {
336 .get = cpufreq_generic_get, 355 .get = cpufreq_generic_get,
337 .init = cpufreq_init, 356 .init = cpufreq_init,
338 .exit = cpufreq_exit, 357 .exit = cpufreq_exit,
358 .ready = cpufreq_ready,
339 .name = "cpufreq-dt", 359 .name = "cpufreq-dt",
340 .attr = cpufreq_generic_attr, 360 .attr = cpufreq_generic_attr,
341}; 361};
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 4473eba1d6b0..a09a29c312a9 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -535,7 +535,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
535static ssize_t store_##file_name \ 535static ssize_t store_##file_name \
536(struct cpufreq_policy *policy, const char *buf, size_t count) \ 536(struct cpufreq_policy *policy, const char *buf, size_t count) \
537{ \ 537{ \
538 int ret; \ 538 int ret, temp; \
539 struct cpufreq_policy new_policy; \ 539 struct cpufreq_policy new_policy; \
540 \ 540 \
541 ret = cpufreq_get_policy(&new_policy, policy->cpu); \ 541 ret = cpufreq_get_policy(&new_policy, policy->cpu); \
@@ -546,8 +546,10 @@ static ssize_t store_##file_name \
546 if (ret != 1) \ 546 if (ret != 1) \
547 return -EINVAL; \ 547 return -EINVAL; \
548 \ 548 \
549 temp = new_policy.object; \
549 ret = cpufreq_set_policy(policy, &new_policy); \ 550 ret = cpufreq_set_policy(policy, &new_policy); \
550 policy->user_policy.object = policy->object; \ 551 if (!ret) \
552 policy->user_policy.object = temp; \
551 \ 553 \
552 return ret ? ret : count; \ 554 return ret ? ret : count; \
553} 555}
@@ -898,46 +900,31 @@ static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
898 struct freq_attr **drv_attr; 900 struct freq_attr **drv_attr;
899 int ret = 0; 901 int ret = 0;
900 902
901 /* prepare interface data */
902 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
903 &dev->kobj, "cpufreq");
904 if (ret)
905 return ret;
906
907 /* set up files for this cpu device */ 903 /* set up files for this cpu device */
908 drv_attr = cpufreq_driver->attr; 904 drv_attr = cpufreq_driver->attr;
909 while ((drv_attr) && (*drv_attr)) { 905 while ((drv_attr) && (*drv_attr)) {
910 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr)); 906 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
911 if (ret) 907 if (ret)
912 goto err_out_kobj_put; 908 return ret;
913 drv_attr++; 909 drv_attr++;
914 } 910 }
915 if (cpufreq_driver->get) { 911 if (cpufreq_driver->get) {
916 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr); 912 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
917 if (ret) 913 if (ret)
918 goto err_out_kobj_put; 914 return ret;
919 } 915 }
920 916
921 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr); 917 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
922 if (ret) 918 if (ret)
923 goto err_out_kobj_put; 919 return ret;
924 920
925 if (cpufreq_driver->bios_limit) { 921 if (cpufreq_driver->bios_limit) {
926 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr); 922 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
927 if (ret) 923 if (ret)
928 goto err_out_kobj_put; 924 return ret;
929 } 925 }
930 926
931 ret = cpufreq_add_dev_symlink(policy); 927 return cpufreq_add_dev_symlink(policy);
932 if (ret)
933 goto err_out_kobj_put;
934
935 return ret;
936
937err_out_kobj_put:
938 kobject_put(&policy->kobj);
939 wait_for_completion(&policy->kobj_unregister);
940 return ret;
941} 928}
942 929
943static void cpufreq_init_policy(struct cpufreq_policy *policy) 930static void cpufreq_init_policy(struct cpufreq_policy *policy)
@@ -1196,6 +1183,8 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1196 goto err_set_policy_cpu; 1183 goto err_set_policy_cpu;
1197 } 1184 }
1198 1185
1186 down_write(&policy->rwsem);
1187
1199 /* related cpus should atleast have policy->cpus */ 1188 /* related cpus should atleast have policy->cpus */
1200 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus); 1189 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
1201 1190
@@ -1208,9 +1197,17 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1208 if (!recover_policy) { 1197 if (!recover_policy) {
1209 policy->user_policy.min = policy->min; 1198 policy->user_policy.min = policy->min;
1210 policy->user_policy.max = policy->max; 1199 policy->user_policy.max = policy->max;
1200
1201 /* prepare interface data */
1202 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
1203 &dev->kobj, "cpufreq");
1204 if (ret) {
1205 pr_err("%s: failed to init policy->kobj: %d\n",
1206 __func__, ret);
1207 goto err_init_policy_kobj;
1208 }
1211 } 1209 }
1212 1210
1213 down_write(&policy->rwsem);
1214 write_lock_irqsave(&cpufreq_driver_lock, flags); 1211 write_lock_irqsave(&cpufreq_driver_lock, flags);
1215 for_each_cpu(j, policy->cpus) 1212 for_each_cpu(j, policy->cpus)
1216 per_cpu(cpufreq_cpu_data, j) = policy; 1213 per_cpu(cpufreq_cpu_data, j) = policy;
@@ -1288,8 +1285,13 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1288 up_write(&policy->rwsem); 1285 up_write(&policy->rwsem);
1289 1286
1290 kobject_uevent(&policy->kobj, KOBJ_ADD); 1287 kobject_uevent(&policy->kobj, KOBJ_ADD);
1288
1291 up_read(&cpufreq_rwsem); 1289 up_read(&cpufreq_rwsem);
1292 1290
1291 /* Callback for handling stuff after policy is ready */
1292 if (cpufreq_driver->ready)
1293 cpufreq_driver->ready(policy);
1294
1293 pr_debug("initialization complete\n"); 1295 pr_debug("initialization complete\n");
1294 1296
1295 return 0; 1297 return 0;
@@ -1301,6 +1303,11 @@ err_get_freq:
1301 per_cpu(cpufreq_cpu_data, j) = NULL; 1303 per_cpu(cpufreq_cpu_data, j) = NULL;
1302 write_unlock_irqrestore(&cpufreq_driver_lock, flags); 1304 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1303 1305
1306 if (!recover_policy) {
1307 kobject_put(&policy->kobj);
1308 wait_for_completion(&policy->kobj_unregister);
1309 }
1310err_init_policy_kobj:
1304 up_write(&policy->rwsem); 1311 up_write(&policy->rwsem);
1305 1312
1306 if (cpufreq_driver->exit) 1313 if (cpufreq_driver->exit)
diff --git a/drivers/cpufreq/exynos5440-cpufreq.c b/drivers/cpufreq/exynos5440-cpufreq.c
index f33f25b483ca..27a57ed9eb2c 100644
--- a/drivers/cpufreq/exynos5440-cpufreq.c
+++ b/drivers/cpufreq/exynos5440-cpufreq.c
@@ -371,7 +371,7 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
371 if (ret) { 371 if (ret) {
372 dev_err(dvfs_info->dev, 372 dev_err(dvfs_info->dev,
373 "failed to init cpufreq table: %d\n", ret); 373 "failed to init cpufreq table: %d\n", ret);
374 goto err_put_node; 374 goto err_free_opp;
375 } 375 }
376 dvfs_info->freq_count = dev_pm_opp_get_opp_count(dvfs_info->dev); 376 dvfs_info->freq_count = dev_pm_opp_get_opp_count(dvfs_info->dev);
377 exynos_sort_descend_freq_table(); 377 exynos_sort_descend_freq_table();
@@ -423,6 +423,8 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
423 423
424err_free_table: 424err_free_table:
425 dev_pm_opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table); 425 dev_pm_opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
426err_free_opp:
427 of_free_opp_table(dvfs_info->dev);
426err_put_node: 428err_put_node:
427 of_node_put(np); 429 of_node_put(np);
428 dev_err(&pdev->dev, "%s: failed initialization\n", __func__); 430 dev_err(&pdev->dev, "%s: failed initialization\n", __func__);
@@ -433,6 +435,7 @@ static int exynos_cpufreq_remove(struct platform_device *pdev)
433{ 435{
434 cpufreq_unregister_driver(&exynos_driver); 436 cpufreq_unregister_driver(&exynos_driver);
435 dev_pm_opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table); 437 dev_pm_opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
438 of_free_opp_table(dvfs_info->dev);
436 return 0; 439 return 0;
437} 440}
438 441
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index c2d30765bf3d..5da1d131f770 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -31,6 +31,7 @@ static struct clk *step_clk;
31static struct clk *pll2_pfd2_396m_clk; 31static struct clk *pll2_pfd2_396m_clk;
32 32
33static struct device *cpu_dev; 33static struct device *cpu_dev;
34static bool free_opp;
34static struct cpufreq_frequency_table *freq_table; 35static struct cpufreq_frequency_table *freq_table;
35static unsigned int transition_latency; 36static unsigned int transition_latency;
36 37
@@ -207,11 +208,14 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
207 goto put_reg; 208 goto put_reg;
208 } 209 }
209 210
211 /* Because we have added the OPPs here, we must free them */
212 free_opp = true;
213
210 num = dev_pm_opp_get_opp_count(cpu_dev); 214 num = dev_pm_opp_get_opp_count(cpu_dev);
211 if (num < 0) { 215 if (num < 0) {
212 ret = num; 216 ret = num;
213 dev_err(cpu_dev, "no OPP table is found: %d\n", ret); 217 dev_err(cpu_dev, "no OPP table is found: %d\n", ret);
214 goto put_reg; 218 goto out_free_opp;
215 } 219 }
216 } 220 }
217 221
@@ -306,6 +310,9 @@ soc_opp_out:
306 310
307free_freq_table: 311free_freq_table:
308 dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table); 312 dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
313out_free_opp:
314 if (free_opp)
315 of_free_opp_table(cpu_dev);
309put_reg: 316put_reg:
310 if (!IS_ERR(arm_reg)) 317 if (!IS_ERR(arm_reg))
311 regulator_put(arm_reg); 318 regulator_put(arm_reg);
@@ -332,6 +339,8 @@ static int imx6q_cpufreq_remove(struct platform_device *pdev)
332{ 339{
333 cpufreq_unregister_driver(&imx6q_cpufreq_driver); 340 cpufreq_unregister_driver(&imx6q_cpufreq_driver);
334 dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table); 341 dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
342 if (free_opp)
343 of_free_opp_table(cpu_dev);
335 regulator_put(arm_reg); 344 regulator_put(arm_reg);
336 if (!IS_ERR(pu_reg)) 345 if (!IS_ERR(pu_reg))
337 regulator_put(pu_reg); 346 regulator_put(pu_reg);
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 27bb6d3877ed..1405b393c93d 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -137,6 +137,7 @@ struct cpu_defaults {
137 137
138static struct pstate_adjust_policy pid_params; 138static struct pstate_adjust_policy pid_params;
139static struct pstate_funcs pstate_funcs; 139static struct pstate_funcs pstate_funcs;
140static int hwp_active;
140 141
141struct perf_limits { 142struct perf_limits {
142 int no_turbo; 143 int no_turbo;
@@ -244,6 +245,34 @@ static inline void update_turbo_state(void)
244 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate); 245 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
245} 246}
246 247
248#define PCT_TO_HWP(x) (x * 255 / 100)
249static void intel_pstate_hwp_set(void)
250{
251 int min, max, cpu;
252 u64 value, freq;
253
254 get_online_cpus();
255
256 for_each_online_cpu(cpu) {
257 rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
258 min = PCT_TO_HWP(limits.min_perf_pct);
259 value &= ~HWP_MIN_PERF(~0L);
260 value |= HWP_MIN_PERF(min);
261
262 max = PCT_TO_HWP(limits.max_perf_pct);
263 if (limits.no_turbo) {
264 rdmsrl( MSR_HWP_CAPABILITIES, freq);
265 max = HWP_GUARANTEED_PERF(freq);
266 }
267
268 value &= ~HWP_MAX_PERF(~0L);
269 value |= HWP_MAX_PERF(max);
270 wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
271 }
272
273 put_online_cpus();
274}
275
247/************************** debugfs begin ************************/ 276/************************** debugfs begin ************************/
248static int pid_param_set(void *data, u64 val) 277static int pid_param_set(void *data, u64 val)
249{ 278{
@@ -279,6 +308,8 @@ static void __init intel_pstate_debug_expose_params(void)
279 struct dentry *debugfs_parent; 308 struct dentry *debugfs_parent;
280 int i = 0; 309 int i = 0;
281 310
311 if (hwp_active)
312 return;
282 debugfs_parent = debugfs_create_dir("pstate_snb", NULL); 313 debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
283 if (IS_ERR_OR_NULL(debugfs_parent)) 314 if (IS_ERR_OR_NULL(debugfs_parent))
284 return; 315 return;
@@ -329,8 +360,12 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
329 pr_warn("Turbo disabled by BIOS or unavailable on processor\n"); 360 pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
330 return -EPERM; 361 return -EPERM;
331 } 362 }
363
332 limits.no_turbo = clamp_t(int, input, 0, 1); 364 limits.no_turbo = clamp_t(int, input, 0, 1);
333 365
366 if (hwp_active)
367 intel_pstate_hwp_set();
368
334 return count; 369 return count;
335} 370}
336 371
@@ -348,6 +383,8 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
348 limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct); 383 limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
349 limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100)); 384 limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
350 385
386 if (hwp_active)
387 intel_pstate_hwp_set();
351 return count; 388 return count;
352} 389}
353 390
@@ -363,6 +400,8 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
363 limits.min_perf_pct = clamp_t(int, input, 0 , 100); 400 limits.min_perf_pct = clamp_t(int, input, 0 , 100);
364 limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100)); 401 limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
365 402
403 if (hwp_active)
404 intel_pstate_hwp_set();
366 return count; 405 return count;
367} 406}
368 407
@@ -395,8 +434,16 @@ static void __init intel_pstate_sysfs_expose_params(void)
395 rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group); 434 rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group);
396 BUG_ON(rc); 435 BUG_ON(rc);
397} 436}
398
399/************************** sysfs end ************************/ 437/************************** sysfs end ************************/
438
439static void intel_pstate_hwp_enable(void)
440{
441 hwp_active++;
442 pr_info("intel_pstate HWP enabled\n");
443
444 wrmsrl( MSR_PM_ENABLE, 0x1);
445}
446
400static int byt_get_min_pstate(void) 447static int byt_get_min_pstate(void)
401{ 448{
402 u64 value; 449 u64 value;
@@ -648,6 +695,14 @@ static inline void intel_pstate_sample(struct cpudata *cpu)
648 cpu->prev_mperf = mperf; 695 cpu->prev_mperf = mperf;
649} 696}
650 697
698static inline void intel_hwp_set_sample_time(struct cpudata *cpu)
699{
700 int delay;
701
702 delay = msecs_to_jiffies(50);
703 mod_timer_pinned(&cpu->timer, jiffies + delay);
704}
705
651static inline void intel_pstate_set_sample_time(struct cpudata *cpu) 706static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
652{ 707{
653 int delay; 708 int delay;
@@ -694,6 +749,14 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
694 intel_pstate_set_pstate(cpu, cpu->pstate.current_pstate - ctl); 749 intel_pstate_set_pstate(cpu, cpu->pstate.current_pstate - ctl);
695} 750}
696 751
752static void intel_hwp_timer_func(unsigned long __data)
753{
754 struct cpudata *cpu = (struct cpudata *) __data;
755
756 intel_pstate_sample(cpu);
757 intel_hwp_set_sample_time(cpu);
758}
759
697static void intel_pstate_timer_func(unsigned long __data) 760static void intel_pstate_timer_func(unsigned long __data)
698{ 761{
699 struct cpudata *cpu = (struct cpudata *) __data; 762 struct cpudata *cpu = (struct cpudata *) __data;
@@ -730,6 +793,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
730 ICPU(0x3f, core_params), 793 ICPU(0x3f, core_params),
731 ICPU(0x45, core_params), 794 ICPU(0x45, core_params),
732 ICPU(0x46, core_params), 795 ICPU(0x46, core_params),
796 ICPU(0x47, core_params),
733 ICPU(0x4c, byt_params), 797 ICPU(0x4c, byt_params),
734 ICPU(0x4f, core_params), 798 ICPU(0x4f, core_params),
735 ICPU(0x56, core_params), 799 ICPU(0x56, core_params),
@@ -737,6 +801,11 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
737}; 801};
738MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids); 802MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
739 803
804static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] = {
805 ICPU(0x56, core_params),
806 {}
807};
808
740static int intel_pstate_init_cpu(unsigned int cpunum) 809static int intel_pstate_init_cpu(unsigned int cpunum)
741{ 810{
742 struct cpudata *cpu; 811 struct cpudata *cpu;
@@ -753,9 +822,14 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
753 intel_pstate_get_cpu_pstates(cpu); 822 intel_pstate_get_cpu_pstates(cpu);
754 823
755 init_timer_deferrable(&cpu->timer); 824 init_timer_deferrable(&cpu->timer);
756 cpu->timer.function = intel_pstate_timer_func;
757 cpu->timer.data = (unsigned long)cpu; 825 cpu->timer.data = (unsigned long)cpu;
758 cpu->timer.expires = jiffies + HZ/100; 826 cpu->timer.expires = jiffies + HZ/100;
827
828 if (!hwp_active)
829 cpu->timer.function = intel_pstate_timer_func;
830 else
831 cpu->timer.function = intel_hwp_timer_func;
832
759 intel_pstate_busy_pid_reset(cpu); 833 intel_pstate_busy_pid_reset(cpu);
760 intel_pstate_sample(cpu); 834 intel_pstate_sample(cpu);
761 835
@@ -792,6 +866,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
792 limits.no_turbo = 0; 866 limits.no_turbo = 0;
793 return 0; 867 return 0;
794 } 868 }
869
795 limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq; 870 limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
796 limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0 , 100); 871 limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0 , 100);
797 limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100)); 872 limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
@@ -801,6 +876,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
801 limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct); 876 limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
802 limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100)); 877 limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
803 878
879 if (hwp_active)
880 intel_pstate_hwp_set();
881
804 return 0; 882 return 0;
805} 883}
806 884
@@ -823,6 +901,9 @@ static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
823 pr_info("intel_pstate CPU %d exiting\n", cpu_num); 901 pr_info("intel_pstate CPU %d exiting\n", cpu_num);
824 902
825 del_timer_sync(&all_cpu_data[cpu_num]->timer); 903 del_timer_sync(&all_cpu_data[cpu_num]->timer);
904 if (hwp_active)
905 return;
906
826 intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate); 907 intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
827} 908}
828 909
@@ -866,6 +947,7 @@ static struct cpufreq_driver intel_pstate_driver = {
866}; 947};
867 948
868static int __initdata no_load; 949static int __initdata no_load;
950static int __initdata no_hwp;
869 951
870static int intel_pstate_msrs_not_valid(void) 952static int intel_pstate_msrs_not_valid(void)
871{ 953{
@@ -943,15 +1025,46 @@ static bool intel_pstate_no_acpi_pss(void)
943 return true; 1025 return true;
944} 1026}
945 1027
1028static bool intel_pstate_has_acpi_ppc(void)
1029{
1030 int i;
1031
1032 for_each_possible_cpu(i) {
1033 struct acpi_processor *pr = per_cpu(processors, i);
1034
1035 if (!pr)
1036 continue;
1037 if (acpi_has_method(pr->handle, "_PPC"))
1038 return true;
1039 }
1040 return false;
1041}
1042
1043enum {
1044 PSS,
1045 PPC,
1046};
1047
946struct hw_vendor_info { 1048struct hw_vendor_info {
947 u16 valid; 1049 u16 valid;
948 char oem_id[ACPI_OEM_ID_SIZE]; 1050 char oem_id[ACPI_OEM_ID_SIZE];
949 char oem_table_id[ACPI_OEM_TABLE_ID_SIZE]; 1051 char oem_table_id[ACPI_OEM_TABLE_ID_SIZE];
1052 int oem_pwr_table;
950}; 1053};
951 1054
952/* Hardware vendor-specific info that has its own power management modes */ 1055/* Hardware vendor-specific info that has its own power management modes */
953static struct hw_vendor_info vendor_info[] = { 1056static struct hw_vendor_info vendor_info[] = {
954 {1, "HP ", "ProLiant"}, 1057 {1, "HP ", "ProLiant", PSS},
1058 {1, "ORACLE", "X4-2 ", PPC},
1059 {1, "ORACLE", "X4-2L ", PPC},
1060 {1, "ORACLE", "X4-2B ", PPC},
1061 {1, "ORACLE", "X3-2 ", PPC},
1062 {1, "ORACLE", "X3-2L ", PPC},
1063 {1, "ORACLE", "X3-2B ", PPC},
1064 {1, "ORACLE", "X4470M2 ", PPC},
1065 {1, "ORACLE", "X4270M3 ", PPC},
1066 {1, "ORACLE", "X4270M2 ", PPC},
1067 {1, "ORACLE", "X4170M2 ", PPC},
955 {0, "", ""}, 1068 {0, "", ""},
956}; 1069};
957 1070
@@ -959,6 +1072,15 @@ static bool intel_pstate_platform_pwr_mgmt_exists(void)
959{ 1072{
960 struct acpi_table_header hdr; 1073 struct acpi_table_header hdr;
961 struct hw_vendor_info *v_info; 1074 struct hw_vendor_info *v_info;
1075 const struct x86_cpu_id *id;
1076 u64 misc_pwr;
1077
1078 id = x86_match_cpu(intel_pstate_cpu_oob_ids);
1079 if (id) {
1080 rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
1081 if ( misc_pwr & (1 << 8))
1082 return true;
1083 }
962 1084
963 if (acpi_disabled || 1085 if (acpi_disabled ||
964 ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr))) 1086 ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr)))
@@ -966,15 +1088,21 @@ static bool intel_pstate_platform_pwr_mgmt_exists(void)
966 1088
967 for (v_info = vendor_info; v_info->valid; v_info++) { 1089 for (v_info = vendor_info; v_info->valid; v_info++) {
968 if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE) && 1090 if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE) &&
969 !strncmp(hdr.oem_table_id, v_info->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) && 1091 !strncmp(hdr.oem_table_id, v_info->oem_table_id,
970 intel_pstate_no_acpi_pss()) 1092 ACPI_OEM_TABLE_ID_SIZE))
971 return true; 1093 switch (v_info->oem_pwr_table) {
1094 case PSS:
1095 return intel_pstate_no_acpi_pss();
1096 case PPC:
1097 return intel_pstate_has_acpi_ppc();
1098 }
972 } 1099 }
973 1100
974 return false; 1101 return false;
975} 1102}
976#else /* CONFIG_ACPI not enabled */ 1103#else /* CONFIG_ACPI not enabled */
977static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; } 1104static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
1105static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
978#endif /* CONFIG_ACPI */ 1106#endif /* CONFIG_ACPI */
979 1107
980static int __init intel_pstate_init(void) 1108static int __init intel_pstate_init(void)
@@ -982,6 +1110,7 @@ static int __init intel_pstate_init(void)
982 int cpu, rc = 0; 1110 int cpu, rc = 0;
983 const struct x86_cpu_id *id; 1111 const struct x86_cpu_id *id;
984 struct cpu_defaults *cpu_info; 1112 struct cpu_defaults *cpu_info;
1113 struct cpuinfo_x86 *c = &boot_cpu_data;
985 1114
986 if (no_load) 1115 if (no_load)
987 return -ENODEV; 1116 return -ENODEV;
@@ -1011,6 +1140,9 @@ static int __init intel_pstate_init(void)
1011 if (!all_cpu_data) 1140 if (!all_cpu_data)
1012 return -ENOMEM; 1141 return -ENOMEM;
1013 1142
1143 if (cpu_has(c,X86_FEATURE_HWP) && !no_hwp)
1144 intel_pstate_hwp_enable();
1145
1014 rc = cpufreq_register_driver(&intel_pstate_driver); 1146 rc = cpufreq_register_driver(&intel_pstate_driver);
1015 if (rc) 1147 if (rc)
1016 goto out; 1148 goto out;
@@ -1041,6 +1173,8 @@ static int __init intel_pstate_setup(char *str)
1041 1173
1042 if (!strcmp(str, "disable")) 1174 if (!strcmp(str, "disable"))
1043 no_load = 1; 1175 no_load = 1;
1176 if (!strcmp(str, "no_hwp"))
1177 no_hwp = 1;
1044 return 0; 1178 return 0;
1045} 1179}
1046early_param("intel_pstate", intel_pstate_setup); 1180early_param("intel_pstate", intel_pstate_setup);
diff --git a/drivers/cpufreq/ls1x-cpufreq.c b/drivers/cpufreq/ls1x-cpufreq.c
new file mode 100644
index 000000000000..25fbd6a1374f
--- /dev/null
+++ b/drivers/cpufreq/ls1x-cpufreq.c
@@ -0,0 +1,223 @@
1/*
2 * CPU Frequency Scaling for Loongson 1 SoC
3 *
4 * Copyright (C) 2014 Zhang, Keguang <keguang.zhang@gmail.com>
5 *
6 * This file is licensed under the terms of the GNU General Public
7 * License version 2. This program is licensed "as is" without any
8 * warranty of any kind, whether express or implied.
9 */
10
11#include <linux/clk.h>
12#include <linux/clk-provider.h>
13#include <linux/cpu.h>
14#include <linux/cpufreq.h>
15#include <linux/delay.h>
16#include <linux/module.h>
17#include <linux/platform_device.h>
18#include <linux/slab.h>
19
20#include <asm/mach-loongson1/cpufreq.h>
21#include <asm/mach-loongson1/loongson1.h>
22
23static struct {
24 struct device *dev;
25 struct clk *clk; /* CPU clk */
26 struct clk *mux_clk; /* MUX of CPU clk */
27 struct clk *pll_clk; /* PLL clk */
28 struct clk *osc_clk; /* OSC clk */
29 unsigned int max_freq;
30 unsigned int min_freq;
31} ls1x_cpufreq;
32
33static int ls1x_cpufreq_notifier(struct notifier_block *nb,
34 unsigned long val, void *data)
35{
36 if (val == CPUFREQ_POSTCHANGE)
37 current_cpu_data.udelay_val = loops_per_jiffy;
38
39 return NOTIFY_OK;
40}
41
42static struct notifier_block ls1x_cpufreq_notifier_block = {
43 .notifier_call = ls1x_cpufreq_notifier
44};
45
46static int ls1x_cpufreq_target(struct cpufreq_policy *policy,
47 unsigned int index)
48{
49 unsigned int old_freq, new_freq;
50
51 old_freq = policy->cur;
52 new_freq = policy->freq_table[index].frequency;
53
54 /*
55 * The procedure of reconfiguring CPU clk is as below.
56 *
57 * - Reparent CPU clk to OSC clk
58 * - Reset CPU clock (very important)
59 * - Reconfigure CPU DIV
60 * - Reparent CPU clk back to CPU DIV clk
61 */
62
63 dev_dbg(ls1x_cpufreq.dev, "%u KHz --> %u KHz\n", old_freq, new_freq);
64 clk_set_parent(policy->clk, ls1x_cpufreq.osc_clk);
65 __raw_writel(__raw_readl(LS1X_CLK_PLL_DIV) | RST_CPU_EN | RST_CPU,
66 LS1X_CLK_PLL_DIV);
67 __raw_writel(__raw_readl(LS1X_CLK_PLL_DIV) & ~(RST_CPU_EN | RST_CPU),
68 LS1X_CLK_PLL_DIV);
69 clk_set_rate(ls1x_cpufreq.mux_clk, new_freq * 1000);
70 clk_set_parent(policy->clk, ls1x_cpufreq.mux_clk);
71
72 return 0;
73}
74
75static int ls1x_cpufreq_init(struct cpufreq_policy *policy)
76{
77 struct cpufreq_frequency_table *freq_tbl;
78 unsigned int pll_freq, freq;
79 int steps, i, ret;
80
81 pll_freq = clk_get_rate(ls1x_cpufreq.pll_clk) / 1000;
82
83 steps = 1 << DIV_CPU_WIDTH;
84 freq_tbl = kzalloc(sizeof(*freq_tbl) * steps, GFP_KERNEL);
85 if (!freq_tbl) {
86 dev_err(ls1x_cpufreq.dev,
87 "failed to alloc cpufreq_frequency_table\n");
88 ret = -ENOMEM;
89 goto out;
90 }
91
92 for (i = 0; i < (steps - 1); i++) {
93 freq = pll_freq / (i + 1);
94 if ((freq < ls1x_cpufreq.min_freq) ||
95 (freq > ls1x_cpufreq.max_freq))
96 freq_tbl[i].frequency = CPUFREQ_ENTRY_INVALID;
97 else
98 freq_tbl[i].frequency = freq;
99 dev_dbg(ls1x_cpufreq.dev,
100 "cpufreq table: index %d: frequency %d\n", i,
101 freq_tbl[i].frequency);
102 }
103 freq_tbl[i].frequency = CPUFREQ_TABLE_END;
104
105 policy->clk = ls1x_cpufreq.clk;
106 ret = cpufreq_generic_init(policy, freq_tbl, 0);
107 if (ret)
108 kfree(freq_tbl);
109out:
110 return ret;
111}
112
113static int ls1x_cpufreq_exit(struct cpufreq_policy *policy)
114{
115 kfree(policy->freq_table);
116 return 0;
117}
118
119static struct cpufreq_driver ls1x_cpufreq_driver = {
120 .name = "cpufreq-ls1x",
121 .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
122 .verify = cpufreq_generic_frequency_table_verify,
123 .target_index = ls1x_cpufreq_target,
124 .get = cpufreq_generic_get,
125 .init = ls1x_cpufreq_init,
126 .exit = ls1x_cpufreq_exit,
127 .attr = cpufreq_generic_attr,
128};
129
130static int ls1x_cpufreq_remove(struct platform_device *pdev)
131{
132 cpufreq_unregister_notifier(&ls1x_cpufreq_notifier_block,
133 CPUFREQ_TRANSITION_NOTIFIER);
134 cpufreq_unregister_driver(&ls1x_cpufreq_driver);
135
136 return 0;
137}
138
139static int ls1x_cpufreq_probe(struct platform_device *pdev)
140{
141 struct plat_ls1x_cpufreq *pdata = pdev->dev.platform_data;
142 struct clk *clk;
143 int ret;
144
145 if (!pdata || !pdata->clk_name || !pdata->osc_clk_name)
146 return -EINVAL;
147
148 ls1x_cpufreq.dev = &pdev->dev;
149
150 clk = devm_clk_get(&pdev->dev, pdata->clk_name);
151 if (IS_ERR(clk)) {
152 dev_err(ls1x_cpufreq.dev, "unable to get %s clock\n",
153 pdata->clk_name);
154 ret = PTR_ERR(clk);
155 goto out;
156 }
157 ls1x_cpufreq.clk = clk;
158
159 clk = clk_get_parent(clk);
160 if (IS_ERR(clk)) {
161 dev_err(ls1x_cpufreq.dev, "unable to get parent of %s clock\n",
162 __clk_get_name(ls1x_cpufreq.clk));
163 ret = PTR_ERR(clk);
164 goto out;
165 }
166 ls1x_cpufreq.mux_clk = clk;
167
168 clk = clk_get_parent(clk);
169 if (IS_ERR(clk)) {
170 dev_err(ls1x_cpufreq.dev, "unable to get parent of %s clock\n",
171 __clk_get_name(ls1x_cpufreq.mux_clk));
172 ret = PTR_ERR(clk);
173 goto out;
174 }
175 ls1x_cpufreq.pll_clk = clk;
176
177 clk = devm_clk_get(&pdev->dev, pdata->osc_clk_name);
178 if (IS_ERR(clk)) {
179 dev_err(ls1x_cpufreq.dev, "unable to get %s clock\n",
180 pdata->osc_clk_name);
181 ret = PTR_ERR(clk);
182 goto out;
183 }
184 ls1x_cpufreq.osc_clk = clk;
185
186 ls1x_cpufreq.max_freq = pdata->max_freq;
187 ls1x_cpufreq.min_freq = pdata->min_freq;
188
189 ret = cpufreq_register_driver(&ls1x_cpufreq_driver);
190 if (ret) {
191 dev_err(ls1x_cpufreq.dev,
192 "failed to register cpufreq driver: %d\n", ret);
193 goto out;
194 }
195
196 ret = cpufreq_register_notifier(&ls1x_cpufreq_notifier_block,
197 CPUFREQ_TRANSITION_NOTIFIER);
198
199 if (!ret)
200 goto out;
201
202 dev_err(ls1x_cpufreq.dev, "failed to register cpufreq notifier: %d\n",
203 ret);
204
205 cpufreq_unregister_driver(&ls1x_cpufreq_driver);
206out:
207 return ret;
208}
209
210static struct platform_driver ls1x_cpufreq_platdrv = {
211 .driver = {
212 .name = "ls1x-cpufreq",
213 .owner = THIS_MODULE,
214 },
215 .probe = ls1x_cpufreq_probe,
216 .remove = ls1x_cpufreq_remove,
217};
218
219module_platform_driver(ls1x_cpufreq_platdrv);
220
221MODULE_AUTHOR("Kelvin Cheung <keguang.zhang@gmail.com>");
222MODULE_DESCRIPTION("Loongson 1 CPUFreq driver");
223MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index 4d2c8e861089..2a0d58959acf 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -603,6 +603,13 @@ static void __exit pcc_cpufreq_exit(void)
603 free_percpu(pcc_cpu_info); 603 free_percpu(pcc_cpu_info);
604} 604}
605 605
606static const struct acpi_device_id processor_device_ids[] = {
607 {ACPI_PROCESSOR_OBJECT_HID, },
608 {ACPI_PROCESSOR_DEVICE_HID, },
609 {},
610};
611MODULE_DEVICE_TABLE(acpi, processor_device_ids);
612
606MODULE_AUTHOR("Matthew Garrett, Naga Chumbalkar"); 613MODULE_AUTHOR("Matthew Garrett, Naga Chumbalkar");
607MODULE_VERSION(PCC_VERSION); 614MODULE_VERSION(PCC_VERSION);
608MODULE_DESCRIPTION("Processor Clocking Control interface driver"); 615MODULE_DESCRIPTION("Processor Clocking Control interface driver");
diff --git a/drivers/cpuidle/cpuidle-arm64.c b/drivers/cpuidle/cpuidle-arm64.c
index 50997ea942fc..80704b931ba4 100644
--- a/drivers/cpuidle/cpuidle-arm64.c
+++ b/drivers/cpuidle/cpuidle-arm64.c
@@ -73,7 +73,6 @@ static struct cpuidle_driver arm64_idle_driver = {
73 .exit_latency = 1, 73 .exit_latency = 1,
74 .target_residency = 1, 74 .target_residency = 1,
75 .power_usage = UINT_MAX, 75 .power_usage = UINT_MAX,
76 .flags = CPUIDLE_FLAG_TIME_VALID,
77 .name = "WFI", 76 .name = "WFI",
78 .desc = "ARM64 WFI", 77 .desc = "ARM64 WFI",
79 } 78 }
@@ -104,11 +103,8 @@ static int __init arm64_idle_init(void)
104 * reason to initialize the idle driver if only wfi is supported. 103 * reason to initialize the idle driver if only wfi is supported.
105 */ 104 */
106 ret = dt_init_idle_driver(drv, arm64_idle_state_match, 1); 105 ret = dt_init_idle_driver(drv, arm64_idle_state_match, 1);
107 if (ret <= 0) { 106 if (ret <= 0)
108 if (ret)
109 pr_err("failed to initialize idle states\n");
110 return ret ? : -ENODEV; 107 return ret ? : -ENODEV;
111 }
112 108
113 /* 109 /*
114 * Call arch CPU operations in order to initialize 110 * Call arch CPU operations in order to initialize
@@ -122,12 +118,6 @@ static int __init arm64_idle_init(void)
122 } 118 }
123 } 119 }
124 120
125 ret = cpuidle_register(drv, NULL); 121 return cpuidle_register(drv, NULL);
126 if (ret) {
127 pr_err("failed to register cpuidle driver\n");
128 return ret;
129 }
130
131 return 0;
132} 122}
133device_initcall(arm64_idle_init); 123device_initcall(arm64_idle_init);
diff --git a/drivers/cpuidle/cpuidle-at91.c b/drivers/cpuidle/cpuidle-at91.c
index a0774370c6bc..1964ff07117c 100644
--- a/drivers/cpuidle/cpuidle-at91.c
+++ b/drivers/cpuidle/cpuidle-at91.c
@@ -43,7 +43,6 @@ static struct cpuidle_driver at91_idle_driver = {
43 .enter = at91_enter_idle, 43 .enter = at91_enter_idle,
44 .exit_latency = 10, 44 .exit_latency = 10,
45 .target_residency = 10000, 45 .target_residency = 10000,
46 .flags = CPUIDLE_FLAG_TIME_VALID,
47 .name = "RAM_SR", 46 .name = "RAM_SR",
48 .desc = "WFI and DDR Self Refresh", 47 .desc = "WFI and DDR Self Refresh",
49 }, 48 },
diff --git a/drivers/cpuidle/cpuidle-big_little.c b/drivers/cpuidle/cpuidle-big_little.c
index fbc00a1d3c48..e3e225fe6b45 100644
--- a/drivers/cpuidle/cpuidle-big_little.c
+++ b/drivers/cpuidle/cpuidle-big_little.c
@@ -67,8 +67,7 @@ static struct cpuidle_driver bl_idle_little_driver = {
67 .enter = bl_enter_powerdown, 67 .enter = bl_enter_powerdown,
68 .exit_latency = 700, 68 .exit_latency = 700,
69 .target_residency = 2500, 69 .target_residency = 2500,
70 .flags = CPUIDLE_FLAG_TIME_VALID | 70 .flags = CPUIDLE_FLAG_TIMER_STOP,
71 CPUIDLE_FLAG_TIMER_STOP,
72 .name = "C1", 71 .name = "C1",
73 .desc = "ARM little-cluster power down", 72 .desc = "ARM little-cluster power down",
74 }, 73 },
@@ -89,8 +88,7 @@ static struct cpuidle_driver bl_idle_big_driver = {
89 .enter = bl_enter_powerdown, 88 .enter = bl_enter_powerdown,
90 .exit_latency = 500, 89 .exit_latency = 500,
91 .target_residency = 2000, 90 .target_residency = 2000,
92 .flags = CPUIDLE_FLAG_TIME_VALID | 91 .flags = CPUIDLE_FLAG_TIMER_STOP,
93 CPUIDLE_FLAG_TIMER_STOP,
94 .name = "C1", 92 .name = "C1",
95 .desc = "ARM big-cluster power down", 93 .desc = "ARM big-cluster power down",
96 }, 94 },
diff --git a/drivers/cpuidle/cpuidle-calxeda.c b/drivers/cpuidle/cpuidle-calxeda.c
index 6e51114057d0..6541b0bfdfaa 100644
--- a/drivers/cpuidle/cpuidle-calxeda.c
+++ b/drivers/cpuidle/cpuidle-calxeda.c
@@ -55,7 +55,6 @@ static struct cpuidle_driver calxeda_idle_driver = {
55 { 55 {
56 .name = "PG", 56 .name = "PG",
57 .desc = "Power Gate", 57 .desc = "Power Gate",
58 .flags = CPUIDLE_FLAG_TIME_VALID,
59 .exit_latency = 30, 58 .exit_latency = 30,
60 .power_usage = 50, 59 .power_usage = 50,
61 .target_residency = 200, 60 .target_residency = 200,
diff --git a/drivers/cpuidle/cpuidle-cps.c b/drivers/cpuidle/cpuidle-cps.c
index fc7b62720deb..1adb6980b707 100644
--- a/drivers/cpuidle/cpuidle-cps.c
+++ b/drivers/cpuidle/cpuidle-cps.c
@@ -79,7 +79,6 @@ static struct cpuidle_driver cps_driver = {
79 .enter = cps_nc_enter, 79 .enter = cps_nc_enter,
80 .exit_latency = 200, 80 .exit_latency = 200,
81 .target_residency = 450, 81 .target_residency = 450,
82 .flags = CPUIDLE_FLAG_TIME_VALID,
83 .name = "nc-wait", 82 .name = "nc-wait",
84 .desc = "non-coherent MIPS wait", 83 .desc = "non-coherent MIPS wait",
85 }, 84 },
@@ -87,8 +86,7 @@ static struct cpuidle_driver cps_driver = {
87 .enter = cps_nc_enter, 86 .enter = cps_nc_enter,
88 .exit_latency = 300, 87 .exit_latency = 300,
89 .target_residency = 700, 88 .target_residency = 700,
90 .flags = CPUIDLE_FLAG_TIME_VALID | 89 .flags = CPUIDLE_FLAG_TIMER_STOP,
91 CPUIDLE_FLAG_TIMER_STOP,
92 .name = "clock-gated", 90 .name = "clock-gated",
93 .desc = "core clock gated", 91 .desc = "core clock gated",
94 }, 92 },
@@ -96,8 +94,7 @@ static struct cpuidle_driver cps_driver = {
96 .enter = cps_nc_enter, 94 .enter = cps_nc_enter,
97 .exit_latency = 600, 95 .exit_latency = 600,
98 .target_residency = 1000, 96 .target_residency = 1000,
99 .flags = CPUIDLE_FLAG_TIME_VALID | 97 .flags = CPUIDLE_FLAG_TIMER_STOP,
100 CPUIDLE_FLAG_TIMER_STOP,
101 .name = "power-gated", 98 .name = "power-gated",
102 .desc = "core power gated", 99 .desc = "core power gated",
103 }, 100 },
diff --git a/drivers/cpuidle/cpuidle-exynos.c b/drivers/cpuidle/cpuidle-exynos.c
index ba9b34b579f3..64d12a855ec6 100644
--- a/drivers/cpuidle/cpuidle-exynos.c
+++ b/drivers/cpuidle/cpuidle-exynos.c
@@ -47,7 +47,6 @@ static struct cpuidle_driver exynos_idle_driver = {
47 .enter = exynos_enter_lowpower, 47 .enter = exynos_enter_lowpower,
48 .exit_latency = 300, 48 .exit_latency = 300,
49 .target_residency = 100000, 49 .target_residency = 100000,
50 .flags = CPUIDLE_FLAG_TIME_VALID,
51 .name = "C1", 50 .name = "C1",
52 .desc = "ARM power down", 51 .desc = "ARM power down",
53 }, 52 },
diff --git a/drivers/cpuidle/cpuidle-kirkwood.c b/drivers/cpuidle/cpuidle-kirkwood.c
index 41ba843251b8..d88f8d7c2143 100644
--- a/drivers/cpuidle/cpuidle-kirkwood.c
+++ b/drivers/cpuidle/cpuidle-kirkwood.c
@@ -47,7 +47,6 @@ static struct cpuidle_driver kirkwood_idle_driver = {
47 .enter = kirkwood_enter_idle, 47 .enter = kirkwood_enter_idle,
48 .exit_latency = 10, 48 .exit_latency = 10,
49 .target_residency = 100000, 49 .target_residency = 100000,
50 .flags = CPUIDLE_FLAG_TIME_VALID,
51 .name = "DDR SR", 50 .name = "DDR SR",
52 .desc = "WFI and DDR Self Refresh", 51 .desc = "WFI and DDR Self Refresh",
53 }, 52 },
diff --git a/drivers/cpuidle/cpuidle-mvebu-v7.c b/drivers/cpuidle/cpuidle-mvebu-v7.c
index 45371bb16214..dd4c176df2a3 100644
--- a/drivers/cpuidle/cpuidle-mvebu-v7.c
+++ b/drivers/cpuidle/cpuidle-mvebu-v7.c
@@ -53,7 +53,6 @@ static struct cpuidle_driver armadaxp_idle_driver = {
53 .exit_latency = 10, 53 .exit_latency = 10,
54 .power_usage = 50, 54 .power_usage = 50,
55 .target_residency = 100, 55 .target_residency = 100,
56 .flags = CPUIDLE_FLAG_TIME_VALID,
57 .name = "MV CPU IDLE", 56 .name = "MV CPU IDLE",
58 .desc = "CPU power down", 57 .desc = "CPU power down",
59 }, 58 },
@@ -62,8 +61,7 @@ static struct cpuidle_driver armadaxp_idle_driver = {
62 .exit_latency = 100, 61 .exit_latency = 100,
63 .power_usage = 5, 62 .power_usage = 5,
64 .target_residency = 1000, 63 .target_residency = 1000,
65 .flags = CPUIDLE_FLAG_TIME_VALID | 64 .flags = MVEBU_V7_FLAG_DEEP_IDLE,
66 MVEBU_V7_FLAG_DEEP_IDLE,
67 .name = "MV CPU DEEP IDLE", 65 .name = "MV CPU DEEP IDLE",
68 .desc = "CPU and L2 Fabric power down", 66 .desc = "CPU and L2 Fabric power down",
69 }, 67 },
@@ -78,8 +76,7 @@ static struct cpuidle_driver armada370_idle_driver = {
78 .exit_latency = 100, 76 .exit_latency = 100,
79 .power_usage = 5, 77 .power_usage = 5,
80 .target_residency = 1000, 78 .target_residency = 1000,
81 .flags = (CPUIDLE_FLAG_TIME_VALID | 79 .flags = MVEBU_V7_FLAG_DEEP_IDLE,
82 MVEBU_V7_FLAG_DEEP_IDLE),
83 .name = "Deep Idle", 80 .name = "Deep Idle",
84 .desc = "CPU and L2 Fabric power down", 81 .desc = "CPU and L2 Fabric power down",
85 }, 82 },
@@ -94,7 +91,6 @@ static struct cpuidle_driver armada38x_idle_driver = {
94 .exit_latency = 10, 91 .exit_latency = 10,
95 .power_usage = 5, 92 .power_usage = 5,
96 .target_residency = 100, 93 .target_residency = 100,
97 .flags = CPUIDLE_FLAG_TIME_VALID,
98 .name = "Idle", 94 .name = "Idle",
99 .desc = "CPU and SCU power down", 95 .desc = "CPU and SCU power down",
100 }, 96 },
diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c
index 7d3a3497dd4c..e9248bb9173a 100644
--- a/drivers/cpuidle/cpuidle-powernv.c
+++ b/drivers/cpuidle/cpuidle-powernv.c
@@ -93,7 +93,6 @@ static struct cpuidle_state powernv_states[MAX_POWERNV_IDLE_STATES] = {
93 { /* Snooze */ 93 { /* Snooze */
94 .name = "snooze", 94 .name = "snooze",
95 .desc = "snooze", 95 .desc = "snooze",
96 .flags = CPUIDLE_FLAG_TIME_VALID,
97 .exit_latency = 0, 96 .exit_latency = 0,
98 .target_residency = 0, 97 .target_residency = 0,
99 .enter = &snooze_loop }, 98 .enter = &snooze_loop },
@@ -202,7 +201,7 @@ static int powernv_add_idle_states(void)
202 /* Add NAP state */ 201 /* Add NAP state */
203 strcpy(powernv_states[nr_idle_states].name, "Nap"); 202 strcpy(powernv_states[nr_idle_states].name, "Nap");
204 strcpy(powernv_states[nr_idle_states].desc, "Nap"); 203 strcpy(powernv_states[nr_idle_states].desc, "Nap");
205 powernv_states[nr_idle_states].flags = CPUIDLE_FLAG_TIME_VALID; 204 powernv_states[nr_idle_states].flags = 0;
206 powernv_states[nr_idle_states].exit_latency = 205 powernv_states[nr_idle_states].exit_latency =
207 ((unsigned int)latency_ns) / 1000; 206 ((unsigned int)latency_ns) / 1000;
208 powernv_states[nr_idle_states].target_residency = 207 powernv_states[nr_idle_states].target_residency =
@@ -215,8 +214,7 @@ static int powernv_add_idle_states(void)
215 /* Add FASTSLEEP state */ 214 /* Add FASTSLEEP state */
216 strcpy(powernv_states[nr_idle_states].name, "FastSleep"); 215 strcpy(powernv_states[nr_idle_states].name, "FastSleep");
217 strcpy(powernv_states[nr_idle_states].desc, "FastSleep"); 216 strcpy(powernv_states[nr_idle_states].desc, "FastSleep");
218 powernv_states[nr_idle_states].flags = 217 powernv_states[nr_idle_states].flags = CPUIDLE_FLAG_TIMER_STOP;
219 CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TIMER_STOP;
220 powernv_states[nr_idle_states].exit_latency = 218 powernv_states[nr_idle_states].exit_latency =
221 ((unsigned int)latency_ns) / 1000; 219 ((unsigned int)latency_ns) / 1000;
222 powernv_states[nr_idle_states].target_residency = 220 powernv_states[nr_idle_states].target_residency =
diff --git a/drivers/cpuidle/cpuidle-pseries.c b/drivers/cpuidle/cpuidle-pseries.c
index 6f7b01956885..bb9e2b6f3ecc 100644
--- a/drivers/cpuidle/cpuidle-pseries.c
+++ b/drivers/cpuidle/cpuidle-pseries.c
@@ -142,14 +142,12 @@ static struct cpuidle_state dedicated_states[] = {
142 { /* Snooze */ 142 { /* Snooze */
143 .name = "snooze", 143 .name = "snooze",
144 .desc = "snooze", 144 .desc = "snooze",
145 .flags = CPUIDLE_FLAG_TIME_VALID,
146 .exit_latency = 0, 145 .exit_latency = 0,
147 .target_residency = 0, 146 .target_residency = 0,
148 .enter = &snooze_loop }, 147 .enter = &snooze_loop },
149 { /* CEDE */ 148 { /* CEDE */
150 .name = "CEDE", 149 .name = "CEDE",
151 .desc = "CEDE", 150 .desc = "CEDE",
152 .flags = CPUIDLE_FLAG_TIME_VALID,
153 .exit_latency = 10, 151 .exit_latency = 10,
154 .target_residency = 100, 152 .target_residency = 100,
155 .enter = &dedicated_cede_loop }, 153 .enter = &dedicated_cede_loop },
@@ -162,7 +160,6 @@ static struct cpuidle_state shared_states[] = {
162 { /* Shared Cede */ 160 { /* Shared Cede */
163 .name = "Shared Cede", 161 .name = "Shared Cede",
164 .desc = "Shared Cede", 162 .desc = "Shared Cede",
165 .flags = CPUIDLE_FLAG_TIME_VALID,
166 .exit_latency = 0, 163 .exit_latency = 0,
167 .target_residency = 0, 164 .target_residency = 0,
168 .enter = &shared_cede_loop }, 165 .enter = &shared_cede_loop },
diff --git a/drivers/cpuidle/cpuidle-ux500.c b/drivers/cpuidle/cpuidle-ux500.c
index 5e35804b1a95..292e65a90308 100644
--- a/drivers/cpuidle/cpuidle-ux500.c
+++ b/drivers/cpuidle/cpuidle-ux500.c
@@ -101,8 +101,7 @@ static struct cpuidle_driver ux500_idle_driver = {
101 .enter = ux500_enter_idle, 101 .enter = ux500_enter_idle,
102 .exit_latency = 70, 102 .exit_latency = 70,
103 .target_residency = 260, 103 .target_residency = 260,
104 .flags = CPUIDLE_FLAG_TIME_VALID | 104 .flags = CPUIDLE_FLAG_TIMER_STOP,
105 CPUIDLE_FLAG_TIMER_STOP,
106 .name = "ApIdle", 105 .name = "ApIdle",
107 .desc = "ARM Retention", 106 .desc = "ARM Retention",
108 }, 107 },
diff --git a/drivers/cpuidle/cpuidle-zynq.c b/drivers/cpuidle/cpuidle-zynq.c
index c61b8b2a7c77..022dec86de8e 100644
--- a/drivers/cpuidle/cpuidle-zynq.c
+++ b/drivers/cpuidle/cpuidle-zynq.c
@@ -52,7 +52,6 @@ static struct cpuidle_driver zynq_idle_driver = {
52 .enter = zynq_enter_idle, 52 .enter = zynq_enter_idle,
53 .exit_latency = 10, 53 .exit_latency = 10,
54 .target_residency = 10000, 54 .target_residency = 10000,
55 .flags = CPUIDLE_FLAG_TIME_VALID,
56 .name = "RAM_SR", 55 .name = "RAM_SR",
57 .desc = "WFI and RAM Self Refresh", 56 .desc = "WFI and RAM Self Refresh",
58 }, 57 },
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index e431d11abf8d..2697e87d5b34 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -201,7 +201,6 @@ static void poll_idle_init(struct cpuidle_driver *drv)
201 state->exit_latency = 0; 201 state->exit_latency = 0;
202 state->target_residency = 0; 202 state->target_residency = 0;
203 state->power_usage = -1; 203 state->power_usage = -1;
204 state->flags = CPUIDLE_FLAG_TIME_VALID;
205 state->enter = poll_idle; 204 state->enter = poll_idle;
206 state->disabled = false; 205 state->disabled = false;
207} 206}
diff --git a/drivers/cpuidle/dt_idle_states.c b/drivers/cpuidle/dt_idle_states.c
index 52f4d11bbf3f..a5c111b67f37 100644
--- a/drivers/cpuidle/dt_idle_states.c
+++ b/drivers/cpuidle/dt_idle_states.c
@@ -27,6 +27,7 @@ static int init_state_node(struct cpuidle_state *idle_state,
27{ 27{
28 int err; 28 int err;
29 const struct of_device_id *match_id; 29 const struct of_device_id *match_id;
30 const char *desc;
30 31
31 match_id = of_match_node(matches, state_node); 32 match_id = of_match_node(matches, state_node);
32 if (!match_id) 33 if (!match_id)
@@ -73,7 +74,11 @@ static int init_state_node(struct cpuidle_state *idle_state,
73 return -EINVAL; 74 return -EINVAL;
74 } 75 }
75 76
76 idle_state->flags = CPUIDLE_FLAG_TIME_VALID; 77 err = of_property_read_string(state_node, "idle-state-name", &desc);
78 if (err)
79 desc = state_node->name;
80
81 idle_state->flags = 0;
77 if (of_property_read_bool(state_node, "local-timer-stop")) 82 if (of_property_read_bool(state_node, "local-timer-stop"))
78 idle_state->flags |= CPUIDLE_FLAG_TIMER_STOP; 83 idle_state->flags |= CPUIDLE_FLAG_TIMER_STOP;
79 /* 84 /*
@@ -82,7 +87,7 @@ static int init_state_node(struct cpuidle_state *idle_state,
82 * and desc become string pointers 87 * and desc become string pointers
83 */ 88 */
84 strncpy(idle_state->name, state_node->name, CPUIDLE_NAME_LEN - 1); 89 strncpy(idle_state->name, state_node->name, CPUIDLE_NAME_LEN - 1);
85 strncpy(idle_state->desc, state_node->name, CPUIDLE_DESC_LEN - 1); 90 strncpy(idle_state->desc, desc, CPUIDLE_DESC_LEN - 1);
86 return 0; 91 return 0;
87} 92}
88 93
@@ -169,6 +174,9 @@ int dt_init_idle_driver(struct cpuidle_driver *drv,
169 if (!state_node) 174 if (!state_node)
170 break; 175 break;
171 176
177 if (!of_device_is_available(state_node))
178 continue;
179
172 if (!idle_state_valid(state_node, i, cpumask)) { 180 if (!idle_state_valid(state_node, i, cpumask)) {
173 pr_warn("%s idle state not valid, bailing out\n", 181 pr_warn("%s idle state not valid, bailing out\n",
174 state_node->full_name); 182 state_node->full_name);
diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c
index 06b57c4c4d80..37263d9a1051 100644
--- a/drivers/cpuidle/governors/ladder.c
+++ b/drivers/cpuidle/governors/ladder.c
@@ -79,7 +79,7 @@ static int ladder_select_state(struct cpuidle_driver *drv,
79 79
80 last_state = &ldev->states[last_idx]; 80 last_state = &ldev->states[last_idx];
81 81
82 if (drv->states[last_idx].flags & CPUIDLE_FLAG_TIME_VALID) { 82 if (!(drv->states[last_idx].flags & CPUIDLE_FLAG_TIME_INVALID)) {
83 last_residency = cpuidle_get_last_residency(dev) - \ 83 last_residency = cpuidle_get_last_residency(dev) - \
84 drv->states[last_idx].exit_latency; 84 drv->states[last_idx].exit_latency;
85 } 85 }
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 710a233b9b0d..659d7b0c9ebf 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -405,7 +405,7 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
405 * the measured amount of time is less than the exit latency, 405 * the measured amount of time is less than the exit latency,
406 * assume the state was never reached and the exit latency is 0. 406 * assume the state was never reached and the exit latency is 0.
407 */ 407 */
408 if (unlikely(!(target->flags & CPUIDLE_FLAG_TIME_VALID))) { 408 if (unlikely(target->flags & CPUIDLE_FLAG_TIME_INVALID)) {
409 /* Use timer value as is */ 409 /* Use timer value as is */
410 measured_us = data->next_timer_us; 410 measured_us = data->next_timer_us;
411 411
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 244722170410..380478562b7d 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -22,6 +22,7 @@
22#include <linux/mm.h> 22#include <linux/mm.h>
23#include <linux/module.h> 23#include <linux/module.h>
24#include <linux/slab.h> 24#include <linux/slab.h>
25#include <linux/pm_runtime.h>
25 26
26#include "../dmaengine.h" 27#include "../dmaengine.h"
27#include "internal.h" 28#include "internal.h"
@@ -1504,6 +1505,9 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
1504 dw->regs = chip->regs; 1505 dw->regs = chip->regs;
1505 chip->dw = dw; 1506 chip->dw = dw;
1506 1507
1508 pm_runtime_enable(chip->dev);
1509 pm_runtime_get_sync(chip->dev);
1510
1507 dw_params = dma_read_byaddr(chip->regs, DW_PARAMS); 1511 dw_params = dma_read_byaddr(chip->regs, DW_PARAMS);
1508 autocfg = dw_params >> DW_PARAMS_EN & 0x1; 1512 autocfg = dw_params >> DW_PARAMS_EN & 0x1;
1509 1513
@@ -1667,11 +1671,14 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
1667 dev_info(chip->dev, "DesignWare DMA Controller, %d channels\n", 1671 dev_info(chip->dev, "DesignWare DMA Controller, %d channels\n",
1668 nr_channels); 1672 nr_channels);
1669 1673
1674 pm_runtime_put_sync_suspend(chip->dev);
1675
1670 return 0; 1676 return 0;
1671 1677
1672err_dma_register: 1678err_dma_register:
1673 free_irq(chip->irq, dw); 1679 free_irq(chip->irq, dw);
1674err_pdata: 1680err_pdata:
1681 pm_runtime_put_sync_suspend(chip->dev);
1675 return err; 1682 return err;
1676} 1683}
1677EXPORT_SYMBOL_GPL(dw_dma_probe); 1684EXPORT_SYMBOL_GPL(dw_dma_probe);
@@ -1681,6 +1688,8 @@ int dw_dma_remove(struct dw_dma_chip *chip)
1681 struct dw_dma *dw = chip->dw; 1688 struct dw_dma *dw = chip->dw;
1682 struct dw_dma_chan *dwc, *_dwc; 1689 struct dw_dma_chan *dwc, *_dwc;
1683 1690
1691 pm_runtime_get_sync(chip->dev);
1692
1684 dw_dma_off(dw); 1693 dw_dma_off(dw);
1685 dma_async_device_unregister(&dw->dma); 1694 dma_async_device_unregister(&dw->dma);
1686 1695
@@ -1693,6 +1702,8 @@ int dw_dma_remove(struct dw_dma_chip *chip)
1693 channel_clear_bit(dw, CH_EN, dwc->mask); 1702 channel_clear_bit(dw, CH_EN, dwc->mask);
1694 } 1703 }
1695 1704
1705 pm_runtime_put_sync_suspend(chip->dev);
1706 pm_runtime_disable(chip->dev);
1696 return 0; 1707 return 0;
1697} 1708}
1698EXPORT_SYMBOL_GPL(dw_dma_remove); 1709EXPORT_SYMBOL_GPL(dw_dma_remove);
diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c
index 5aeada56a442..bda20e6e1007 100644
--- a/drivers/dma/nbpfaxi.c
+++ b/drivers/dma/nbpfaxi.c
@@ -1479,7 +1479,7 @@ static struct platform_device_id nbpf_ids[] = {
1479}; 1479};
1480MODULE_DEVICE_TABLE(platform, nbpf_ids); 1480MODULE_DEVICE_TABLE(platform, nbpf_ids);
1481 1481
1482#ifdef CONFIG_PM_RUNTIME 1482#ifdef CONFIG_PM
1483static int nbpf_runtime_suspend(struct device *dev) 1483static int nbpf_runtime_suspend(struct device *dev)
1484{ 1484{
1485 struct nbpf_device *nbpf = platform_get_drvdata(to_platform_device(dev)); 1485 struct nbpf_device *nbpf = platform_get_drvdata(to_platform_device(dev));
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 5fe59335e247..d9ca3e32d748 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -3051,7 +3051,7 @@ static int dma40_runtime_resume(struct device *dev)
3051 3051
3052static const struct dev_pm_ops dma40_pm_ops = { 3052static const struct dev_pm_ops dma40_pm_ops = {
3053 SET_LATE_SYSTEM_SLEEP_PM_OPS(dma40_suspend, dma40_resume) 3053 SET_LATE_SYSTEM_SLEEP_PM_OPS(dma40_suspend, dma40_resume)
3054 SET_PM_RUNTIME_PM_OPS(dma40_runtime_suspend, 3054 SET_RUNTIME_PM_OPS(dma40_runtime_suspend,
3055 dma40_runtime_resume, 3055 dma40_runtime_resume,
3056 NULL) 3056 NULL)
3057}; 3057};
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index 16efa603ff65..1c867d0303db 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -1587,7 +1587,7 @@ static int tegra_dma_pm_resume(struct device *dev)
1587#endif 1587#endif
1588 1588
1589static const struct dev_pm_ops tegra_dma_dev_pm_ops = { 1589static const struct dev_pm_ops tegra_dma_dev_pm_ops = {
1590#ifdef CONFIG_PM_RUNTIME 1590#ifdef CONFIG_PM
1591 .runtime_suspend = tegra_dma_runtime_suspend, 1591 .runtime_suspend = tegra_dma_runtime_suspend,
1592 .runtime_resume = tegra_dma_runtime_resume, 1592 .runtime_resume = tegra_dma_runtime_resume,
1593#endif 1593#endif
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 7072c2892d63..49c265255a07 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -61,14 +61,14 @@ config EDAC_DECODE_MCE
61 has been initialized. 61 has been initialized.
62 62
63config EDAC_MCE_INJ 63config EDAC_MCE_INJ
64 tristate "Simple MCE injection interface over /sysfs" 64 tristate "Simple MCE injection interface"
65 depends on EDAC_DECODE_MCE 65 depends on EDAC_DECODE_MCE && DEBUG_FS
66 default n 66 default n
67 help 67 help
68 This is a simple interface to inject MCEs over /sysfs and test 68 This is a simple debugfs interface to inject MCEs and test different
69 the MCE decoding code in EDAC. 69 aspects of the MCE handling code.
70 70
71 This is currently AMD-only. 71 WARNING: Do not even assume this interface is staying stable!
72 72
73config EDAC_MM_EDAC 73config EDAC_MM_EDAC
74 tristate "Main Memory EDAC (Error Detection And Correction) reporting" 74 tristate "Main Memory EDAC (Error Detection And Correction) reporting"
@@ -105,11 +105,11 @@ config EDAC_GHES
105 In doubt, say 'Y'. 105 In doubt, say 'Y'.
106 106
107config EDAC_AMD64 107config EDAC_AMD64
108 tristate "AMD64 (Opteron, Athlon64) K8, F10h" 108 tristate "AMD64 (Opteron, Athlon64)"
109 depends on EDAC_MM_EDAC && AMD_NB && X86_64 && EDAC_DECODE_MCE 109 depends on EDAC_MM_EDAC && AMD_NB && EDAC_DECODE_MCE
110 help 110 help
111 Support for error detection and correction of DRAM ECC errors on 111 Support for error detection and correction of DRAM ECC errors on
112 the AMD64 families of memory controllers (K8 and F10h) 112 the AMD64 families (>= K8) of memory controllers.
113 113
114config EDAC_AMD64_ERROR_INJECTION 114config EDAC_AMD64_ERROR_INJECTION
115 bool "Sysfs HW Error injection facilities" 115 bool "Sysfs HW Error injection facilities"
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index 359aa499b200..d40c69a04df7 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -9,7 +9,7 @@
9obj-$(CONFIG_EDAC) := edac_stub.o 9obj-$(CONFIG_EDAC) := edac_stub.o
10obj-$(CONFIG_EDAC_MM_EDAC) += edac_core.o 10obj-$(CONFIG_EDAC_MM_EDAC) += edac_core.o
11 11
12edac_core-y := edac_mc.o edac_device.o edac_mc_sysfs.o edac_pci_sysfs.o 12edac_core-y := edac_mc.o edac_device.o edac_mc_sysfs.o
13edac_core-y += edac_module.o edac_device_sysfs.o 13edac_core-y += edac_module.o edac_device_sysfs.o
14 14
15ifdef CONFIG_PCI 15ifdef CONFIG_PCI
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index bbd65149cdb2..17638d7cf5c2 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -692,9 +692,19 @@ static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
692{ 692{
693 edac_dbg(1, "F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr); 693 edac_dbg(1, "F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);
694 694
695 edac_dbg(1, " DIMM type: %sbuffered; all DIMMs support ECC: %s\n", 695 if (pvt->dram_type == MEM_LRDDR3) {
696 (dclr & BIT(16)) ? "un" : "", 696 u32 dcsm = pvt->csels[chan].csmasks[0];
697 (dclr & BIT(19)) ? "yes" : "no"); 697 /*
698 * It's assumed all LRDIMMs in a DCT are going to be of
699 * same 'type' until proven otherwise. So, use a cs
700 * value of '0' here to get dcsm value.
701 */
702 edac_dbg(1, " LRDIMM %dx rank multiply\n", (dcsm & 0x3));
703 }
704
705 edac_dbg(1, "All DIMMs support ECC:%s\n",
706 (dclr & BIT(19)) ? "yes" : "no");
707
698 708
699 edac_dbg(1, " PAR/ERR parity: %s\n", 709 edac_dbg(1, " PAR/ERR parity: %s\n",
700 (dclr & BIT(8)) ? "enabled" : "disabled"); 710 (dclr & BIT(8)) ? "enabled" : "disabled");
@@ -756,7 +766,7 @@ static void prep_chip_selects(struct amd64_pvt *pvt)
756 if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) { 766 if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
757 pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8; 767 pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
758 pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8; 768 pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8;
759 } else if (pvt->fam == 0x15 && pvt->model >= 0x30) { 769 } else if (pvt->fam == 0x15 && pvt->model == 0x30) {
760 pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 4; 770 pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 4;
761 pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 2; 771 pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 2;
762 } else { 772 } else {
@@ -813,25 +823,63 @@ static void read_dct_base_mask(struct amd64_pvt *pvt)
813 } 823 }
814} 824}
815 825
816static enum mem_type determine_memory_type(struct amd64_pvt *pvt, int cs) 826static void determine_memory_type(struct amd64_pvt *pvt)
817{ 827{
818 enum mem_type type; 828 u32 dram_ctrl, dcsm;
819 829
820 /* F15h supports only DDR3 */ 830 switch (pvt->fam) {
821 if (pvt->fam >= 0x15) 831 case 0xf:
822 type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3; 832 if (pvt->ext_model >= K8_REV_F)
823 else if (pvt->fam == 0x10 || pvt->ext_model >= K8_REV_F) { 833 goto ddr3;
834
835 pvt->dram_type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
836 return;
837
838 case 0x10:
824 if (pvt->dchr0 & DDR3_MODE) 839 if (pvt->dchr0 & DDR3_MODE)
825 type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3; 840 goto ddr3;
841
842 pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
843 return;
844
845 case 0x15:
846 if (pvt->model < 0x60)
847 goto ddr3;
848
849 /*
850 * Model 0x60h needs special handling:
851 *
852 * We use a Chip Select value of '0' to obtain dcsm.
853 * Theoretically, it is possible to populate LRDIMMs of different
854 * 'Rank' value on a DCT. But this is not the common case. So,
855 * it's reasonable to assume all DIMMs are going to be of same
856 * 'type' until proven otherwise.
857 */
858 amd64_read_dct_pci_cfg(pvt, 0, DRAM_CONTROL, &dram_ctrl);
859 dcsm = pvt->csels[0].csmasks[0];
860
861 if (((dram_ctrl >> 8) & 0x7) == 0x2)
862 pvt->dram_type = MEM_DDR4;
863 else if (pvt->dclr0 & BIT(16))
864 pvt->dram_type = MEM_DDR3;
865 else if (dcsm & 0x3)
866 pvt->dram_type = MEM_LRDDR3;
826 else 867 else
827 type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2; 868 pvt->dram_type = MEM_RDDR3;
828 } else {
829 type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
830 }
831 869
832 amd64_info("CS%d: %s\n", cs, edac_mem_types[type]); 870 return;
833 871
834 return type; 872 case 0x16:
873 goto ddr3;
874
875 default:
876 WARN(1, KERN_ERR "%s: Family??? 0x%x\n", __func__, pvt->fam);
877 pvt->dram_type = MEM_EMPTY;
878 }
879 return;
880
881ddr3:
882 pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
835} 883}
836 884
837/* Get the number of DCT channels the memory controller is using. */ 885/* Get the number of DCT channels the memory controller is using. */
@@ -958,8 +1006,12 @@ static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
958 if (WARN_ON(!nb)) 1006 if (WARN_ON(!nb))
959 return; 1007 return;
960 1008
961 pci_func = (pvt->model == 0x30) ? PCI_DEVICE_ID_AMD_15H_M30H_NB_F1 1009 if (pvt->model == 0x60)
962 : PCI_DEVICE_ID_AMD_15H_NB_F1; 1010 pci_func = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1;
1011 else if (pvt->model == 0x30)
1012 pci_func = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1;
1013 else
1014 pci_func = PCI_DEVICE_ID_AMD_15H_NB_F1;
963 1015
964 f1 = pci_get_related_function(nb->misc->vendor, pci_func, nb->misc); 1016 f1 = pci_get_related_function(nb->misc->vendor, pci_func, nb->misc);
965 if (WARN_ON(!f1)) 1017 if (WARN_ON(!f1))
@@ -1049,7 +1101,7 @@ static int ddr2_cs_size(unsigned i, bool dct_width)
1049} 1101}
1050 1102
1051static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct, 1103static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1052 unsigned cs_mode) 1104 unsigned cs_mode, int cs_mask_nr)
1053{ 1105{
1054 u32 dclr = dct ? pvt->dclr1 : pvt->dclr0; 1106 u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
1055 1107
@@ -1167,8 +1219,43 @@ static int ddr3_cs_size(unsigned i, bool dct_width)
1167 return cs_size; 1219 return cs_size;
1168} 1220}
1169 1221
1222static int ddr3_lrdimm_cs_size(unsigned i, unsigned rank_multiply)
1223{
1224 unsigned shift = 0;
1225 int cs_size = 0;
1226
1227 if (i < 4 || i == 6)
1228 cs_size = -1;
1229 else if (i == 12)
1230 shift = 7;
1231 else if (!(i & 0x1))
1232 shift = i >> 1;
1233 else
1234 shift = (i + 1) >> 1;
1235
1236 if (cs_size != -1)
1237 cs_size = rank_multiply * (128 << shift);
1238
1239 return cs_size;
1240}
1241
1242static int ddr4_cs_size(unsigned i)
1243{
1244 int cs_size = 0;
1245
1246 if (i == 0)
1247 cs_size = -1;
1248 else if (i == 1)
1249 cs_size = 1024;
1250 else
1251 /* Min cs_size = 1G */
1252 cs_size = 1024 * (1 << (i >> 1));
1253
1254 return cs_size;
1255}
1256
1170static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct, 1257static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1171 unsigned cs_mode) 1258 unsigned cs_mode, int cs_mask_nr)
1172{ 1259{
1173 u32 dclr = dct ? pvt->dclr1 : pvt->dclr0; 1260 u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
1174 1261
@@ -1184,18 +1271,49 @@ static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1184 * F15h supports only 64bit DCT interfaces 1271 * F15h supports only 64bit DCT interfaces
1185 */ 1272 */
1186static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct, 1273static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1187 unsigned cs_mode) 1274 unsigned cs_mode, int cs_mask_nr)
1188{ 1275{
1189 WARN_ON(cs_mode > 12); 1276 WARN_ON(cs_mode > 12);
1190 1277
1191 return ddr3_cs_size(cs_mode, false); 1278 return ddr3_cs_size(cs_mode, false);
1192} 1279}
1193 1280
1281/* F15h M60h supports DDR4 mapping as well.. */
1282static int f15_m60h_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1283 unsigned cs_mode, int cs_mask_nr)
1284{
1285 int cs_size;
1286 u32 dcsm = pvt->csels[dct].csmasks[cs_mask_nr];
1287
1288 WARN_ON(cs_mode > 12);
1289
1290 if (pvt->dram_type == MEM_DDR4) {
1291 if (cs_mode > 9)
1292 return -1;
1293
1294 cs_size = ddr4_cs_size(cs_mode);
1295 } else if (pvt->dram_type == MEM_LRDDR3) {
1296 unsigned rank_multiply = dcsm & 0xf;
1297
1298 if (rank_multiply == 3)
1299 rank_multiply = 4;
1300 cs_size = ddr3_lrdimm_cs_size(cs_mode, rank_multiply);
1301 } else {
1302 /* Minimum cs size is 512mb for F15hM60h*/
1303 if (cs_mode == 0x1)
1304 return -1;
1305
1306 cs_size = ddr3_cs_size(cs_mode, false);
1307 }
1308
1309 return cs_size;
1310}
1311
1194/* 1312/*
1195 * F16h and F15h model 30h have only limited cs_modes. 1313 * F16h and F15h model 30h have only limited cs_modes.
1196 */ 1314 */
1197static int f16_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct, 1315static int f16_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1198 unsigned cs_mode) 1316 unsigned cs_mode, int cs_mask_nr)
1199{ 1317{
1200 WARN_ON(cs_mode > 12); 1318 WARN_ON(cs_mode > 12);
1201 1319
@@ -1757,13 +1875,20 @@ static void debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
1757 1875
1758 size0 = 0; 1876 size0 = 0;
1759 if (dcsb[dimm*2] & DCSB_CS_ENABLE) 1877 if (dcsb[dimm*2] & DCSB_CS_ENABLE)
1878 /* For f15m60h, need multiplier for LRDIMM cs_size
1879 * calculation. We pass 'dimm' value to the dbam_to_cs
1880 * mapper so we can find the multiplier from the
1881 * corresponding DCSM.
1882 */
1760 size0 = pvt->ops->dbam_to_cs(pvt, ctrl, 1883 size0 = pvt->ops->dbam_to_cs(pvt, ctrl,
1761 DBAM_DIMM(dimm, dbam)); 1884 DBAM_DIMM(dimm, dbam),
1885 dimm);
1762 1886
1763 size1 = 0; 1887 size1 = 0;
1764 if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE) 1888 if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE)
1765 size1 = pvt->ops->dbam_to_cs(pvt, ctrl, 1889 size1 = pvt->ops->dbam_to_cs(pvt, ctrl,
1766 DBAM_DIMM(dimm, dbam)); 1890 DBAM_DIMM(dimm, dbam),
1891 dimm);
1767 1892
1768 amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n", 1893 amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
1769 dimm * 2, size0, 1894 dimm * 2, size0,
@@ -1812,6 +1937,16 @@ static struct amd64_family_type family_types[] = {
1812 .dbam_to_cs = f16_dbam_to_chip_select, 1937 .dbam_to_cs = f16_dbam_to_chip_select,
1813 } 1938 }
1814 }, 1939 },
1940 [F15_M60H_CPUS] = {
1941 .ctl_name = "F15h_M60h",
1942 .f1_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1,
1943 .f3_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F3,
1944 .ops = {
1945 .early_channel_count = f1x_early_channel_count,
1946 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
1947 .dbam_to_cs = f15_m60h_dbam_to_chip_select,
1948 }
1949 },
1815 [F16_CPUS] = { 1950 [F16_CPUS] = {
1816 .ctl_name = "F16h", 1951 .ctl_name = "F16h",
1817 .f1_id = PCI_DEVICE_ID_AMD_16H_NB_F1, 1952 .f1_id = PCI_DEVICE_ID_AMD_16H_NB_F1,
@@ -2175,6 +2310,8 @@ static void read_mc_regs(struct amd64_pvt *pvt)
2175 } 2310 }
2176 2311
2177 pvt->ecc_sym_sz = 4; 2312 pvt->ecc_sym_sz = 4;
2313 determine_memory_type(pvt);
2314 edac_dbg(1, " DIMM type: %s\n", edac_mem_types[pvt->dram_type]);
2178 2315
2179 if (pvt->fam >= 0x10) { 2316 if (pvt->fam >= 0x10) {
2180 amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp); 2317 amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
@@ -2238,7 +2375,8 @@ static u32 get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
2238 */ 2375 */
2239 cs_mode = DBAM_DIMM(csrow_nr / 2, dbam); 2376 cs_mode = DBAM_DIMM(csrow_nr / 2, dbam);
2240 2377
2241 nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode) << (20 - PAGE_SHIFT); 2378 nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode, (csrow_nr / 2))
2379 << (20 - PAGE_SHIFT);
2242 2380
2243 edac_dbg(0, "csrow: %d, channel: %d, DBAM idx: %d\n", 2381 edac_dbg(0, "csrow: %d, channel: %d, DBAM idx: %d\n",
2244 csrow_nr, dct, cs_mode); 2382 csrow_nr, dct, cs_mode);
@@ -2257,7 +2395,6 @@ static int init_csrows(struct mem_ctl_info *mci)
2257 struct csrow_info *csrow; 2395 struct csrow_info *csrow;
2258 struct dimm_info *dimm; 2396 struct dimm_info *dimm;
2259 enum edac_type edac_mode; 2397 enum edac_type edac_mode;
2260 enum mem_type mtype;
2261 int i, j, empty = 1; 2398 int i, j, empty = 1;
2262 int nr_pages = 0; 2399 int nr_pages = 0;
2263 u32 val; 2400 u32 val;
@@ -2302,8 +2439,6 @@ static int init_csrows(struct mem_ctl_info *mci)
2302 nr_pages += row_dct1_pages; 2439 nr_pages += row_dct1_pages;
2303 } 2440 }
2304 2441
2305 mtype = determine_memory_type(pvt, i);
2306
2307 edac_dbg(1, "Total csrow%d pages: %u\n", i, nr_pages); 2442 edac_dbg(1, "Total csrow%d pages: %u\n", i, nr_pages);
2308 2443
2309 /* 2444 /*
@@ -2317,7 +2452,7 @@ static int init_csrows(struct mem_ctl_info *mci)
2317 2452
2318 for (j = 0; j < pvt->channel_count; j++) { 2453 for (j = 0; j < pvt->channel_count; j++) {
2319 dimm = csrow->channels[j]->dimm; 2454 dimm = csrow->channels[j]->dimm;
2320 dimm->mtype = mtype; 2455 dimm->mtype = pvt->dram_type;
2321 dimm->edac_mode = edac_mode; 2456 dimm->edac_mode = edac_mode;
2322 } 2457 }
2323 } 2458 }
@@ -2604,6 +2739,10 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
2604 fam_type = &family_types[F15_M30H_CPUS]; 2739 fam_type = &family_types[F15_M30H_CPUS];
2605 pvt->ops = &family_types[F15_M30H_CPUS].ops; 2740 pvt->ops = &family_types[F15_M30H_CPUS].ops;
2606 break; 2741 break;
2742 } else if (pvt->model == 0x60) {
2743 fam_type = &family_types[F15_M60H_CPUS];
2744 pvt->ops = &family_types[F15_M60H_CPUS].ops;
2745 break;
2607 } 2746 }
2608 2747
2609 fam_type = &family_types[F15_CPUS]; 2748 fam_type = &family_types[F15_CPUS];
@@ -2828,55 +2967,13 @@ static void remove_one_instance(struct pci_dev *pdev)
2828 * inquiry this table to see if this driver is for a given device found. 2967 * inquiry this table to see if this driver is for a given device found.
2829 */ 2968 */
2830static const struct pci_device_id amd64_pci_table[] = { 2969static const struct pci_device_id amd64_pci_table[] = {
2831 { 2970 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_K8_NB_MEMCTL) },
2832 .vendor = PCI_VENDOR_ID_AMD, 2971 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_10H_NB_DRAM) },
2833 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL, 2972 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F2) },
2834 .subvendor = PCI_ANY_ID, 2973 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F2) },
2835 .subdevice = PCI_ANY_ID, 2974 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F2) },
2836 .class = 0, 2975 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F2) },
2837 .class_mask = 0, 2976 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F2) },
2838 },
2839 {
2840 .vendor = PCI_VENDOR_ID_AMD,
2841 .device = PCI_DEVICE_ID_AMD_10H_NB_DRAM,
2842 .subvendor = PCI_ANY_ID,
2843 .subdevice = PCI_ANY_ID,
2844 .class = 0,
2845 .class_mask = 0,
2846 },
2847 {
2848 .vendor = PCI_VENDOR_ID_AMD,
2849 .device = PCI_DEVICE_ID_AMD_15H_NB_F2,
2850 .subvendor = PCI_ANY_ID,
2851 .subdevice = PCI_ANY_ID,
2852 .class = 0,
2853 .class_mask = 0,
2854 },
2855 {
2856 .vendor = PCI_VENDOR_ID_AMD,
2857 .device = PCI_DEVICE_ID_AMD_15H_M30H_NB_F2,
2858 .subvendor = PCI_ANY_ID,
2859 .subdevice = PCI_ANY_ID,
2860 .class = 0,
2861 .class_mask = 0,
2862 },
2863 {
2864 .vendor = PCI_VENDOR_ID_AMD,
2865 .device = PCI_DEVICE_ID_AMD_16H_NB_F2,
2866 .subvendor = PCI_ANY_ID,
2867 .subdevice = PCI_ANY_ID,
2868 .class = 0,
2869 .class_mask = 0,
2870 },
2871 {
2872 .vendor = PCI_VENDOR_ID_AMD,
2873 .device = PCI_DEVICE_ID_AMD_16H_M30H_NB_F2,
2874 .subvendor = PCI_ANY_ID,
2875 .subdevice = PCI_ANY_ID,
2876 .class = 0,
2877 .class_mask = 0,
2878 },
2879
2880 {0, } 2977 {0, }
2881}; 2978};
2882MODULE_DEVICE_TABLE(pci, amd64_pci_table); 2979MODULE_DEVICE_TABLE(pci, amd64_pci_table);
@@ -2938,6 +3035,11 @@ static int __init amd64_edac_init(void)
2938 goto err_no_instances; 3035 goto err_no_instances;
2939 3036
2940 setup_pci_device(); 3037 setup_pci_device();
3038
3039#ifdef CONFIG_X86_32
3040 amd64_err("%s on 32-bit is unsupported. USE AT YOUR OWN RISK!\n", EDAC_MOD_STR);
3041#endif
3042
2941 return 0; 3043 return 0;
2942 3044
2943err_no_instances: 3045err_no_instances:
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index 55fb5941c6d4..d8468c667925 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -162,10 +162,12 @@
162/* 162/*
163 * PCI-defined configuration space registers 163 * PCI-defined configuration space registers
164 */ 164 */
165#define PCI_DEVICE_ID_AMD_15H_M30H_NB_F1 0x141b
166#define PCI_DEVICE_ID_AMD_15H_M30H_NB_F2 0x141c
167#define PCI_DEVICE_ID_AMD_15H_NB_F1 0x1601 165#define PCI_DEVICE_ID_AMD_15H_NB_F1 0x1601
168#define PCI_DEVICE_ID_AMD_15H_NB_F2 0x1602 166#define PCI_DEVICE_ID_AMD_15H_NB_F2 0x1602
167#define PCI_DEVICE_ID_AMD_15H_M30H_NB_F1 0x141b
168#define PCI_DEVICE_ID_AMD_15H_M30H_NB_F2 0x141c
169#define PCI_DEVICE_ID_AMD_15H_M60H_NB_F1 0x1571
170#define PCI_DEVICE_ID_AMD_15H_M60H_NB_F2 0x1572
169#define PCI_DEVICE_ID_AMD_16H_NB_F1 0x1531 171#define PCI_DEVICE_ID_AMD_16H_NB_F1 0x1531
170#define PCI_DEVICE_ID_AMD_16H_NB_F2 0x1532 172#define PCI_DEVICE_ID_AMD_16H_NB_F2 0x1532
171#define PCI_DEVICE_ID_AMD_16H_M30H_NB_F1 0x1581 173#define PCI_DEVICE_ID_AMD_16H_M30H_NB_F1 0x1581
@@ -221,6 +223,8 @@
221 223
222#define csrow_enabled(i, dct, pvt) ((pvt)->csels[(dct)].csbases[(i)] & DCSB_CS_ENABLE) 224#define csrow_enabled(i, dct, pvt) ((pvt)->csels[(dct)].csbases[(i)] & DCSB_CS_ENABLE)
223 225
226#define DRAM_CONTROL 0x78
227
224#define DBAM0 0x80 228#define DBAM0 0x80
225#define DBAM1 0x180 229#define DBAM1 0x180
226 230
@@ -301,6 +305,7 @@ enum amd_families {
301 F10_CPUS, 305 F10_CPUS,
302 F15_CPUS, 306 F15_CPUS,
303 F15_M30H_CPUS, 307 F15_M30H_CPUS,
308 F15_M60H_CPUS,
304 F16_CPUS, 309 F16_CPUS,
305 F16_M30H_CPUS, 310 F16_M30H_CPUS,
306 NUM_FAMILIES, 311 NUM_FAMILIES,
@@ -379,6 +384,9 @@ struct amd64_pvt {
379 384
380 /* place to store error injection parameters prior to issue */ 385 /* place to store error injection parameters prior to issue */
381 struct error_injection injection; 386 struct error_injection injection;
387
388 /* cache the dram_type */
389 enum mem_type dram_type;
382}; 390};
383 391
384enum err_codes { 392enum err_codes {
@@ -480,7 +488,8 @@ struct low_ops {
480 int (*early_channel_count) (struct amd64_pvt *pvt); 488 int (*early_channel_count) (struct amd64_pvt *pvt);
481 void (*map_sysaddr_to_csrow) (struct mem_ctl_info *mci, u64 sys_addr, 489 void (*map_sysaddr_to_csrow) (struct mem_ctl_info *mci, u64 sys_addr,
482 struct err_info *); 490 struct err_info *);
483 int (*dbam_to_cs) (struct amd64_pvt *pvt, u8 dct, unsigned cs_mode); 491 int (*dbam_to_cs) (struct amd64_pvt *pvt, u8 dct,
492 unsigned cs_mode, int cs_mask_nr);
484}; 493};
485 494
486struct amd64_family_type { 495struct amd64_family_type {
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index c3893b0ddb18..1747906f10ce 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -125,27 +125,27 @@ static void edac_mc_dump_mci(struct mem_ctl_info *mci)
125 125
126#endif /* CONFIG_EDAC_DEBUG */ 126#endif /* CONFIG_EDAC_DEBUG */
127 127
128/*
129 * keep those in sync with the enum mem_type
130 */
131const char * const edac_mem_types[] = { 128const char * const edac_mem_types[] = {
132 "Empty csrow", 129 [MEM_EMPTY] = "Empty csrow",
133 "Reserved csrow type", 130 [MEM_RESERVED] = "Reserved csrow type",
134 "Unknown csrow type", 131 [MEM_UNKNOWN] = "Unknown csrow type",
135 "Fast page mode RAM", 132 [MEM_FPM] = "Fast page mode RAM",
136 "Extended data out RAM", 133 [MEM_EDO] = "Extended data out RAM",
137 "Burst Extended data out RAM", 134 [MEM_BEDO] = "Burst Extended data out RAM",
138 "Single data rate SDRAM", 135 [MEM_SDR] = "Single data rate SDRAM",
139 "Registered single data rate SDRAM", 136 [MEM_RDR] = "Registered single data rate SDRAM",
140 "Double data rate SDRAM", 137 [MEM_DDR] = "Double data rate SDRAM",
141 "Registered Double data rate SDRAM", 138 [MEM_RDDR] = "Registered Double data rate SDRAM",
142 "Rambus DRAM", 139 [MEM_RMBS] = "Rambus DRAM",
143 "Unbuffered DDR2 RAM", 140 [MEM_DDR2] = "Unbuffered DDR2 RAM",
144 "Fully buffered DDR2", 141 [MEM_FB_DDR2] = "Fully buffered DDR2",
145 "Registered DDR2 RAM", 142 [MEM_RDDR2] = "Registered DDR2 RAM",
146 "Rambus XDR", 143 [MEM_XDR] = "Rambus XDR",
147 "Unbuffered DDR3 RAM", 144 [MEM_DDR3] = "Unbuffered DDR3 RAM",
148 "Registered DDR3 RAM", 145 [MEM_RDDR3] = "Registered DDR3 RAM",
146 [MEM_LRDDR3] = "Load-Reduced DDR3 RAM",
147 [MEM_DDR4] = "Unbuffered DDR4 RAM",
148 [MEM_RDDR4] = "Registered DDR4 RAM",
149}; 149};
150EXPORT_SYMBOL_GPL(edac_mem_types); 150EXPORT_SYMBOL_GPL(edac_mem_types);
151 151
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index a6cd36100663..670d2829c547 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -372,7 +372,7 @@ static int edac_create_csrow_object(struct mem_ctl_info *mci,
372{ 372{
373 int err, chan; 373 int err, chan;
374 374
375 if (csrow->nr_channels >= EDAC_NR_CHANNELS) 375 if (csrow->nr_channels > EDAC_NR_CHANNELS)
376 return -ENODEV; 376 return -ENODEV;
377 377
378 csrow->dev.type = &csrow_attr_type; 378 csrow->dev.type = &csrow_attr_type;
diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
index e8658e451762..24d877f6e577 100644
--- a/drivers/edac/edac_pci_sysfs.c
+++ b/drivers/edac/edac_pci_sysfs.c
@@ -14,9 +14,6 @@
14#include "edac_core.h" 14#include "edac_core.h"
15#include "edac_module.h" 15#include "edac_module.h"
16 16
17/* Turn off this whole feature if PCI is not configured */
18#ifdef CONFIG_PCI
19
20#define EDAC_PCI_SYMLINK "device" 17#define EDAC_PCI_SYMLINK "device"
21 18
22/* data variables exported via sysfs */ 19/* data variables exported via sysfs */
@@ -761,5 +758,3 @@ MODULE_PARM_DESC(check_pci_errors,
761module_param(edac_pci_panic_on_pe, int, 0644); 758module_param(edac_pci_panic_on_pe, int, 0644);
762MODULE_PARM_DESC(edac_pci_panic_on_pe, 759MODULE_PARM_DESC(edac_pci_panic_on_pe,
763 "Panic on PCI Bus Parity error: 0=off 1=on"); 760 "Panic on PCI Bus Parity error: 0=off 1=on");
764
765#endif /* CONFIG_PCI */
diff --git a/drivers/edac/ghes_edac.c b/drivers/edac/ghes_edac.c
index 8399b4e16fe0..b24681998740 100644
--- a/drivers/edac/ghes_edac.c
+++ b/drivers/edac/ghes_edac.c
@@ -413,8 +413,8 @@ void ghes_edac_report_mem_error(struct ghes *ghes, int sev,
413 413
414 /* Generate the trace event */ 414 /* Generate the trace event */
415 grain_bits = fls_long(e->grain); 415 grain_bits = fls_long(e->grain);
416 sprintf(pvt->detail_location, "APEI location: %s %s", 416 snprintf(pvt->detail_location, sizeof(pvt->detail_location),
417 e->location, e->other_detail); 417 "APEI location: %s %s", e->location, e->other_detail);
418 trace_mc_event(type, e->msg, e->label, e->error_count, 418 trace_mc_event(type, e->msg, e->label, e->error_count,
419 mci->mc_idx, e->top_layer, e->mid_layer, e->low_layer, 419 mci->mc_idx, e->top_layer, e->mid_layer, e->low_layer,
420 PAGES_TO_MiB(e->page_frame_number) | e->offset_in_page, 420 PAGES_TO_MiB(e->page_frame_number) | e->offset_in_page,
diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
index cd28b968e5c7..5cb36a6022cc 100644
--- a/drivers/edac/i3000_edac.c
+++ b/drivers/edac/i3000_edac.c
@@ -542,8 +542,7 @@ fail1:
542 pci_unregister_driver(&i3000_driver); 542 pci_unregister_driver(&i3000_driver);
543 543
544fail0: 544fail0:
545 if (mci_pdev) 545 pci_dev_put(mci_pdev);
546 pci_dev_put(mci_pdev);
547 546
548 return pci_rc; 547 return pci_rc;
549} 548}
diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
index aa98b136f5d0..4ad062b0ef26 100644
--- a/drivers/edac/i3200_edac.c
+++ b/drivers/edac/i3200_edac.c
@@ -523,8 +523,7 @@ fail1:
523 pci_unregister_driver(&i3200_driver); 523 pci_unregister_driver(&i3200_driver);
524 524
525fail0: 525fail0:
526 if (mci_pdev) 526 pci_dev_put(mci_pdev);
527 pci_dev_put(mci_pdev);
528 527
529 return pci_rc; 528 return pci_rc;
530} 529}
diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
index d730e276d1a8..b4705d9366bf 100644
--- a/drivers/edac/i82443bxgx_edac.c
+++ b/drivers/edac/i82443bxgx_edac.c
@@ -458,8 +458,7 @@ static void __exit i82443bxgx_edacmc_exit(void)
458 if (!i82443bxgx_registered) 458 if (!i82443bxgx_registered)
459 i82443bxgx_edacmc_remove_one(mci_pdev); 459 i82443bxgx_edacmc_remove_one(mci_pdev);
460 460
461 if (mci_pdev) 461 pci_dev_put(mci_pdev);
462 pci_dev_put(mci_pdev);
463} 462}
464 463
465module_init(i82443bxgx_edacmc_init); 464module_init(i82443bxgx_edacmc_init);
diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
index f78c1c54dbd5..58586d59bf8e 100644
--- a/drivers/edac/mce_amd.c
+++ b/drivers/edac/mce_amd.c
@@ -138,6 +138,15 @@ static const char * const mc5_mce_desc[] = {
138 "Retire status queue" 138 "Retire status queue"
139}; 139};
140 140
141static const char * const mc6_mce_desc[] = {
142 "Hardware Assertion",
143 "Free List",
144 "Physical Register File",
145 "Retire Queue",
146 "Scheduler table",
147 "Status Register File",
148};
149
141static bool f12h_mc0_mce(u16 ec, u8 xec) 150static bool f12h_mc0_mce(u16 ec, u8 xec)
142{ 151{
143 bool ret = false; 152 bool ret = false;
@@ -432,8 +441,8 @@ static bool k8_mc2_mce(u16 ec, u8 xec)
432 pr_cont(": %s error in the L2 cache tags.\n", R4_MSG(ec)); 441 pr_cont(": %s error in the L2 cache tags.\n", R4_MSG(ec));
433 else if (xec == 0x0) { 442 else if (xec == 0x0) {
434 if (TLB_ERROR(ec)) 443 if (TLB_ERROR(ec))
435 pr_cont(": %s error in a Page Descriptor Cache or " 444 pr_cont("%s error in a Page Descriptor Cache or Guest TLB.\n",
436 "Guest TLB.\n", TT_MSG(ec)); 445 TT_MSG(ec));
437 else if (BUS_ERROR(ec)) 446 else if (BUS_ERROR(ec))
438 pr_cont(": %s/ECC error in data read from NB: %s.\n", 447 pr_cont(": %s/ECC error in data read from NB: %s.\n",
439 R4_MSG(ec), PP_MSG(ec)); 448 R4_MSG(ec), PP_MSG(ec));
@@ -672,38 +681,10 @@ static void decode_mc6_mce(struct mce *m)
672 681
673 pr_emerg(HW_ERR "MC6 Error: "); 682 pr_emerg(HW_ERR "MC6 Error: ");
674 683
675 switch (xec) { 684 if (xec > 0x5)
676 case 0x0:
677 pr_cont("Hardware Assertion");
678 break;
679
680 case 0x1:
681 pr_cont("Free List");
682 break;
683
684 case 0x2:
685 pr_cont("Physical Register File");
686 break;
687
688 case 0x3:
689 pr_cont("Retire Queue");
690 break;
691
692 case 0x4:
693 pr_cont("Scheduler table");
694 break;
695
696 case 0x5:
697 pr_cont("Status Register File");
698 break;
699
700 default:
701 goto wrong_mc6_mce; 685 goto wrong_mc6_mce;
702 break;
703 }
704
705 pr_cont(" parity error.\n");
706 686
687 pr_cont("%s parity error.\n", mc6_mce_desc[xec]);
707 return; 688 return;
708 689
709 wrong_mc6_mce: 690 wrong_mc6_mce:
@@ -800,7 +781,7 @@ int amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
800 pr_cont("]: 0x%016llx\n", m->status); 781 pr_cont("]: 0x%016llx\n", m->status);
801 782
802 if (m->status & MCI_STATUS_ADDRV) 783 if (m->status & MCI_STATUS_ADDRV)
803 pr_emerg(HW_ERR "MC%d_ADDR: 0x%016llx\n", m->bank, m->addr); 784 pr_emerg(HW_ERR "MC%d Error Address: 0x%016llx\n", m->bank, m->addr);
804 785
805 if (!fam_ops) 786 if (!fam_ops)
806 goto err_code; 787 goto err_code;
diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
index 51b7e3a36e37..c2359a1ea6b3 100644
--- a/drivers/edac/mce_amd.h
+++ b/drivers/edac/mce_amd.h
@@ -32,9 +32,6 @@
32#define R4(x) (((x) >> 4) & 0xf) 32#define R4(x) (((x) >> 4) & 0xf)
33#define R4_MSG(x) ((R4(x) < 9) ? rrrr_msgs[R4(x)] : "Wrong R4!") 33#define R4_MSG(x) ((R4(x) < 9) ? rrrr_msgs[R4(x)] : "Wrong R4!")
34 34
35#define MCI_STATUS_DEFERRED BIT_64(44)
36#define MCI_STATUS_POISON BIT_64(43)
37
38extern const char * const pp_msgs[]; 35extern const char * const pp_msgs[];
39 36
40enum tt_ids { 37enum tt_ids {
diff --git a/drivers/edac/mce_amd_inj.c b/drivers/edac/mce_amd_inj.c
index 5e46a9fea31b..0bd91a802c67 100644
--- a/drivers/edac/mce_amd_inj.c
+++ b/drivers/edac/mce_amd_inj.c
@@ -1,173 +1,262 @@
1/* 1/*
2 * A simple MCE injection facility for testing the MCE decoding code. This 2 * A simple MCE injection facility for testing different aspects of the RAS
3 * driver should be built as module so that it can be loaded on production 3 * code. This driver should be built as module so that it can be loaded
4 * kernels for testing purposes. 4 * on production kernels for testing purposes.
5 * 5 *
6 * This file may be distributed under the terms of the GNU General Public 6 * This file may be distributed under the terms of the GNU General Public
7 * License version 2. 7 * License version 2.
8 * 8 *
9 * Copyright (c) 2010: Borislav Petkov <bp@alien8.de> 9 * Copyright (c) 2010-14: Borislav Petkov <bp@alien8.de>
10 * Advanced Micro Devices Inc. 10 * Advanced Micro Devices Inc.
11 */ 11 */
12 12
13#include <linux/kobject.h> 13#include <linux/kobject.h>
14#include <linux/debugfs.h>
14#include <linux/device.h> 15#include <linux/device.h>
15#include <linux/edac.h>
16#include <linux/module.h> 16#include <linux/module.h>
17#include <linux/cpu.h>
17#include <asm/mce.h> 18#include <asm/mce.h>
18 19
19#include "mce_amd.h" 20#include "mce_amd.h"
20 21
21struct edac_mce_attr {
22 struct attribute attr;
23 ssize_t (*show) (struct kobject *kobj, struct edac_mce_attr *attr, char *buf);
24 ssize_t (*store)(struct kobject *kobj, struct edac_mce_attr *attr,
25 const char *buf, size_t count);
26};
27
28#define EDAC_MCE_ATTR(_name, _mode, _show, _store) \
29static struct edac_mce_attr mce_attr_##_name = __ATTR(_name, _mode, _show, _store)
30
31static struct kobject *mce_kobj;
32
33/* 22/*
34 * Collect all the MCi_XXX settings 23 * Collect all the MCi_XXX settings
35 */ 24 */
36static struct mce i_mce; 25static struct mce i_mce;
26static struct dentry *dfs_inj;
37 27
38#define MCE_INJECT_STORE(reg) \ 28#define MCE_INJECT_SET(reg) \
39static ssize_t edac_inject_##reg##_store(struct kobject *kobj, \ 29static int inj_##reg##_set(void *data, u64 val) \
40 struct edac_mce_attr *attr, \
41 const char *data, size_t count)\
42{ \ 30{ \
43 int ret = 0; \ 31 struct mce *m = (struct mce *)data; \
44 unsigned long value; \
45 \
46 ret = kstrtoul(data, 16, &value); \
47 if (ret < 0) \
48 printk(KERN_ERR "Error writing MCE " #reg " field.\n"); \
49 \ 32 \
50 i_mce.reg = value; \ 33 m->reg = val; \
51 \ 34 return 0; \
52 return count; \
53} 35}
54 36
55MCE_INJECT_STORE(status); 37MCE_INJECT_SET(status);
56MCE_INJECT_STORE(misc); 38MCE_INJECT_SET(misc);
57MCE_INJECT_STORE(addr); 39MCE_INJECT_SET(addr);
58 40
59#define MCE_INJECT_SHOW(reg) \ 41#define MCE_INJECT_GET(reg) \
60static ssize_t edac_inject_##reg##_show(struct kobject *kobj, \ 42static int inj_##reg##_get(void *data, u64 *val) \
61 struct edac_mce_attr *attr, \
62 char *buf) \
63{ \ 43{ \
64 return sprintf(buf, "0x%016llx\n", i_mce.reg); \ 44 struct mce *m = (struct mce *)data; \
45 \
46 *val = m->reg; \
47 return 0; \
65} 48}
66 49
67MCE_INJECT_SHOW(status); 50MCE_INJECT_GET(status);
68MCE_INJECT_SHOW(misc); 51MCE_INJECT_GET(misc);
69MCE_INJECT_SHOW(addr); 52MCE_INJECT_GET(addr);
70 53
71EDAC_MCE_ATTR(status, 0644, edac_inject_status_show, edac_inject_status_store); 54DEFINE_SIMPLE_ATTRIBUTE(status_fops, inj_status_get, inj_status_set, "%llx\n");
72EDAC_MCE_ATTR(misc, 0644, edac_inject_misc_show, edac_inject_misc_store); 55DEFINE_SIMPLE_ATTRIBUTE(misc_fops, inj_misc_get, inj_misc_set, "%llx\n");
73EDAC_MCE_ATTR(addr, 0644, edac_inject_addr_show, edac_inject_addr_store); 56DEFINE_SIMPLE_ATTRIBUTE(addr_fops, inj_addr_get, inj_addr_set, "%llx\n");
74 57
75/* 58/*
76 * This denotes into which bank we're injecting and triggers 59 * Caller needs to be make sure this cpu doesn't disappear
77 * the injection, at the same time. 60 * from under us, i.e.: get_cpu/put_cpu.
78 */ 61 */
79static ssize_t edac_inject_bank_store(struct kobject *kobj, 62static int toggle_hw_mce_inject(unsigned int cpu, bool enable)
80 struct edac_mce_attr *attr,
81 const char *data, size_t count)
82{ 63{
83 int ret = 0; 64 u32 l, h;
84 unsigned long value; 65 int err;
85 66
86 ret = kstrtoul(data, 10, &value); 67 err = rdmsr_on_cpu(cpu, MSR_K7_HWCR, &l, &h);
87 if (ret < 0) { 68 if (err) {
88 printk(KERN_ERR "Invalid bank value!\n"); 69 pr_err("%s: error reading HWCR\n", __func__);
89 return -EINVAL; 70 return err;
90 } 71 }
91 72
92 if (value > 5) 73 enable ? (l |= BIT(18)) : (l &= ~BIT(18));
93 if (boot_cpu_data.x86 != 0x15 || value > 6) {
94 printk(KERN_ERR "Non-existent MCE bank: %lu\n", value);
95 return -EINVAL;
96 }
97 74
98 i_mce.bank = value; 75 err = wrmsr_on_cpu(cpu, MSR_K7_HWCR, l, h);
76 if (err)
77 pr_err("%s: error writing HWCR\n", __func__);
99 78
100 amd_decode_mce(NULL, 0, &i_mce); 79 return err;
80}
101 81
102 return count; 82static int flags_get(void *data, u64 *val)
83{
84 struct mce *m = (struct mce *)data;
85
86 *val = m->inject_flags;
87
88 return 0;
103} 89}
104 90
105static ssize_t edac_inject_bank_show(struct kobject *kobj, 91static int flags_set(void *data, u64 val)
106 struct edac_mce_attr *attr, char *buf)
107{ 92{
108 return sprintf(buf, "%d\n", i_mce.bank); 93 struct mce *m = (struct mce *)data;
94
95 m->inject_flags = (u8)val;
96 return 0;
109} 97}
110 98
111EDAC_MCE_ATTR(bank, 0644, edac_inject_bank_show, edac_inject_bank_store); 99DEFINE_SIMPLE_ATTRIBUTE(flags_fops, flags_get, flags_set, "%llu\n");
112 100
113static struct edac_mce_attr *sysfs_attrs[] = { &mce_attr_status, &mce_attr_misc, 101/*
114 &mce_attr_addr, &mce_attr_bank 102 * On which CPU to inject?
115}; 103 */
104MCE_INJECT_GET(extcpu);
116 105
117static int __init edac_init_mce_inject(void) 106static int inj_extcpu_set(void *data, u64 val)
118{ 107{
119 struct bus_type *edac_subsys = NULL; 108 struct mce *m = (struct mce *)data;
120 int i, err = 0;
121 109
122 edac_subsys = edac_get_sysfs_subsys(); 110 if (val >= nr_cpu_ids || !cpu_online(val)) {
123 if (!edac_subsys) 111 pr_err("%s: Invalid CPU: %llu\n", __func__, val);
124 return -EINVAL; 112 return -EINVAL;
113 }
114 m->extcpu = val;
115 return 0;
116}
117
118DEFINE_SIMPLE_ATTRIBUTE(extcpu_fops, inj_extcpu_get, inj_extcpu_set, "%llu\n");
125 119
126 mce_kobj = kobject_create_and_add("mce", &edac_subsys->dev_root->kobj); 120static void trigger_mce(void *info)
127 if (!mce_kobj) { 121{
128 printk(KERN_ERR "Error creating a mce kset.\n"); 122 asm volatile("int $18");
129 err = -ENOMEM; 123}
130 goto err_mce_kobj; 124
125static void do_inject(void)
126{
127 u64 mcg_status = 0;
128 unsigned int cpu = i_mce.extcpu;
129 u8 b = i_mce.bank;
130
131 if (!(i_mce.inject_flags & MCJ_EXCEPTION)) {
132 amd_decode_mce(NULL, 0, &i_mce);
133 return;
131 } 134 }
132 135
133 for (i = 0; i < ARRAY_SIZE(sysfs_attrs); i++) { 136 get_online_cpus();
134 err = sysfs_create_file(mce_kobj, &sysfs_attrs[i]->attr); 137 if (!cpu_online(cpu))
135 if (err) { 138 goto err;
136 printk(KERN_ERR "Error creating %s in sysfs.\n", 139
137 sysfs_attrs[i]->attr.name); 140 /* prep MCE global settings for the injection */
138 goto err_sysfs_create; 141 mcg_status = MCG_STATUS_MCIP | MCG_STATUS_EIPV;
142
143 if (!(i_mce.status & MCI_STATUS_PCC))
144 mcg_status |= MCG_STATUS_RIPV;
145
146 toggle_hw_mce_inject(cpu, true);
147
148 wrmsr_on_cpu(cpu, MSR_IA32_MCG_STATUS,
149 (u32)mcg_status, (u32)(mcg_status >> 32));
150
151 wrmsr_on_cpu(cpu, MSR_IA32_MCx_STATUS(b),
152 (u32)i_mce.status, (u32)(i_mce.status >> 32));
153
154 wrmsr_on_cpu(cpu, MSR_IA32_MCx_ADDR(b),
155 (u32)i_mce.addr, (u32)(i_mce.addr >> 32));
156
157 wrmsr_on_cpu(cpu, MSR_IA32_MCx_MISC(b),
158 (u32)i_mce.misc, (u32)(i_mce.misc >> 32));
159
160 toggle_hw_mce_inject(cpu, false);
161
162 smp_call_function_single(cpu, trigger_mce, NULL, 0);
163
164err:
165 put_online_cpus();
166
167}
168
169/*
170 * This denotes into which bank we're injecting and triggers
171 * the injection, at the same time.
172 */
173static int inj_bank_set(void *data, u64 val)
174{
175 struct mce *m = (struct mce *)data;
176
177 if (val > 5) {
178 if (boot_cpu_data.x86 != 0x15 || val > 6) {
179 pr_err("Non-existent MCE bank: %llu\n", val);
180 return -EINVAL;
139 } 181 }
140 } 182 }
141 return 0;
142 183
143err_sysfs_create: 184 m->bank = val;
144 while (--i >= 0) 185 do_inject();
145 sysfs_remove_file(mce_kobj, &sysfs_attrs[i]->attr);
146 186
147 kobject_del(mce_kobj); 187 return 0;
188}
148 189
149err_mce_kobj: 190static int inj_bank_get(void *data, u64 *val)
150 edac_put_sysfs_subsys(); 191{
192 struct mce *m = (struct mce *)data;
151 193
152 return err; 194 *val = m->bank;
195 return 0;
153} 196}
154 197
155static void __exit edac_exit_mce_inject(void) 198DEFINE_SIMPLE_ATTRIBUTE(bank_fops, inj_bank_get, inj_bank_set, "%llu\n");
199
200struct dfs_node {
201 char *name;
202 struct dentry *d;
203 const struct file_operations *fops;
204} dfs_fls[] = {
205 { .name = "status", .fops = &status_fops },
206 { .name = "misc", .fops = &misc_fops },
207 { .name = "addr", .fops = &addr_fops },
208 { .name = "bank", .fops = &bank_fops },
209 { .name = "flags", .fops = &flags_fops },
210 { .name = "cpu", .fops = &extcpu_fops },
211};
212
213static int __init init_mce_inject(void)
156{ 214{
157 int i; 215 int i;
158 216
159 for (i = 0; i < ARRAY_SIZE(sysfs_attrs); i++) 217 dfs_inj = debugfs_create_dir("mce-inject", NULL);
160 sysfs_remove_file(mce_kobj, &sysfs_attrs[i]->attr); 218 if (!dfs_inj)
219 return -EINVAL;
220
221 for (i = 0; i < ARRAY_SIZE(dfs_fls); i++) {
222 dfs_fls[i].d = debugfs_create_file(dfs_fls[i].name,
223 S_IRUSR | S_IWUSR,
224 dfs_inj,
225 &i_mce,
226 dfs_fls[i].fops);
227
228 if (!dfs_fls[i].d)
229 goto err_dfs_add;
230 }
231
232 return 0;
233
234err_dfs_add:
235 while (--i >= 0)
236 debugfs_remove(dfs_fls[i].d);
161 237
162 kobject_del(mce_kobj); 238 debugfs_remove(dfs_inj);
239 dfs_inj = NULL;
163 240
164 edac_put_sysfs_subsys(); 241 return -ENOMEM;
165} 242}
166 243
167module_init(edac_init_mce_inject); 244static void __exit exit_mce_inject(void)
168module_exit(edac_exit_mce_inject); 245{
246 int i;
247
248 for (i = 0; i < ARRAY_SIZE(dfs_fls); i++)
249 debugfs_remove(dfs_fls[i].d);
250
251 memset(&dfs_fls, 0, sizeof(dfs_fls));
252
253 debugfs_remove(dfs_inj);
254 dfs_inj = NULL;
255}
256module_init(init_mce_inject);
257module_exit(exit_mce_inject);
169 258
170MODULE_LICENSE("GPL"); 259MODULE_LICENSE("GPL");
171MODULE_AUTHOR("Borislav Petkov <bp@alien8.de>"); 260MODULE_AUTHOR("Borislav Petkov <bp@alien8.de>");
172MODULE_AUTHOR("AMD Inc."); 261MODULE_AUTHOR("AMD Inc.");
173MODULE_DESCRIPTION("MCE injection facility for testing MCE decoding"); 262MODULE_DESCRIPTION("MCE injection facility for RAS testing");
diff --git a/drivers/edac/mv64x60_edac.c b/drivers/edac/mv64x60_edac.c
index 542fad70e360..6366e880f978 100644
--- a/drivers/edac/mv64x60_edac.c
+++ b/drivers/edac/mv64x60_edac.c
@@ -178,7 +178,7 @@ static int mv64x60_pci_err_probe(struct platform_device *pdev)
178 res = devm_request_irq(&pdev->dev, 178 res = devm_request_irq(&pdev->dev,
179 pdata->irq, 179 pdata->irq,
180 mv64x60_pci_isr, 180 mv64x60_pci_isr,
181 IRQF_DISABLED, 181 0,
182 "[EDAC] PCI err", 182 "[EDAC] PCI err",
183 pci); 183 pci);
184 if (res < 0) { 184 if (res < 0) {
@@ -345,7 +345,7 @@ static int mv64x60_sram_err_probe(struct platform_device *pdev)
345 res = devm_request_irq(&pdev->dev, 345 res = devm_request_irq(&pdev->dev,
346 pdata->irq, 346 pdata->irq,
347 mv64x60_sram_isr, 347 mv64x60_sram_isr,
348 IRQF_DISABLED, 348 0,
349 "[EDAC] SRAM err", 349 "[EDAC] SRAM err",
350 edac_dev); 350 edac_dev);
351 if (res < 0) { 351 if (res < 0) {
@@ -540,7 +540,7 @@ static int mv64x60_cpu_err_probe(struct platform_device *pdev)
540 res = devm_request_irq(&pdev->dev, 540 res = devm_request_irq(&pdev->dev,
541 pdata->irq, 541 pdata->irq,
542 mv64x60_cpu_isr, 542 mv64x60_cpu_isr,
543 IRQF_DISABLED, 543 0,
544 "[EDAC] CPU err", 544 "[EDAC] CPU err",
545 edac_dev); 545 edac_dev);
546 if (res < 0) { 546 if (res < 0) {
@@ -800,7 +800,7 @@ static int mv64x60_mc_err_probe(struct platform_device *pdev)
800 res = devm_request_irq(&pdev->dev, 800 res = devm_request_irq(&pdev->dev,
801 pdata->irq, 801 pdata->irq,
802 mv64x60_mc_isr, 802 mv64x60_mc_isr,
803 IRQF_DISABLED, 803 0,
804 "[EDAC] MC err", 804 "[EDAC] MC err",
805 mci); 805 mci);
806 if (res < 0) { 806 if (res < 0) {
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
index 0f04d5ead521..41593539cec4 100644
--- a/drivers/edac/ppc4xx_edac.c
+++ b/drivers/edac/ppc4xx_edac.c
@@ -1120,7 +1120,7 @@ static int ppc4xx_edac_register_irq(struct platform_device *op,
1120 1120
1121 status = request_irq(ded_irq, 1121 status = request_irq(ded_irq,
1122 ppc4xx_edac_isr, 1122 ppc4xx_edac_isr,
1123 IRQF_DISABLED, 1123 0,
1124 "[EDAC] MC ECCDED", 1124 "[EDAC] MC ECCDED",
1125 mci); 1125 mci);
1126 1126
@@ -1134,7 +1134,7 @@ static int ppc4xx_edac_register_irq(struct platform_device *op,
1134 1134
1135 status = request_irq(sec_irq, 1135 status = request_irq(sec_irq,
1136 ppc4xx_edac_isr, 1136 ppc4xx_edac_isr,
1137 IRQF_DISABLED, 1137 0,
1138 "[EDAC] MC ECCSEC", 1138 "[EDAC] MC ECCSEC",
1139 mci); 1139 mci);
1140 1140
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index e9bb1af67c8d..63aa6730e89e 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -135,6 +135,7 @@ static inline int sad_pkg(const struct interleave_pkg *table, u32 reg,
135 135
136#define TOLM 0x80 136#define TOLM 0x80
137#define TOHM 0x84 137#define TOHM 0x84
138#define HASWELL_TOLM 0xd0
138#define HASWELL_TOHM_0 0xd4 139#define HASWELL_TOHM_0 0xd4
139#define HASWELL_TOHM_1 0xd8 140#define HASWELL_TOHM_1 0xd8
140 141
@@ -261,6 +262,7 @@ enum type {
261 SANDY_BRIDGE, 262 SANDY_BRIDGE,
262 IVY_BRIDGE, 263 IVY_BRIDGE,
263 HASWELL, 264 HASWELL,
265 BROADWELL,
264}; 266};
265 267
266struct sbridge_pvt; 268struct sbridge_pvt;
@@ -445,7 +447,7 @@ static const struct pci_id_table pci_dev_descr_ibridge_table[] = {
445 * - each SMI channel interfaces with a scalable memory buffer 447 * - each SMI channel interfaces with a scalable memory buffer
446 * - each scalable memory buffer supports 4 DDR3/DDR4 channels, 3 DPC 448 * - each scalable memory buffer supports 4 DDR3/DDR4 channels, 3 DPC
447 */ 449 */
448#define HASWELL_DDRCRCLKCONTROLS 0xa10 450#define HASWELL_DDRCRCLKCONTROLS 0xa10 /* Ditto on Broadwell */
449#define HASWELL_HASYSDEFEATURE2 0x84 451#define HASWELL_HASYSDEFEATURE2 0x84
450#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_VTD_MISC 0x2f28 452#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_VTD_MISC 0x2f28
451#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0 0x2fa0 453#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0 0x2fa0
@@ -497,12 +499,53 @@ static const struct pci_id_table pci_dev_descr_haswell_table[] = {
497}; 499};
498 500
499/* 501/*
502 * Broadwell support
503 *
504 * DE processor:
505 * - 1 IMC
506 * - 2 DDR3 channels, 2 DPC per channel
507 */
508#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_VTD_MISC 0x6f28
509#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0 0x6fa0
510#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA 0x6fa8
511#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_THERMAL 0x6f71
512#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD0 0x6ffc
513#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD1 0x6ffd
514#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0 0x6faa
515#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD1 0x6fab
516#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD2 0x6fac
517#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD3 0x6fad
518#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_DDRIO0 0x6faf
519
520static const struct pci_id_descr pci_dev_descr_broadwell[] = {
521 /* first item must be the HA */
522 { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0, 0) },
523
524 { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD0, 0) },
525 { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD1, 0) },
526
527 { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA, 0) },
528 { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_THERMAL, 0) },
529 { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0, 0) },
530 { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD1, 0) },
531 { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD2, 0) },
532 { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD3, 0) },
533 { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_DDRIO0, 1) },
534};
535
536static const struct pci_id_table pci_dev_descr_broadwell_table[] = {
537 PCI_ID_TABLE_ENTRY(pci_dev_descr_broadwell),
538 {0,} /* 0 terminated list. */
539};
540
541/*
500 * pci_device_id table for which devices we are looking for 542 * pci_device_id table for which devices we are looking for
501 */ 543 */
502static const struct pci_device_id sbridge_pci_tbl[] = { 544static const struct pci_device_id sbridge_pci_tbl[] = {
503 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0)}, 545 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0)},
504 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA)}, 546 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA)},
505 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0)}, 547 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0)},
548 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0)},
506 {0,} /* 0 terminated list. */ 549 {0,} /* 0 terminated list. */
507}; 550};
508 551
@@ -706,8 +749,8 @@ static u64 haswell_get_tolm(struct sbridge_pvt *pvt)
706{ 749{
707 u32 reg; 750 u32 reg;
708 751
709 pci_read_config_dword(pvt->info.pci_vtd, TOLM, &reg); 752 pci_read_config_dword(pvt->info.pci_vtd, HASWELL_TOLM, &reg);
710 return (GET_BITFIELD(reg, 26, 31) << 26) | 0x1ffffff; 753 return (GET_BITFIELD(reg, 26, 31) << 26) | 0x3ffffff;
711} 754}
712 755
713static u64 haswell_get_tohm(struct sbridge_pvt *pvt) 756static u64 haswell_get_tohm(struct sbridge_pvt *pvt)
@@ -767,12 +810,22 @@ static int check_if_ecc_is_active(const u8 bus, enum type type)
767 struct pci_dev *pdev = NULL; 810 struct pci_dev *pdev = NULL;
768 u32 mcmtr, id; 811 u32 mcmtr, id;
769 812
770 if (type == IVY_BRIDGE) 813 switch (type) {
814 case IVY_BRIDGE:
771 id = PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA; 815 id = PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA;
772 else if (type == HASWELL) 816 break;
817 case HASWELL:
773 id = PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA; 818 id = PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA;
774 else 819 break;
820 case SANDY_BRIDGE:
775 id = PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA; 821 id = PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA;
822 break;
823 case BROADWELL:
824 id = PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA;
825 break;
826 default:
827 return -ENODEV;
828 }
776 829
777 pdev = get_pdev_same_bus(bus, id); 830 pdev = get_pdev_same_bus(bus, id);
778 if (!pdev) { 831 if (!pdev) {
@@ -800,7 +853,7 @@ static int get_dimm_config(struct mem_ctl_info *mci)
800 enum edac_type mode; 853 enum edac_type mode;
801 enum mem_type mtype; 854 enum mem_type mtype;
802 855
803 if (pvt->info.type == HASWELL) 856 if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL)
804 pci_read_config_dword(pvt->pci_sad1, SAD_TARGET, &reg); 857 pci_read_config_dword(pvt->pci_sad1, SAD_TARGET, &reg);
805 else 858 else
806 pci_read_config_dword(pvt->pci_br0, SAD_TARGET, &reg); 859 pci_read_config_dword(pvt->pci_br0, SAD_TARGET, &reg);
@@ -848,7 +901,7 @@ static int get_dimm_config(struct mem_ctl_info *mci)
848 else 901 else
849 edac_dbg(0, "Memory is unregistered\n"); 902 edac_dbg(0, "Memory is unregistered\n");
850 903
851 if (mtype == MEM_DDR4 || MEM_RDDR4) 904 if (mtype == MEM_DDR4 || mtype == MEM_RDDR4)
852 banks = 16; 905 banks = 16;
853 else 906 else
854 banks = 8; 907 banks = 8;
@@ -909,7 +962,7 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
909 u32 reg; 962 u32 reg;
910 u64 limit, prv = 0; 963 u64 limit, prv = 0;
911 u64 tmp_mb; 964 u64 tmp_mb;
912 u32 mb, kb; 965 u32 gb, mb;
913 u32 rir_way; 966 u32 rir_way;
914 967
915 /* 968 /*
@@ -919,15 +972,17 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
919 pvt->tolm = pvt->info.get_tolm(pvt); 972 pvt->tolm = pvt->info.get_tolm(pvt);
920 tmp_mb = (1 + pvt->tolm) >> 20; 973 tmp_mb = (1 + pvt->tolm) >> 20;
921 974
922 mb = div_u64_rem(tmp_mb, 1000, &kb); 975 gb = div_u64_rem(tmp_mb, 1024, &mb);
923 edac_dbg(0, "TOLM: %u.%03u GB (0x%016Lx)\n", mb, kb, (u64)pvt->tolm); 976 edac_dbg(0, "TOLM: %u.%03u GB (0x%016Lx)\n",
977 gb, (mb*1000)/1024, (u64)pvt->tolm);
924 978
925 /* Address range is already 45:25 */ 979 /* Address range is already 45:25 */
926 pvt->tohm = pvt->info.get_tohm(pvt); 980 pvt->tohm = pvt->info.get_tohm(pvt);
927 tmp_mb = (1 + pvt->tohm) >> 20; 981 tmp_mb = (1 + pvt->tohm) >> 20;
928 982
929 mb = div_u64_rem(tmp_mb, 1000, &kb); 983 gb = div_u64_rem(tmp_mb, 1024, &mb);
930 edac_dbg(0, "TOHM: %u.%03u GB (0x%016Lx)\n", mb, kb, (u64)pvt->tohm); 984 edac_dbg(0, "TOHM: %u.%03u GB (0x%016Lx)\n",
985 gb, (mb*1000)/1024, (u64)pvt->tohm);
931 986
932 /* 987 /*
933 * Step 2) Get SAD range and SAD Interleave list 988 * Step 2) Get SAD range and SAD Interleave list
@@ -949,11 +1004,11 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
949 break; 1004 break;
950 1005
951 tmp_mb = (limit + 1) >> 20; 1006 tmp_mb = (limit + 1) >> 20;
952 mb = div_u64_rem(tmp_mb, 1000, &kb); 1007 gb = div_u64_rem(tmp_mb, 1024, &mb);
953 edac_dbg(0, "SAD#%d %s up to %u.%03u GB (0x%016Lx) Interleave: %s reg=0x%08x\n", 1008 edac_dbg(0, "SAD#%d %s up to %u.%03u GB (0x%016Lx) Interleave: %s reg=0x%08x\n",
954 n_sads, 1009 n_sads,
955 get_dram_attr(reg), 1010 get_dram_attr(reg),
956 mb, kb, 1011 gb, (mb*1000)/1024,
957 ((u64)tmp_mb) << 20L, 1012 ((u64)tmp_mb) << 20L,
958 INTERLEAVE_MODE(reg) ? "8:6" : "[8:6]XOR[18:16]", 1013 INTERLEAVE_MODE(reg) ? "8:6" : "[8:6]XOR[18:16]",
959 reg); 1014 reg);
@@ -984,9 +1039,9 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
984 break; 1039 break;
985 tmp_mb = (limit + 1) >> 20; 1040 tmp_mb = (limit + 1) >> 20;
986 1041
987 mb = div_u64_rem(tmp_mb, 1000, &kb); 1042 gb = div_u64_rem(tmp_mb, 1024, &mb);
988 edac_dbg(0, "TAD#%d: up to %u.%03u GB (0x%016Lx), socket interleave %d, memory interleave %d, TGT: %d, %d, %d, %d, reg=0x%08x\n", 1043 edac_dbg(0, "TAD#%d: up to %u.%03u GB (0x%016Lx), socket interleave %d, memory interleave %d, TGT: %d, %d, %d, %d, reg=0x%08x\n",
989 n_tads, mb, kb, 1044 n_tads, gb, (mb*1000)/1024,
990 ((u64)tmp_mb) << 20L, 1045 ((u64)tmp_mb) << 20L,
991 (u32)TAD_SOCK(reg), 1046 (u32)TAD_SOCK(reg),
992 (u32)TAD_CH(reg), 1047 (u32)TAD_CH(reg),
@@ -1009,10 +1064,10 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
1009 tad_ch_nilv_offset[j], 1064 tad_ch_nilv_offset[j],
1010 &reg); 1065 &reg);
1011 tmp_mb = TAD_OFFSET(reg) >> 20; 1066 tmp_mb = TAD_OFFSET(reg) >> 20;
1012 mb = div_u64_rem(tmp_mb, 1000, &kb); 1067 gb = div_u64_rem(tmp_mb, 1024, &mb);
1013 edac_dbg(0, "TAD CH#%d, offset #%d: %u.%03u GB (0x%016Lx), reg=0x%08x\n", 1068 edac_dbg(0, "TAD CH#%d, offset #%d: %u.%03u GB (0x%016Lx), reg=0x%08x\n",
1014 i, j, 1069 i, j,
1015 mb, kb, 1070 gb, (mb*1000)/1024,
1016 ((u64)tmp_mb) << 20L, 1071 ((u64)tmp_mb) << 20L,
1017 reg); 1072 reg);
1018 } 1073 }
@@ -1034,10 +1089,10 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
1034 1089
1035 tmp_mb = pvt->info.rir_limit(reg) >> 20; 1090 tmp_mb = pvt->info.rir_limit(reg) >> 20;
1036 rir_way = 1 << RIR_WAY(reg); 1091 rir_way = 1 << RIR_WAY(reg);
1037 mb = div_u64_rem(tmp_mb, 1000, &kb); 1092 gb = div_u64_rem(tmp_mb, 1024, &mb);
1038 edac_dbg(0, "CH#%d RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d, reg=0x%08x\n", 1093 edac_dbg(0, "CH#%d RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d, reg=0x%08x\n",
1039 i, j, 1094 i, j,
1040 mb, kb, 1095 gb, (mb*1000)/1024,
1041 ((u64)tmp_mb) << 20L, 1096 ((u64)tmp_mb) << 20L,
1042 rir_way, 1097 rir_way,
1043 reg); 1098 reg);
@@ -1048,10 +1103,10 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
1048 &reg); 1103 &reg);
1049 tmp_mb = RIR_OFFSET(reg) << 6; 1104 tmp_mb = RIR_OFFSET(reg) << 6;
1050 1105
1051 mb = div_u64_rem(tmp_mb, 1000, &kb); 1106 gb = div_u64_rem(tmp_mb, 1024, &mb);
1052 edac_dbg(0, "CH#%d RIR#%d INTL#%d, offset %u.%03u GB (0x%016Lx), tgt: %d, reg=0x%08x\n", 1107 edac_dbg(0, "CH#%d RIR#%d INTL#%d, offset %u.%03u GB (0x%016Lx), tgt: %d, reg=0x%08x\n",
1053 i, j, k, 1108 i, j, k,
1054 mb, kb, 1109 gb, (mb*1000)/1024,
1055 ((u64)tmp_mb) << 20L, 1110 ((u64)tmp_mb) << 20L,
1056 (u32)RIR_RNK_TGT(reg), 1111 (u32)RIR_RNK_TGT(reg),
1057 reg); 1112 reg);
@@ -1089,7 +1144,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
1089 u8 ch_way, sck_way, pkg, sad_ha = 0; 1144 u8 ch_way, sck_way, pkg, sad_ha = 0;
1090 u32 tad_offset; 1145 u32 tad_offset;
1091 u32 rir_way; 1146 u32 rir_way;
1092 u32 mb, kb; 1147 u32 mb, gb;
1093 u64 ch_addr, offset, limit = 0, prv = 0; 1148 u64 ch_addr, offset, limit = 0, prv = 0;
1094 1149
1095 1150
@@ -1179,7 +1234,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
1179 *socket = sad_interleave[idx]; 1234 *socket = sad_interleave[idx];
1180 edac_dbg(0, "SAD interleave index: %d (wayness %d) = CPU socket %d\n", 1235 edac_dbg(0, "SAD interleave index: %d (wayness %d) = CPU socket %d\n",
1181 idx, sad_way, *socket); 1236 idx, sad_way, *socket);
1182 } else if (pvt->info.type == HASWELL) { 1237 } else if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL) {
1183 int bits, a7mode = A7MODE(dram_rule); 1238 int bits, a7mode = A7MODE(dram_rule);
1184 1239
1185 if (a7mode) { 1240 if (a7mode) {
@@ -1358,10 +1413,10 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
1358 continue; 1413 continue;
1359 1414
1360 limit = pvt->info.rir_limit(reg); 1415 limit = pvt->info.rir_limit(reg);
1361 mb = div_u64_rem(limit >> 20, 1000, &kb); 1416 gb = div_u64_rem(limit >> 20, 1024, &mb);
1362 edac_dbg(0, "RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d\n", 1417 edac_dbg(0, "RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d\n",
1363 n_rir, 1418 n_rir,
1364 mb, kb, 1419 gb, (mb*1000)/1024,
1365 limit, 1420 limit,
1366 1 << RIR_WAY(reg)); 1421 1 << RIR_WAY(reg));
1367 if (ch_addr <= limit) 1422 if (ch_addr <= limit)
@@ -1828,6 +1883,82 @@ enodev:
1828 return -ENODEV; 1883 return -ENODEV;
1829} 1884}
1830 1885
1886static int broadwell_mci_bind_devs(struct mem_ctl_info *mci,
1887 struct sbridge_dev *sbridge_dev)
1888{
1889 struct sbridge_pvt *pvt = mci->pvt_info;
1890 struct pci_dev *pdev;
1891 int i;
1892
1893 /* there's only one device per system; not tied to any bus */
1894 if (pvt->info.pci_vtd == NULL)
1895 /* result will be checked later */
1896 pvt->info.pci_vtd = pci_get_device(PCI_VENDOR_ID_INTEL,
1897 PCI_DEVICE_ID_INTEL_BROADWELL_IMC_VTD_MISC,
1898 NULL);
1899
1900 for (i = 0; i < sbridge_dev->n_devs; i++) {
1901 pdev = sbridge_dev->pdev[i];
1902 if (!pdev)
1903 continue;
1904
1905 switch (pdev->device) {
1906 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD0:
1907 pvt->pci_sad0 = pdev;
1908 break;
1909 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD1:
1910 pvt->pci_sad1 = pdev;
1911 break;
1912 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0:
1913 pvt->pci_ha0 = pdev;
1914 break;
1915 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA:
1916 pvt->pci_ta = pdev;
1917 break;
1918 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_THERMAL:
1919 pvt->pci_ras = pdev;
1920 break;
1921 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0:
1922 pvt->pci_tad[0] = pdev;
1923 break;
1924 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD1:
1925 pvt->pci_tad[1] = pdev;
1926 break;
1927 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD2:
1928 pvt->pci_tad[2] = pdev;
1929 break;
1930 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD3:
1931 pvt->pci_tad[3] = pdev;
1932 break;
1933 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_DDRIO0:
1934 pvt->pci_ddrio = pdev;
1935 break;
1936 default:
1937 break;
1938 }
1939
1940 edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n",
1941 sbridge_dev->bus,
1942 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
1943 pdev);
1944 }
1945
1946 /* Check if everything were registered */
1947 if (!pvt->pci_sad0 || !pvt->pci_ha0 || !pvt->pci_sad1 ||
1948 !pvt->pci_ras || !pvt->pci_ta || !pvt->info.pci_vtd)
1949 goto enodev;
1950
1951 for (i = 0; i < NUM_CHANNELS; i++) {
1952 if (!pvt->pci_tad[i])
1953 goto enodev;
1954 }
1955 return 0;
1956
1957enodev:
1958 sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
1959 return -ENODEV;
1960}
1961
1831/**************************************************************************** 1962/****************************************************************************
1832 Error check routines 1963 Error check routines
1833 ****************************************************************************/ 1964 ****************************************************************************/
@@ -2240,6 +2371,25 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
2240 if (unlikely(rc < 0)) 2371 if (unlikely(rc < 0))
2241 goto fail0; 2372 goto fail0;
2242 break; 2373 break;
2374 case BROADWELL:
2375 /* rankcfgr isn't used */
2376 pvt->info.get_tolm = haswell_get_tolm;
2377 pvt->info.get_tohm = haswell_get_tohm;
2378 pvt->info.dram_rule = ibridge_dram_rule;
2379 pvt->info.get_memory_type = haswell_get_memory_type;
2380 pvt->info.get_node_id = haswell_get_node_id;
2381 pvt->info.rir_limit = haswell_rir_limit;
2382 pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule);
2383 pvt->info.interleave_list = ibridge_interleave_list;
2384 pvt->info.max_interleave = ARRAY_SIZE(ibridge_interleave_list);
2385 pvt->info.interleave_pkg = ibridge_interleave_pkg;
2386 mci->ctl_name = kasprintf(GFP_KERNEL, "Broadwell Socket#%d", mci->mc_idx);
2387
2388 /* Store pci devices at mci for faster access */
2389 rc = broadwell_mci_bind_devs(mci, sbridge_dev);
2390 if (unlikely(rc < 0))
2391 goto fail0;
2392 break;
2243 } 2393 }
2244 2394
2245 /* Get dimm basic config and the memory layout */ 2395 /* Get dimm basic config and the memory layout */
@@ -2305,6 +2455,10 @@ static int sbridge_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2305 rc = sbridge_get_all_devices(&num_mc, pci_dev_descr_haswell_table); 2455 rc = sbridge_get_all_devices(&num_mc, pci_dev_descr_haswell_table);
2306 type = HASWELL; 2456 type = HASWELL;
2307 break; 2457 break;
2458 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0:
2459 rc = sbridge_get_all_devices(&num_mc, pci_dev_descr_broadwell_table);
2460 type = BROADWELL;
2461 break;
2308 } 2462 }
2309 if (unlikely(rc < 0)) 2463 if (unlikely(rc < 0))
2310 goto fail0; 2464 goto fail0;
diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
index e644b52c287c..7c5cdc62f31c 100644
--- a/drivers/edac/x38_edac.c
+++ b/drivers/edac/x38_edac.c
@@ -500,8 +500,7 @@ fail1:
500 pci_unregister_driver(&x38_driver); 500 pci_unregister_driver(&x38_driver);
501 501
502fail0: 502fail0:
503 if (mci_pdev) 503 pci_dev_put(mci_pdev);
504 pci_dev_put(mci_pdev);
505 504
506 return pci_rc; 505 return pci_rc;
507} 506}
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index 17afc51f3054..c5f7b4e9eb6c 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -93,6 +93,12 @@ static void dmi_table(u8 *buf, int len, int num,
93 const struct dmi_header *dm = (const struct dmi_header *)data; 93 const struct dmi_header *dm = (const struct dmi_header *)data;
94 94
95 /* 95 /*
96 * 7.45 End-of-Table (Type 127) [SMBIOS reference spec v3.0.0]
97 */
98 if (dm->type == DMI_ENTRY_END_OF_TABLE)
99 break;
100
101 /*
96 * We want to know the total length (formatted area and 102 * We want to know the total length (formatted area and
97 * strings) before decoding to make sure we won't run off the 103 * strings) before decoding to make sure we won't run off the
98 * table in dmi_decode or dmi_string 104 * table in dmi_decode or dmi_string
@@ -107,7 +113,7 @@ static void dmi_table(u8 *buf, int len, int num,
107 } 113 }
108} 114}
109 115
110static u32 dmi_base; 116static phys_addr_t dmi_base;
111static u16 dmi_len; 117static u16 dmi_len;
112static u16 dmi_num; 118static u16 dmi_num;
113 119
@@ -467,7 +473,7 @@ static int __init dmi_present(const u8 *buf)
467 473
468 if (memcmp(buf, "_SM_", 4) == 0 && 474 if (memcmp(buf, "_SM_", 4) == 0 &&
469 buf[5] < 32 && dmi_checksum(buf, buf[5])) { 475 buf[5] < 32 && dmi_checksum(buf, buf[5])) {
470 smbios_ver = (buf[6] << 8) + buf[7]; 476 smbios_ver = get_unaligned_be16(buf + 6);
471 477
472 /* Some BIOS report weird SMBIOS version, fix that up */ 478 /* Some BIOS report weird SMBIOS version, fix that up */
473 switch (smbios_ver) { 479 switch (smbios_ver) {
@@ -489,10 +495,9 @@ static int __init dmi_present(const u8 *buf)
489 buf += 16; 495 buf += 16;
490 496
491 if (memcmp(buf, "_DMI_", 5) == 0 && dmi_checksum(buf, 15)) { 497 if (memcmp(buf, "_DMI_", 5) == 0 && dmi_checksum(buf, 15)) {
492 dmi_num = (buf[13] << 8) | buf[12]; 498 dmi_num = get_unaligned_le16(buf + 12);
493 dmi_len = (buf[7] << 8) | buf[6]; 499 dmi_len = get_unaligned_le16(buf + 6);
494 dmi_base = (buf[11] << 24) | (buf[10] << 16) | 500 dmi_base = get_unaligned_le32(buf + 8);
495 (buf[9] << 8) | buf[8];
496 501
497 if (dmi_walk_early(dmi_decode) == 0) { 502 if (dmi_walk_early(dmi_decode) == 0) {
498 if (smbios_ver) { 503 if (smbios_ver) {
@@ -514,12 +519,72 @@ static int __init dmi_present(const u8 *buf)
514 return 1; 519 return 1;
515} 520}
516 521
522/*
523 * Check for the SMBIOS 3.0 64-bit entry point signature. Unlike the legacy
524 * 32-bit entry point, there is no embedded DMI header (_DMI_) in here.
525 */
526static int __init dmi_smbios3_present(const u8 *buf)
527{
528 if (memcmp(buf, "_SM3_", 5) == 0 &&
529 buf[6] < 32 && dmi_checksum(buf, buf[6])) {
530 dmi_ver = get_unaligned_be16(buf + 7);
531 dmi_len = get_unaligned_le32(buf + 12);
532 dmi_base = get_unaligned_le64(buf + 16);
533
534 /*
535 * The 64-bit SMBIOS 3.0 entry point no longer has a field
536 * containing the number of structures present in the table.
537 * Instead, it defines the table size as a maximum size, and
538 * relies on the end-of-table structure type (#127) to be used
539 * to signal the end of the table.
540 * So let's define dmi_num as an upper bound as well: each
541 * structure has a 4 byte header, so dmi_len / 4 is an upper
542 * bound for the number of structures in the table.
543 */
544 dmi_num = dmi_len / 4;
545
546 if (dmi_walk_early(dmi_decode) == 0) {
547 pr_info("SMBIOS %d.%d present.\n",
548 dmi_ver >> 8, dmi_ver & 0xFF);
549 dmi_format_ids(dmi_ids_string, sizeof(dmi_ids_string));
550 pr_debug("DMI: %s\n", dmi_ids_string);
551 return 0;
552 }
553 }
554 return 1;
555}
556
517void __init dmi_scan_machine(void) 557void __init dmi_scan_machine(void)
518{ 558{
519 char __iomem *p, *q; 559 char __iomem *p, *q;
520 char buf[32]; 560 char buf[32];
521 561
522 if (efi_enabled(EFI_CONFIG_TABLES)) { 562 if (efi_enabled(EFI_CONFIG_TABLES)) {
563 /*
564 * According to the DMTF SMBIOS reference spec v3.0.0, it is
565 * allowed to define both the 64-bit entry point (smbios3) and
566 * the 32-bit entry point (smbios), in which case they should
567 * either both point to the same SMBIOS structure table, or the
568 * table pointed to by the 64-bit entry point should contain a
569 * superset of the table contents pointed to by the 32-bit entry
570 * point (section 5.2)
571 * This implies that the 64-bit entry point should have
572 * precedence if it is defined and supported by the OS. If we
573 * have the 64-bit entry point, but fail to decode it, fall
574 * back to the legacy one (if available)
575 */
576 if (efi.smbios3 != EFI_INVALID_TABLE_ADDR) {
577 p = dmi_early_remap(efi.smbios3, 32);
578 if (p == NULL)
579 goto error;
580 memcpy_fromio(buf, p, 32);
581 dmi_early_unmap(p, 32);
582
583 if (!dmi_smbios3_present(buf)) {
584 dmi_available = 1;
585 goto out;
586 }
587 }
523 if (efi.smbios == EFI_INVALID_TABLE_ADDR) 588 if (efi.smbios == EFI_INVALID_TABLE_ADDR)
524 goto error; 589 goto error;
525 590
@@ -552,7 +617,7 @@ void __init dmi_scan_machine(void)
552 memset(buf, 0, 16); 617 memset(buf, 0, 16);
553 for (q = p; q < p + 0x10000; q += 16) { 618 for (q = p; q < p + 0x10000; q += 16) {
554 memcpy_fromio(buf + 16, q, 16); 619 memcpy_fromio(buf + 16, q, 16);
555 if (!dmi_present(buf)) { 620 if (!dmi_smbios3_present(buf) || !dmi_present(buf)) {
556 dmi_available = 1; 621 dmi_available = 1;
557 dmi_early_unmap(p, 0x10000); 622 dmi_early_unmap(p, 0x10000);
558 goto out; 623 goto out;
diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile
index aef6a95adef5..d8be608a9f3b 100644
--- a/drivers/firmware/efi/Makefile
+++ b/drivers/firmware/efi/Makefile
@@ -7,4 +7,4 @@ obj-$(CONFIG_EFI_VARS_PSTORE) += efi-pstore.o
7obj-$(CONFIG_UEFI_CPER) += cper.o 7obj-$(CONFIG_UEFI_CPER) += cper.o
8obj-$(CONFIG_EFI_RUNTIME_MAP) += runtime-map.o 8obj-$(CONFIG_EFI_RUNTIME_MAP) += runtime-map.o
9obj-$(CONFIG_EFI_RUNTIME_WRAPPERS) += runtime-wrappers.o 9obj-$(CONFIG_EFI_RUNTIME_WRAPPERS) += runtime-wrappers.o
10obj-$(CONFIG_EFI_ARM_STUB) += libstub/ 10obj-$(CONFIG_EFI_STUB) += libstub/
diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
index 5b53d6183b6b..4fd9961d552e 100644
--- a/drivers/firmware/efi/cper.c
+++ b/drivers/firmware/efi/cper.c
@@ -294,7 +294,7 @@ void cper_mem_err_pack(const struct cper_sec_mem_err *mem,
294const char *cper_mem_err_unpack(struct trace_seq *p, 294const char *cper_mem_err_unpack(struct trace_seq *p,
295 struct cper_mem_err_compact *cmem) 295 struct cper_mem_err_compact *cmem)
296{ 296{
297 const char *ret = p->buffer + p->len; 297 const char *ret = trace_seq_buffer_ptr(p);
298 298
299 if (cper_mem_err_location(cmem, rcd_decode_str)) 299 if (cper_mem_err_location(cmem, rcd_decode_str))
300 trace_seq_printf(p, "%s", rcd_decode_str); 300 trace_seq_printf(p, "%s", rcd_decode_str);
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 8590099ac148..9035c1b74d58 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -30,6 +30,7 @@ struct efi __read_mostly efi = {
30 .acpi = EFI_INVALID_TABLE_ADDR, 30 .acpi = EFI_INVALID_TABLE_ADDR,
31 .acpi20 = EFI_INVALID_TABLE_ADDR, 31 .acpi20 = EFI_INVALID_TABLE_ADDR,
32 .smbios = EFI_INVALID_TABLE_ADDR, 32 .smbios = EFI_INVALID_TABLE_ADDR,
33 .smbios3 = EFI_INVALID_TABLE_ADDR,
33 .sal_systab = EFI_INVALID_TABLE_ADDR, 34 .sal_systab = EFI_INVALID_TABLE_ADDR,
34 .boot_info = EFI_INVALID_TABLE_ADDR, 35 .boot_info = EFI_INVALID_TABLE_ADDR,
35 .hcdp = EFI_INVALID_TABLE_ADDR, 36 .hcdp = EFI_INVALID_TABLE_ADDR,
@@ -86,6 +87,8 @@ static ssize_t systab_show(struct kobject *kobj,
86 str += sprintf(str, "ACPI=0x%lx\n", efi.acpi); 87 str += sprintf(str, "ACPI=0x%lx\n", efi.acpi);
87 if (efi.smbios != EFI_INVALID_TABLE_ADDR) 88 if (efi.smbios != EFI_INVALID_TABLE_ADDR)
88 str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios); 89 str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios);
90 if (efi.smbios3 != EFI_INVALID_TABLE_ADDR)
91 str += sprintf(str, "SMBIOS3=0x%lx\n", efi.smbios3);
89 if (efi.hcdp != EFI_INVALID_TABLE_ADDR) 92 if (efi.hcdp != EFI_INVALID_TABLE_ADDR)
90 str += sprintf(str, "HCDP=0x%lx\n", efi.hcdp); 93 str += sprintf(str, "HCDP=0x%lx\n", efi.hcdp);
91 if (efi.boot_info != EFI_INVALID_TABLE_ADDR) 94 if (efi.boot_info != EFI_INVALID_TABLE_ADDR)
@@ -260,6 +263,7 @@ static __initdata efi_config_table_type_t common_tables[] = {
260 {MPS_TABLE_GUID, "MPS", &efi.mps}, 263 {MPS_TABLE_GUID, "MPS", &efi.mps},
261 {SAL_SYSTEM_TABLE_GUID, "SALsystab", &efi.sal_systab}, 264 {SAL_SYSTEM_TABLE_GUID, "SALsystab", &efi.sal_systab},
262 {SMBIOS_TABLE_GUID, "SMBIOS", &efi.smbios}, 265 {SMBIOS_TABLE_GUID, "SMBIOS", &efi.smbios},
266 {SMBIOS3_TABLE_GUID, "SMBIOS 3.0", &efi.smbios3},
263 {UGA_IO_PROTOCOL_GUID, "UGA", &efi.uga}, 267 {UGA_IO_PROTOCOL_GUID, "UGA", &efi.uga},
264 {NULL_GUID, NULL, NULL}, 268 {NULL_GUID, NULL, NULL},
265}; 269};
diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c
index 75ee05964cbc..eb48a1a1a576 100644
--- a/drivers/firmware/efi/libstub/arm-stub.c
+++ b/drivers/firmware/efi/libstub/arm-stub.c
@@ -247,9 +247,18 @@ unsigned long __init efi_entry(void *handle, efi_system_table_t *sys_table,
247 goto fail_free_cmdline; 247 goto fail_free_cmdline;
248 } 248 }
249 } 249 }
250 if (!fdt_addr) 250
251 if (fdt_addr) {
252 pr_efi(sys_table, "Using DTB from command line\n");
253 } else {
251 /* Look for a device tree configuration table entry. */ 254 /* Look for a device tree configuration table entry. */
252 fdt_addr = (uintptr_t)get_fdt(sys_table); 255 fdt_addr = (uintptr_t)get_fdt(sys_table);
256 if (fdt_addr)
257 pr_efi(sys_table, "Using DTB from configuration table\n");
258 }
259
260 if (!fdt_addr)
261 pr_efi(sys_table, "Generating empty DTB\n");
253 262
254 status = handle_cmdline_files(sys_table, image, cmdline_ptr, 263 status = handle_cmdline_files(sys_table, image, cmdline_ptr,
255 "initrd=", dram_base + SZ_512M, 264 "initrd=", dram_base + SZ_512M,
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 0959ca9b6b27..23dfd5f59b39 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -905,4 +905,16 @@ config GPIO_VIPERBOARD
905 River Tech's viperboard.h for detailed meaning 905 River Tech's viperboard.h for detailed meaning
906 of the module parameters. 906 of the module parameters.
907 907
908config GPIO_DLN2
909 tristate "Diolan DLN2 GPIO support"
910 depends on MFD_DLN2
911 select GPIOLIB_IRQCHIP
912
913 help
914 Select this option to enable GPIO driver for the Diolan DLN2
915 board.
916
917 This driver can also be built as a module. If so, the module
918 will be called gpio-dln2.
919
908endif 920endif
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index e5d346cf3b6e..e60677b8ccb4 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -26,6 +26,7 @@ obj-$(CONFIG_GPIO_CRYSTAL_COVE) += gpio-crystalcove.o
26obj-$(CONFIG_GPIO_DA9052) += gpio-da9052.o 26obj-$(CONFIG_GPIO_DA9052) += gpio-da9052.o
27obj-$(CONFIG_GPIO_DA9055) += gpio-da9055.o 27obj-$(CONFIG_GPIO_DA9055) += gpio-da9055.o
28obj-$(CONFIG_GPIO_DAVINCI) += gpio-davinci.o 28obj-$(CONFIG_GPIO_DAVINCI) += gpio-davinci.o
29obj-$(CONFIG_GPIO_DLN2) += gpio-dln2.o
29obj-$(CONFIG_GPIO_DWAPB) += gpio-dwapb.o 30obj-$(CONFIG_GPIO_DWAPB) += gpio-dwapb.o
30obj-$(CONFIG_GPIO_EM) += gpio-em.o 31obj-$(CONFIG_GPIO_EM) += gpio-em.o
31obj-$(CONFIG_GPIO_EP93XX) += gpio-ep93xx.o 32obj-$(CONFIG_GPIO_EP93XX) += gpio-ep93xx.o
diff --git a/drivers/gpio/devres.c b/drivers/gpio/devres.c
index 954b9f6b0ef8..13dbd3dfc33a 100644
--- a/drivers/gpio/devres.c
+++ b/drivers/gpio/devres.c
@@ -109,6 +109,38 @@ struct gpio_desc *__must_check __devm_gpiod_get_index(struct device *dev,
109EXPORT_SYMBOL(__devm_gpiod_get_index); 109EXPORT_SYMBOL(__devm_gpiod_get_index);
110 110
111/** 111/**
112 * devm_get_gpiod_from_child - get a GPIO descriptor from a device's child node
113 * @dev: GPIO consumer
114 * @child: firmware node (child of @dev)
115 *
116 * GPIO descriptors returned from this function are automatically disposed on
117 * driver detach.
118 */
119struct gpio_desc *devm_get_gpiod_from_child(struct device *dev,
120 struct fwnode_handle *child)
121{
122 struct gpio_desc **dr;
123 struct gpio_desc *desc;
124
125 dr = devres_alloc(devm_gpiod_release, sizeof(struct gpio_desc *),
126 GFP_KERNEL);
127 if (!dr)
128 return ERR_PTR(-ENOMEM);
129
130 desc = fwnode_get_named_gpiod(child, "gpios");
131 if (IS_ERR(desc)) {
132 devres_free(dr);
133 return desc;
134 }
135
136 *dr = desc;
137 devres_add(dev, dr);
138
139 return desc;
140}
141EXPORT_SYMBOL(devm_get_gpiod_from_child);
142
143/**
112 * devm_gpiod_get_index_optional - Resource-managed gpiod_get_index_optional() 144 * devm_gpiod_get_index_optional - Resource-managed gpiod_get_index_optional()
113 * @dev: GPIO consumer 145 * @dev: GPIO consumer
114 * @con_id: function within the GPIO consumer 146 * @con_id: function within the GPIO consumer
diff --git a/drivers/gpio/gpio-dln2.c b/drivers/gpio/gpio-dln2.c
new file mode 100644
index 000000000000..978b51eae2ec
--- /dev/null
+++ b/drivers/gpio/gpio-dln2.c
@@ -0,0 +1,553 @@
1/*
2 * Driver for the Diolan DLN-2 USB-GPIO adapter
3 *
4 * Copyright (c) 2014 Intel Corporation
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License as
8 * published by the Free Software Foundation, version 2.
9 */
10
11#include <linux/kernel.h>
12#include <linux/module.h>
13#include <linux/slab.h>
14#include <linux/types.h>
15#include <linux/irqdomain.h>
16#include <linux/irq.h>
17#include <linux/irqchip/chained_irq.h>
18#include <linux/gpio.h>
19#include <linux/gpio/driver.h>
20#include <linux/platform_device.h>
21#include <linux/mfd/dln2.h>
22
23#define DLN2_GPIO_ID 0x01
24
25#define DLN2_GPIO_GET_PIN_COUNT DLN2_CMD(0x01, DLN2_GPIO_ID)
26#define DLN2_GPIO_SET_DEBOUNCE DLN2_CMD(0x04, DLN2_GPIO_ID)
27#define DLN2_GPIO_GET_DEBOUNCE DLN2_CMD(0x05, DLN2_GPIO_ID)
28#define DLN2_GPIO_PORT_GET_VAL DLN2_CMD(0x06, DLN2_GPIO_ID)
29#define DLN2_GPIO_PIN_GET_VAL DLN2_CMD(0x0B, DLN2_GPIO_ID)
30#define DLN2_GPIO_PIN_SET_OUT_VAL DLN2_CMD(0x0C, DLN2_GPIO_ID)
31#define DLN2_GPIO_PIN_GET_OUT_VAL DLN2_CMD(0x0D, DLN2_GPIO_ID)
32#define DLN2_GPIO_CONDITION_MET_EV DLN2_CMD(0x0F, DLN2_GPIO_ID)
33#define DLN2_GPIO_PIN_ENABLE DLN2_CMD(0x10, DLN2_GPIO_ID)
34#define DLN2_GPIO_PIN_DISABLE DLN2_CMD(0x11, DLN2_GPIO_ID)
35#define DLN2_GPIO_PIN_SET_DIRECTION DLN2_CMD(0x13, DLN2_GPIO_ID)
36#define DLN2_GPIO_PIN_GET_DIRECTION DLN2_CMD(0x14, DLN2_GPIO_ID)
37#define DLN2_GPIO_PIN_SET_EVENT_CFG DLN2_CMD(0x1E, DLN2_GPIO_ID)
38#define DLN2_GPIO_PIN_GET_EVENT_CFG DLN2_CMD(0x1F, DLN2_GPIO_ID)
39
40#define DLN2_GPIO_EVENT_NONE 0
41#define DLN2_GPIO_EVENT_CHANGE 1
42#define DLN2_GPIO_EVENT_LVL_HIGH 2
43#define DLN2_GPIO_EVENT_LVL_LOW 3
44#define DLN2_GPIO_EVENT_CHANGE_RISING 0x11
45#define DLN2_GPIO_EVENT_CHANGE_FALLING 0x21
46#define DLN2_GPIO_EVENT_MASK 0x0F
47
48#define DLN2_GPIO_MAX_PINS 32
49
/*
 * Deferred per-pin work item: enabling/disabling an IRQ needs a USB
 * transfer, which may sleep, so it is pushed out of the irq_chip
 * callbacks into a workqueue (see dln2_irq_work()).
 */
struct dln2_irq_work {
	struct work_struct work;
	struct dln2_gpio *dln2;
	int pin;	/* hardware pin number this work item serves */
	int type;	/* DLN2_GPIO_EVENT_* chosen by dln2_irq_set_type() */
};
56
/* Per-adapter driver state, reached via container_of() from the gpio_chip. */
struct dln2_gpio {
	struct platform_device *pdev;
	struct gpio_chip gpio;

	/*
	 * Cache pin direction to save us one transfer, since the hardware has
	 * separate commands to read the in and out values.
	 */
	DECLARE_BITMAP(output_enabled, DLN2_GPIO_MAX_PINS);

	/* irqs_masked: lines software-masked by dln2_irq_mask() */
	DECLARE_BITMAP(irqs_masked, DLN2_GPIO_MAX_PINS);
	/* irqs_enabled: lines whose event reporting is (to be) enabled */
	DECLARE_BITMAP(irqs_enabled, DLN2_GPIO_MAX_PINS);
	/* irqs_pending: events received while masked, replayed on unmask */
	DECLARE_BITMAP(irqs_pending, DLN2_GPIO_MAX_PINS);
	struct dln2_irq_work *irq_work;	/* one entry per pin */
};
72
/* Wire format: request/response carrying only a pin number. */
struct dln2_gpio_pin {
	__le16 pin;
};

/*
 * Wire format: pin number plus a one-byte value (level or direction).
 * The member-level __packed gives the struct byte alignment and no tail
 * padding (sizeof == 3) — presumably to match the device's message
 * layout; confirm against the DLN-2 protocol documentation.
 */
struct dln2_gpio_pin_val {
	__le16 pin __packed;
	u8 value;
};
81
82static int dln2_gpio_get_pin_count(struct platform_device *pdev)
83{
84 int ret;
85 __le16 count;
86 int len = sizeof(count);
87
88 ret = dln2_transfer_rx(pdev, DLN2_GPIO_GET_PIN_COUNT, &count, &len);
89 if (ret < 0)
90 return ret;
91 if (len < sizeof(count))
92 return -EPROTO;
93
94 return le16_to_cpu(count);
95}
96
97static int dln2_gpio_pin_cmd(struct dln2_gpio *dln2, int cmd, unsigned pin)
98{
99 struct dln2_gpio_pin req = {
100 .pin = cpu_to_le16(pin),
101 };
102
103 return dln2_transfer_tx(dln2->pdev, cmd, &req, sizeof(req));
104}
105
106static int dln2_gpio_pin_val(struct dln2_gpio *dln2, int cmd, unsigned int pin)
107{
108 int ret;
109 struct dln2_gpio_pin req = {
110 .pin = cpu_to_le16(pin),
111 };
112 struct dln2_gpio_pin_val rsp;
113 int len = sizeof(rsp);
114
115 ret = dln2_transfer(dln2->pdev, cmd, &req, sizeof(req), &rsp, &len);
116 if (ret < 0)
117 return ret;
118 if (len < sizeof(rsp) || req.pin != rsp.pin)
119 return -EPROTO;
120
121 return rsp.value;
122}
123
124static int dln2_gpio_pin_get_in_val(struct dln2_gpio *dln2, unsigned int pin)
125{
126 int ret;
127
128 ret = dln2_gpio_pin_val(dln2, DLN2_GPIO_PIN_GET_VAL, pin);
129 if (ret < 0)
130 return ret;
131 return !!ret;
132}
133
134static int dln2_gpio_pin_get_out_val(struct dln2_gpio *dln2, unsigned int pin)
135{
136 int ret;
137
138 ret = dln2_gpio_pin_val(dln2, DLN2_GPIO_PIN_GET_OUT_VAL, pin);
139 if (ret < 0)
140 return ret;
141 return !!ret;
142}
143
144static void dln2_gpio_pin_set_out_val(struct dln2_gpio *dln2,
145 unsigned int pin, int value)
146{
147 struct dln2_gpio_pin_val req = {
148 .pin = cpu_to_le16(pin),
149 .value = value,
150 };
151
152 dln2_transfer_tx(dln2->pdev, DLN2_GPIO_PIN_SET_OUT_VAL, &req,
153 sizeof(req));
154}
155
156#define DLN2_GPIO_DIRECTION_IN 0
157#define DLN2_GPIO_DIRECTION_OUT 1
158
159static int dln2_gpio_request(struct gpio_chip *chip, unsigned offset)
160{
161 struct dln2_gpio *dln2 = container_of(chip, struct dln2_gpio, gpio);
162 struct dln2_gpio_pin req = {
163 .pin = cpu_to_le16(offset),
164 };
165 struct dln2_gpio_pin_val rsp;
166 int len = sizeof(rsp);
167 int ret;
168
169 ret = dln2_gpio_pin_cmd(dln2, DLN2_GPIO_PIN_ENABLE, offset);
170 if (ret < 0)
171 return ret;
172
173 /* cache the pin direction */
174 ret = dln2_transfer(dln2->pdev, DLN2_GPIO_PIN_GET_DIRECTION,
175 &req, sizeof(req), &rsp, &len);
176 if (ret < 0)
177 return ret;
178 if (len < sizeof(rsp) || req.pin != rsp.pin) {
179 ret = -EPROTO;
180 goto out_disable;
181 }
182
183 switch (rsp.value) {
184 case DLN2_GPIO_DIRECTION_IN:
185 clear_bit(offset, dln2->output_enabled);
186 return 0;
187 case DLN2_GPIO_DIRECTION_OUT:
188 set_bit(offset, dln2->output_enabled);
189 return 0;
190 default:
191 ret = -EPROTO;
192 goto out_disable;
193 }
194
195out_disable:
196 dln2_gpio_pin_cmd(dln2, DLN2_GPIO_PIN_DISABLE, offset);
197 return ret;
198}
199
200static void dln2_gpio_free(struct gpio_chip *chip, unsigned offset)
201{
202 struct dln2_gpio *dln2 = container_of(chip, struct dln2_gpio, gpio);
203
204 dln2_gpio_pin_cmd(dln2, DLN2_GPIO_PIN_DISABLE, offset);
205}
206
207static int dln2_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
208{
209 struct dln2_gpio *dln2 = container_of(chip, struct dln2_gpio, gpio);
210
211 if (test_bit(offset, dln2->output_enabled))
212 return GPIOF_DIR_OUT;
213
214 return GPIOF_DIR_IN;
215}
216
217static int dln2_gpio_get(struct gpio_chip *chip, unsigned int offset)
218{
219 struct dln2_gpio *dln2 = container_of(chip, struct dln2_gpio, gpio);
220 int dir;
221
222 dir = dln2_gpio_get_direction(chip, offset);
223 if (dir < 0)
224 return dir;
225
226 if (dir == GPIOF_DIR_IN)
227 return dln2_gpio_pin_get_in_val(dln2, offset);
228
229 return dln2_gpio_pin_get_out_val(dln2, offset);
230}
231
232static void dln2_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
233{
234 struct dln2_gpio *dln2 = container_of(chip, struct dln2_gpio, gpio);
235
236 dln2_gpio_pin_set_out_val(dln2, offset, value);
237}
238
239static int dln2_gpio_set_direction(struct gpio_chip *chip, unsigned offset,
240 unsigned dir)
241{
242 struct dln2_gpio *dln2 = container_of(chip, struct dln2_gpio, gpio);
243 struct dln2_gpio_pin_val req = {
244 .pin = cpu_to_le16(offset),
245 .value = dir,
246 };
247 int ret;
248
249 ret = dln2_transfer_tx(dln2->pdev, DLN2_GPIO_PIN_SET_DIRECTION,
250 &req, sizeof(req));
251 if (ret < 0)
252 return ret;
253
254 if (dir == DLN2_GPIO_DIRECTION_OUT)
255 set_bit(offset, dln2->output_enabled);
256 else
257 clear_bit(offset, dln2->output_enabled);
258
259 return ret;
260}
261
/* gpio_chip .direction_input hook: switch the pin to input mode. */
static int dln2_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
	return dln2_gpio_set_direction(chip, offset, DLN2_GPIO_DIRECTION_IN);
}
266
267static int dln2_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
268 int value)
269{
270 return dln2_gpio_set_direction(chip, offset, DLN2_GPIO_DIRECTION_OUT);
271}
272
/*
 * gpio_chip .set_debounce hook.
 *
 * NOTE(review): the command carries only a duration and @offset is unused,
 * so the debounce period appears to apply adapter-wide rather than per
 * pin — confirm against the DLN-2 protocol documentation.
 */
static int dln2_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
				  unsigned debounce)
{
	struct dln2_gpio *dln2 = container_of(chip, struct dln2_gpio, gpio);
	__le32 duration = cpu_to_le32(debounce);

	return dln2_transfer_tx(dln2->pdev, DLN2_GPIO_SET_DEBOUNCE,
				&duration, sizeof(duration));
}
282
283static int dln2_gpio_set_event_cfg(struct dln2_gpio *dln2, unsigned pin,
284 unsigned type, unsigned period)
285{
286 struct {
287 __le16 pin;
288 u8 type;
289 __le16 period;
290 } __packed req = {
291 .pin = cpu_to_le16(pin),
292 .type = type,
293 .period = cpu_to_le16(period),
294 };
295
296 return dln2_transfer_tx(dln2->pdev, DLN2_GPIO_PIN_SET_EVENT_CFG,
297 &req, sizeof(req));
298}
299
300static void dln2_irq_work(struct work_struct *w)
301{
302 struct dln2_irq_work *iw = container_of(w, struct dln2_irq_work, work);
303 struct dln2_gpio *dln2 = iw->dln2;
304 u8 type = iw->type & DLN2_GPIO_EVENT_MASK;
305
306 if (test_bit(iw->pin, dln2->irqs_enabled))
307 dln2_gpio_set_event_cfg(dln2, iw->pin, type, 0);
308 else
309 dln2_gpio_set_event_cfg(dln2, iw->pin, DLN2_GPIO_EVENT_NONE, 0);
310}
311
312static void dln2_irq_enable(struct irq_data *irqd)
313{
314 struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
315 struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio);
316 int pin = irqd_to_hwirq(irqd);
317
318 set_bit(pin, dln2->irqs_enabled);
319 schedule_work(&dln2->irq_work[pin].work);
320}
321
322static void dln2_irq_disable(struct irq_data *irqd)
323{
324 struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
325 struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio);
326 int pin = irqd_to_hwirq(irqd);
327
328 clear_bit(pin, dln2->irqs_enabled);
329 schedule_work(&dln2->irq_work[pin].work);
330}
331
332static void dln2_irq_mask(struct irq_data *irqd)
333{
334 struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
335 struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio);
336 int pin = irqd_to_hwirq(irqd);
337
338 set_bit(pin, dln2->irqs_masked);
339}
340
341static void dln2_irq_unmask(struct irq_data *irqd)
342{
343 struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
344 struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio);
345 struct device *dev = dln2->gpio.dev;
346 int pin = irqd_to_hwirq(irqd);
347
348 if (test_and_clear_bit(pin, dln2->irqs_pending)) {
349 int irq;
350
351 irq = irq_find_mapping(dln2->gpio.irqdomain, pin);
352 if (!irq) {
353 dev_err(dev, "pin %d not mapped to IRQ\n", pin);
354 return;
355 }
356
357 generic_handle_irq(irq);
358 }
359}
360
361static int dln2_irq_set_type(struct irq_data *irqd, unsigned type)
362{
363 struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
364 struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio);
365 int pin = irqd_to_hwirq(irqd);
366
367 switch (type) {
368 case IRQ_TYPE_LEVEL_HIGH:
369 dln2->irq_work[pin].type = DLN2_GPIO_EVENT_LVL_HIGH;
370 break;
371 case IRQ_TYPE_LEVEL_LOW:
372 dln2->irq_work[pin].type = DLN2_GPIO_EVENT_LVL_LOW;
373 break;
374 case IRQ_TYPE_EDGE_BOTH:
375 dln2->irq_work[pin].type = DLN2_GPIO_EVENT_CHANGE;
376 break;
377 case IRQ_TYPE_EDGE_RISING:
378 dln2->irq_work[pin].type = DLN2_GPIO_EVENT_CHANGE_RISING;
379 break;
380 case IRQ_TYPE_EDGE_FALLING:
381 dln2->irq_work[pin].type = DLN2_GPIO_EVENT_CHANGE_FALLING;
382 break;
383 default:
384 return -EINVAL;
385 }
386
387 return 0;
388}
389
/*
 * irq_chip for the DLN2 GPIO lines.  The callbacks only flip bitmap
 * state and schedule work; the sleeping USB transfers happen in
 * dln2_irq_work().
 */
static struct irq_chip dln2_gpio_irqchip = {
	.name = "dln2-irq",
	.irq_enable = dln2_irq_enable,
	.irq_disable = dln2_irq_disable,
	.irq_mask = dln2_irq_mask,
	.irq_unmask = dln2_irq_unmask,
	.irq_set_type = dln2_irq_set_type,
};
398
/*
 * Event callback registered with the dln2 MFD core for
 * DLN2_GPIO_CONDITION_MET_EV messages.  Validates the payload, then
 * either delivers the interrupt, or parks it in irqs_pending when the
 * line is software-masked so dln2_irq_unmask() can replay it.
 */
static void dln2_gpio_event(struct platform_device *pdev, u16 echo,
			    const void *data, int len)
{
	int pin, irq;
	/* on-the-wire layout of the event payload */
	const struct {
		__le16 count;
		__u8 type;
		__le16 pin;
		__u8 value;
	} __packed *event = data;
	struct dln2_gpio *dln2 = platform_get_drvdata(pdev);

	if (len < sizeof(*event)) {
		dev_err(dln2->gpio.dev, "short event message\n");
		return;
	}

	/* pin comes from the device: bounds-check before using it as index */
	pin = le16_to_cpu(event->pin);
	if (pin >= dln2->gpio.ngpio) {
		dev_err(dln2->gpio.dev, "out of bounds pin %d\n", pin);
		return;
	}

	irq = irq_find_mapping(dln2->gpio.irqdomain, pin);
	if (!irq) {
		dev_err(dln2->gpio.dev, "pin %d not mapped to IRQ\n", pin);
		return;
	}

	if (!test_bit(pin, dln2->irqs_enabled))
		return;
	if (test_bit(pin, dln2->irqs_masked)) {
		set_bit(pin, dln2->irqs_pending);
		return;
	}

	/*
	 * Rising/falling triggers are filtered in software here using the
	 * reported pin value; all other types are delivered as-is.
	 */
	switch (dln2->irq_work[pin].type) {
	case DLN2_GPIO_EVENT_CHANGE_RISING:
		if (event->value)
			generic_handle_irq(irq);
		break;
	case DLN2_GPIO_EVENT_CHANGE_FALLING:
		if (!event->value)
			generic_handle_irq(irq);
		break;
	default:
		generic_handle_irq(irq);
	}
}
448
/*
 * Probe: query the adapter's pin count, set up per-pin IRQ work items,
 * register the gpio_chip plus its irqchip, then hook the MFD event
 * callback that delivers pin-change interrupts.
 */
static int dln2_gpio_probe(struct platform_device *pdev)
{
	struct dln2_gpio *dln2;
	struct device *dev = &pdev->dev;
	int pins;
	int i, ret;

	pins = dln2_gpio_get_pin_count(pdev);
	if (pins < 0) {
		dev_err(dev, "failed to get pin count: %d\n", pins);
		return pins;
	}
	/* the bitmaps are sized for DLN2_GPIO_MAX_PINS; clamp to fit */
	if (pins > DLN2_GPIO_MAX_PINS) {
		pins = DLN2_GPIO_MAX_PINS;
		dev_warn(dev, "clamping pins to %d\n", DLN2_GPIO_MAX_PINS);
	}

	dln2 = devm_kzalloc(&pdev->dev, sizeof(*dln2), GFP_KERNEL);
	if (!dln2)
		return -ENOMEM;

	dln2->irq_work = devm_kcalloc(&pdev->dev, pins,
				      sizeof(struct dln2_irq_work), GFP_KERNEL);
	if (!dln2->irq_work)
		return -ENOMEM;
	for (i = 0; i < pins; i++) {
		INIT_WORK(&dln2->irq_work[i].work, dln2_irq_work);
		dln2->irq_work[i].pin = i;
		dln2->irq_work[i].dln2 = dln2;
	}

	dln2->pdev = pdev;

	dln2->gpio.label = "dln2";
	dln2->gpio.dev = dev;
	dln2->gpio.owner = THIS_MODULE;
	dln2->gpio.base = -1;	/* let gpiolib pick the GPIO number range */
	dln2->gpio.ngpio = pins;
	dln2->gpio.exported = true;
	/* accessors issue USB transfers and may sleep */
	dln2->gpio.can_sleep = true;
	dln2->gpio.irq_not_threaded = true;
	dln2->gpio.set = dln2_gpio_set;
	dln2->gpio.get = dln2_gpio_get;
	dln2->gpio.request = dln2_gpio_request;
	dln2->gpio.free = dln2_gpio_free;
	dln2->gpio.get_direction = dln2_gpio_get_direction;
	dln2->gpio.direction_input = dln2_gpio_direction_input;
	dln2->gpio.direction_output = dln2_gpio_direction_output;
	dln2->gpio.set_debounce = dln2_gpio_set_debounce;

	/* drvdata must be set before events can arrive (dln2_gpio_event) */
	platform_set_drvdata(pdev, dln2);

	ret = gpiochip_add(&dln2->gpio);
	if (ret < 0) {
		dev_err(dev, "failed to add gpio chip: %d\n", ret);
		goto out;
	}

	ret = gpiochip_irqchip_add(&dln2->gpio, &dln2_gpio_irqchip, 0,
				   handle_simple_irq, IRQ_TYPE_NONE);
	if (ret < 0) {
		dev_err(dev, "failed to add irq chip: %d\n", ret);
		goto out_gpiochip_remove;
	}

	ret = dln2_register_event_cb(pdev, DLN2_GPIO_CONDITION_MET_EV,
				     dln2_gpio_event);
	if (ret) {
		dev_err(dev, "failed to register event cb: %d\n", ret);
		goto out_gpiochip_remove;
	}

	return 0;

out_gpiochip_remove:
	gpiochip_remove(&dln2->gpio);
out:
	return ret;
}
528
529static int dln2_gpio_remove(struct platform_device *pdev)
530{
531 struct dln2_gpio *dln2 = platform_get_drvdata(pdev);
532 int i;
533
534 dln2_unregister_event_cb(pdev, DLN2_GPIO_CONDITION_MET_EV);
535 for (i = 0; i < dln2->gpio.ngpio; i++)
536 flush_work(&dln2->irq_work[i].work);
537 gpiochip_remove(&dln2->gpio);
538
539 return 0;
540}
541
542static struct platform_driver dln2_gpio_driver = {
543 .driver.name = "dln2-gpio",
544 .probe = dln2_gpio_probe,
545 .remove = dln2_gpio_remove,
546};
547
548module_platform_driver(dln2_gpio_driver);
549
550MODULE_AUTHOR("Daniel Baluta <daniel.baluta@intel.com");
551MODULE_DESCRIPTION("Driver for the Diolan DLN2 GPIO interface");
552MODULE_LICENSE("GPL v2");
553MODULE_ALIAS("platform:dln2-gpio");
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 415682f69214..3d6b445665ad 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -1259,7 +1259,7 @@ static int omap_gpio_probe(struct platform_device *pdev)
1259 1259
1260#ifdef CONFIG_ARCH_OMAP2PLUS 1260#ifdef CONFIG_ARCH_OMAP2PLUS
1261 1261
1262#if defined(CONFIG_PM_RUNTIME) 1262#if defined(CONFIG_PM)
1263static void omap_gpio_restore_context(struct gpio_bank *bank); 1263static void omap_gpio_restore_context(struct gpio_bank *bank);
1264 1264
1265static int omap_gpio_runtime_suspend(struct device *dev) 1265static int omap_gpio_runtime_suspend(struct device *dev)
@@ -1440,7 +1440,7 @@ static int omap_gpio_runtime_resume(struct device *dev)
1440 1440
1441 return 0; 1441 return 0;
1442} 1442}
1443#endif /* CONFIG_PM_RUNTIME */ 1443#endif /* CONFIG_PM */
1444 1444
1445void omap2_gpio_prepare_for_idle(int pwr_mode) 1445void omap2_gpio_prepare_for_idle(int pwr_mode)
1446{ 1446{
@@ -1468,7 +1468,7 @@ void omap2_gpio_resume_after_idle(void)
1468 } 1468 }
1469} 1469}
1470 1470
1471#if defined(CONFIG_PM_RUNTIME) 1471#if defined(CONFIG_PM)
1472static void omap_gpio_init_context(struct gpio_bank *p) 1472static void omap_gpio_init_context(struct gpio_bank *p)
1473{ 1473{
1474 struct omap_gpio_reg_offs *regs = p->regs; 1474 struct omap_gpio_reg_offs *regs = p->regs;
@@ -1525,7 +1525,7 @@ static void omap_gpio_restore_context(struct gpio_bank *bank)
1525 writel_relaxed(bank->context.irqenable2, 1525 writel_relaxed(bank->context.irqenable2,
1526 bank->base + bank->regs->irqenable2); 1526 bank->base + bank->regs->irqenable2);
1527} 1527}
1528#endif /* CONFIG_PM_RUNTIME */ 1528#endif /* CONFIG_PM */
1529#else 1529#else
1530#define omap_gpio_runtime_suspend NULL 1530#define omap_gpio_runtime_suspend NULL
1531#define omap_gpio_runtime_resume NULL 1531#define omap_gpio_runtime_resume NULL
diff --git a/drivers/gpio/gpio-sch.c b/drivers/gpio/gpio-sch.c
index 41e91d70301e..99720c8bc8ed 100644
--- a/drivers/gpio/gpio-sch.c
+++ b/drivers/gpio/gpio-sch.c
@@ -29,290 +29,221 @@
29 29
30#include <linux/gpio.h> 30#include <linux/gpio.h>
31 31
32static DEFINE_SPINLOCK(gpio_lock); 32#define GEN 0x00
33 33#define GIO 0x04
34#define CGEN (0x00) 34#define GLV 0x08
35#define CGIO (0x04) 35
36#define CGLV (0x08) 36struct sch_gpio {
37 37 struct gpio_chip chip;
38#define RGEN (0x20) 38 spinlock_t lock;
39#define RGIO (0x24) 39 unsigned short iobase;
40#define RGLV (0x28) 40 unsigned short core_base;
41 41 unsigned short resume_base;
42static unsigned short gpio_ba; 42};
43
44static int sch_gpio_core_direction_in(struct gpio_chip *gc, unsigned gpio_num)
45{
46 u8 curr_dirs;
47 unsigned short offset, bit;
48
49 spin_lock(&gpio_lock);
50
51 offset = CGIO + gpio_num / 8;
52 bit = gpio_num % 8;
53
54 curr_dirs = inb(gpio_ba + offset);
55
56 if (!(curr_dirs & (1 << bit)))
57 outb(curr_dirs | (1 << bit), gpio_ba + offset);
58 43
59 spin_unlock(&gpio_lock); 44#define to_sch_gpio(c) container_of(c, struct sch_gpio, chip)
60 return 0;
61}
62 45
63static int sch_gpio_core_get(struct gpio_chip *gc, unsigned gpio_num) 46static unsigned sch_gpio_offset(struct sch_gpio *sch, unsigned gpio,
47 unsigned reg)
64{ 48{
65 int res; 49 unsigned base = 0;
66 unsigned short offset, bit;
67 50
68 offset = CGLV + gpio_num / 8; 51 if (gpio >= sch->resume_base) {
69 bit = gpio_num % 8; 52 gpio -= sch->resume_base;
53 base += 0x20;
54 }
70 55
71 res = !!(inb(gpio_ba + offset) & (1 << bit)); 56 return base + reg + gpio / 8;
72 return res;
73} 57}
74 58
75static void sch_gpio_core_set(struct gpio_chip *gc, unsigned gpio_num, int val) 59static unsigned sch_gpio_bit(struct sch_gpio *sch, unsigned gpio)
76{ 60{
77 u8 curr_vals; 61 if (gpio >= sch->resume_base)
78 unsigned short offset, bit; 62 gpio -= sch->resume_base;
79 63 return gpio % 8;
80 spin_lock(&gpio_lock);
81
82 offset = CGLV + gpio_num / 8;
83 bit = gpio_num % 8;
84
85 curr_vals = inb(gpio_ba + offset);
86
87 if (val)
88 outb(curr_vals | (1 << bit), gpio_ba + offset);
89 else
90 outb((curr_vals & ~(1 << bit)), gpio_ba + offset);
91 spin_unlock(&gpio_lock);
92} 64}
93 65
94static int sch_gpio_core_direction_out(struct gpio_chip *gc, 66static void sch_gpio_enable(struct sch_gpio *sch, unsigned gpio)
95 unsigned gpio_num, int val)
96{ 67{
97 u8 curr_dirs;
98 unsigned short offset, bit; 68 unsigned short offset, bit;
69 u8 enable;
99 70
100 spin_lock(&gpio_lock); 71 spin_lock(&sch->lock);
101 72
102 offset = CGIO + gpio_num / 8; 73 offset = sch_gpio_offset(sch, gpio, GEN);
103 bit = gpio_num % 8; 74 bit = sch_gpio_bit(sch, gpio);
104
105 curr_dirs = inb(gpio_ba + offset);
106 if (curr_dirs & (1 << bit))
107 outb(curr_dirs & ~(1 << bit), gpio_ba + offset);
108 75
109 spin_unlock(&gpio_lock); 76 enable = inb(sch->iobase + offset);
77 if (!(enable & (1 << bit)))
78 outb(enable | (1 << bit), sch->iobase + offset);
110 79
111 /* 80 spin_unlock(&sch->lock);
112 * according to the datasheet, writing to the level register has no
113 * effect when GPIO is programmed as input.
 114 * Actually the level register is read-only when configured as input.
115 * Thus presetting the output level before switching to output is _NOT_ possible.
116 * Hence we set the level after configuring the GPIO as output.
117 * But we cannot prevent a short low pulse if direction is set to high
118 * and an external pull-up is connected.
119 */
120 sch_gpio_core_set(gc, gpio_num, val);
121 return 0;
122} 81}
123 82
124static struct gpio_chip sch_gpio_core = { 83static int sch_gpio_direction_in(struct gpio_chip *gc, unsigned gpio_num)
125 .label = "sch_gpio_core",
126 .owner = THIS_MODULE,
127 .direction_input = sch_gpio_core_direction_in,
128 .get = sch_gpio_core_get,
129 .direction_output = sch_gpio_core_direction_out,
130 .set = sch_gpio_core_set,
131};
132
133static int sch_gpio_resume_direction_in(struct gpio_chip *gc,
134 unsigned gpio_num)
135{ 84{
85 struct sch_gpio *sch = to_sch_gpio(gc);
136 u8 curr_dirs; 86 u8 curr_dirs;
137 unsigned short offset, bit; 87 unsigned short offset, bit;
138 88
139 spin_lock(&gpio_lock); 89 spin_lock(&sch->lock);
140 90
141 offset = RGIO + gpio_num / 8; 91 offset = sch_gpio_offset(sch, gpio_num, GIO);
142 bit = gpio_num % 8; 92 bit = sch_gpio_bit(sch, gpio_num);
143 93
144 curr_dirs = inb(gpio_ba + offset); 94 curr_dirs = inb(sch->iobase + offset);
145 95
146 if (!(curr_dirs & (1 << bit))) 96 if (!(curr_dirs & (1 << bit)))
147 outb(curr_dirs | (1 << bit), gpio_ba + offset); 97 outb(curr_dirs | (1 << bit), sch->iobase + offset);
148 98
149 spin_unlock(&gpio_lock); 99 spin_unlock(&sch->lock);
150 return 0; 100 return 0;
151} 101}
152 102
153static int sch_gpio_resume_get(struct gpio_chip *gc, unsigned gpio_num) 103static int sch_gpio_get(struct gpio_chip *gc, unsigned gpio_num)
154{ 104{
105 struct sch_gpio *sch = to_sch_gpio(gc);
106 int res;
155 unsigned short offset, bit; 107 unsigned short offset, bit;
156 108
157 offset = RGLV + gpio_num / 8; 109 offset = sch_gpio_offset(sch, gpio_num, GLV);
158 bit = gpio_num % 8; 110 bit = sch_gpio_bit(sch, gpio_num);
111
112 res = !!(inb(sch->iobase + offset) & (1 << bit));
159 113
160 return !!(inb(gpio_ba + offset) & (1 << bit)); 114 return res;
161} 115}
162 116
163static void sch_gpio_resume_set(struct gpio_chip *gc, 117static void sch_gpio_set(struct gpio_chip *gc, unsigned gpio_num, int val)
164 unsigned gpio_num, int val)
165{ 118{
119 struct sch_gpio *sch = to_sch_gpio(gc);
166 u8 curr_vals; 120 u8 curr_vals;
167 unsigned short offset, bit; 121 unsigned short offset, bit;
168 122
169 spin_lock(&gpio_lock); 123 spin_lock(&sch->lock);
170 124
171 offset = RGLV + gpio_num / 8; 125 offset = sch_gpio_offset(sch, gpio_num, GLV);
172 bit = gpio_num % 8; 126 bit = sch_gpio_bit(sch, gpio_num);
173 127
174 curr_vals = inb(gpio_ba + offset); 128 curr_vals = inb(sch->iobase + offset);
175 129
176 if (val) 130 if (val)
177 outb(curr_vals | (1 << bit), gpio_ba + offset); 131 outb(curr_vals | (1 << bit), sch->iobase + offset);
178 else 132 else
179 outb((curr_vals & ~(1 << bit)), gpio_ba + offset); 133 outb((curr_vals & ~(1 << bit)), sch->iobase + offset);
180 134
181 spin_unlock(&gpio_lock); 135 spin_unlock(&sch->lock);
182} 136}
183 137
184static int sch_gpio_resume_direction_out(struct gpio_chip *gc, 138static int sch_gpio_direction_out(struct gpio_chip *gc, unsigned gpio_num,
185 unsigned gpio_num, int val) 139 int val)
186{ 140{
141 struct sch_gpio *sch = to_sch_gpio(gc);
187 u8 curr_dirs; 142 u8 curr_dirs;
188 unsigned short offset, bit; 143 unsigned short offset, bit;
189 144
190 offset = RGIO + gpio_num / 8; 145 spin_lock(&sch->lock);
191 bit = gpio_num % 8;
192 146
193 spin_lock(&gpio_lock); 147 offset = sch_gpio_offset(sch, gpio_num, GIO);
148 bit = sch_gpio_bit(sch, gpio_num);
194 149
195 curr_dirs = inb(gpio_ba + offset); 150 curr_dirs = inb(sch->iobase + offset);
196 if (curr_dirs & (1 << bit)) 151 if (curr_dirs & (1 << bit))
197 outb(curr_dirs & ~(1 << bit), gpio_ba + offset); 152 outb(curr_dirs & ~(1 << bit), sch->iobase + offset);
198 153
199 spin_unlock(&gpio_lock); 154 spin_unlock(&sch->lock);
200 155
201 /* 156 /*
202 * according to the datasheet, writing to the level register has no 157 * according to the datasheet, writing to the level register has no
203 * effect when GPIO is programmed as input. 158 * effect when GPIO is programmed as input.
204 * Actually the the level register is read-only when configured as input. 159 * Actually the the level register is read-only when configured as input.
205 * Thus presetting the output level before switching to output is _NOT_ possible. 160 * Thus presetting the output level before switching to output is _NOT_ possible.
206 * Hence we set the level after configuring the GPIO as output. 161 * Hence we set the level after configuring the GPIO as output.
207 * But we cannot prevent a short low pulse if direction is set to high 162 * But we cannot prevent a short low pulse if direction is set to high
208 * and an external pull-up is connected. 163 * and an external pull-up is connected.
209 */ 164 */
210 sch_gpio_resume_set(gc, gpio_num, val); 165 sch_gpio_set(gc, gpio_num, val);
211 return 0; 166 return 0;
212} 167}
213 168
214static struct gpio_chip sch_gpio_resume = { 169static struct gpio_chip sch_gpio_chip = {
215 .label = "sch_gpio_resume", 170 .label = "sch_gpio",
216 .owner = THIS_MODULE, 171 .owner = THIS_MODULE,
217 .direction_input = sch_gpio_resume_direction_in, 172 .direction_input = sch_gpio_direction_in,
218 .get = sch_gpio_resume_get, 173 .get = sch_gpio_get,
219 .direction_output = sch_gpio_resume_direction_out, 174 .direction_output = sch_gpio_direction_out,
220 .set = sch_gpio_resume_set, 175 .set = sch_gpio_set,
221}; 176};
222 177
223static int sch_gpio_probe(struct platform_device *pdev) 178static int sch_gpio_probe(struct platform_device *pdev)
224{ 179{
180 struct sch_gpio *sch;
225 struct resource *res; 181 struct resource *res;
226 int err, id;
227 182
228 id = pdev->id; 183 sch = devm_kzalloc(&pdev->dev, sizeof(*sch), GFP_KERNEL);
229 if (!id) 184 if (!sch)
230 return -ENODEV; 185 return -ENOMEM;
231 186
232 res = platform_get_resource(pdev, IORESOURCE_IO, 0); 187 res = platform_get_resource(pdev, IORESOURCE_IO, 0);
233 if (!res) 188 if (!res)
234 return -EBUSY; 189 return -EBUSY;
235 190
236 if (!request_region(res->start, resource_size(res), pdev->name)) 191 if (!devm_request_region(&pdev->dev, res->start, resource_size(res),
192 pdev->name))
237 return -EBUSY; 193 return -EBUSY;
238 194
239 gpio_ba = res->start; 195 spin_lock_init(&sch->lock);
196 sch->iobase = res->start;
197 sch->chip = sch_gpio_chip;
198 sch->chip.label = dev_name(&pdev->dev);
199 sch->chip.dev = &pdev->dev;
240 200
241 switch (id) { 201 switch (pdev->id) {
242 case PCI_DEVICE_ID_INTEL_SCH_LPC: 202 case PCI_DEVICE_ID_INTEL_SCH_LPC:
243 sch_gpio_core.base = 0; 203 sch->core_base = 0;
244 sch_gpio_core.ngpio = 10; 204 sch->resume_base = 10;
245 sch_gpio_resume.base = 10; 205 sch->chip.ngpio = 14;
246 sch_gpio_resume.ngpio = 4; 206
247 /* 207 /*
248 * GPIO[6:0] enabled by default 208 * GPIO[6:0] enabled by default
249 * GPIO7 is configured by the CMC as SLPIOVR 209 * GPIO7 is configured by the CMC as SLPIOVR
250 * Enable GPIO[9:8] core powered gpios explicitly 210 * Enable GPIO[9:8] core powered gpios explicitly
251 */ 211 */
252 outb(0x3, gpio_ba + CGEN + 1); 212 sch_gpio_enable(sch, 8);
213 sch_gpio_enable(sch, 9);
253 /* 214 /*
254 * SUS_GPIO[2:0] enabled by default 215 * SUS_GPIO[2:0] enabled by default
255 * Enable SUS_GPIO3 resume powered gpio explicitly 216 * Enable SUS_GPIO3 resume powered gpio explicitly
256 */ 217 */
257 outb(0x8, gpio_ba + RGEN); 218 sch_gpio_enable(sch, 13);
258 break; 219 break;
259 220
260 case PCI_DEVICE_ID_INTEL_ITC_LPC: 221 case PCI_DEVICE_ID_INTEL_ITC_LPC:
261 sch_gpio_core.base = 0; 222 sch->core_base = 0;
262 sch_gpio_core.ngpio = 5; 223 sch->resume_base = 5;
263 sch_gpio_resume.base = 5; 224 sch->chip.ngpio = 14;
264 sch_gpio_resume.ngpio = 9;
265 break; 225 break;
266 226
267 case PCI_DEVICE_ID_INTEL_CENTERTON_ILB: 227 case PCI_DEVICE_ID_INTEL_CENTERTON_ILB:
268 sch_gpio_core.base = 0; 228 sch->core_base = 0;
269 sch_gpio_core.ngpio = 21; 229 sch->resume_base = 21;
270 sch_gpio_resume.base = 21; 230 sch->chip.ngpio = 30;
271 sch_gpio_resume.ngpio = 9;
272 break; 231 break;
273 232
274 default: 233 default:
275 err = -ENODEV; 234 return -ENODEV;
276 goto err_sch_gpio_core;
277 } 235 }
278 236
279 sch_gpio_core.dev = &pdev->dev; 237 platform_set_drvdata(pdev, sch);
280 sch_gpio_resume.dev = &pdev->dev;
281
282 err = gpiochip_add(&sch_gpio_core);
283 if (err < 0)
284 goto err_sch_gpio_core;
285 238
286 err = gpiochip_add(&sch_gpio_resume); 239 return gpiochip_add(&sch->chip);
287 if (err < 0)
288 goto err_sch_gpio_resume;
289
290 return 0;
291
292err_sch_gpio_resume:
293 gpiochip_remove(&sch_gpio_core);
294
295err_sch_gpio_core:
296 release_region(res->start, resource_size(res));
297 gpio_ba = 0;
298
299 return err;
300} 240}
301 241
302static int sch_gpio_remove(struct platform_device *pdev) 242static int sch_gpio_remove(struct platform_device *pdev)
303{ 243{
304 struct resource *res; 244 struct sch_gpio *sch = platform_get_drvdata(pdev);
305 if (gpio_ba) {
306
307 gpiochip_remove(&sch_gpio_core);
308 gpiochip_remove(&sch_gpio_resume);
309
310 res = platform_get_resource(pdev, IORESOURCE_IO, 0);
311
312 release_region(res->start, resource_size(res));
313 gpio_ba = 0;
314 }
315 245
246 gpiochip_remove(&sch->chip);
316 return 0; 247 return 0;
317} 248}
318 249
diff --git a/drivers/gpio/gpio-tc3589x.c b/drivers/gpio/gpio-tc3589x.c
index ae0f6466eb09..abdcf58935f5 100644
--- a/drivers/gpio/gpio-tc3589x.c
+++ b/drivers/gpio/gpio-tc3589x.c
@@ -262,7 +262,7 @@ static int tc3589x_gpio_probe(struct platform_device *pdev)
262 tc3589x_gpio->chip = template_chip; 262 tc3589x_gpio->chip = template_chip;
263 tc3589x_gpio->chip.ngpio = tc3589x->num_gpio; 263 tc3589x_gpio->chip.ngpio = tc3589x->num_gpio;
264 tc3589x_gpio->chip.dev = &pdev->dev; 264 tc3589x_gpio->chip.dev = &pdev->dev;
265 tc3589x_gpio->chip.base = (pdata) ? pdata->gpio_base : -1; 265 tc3589x_gpio->chip.base = -1;
266 266
267#ifdef CONFIG_OF_GPIO 267#ifdef CONFIG_OF_GPIO
268 tc3589x_gpio->chip.of_node = np; 268 tc3589x_gpio->chip.of_node = np;
diff --git a/drivers/gpio/gpio-tz1090.c b/drivers/gpio/gpio-tz1090.c
index 5246a60eff6d..6107d0682fd6 100644
--- a/drivers/gpio/gpio-tz1090.c
+++ b/drivers/gpio/gpio-tz1090.c
@@ -446,7 +446,7 @@ static int tz1090_gpio_bank_probe(struct tz1090_gpio_bank_info *info)
446 bank->irq = irq_of_parse_and_map(np, 0); 446 bank->irq = irq_of_parse_and_map(np, 0);
447 447
448 /* The interrupt is optional (it may be used by another core on chip) */ 448 /* The interrupt is optional (it may be used by another core on chip) */
449 if (bank->irq < 0) { 449 if (!bank->irq) {
450 dev_info(dev, "IRQ not provided for bank %u, IRQs disabled\n", 450 dev_info(dev, "IRQ not provided for bank %u, IRQs disabled\n",
451 info->index); 451 info->index);
452 return 0; 452 return 0;
diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c
index 74cd480bf8de..184c4b1b2558 100644
--- a/drivers/gpio/gpio-zynq.c
+++ b/drivers/gpio/gpio-zynq.c
@@ -578,7 +578,7 @@ static void zynq_gpio_free(struct gpio_chip *chip, unsigned offset)
578 578
579static const struct dev_pm_ops zynq_gpio_dev_pm_ops = { 579static const struct dev_pm_ops zynq_gpio_dev_pm_ops = {
580 SET_SYSTEM_SLEEP_PM_OPS(zynq_gpio_suspend, zynq_gpio_resume) 580 SET_SYSTEM_SLEEP_PM_OPS(zynq_gpio_suspend, zynq_gpio_resume)
581 SET_PM_RUNTIME_PM_OPS(zynq_gpio_runtime_suspend, 581 SET_RUNTIME_PM_OPS(zynq_gpio_runtime_suspend,
582 zynq_gpio_runtime_resume, NULL) 582 zynq_gpio_runtime_resume, NULL)
583}; 583};
584 584
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 05c6275da224..c3bdaff71c25 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -11,12 +11,14 @@
11 */ 11 */
12 12
13#include <linux/errno.h> 13#include <linux/errno.h>
14#include <linux/gpio.h>
14#include <linux/gpio/consumer.h> 15#include <linux/gpio/consumer.h>
15#include <linux/gpio/driver.h> 16#include <linux/gpio/driver.h>
16#include <linux/export.h> 17#include <linux/export.h>
17#include <linux/acpi.h> 18#include <linux/acpi.h>
18#include <linux/interrupt.h> 19#include <linux/interrupt.h>
19#include <linux/mutex.h> 20#include <linux/mutex.h>
21#include <linux/pinctrl/pinctrl.h>
20 22
21#include "gpiolib.h" 23#include "gpiolib.h"
22 24
@@ -55,6 +57,58 @@ static int acpi_gpiochip_find(struct gpio_chip *gc, void *data)
55 return ACPI_HANDLE(gc->dev) == data; 57 return ACPI_HANDLE(gc->dev) == data;
56} 58}
57 59
60#ifdef CONFIG_PINCTRL
61/**
62 * acpi_gpiochip_pin_to_gpio_offset() - translates ACPI GPIO to Linux GPIO
63 * @chip: GPIO chip
64 * @pin: ACPI GPIO pin number from GpioIo/GpioInt resource
65 *
66 * Function takes ACPI GpioIo/GpioInt pin number as a parameter and
67 * translates it to a corresponding offset suitable to be passed to a
68 * GPIO controller driver.
69 *
70 * Typically the returned offset is same as @pin, but if the GPIO
71 * controller uses pin controller and the mapping is not contigous the
72 * offset might be different.
73 */
74static int acpi_gpiochip_pin_to_gpio_offset(struct gpio_chip *chip, int pin)
75{
76 struct gpio_pin_range *pin_range;
77
78 /* If there are no ranges in this chip, use 1:1 mapping */
79 if (list_empty(&chip->pin_ranges))
80 return pin;
81
82 list_for_each_entry(pin_range, &chip->pin_ranges, node) {
83 const struct pinctrl_gpio_range *range = &pin_range->range;
84 int i;
85
86 if (range->pins) {
87 for (i = 0; i < range->npins; i++) {
88 if (range->pins[i] == pin)
89 return range->base + i - chip->base;
90 }
91 } else {
92 if (pin >= range->pin_base &&
93 pin < range->pin_base + range->npins) {
94 unsigned gpio_base;
95
96 gpio_base = range->base - chip->base;
97 return gpio_base + pin - range->pin_base;
98 }
99 }
100 }
101
102 return -EINVAL;
103}
104#else
105static inline int acpi_gpiochip_pin_to_gpio_offset(struct gpio_chip *chip,
106 int pin)
107{
108 return pin;
109}
110#endif
111
58/** 112/**
59 * acpi_get_gpiod() - Translate ACPI GPIO pin to GPIO descriptor usable with GPIO API 113 * acpi_get_gpiod() - Translate ACPI GPIO pin to GPIO descriptor usable with GPIO API
60 * @path: ACPI GPIO controller full path name, (e.g. "\\_SB.GPO1") 114 * @path: ACPI GPIO controller full path name, (e.g. "\\_SB.GPO1")
@@ -69,6 +123,7 @@ static struct gpio_desc *acpi_get_gpiod(char *path, int pin)
69 struct gpio_chip *chip; 123 struct gpio_chip *chip;
70 acpi_handle handle; 124 acpi_handle handle;
71 acpi_status status; 125 acpi_status status;
126 int offset;
72 127
73 status = acpi_get_handle(NULL, path, &handle); 128 status = acpi_get_handle(NULL, path, &handle);
74 if (ACPI_FAILURE(status)) 129 if (ACPI_FAILURE(status))
@@ -78,10 +133,11 @@ static struct gpio_desc *acpi_get_gpiod(char *path, int pin)
78 if (!chip) 133 if (!chip)
79 return ERR_PTR(-ENODEV); 134 return ERR_PTR(-ENODEV);
80 135
81 if (pin < 0 || pin > chip->ngpio) 136 offset = acpi_gpiochip_pin_to_gpio_offset(chip, pin);
82 return ERR_PTR(-EINVAL); 137 if (offset < 0)
138 return ERR_PTR(offset);
83 139
84 return gpiochip_get_desc(chip, pin); 140 return gpiochip_get_desc(chip, offset);
85} 141}
86 142
87static irqreturn_t acpi_gpio_irq_handler(int irq, void *data) 143static irqreturn_t acpi_gpio_irq_handler(int irq, void *data)
@@ -287,9 +343,45 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip)
287 } 343 }
288} 344}
289 345
346int acpi_dev_add_driver_gpios(struct acpi_device *adev,
347 const struct acpi_gpio_mapping *gpios)
348{
349 if (adev && gpios) {
350 adev->driver_gpios = gpios;
351 return 0;
352 }
353 return -EINVAL;
354}
355EXPORT_SYMBOL_GPL(acpi_dev_add_driver_gpios);
356
357static bool acpi_get_driver_gpio_data(struct acpi_device *adev,
358 const char *name, int index,
359 struct acpi_reference_args *args)
360{
361 const struct acpi_gpio_mapping *gm;
362
363 if (!adev->driver_gpios)
364 return false;
365
366 for (gm = adev->driver_gpios; gm->name; gm++)
367 if (!strcmp(name, gm->name) && gm->data && index < gm->size) {
368 const struct acpi_gpio_params *par = gm->data + index;
369
370 args->adev = adev;
371 args->args[0] = par->crs_entry_index;
372 args->args[1] = par->line_index;
373 args->args[2] = par->active_low;
374 args->nargs = 3;
375 return true;
376 }
377
378 return false;
379}
380
290struct acpi_gpio_lookup { 381struct acpi_gpio_lookup {
291 struct acpi_gpio_info info; 382 struct acpi_gpio_info info;
292 int index; 383 int index;
384 int pin_index;
293 struct gpio_desc *desc; 385 struct gpio_desc *desc;
294 int n; 386 int n;
295}; 387};
@@ -303,13 +395,24 @@ static int acpi_find_gpio(struct acpi_resource *ares, void *data)
303 395
304 if (lookup->n++ == lookup->index && !lookup->desc) { 396 if (lookup->n++ == lookup->index && !lookup->desc) {
305 const struct acpi_resource_gpio *agpio = &ares->data.gpio; 397 const struct acpi_resource_gpio *agpio = &ares->data.gpio;
398 int pin_index = lookup->pin_index;
399
400 if (pin_index >= agpio->pin_table_length)
401 return 1;
306 402
307 lookup->desc = acpi_get_gpiod(agpio->resource_source.string_ptr, 403 lookup->desc = acpi_get_gpiod(agpio->resource_source.string_ptr,
308 agpio->pin_table[0]); 404 agpio->pin_table[pin_index]);
309 lookup->info.gpioint = 405 lookup->info.gpioint =
310 agpio->connection_type == ACPI_RESOURCE_GPIO_TYPE_INT; 406 agpio->connection_type == ACPI_RESOURCE_GPIO_TYPE_INT;
311 lookup->info.active_low = 407
312 agpio->polarity == ACPI_ACTIVE_LOW; 408 /*
409 * ActiveLow is only specified for GpioInt resource. If
410 * GpioIo is used then the only way to set the flag is
411 * to use _DSD "gpios" property.
412 */
413 if (lookup->info.gpioint)
414 lookup->info.active_low =
415 agpio->polarity == ACPI_ACTIVE_LOW;
313 } 416 }
314 417
315 return 1; 418 return 1;
@@ -317,40 +420,79 @@ static int acpi_find_gpio(struct acpi_resource *ares, void *data)
317 420
318/** 421/**
319 * acpi_get_gpiod_by_index() - get a GPIO descriptor from device resources 422 * acpi_get_gpiod_by_index() - get a GPIO descriptor from device resources
320 * @dev: pointer to a device to get GPIO from 423 * @adev: pointer to a ACPI device to get GPIO from
424 * @propname: Property name of the GPIO (optional)
321 * @index: index of GpioIo/GpioInt resource (starting from %0) 425 * @index: index of GpioIo/GpioInt resource (starting from %0)
322 * @info: info pointer to fill in (optional) 426 * @info: info pointer to fill in (optional)
323 * 427 *
324 * Function goes through ACPI resources for @dev and based on @index looks 428 * Function goes through ACPI resources for @adev and based on @index looks
325 * up a GpioIo/GpioInt resource, translates it to the Linux GPIO descriptor, 429 * up a GpioIo/GpioInt resource, translates it to the Linux GPIO descriptor,
326 * and returns it. @index matches GpioIo/GpioInt resources only so if there 430 * and returns it. @index matches GpioIo/GpioInt resources only so if there
327 * are total %3 GPIO resources, the index goes from %0 to %2. 431 * are total %3 GPIO resources, the index goes from %0 to %2.
328 * 432 *
433 * If @propname is specified the GPIO is looked using device property. In
434 * that case @index is used to select the GPIO entry in the property value
435 * (in case of multiple).
436 *
329 * If the GPIO cannot be translated or there is an error an ERR_PTR is 437 * If the GPIO cannot be translated or there is an error an ERR_PTR is
330 * returned. 438 * returned.
331 * 439 *
332 * Note: if the GPIO resource has multiple entries in the pin list, this 440 * Note: if the GPIO resource has multiple entries in the pin list, this
333 * function only returns the first. 441 * function only returns the first.
334 */ 442 */
335struct gpio_desc *acpi_get_gpiod_by_index(struct device *dev, int index, 443struct gpio_desc *acpi_get_gpiod_by_index(struct acpi_device *adev,
444 const char *propname, int index,
336 struct acpi_gpio_info *info) 445 struct acpi_gpio_info *info)
337{ 446{
338 struct acpi_gpio_lookup lookup; 447 struct acpi_gpio_lookup lookup;
339 struct list_head resource_list; 448 struct list_head resource_list;
340 struct acpi_device *adev; 449 bool active_low = false;
341 acpi_handle handle;
342 int ret; 450 int ret;
343 451
344 if (!dev) 452 if (!adev)
345 return ERR_PTR(-EINVAL);
346
347 handle = ACPI_HANDLE(dev);
348 if (!handle || acpi_bus_get_device(handle, &adev))
349 return ERR_PTR(-ENODEV); 453 return ERR_PTR(-ENODEV);
350 454
351 memset(&lookup, 0, sizeof(lookup)); 455 memset(&lookup, 0, sizeof(lookup));
352 lookup.index = index; 456 lookup.index = index;
353 457
458 if (propname) {
459 struct acpi_reference_args args;
460
461 dev_dbg(&adev->dev, "GPIO: looking up %s\n", propname);
462
463 memset(&args, 0, sizeof(args));
464 ret = acpi_dev_get_property_reference(adev, propname,
465 index, &args);
466 if (ret) {
467 bool found = acpi_get_driver_gpio_data(adev, propname,
468 index, &args);
469 if (!found)
470 return ERR_PTR(ret);
471 }
472
473 /*
474 * The property was found and resolved so need to
475 * lookup the GPIO based on returned args instead.
476 */
477 adev = args.adev;
478 if (args.nargs >= 2) {
479 lookup.index = args.args[0];
480 lookup.pin_index = args.args[1];
481 /*
482 * 3rd argument, if present is used to
483 * specify active_low.
484 */
485 if (args.nargs >= 3)
486 active_low = !!args.args[2];
487 }
488
489 dev_dbg(&adev->dev, "GPIO: _DSD returned %s %zd %llu %llu %llu\n",
490 dev_name(&adev->dev), args.nargs,
491 args.args[0], args.args[1], args.args[2]);
492 } else {
493 dev_dbg(&adev->dev, "GPIO: looking up %d in _CRS\n", index);
494 }
495
354 INIT_LIST_HEAD(&resource_list); 496 INIT_LIST_HEAD(&resource_list);
355 ret = acpi_dev_get_resources(adev, &resource_list, acpi_find_gpio, 497 ret = acpi_dev_get_resources(adev, &resource_list, acpi_find_gpio,
356 &lookup); 498 &lookup);
@@ -359,8 +501,11 @@ struct gpio_desc *acpi_get_gpiod_by_index(struct device *dev, int index,
359 501
360 acpi_dev_free_resource_list(&resource_list); 502 acpi_dev_free_resource_list(&resource_list);
361 503
362 if (lookup.desc && info) 504 if (lookup.desc && info) {
363 *info = lookup.info; 505 *info = lookup.info;
506 if (active_low)
507 info->active_low = active_low;
508 }
364 509
365 return lookup.desc ? lookup.desc : ERR_PTR(-ENOENT); 510 return lookup.desc ? lookup.desc : ERR_PTR(-ENOENT);
366} 511}
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index e8e98ca25ec7..58659dbe702a 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1505,14 +1505,36 @@ static struct gpio_desc *acpi_find_gpio(struct device *dev, const char *con_id,
1505 unsigned int idx, 1505 unsigned int idx,
1506 enum gpio_lookup_flags *flags) 1506 enum gpio_lookup_flags *flags)
1507{ 1507{
1508 static const char * const suffixes[] = { "gpios", "gpio" };
1509 struct acpi_device *adev = ACPI_COMPANION(dev);
1508 struct acpi_gpio_info info; 1510 struct acpi_gpio_info info;
1509 struct gpio_desc *desc; 1511 struct gpio_desc *desc;
1512 char propname[32];
1513 int i;
1510 1514
1511 desc = acpi_get_gpiod_by_index(dev, idx, &info); 1515 /* Try first from _DSD */
1512 if (IS_ERR(desc)) 1516 for (i = 0; i < ARRAY_SIZE(suffixes); i++) {
1513 return desc; 1517 if (con_id && strcmp(con_id, "gpios")) {
1518 snprintf(propname, sizeof(propname), "%s-%s",
1519 con_id, suffixes[i]);
1520 } else {
1521 snprintf(propname, sizeof(propname), "%s",
1522 suffixes[i]);
1523 }
1524
1525 desc = acpi_get_gpiod_by_index(adev, propname, idx, &info);
1526 if (!IS_ERR(desc) || (PTR_ERR(desc) == -EPROBE_DEFER))
1527 break;
1528 }
1514 1529
1515 if (info.gpioint && info.active_low) 1530 /* Then from plain _CRS GPIOs */
1531 if (IS_ERR(desc)) {
1532 desc = acpi_get_gpiod_by_index(adev, NULL, idx, &info);
1533 if (IS_ERR(desc))
1534 return desc;
1535 }
1536
1537 if (info.active_low)
1516 *flags |= GPIO_ACTIVE_LOW; 1538 *flags |= GPIO_ACTIVE_LOW;
1517 1539
1518 return desc; 1540 return desc;
@@ -1713,6 +1735,61 @@ struct gpio_desc *__must_check __gpiod_get_index(struct device *dev,
1713EXPORT_SYMBOL_GPL(__gpiod_get_index); 1735EXPORT_SYMBOL_GPL(__gpiod_get_index);
1714 1736
1715/** 1737/**
1738 * fwnode_get_named_gpiod - obtain a GPIO from firmware node
1739 * @fwnode: handle of the firmware node
1740 * @propname: name of the firmware property representing the GPIO
1741 *
1742 * This function can be used for drivers that get their configuration
1743 * from firmware.
1744 *
1745 * Function properly finds the corresponding GPIO using whatever is the
1746 * underlying firmware interface and then makes sure that the GPIO
1747 * descriptor is requested before it is returned to the caller.
1748 *
1749 * In case of error an ERR_PTR() is returned.
1750 */
1751struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
1752 const char *propname)
1753{
1754 struct gpio_desc *desc = ERR_PTR(-ENODEV);
1755 bool active_low = false;
1756 int ret;
1757
1758 if (!fwnode)
1759 return ERR_PTR(-EINVAL);
1760
1761 if (is_of_node(fwnode)) {
1762 enum of_gpio_flags flags;
1763
1764 desc = of_get_named_gpiod_flags(of_node(fwnode), propname, 0,
1765 &flags);
1766 if (!IS_ERR(desc))
1767 active_low = flags & OF_GPIO_ACTIVE_LOW;
1768 } else if (is_acpi_node(fwnode)) {
1769 struct acpi_gpio_info info;
1770
1771 desc = acpi_get_gpiod_by_index(acpi_node(fwnode), propname, 0,
1772 &info);
1773 if (!IS_ERR(desc))
1774 active_low = info.active_low;
1775 }
1776
1777 if (IS_ERR(desc))
1778 return desc;
1779
1780 ret = gpiod_request(desc, NULL);
1781 if (ret)
1782 return ERR_PTR(ret);
1783
1784 /* Only value flag can be set from both DT and ACPI is active_low */
1785 if (active_low)
1786 set_bit(FLAG_ACTIVE_LOW, &desc->flags);
1787
1788 return desc;
1789}
1790EXPORT_SYMBOL_GPL(fwnode_get_named_gpiod);
1791
1792/**
1716 * gpiod_get_index_optional - obtain an optional GPIO from a multi-index GPIO 1793 * gpiod_get_index_optional - obtain an optional GPIO from a multi-index GPIO
1717 * function 1794 * function
1718 * @dev: GPIO consumer, can be NULL for system-global GPIOs 1795 * @dev: GPIO consumer, can be NULL for system-global GPIOs
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index 9db2b6a71c5d..e3a52113a541 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -34,7 +34,8 @@ void acpi_gpiochip_remove(struct gpio_chip *chip);
34void acpi_gpiochip_request_interrupts(struct gpio_chip *chip); 34void acpi_gpiochip_request_interrupts(struct gpio_chip *chip);
35void acpi_gpiochip_free_interrupts(struct gpio_chip *chip); 35void acpi_gpiochip_free_interrupts(struct gpio_chip *chip);
36 36
37struct gpio_desc *acpi_get_gpiod_by_index(struct device *dev, int index, 37struct gpio_desc *acpi_get_gpiod_by_index(struct acpi_device *adev,
38 const char *propname, int index,
38 struct acpi_gpio_info *info); 39 struct acpi_gpio_info *info);
39#else 40#else
40static inline void acpi_gpiochip_add(struct gpio_chip *chip) { } 41static inline void acpi_gpiochip_add(struct gpio_chip *chip) { }
@@ -47,8 +48,8 @@ static inline void
47acpi_gpiochip_free_interrupts(struct gpio_chip *chip) { } 48acpi_gpiochip_free_interrupts(struct gpio_chip *chip) { }
48 49
49static inline struct gpio_desc * 50static inline struct gpio_desc *
50acpi_get_gpiod_by_index(struct device *dev, int index, 51acpi_get_gpiod_by_index(struct acpi_device *adev, const char *propname,
51 struct acpi_gpio_info *info) 52 int index, struct acpi_gpio_info *info)
52{ 53{
53 return ERR_PTR(-ENOSYS); 54 return ERR_PTR(-ENOSYS);
54} 55}
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index 7496f55611a5..ef5feeecec84 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -226,7 +226,7 @@ struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev,
226 226
227 obj->dev_addr = DMA_ERROR_CODE; 227 obj->dev_addr = DMA_ERROR_CODE;
228 228
229 mapping = obj->obj.filp->f_path.dentry->d_inode->i_mapping; 229 mapping = file_inode(obj->obj.filp)->i_mapping;
230 mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE); 230 mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);
231 231
232 DRM_DEBUG_DRIVER("alloc obj %p size %zu\n", obj, size); 232 DRM_DEBUG_DRIVER("alloc obj %p size %zu\n", obj, size);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 68d38eb6774d..835b6af00970 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -1817,7 +1817,7 @@ static int fimc_resume(struct device *dev)
1817} 1817}
1818#endif 1818#endif
1819 1819
1820#ifdef CONFIG_PM_RUNTIME 1820#ifdef CONFIG_PM
1821static int fimc_runtime_suspend(struct device *dev) 1821static int fimc_runtime_suspend(struct device *dev)
1822{ 1822{
1823 struct fimc_context *ctx = get_fimc_context(dev); 1823 struct fimc_context *ctx = get_fimc_context(dev);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 6ff8599f6cbf..81a250830808 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -1540,7 +1540,7 @@ static int g2d_resume(struct device *dev)
1540} 1540}
1541#endif 1541#endif
1542 1542
1543#ifdef CONFIG_PM_RUNTIME 1543#ifdef CONFIG_PM
1544static int g2d_runtime_suspend(struct device *dev) 1544static int g2d_runtime_suspend(struct device *dev)
1545{ 1545{
1546 struct g2d_data *g2d = dev_get_drvdata(dev); 1546 struct g2d_data *g2d = dev_get_drvdata(dev);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index c6a013fc321c..0261468c8019 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -1764,7 +1764,7 @@ static int gsc_resume(struct device *dev)
1764} 1764}
1765#endif 1765#endif
1766 1766
1767#ifdef CONFIG_PM_RUNTIME 1767#ifdef CONFIG_PM
1768static int gsc_runtime_suspend(struct device *dev) 1768static int gsc_runtime_suspend(struct device *dev)
1769{ 1769{
1770 struct gsc_context *ctx = get_gsc_context(dev); 1770 struct gsc_context *ctx = get_gsc_context(dev);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index b6a37d4f5b13..425e70625388 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -822,7 +822,7 @@ static int rotator_resume(struct device *dev)
822} 822}
823#endif 823#endif
824 824
825#ifdef CONFIG_PM_RUNTIME 825#ifdef CONFIG_PM
826static int rotator_runtime_suspend(struct device *dev) 826static int rotator_runtime_suspend(struct device *dev)
827{ 827{
828 struct rot_context *rot = dev_get_drvdata(dev); 828 struct rot_context *rot = dev_get_drvdata(dev);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index f0a1a56406eb..9cb5c95d5898 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -4325,7 +4325,6 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
4325 ironlake_fdi_disable(crtc); 4325 ironlake_fdi_disable(crtc);
4326 4326
4327 ironlake_disable_pch_transcoder(dev_priv, pipe); 4327 ironlake_disable_pch_transcoder(dev_priv, pipe);
4328 intel_set_pch_fifo_underrun_reporting(dev, pipe, true);
4329 4328
4330 if (HAS_PCH_CPT(dev)) { 4329 if (HAS_PCH_CPT(dev)) {
4331 /* disable TRANS_DP_CTL */ 4330 /* disable TRANS_DP_CTL */
@@ -4389,7 +4388,6 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
4389 4388
4390 if (intel_crtc->config.has_pch_encoder) { 4389 if (intel_crtc->config.has_pch_encoder) {
4391 lpt_disable_pch_transcoder(dev_priv); 4390 lpt_disable_pch_transcoder(dev_priv);
4392 intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
4393 intel_ddi_fdi_disable(crtc); 4391 intel_ddi_fdi_disable(crtc);
4394 } 4392 }
4395 4393
@@ -9408,6 +9406,10 @@ static bool page_flip_finished(struct intel_crtc *crtc)
9408 struct drm_device *dev = crtc->base.dev; 9406 struct drm_device *dev = crtc->base.dev;
9409 struct drm_i915_private *dev_priv = dev->dev_private; 9407 struct drm_i915_private *dev_priv = dev->dev_private;
9410 9408
9409 if (i915_reset_in_progress(&dev_priv->gpu_error) ||
9410 crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
9411 return true;
9412
9411 /* 9413 /*
9412 * The relevant registers doen't exist on pre-ctg. 9414 * The relevant registers doen't exist on pre-ctg.
9413 * As the flip done interrupt doesn't trigger for mmio 9415 * As the flip done interrupt doesn't trigger for mmio
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 5ad45bfff3fe..4bcd91757321 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -4450,6 +4450,7 @@ static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4450 * vdd might still be enabled do to the delayed vdd off. 4450 * vdd might still be enabled do to the delayed vdd off.
4451 * Make sure vdd is actually turned off here. 4451 * Make sure vdd is actually turned off here.
4452 */ 4452 */
4453 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4453 pps_lock(intel_dp); 4454 pps_lock(intel_dp);
4454 edp_panel_vdd_off_sync(intel_dp); 4455 edp_panel_vdd_off_sync(intel_dp);
4455 pps_unlock(intel_dp); 4456 pps_unlock(intel_dp);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index a6bd1422e38f..c0bbf2172446 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -899,6 +899,17 @@ void intel_lvds_init(struct drm_device *dev)
899 int pipe; 899 int pipe;
900 u8 pin; 900 u8 pin;
901 901
902 /*
903 * Unlock registers and just leave them unlocked. Do this before
904 * checking quirk lists to avoid bogus WARNINGs.
905 */
906 if (HAS_PCH_SPLIT(dev)) {
907 I915_WRITE(PCH_PP_CONTROL,
908 I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS);
909 } else {
910 I915_WRITE(PP_CONTROL,
911 I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
912 }
902 if (!intel_lvds_supported(dev)) 913 if (!intel_lvds_supported(dev))
903 return; 914 return;
904 915
@@ -1097,17 +1108,6 @@ out:
1097 lvds_encoder->a3_power = I915_READ(lvds_encoder->reg) & 1108 lvds_encoder->a3_power = I915_READ(lvds_encoder->reg) &
1098 LVDS_A3_POWER_MASK; 1109 LVDS_A3_POWER_MASK;
1099 1110
1100 /*
1101 * Unlock registers and just
1102 * leave them unlocked
1103 */
1104 if (HAS_PCH_SPLIT(dev)) {
1105 I915_WRITE(PCH_PP_CONTROL,
1106 I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS);
1107 } else {
1108 I915_WRITE(PP_CONTROL,
1109 I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
1110 }
1111 lvds_connector->lid_notifier.notifier_call = intel_lid_notify; 1111 lvds_connector->lid_notifier.notifier_call = intel_lid_notify;
1112 if (acpi_lid_notifier_register(&lvds_connector->lid_notifier)) { 1112 if (acpi_lid_notifier_register(&lvds_connector->lid_notifier)) {
1113 DRM_DEBUG_KMS("lid notifier registration failed\n"); 1113 DRM_DEBUG_KMS("lid notifier registration failed\n");
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
index cd05677ad4b7..72a40f95d048 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
@@ -218,7 +218,6 @@ nvc0_identify(struct nouveau_device *device)
218 device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass; 218 device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
219 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; 219 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
220 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; 220 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
221 device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
222 device->oclass[NVDEV_ENGINE_DISP ] = nva3_disp_oclass; 221 device->oclass[NVDEV_ENGINE_DISP ] = nva3_disp_oclass;
223 device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass; 222 device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
224 break; 223 break;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
index 5ae6a43893b5..1931057f9962 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
@@ -551,8 +551,8 @@ nv04_fifo_intr(struct nouveau_subdev *subdev)
551 } 551 }
552 552
553 if (status & 0x40000000) { 553 if (status & 0x40000000) {
554 nouveau_fifo_uevent(&priv->base);
555 nv_wr32(priv, 0x002100, 0x40000000); 554 nv_wr32(priv, 0x002100, 0x40000000);
555 nouveau_fifo_uevent(&priv->base);
556 status &= ~0x40000000; 556 status &= ~0x40000000;
557 } 557 }
558 } 558 }
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
index 1fe1f8fbda0c..074d434c3077 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
@@ -740,6 +740,8 @@ nvc0_fifo_intr_engine_unit(struct nvc0_fifo_priv *priv, int engn)
740 u32 inte = nv_rd32(priv, 0x002628); 740 u32 inte = nv_rd32(priv, 0x002628);
741 u32 unkn; 741 u32 unkn;
742 742
743 nv_wr32(priv, 0x0025a8 + (engn * 0x04), intr);
744
743 for (unkn = 0; unkn < 8; unkn++) { 745 for (unkn = 0; unkn < 8; unkn++) {
744 u32 ints = (intr >> (unkn * 0x04)) & inte; 746 u32 ints = (intr >> (unkn * 0x04)) & inte;
745 if (ints & 0x1) { 747 if (ints & 0x1) {
@@ -751,8 +753,6 @@ nvc0_fifo_intr_engine_unit(struct nvc0_fifo_priv *priv, int engn)
751 nv_mask(priv, 0x002628, ints, 0); 753 nv_mask(priv, 0x002628, ints, 0);
752 } 754 }
753 } 755 }
754
755 nv_wr32(priv, 0x0025a8 + (engn * 0x04), intr);
756} 756}
757 757
758static void 758static void
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
index d2f0fd39c145..f8734eb74eaa 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
@@ -952,8 +952,8 @@ nve0_fifo_intr(struct nouveau_subdev *subdev)
952 } 952 }
953 953
954 if (stat & 0x80000000) { 954 if (stat & 0x80000000) {
955 nve0_fifo_intr_engine(priv);
956 nv_wr32(priv, 0x002100, 0x80000000); 955 nv_wr32(priv, 0x002100, 0x80000000);
956 nve0_fifo_intr_engine(priv);
957 stat &= ~0x80000000; 957 stat &= ~0x80000000;
958 } 958 }
959 959
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 57238076049f..62b97c4eef8d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -629,7 +629,6 @@ int nouveau_pmops_suspend(struct device *dev)
629 629
630 pci_save_state(pdev); 630 pci_save_state(pdev);
631 pci_disable_device(pdev); 631 pci_disable_device(pdev);
632 pci_ignore_hotplug(pdev);
633 pci_set_power_state(pdev, PCI_D3hot); 632 pci_set_power_state(pdev, PCI_D3hot);
634 return 0; 633 return 0;
635} 634}
@@ -933,6 +932,7 @@ static int nouveau_pmops_runtime_suspend(struct device *dev)
933 ret = nouveau_do_suspend(drm_dev, true); 932 ret = nouveau_do_suspend(drm_dev, true);
934 pci_save_state(pdev); 933 pci_save_state(pdev);
935 pci_disable_device(pdev); 934 pci_disable_device(pdev);
935 pci_ignore_hotplug(pdev);
936 pci_set_power_state(pdev, PCI_D3cold); 936 pci_set_power_state(pdev, PCI_D3cold);
937 drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF; 937 drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
938 return ret; 938 return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 515cd9aebb99..f32a434724e3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -52,20 +52,24 @@ nouveau_fctx(struct nouveau_fence *fence)
52 return container_of(fence->base.lock, struct nouveau_fence_chan, lock); 52 return container_of(fence->base.lock, struct nouveau_fence_chan, lock);
53} 53}
54 54
55static void 55static int
56nouveau_fence_signal(struct nouveau_fence *fence) 56nouveau_fence_signal(struct nouveau_fence *fence)
57{ 57{
58 int drop = 0;
59
58 fence_signal_locked(&fence->base); 60 fence_signal_locked(&fence->base);
59 list_del(&fence->head); 61 list_del(&fence->head);
62 rcu_assign_pointer(fence->channel, NULL);
60 63
61 if (test_bit(FENCE_FLAG_USER_BITS, &fence->base.flags)) { 64 if (test_bit(FENCE_FLAG_USER_BITS, &fence->base.flags)) {
62 struct nouveau_fence_chan *fctx = nouveau_fctx(fence); 65 struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
63 66
64 if (!--fctx->notify_ref) 67 if (!--fctx->notify_ref)
65 nvif_notify_put(&fctx->notify); 68 drop = 1;
66 } 69 }
67 70
68 fence_put(&fence->base); 71 fence_put(&fence->base);
72 return drop;
69} 73}
70 74
71static struct nouveau_fence * 75static struct nouveau_fence *
@@ -88,16 +92,23 @@ nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
88{ 92{
89 struct nouveau_fence *fence; 93 struct nouveau_fence *fence;
90 94
91 nvif_notify_fini(&fctx->notify);
92
93 spin_lock_irq(&fctx->lock); 95 spin_lock_irq(&fctx->lock);
94 while (!list_empty(&fctx->pending)) { 96 while (!list_empty(&fctx->pending)) {
95 fence = list_entry(fctx->pending.next, typeof(*fence), head); 97 fence = list_entry(fctx->pending.next, typeof(*fence), head);
96 98
97 nouveau_fence_signal(fence); 99 if (nouveau_fence_signal(fence))
98 fence->channel = NULL; 100 nvif_notify_put(&fctx->notify);
99 } 101 }
100 spin_unlock_irq(&fctx->lock); 102 spin_unlock_irq(&fctx->lock);
103
104 nvif_notify_fini(&fctx->notify);
105 fctx->dead = 1;
106
107 /*
108 * Ensure that all accesses to fence->channel complete before freeing
109 * the channel.
110 */
111 synchronize_rcu();
101} 112}
102 113
103static void 114static void
@@ -112,21 +123,23 @@ nouveau_fence_context_free(struct nouveau_fence_chan *fctx)
112 kref_put(&fctx->fence_ref, nouveau_fence_context_put); 123 kref_put(&fctx->fence_ref, nouveau_fence_context_put);
113} 124}
114 125
115static void 126static int
116nouveau_fence_update(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx) 127nouveau_fence_update(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
117{ 128{
118 struct nouveau_fence *fence; 129 struct nouveau_fence *fence;
119 130 int drop = 0;
120 u32 seq = fctx->read(chan); 131 u32 seq = fctx->read(chan);
121 132
122 while (!list_empty(&fctx->pending)) { 133 while (!list_empty(&fctx->pending)) {
123 fence = list_entry(fctx->pending.next, typeof(*fence), head); 134 fence = list_entry(fctx->pending.next, typeof(*fence), head);
124 135
125 if ((int)(seq - fence->base.seqno) < 0) 136 if ((int)(seq - fence->base.seqno) < 0)
126 return; 137 break;
127 138
128 nouveau_fence_signal(fence); 139 drop |= nouveau_fence_signal(fence);
129 } 140 }
141
142 return drop;
130} 143}
131 144
132static int 145static int
@@ -135,18 +148,21 @@ nouveau_fence_wait_uevent_handler(struct nvif_notify *notify)
135 struct nouveau_fence_chan *fctx = 148 struct nouveau_fence_chan *fctx =
136 container_of(notify, typeof(*fctx), notify); 149 container_of(notify, typeof(*fctx), notify);
137 unsigned long flags; 150 unsigned long flags;
151 int ret = NVIF_NOTIFY_KEEP;
138 152
139 spin_lock_irqsave(&fctx->lock, flags); 153 spin_lock_irqsave(&fctx->lock, flags);
140 if (!list_empty(&fctx->pending)) { 154 if (!list_empty(&fctx->pending)) {
141 struct nouveau_fence *fence; 155 struct nouveau_fence *fence;
156 struct nouveau_channel *chan;
142 157
143 fence = list_entry(fctx->pending.next, typeof(*fence), head); 158 fence = list_entry(fctx->pending.next, typeof(*fence), head);
144 nouveau_fence_update(fence->channel, fctx); 159 chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock));
160 if (nouveau_fence_update(fence->channel, fctx))
161 ret = NVIF_NOTIFY_DROP;
145 } 162 }
146 spin_unlock_irqrestore(&fctx->lock, flags); 163 spin_unlock_irqrestore(&fctx->lock, flags);
147 164
148 /* Always return keep here. NVIF refcount is handled with nouveau_fence_update */ 165 return ret;
149 return NVIF_NOTIFY_KEEP;
150} 166}
151 167
152void 168void
@@ -262,7 +278,10 @@ nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
262 if (!ret) { 278 if (!ret) {
263 fence_get(&fence->base); 279 fence_get(&fence->base);
264 spin_lock_irq(&fctx->lock); 280 spin_lock_irq(&fctx->lock);
265 nouveau_fence_update(chan, fctx); 281
282 if (nouveau_fence_update(chan, fctx))
283 nvif_notify_put(&fctx->notify);
284
266 list_add_tail(&fence->head, &fctx->pending); 285 list_add_tail(&fence->head, &fctx->pending);
267 spin_unlock_irq(&fctx->lock); 286 spin_unlock_irq(&fctx->lock);
268 } 287 }
@@ -276,13 +295,16 @@ nouveau_fence_done(struct nouveau_fence *fence)
276 if (fence->base.ops == &nouveau_fence_ops_legacy || 295 if (fence->base.ops == &nouveau_fence_ops_legacy ||
277 fence->base.ops == &nouveau_fence_ops_uevent) { 296 fence->base.ops == &nouveau_fence_ops_uevent) {
278 struct nouveau_fence_chan *fctx = nouveau_fctx(fence); 297 struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
298 struct nouveau_channel *chan;
279 unsigned long flags; 299 unsigned long flags;
280 300
281 if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags)) 301 if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
282 return true; 302 return true;
283 303
284 spin_lock_irqsave(&fctx->lock, flags); 304 spin_lock_irqsave(&fctx->lock, flags);
285 nouveau_fence_update(fence->channel, fctx); 305 chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock));
306 if (chan && nouveau_fence_update(chan, fctx))
307 nvif_notify_put(&fctx->notify);
286 spin_unlock_irqrestore(&fctx->lock, flags); 308 spin_unlock_irqrestore(&fctx->lock, flags);
287 } 309 }
288 return fence_is_signaled(&fence->base); 310 return fence_is_signaled(&fence->base);
@@ -387,12 +409,18 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
387 409
388 if (fence && (!exclusive || !fobj || !fobj->shared_count)) { 410 if (fence && (!exclusive || !fobj || !fobj->shared_count)) {
389 struct nouveau_channel *prev = NULL; 411 struct nouveau_channel *prev = NULL;
412 bool must_wait = true;
390 413
391 f = nouveau_local_fence(fence, chan->drm); 414 f = nouveau_local_fence(fence, chan->drm);
392 if (f) 415 if (f) {
393 prev = f->channel; 416 rcu_read_lock();
417 prev = rcu_dereference(f->channel);
418 if (prev && (prev == chan || fctx->sync(f, prev, chan) == 0))
419 must_wait = false;
420 rcu_read_unlock();
421 }
394 422
395 if (!prev || (prev != chan && (ret = fctx->sync(f, prev, chan)))) 423 if (must_wait)
396 ret = fence_wait(fence, intr); 424 ret = fence_wait(fence, intr);
397 425
398 return ret; 426 return ret;
@@ -403,19 +431,22 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
403 431
404 for (i = 0; i < fobj->shared_count && !ret; ++i) { 432 for (i = 0; i < fobj->shared_count && !ret; ++i) {
405 struct nouveau_channel *prev = NULL; 433 struct nouveau_channel *prev = NULL;
434 bool must_wait = true;
406 435
407 fence = rcu_dereference_protected(fobj->shared[i], 436 fence = rcu_dereference_protected(fobj->shared[i],
408 reservation_object_held(resv)); 437 reservation_object_held(resv));
409 438
410 f = nouveau_local_fence(fence, chan->drm); 439 f = nouveau_local_fence(fence, chan->drm);
411 if (f) 440 if (f) {
412 prev = f->channel; 441 rcu_read_lock();
442 prev = rcu_dereference(f->channel);
443 if (prev && (prev == chan || fctx->sync(f, prev, chan) == 0))
444 must_wait = false;
445 rcu_read_unlock();
446 }
413 447
414 if (!prev || (prev != chan && (ret = fctx->sync(f, prev, chan)))) 448 if (must_wait)
415 ret = fence_wait(fence, intr); 449 ret = fence_wait(fence, intr);
416
417 if (ret)
418 break;
419 } 450 }
420 451
421 return ret; 452 return ret;
@@ -463,7 +494,7 @@ static const char *nouveau_fence_get_timeline_name(struct fence *f)
463 struct nouveau_fence *fence = from_fence(f); 494 struct nouveau_fence *fence = from_fence(f);
464 struct nouveau_fence_chan *fctx = nouveau_fctx(fence); 495 struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
465 496
466 return fence->channel ? fctx->name : "dead channel"; 497 return !fctx->dead ? fctx->name : "dead channel";
467} 498}
468 499
469/* 500/*
@@ -476,9 +507,16 @@ static bool nouveau_fence_is_signaled(struct fence *f)
476{ 507{
477 struct nouveau_fence *fence = from_fence(f); 508 struct nouveau_fence *fence = from_fence(f);
478 struct nouveau_fence_chan *fctx = nouveau_fctx(fence); 509 struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
479 struct nouveau_channel *chan = fence->channel; 510 struct nouveau_channel *chan;
511 bool ret = false;
512
513 rcu_read_lock();
514 chan = rcu_dereference(fence->channel);
515 if (chan)
516 ret = (int)(fctx->read(chan) - fence->base.seqno) >= 0;
517 rcu_read_unlock();
480 518
481 return (int)(fctx->read(chan) - fence->base.seqno) >= 0; 519 return ret;
482} 520}
483 521
484static bool nouveau_fence_no_signaling(struct fence *f) 522static bool nouveau_fence_no_signaling(struct fence *f)
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index 943b0b17b1fc..96e461c6f68f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -14,7 +14,7 @@ struct nouveau_fence {
14 14
15 bool sysmem; 15 bool sysmem;
16 16
17 struct nouveau_channel *channel; 17 struct nouveau_channel __rcu *channel;
18 unsigned long timeout; 18 unsigned long timeout;
19}; 19};
20 20
@@ -47,7 +47,7 @@ struct nouveau_fence_chan {
47 char name[32]; 47 char name[32];
48 48
49 struct nvif_notify notify; 49 struct nvif_notify notify;
50 int notify_ref; 50 int notify_ref, dead;
51}; 51};
52 52
53struct nouveau_fence_priv { 53struct nouveau_fence_priv {
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 300c4b3d4669..26baa9c05f6c 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -322,6 +322,12 @@ static void radeon_connector_get_edid(struct drm_connector *connector)
322 } 322 }
323 323
324 if (!radeon_connector->edid) { 324 if (!radeon_connector->edid) {
325 /* don't fetch the edid from the vbios if ddc fails and runpm is
326 * enabled so we report disconnected.
327 */
328 if ((rdev->flags & RADEON_IS_PX) && (radeon_runtime_pm != 0))
329 return;
330
325 if (rdev->is_atom_bios) { 331 if (rdev->is_atom_bios) {
326 /* some laptops provide a hardcoded edid in rom for LCDs */ 332 /* some laptops provide a hardcoded edid in rom for LCDs */
327 if (((connector->connector_type == DRM_MODE_CONNECTOR_LVDS) || 333 if (((connector->connector_type == DRM_MODE_CONNECTOR_LVDS) ||
@@ -826,6 +832,8 @@ static int radeon_lvds_mode_valid(struct drm_connector *connector,
826static enum drm_connector_status 832static enum drm_connector_status
827radeon_lvds_detect(struct drm_connector *connector, bool force) 833radeon_lvds_detect(struct drm_connector *connector, bool force)
828{ 834{
835 struct drm_device *dev = connector->dev;
836 struct radeon_device *rdev = dev->dev_private;
829 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 837 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
830 struct drm_encoder *encoder = radeon_best_single_encoder(connector); 838 struct drm_encoder *encoder = radeon_best_single_encoder(connector);
831 enum drm_connector_status ret = connector_status_disconnected; 839 enum drm_connector_status ret = connector_status_disconnected;
@@ -842,7 +850,11 @@ radeon_lvds_detect(struct drm_connector *connector, bool force)
842 /* check if panel is valid */ 850 /* check if panel is valid */
843 if (native_mode->hdisplay >= 320 && native_mode->vdisplay >= 240) 851 if (native_mode->hdisplay >= 320 && native_mode->vdisplay >= 240)
844 ret = connector_status_connected; 852 ret = connector_status_connected;
845 853 /* don't fetch the edid from the vbios if ddc fails and runpm is
854 * enabled so we report disconnected.
855 */
856 if ((rdev->flags & RADEON_IS_PX) && (radeon_runtime_pm != 0))
857 ret = connector_status_disconnected;
846 } 858 }
847 859
848 /* check for edid as well */ 860 /* check for edid as well */
@@ -1589,6 +1601,11 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
1589 /* check if panel is valid */ 1601 /* check if panel is valid */
1590 if (native_mode->hdisplay >= 320 && native_mode->vdisplay >= 240) 1602 if (native_mode->hdisplay >= 320 && native_mode->vdisplay >= 240)
1591 ret = connector_status_connected; 1603 ret = connector_status_connected;
1604 /* don't fetch the edid from the vbios if ddc fails and runpm is
1605 * enabled so we report disconnected.
1606 */
1607 if ((rdev->flags & RADEON_IS_PX) && (radeon_runtime_pm != 0))
1608 ret = connector_status_disconnected;
1592 } 1609 }
1593 /* eDP is always DP */ 1610 /* eDP is always DP */
1594 radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT; 1611 radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT;
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index a3e7aed7e680..6f377de099f9 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -251,22 +251,19 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority
251 251
252static int radeon_cs_sync_rings(struct radeon_cs_parser *p) 252static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
253{ 253{
254 int i, r = 0; 254 struct radeon_cs_reloc *reloc;
255 int r;
255 256
256 for (i = 0; i < p->nrelocs; i++) { 257 list_for_each_entry(reloc, &p->validated, tv.head) {
257 struct reservation_object *resv; 258 struct reservation_object *resv;
258 259
259 if (!p->relocs[i].robj) 260 resv = reloc->robj->tbo.resv;
260 continue;
261
262 resv = p->relocs[i].robj->tbo.resv;
263 r = radeon_semaphore_sync_resv(p->rdev, p->ib.semaphore, resv, 261 r = radeon_semaphore_sync_resv(p->rdev, p->ib.semaphore, resv,
264 p->relocs[i].tv.shared); 262 reloc->tv.shared);
265
266 if (r) 263 if (r)
267 break; 264 return r;
268 } 265 }
269 return r; 266 return 0;
270} 267}
271 268
272/* XXX: note that this is called from the legacy UMS CS ioctl as well */ 269/* XXX: note that this is called from the legacy UMS CS ioctl as well */
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 7784911d78ef..00fc59762e0d 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -185,6 +185,16 @@ static bool radeon_msi_ok(struct radeon_device *rdev)
185 if (rdev->flags & RADEON_IS_AGP) 185 if (rdev->flags & RADEON_IS_AGP)
186 return false; 186 return false;
187 187
188 /*
189 * Older chips have a HW limitation, they can only generate 40 bits
190 * of address for "64-bit" MSIs which breaks on some platforms, notably
191 * IBM POWER servers, so we limit them
192 */
193 if (rdev->family < CHIP_BONAIRE) {
194 dev_info(rdev->dev, "radeon: MSI limited to 32-bit\n");
195 rdev->pdev->no_64bit_msi = 1;
196 }
197
188 /* force MSI on */ 198 /* force MSI on */
189 if (radeon_msi == 1) 199 if (radeon_msi == 1)
190 return true; 200 return true;
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 8309b11e674d..03586763ee86 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -795,6 +795,8 @@ int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
795 795
796 /* Get associated drm_crtc: */ 796 /* Get associated drm_crtc: */
797 drmcrtc = &rdev->mode_info.crtcs[crtc]->base; 797 drmcrtc = &rdev->mode_info.crtcs[crtc]->base;
798 if (!drmcrtc)
799 return -EINVAL;
798 800
799 /* Helper routine in DRM core does all the work: */ 801 /* Helper routine in DRM core does all the work: */
800 return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error, 802 return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 99a960a4f302..4c0d786d5c7a 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -213,6 +213,13 @@ int radeon_bo_create(struct radeon_device *rdev,
213 if (!(rdev->flags & RADEON_IS_PCIE)) 213 if (!(rdev->flags & RADEON_IS_PCIE))
214 bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC); 214 bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
215 215
216#ifdef CONFIG_X86_32
217 /* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
218 * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
219 */
220 bo->flags &= ~RADEON_GEM_GTT_WC;
221#endif
222
216 radeon_ttm_placement_from_domain(bo, domain); 223 radeon_ttm_placement_from_domain(bo, domain);
217 /* Kernel allocation are uninterruptible */ 224 /* Kernel allocation are uninterruptible */
218 down_read(&rdev->pm.mclk_lock); 225 down_read(&rdev->pm.mclk_lock);
diff --git a/drivers/gpu/ipu-v3/ipu-csi.c b/drivers/gpu/ipu-v3/ipu-csi.c
index d6f56471bd2a..752cdd2da89a 100644
--- a/drivers/gpu/ipu-v3/ipu-csi.c
+++ b/drivers/gpu/ipu-v3/ipu-csi.c
@@ -227,83 +227,83 @@ static int ipu_csi_set_testgen_mclk(struct ipu_csi *csi, u32 pixel_clk,
227static int mbus_code_to_bus_cfg(struct ipu_csi_bus_config *cfg, u32 mbus_code) 227static int mbus_code_to_bus_cfg(struct ipu_csi_bus_config *cfg, u32 mbus_code)
228{ 228{
229 switch (mbus_code) { 229 switch (mbus_code) {
230 case V4L2_MBUS_FMT_BGR565_2X8_BE: 230 case MEDIA_BUS_FMT_BGR565_2X8_BE:
231 case V4L2_MBUS_FMT_BGR565_2X8_LE: 231 case MEDIA_BUS_FMT_BGR565_2X8_LE:
232 case V4L2_MBUS_FMT_RGB565_2X8_BE: 232 case MEDIA_BUS_FMT_RGB565_2X8_BE:
233 case V4L2_MBUS_FMT_RGB565_2X8_LE: 233 case MEDIA_BUS_FMT_RGB565_2X8_LE:
234 cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_RGB565; 234 cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_RGB565;
235 cfg->mipi_dt = MIPI_DT_RGB565; 235 cfg->mipi_dt = MIPI_DT_RGB565;
236 cfg->data_width = IPU_CSI_DATA_WIDTH_8; 236 cfg->data_width = IPU_CSI_DATA_WIDTH_8;
237 break; 237 break;
238 case V4L2_MBUS_FMT_RGB444_2X8_PADHI_BE: 238 case MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE:
239 case V4L2_MBUS_FMT_RGB444_2X8_PADHI_LE: 239 case MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE:
240 cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_RGB444; 240 cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_RGB444;
241 cfg->mipi_dt = MIPI_DT_RGB444; 241 cfg->mipi_dt = MIPI_DT_RGB444;
242 cfg->data_width = IPU_CSI_DATA_WIDTH_8; 242 cfg->data_width = IPU_CSI_DATA_WIDTH_8;
243 break; 243 break;
244 case V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE: 244 case MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE:
245 case V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE: 245 case MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE:
246 cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_RGB555; 246 cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_RGB555;
247 cfg->mipi_dt = MIPI_DT_RGB555; 247 cfg->mipi_dt = MIPI_DT_RGB555;
248 cfg->data_width = IPU_CSI_DATA_WIDTH_8; 248 cfg->data_width = IPU_CSI_DATA_WIDTH_8;
249 break; 249 break;
250 case V4L2_MBUS_FMT_UYVY8_2X8: 250 case MEDIA_BUS_FMT_UYVY8_2X8:
251 cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_YUV422_UYVY; 251 cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_YUV422_UYVY;
252 cfg->mipi_dt = MIPI_DT_YUV422; 252 cfg->mipi_dt = MIPI_DT_YUV422;
253 cfg->data_width = IPU_CSI_DATA_WIDTH_8; 253 cfg->data_width = IPU_CSI_DATA_WIDTH_8;
254 break; 254 break;
255 case V4L2_MBUS_FMT_YUYV8_2X8: 255 case MEDIA_BUS_FMT_YUYV8_2X8:
256 cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_YUV422_YUYV; 256 cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_YUV422_YUYV;
257 cfg->mipi_dt = MIPI_DT_YUV422; 257 cfg->mipi_dt = MIPI_DT_YUV422;
258 cfg->data_width = IPU_CSI_DATA_WIDTH_8; 258 cfg->data_width = IPU_CSI_DATA_WIDTH_8;
259 break; 259 break;
260 case V4L2_MBUS_FMT_UYVY8_1X16: 260 case MEDIA_BUS_FMT_UYVY8_1X16:
261 cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_YUV422_UYVY; 261 cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_YUV422_UYVY;
262 cfg->mipi_dt = MIPI_DT_YUV422; 262 cfg->mipi_dt = MIPI_DT_YUV422;
263 cfg->data_width = IPU_CSI_DATA_WIDTH_16; 263 cfg->data_width = IPU_CSI_DATA_WIDTH_16;
264 break; 264 break;
265 case V4L2_MBUS_FMT_YUYV8_1X16: 265 case MEDIA_BUS_FMT_YUYV8_1X16:
266 cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_YUV422_YUYV; 266 cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_YUV422_YUYV;
267 cfg->mipi_dt = MIPI_DT_YUV422; 267 cfg->mipi_dt = MIPI_DT_YUV422;
268 cfg->data_width = IPU_CSI_DATA_WIDTH_16; 268 cfg->data_width = IPU_CSI_DATA_WIDTH_16;
269 break; 269 break;
270 case V4L2_MBUS_FMT_SBGGR8_1X8: 270 case MEDIA_BUS_FMT_SBGGR8_1X8:
271 case V4L2_MBUS_FMT_SGBRG8_1X8: 271 case MEDIA_BUS_FMT_SGBRG8_1X8:
272 case V4L2_MBUS_FMT_SGRBG8_1X8: 272 case MEDIA_BUS_FMT_SGRBG8_1X8:
273 case V4L2_MBUS_FMT_SRGGB8_1X8: 273 case MEDIA_BUS_FMT_SRGGB8_1X8:
274 cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER; 274 cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
275 cfg->mipi_dt = MIPI_DT_RAW8; 275 cfg->mipi_dt = MIPI_DT_RAW8;
276 cfg->data_width = IPU_CSI_DATA_WIDTH_8; 276 cfg->data_width = IPU_CSI_DATA_WIDTH_8;
277 break; 277 break;
278 case V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8: 278 case MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8:
279 case V4L2_MBUS_FMT_SGBRG10_DPCM8_1X8: 279 case MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8:
280 case V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8: 280 case MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8:
281 case V4L2_MBUS_FMT_SRGGB10_DPCM8_1X8: 281 case MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8:
282 case V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_BE: 282 case MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_BE:
283 case V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE: 283 case MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE:
284 case V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_BE: 284 case MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_BE:
285 case V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_LE: 285 case MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_LE:
286 cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER; 286 cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
287 cfg->mipi_dt = MIPI_DT_RAW10; 287 cfg->mipi_dt = MIPI_DT_RAW10;
288 cfg->data_width = IPU_CSI_DATA_WIDTH_8; 288 cfg->data_width = IPU_CSI_DATA_WIDTH_8;
289 break; 289 break;
290 case V4L2_MBUS_FMT_SBGGR10_1X10: 290 case MEDIA_BUS_FMT_SBGGR10_1X10:
291 case V4L2_MBUS_FMT_SGBRG10_1X10: 291 case MEDIA_BUS_FMT_SGBRG10_1X10:
292 case V4L2_MBUS_FMT_SGRBG10_1X10: 292 case MEDIA_BUS_FMT_SGRBG10_1X10:
293 case V4L2_MBUS_FMT_SRGGB10_1X10: 293 case MEDIA_BUS_FMT_SRGGB10_1X10:
294 cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER; 294 cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
295 cfg->mipi_dt = MIPI_DT_RAW10; 295 cfg->mipi_dt = MIPI_DT_RAW10;
296 cfg->data_width = IPU_CSI_DATA_WIDTH_10; 296 cfg->data_width = IPU_CSI_DATA_WIDTH_10;
297 break; 297 break;
298 case V4L2_MBUS_FMT_SBGGR12_1X12: 298 case MEDIA_BUS_FMT_SBGGR12_1X12:
299 case V4L2_MBUS_FMT_SGBRG12_1X12: 299 case MEDIA_BUS_FMT_SGBRG12_1X12:
300 case V4L2_MBUS_FMT_SGRBG12_1X12: 300 case MEDIA_BUS_FMT_SGRBG12_1X12:
301 case V4L2_MBUS_FMT_SRGGB12_1X12: 301 case MEDIA_BUS_FMT_SRGGB12_1X12:
302 cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER; 302 cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
303 cfg->mipi_dt = MIPI_DT_RAW12; 303 cfg->mipi_dt = MIPI_DT_RAW12;
304 cfg->data_width = IPU_CSI_DATA_WIDTH_12; 304 cfg->data_width = IPU_CSI_DATA_WIDTH_12;
305 break; 305 break;
306 case V4L2_MBUS_FMT_JPEG_1X8: 306 case MEDIA_BUS_FMT_JPEG_1X8:
307 /* TODO */ 307 /* TODO */
308 cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_JPEG; 308 cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_JPEG;
309 cfg->mipi_dt = MIPI_DT_RAW8; 309 cfg->mipi_dt = MIPI_DT_RAW8;
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index e6d8e18dae97..6a58b6c723aa 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -641,9 +641,6 @@ static int sensor_hub_probe(struct hid_device *hdev,
641 goto err_stop_hw; 641 goto err_stop_hw;
642 } 642 }
643 sd->hid_sensor_hub_client_devs[ 643 sd->hid_sensor_hub_client_devs[
644 sd->hid_sensor_client_cnt].id =
645 PLATFORM_DEVID_AUTO;
646 sd->hid_sensor_hub_client_devs[
647 sd->hid_sensor_client_cnt].name = name; 644 sd->hid_sensor_client_cnt].name = name;
648 sd->hid_sensor_hub_client_devs[ 645 sd->hid_sensor_hub_client_devs[
649 sd->hid_sensor_client_cnt].platform_data = 646 sd->hid_sensor_client_cnt].platform_data =
@@ -659,8 +656,9 @@ static int sensor_hub_probe(struct hid_device *hdev,
659 if (last_hsdev) 656 if (last_hsdev)
660 last_hsdev->end_collection_index = i; 657 last_hsdev->end_collection_index = i;
661 658
662 ret = mfd_add_devices(&hdev->dev, 0, sd->hid_sensor_hub_client_devs, 659 ret = mfd_add_hotplug_devices(&hdev->dev,
663 sd->hid_sensor_client_cnt, NULL, 0, NULL); 660 sd->hid_sensor_hub_client_devs,
661 sd->hid_sensor_client_cnt);
664 if (ret < 0) 662 if (ret < 0)
665 goto err_stop_hw; 663 goto err_stop_hw;
666 664
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index 747d54421e73..f09e70cafaf1 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -1095,7 +1095,7 @@ static int i2c_hid_resume(struct device *dev)
1095} 1095}
1096#endif 1096#endif
1097 1097
1098#ifdef CONFIG_PM_RUNTIME 1098#ifdef CONFIG_PM
1099static int i2c_hid_runtime_suspend(struct device *dev) 1099static int i2c_hid_runtime_suspend(struct device *dev)
1100{ 1100{
1101 struct i2c_client *client = to_i2c_client(dev); 1101 struct i2c_client *client = to_i2c_client(dev);
diff --git a/drivers/hsi/controllers/omap_ssi.c b/drivers/hsi/controllers/omap_ssi.c
index bf0eace4cb67..4d5b682fc6af 100644
--- a/drivers/hsi/controllers/omap_ssi.c
+++ b/drivers/hsi/controllers/omap_ssi.c
@@ -555,7 +555,7 @@ static int __exit ssi_remove(struct platform_device *pd)
555 return 0; 555 return 0;
556} 556}
557 557
558#ifdef CONFIG_PM_RUNTIME 558#ifdef CONFIG_PM
559static int omap_ssi_runtime_suspend(struct device *dev) 559static int omap_ssi_runtime_suspend(struct device *dev)
560{ 560{
561 struct hsi_controller *ssi = dev_get_drvdata(dev); 561 struct hsi_controller *ssi = dev_get_drvdata(dev);
diff --git a/drivers/hsi/controllers/omap_ssi_port.c b/drivers/hsi/controllers/omap_ssi_port.c
index 4c0b5820581e..d836cfe50513 100644
--- a/drivers/hsi/controllers/omap_ssi_port.c
+++ b/drivers/hsi/controllers/omap_ssi_port.c
@@ -1260,7 +1260,7 @@ static int __exit ssi_port_remove(struct platform_device *pd)
1260 return 0; 1260 return 0;
1261} 1261}
1262 1262
1263#ifdef CONFIG_PM_RUNTIME 1263#ifdef CONFIG_PM
1264static int ssi_save_port_ctx(struct omap_ssi_port *omap_port) 1264static int ssi_save_port_ctx(struct omap_ssi_port *omap_port)
1265{ 1265{
1266 struct hsi_port *port = to_hsi_port(omap_port->dev); 1266 struct hsi_port *port = to_hsi_port(omap_port->dev);
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 5286d7ce1f9e..6529c09c46f0 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -1028,11 +1028,11 @@ config SENSORS_LM93
1028 will be called lm93. 1028 will be called lm93.
1029 1029
1030config SENSORS_LM95234 1030config SENSORS_LM95234
1031 tristate "National Semiconductor LM95234" 1031 tristate "National Semiconductor LM95234 and compatibles"
1032 depends on I2C 1032 depends on I2C
1033 help 1033 help
1034 If you say yes here you get support for the LM95234 temperature 1034 If you say yes here you get support for the LM95233 and LM95234
1035 sensor. 1035 temperature sensor chips.
1036 1036
1037 This driver can also be built as a module. If so, the module 1037 This driver can also be built as a module. If so, the module
1038 will be called lm95234. 1038 will be called lm95234.
@@ -1048,10 +1048,11 @@ config SENSORS_LM95241
1048 will be called lm95241. 1048 will be called lm95241.
1049 1049
1050config SENSORS_LM95245 1050config SENSORS_LM95245
1051 tristate "National Semiconductor LM95245 sensor chip" 1051 tristate "National Semiconductor LM95245 and compatibles"
1052 depends on I2C 1052 depends on I2C
1053 help 1053 help
1054 If you say yes here you get support for LM95245 sensor chip. 1054 If you say yes here you get support for LM95235 and LM95245
1055 temperature sensor chips.
1055 1056
1056 This driver can also be built as a module. If so, the module 1057 This driver can also be built as a module. If so, the module
1057 will be called lm95245. 1058 will be called lm95245.
@@ -1117,12 +1118,23 @@ config SENSORS_NCT6775
1117 help 1118 help
1118 If you say yes here you get support for the hardware monitoring 1119 If you say yes here you get support for the hardware monitoring
1119 functionality of the Nuvoton NCT6106D, NCT6775F, NCT6776F, NCT6779D, 1120 functionality of the Nuvoton NCT6106D, NCT6775F, NCT6776F, NCT6779D,
1120 NCT6791D and compatible Super-I/O chips. This driver replaces the 1121 NCT6791D, NCT6792D and compatible Super-I/O chips. This driver
1121 w83627ehf driver for NCT6775F and NCT6776F. 1122 replaces the w83627ehf driver for NCT6775F and NCT6776F.
1122 1123
1123 This driver can also be built as a module. If so, the module 1124 This driver can also be built as a module. If so, the module
1124 will be called nct6775. 1125 will be called nct6775.
1125 1126
1127config SENSORS_NCT7802
1128 tristate "Nuvoton NCT7802Y"
1129 depends on I2C
1130 select REGMAP_I2C
1131 help
1132 If you say yes here you get support for the Nuvoton NCT7802Y
1133 hardware monitoring chip.
1134
1135 This driver can also be built as a module. If so, the module
1136 will be called nct7802.
1137
1126config SENSORS_PCF8591 1138config SENSORS_PCF8591
1127 tristate "Philips PCF8591 ADC/DAC" 1139 tristate "Philips PCF8591 ADC/DAC"
1128 depends on I2C 1140 depends on I2C
@@ -1454,7 +1466,7 @@ config SENSORS_TMP401
1454 depends on I2C 1466 depends on I2C
1455 help 1467 help
1456 If you say yes here you get support for Texas Instruments TMP401, 1468 If you say yes here you get support for Texas Instruments TMP401,
1457 TMP411, TMP431, and TMP432 temperature sensor chips. 1469 TMP411, TMP431, TMP432 and TMP435 temperature sensor chips.
1458 1470
1459 This driver can also be built as a module. If so, the module 1471 This driver can also be built as a module. If so, the module
1460 will be called tmp401. 1472 will be called tmp401.
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index c90a7611efaa..67280643bcf0 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -118,6 +118,7 @@ obj-$(CONFIG_SENSORS_MCP3021) += mcp3021.o
118obj-$(CONFIG_SENSORS_MENF21BMC_HWMON) += menf21bmc_hwmon.o 118obj-$(CONFIG_SENSORS_MENF21BMC_HWMON) += menf21bmc_hwmon.o
119obj-$(CONFIG_SENSORS_NCT6683) += nct6683.o 119obj-$(CONFIG_SENSORS_NCT6683) += nct6683.o
120obj-$(CONFIG_SENSORS_NCT6775) += nct6775.o 120obj-$(CONFIG_SENSORS_NCT6775) += nct6775.o
121obj-$(CONFIG_SENSORS_NCT7802) += nct7802.o
121obj-$(CONFIG_SENSORS_NTC_THERMISTOR) += ntc_thermistor.o 122obj-$(CONFIG_SENSORS_NTC_THERMISTOR) += ntc_thermistor.o
122obj-$(CONFIG_SENSORS_PC87360) += pc87360.o 123obj-$(CONFIG_SENSORS_PC87360) += pc87360.o
123obj-$(CONFIG_SENSORS_PC87427) += pc87427.o 124obj-$(CONFIG_SENSORS_PC87427) += pc87427.o
diff --git a/drivers/hwmon/g762.c b/drivers/hwmon/g762.c
index 6aac695b1688..9b55e673b67c 100644
--- a/drivers/hwmon/g762.c
+++ b/drivers/hwmon/g762.c
@@ -1084,10 +1084,8 @@ static int g762_probe(struct i2c_client *client, const struct i2c_device_id *id)
1084 if (ret) 1084 if (ret)
1085 goto clock_dis; 1085 goto clock_dis;
1086 1086
1087 data->hwmon_dev = devm_hwmon_device_register_with_groups(dev, 1087 data->hwmon_dev = hwmon_device_register_with_groups(dev, client->name,
1088 client->name, 1088 data, g762_groups);
1089 data,
1090 g762_groups);
1091 if (IS_ERR(data->hwmon_dev)) { 1089 if (IS_ERR(data->hwmon_dev)) {
1092 ret = PTR_ERR(data->hwmon_dev); 1090 ret = PTR_ERR(data->hwmon_dev);
1093 goto clock_dis; 1091 goto clock_dis;
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
index 4efa1734bdad..36abf814b8c7 100644
--- a/drivers/hwmon/gpio-fan.c
+++ b/drivers/hwmon/gpio-fan.c
@@ -79,7 +79,7 @@ static ssize_t show_fan_alarm(struct device *dev,
79{ 79{
80 struct gpio_fan_data *fan_data = dev_get_drvdata(dev); 80 struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
81 struct gpio_fan_alarm *alarm = fan_data->alarm; 81 struct gpio_fan_alarm *alarm = fan_data->alarm;
82 int value = gpio_get_value(alarm->gpio); 82 int value = gpio_get_value_cansleep(alarm->gpio);
83 83
84 if (alarm->active_low) 84 if (alarm->active_low)
85 value = !value; 85 value = !value;
@@ -131,7 +131,7 @@ static void __set_fan_ctrl(struct gpio_fan_data *fan_data, int ctrl_val)
131 int i; 131 int i;
132 132
133 for (i = 0; i < fan_data->num_ctrl; i++) 133 for (i = 0; i < fan_data->num_ctrl; i++)
134 gpio_set_value(fan_data->ctrl[i], (ctrl_val >> i) & 1); 134 gpio_set_value_cansleep(fan_data->ctrl[i], (ctrl_val >> i) & 1);
135} 135}
136 136
137static int __get_fan_ctrl(struct gpio_fan_data *fan_data) 137static int __get_fan_ctrl(struct gpio_fan_data *fan_data)
@@ -142,7 +142,7 @@ static int __get_fan_ctrl(struct gpio_fan_data *fan_data)
142 for (i = 0; i < fan_data->num_ctrl; i++) { 142 for (i = 0; i < fan_data->num_ctrl; i++) {
143 int value; 143 int value;
144 144
145 value = gpio_get_value(fan_data->ctrl[i]); 145 value = gpio_get_value_cansleep(fan_data->ctrl[i]);
146 ctrl_val |= (value << i); 146 ctrl_val |= (value << i);
147 } 147 }
148 return ctrl_val; 148 return ctrl_val;
@@ -369,7 +369,8 @@ static int fan_ctrl_init(struct gpio_fan_data *fan_data,
369 if (err) 369 if (err)
370 return err; 370 return err;
371 371
372 err = gpio_direction_output(ctrl[i], gpio_get_value(ctrl[i])); 372 err = gpio_direction_output(ctrl[i],
373 gpio_get_value_cansleep(ctrl[i]));
373 if (err) 374 if (err)
374 return err; 375 return err;
375 } 376 }
@@ -549,6 +550,14 @@ static int gpio_fan_probe(struct platform_device *pdev)
549 return 0; 550 return 0;
550} 551}
551 552
553static void gpio_fan_shutdown(struct platform_device *pdev)
554{
555 struct gpio_fan_data *fan_data = dev_get_drvdata(&pdev->dev);
556
557 if (fan_data->ctrl)
558 set_fan_speed(fan_data, 0);
559}
560
552#ifdef CONFIG_PM_SLEEP 561#ifdef CONFIG_PM_SLEEP
553static int gpio_fan_suspend(struct device *dev) 562static int gpio_fan_suspend(struct device *dev)
554{ 563{
@@ -580,6 +589,7 @@ static SIMPLE_DEV_PM_OPS(gpio_fan_pm, gpio_fan_suspend, gpio_fan_resume);
580 589
581static struct platform_driver gpio_fan_driver = { 590static struct platform_driver gpio_fan_driver = {
582 .probe = gpio_fan_probe, 591 .probe = gpio_fan_probe,
592 .shutdown = gpio_fan_shutdown,
583 .driver = { 593 .driver = {
584 .name = "gpio-fan", 594 .name = "gpio-fan",
585 .pm = GPIO_FAN_PM, 595 .pm = GPIO_FAN_PM,
diff --git a/drivers/hwmon/ibmpowernv.c b/drivers/hwmon/ibmpowernv.c
index 6a30eeea94be..7c2c7be182f2 100644
--- a/drivers/hwmon/ibmpowernv.c
+++ b/drivers/hwmon/ibmpowernv.c
@@ -74,9 +74,6 @@ struct platform_data {
74 u32 sensors_count; /* Total count of sensors from each group */ 74 u32 sensors_count; /* Total count of sensors from each group */
75}; 75};
76 76
77/* Platform device representing all the ibmpowernv sensors */
78static struct platform_device *pdevice;
79
80static ssize_t show_sensor(struct device *dev, struct device_attribute *devattr, 77static ssize_t show_sensor(struct device *dev, struct device_attribute *devattr,
81 char *buf) 78 char *buf)
82{ 79{
@@ -99,7 +96,7 @@ static ssize_t show_sensor(struct device *dev, struct device_attribute *devattr,
99 return sprintf(buf, "%u\n", x); 96 return sprintf(buf, "%u\n", x);
100} 97}
101 98
102static int __init get_sensor_index_attr(const char *name, u32 *index, 99static int get_sensor_index_attr(const char *name, u32 *index,
103 char *attr) 100 char *attr)
104{ 101{
105 char *hash_pos = strchr(name, '#'); 102 char *hash_pos = strchr(name, '#');
@@ -136,7 +133,7 @@ static int __init get_sensor_index_attr(const char *name, u32 *index,
136 * which need to be mapped as fan2_input, temp1_max respectively before 133 * which need to be mapped as fan2_input, temp1_max respectively before
137 * populating them inside hwmon device class. 134 * populating them inside hwmon device class.
138 */ 135 */
139static int __init create_hwmon_attr_name(struct device *dev, enum sensors type, 136static int create_hwmon_attr_name(struct device *dev, enum sensors type,
140 const char *node_name, 137 const char *node_name,
141 char *hwmon_attr_name) 138 char *hwmon_attr_name)
142{ 139{
@@ -172,7 +169,7 @@ static int __init create_hwmon_attr_name(struct device *dev, enum sensors type,
172 return 0; 169 return 0;
173} 170}
174 171
175static int __init populate_attr_groups(struct platform_device *pdev) 172static int populate_attr_groups(struct platform_device *pdev)
176{ 173{
177 struct platform_data *pdata = platform_get_drvdata(pdev); 174 struct platform_data *pdata = platform_get_drvdata(pdev);
178 const struct attribute_group **pgroups = pdata->attr_groups; 175 const struct attribute_group **pgroups = pdata->attr_groups;
@@ -180,11 +177,6 @@ static int __init populate_attr_groups(struct platform_device *pdev)
180 enum sensors type; 177 enum sensors type;
181 178
182 opal = of_find_node_by_path("/ibm,opal/sensors"); 179 opal = of_find_node_by_path("/ibm,opal/sensors");
183 if (!opal) {
184 dev_dbg(&pdev->dev, "Opal node 'sensors' not found\n");
185 return -ENODEV;
186 }
187
188 for_each_child_of_node(opal, np) { 180 for_each_child_of_node(opal, np) {
189 if (np->name == NULL) 181 if (np->name == NULL)
190 continue; 182 continue;
@@ -221,7 +213,7 @@ static int __init populate_attr_groups(struct platform_device *pdev)
221 * to the name required by the higher 'hwmon' driver like fan1_input, temp1_max 213 * to the name required by the higher 'hwmon' driver like fan1_input, temp1_max
222 * etc.. 214 * etc..
223 */ 215 */
224static int __init create_device_attrs(struct platform_device *pdev) 216static int create_device_attrs(struct platform_device *pdev)
225{ 217{
226 struct platform_data *pdata = platform_get_drvdata(pdev); 218 struct platform_data *pdata = platform_get_drvdata(pdev);
227 const struct attribute_group **pgroups = pdata->attr_groups; 219 const struct attribute_group **pgroups = pdata->attr_groups;
@@ -280,7 +272,7 @@ exit_put_node:
280 return err; 272 return err;
281} 273}
282 274
283static int __init ibmpowernv_probe(struct platform_device *pdev) 275static int ibmpowernv_probe(struct platform_device *pdev)
284{ 276{
285 struct platform_data *pdata; 277 struct platform_data *pdata;
286 struct device *hwmon_dev; 278 struct device *hwmon_dev;
@@ -309,57 +301,25 @@ static int __init ibmpowernv_probe(struct platform_device *pdev)
309 return PTR_ERR_OR_ZERO(hwmon_dev); 301 return PTR_ERR_OR_ZERO(hwmon_dev);
310} 302}
311 303
312static struct platform_driver ibmpowernv_driver = { 304static const struct platform_device_id opal_sensor_driver_ids[] = {
313 .driver = { 305 {
314 .owner = THIS_MODULE, 306 .name = "opal-sensor",
315 .name = DRVNAME,
316 }, 307 },
308 { }
317}; 309};
310MODULE_DEVICE_TABLE(platform, opal_sensor_driver_ids);
318 311
319static int __init ibmpowernv_init(void) 312static struct platform_driver ibmpowernv_driver = {
320{ 313 .probe = ibmpowernv_probe,
321 int err; 314 .id_table = opal_sensor_driver_ids,
322 315 .driver = {
323 pdevice = platform_device_alloc(DRVNAME, 0); 316 .owner = THIS_MODULE,
324 if (!pdevice) { 317 .name = DRVNAME,
325 pr_err("Device allocation failed\n"); 318 },
326 err = -ENOMEM; 319};
327 goto exit;
328 }
329
330 err = platform_device_add(pdevice);
331 if (err) {
332 pr_err("Device addition failed (%d)\n", err);
333 goto exit_device_put;
334 }
335
336 err = platform_driver_probe(&ibmpowernv_driver, ibmpowernv_probe);
337 if (err) {
338 if (err != -ENODEV)
339 pr_err("Platform driver probe failed (%d)\n", err);
340
341 goto exit_device_del;
342 }
343
344 return 0;
345
346exit_device_del:
347 platform_device_del(pdevice);
348exit_device_put:
349 platform_device_put(pdevice);
350exit:
351 return err;
352}
353 320
354static void __exit ibmpowernv_exit(void) 321module_platform_driver(ibmpowernv_driver);
355{
356 platform_driver_unregister(&ibmpowernv_driver);
357 platform_device_unregister(pdevice);
358}
359 322
360MODULE_AUTHOR("Neelesh Gupta <neelegup@linux.vnet.ibm.com>"); 323MODULE_AUTHOR("Neelesh Gupta <neelegup@linux.vnet.ibm.com>");
361MODULE_DESCRIPTION("IBM POWERNV platform sensors"); 324MODULE_DESCRIPTION("IBM POWERNV platform sensors");
362MODULE_LICENSE("GPL"); 325MODULE_LICENSE("GPL");
363
364module_init(ibmpowernv_init);
365module_exit(ibmpowernv_exit);
diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
index 14c82daab019..980175628563 100644
--- a/drivers/hwmon/iio_hwmon.c
+++ b/drivers/hwmon/iio_hwmon.c
@@ -63,7 +63,7 @@ static int iio_hwmon_probe(struct platform_device *pdev)
63 struct iio_hwmon_state *st; 63 struct iio_hwmon_state *st;
64 struct sensor_device_attribute *a; 64 struct sensor_device_attribute *a;
65 int ret, i; 65 int ret, i;
66 int in_i = 1, temp_i = 1, curr_i = 1; 66 int in_i = 1, temp_i = 1, curr_i = 1, humidity_i = 1;
67 enum iio_chan_type type; 67 enum iio_chan_type type;
68 struct iio_channel *channels; 68 struct iio_channel *channels;
69 const char *name = "iio_hwmon"; 69 const char *name = "iio_hwmon";
@@ -123,6 +123,11 @@ static int iio_hwmon_probe(struct platform_device *pdev)
123 "curr%d_input", 123 "curr%d_input",
124 curr_i++); 124 curr_i++);
125 break; 125 break;
126 case IIO_HUMIDITYRELATIVE:
127 a->dev_attr.attr.name = kasprintf(GFP_KERNEL,
128 "humidity%d_input",
129 humidity_i++);
130 break;
126 default: 131 default:
127 ret = -EINVAL; 132 ret = -EINVAL;
128 goto error_release_channels; 133 goto error_release_channels;
diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c
index bfd3f3eeabcd..e01feba909c3 100644
--- a/drivers/hwmon/ina2xx.c
+++ b/drivers/hwmon/ina2xx.c
@@ -223,6 +223,7 @@ static int ina2xx_probe(struct i2c_client *client,
223 struct device *hwmon_dev; 223 struct device *hwmon_dev;
224 long shunt = 10000; /* default shunt value 10mOhms */ 224 long shunt = 10000; /* default shunt value 10mOhms */
225 u32 val; 225 u32 val;
226 int ret;
226 227
227 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA)) 228 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA))
228 return -ENODEV; 229 return -ENODEV;
@@ -247,12 +248,25 @@ static int ina2xx_probe(struct i2c_client *client,
247 data->config = &ina2xx_config[data->kind]; 248 data->config = &ina2xx_config[data->kind];
248 249
249 /* device configuration */ 250 /* device configuration */
250 i2c_smbus_write_word_swapped(client, INA2XX_CONFIG, 251 ret = i2c_smbus_write_word_swapped(client, INA2XX_CONFIG,
251 data->config->config_default); 252 data->config->config_default);
252 /* set current LSB to 1mA, shunt is in uOhms */ 253 if (ret < 0) {
253 /* (equation 13 in datasheet) */ 254 dev_err(dev,
254 i2c_smbus_write_word_swapped(client, INA2XX_CALIBRATION, 255 "error writing to the config register: %d", ret);
255 data->config->calibration_factor / shunt); 256 return -ENODEV;
257 }
258
259 /*
260 * Set current LSB to 1mA, shunt is in uOhms
261 * (equation 13 in datasheet).
262 */
263 ret = i2c_smbus_write_word_swapped(client, INA2XX_CALIBRATION,
264 data->config->calibration_factor / shunt);
265 if (ret < 0) {
266 dev_err(dev,
267 "error writing to the calibration register: %d", ret);
268 return -ENODEV;
269 }
256 270
257 data->client = client; 271 data->client = client;
258 mutex_init(&data->update_lock); 272 mutex_init(&data->update_lock);
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index d16dbb33a531..6753fd940c76 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -44,6 +44,7 @@ enum lm75_type { /* keep sorted in alphabetical order */
44 g751, 44 g751,
45 lm75, 45 lm75,
46 lm75a, 46 lm75a,
47 lm75b,
47 max6625, 48 max6625,
48 max6626, 49 max6626,
49 mcp980x, 50 mcp980x,
@@ -233,6 +234,10 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id)
233 data->resolution = 9; 234 data->resolution = 9;
234 data->sample_time = HZ / 2; 235 data->sample_time = HZ / 2;
235 break; 236 break;
237 case lm75b:
238 data->resolution = 11;
239 data->sample_time = HZ / 4;
240 break;
236 case max6625: 241 case max6625:
237 data->resolution = 9; 242 data->resolution = 9;
238 data->sample_time = HZ / 4; 243 data->sample_time = HZ / 4;
@@ -322,6 +327,7 @@ static const struct i2c_device_id lm75_ids[] = {
322 { "g751", g751, }, 327 { "g751", g751, },
323 { "lm75", lm75, }, 328 { "lm75", lm75, },
324 { "lm75a", lm75a, }, 329 { "lm75a", lm75a, },
330 { "lm75b", lm75b, },
325 { "max6625", max6625, }, 331 { "max6625", max6625, },
326 { "max6626", max6626, }, 332 { "max6626", max6626, },
327 { "mcp980x", mcp980x, }, 333 { "mcp980x", mcp980x, },
@@ -409,6 +415,12 @@ static int lm75_detect(struct i2c_client *new_client,
409 || i2c_smbus_read_byte_data(new_client, 7) != os) 415 || i2c_smbus_read_byte_data(new_client, 7) != os)
410 return -ENODEV; 416 return -ENODEV;
411 } 417 }
418 /*
419 * It is very unlikely that this is a LM75 if both
420 * hysteresis and temperature limit registers are 0.
421 */
422 if (hyst == 0 && os == 0)
423 return -ENODEV;
412 424
413 /* Addresses cycling */ 425 /* Addresses cycling */
414 for (i = 8; i <= 248; i += 40) { 426 for (i = 8; i <= 248; i += 40) {
diff --git a/drivers/hwmon/lm95234.c b/drivers/hwmon/lm95234.c
index 411202bdaf6b..8796de39ff9b 100644
--- a/drivers/hwmon/lm95234.c
+++ b/drivers/hwmon/lm95234.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Driver for Texas Instruments / National Semiconductor LM95234 2 * Driver for Texas Instruments / National Semiconductor LM95234
3 * 3 *
4 * Copyright (c) 2013 Guenter Roeck <linux@roeck-us.net> 4 * Copyright (c) 2013, 2014 Guenter Roeck <linux@roeck-us.net>
5 * 5 *
6 * Derived from lm95241.c 6 * Derived from lm95241.c
7 * Copyright (C) 2008, 2010 Davide Rizzo <elpa.rizzo@gmail.com> 7 * Copyright (C) 2008, 2010 Davide Rizzo <elpa.rizzo@gmail.com>
@@ -30,7 +30,10 @@
30 30
31#define DRVNAME "lm95234" 31#define DRVNAME "lm95234"
32 32
33static const unsigned short normal_i2c[] = { 0x18, 0x4d, 0x4e, I2C_CLIENT_END }; 33enum chips { lm95233, lm95234 };
34
35static const unsigned short normal_i2c[] = {
36 0x18, 0x2a, 0x2b, 0x4d, 0x4e, I2C_CLIENT_END };
34 37
35/* LM95234 registers */ 38/* LM95234 registers */
36#define LM95234_REG_MAN_ID 0xFE 39#define LM95234_REG_MAN_ID 0xFE
@@ -53,11 +56,13 @@ static const unsigned short normal_i2c[] = { 0x18, 0x4d, 0x4e, I2C_CLIENT_END };
53#define LM95234_REG_TCRIT_HYST 0x5a 56#define LM95234_REG_TCRIT_HYST 0x5a
54 57
55#define NATSEMI_MAN_ID 0x01 58#define NATSEMI_MAN_ID 0x01
59#define LM95233_CHIP_ID 0x89
56#define LM95234_CHIP_ID 0x79 60#define LM95234_CHIP_ID 0x79
57 61
58/* Client data (each client gets its own) */ 62/* Client data (each client gets its own) */
59struct lm95234_data { 63struct lm95234_data {
60 struct i2c_client *client; 64 struct i2c_client *client;
65 const struct attribute_group *groups[3];
61 struct mutex update_lock; 66 struct mutex update_lock;
62 unsigned long last_updated, interval; /* in jiffies */ 67 unsigned long last_updated, interval; /* in jiffies */
63 bool valid; /* false until following fields are valid */ 68 bool valid; /* false until following fields are valid */
@@ -564,35 +569,23 @@ static SENSOR_DEVICE_ATTR(temp5_offset, S_IWUSR | S_IRUGO, show_offset,
564static DEVICE_ATTR(update_interval, S_IWUSR | S_IRUGO, show_interval, 569static DEVICE_ATTR(update_interval, S_IWUSR | S_IRUGO, show_interval,
565 set_interval); 570 set_interval);
566 571
567static struct attribute *lm95234_attrs[] = { 572static struct attribute *lm95234_common_attrs[] = {
568 &sensor_dev_attr_temp1_input.dev_attr.attr, 573 &sensor_dev_attr_temp1_input.dev_attr.attr,
569 &sensor_dev_attr_temp2_input.dev_attr.attr, 574 &sensor_dev_attr_temp2_input.dev_attr.attr,
570 &sensor_dev_attr_temp3_input.dev_attr.attr, 575 &sensor_dev_attr_temp3_input.dev_attr.attr,
571 &sensor_dev_attr_temp4_input.dev_attr.attr,
572 &sensor_dev_attr_temp5_input.dev_attr.attr,
573 &sensor_dev_attr_temp2_fault.dev_attr.attr, 576 &sensor_dev_attr_temp2_fault.dev_attr.attr,
574 &sensor_dev_attr_temp3_fault.dev_attr.attr, 577 &sensor_dev_attr_temp3_fault.dev_attr.attr,
575 &sensor_dev_attr_temp4_fault.dev_attr.attr,
576 &sensor_dev_attr_temp5_fault.dev_attr.attr,
577 &sensor_dev_attr_temp2_type.dev_attr.attr, 578 &sensor_dev_attr_temp2_type.dev_attr.attr,
578 &sensor_dev_attr_temp3_type.dev_attr.attr, 579 &sensor_dev_attr_temp3_type.dev_attr.attr,
579 &sensor_dev_attr_temp4_type.dev_attr.attr,
580 &sensor_dev_attr_temp5_type.dev_attr.attr,
581 &sensor_dev_attr_temp1_max.dev_attr.attr, 580 &sensor_dev_attr_temp1_max.dev_attr.attr,
582 &sensor_dev_attr_temp2_max.dev_attr.attr, 581 &sensor_dev_attr_temp2_max.dev_attr.attr,
583 &sensor_dev_attr_temp3_max.dev_attr.attr, 582 &sensor_dev_attr_temp3_max.dev_attr.attr,
584 &sensor_dev_attr_temp4_max.dev_attr.attr,
585 &sensor_dev_attr_temp5_max.dev_attr.attr,
586 &sensor_dev_attr_temp1_max_hyst.dev_attr.attr, 583 &sensor_dev_attr_temp1_max_hyst.dev_attr.attr,
587 &sensor_dev_attr_temp2_max_hyst.dev_attr.attr, 584 &sensor_dev_attr_temp2_max_hyst.dev_attr.attr,
588 &sensor_dev_attr_temp3_max_hyst.dev_attr.attr, 585 &sensor_dev_attr_temp3_max_hyst.dev_attr.attr,
589 &sensor_dev_attr_temp4_max_hyst.dev_attr.attr,
590 &sensor_dev_attr_temp5_max_hyst.dev_attr.attr,
591 &sensor_dev_attr_temp1_max_alarm.dev_attr.attr, 586 &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
592 &sensor_dev_attr_temp2_max_alarm.dev_attr.attr, 587 &sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
593 &sensor_dev_attr_temp3_max_alarm.dev_attr.attr, 588 &sensor_dev_attr_temp3_max_alarm.dev_attr.attr,
594 &sensor_dev_attr_temp4_max_alarm.dev_attr.attr,
595 &sensor_dev_attr_temp5_max_alarm.dev_attr.attr,
596 &sensor_dev_attr_temp2_crit.dev_attr.attr, 589 &sensor_dev_attr_temp2_crit.dev_attr.attr,
597 &sensor_dev_attr_temp3_crit.dev_attr.attr, 590 &sensor_dev_attr_temp3_crit.dev_attr.attr,
598 &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr, 591 &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr,
@@ -601,18 +594,44 @@ static struct attribute *lm95234_attrs[] = {
601 &sensor_dev_attr_temp3_crit_alarm.dev_attr.attr, 594 &sensor_dev_attr_temp3_crit_alarm.dev_attr.attr,
602 &sensor_dev_attr_temp2_offset.dev_attr.attr, 595 &sensor_dev_attr_temp2_offset.dev_attr.attr,
603 &sensor_dev_attr_temp3_offset.dev_attr.attr, 596 &sensor_dev_attr_temp3_offset.dev_attr.attr,
597 &dev_attr_update_interval.attr,
598 NULL
599};
600
601static const struct attribute_group lm95234_common_group = {
602 .attrs = lm95234_common_attrs,
603};
604
605static struct attribute *lm95234_attrs[] = {
606 &sensor_dev_attr_temp4_input.dev_attr.attr,
607 &sensor_dev_attr_temp5_input.dev_attr.attr,
608 &sensor_dev_attr_temp4_fault.dev_attr.attr,
609 &sensor_dev_attr_temp5_fault.dev_attr.attr,
610 &sensor_dev_attr_temp4_type.dev_attr.attr,
611 &sensor_dev_attr_temp5_type.dev_attr.attr,
612 &sensor_dev_attr_temp4_max.dev_attr.attr,
613 &sensor_dev_attr_temp5_max.dev_attr.attr,
614 &sensor_dev_attr_temp4_max_hyst.dev_attr.attr,
615 &sensor_dev_attr_temp5_max_hyst.dev_attr.attr,
616 &sensor_dev_attr_temp4_max_alarm.dev_attr.attr,
617 &sensor_dev_attr_temp5_max_alarm.dev_attr.attr,
604 &sensor_dev_attr_temp4_offset.dev_attr.attr, 618 &sensor_dev_attr_temp4_offset.dev_attr.attr,
605 &sensor_dev_attr_temp5_offset.dev_attr.attr, 619 &sensor_dev_attr_temp5_offset.dev_attr.attr,
606 &dev_attr_update_interval.attr,
607 NULL 620 NULL
608}; 621};
609ATTRIBUTE_GROUPS(lm95234); 622
623static const struct attribute_group lm95234_group = {
624 .attrs = lm95234_attrs,
625};
610 626
611static int lm95234_detect(struct i2c_client *client, 627static int lm95234_detect(struct i2c_client *client,
612 struct i2c_board_info *info) 628 struct i2c_board_info *info)
613{ 629{
614 struct i2c_adapter *adapter = client->adapter; 630 struct i2c_adapter *adapter = client->adapter;
631 int address = client->addr;
632 u8 config_mask, model_mask;
615 int mfg_id, chip_id, val; 633 int mfg_id, chip_id, val;
634 const char *name;
616 635
617 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) 636 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
618 return -ENODEV; 637 return -ENODEV;
@@ -622,15 +641,31 @@ static int lm95234_detect(struct i2c_client *client,
622 return -ENODEV; 641 return -ENODEV;
623 642
624 chip_id = i2c_smbus_read_byte_data(client, LM95234_REG_CHIP_ID); 643 chip_id = i2c_smbus_read_byte_data(client, LM95234_REG_CHIP_ID);
625 if (chip_id != LM95234_CHIP_ID) 644 switch (chip_id) {
645 case LM95233_CHIP_ID:
646 if (address != 0x18 && address != 0x2a && address != 0x2b)
647 return -ENODEV;
648 config_mask = 0xbf;
649 model_mask = 0xf9;
650 name = "lm95233";
651 break;
652 case LM95234_CHIP_ID:
653 if (address != 0x18 && address != 0x4d && address != 0x4e)
654 return -ENODEV;
655 config_mask = 0xbc;
656 model_mask = 0xe1;
657 name = "lm95234";
658 break;
659 default:
626 return -ENODEV; 660 return -ENODEV;
661 }
627 662
628 val = i2c_smbus_read_byte_data(client, LM95234_REG_STATUS); 663 val = i2c_smbus_read_byte_data(client, LM95234_REG_STATUS);
629 if (val & 0x30) 664 if (val & 0x30)
630 return -ENODEV; 665 return -ENODEV;
631 666
632 val = i2c_smbus_read_byte_data(client, LM95234_REG_CONFIG); 667 val = i2c_smbus_read_byte_data(client, LM95234_REG_CONFIG);
633 if (val & 0xbc) 668 if (val & config_mask)
634 return -ENODEV; 669 return -ENODEV;
635 670
636 val = i2c_smbus_read_byte_data(client, LM95234_REG_CONVRATE); 671 val = i2c_smbus_read_byte_data(client, LM95234_REG_CONVRATE);
@@ -638,14 +673,14 @@ static int lm95234_detect(struct i2c_client *client,
638 return -ENODEV; 673 return -ENODEV;
639 674
640 val = i2c_smbus_read_byte_data(client, LM95234_REG_REM_MODEL); 675 val = i2c_smbus_read_byte_data(client, LM95234_REG_REM_MODEL);
641 if (val & 0xe1) 676 if (val & model_mask)
642 return -ENODEV; 677 return -ENODEV;
643 678
644 val = i2c_smbus_read_byte_data(client, LM95234_REG_REM_MODEL_STS); 679 val = i2c_smbus_read_byte_data(client, LM95234_REG_REM_MODEL_STS);
645 if (val & 0xe1) 680 if (val & model_mask)
646 return -ENODEV; 681 return -ENODEV;
647 682
648 strlcpy(info->type, "lm95234", I2C_NAME_SIZE); 683 strlcpy(info->type, name, I2C_NAME_SIZE);
649 return 0; 684 return 0;
650} 685}
651 686
@@ -698,15 +733,19 @@ static int lm95234_probe(struct i2c_client *client,
698 if (err < 0) 733 if (err < 0)
699 return err; 734 return err;
700 735
736 data->groups[0] = &lm95234_common_group;
737 if (id->driver_data == lm95234)
738 data->groups[1] = &lm95234_group;
739
701 hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name, 740 hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
702 data, 741 data, data->groups);
703 lm95234_groups);
704 return PTR_ERR_OR_ZERO(hwmon_dev); 742 return PTR_ERR_OR_ZERO(hwmon_dev);
705} 743}
706 744
707/* Driver data (common to all clients) */ 745/* Driver data (common to all clients) */
708static const struct i2c_device_id lm95234_id[] = { 746static const struct i2c_device_id lm95234_id[] = {
709 { "lm95234", 0 }, 747 { "lm95233", lm95233 },
748 { "lm95234", lm95234 },
710 { } 749 { }
711}; 750};
712MODULE_DEVICE_TABLE(i2c, lm95234_id); 751MODULE_DEVICE_TABLE(i2c, lm95234_id);
@@ -725,5 +764,5 @@ static struct i2c_driver lm95234_driver = {
725module_i2c_driver(lm95234_driver); 764module_i2c_driver(lm95234_driver);
726 765
727MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>"); 766MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
728MODULE_DESCRIPTION("LM95234 sensor driver"); 767MODULE_DESCRIPTION("LM95233/LM95234 sensor driver");
729MODULE_LICENSE("GPL"); 768MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/lm95245.c b/drivers/hwmon/lm95245.c
index 0ae0dfdafdff..e7aef4561c83 100644
--- a/drivers/hwmon/lm95245.c
+++ b/drivers/hwmon/lm95245.c
@@ -1,10 +1,8 @@
1/* 1/*
2 * Copyright (C) 2011 Alexander Stein <alexander.stein@systec-electronic.com> 2 * Copyright (C) 2011 Alexander Stein <alexander.stein@systec-electronic.com>
3 * 3 *
4 * The LM95245 is a sensor chip made by National Semiconductors. 4 * The LM95245 is a sensor chip made by TI / National Semiconductor.
5 * It reports up to two temperatures (its own plus an external one). 5 * It reports up to two temperatures (its own plus an external one).
6 * Complete datasheet can be obtained from National's website at:
7 * http://www.national.com/ds.cgi/LM/LM95245.pdf
8 * 6 *
9 * This driver is based on lm95241.c 7 * This driver is based on lm95241.c
10 * 8 *
@@ -34,8 +32,6 @@
34#include <linux/mutex.h> 32#include <linux/mutex.h>
35#include <linux/sysfs.h> 33#include <linux/sysfs.h>
36 34
37#define DEVNAME "lm95245"
38
39static const unsigned short normal_i2c[] = { 35static const unsigned short normal_i2c[] = {
40 0x18, 0x19, 0x29, 0x4c, 0x4d, I2C_CLIENT_END }; 36 0x18, 0x19, 0x29, 0x4c, 0x4d, I2C_CLIENT_END };
41 37
@@ -98,7 +94,8 @@ static const unsigned short normal_i2c[] = {
98#define STATUS1_LOC 0x01 94#define STATUS1_LOC 0x01
99 95
100#define MANUFACTURER_ID 0x01 96#define MANUFACTURER_ID 0x01
101#define DEFAULT_REVISION 0xB3 97#define LM95235_REVISION 0xB1
98#define LM95245_REVISION 0xB3
102 99
103static const u8 lm95245_reg_address[] = { 100static const u8 lm95245_reg_address[] = {
104 LM95245_REG_R_LOCAL_TEMPH_S, 101 LM95245_REG_R_LOCAL_TEMPH_S,
@@ -427,17 +424,32 @@ static int lm95245_detect(struct i2c_client *new_client,
427 struct i2c_board_info *info) 424 struct i2c_board_info *info)
428{ 425{
429 struct i2c_adapter *adapter = new_client->adapter; 426 struct i2c_adapter *adapter = new_client->adapter;
427 int address = new_client->addr;
428 const char *name;
429 int rev, id;
430 430
431 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) 431 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
432 return -ENODEV; 432 return -ENODEV;
433 433
434 if (i2c_smbus_read_byte_data(new_client, LM95245_REG_R_MAN_ID) 434 id = i2c_smbus_read_byte_data(new_client, LM95245_REG_R_MAN_ID);
435 != MANUFACTURER_ID 435 if (id != MANUFACTURER_ID)
436 || i2c_smbus_read_byte_data(new_client, LM95245_REG_R_CHIP_ID)
437 != DEFAULT_REVISION)
438 return -ENODEV; 436 return -ENODEV;
439 437
440 strlcpy(info->type, DEVNAME, I2C_NAME_SIZE); 438 rev = i2c_smbus_read_byte_data(new_client, LM95245_REG_R_CHIP_ID);
439 switch (rev) {
440 case LM95235_REVISION:
441 if (address != 0x18 && address != 0x29 && address != 0x4c)
442 return -ENODEV;
443 name = "lm95235";
444 break;
445 case LM95245_REVISION:
446 name = "lm95245";
447 break;
448 default:
449 return -ENODEV;
450 }
451
452 strlcpy(info->type, name, I2C_NAME_SIZE);
441 return 0; 453 return 0;
442} 454}
443 455
@@ -484,7 +496,8 @@ static int lm95245_probe(struct i2c_client *client,
484 496
485/* Driver data (common to all clients) */ 497/* Driver data (common to all clients) */
486static const struct i2c_device_id lm95245_id[] = { 498static const struct i2c_device_id lm95245_id[] = {
487 { DEVNAME, 0 }, 499 { "lm95235", 0 },
500 { "lm95245", 0 },
488 { } 501 { }
489}; 502};
490MODULE_DEVICE_TABLE(i2c, lm95245_id); 503MODULE_DEVICE_TABLE(i2c, lm95245_id);
@@ -492,7 +505,7 @@ MODULE_DEVICE_TABLE(i2c, lm95245_id);
492static struct i2c_driver lm95245_driver = { 505static struct i2c_driver lm95245_driver = {
493 .class = I2C_CLASS_HWMON, 506 .class = I2C_CLASS_HWMON,
494 .driver = { 507 .driver = {
495 .name = DEVNAME, 508 .name = "lm95245",
496 }, 509 },
497 .probe = lm95245_probe, 510 .probe = lm95245_probe,
498 .id_table = lm95245_id, 511 .id_table = lm95245_id,
@@ -503,5 +516,5 @@ static struct i2c_driver lm95245_driver = {
503module_i2c_driver(lm95245_driver); 516module_i2c_driver(lm95245_driver);
504 517
505MODULE_AUTHOR("Alexander Stein <alexander.stein@systec-electronic.com>"); 518MODULE_AUTHOR("Alexander Stein <alexander.stein@systec-electronic.com>");
506MODULE_DESCRIPTION("LM95245 sensor driver"); 519MODULE_DESCRIPTION("LM95235/LM95245 sensor driver");
507MODULE_LICENSE("GPL"); 520MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
index 504cbddbdd90..dc0df57200cd 100644
--- a/drivers/hwmon/nct6775.c
+++ b/drivers/hwmon/nct6775.c
@@ -38,6 +38,7 @@
38 * nct6776f 9 5 3 6+3 0xc330 0xc1 0x5ca3 38 * nct6776f 9 5 3 6+3 0xc330 0xc1 0x5ca3
39 * nct6779d 15 5 5 2+6 0xc560 0xc1 0x5ca3 39 * nct6779d 15 5 5 2+6 0xc560 0xc1 0x5ca3
40 * nct6791d 15 6 6 2+6 0xc800 0xc1 0x5ca3 40 * nct6791d 15 6 6 2+6 0xc800 0xc1 0x5ca3
41 * nct6792d 15 6 6 2+6 0xc910 0xc1 0x5ca3
41 * 42 *
42 * #temp lists the number of monitored temperature sources (first value) plus 43 * #temp lists the number of monitored temperature sources (first value) plus
43 * the number of directly connectable temperature sensors (second value). 44 * the number of directly connectable temperature sensors (second value).
@@ -61,7 +62,7 @@
61 62
62#define USE_ALTERNATE 63#define USE_ALTERNATE
63 64
64enum kinds { nct6106, nct6775, nct6776, nct6779, nct6791 }; 65enum kinds { nct6106, nct6775, nct6776, nct6779, nct6791, nct6792 };
65 66
66/* used to set data->name = nct6775_device_names[data->sio_kind] */ 67/* used to set data->name = nct6775_device_names[data->sio_kind] */
67static const char * const nct6775_device_names[] = { 68static const char * const nct6775_device_names[] = {
@@ -70,6 +71,7 @@ static const char * const nct6775_device_names[] = {
70 "nct6776", 71 "nct6776",
71 "nct6779", 72 "nct6779",
72 "nct6791", 73 "nct6791",
74 "nct6792",
73}; 75};
74 76
75static unsigned short force_id; 77static unsigned short force_id;
@@ -100,6 +102,7 @@ MODULE_PARM_DESC(fan_debounce, "Enable debouncing for fan RPM signal");
100#define SIO_NCT6776_ID 0xc330 102#define SIO_NCT6776_ID 0xc330
101#define SIO_NCT6779_ID 0xc560 103#define SIO_NCT6779_ID 0xc560
102#define SIO_NCT6791_ID 0xc800 104#define SIO_NCT6791_ID 0xc800
105#define SIO_NCT6792_ID 0xc910
103#define SIO_ID_MASK 0xFFF0 106#define SIO_ID_MASK 0xFFF0
104 107
105enum pwm_enable { off, manual, thermal_cruise, speed_cruise, sf3, sf4 }; 108enum pwm_enable { off, manual, thermal_cruise, speed_cruise, sf3, sf4 };
@@ -529,6 +532,12 @@ static const s8 NCT6791_ALARM_BITS[] = {
529 4, 5, 13, -1, -1, -1, /* temp1..temp6 */ 532 4, 5, 13, -1, -1, -1, /* temp1..temp6 */
530 12, 9 }; /* intrusion0, intrusion1 */ 533 12, 9 }; /* intrusion0, intrusion1 */
531 534
535/* NCT6792 specific data */
536
537static const u16 NCT6792_REG_TEMP_MON[] = {
538 0x73, 0x75, 0x77, 0x79, 0x7b, 0x7d };
539static const u16 NCT6792_REG_BEEP[NUM_REG_BEEP] = {
540 0xb2, 0xb3, 0xb4, 0xb5, 0xbf };
532 541
533/* NCT6102D/NCT6106D specific data */ 542/* NCT6102D/NCT6106D specific data */
534 543
@@ -1043,13 +1052,14 @@ static bool is_word_sized(struct nct6775_data *data, u16 reg)
1043 reg == 0x73 || reg == 0x75 || reg == 0x77; 1052 reg == 0x73 || reg == 0x75 || reg == 0x77;
1044 case nct6779: 1053 case nct6779:
1045 case nct6791: 1054 case nct6791:
1055 case nct6792:
1046 return reg == 0x150 || reg == 0x153 || reg == 0x155 || 1056 return reg == 0x150 || reg == 0x153 || reg == 0x155 ||
1047 ((reg & 0xfff0) == 0x4b0 && (reg & 0x000f) < 0x0b) || 1057 ((reg & 0xfff0) == 0x4b0 && (reg & 0x000f) < 0x0b) ||
1048 reg == 0x402 || 1058 reg == 0x402 ||
1049 reg == 0x63a || reg == 0x63c || reg == 0x63e || 1059 reg == 0x63a || reg == 0x63c || reg == 0x63e ||
1050 reg == 0x640 || reg == 0x642 || 1060 reg == 0x640 || reg == 0x642 ||
1051 reg == 0x73 || reg == 0x75 || reg == 0x77 || reg == 0x79 || 1061 reg == 0x73 || reg == 0x75 || reg == 0x77 || reg == 0x79 ||
1052 reg == 0x7b; 1062 reg == 0x7b || reg == 0x7d;
1053 } 1063 }
1054 return false; 1064 return false;
1055} 1065}
@@ -1063,6 +1073,7 @@ static bool is_word_sized(struct nct6775_data *data, u16 reg)
1063static inline void nct6775_set_bank(struct nct6775_data *data, u16 reg) 1073static inline void nct6775_set_bank(struct nct6775_data *data, u16 reg)
1064{ 1074{
1065 u8 bank = reg >> 8; 1075 u8 bank = reg >> 8;
1076
1066 if (data->bank != bank) { 1077 if (data->bank != bank) {
1067 outb_p(NCT6775_REG_BANK, data->addr + ADDR_REG_OFFSET); 1078 outb_p(NCT6775_REG_BANK, data->addr + ADDR_REG_OFFSET);
1068 outb_p(bank, data->addr + DATA_REG_OFFSET); 1079 outb_p(bank, data->addr + DATA_REG_OFFSET);
@@ -1300,6 +1311,7 @@ static void nct6775_update_pwm(struct device *dev)
1300 if (!data->target_speed_tolerance[i] || 1311 if (!data->target_speed_tolerance[i] ||
1301 data->pwm_enable[i] == speed_cruise) { 1312 data->pwm_enable[i] == speed_cruise) {
1302 u8 t = fanmodecfg & 0x0f; 1313 u8 t = fanmodecfg & 0x0f;
1314
1303 if (data->REG_TOLERANCE_H) { 1315 if (data->REG_TOLERANCE_H) {
1304 t |= (nct6775_read_value(data, 1316 t |= (nct6775_read_value(data,
1305 data->REG_TOLERANCE_H[i]) & 0x70) >> 1; 1317 data->REG_TOLERANCE_H[i]) & 0x70) >> 1;
@@ -1391,6 +1403,7 @@ static void nct6775_update_pwm_limits(struct device *dev)
1391 case nct6106: 1403 case nct6106:
1392 case nct6779: 1404 case nct6779:
1393 case nct6791: 1405 case nct6791:
1406 case nct6792:
1394 reg = nct6775_read_value(data, 1407 reg = nct6775_read_value(data,
1395 data->REG_CRITICAL_PWM_ENABLE[i]); 1408 data->REG_CRITICAL_PWM_ENABLE[i]);
1396 if (reg & data->CRITICAL_PWM_ENABLE_MASK) 1409 if (reg & data->CRITICAL_PWM_ENABLE_MASK)
@@ -1473,6 +1486,7 @@ static struct nct6775_data *nct6775_update_device(struct device *dev)
1473 data->alarms = 0; 1486 data->alarms = 0;
1474 for (i = 0; i < NUM_REG_ALARM; i++) { 1487 for (i = 0; i < NUM_REG_ALARM; i++) {
1475 u8 alarm; 1488 u8 alarm;
1489
1476 if (!data->REG_ALARM[i]) 1490 if (!data->REG_ALARM[i])
1477 continue; 1491 continue;
1478 alarm = nct6775_read_value(data, data->REG_ALARM[i]); 1492 alarm = nct6775_read_value(data, data->REG_ALARM[i]);
@@ -1482,6 +1496,7 @@ static struct nct6775_data *nct6775_update_device(struct device *dev)
1482 data->beeps = 0; 1496 data->beeps = 0;
1483 for (i = 0; i < NUM_REG_BEEP; i++) { 1497 for (i = 0; i < NUM_REG_BEEP; i++) {
1484 u8 beep; 1498 u8 beep;
1499
1485 if (!data->REG_BEEP[i]) 1500 if (!data->REG_BEEP[i])
1486 continue; 1501 continue;
1487 beep = nct6775_read_value(data, data->REG_BEEP[i]); 1502 beep = nct6775_read_value(data, data->REG_BEEP[i]);
@@ -1504,8 +1519,9 @@ show_in_reg(struct device *dev, struct device_attribute *attr, char *buf)
1504{ 1519{
1505 struct nct6775_data *data = nct6775_update_device(dev); 1520 struct nct6775_data *data = nct6775_update_device(dev);
1506 struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr); 1521 struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
1507 int nr = sattr->nr;
1508 int index = sattr->index; 1522 int index = sattr->index;
1523 int nr = sattr->nr;
1524
1509 return sprintf(buf, "%ld\n", in_from_reg(data->in[nr][index], nr)); 1525 return sprintf(buf, "%ld\n", in_from_reg(data->in[nr][index], nr));
1510} 1526}
1511 1527
@@ -1515,10 +1531,12 @@ store_in_reg(struct device *dev, struct device_attribute *attr, const char *buf,
1515{ 1531{
1516 struct nct6775_data *data = dev_get_drvdata(dev); 1532 struct nct6775_data *data = dev_get_drvdata(dev);
1517 struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr); 1533 struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
1518 int nr = sattr->nr;
1519 int index = sattr->index; 1534 int index = sattr->index;
1535 int nr = sattr->nr;
1520 unsigned long val; 1536 unsigned long val;
1521 int err = kstrtoul(buf, 10, &val); 1537 int err;
1538
1539 err = kstrtoul(buf, 10, &val);
1522 if (err < 0) 1540 if (err < 0)
1523 return err; 1541 return err;
1524 mutex_lock(&data->update_lock); 1542 mutex_lock(&data->update_lock);
@@ -1535,6 +1553,7 @@ show_alarm(struct device *dev, struct device_attribute *attr, char *buf)
1535 struct nct6775_data *data = nct6775_update_device(dev); 1553 struct nct6775_data *data = nct6775_update_device(dev);
1536 struct sensor_device_attribute *sattr = to_sensor_dev_attr(attr); 1554 struct sensor_device_attribute *sattr = to_sensor_dev_attr(attr);
1537 int nr = data->ALARM_BITS[sattr->index]; 1555 int nr = data->ALARM_BITS[sattr->index];
1556
1538 return sprintf(buf, "%u\n", 1557 return sprintf(buf, "%u\n",
1539 (unsigned int)((data->alarms >> nr) & 0x01)); 1558 (unsigned int)((data->alarms >> nr) & 0x01));
1540} 1559}
@@ -1570,6 +1589,7 @@ show_temp_alarm(struct device *dev, struct device_attribute *attr, char *buf)
1570 nr = find_temp_source(data, sattr->index, data->num_temp_alarms); 1589 nr = find_temp_source(data, sattr->index, data->num_temp_alarms);
1571 if (nr >= 0) { 1590 if (nr >= 0) {
1572 int bit = data->ALARM_BITS[nr + TEMP_ALARM_BASE]; 1591 int bit = data->ALARM_BITS[nr + TEMP_ALARM_BASE];
1592
1573 alarm = (data->alarms >> bit) & 0x01; 1593 alarm = (data->alarms >> bit) & 0x01;
1574 } 1594 }
1575 return sprintf(buf, "%u\n", alarm); 1595 return sprintf(buf, "%u\n", alarm);
@@ -1595,8 +1615,9 @@ store_beep(struct device *dev, struct device_attribute *attr, const char *buf,
1595 int nr = data->BEEP_BITS[sattr->index]; 1615 int nr = data->BEEP_BITS[sattr->index];
1596 int regindex = nr >> 3; 1616 int regindex = nr >> 3;
1597 unsigned long val; 1617 unsigned long val;
1618 int err;
1598 1619
1599 int err = kstrtoul(buf, 10, &val); 1620 err = kstrtoul(buf, 10, &val);
1600 if (err < 0) 1621 if (err < 0)
1601 return err; 1622 return err;
1602 if (val > 1) 1623 if (val > 1)
@@ -1629,6 +1650,7 @@ show_temp_beep(struct device *dev, struct device_attribute *attr, char *buf)
1629 nr = find_temp_source(data, sattr->index, data->num_temp_beeps); 1650 nr = find_temp_source(data, sattr->index, data->num_temp_beeps);
1630 if (nr >= 0) { 1651 if (nr >= 0) {
1631 int bit = data->BEEP_BITS[nr + TEMP_ALARM_BASE]; 1652 int bit = data->BEEP_BITS[nr + TEMP_ALARM_BASE];
1653
1632 beep = (data->beeps >> bit) & 0x01; 1654 beep = (data->beeps >> bit) & 0x01;
1633 } 1655 }
1634 return sprintf(buf, "%u\n", beep); 1656 return sprintf(buf, "%u\n", beep);
@@ -1642,8 +1664,9 @@ store_temp_beep(struct device *dev, struct device_attribute *attr,
1642 struct nct6775_data *data = dev_get_drvdata(dev); 1664 struct nct6775_data *data = dev_get_drvdata(dev);
1643 int nr, bit, regindex; 1665 int nr, bit, regindex;
1644 unsigned long val; 1666 unsigned long val;
1667 int err;
1645 1668
1646 int err = kstrtoul(buf, 10, &val); 1669 err = kstrtoul(buf, 10, &val);
1647 if (err < 0) 1670 if (err < 0)
1648 return err; 1671 return err;
1649 if (val > 1) 1672 if (val > 1)
@@ -1715,6 +1738,7 @@ show_fan(struct device *dev, struct device_attribute *attr, char *buf)
1715 struct nct6775_data *data = nct6775_update_device(dev); 1738 struct nct6775_data *data = nct6775_update_device(dev);
1716 struct sensor_device_attribute *sattr = to_sensor_dev_attr(attr); 1739 struct sensor_device_attribute *sattr = to_sensor_dev_attr(attr);
1717 int nr = sattr->index; 1740 int nr = sattr->index;
1741
1718 return sprintf(buf, "%d\n", data->rpm[nr]); 1742 return sprintf(buf, "%d\n", data->rpm[nr]);
1719} 1743}
1720 1744
@@ -1724,6 +1748,7 @@ show_fan_min(struct device *dev, struct device_attribute *attr, char *buf)
1724 struct nct6775_data *data = nct6775_update_device(dev); 1748 struct nct6775_data *data = nct6775_update_device(dev);
1725 struct sensor_device_attribute *sattr = to_sensor_dev_attr(attr); 1749 struct sensor_device_attribute *sattr = to_sensor_dev_attr(attr);
1726 int nr = sattr->index; 1750 int nr = sattr->index;
1751
1727 return sprintf(buf, "%d\n", 1752 return sprintf(buf, "%d\n",
1728 data->fan_from_reg_min(data->fan_min[nr], 1753 data->fan_from_reg_min(data->fan_min[nr],
1729 data->fan_div[nr])); 1754 data->fan_div[nr]));
@@ -1735,6 +1760,7 @@ show_fan_div(struct device *dev, struct device_attribute *attr, char *buf)
1735 struct nct6775_data *data = nct6775_update_device(dev); 1760 struct nct6775_data *data = nct6775_update_device(dev);
1736 struct sensor_device_attribute *sattr = to_sensor_dev_attr(attr); 1761 struct sensor_device_attribute *sattr = to_sensor_dev_attr(attr);
1737 int nr = sattr->index; 1762 int nr = sattr->index;
1763
1738 return sprintf(buf, "%u\n", div_from_reg(data->fan_div[nr])); 1764 return sprintf(buf, "%u\n", div_from_reg(data->fan_div[nr]));
1739} 1765}
1740 1766
@@ -1746,9 +1772,9 @@ store_fan_min(struct device *dev, struct device_attribute *attr,
1746 struct sensor_device_attribute *sattr = to_sensor_dev_attr(attr); 1772 struct sensor_device_attribute *sattr = to_sensor_dev_attr(attr);
1747 int nr = sattr->index; 1773 int nr = sattr->index;
1748 unsigned long val; 1774 unsigned long val;
1749 int err;
1750 unsigned int reg; 1775 unsigned int reg;
1751 u8 new_div; 1776 u8 new_div;
1777 int err;
1752 1778
1753 err = kstrtoul(buf, 10, &val); 1779 err = kstrtoul(buf, 10, &val);
1754 if (err < 0) 1780 if (err < 0)
@@ -1932,6 +1958,7 @@ show_temp_label(struct device *dev, struct device_attribute *attr, char *buf)
1932 struct nct6775_data *data = nct6775_update_device(dev); 1958 struct nct6775_data *data = nct6775_update_device(dev);
1933 struct sensor_device_attribute *sattr = to_sensor_dev_attr(attr); 1959 struct sensor_device_attribute *sattr = to_sensor_dev_attr(attr);
1934 int nr = sattr->index; 1960 int nr = sattr->index;
1961
1935 return sprintf(buf, "%s\n", data->temp_label[data->temp_src[nr]]); 1962 return sprintf(buf, "%s\n", data->temp_label[data->temp_src[nr]]);
1936} 1963}
1937 1964
@@ -2008,6 +2035,7 @@ show_temp_type(struct device *dev, struct device_attribute *attr, char *buf)
2008 struct nct6775_data *data = nct6775_update_device(dev); 2035 struct nct6775_data *data = nct6775_update_device(dev);
2009 struct sensor_device_attribute *sattr = to_sensor_dev_attr(attr); 2036 struct sensor_device_attribute *sattr = to_sensor_dev_attr(attr);
2010 int nr = sattr->index; 2037 int nr = sattr->index;
2038
2011 return sprintf(buf, "%d\n", (int)data->temp_type[nr]); 2039 return sprintf(buf, "%d\n", (int)data->temp_type[nr]);
2012} 2040}
2013 2041
@@ -2790,6 +2818,7 @@ store_auto_pwm(struct device *dev, struct device_attribute *attr,
2790 case nct6106: 2818 case nct6106:
2791 case nct6779: 2819 case nct6779:
2792 case nct6791: 2820 case nct6791:
2821 case nct6792:
2793 nct6775_write_value(data, data->REG_CRITICAL_PWM[nr], 2822 nct6775_write_value(data, data->REG_CRITICAL_PWM[nr],
2794 val); 2823 val);
2795 reg = nct6775_read_value(data, 2824 reg = nct6775_read_value(data,
@@ -2997,6 +3026,7 @@ static ssize_t
2997show_vid(struct device *dev, struct device_attribute *attr, char *buf) 3026show_vid(struct device *dev, struct device_attribute *attr, char *buf)
2998{ 3027{
2999 struct nct6775_data *data = dev_get_drvdata(dev); 3028 struct nct6775_data *data = dev_get_drvdata(dev);
3029
3000 return sprintf(buf, "%d\n", vid_from_reg(data->vid, data->vrm)); 3030 return sprintf(buf, "%d\n", vid_from_reg(data->vid, data->vrm));
3001} 3031}
3002 3032
@@ -3202,7 +3232,7 @@ nct6775_check_fan_inputs(struct nct6775_data *data)
3202 pwm4pin = false; 3232 pwm4pin = false;
3203 pwm5pin = false; 3233 pwm5pin = false;
3204 pwm6pin = false; 3234 pwm6pin = false;
3205 } else { /* NCT6779D or NCT6791D */ 3235 } else { /* NCT6779D, NCT6791D, or NCT6792D */
3206 regval = superio_inb(sioreg, 0x1c); 3236 regval = superio_inb(sioreg, 0x1c);
3207 3237
3208 fan3pin = !(regval & (1 << 5)); 3238 fan3pin = !(regval & (1 << 5));
@@ -3215,7 +3245,7 @@ nct6775_check_fan_inputs(struct nct6775_data *data)
3215 3245
3216 fan4min = fan4pin; 3246 fan4min = fan4pin;
3217 3247
3218 if (data->kind == nct6791) { 3248 if (data->kind == nct6791 || data->kind == nct6792) {
3219 regval = superio_inb(sioreg, 0x2d); 3249 regval = superio_inb(sioreg, 0x2d);
3220 fan6pin = (regval & (1 << 1)); 3250 fan6pin = (regval & (1 << 1));
3221 pwm6pin = (regval & (1 << 0)); 3251 pwm6pin = (regval & (1 << 0));
@@ -3588,6 +3618,7 @@ static int nct6775_probe(struct platform_device *pdev)
3588 3618
3589 break; 3619 break;
3590 case nct6791: 3620 case nct6791:
3621 case nct6792:
3591 data->in_num = 15; 3622 data->in_num = 15;
3592 data->pwm_num = 6; 3623 data->pwm_num = 6;
3593 data->auto_pwm_num = 4; 3624 data->auto_pwm_num = 4;
@@ -3650,12 +3681,20 @@ static int nct6775_probe(struct platform_device *pdev)
3650 data->REG_WEIGHT_TEMP[1] = NCT6791_REG_WEIGHT_TEMP_STEP_TOL; 3681 data->REG_WEIGHT_TEMP[1] = NCT6791_REG_WEIGHT_TEMP_STEP_TOL;
3651 data->REG_WEIGHT_TEMP[2] = NCT6791_REG_WEIGHT_TEMP_BASE; 3682 data->REG_WEIGHT_TEMP[2] = NCT6791_REG_WEIGHT_TEMP_BASE;
3652 data->REG_ALARM = NCT6791_REG_ALARM; 3683 data->REG_ALARM = NCT6791_REG_ALARM;
3653 data->REG_BEEP = NCT6776_REG_BEEP; 3684 if (data->kind == nct6791)
3685 data->REG_BEEP = NCT6776_REG_BEEP;
3686 else
3687 data->REG_BEEP = NCT6792_REG_BEEP;
3654 3688
3655 reg_temp = NCT6779_REG_TEMP; 3689 reg_temp = NCT6779_REG_TEMP;
3656 reg_temp_mon = NCT6779_REG_TEMP_MON;
3657 num_reg_temp = ARRAY_SIZE(NCT6779_REG_TEMP); 3690 num_reg_temp = ARRAY_SIZE(NCT6779_REG_TEMP);
3658 num_reg_temp_mon = ARRAY_SIZE(NCT6779_REG_TEMP_MON); 3691 if (data->kind == nct6791) {
3692 reg_temp_mon = NCT6779_REG_TEMP_MON;
3693 num_reg_temp_mon = ARRAY_SIZE(NCT6779_REG_TEMP_MON);
3694 } else {
3695 reg_temp_mon = NCT6792_REG_TEMP_MON;
3696 num_reg_temp_mon = ARRAY_SIZE(NCT6792_REG_TEMP_MON);
3697 }
3659 reg_temp_over = NCT6779_REG_TEMP_OVER; 3698 reg_temp_over = NCT6779_REG_TEMP_OVER;
3660 reg_temp_hyst = NCT6779_REG_TEMP_HYST; 3699 reg_temp_hyst = NCT6779_REG_TEMP_HYST;
3661 reg_temp_config = NCT6779_REG_TEMP_CONFIG; 3700 reg_temp_config = NCT6779_REG_TEMP_CONFIG;
@@ -3854,6 +3893,7 @@ static int nct6775_probe(struct platform_device *pdev)
3854 case nct6106: 3893 case nct6106:
3855 case nct6779: 3894 case nct6779:
3856 case nct6791: 3895 case nct6791:
3896 case nct6792:
3857 break; 3897 break;
3858 } 3898 }
3859 3899
@@ -3885,6 +3925,7 @@ static int nct6775_probe(struct platform_device *pdev)
3885 tmp |= 0x3e; 3925 tmp |= 0x3e;
3886 break; 3926 break;
3887 case nct6791: 3927 case nct6791:
3928 case nct6792:
3888 tmp |= 0x7e; 3929 tmp |= 0x7e;
3889 break; 3930 break;
3890 } 3931 }
@@ -3972,7 +4013,7 @@ static int nct6775_resume(struct device *dev)
3972 mutex_lock(&data->update_lock); 4013 mutex_lock(&data->update_lock);
3973 data->bank = 0xff; /* Force initial bank selection */ 4014 data->bank = 0xff; /* Force initial bank selection */
3974 4015
3975 if (data->kind == nct6791) { 4016 if (data->kind == nct6791 || data->kind == nct6792) {
3976 err = superio_enter(data->sioreg); 4017 err = superio_enter(data->sioreg);
3977 if (err) 4018 if (err)
3978 goto abort; 4019 goto abort;
@@ -4052,6 +4093,7 @@ static const char * const nct6775_sio_names[] __initconst = {
4052 "NCT6776D/F", 4093 "NCT6776D/F",
4053 "NCT6779D", 4094 "NCT6779D",
4054 "NCT6791D", 4095 "NCT6791D",
4096 "NCT6792D",
4055}; 4097};
4056 4098
4057/* nct6775_find() looks for a '627 in the Super-I/O config space */ 4099/* nct6775_find() looks for a '627 in the Super-I/O config space */
@@ -4086,6 +4128,9 @@ static int __init nct6775_find(int sioaddr, struct nct6775_sio_data *sio_data)
4086 case SIO_NCT6791_ID: 4128 case SIO_NCT6791_ID:
4087 sio_data->kind = nct6791; 4129 sio_data->kind = nct6791;
4088 break; 4130 break;
4131 case SIO_NCT6792_ID:
4132 sio_data->kind = nct6792;
4133 break;
4089 default: 4134 default:
4090 if (val != 0xffff) 4135 if (val != 0xffff)
4091 pr_debug("unsupported chip ID: 0x%04x\n", val); 4136 pr_debug("unsupported chip ID: 0x%04x\n", val);
@@ -4111,7 +4156,7 @@ static int __init nct6775_find(int sioaddr, struct nct6775_sio_data *sio_data)
4111 superio_outb(sioaddr, SIO_REG_ENABLE, val | 0x01); 4156 superio_outb(sioaddr, SIO_REG_ENABLE, val | 0x01);
4112 } 4157 }
4113 4158
4114 if (sio_data->kind == nct6791) 4159 if (sio_data->kind == nct6791 || sio_data->kind == nct6792)
4115 nct6791_enable_io_mapping(sioaddr); 4160 nct6791_enable_io_mapping(sioaddr);
4116 4161
4117 superio_exit(sioaddr); 4162 superio_exit(sioaddr);
@@ -4221,7 +4266,7 @@ static void __exit sensors_nct6775_exit(void)
4221} 4266}
4222 4267
4223MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>"); 4268MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
4224MODULE_DESCRIPTION("NCT6106D/NCT6775F/NCT6776F/NCT6779D/NCT6791D driver"); 4269MODULE_DESCRIPTION("NCT6106D/NCT6775F/NCT6776F/NCT6779D/NCT6791D/NCT6792D driver");
4225MODULE_LICENSE("GPL"); 4270MODULE_LICENSE("GPL");
4226 4271
4227module_init(sensors_nct6775_init); 4272module_init(sensors_nct6775_init);
diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c
new file mode 100644
index 000000000000..ec5678289e4a
--- /dev/null
+++ b/drivers/hwmon/nct7802.c
@@ -0,0 +1,860 @@
1/*
2 * nct7802 - Driver for Nuvoton NCT7802Y
3 *
4 * Copyright (C) 2014 Guenter Roeck <linux@roeck-us.net>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16
17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
19#include <linux/err.h>
20#include <linux/i2c.h>
21#include <linux/init.h>
22#include <linux/hwmon.h>
23#include <linux/hwmon-sysfs.h>
24#include <linux/jiffies.h>
25#include <linux/module.h>
26#include <linux/mutex.h>
27#include <linux/regmap.h>
28#include <linux/slab.h>
29
30#define DRVNAME "nct7802"
31
/* ADC reading register (MSB) for each of the five voltage channels. */
static const u8 REG_VOLTAGE[5] = { 0x09, 0x0a, 0x0c, 0x0d, 0x0e };

/*
 * Voltage limit register LSBs, indexed [limit - 1][channel]:
 * row 0 is used by the in*_min attributes (index 1), row 1 by in*_max
 * (index 2).  Channel 1 has no limit registers (entries are 0, and no
 * in1 limit attributes are created).
 */
static const u8 REG_VOLTAGE_LIMIT_LSB[2][5] = {
	{ 0x40, 0x00, 0x42, 0x44, 0x46 },
	{ 0x3f, 0x00, 0x41, 0x43, 0x45 },
};

/* Shared registers holding the top two bits of each 10-bit limit. */
static const u8 REG_VOLTAGE_LIMIT_MSB[5] = { 0x48, 0x00, 0x47, 0x47, 0x48 };

/*
 * Bit position of each channel's limit MSBs inside the shared MSB
 * register, same [limit - 1][channel] indexing as the LSB table.
 */
static const u8 REG_VOLTAGE_LIMIT_MSB_SHIFT[2][5] = {
	{ 0, 0, 4, 0, 4 },
	{ 2, 0, 6, 2, 6 },
};

#define REG_BANK		0x00
#define REG_TEMP_LSB		0x05	/* shared LSBs for 11-bit temps */
#define REG_TEMP_PECI_LSB	0x08	/* shared LSBs for PECI temps */
#define REG_VOLTAGE_LOW		0x0f	/* shared LSBs of voltage readings */
#define REG_FANCOUNT_LOW	0x13	/* shared LSBs of fan tach counts */
#define REG_START		0x21
#define REG_MODE		0x22	/* RD1..RD3 sensor mode selection */
#define REG_PECI_ENABLE		0x23	/* gates the two PECI channels */
#define REG_FAN_ENABLE		0x24
#define REG_VMON_ENABLE		0x25
#define REG_VENDOR_ID		0xfd
#define REG_CHIP_ID		0xfe
#define REG_VERSION_ID		0xff
59
/*
 * Data structures and manipulation thereof
 */

struct nct7802_data {
	struct regmap *regmap;
	struct mutex access_lock; /* for multi-byte read and write operations */
};
68
69static int nct7802_read_temp(struct nct7802_data *data,
70 u8 reg_temp, u8 reg_temp_low, int *temp)
71{
72 unsigned int t1, t2 = 0;
73 int err;
74
75 *temp = 0;
76
77 mutex_lock(&data->access_lock);
78 err = regmap_read(data->regmap, reg_temp, &t1);
79 if (err < 0)
80 goto abort;
81 t1 <<= 8;
82 if (reg_temp_low) { /* 11 bit data */
83 err = regmap_read(data->regmap, reg_temp_low, &t2);
84 if (err < 0)
85 goto abort;
86 }
87 t1 |= t2 & 0xe0;
88 *temp = (s16)t1 / 32 * 125;
89abort:
90 mutex_unlock(&data->access_lock);
91 return err;
92}
93
94static int nct7802_read_fan(struct nct7802_data *data, u8 reg_fan)
95{
96 unsigned int f1, f2;
97 int ret;
98
99 mutex_lock(&data->access_lock);
100 ret = regmap_read(data->regmap, reg_fan, &f1);
101 if (ret < 0)
102 goto abort;
103 ret = regmap_read(data->regmap, REG_FANCOUNT_LOW, &f2);
104 if (ret < 0)
105 goto abort;
106 ret = (f1 << 5) | (f2 >> 3);
107 /* convert fan count to rpm */
108 if (ret == 0x1fff) /* maximum value, assume fan is stopped */
109 ret = 0;
110 else if (ret)
111 ret = DIV_ROUND_CLOSEST(1350000U, ret);
112abort:
113 mutex_unlock(&data->access_lock);
114 return ret;
115}
116
117static int nct7802_read_fan_min(struct nct7802_data *data, u8 reg_fan_low,
118 u8 reg_fan_high)
119{
120 unsigned int f1, f2;
121 int ret;
122
123 mutex_lock(&data->access_lock);
124 ret = regmap_read(data->regmap, reg_fan_low, &f1);
125 if (ret < 0)
126 goto abort;
127 ret = regmap_read(data->regmap, reg_fan_high, &f2);
128 if (ret < 0)
129 goto abort;
130 ret = f1 | ((f2 & 0xf8) << 5);
131 /* convert fan count to rpm */
132 if (ret == 0x1fff) /* maximum value, assume no limit */
133 ret = 0;
134 else if (ret)
135 ret = DIV_ROUND_CLOSEST(1350000U, ret);
136abort:
137 mutex_unlock(&data->access_lock);
138 return ret;
139}
140
141static int nct7802_write_fan_min(struct nct7802_data *data, u8 reg_fan_low,
142 u8 reg_fan_high, unsigned int limit)
143{
144 int err;
145
146 if (limit)
147 limit = DIV_ROUND_CLOSEST(1350000U, limit);
148 else
149 limit = 0x1fff;
150 limit = clamp_val(limit, 0, 0x1fff);
151
152 mutex_lock(&data->access_lock);
153 err = regmap_write(data->regmap, reg_fan_low, limit & 0xff);
154 if (err < 0)
155 goto abort;
156
157 err = regmap_write(data->regmap, reg_fan_high, (limit & 0x1f00) >> 5);
158abort:
159 mutex_unlock(&data->access_lock);
160 return err;
161}
162
163static u8 nct7802_vmul[] = { 4, 2, 2, 2, 2 };
164
165static int nct7802_read_voltage(struct nct7802_data *data, int nr, int index)
166{
167 unsigned int v1, v2;
168 int ret;
169
170 mutex_lock(&data->access_lock);
171 if (index == 0) { /* voltage */
172 ret = regmap_read(data->regmap, REG_VOLTAGE[nr], &v1);
173 if (ret < 0)
174 goto abort;
175 ret = regmap_read(data->regmap, REG_VOLTAGE_LOW, &v2);
176 if (ret < 0)
177 goto abort;
178 ret = ((v1 << 2) | (v2 >> 6)) * nct7802_vmul[nr];
179 } else { /* limit */
180 int shift = 8 - REG_VOLTAGE_LIMIT_MSB_SHIFT[index - 1][nr];
181
182 ret = regmap_read(data->regmap,
183 REG_VOLTAGE_LIMIT_LSB[index - 1][nr], &v1);
184 if (ret < 0)
185 goto abort;
186 ret = regmap_read(data->regmap, REG_VOLTAGE_LIMIT_MSB[nr],
187 &v2);
188 if (ret < 0)
189 goto abort;
190 ret = (v1 | ((v2 << shift) & 0x300)) * nct7802_vmul[nr];
191 }
192abort:
193 mutex_unlock(&data->access_lock);
194 return ret;
195}
196
197static int nct7802_write_voltage(struct nct7802_data *data, int nr, int index,
198 unsigned int voltage)
199{
200 int shift = 8 - REG_VOLTAGE_LIMIT_MSB_SHIFT[index - 1][nr];
201 int err;
202
203 voltage = DIV_ROUND_CLOSEST(voltage, nct7802_vmul[nr]);
204 voltage = clamp_val(voltage, 0, 0x3ff);
205
206 mutex_lock(&data->access_lock);
207 err = regmap_write(data->regmap,
208 REG_VOLTAGE_LIMIT_LSB[index - 1][nr],
209 voltage & 0xff);
210 if (err < 0)
211 goto abort;
212
213 err = regmap_update_bits(data->regmap, REG_VOLTAGE_LIMIT_MSB[nr],
214 0x0300 >> shift, (voltage & 0x0300) >> shift);
215abort:
216 mutex_unlock(&data->access_lock);
217 return err;
218}
219
220static ssize_t show_in(struct device *dev, struct device_attribute *attr,
221 char *buf)
222{
223 struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
224 struct nct7802_data *data = dev_get_drvdata(dev);
225 int voltage;
226
227 voltage = nct7802_read_voltage(data, sattr->nr, sattr->index);
228 if (voltage < 0)
229 return voltage;
230
231 return sprintf(buf, "%d\n", voltage);
232}
233
234static ssize_t store_in(struct device *dev, struct device_attribute *attr,
235 const char *buf, size_t count)
236{
237 struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
238 struct nct7802_data *data = dev_get_drvdata(dev);
239 int index = sattr->index;
240 int nr = sattr->nr;
241 unsigned long val;
242 int err;
243
244 err = kstrtoul(buf, 10, &val);
245 if (err < 0)
246 return err;
247
248 err = nct7802_write_voltage(data, nr, index, val);
249 return err ? : count;
250}
251
252static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
253 char *buf)
254{
255 struct nct7802_data *data = dev_get_drvdata(dev);
256 struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
257 int err, temp;
258
259 err = nct7802_read_temp(data, sattr->nr, sattr->index, &temp);
260 if (err < 0)
261 return err;
262
263 return sprintf(buf, "%d\n", temp);
264}
265
266static ssize_t store_temp(struct device *dev, struct device_attribute *attr,
267 const char *buf, size_t count)
268{
269 struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
270 struct nct7802_data *data = dev_get_drvdata(dev);
271 int nr = sattr->nr;
272 long val;
273 int err;
274
275 err = kstrtol(buf, 10, &val);
276 if (err < 0)
277 return err;
278
279 val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -128, 127);
280
281 err = regmap_write(data->regmap, nr, val & 0xff);
282 return err ? : count;
283}
284
285static ssize_t show_fan(struct device *dev, struct device_attribute *attr,
286 char *buf)
287{
288 struct sensor_device_attribute *sattr = to_sensor_dev_attr(attr);
289 struct nct7802_data *data = dev_get_drvdata(dev);
290 int speed;
291
292 speed = nct7802_read_fan(data, sattr->index);
293 if (speed < 0)
294 return speed;
295
296 return sprintf(buf, "%d\n", speed);
297}
298
299static ssize_t show_fan_min(struct device *dev, struct device_attribute *attr,
300 char *buf)
301{
302 struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
303 struct nct7802_data *data = dev_get_drvdata(dev);
304 int speed;
305
306 speed = nct7802_read_fan_min(data, sattr->nr, sattr->index);
307 if (speed < 0)
308 return speed;
309
310 return sprintf(buf, "%d\n", speed);
311}
312
313static ssize_t store_fan_min(struct device *dev, struct device_attribute *attr,
314 const char *buf, size_t count)
315{
316 struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
317 struct nct7802_data *data = dev_get_drvdata(dev);
318 unsigned long val;
319 int err;
320
321 err = kstrtoul(buf, 10, &val);
322 if (err < 0)
323 return err;
324
325 err = nct7802_write_fan_min(data, sattr->nr, sattr->index, val);
326 return err ? : count;
327}
328
329static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
330 char *buf)
331{
332 struct nct7802_data *data = dev_get_drvdata(dev);
333 struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
334 int bit = sattr->index;
335 unsigned int val;
336 int ret;
337
338 ret = regmap_read(data->regmap, sattr->nr, &val);
339 if (ret < 0)
340 return ret;
341
342 return sprintf(buf, "%u\n", !!(val & (1 << bit)));
343}
344
345static ssize_t
346show_beep(struct device *dev, struct device_attribute *attr, char *buf)
347{
348 struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
349 struct nct7802_data *data = dev_get_drvdata(dev);
350 unsigned int regval;
351 int err;
352
353 err = regmap_read(data->regmap, sattr->nr, &regval);
354 if (err)
355 return err;
356
357 return sprintf(buf, "%u\n", !!(regval & (1 << sattr->index)));
358}
359
360static ssize_t
361store_beep(struct device *dev, struct device_attribute *attr, const char *buf,
362 size_t count)
363{
364 struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
365 struct nct7802_data *data = dev_get_drvdata(dev);
366 unsigned long val;
367 int err;
368
369 err = kstrtoul(buf, 10, &val);
370 if (err < 0)
371 return err;
372 if (val > 1)
373 return -EINVAL;
374
375 err = regmap_update_bits(data->regmap, sattr->nr, 1 << sattr->index,
376 val ? 1 << sattr->index : 0);
377 return err ? : count;
378}
379
/*
 * Temperature attributes.
 * For *_input: nr = MSB register, index = shared LSB register (0 when
 * the sensor only provides 8 bits) — see show_temp/nct7802_read_temp.
 * For writable limits: nr = limit register, index unused (store_temp).
 * For alarms/faults: nr = status register, index = bit (show_alarm).
 * For beeps: nr = enable register, index = bit (show_beep/store_beep).
 */
static SENSOR_DEVICE_ATTR_2(temp1_input, S_IRUGO, show_temp, NULL, 0x01,
			    REG_TEMP_LSB);
static SENSOR_DEVICE_ATTR_2(temp1_min, S_IRUGO | S_IWUSR, show_temp,
			    store_temp, 0x31, 0);
static SENSOR_DEVICE_ATTR_2(temp1_max, S_IRUGO | S_IWUSR, show_temp,
			    store_temp, 0x30, 0);
static SENSOR_DEVICE_ATTR_2(temp1_crit, S_IRUGO | S_IWUSR, show_temp,
			    store_temp, 0x3a, 0);

static SENSOR_DEVICE_ATTR_2(temp2_input, S_IRUGO, show_temp, NULL, 0x02,
			    REG_TEMP_LSB);
static SENSOR_DEVICE_ATTR_2(temp2_min, S_IRUGO | S_IWUSR, show_temp,
			    store_temp, 0x33, 0);
static SENSOR_DEVICE_ATTR_2(temp2_max, S_IRUGO | S_IWUSR, show_temp,
			    store_temp, 0x32, 0);
static SENSOR_DEVICE_ATTR_2(temp2_crit, S_IRUGO | S_IWUSR, show_temp,
			    store_temp, 0x3b, 0);

static SENSOR_DEVICE_ATTR_2(temp3_input, S_IRUGO, show_temp, NULL, 0x03,
			    REG_TEMP_LSB);
static SENSOR_DEVICE_ATTR_2(temp3_min, S_IRUGO | S_IWUSR, show_temp,
			    store_temp, 0x35, 0);
static SENSOR_DEVICE_ATTR_2(temp3_max, S_IRUGO | S_IWUSR, show_temp,
			    store_temp, 0x34, 0);
static SENSOR_DEVICE_ATTR_2(temp3_crit, S_IRUGO | S_IWUSR, show_temp,
			    store_temp, 0x3c, 0);

/* temp4 (local sensor) has an 8-bit reading only: LSB register is 0. */
static SENSOR_DEVICE_ATTR_2(temp4_input, S_IRUGO, show_temp, NULL, 0x04, 0);
static SENSOR_DEVICE_ATTR_2(temp4_min, S_IRUGO | S_IWUSR, show_temp,
			    store_temp, 0x37, 0);
static SENSOR_DEVICE_ATTR_2(temp4_max, S_IRUGO | S_IWUSR, show_temp,
			    store_temp, 0x36, 0);
static SENSOR_DEVICE_ATTR_2(temp4_crit, S_IRUGO | S_IWUSR, show_temp,
			    store_temp, 0x3d, 0);

/* temp5/temp6 readings use the PECI LSB register. */
static SENSOR_DEVICE_ATTR_2(temp5_input, S_IRUGO, show_temp, NULL, 0x06,
			    REG_TEMP_PECI_LSB);
static SENSOR_DEVICE_ATTR_2(temp5_min, S_IRUGO | S_IWUSR, show_temp,
			    store_temp, 0x39, 0);
static SENSOR_DEVICE_ATTR_2(temp5_max, S_IRUGO | S_IWUSR, show_temp,
			    store_temp, 0x38, 0);
static SENSOR_DEVICE_ATTR_2(temp5_crit, S_IRUGO | S_IWUSR, show_temp,
			    store_temp, 0x3e, 0);

static SENSOR_DEVICE_ATTR_2(temp6_input, S_IRUGO, show_temp, NULL, 0x07,
			    REG_TEMP_PECI_LSB);

/* Min/max/crit alarm and fault status bits, one register per kind. */
static SENSOR_DEVICE_ATTR_2(temp1_min_alarm, S_IRUGO, show_alarm, NULL,
			    0x18, 0);
static SENSOR_DEVICE_ATTR_2(temp2_min_alarm, S_IRUGO, show_alarm, NULL,
			    0x18, 1);
static SENSOR_DEVICE_ATTR_2(temp3_min_alarm, S_IRUGO, show_alarm, NULL,
			    0x18, 2);
static SENSOR_DEVICE_ATTR_2(temp4_min_alarm, S_IRUGO, show_alarm, NULL,
			    0x18, 3);
static SENSOR_DEVICE_ATTR_2(temp5_min_alarm, S_IRUGO, show_alarm, NULL,
			    0x18, 4);

static SENSOR_DEVICE_ATTR_2(temp1_max_alarm, S_IRUGO, show_alarm, NULL,
			    0x19, 0);
static SENSOR_DEVICE_ATTR_2(temp2_max_alarm, S_IRUGO, show_alarm, NULL,
			    0x19, 1);
static SENSOR_DEVICE_ATTR_2(temp3_max_alarm, S_IRUGO, show_alarm, NULL,
			    0x19, 2);
static SENSOR_DEVICE_ATTR_2(temp4_max_alarm, S_IRUGO, show_alarm, NULL,
			    0x19, 3);
static SENSOR_DEVICE_ATTR_2(temp5_max_alarm, S_IRUGO, show_alarm, NULL,
			    0x19, 4);

static SENSOR_DEVICE_ATTR_2(temp1_crit_alarm, S_IRUGO, show_alarm, NULL,
			    0x1b, 0);
static SENSOR_DEVICE_ATTR_2(temp2_crit_alarm, S_IRUGO, show_alarm, NULL,
			    0x1b, 1);
static SENSOR_DEVICE_ATTR_2(temp3_crit_alarm, S_IRUGO, show_alarm, NULL,
			    0x1b, 2);
static SENSOR_DEVICE_ATTR_2(temp4_crit_alarm, S_IRUGO, show_alarm, NULL,
			    0x1b, 3);
static SENSOR_DEVICE_ATTR_2(temp5_crit_alarm, S_IRUGO, show_alarm, NULL,
			    0x1b, 4);

static SENSOR_DEVICE_ATTR_2(temp1_fault, S_IRUGO, show_alarm, NULL, 0x17, 0);
static SENSOR_DEVICE_ATTR_2(temp2_fault, S_IRUGO, show_alarm, NULL, 0x17, 1);
static SENSOR_DEVICE_ATTR_2(temp3_fault, S_IRUGO, show_alarm, NULL, 0x17, 2);

static SENSOR_DEVICE_ATTR_2(temp1_beep, S_IRUGO | S_IWUSR, show_beep,
			    store_beep, 0x5c, 0);
static SENSOR_DEVICE_ATTR_2(temp2_beep, S_IRUGO | S_IWUSR, show_beep,
			    store_beep, 0x5c, 1);
static SENSOR_DEVICE_ATTR_2(temp3_beep, S_IRUGO | S_IWUSR, show_beep,
			    store_beep, 0x5c, 2);
static SENSOR_DEVICE_ATTR_2(temp4_beep, S_IRUGO | S_IWUSR, show_beep,
			    store_beep, 0x5c, 3);
static SENSOR_DEVICE_ATTR_2(temp5_beep, S_IRUGO | S_IWUSR, show_beep,
			    store_beep, 0x5c, 4);
static SENSOR_DEVICE_ATTR_2(temp6_beep, S_IRUGO | S_IWUSR, show_beep,
			    store_beep, 0x5c, 5);
476
/*
 * Attribute list order matters: the numeric comments give the index
 * ranges consumed by nct7802_temp_is_visible (temp1 = 0..8,
 * temp2 = 9..17, temp3 = 18..26, temp4 = 27..34, temp5 = 35..42,
 * temp6 = 43..44).
 */
static struct attribute *nct7802_temp_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_min.dev_attr.attr,
	&sensor_dev_attr_temp1_max.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_min_alarm.dev_attr.attr,
	&sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
	&sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
	&sensor_dev_attr_temp1_fault.dev_attr.attr,
	&sensor_dev_attr_temp1_beep.dev_attr.attr,

	&sensor_dev_attr_temp2_input.dev_attr.attr,		/* 9 */
	&sensor_dev_attr_temp2_min.dev_attr.attr,
	&sensor_dev_attr_temp2_max.dev_attr.attr,
	&sensor_dev_attr_temp2_crit.dev_attr.attr,
	&sensor_dev_attr_temp2_min_alarm.dev_attr.attr,
	&sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
	&sensor_dev_attr_temp2_crit_alarm.dev_attr.attr,
	&sensor_dev_attr_temp2_fault.dev_attr.attr,
	&sensor_dev_attr_temp2_beep.dev_attr.attr,

	&sensor_dev_attr_temp3_input.dev_attr.attr,		/* 18 */
	&sensor_dev_attr_temp3_min.dev_attr.attr,
	&sensor_dev_attr_temp3_max.dev_attr.attr,
	&sensor_dev_attr_temp3_crit.dev_attr.attr,
	&sensor_dev_attr_temp3_min_alarm.dev_attr.attr,
	&sensor_dev_attr_temp3_max_alarm.dev_attr.attr,
	&sensor_dev_attr_temp3_crit_alarm.dev_attr.attr,
	&sensor_dev_attr_temp3_fault.dev_attr.attr,
	&sensor_dev_attr_temp3_beep.dev_attr.attr,

	&sensor_dev_attr_temp4_input.dev_attr.attr,		/* 27 */
	&sensor_dev_attr_temp4_min.dev_attr.attr,
	&sensor_dev_attr_temp4_max.dev_attr.attr,
	&sensor_dev_attr_temp4_crit.dev_attr.attr,
	&sensor_dev_attr_temp4_min_alarm.dev_attr.attr,
	&sensor_dev_attr_temp4_max_alarm.dev_attr.attr,
	&sensor_dev_attr_temp4_crit_alarm.dev_attr.attr,
	&sensor_dev_attr_temp4_beep.dev_attr.attr,

	&sensor_dev_attr_temp5_input.dev_attr.attr,		/* 35 */
	&sensor_dev_attr_temp5_min.dev_attr.attr,
	&sensor_dev_attr_temp5_max.dev_attr.attr,
	&sensor_dev_attr_temp5_crit.dev_attr.attr,
	&sensor_dev_attr_temp5_min_alarm.dev_attr.attr,
	&sensor_dev_attr_temp5_max_alarm.dev_attr.attr,
	&sensor_dev_attr_temp5_crit_alarm.dev_attr.attr,
	&sensor_dev_attr_temp5_beep.dev_attr.attr,

	&sensor_dev_attr_temp6_input.dev_attr.attr,		/* 43 */
	&sensor_dev_attr_temp6_beep.dev_attr.attr,

	NULL
};
531
532static umode_t nct7802_temp_is_visible(struct kobject *kobj,
533 struct attribute *attr, int index)
534{
535 struct device *dev = container_of(kobj, struct device, kobj);
536 struct nct7802_data *data = dev_get_drvdata(dev);
537 unsigned int reg;
538 int err;
539
540 err = regmap_read(data->regmap, REG_MODE, &reg);
541 if (err < 0)
542 return 0;
543
544 if (index < 9 &&
545 (reg & 03) != 0x01 && (reg & 0x03) != 0x02) /* RD1 */
546 return 0;
547 if (index >= 9 && index < 18 &&
548 (reg & 0x0c) != 0x04 && (reg & 0x0c) != 0x08) /* RD2 */
549 return 0;
550 if (index >= 18 && index < 27 && (reg & 0x30) != 0x10) /* RD3 */
551 return 0;
552 if (index >= 27 && index < 35) /* local */
553 return attr->mode;
554
555 err = regmap_read(data->regmap, REG_PECI_ENABLE, &reg);
556 if (err < 0)
557 return 0;
558
559 if (index >= 35 && index < 43 && !(reg & 0x01)) /* PECI 0 */
560 return 0;
561
562 if (index >= 0x43 && (!(reg & 0x02))) /* PECI 1 */
563 return 0;
564
565 return attr->mode;
566}
567
/* Temperature attribute group, filtered by nct7802_temp_is_visible. */
static struct attribute_group nct7802_temp_group = {
	.attrs = nct7802_temp_attrs,
	.is_visible = nct7802_temp_is_visible,
};
572
/*
 * Voltage attributes.
 * For show_in/store_in: nr = channel, index = 0 (input), 1 (min),
 * 2 (max).  Alarms read register 0x1e, beeps register 0x5a, with
 * index as the bit number.  Channel 1 exposes only its input.
 */
static SENSOR_DEVICE_ATTR_2(in0_input, S_IRUGO, show_in, NULL, 0, 0);
static SENSOR_DEVICE_ATTR_2(in0_min, S_IRUGO | S_IWUSR, show_in, store_in,
			    0, 1);
static SENSOR_DEVICE_ATTR_2(in0_max, S_IRUGO | S_IWUSR, show_in, store_in,
			    0, 2);
static SENSOR_DEVICE_ATTR_2(in0_alarm, S_IRUGO, show_alarm, NULL, 0x1e, 3);
static SENSOR_DEVICE_ATTR_2(in0_beep, S_IRUGO | S_IWUSR, show_beep, store_beep,
			    0x5a, 3);

static SENSOR_DEVICE_ATTR_2(in1_input, S_IRUGO, show_in, NULL, 1, 0);

static SENSOR_DEVICE_ATTR_2(in2_input, S_IRUGO, show_in, NULL, 2, 0);
static SENSOR_DEVICE_ATTR_2(in2_min, S_IRUGO | S_IWUSR, show_in, store_in,
			    2, 1);
static SENSOR_DEVICE_ATTR_2(in2_max, S_IRUGO | S_IWUSR, show_in, store_in,
			    2, 2);
static SENSOR_DEVICE_ATTR_2(in2_alarm, S_IRUGO, show_alarm, NULL, 0x1e, 0);
static SENSOR_DEVICE_ATTR_2(in2_beep, S_IRUGO | S_IWUSR, show_beep, store_beep,
			    0x5a, 0);

static SENSOR_DEVICE_ATTR_2(in3_input, S_IRUGO, show_in, NULL, 3, 0);
static SENSOR_DEVICE_ATTR_2(in3_min, S_IRUGO | S_IWUSR, show_in, store_in,
			    3, 1);
static SENSOR_DEVICE_ATTR_2(in3_max, S_IRUGO | S_IWUSR, show_in, store_in,
			    3, 2);
static SENSOR_DEVICE_ATTR_2(in3_alarm, S_IRUGO, show_alarm, NULL, 0x1e, 1);
static SENSOR_DEVICE_ATTR_2(in3_beep, S_IRUGO | S_IWUSR, show_beep, store_beep,
			    0x5a, 1);

static SENSOR_DEVICE_ATTR_2(in4_input, S_IRUGO, show_in, NULL, 4, 0);
static SENSOR_DEVICE_ATTR_2(in4_min, S_IRUGO | S_IWUSR, show_in, store_in,
			    4, 1);
static SENSOR_DEVICE_ATTR_2(in4_max, S_IRUGO | S_IWUSR, show_in, store_in,
			    4, 2);
static SENSOR_DEVICE_ATTR_2(in4_alarm, S_IRUGO, show_alarm, NULL, 0x1e, 2);
static SENSOR_DEVICE_ATTR_2(in4_beep, S_IRUGO | S_IWUSR, show_beep, store_beep,
			    0x5a, 2);
610
/*
 * Array position is the index passed to nct7802_in_is_visible():
 *   0..4 in0, 5 in1, 6..10 in2 (VSEN1), 11..15 in3 (VSEN2),
 *   16..20 in4 (VSEN3).
 */
static struct attribute *nct7802_in_attrs[] = {
	&sensor_dev_attr_in0_input.dev_attr.attr,
	&sensor_dev_attr_in0_min.dev_attr.attr,
	&sensor_dev_attr_in0_max.dev_attr.attr,
	&sensor_dev_attr_in0_alarm.dev_attr.attr,
	&sensor_dev_attr_in0_beep.dev_attr.attr,

	&sensor_dev_attr_in1_input.dev_attr.attr,	/* 5 */

	&sensor_dev_attr_in2_input.dev_attr.attr,	/* 6 */
	&sensor_dev_attr_in2_min.dev_attr.attr,
	&sensor_dev_attr_in2_max.dev_attr.attr,
	&sensor_dev_attr_in2_alarm.dev_attr.attr,
	&sensor_dev_attr_in2_beep.dev_attr.attr,

	&sensor_dev_attr_in3_input.dev_attr.attr,	/* 11 */
	&sensor_dev_attr_in3_min.dev_attr.attr,
	&sensor_dev_attr_in3_max.dev_attr.attr,
	&sensor_dev_attr_in3_alarm.dev_attr.attr,
	&sensor_dev_attr_in3_beep.dev_attr.attr,

	&sensor_dev_attr_in4_input.dev_attr.attr,	/* 16 */
	&sensor_dev_attr_in4_min.dev_attr.attr,
	&sensor_dev_attr_in4_max.dev_attr.attr,
	&sensor_dev_attr_in4_alarm.dev_attr.attr,
	&sensor_dev_attr_in4_beep.dev_attr.attr,

	NULL,
};
640
641static umode_t nct7802_in_is_visible(struct kobject *kobj,
642 struct attribute *attr, int index)
643{
644 struct device *dev = container_of(kobj, struct device, kobj);
645 struct nct7802_data *data = dev_get_drvdata(dev);
646 unsigned int reg;
647 int err;
648
649 if (index < 6) /* VCC, VCORE */
650 return attr->mode;
651
652 err = regmap_read(data->regmap, REG_MODE, &reg);
653 if (err < 0)
654 return 0;
655
656 if (index >= 6 && index < 11 && (reg & 0x03) != 0x03) /* VSEN1 */
657 return 0;
658 if (index >= 11 && index < 17 && (reg & 0x0c) != 0x0c) /* VSEN2 */
659 return 0;
660 if (index >= 17 && (reg & 0x30) != 0x30) /* VSEN3 */
661 return 0;
662
663 return attr->mode;
664}
665
/*
 * Voltage attribute group; nct7802_in_is_visible() hides channels whose
 * enable bits in REG_MODE are not set.
 */
static struct attribute_group nct7802_in_group = {
	.attrs = nct7802_in_attrs,
	.is_visible = nct7802_in_is_visible,
};
670
/*
 * Fan sysfs attributes.  show_fan is passed a single register address
 * (0x10..0x12); show_fan_min/store_fan_min are passed a pair of register
 * addresses (presumably the limit register and its high-byte companion —
 * confirm against the handlers defined earlier in this file).  Alarm and
 * beep attributes pass (register, bit).
 */
static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, show_fan, NULL, 0x10);
static SENSOR_DEVICE_ATTR_2(fan1_min, S_IRUGO | S_IWUSR, show_fan_min,
			    store_fan_min, 0x49, 0x4c);
static SENSOR_DEVICE_ATTR_2(fan1_alarm, S_IRUGO, show_alarm, NULL, 0x1a, 0);
static SENSOR_DEVICE_ATTR_2(fan1_beep, S_IRUGO | S_IWUSR, show_beep, store_beep,
			    0x5b, 0);
static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, show_fan, NULL, 0x11);
static SENSOR_DEVICE_ATTR_2(fan2_min, S_IRUGO | S_IWUSR, show_fan_min,
			    store_fan_min, 0x4a, 0x4d);
static SENSOR_DEVICE_ATTR_2(fan2_alarm, S_IRUGO, show_alarm, NULL, 0x1a, 1);
static SENSOR_DEVICE_ATTR_2(fan2_beep, S_IRUGO | S_IWUSR, show_beep, store_beep,
			    0x5b, 1);
static SENSOR_DEVICE_ATTR(fan3_input, S_IRUGO, show_fan, NULL, 0x12);
static SENSOR_DEVICE_ATTR_2(fan3_min, S_IRUGO | S_IWUSR, show_fan_min,
			    store_fan_min, 0x4b, 0x4e);
static SENSOR_DEVICE_ATTR_2(fan3_alarm, S_IRUGO, show_alarm, NULL, 0x1a, 2);
static SENSOR_DEVICE_ATTR_2(fan3_beep, S_IRUGO | S_IWUSR, show_beep, store_beep,
			    0x5b, 2);
689
/*
 * Exactly four attributes per fan (input/min/alarm/beep);
 * nct7802_fan_is_visible() depends on this grouping (index / 4).
 */
static struct attribute *nct7802_fan_attrs[] = {
	&sensor_dev_attr_fan1_input.dev_attr.attr,
	&sensor_dev_attr_fan1_min.dev_attr.attr,
	&sensor_dev_attr_fan1_alarm.dev_attr.attr,
	&sensor_dev_attr_fan1_beep.dev_attr.attr,
	&sensor_dev_attr_fan2_input.dev_attr.attr,
	&sensor_dev_attr_fan2_min.dev_attr.attr,
	&sensor_dev_attr_fan2_alarm.dev_attr.attr,
	&sensor_dev_attr_fan2_beep.dev_attr.attr,
	&sensor_dev_attr_fan3_input.dev_attr.attr,
	&sensor_dev_attr_fan3_min.dev_attr.attr,
	&sensor_dev_attr_fan3_alarm.dev_attr.attr,
	&sensor_dev_attr_fan3_beep.dev_attr.attr,

	NULL
};
706
707static umode_t nct7802_fan_is_visible(struct kobject *kobj,
708 struct attribute *attr, int index)
709{
710 struct device *dev = container_of(kobj, struct device, kobj);
711 struct nct7802_data *data = dev_get_drvdata(dev);
712 int fan = index / 4; /* 4 attributes per fan */
713 unsigned int reg;
714 int err;
715
716 err = regmap_read(data->regmap, REG_FAN_ENABLE, &reg);
717 if (err < 0 || !(reg & (1 << fan)))
718 return 0;
719
720 return attr->mode;
721}
722
/*
 * Fan attribute group; nct7802_fan_is_visible() hides fans that are not
 * enabled in REG_FAN_ENABLE.
 */
static struct attribute_group nct7802_fan_group = {
	.attrs = nct7802_fan_attrs,
	.is_visible = nct7802_fan_is_visible,
};
727
/* All sysfs groups registered with the hwmon device in nct7802_probe(). */
static const struct attribute_group *nct7802_groups[] = {
	&nct7802_temp_group,
	&nct7802_in_group,
	&nct7802_fan_group,
	NULL
};
734
735static int nct7802_detect(struct i2c_client *client,
736 struct i2c_board_info *info)
737{
738 int reg;
739
740 /*
741 * Chip identification registers are only available in bank 0,
742 * so only attempt chip detection if bank 0 is selected
743 */
744 reg = i2c_smbus_read_byte_data(client, REG_BANK);
745 if (reg != 0x00)
746 return -ENODEV;
747
748 reg = i2c_smbus_read_byte_data(client, REG_VENDOR_ID);
749 if (reg != 0x50)
750 return -ENODEV;
751
752 reg = i2c_smbus_read_byte_data(client, REG_CHIP_ID);
753 if (reg != 0xc3)
754 return -ENODEV;
755
756 reg = i2c_smbus_read_byte_data(client, REG_VERSION_ID);
757 if (reg < 0 || (reg & 0xf0) != 0x20)
758 return -ENODEV;
759
760 /* Also validate lower bits of voltage and temperature registers */
761 reg = i2c_smbus_read_byte_data(client, REG_TEMP_LSB);
762 if (reg < 0 || (reg & 0x1f))
763 return -ENODEV;
764
765 reg = i2c_smbus_read_byte_data(client, REG_TEMP_PECI_LSB);
766 if (reg < 0 || (reg & 0x3f))
767 return -ENODEV;
768
769 reg = i2c_smbus_read_byte_data(client, REG_VOLTAGE_LOW);
770 if (reg < 0 || (reg & 0x3f))
771 return -ENODEV;
772
773 strlcpy(info->type, "nct7802", I2C_NAME_SIZE);
774 return 0;
775}
776
777static bool nct7802_regmap_is_volatile(struct device *dev, unsigned int reg)
778{
779 return reg != REG_BANK && reg <= 0x20;
780}
781
782static struct regmap_config nct7802_regmap_config = {
783 .reg_bits = 8,
784 .val_bits = 8,
785 .cache_type = REGCACHE_RBTREE,
786 .volatile_reg = nct7802_regmap_is_volatile,
787};
788
789static int nct7802_init_chip(struct nct7802_data *data)
790{
791 int err;
792
793 /* Enable ADC */
794 err = regmap_update_bits(data->regmap, REG_START, 0x01, 0x01);
795 if (err)
796 return err;
797
798 /* Enable local temperature sensor */
799 err = regmap_update_bits(data->regmap, REG_MODE, 0x40, 0x40);
800 if (err)
801 return err;
802
803 /* Enable Vcore and VCC voltage monitoring */
804 return regmap_update_bits(data->regmap, REG_VMON_ENABLE, 0x03, 0x03);
805}
806
807static int nct7802_probe(struct i2c_client *client,
808 const struct i2c_device_id *id)
809{
810 struct device *dev = &client->dev;
811 struct nct7802_data *data;
812 struct device *hwmon_dev;
813 int ret;
814
815 data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
816 if (data == NULL)
817 return -ENOMEM;
818
819 data->regmap = devm_regmap_init_i2c(client, &nct7802_regmap_config);
820 if (IS_ERR(data->regmap))
821 return PTR_ERR(data->regmap);
822
823 mutex_init(&data->access_lock);
824
825 ret = nct7802_init_chip(data);
826 if (ret < 0)
827 return ret;
828
829 hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
830 data,
831 nct7802_groups);
832 return PTR_ERR_OR_ZERO(hwmon_dev);
833}
834
/* I2C addresses scanned during class-based auto-detection. */
static const unsigned short nct7802_address_list[] = {
	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, I2C_CLIENT_END
};
838
/* Device-name table; exported for module autoloading via modalias. */
static const struct i2c_device_id nct7802_idtable[] = {
	{ "nct7802", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, nct7802_idtable);
844
/*
 * I2C driver definition: hwmon class with auto-detection across
 * nct7802_address_list, verified by nct7802_detect() before binding.
 */
static struct i2c_driver nct7802_driver = {
	.class = I2C_CLASS_HWMON,
	.driver = {
		.name = DRVNAME,
	},
	.detect = nct7802_detect,
	.probe = nct7802_probe,
	.id_table = nct7802_idtable,
	.address_list = nct7802_address_list,
};
855
/* Boilerplate module init/exit that registers the I2C driver. */
module_i2c_driver(nct7802_driver);

MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
MODULE_DESCRIPTION("NCT7802Y Hardware Monitoring Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index 6e1e4935fc62..a674cd83a4e2 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -47,15 +47,22 @@ config SENSORS_LM25066
47 be called lm25066. 47 be called lm25066.
48 48
49config SENSORS_LTC2978 49config SENSORS_LTC2978
50 tristate "Linear Technologies LTC2974, LTC2978, LTC3880, and LTC3883" 50 tristate "Linear Technologies LTC2978 and compatibles"
51 default n 51 default n
52 help 52 help
53 If you say yes here you get hardware monitoring support for Linear 53 If you say yes here you get hardware monitoring support for Linear
54 Technology LTC2974, LTC2978, LTC3880, and LTC3883. 54 Technology LTC2974, LTC2977, LTC2978, LTC3880, LTC3883, and LTM4676.
55 55
56 This driver can also be built as a module. If so, the module will 56 This driver can also be built as a module. If so, the module will
57 be called ltc2978. 57 be called ltc2978.
58 58
59config SENSORS_LTC2978_REGULATOR
60 boolean "Regulator support for LTC2978 and compatibles"
61 depends on SENSORS_LTC2978 && REGULATOR
62 help
63 If you say yes here you get regulator support for Linear
64 Technology LTC2974, LTC2977, LTC2978, LTC3880, LTC3883, and LTM4676.
65
59config SENSORS_MAX16064 66config SENSORS_MAX16064
60 tristate "Maxim MAX16064" 67 tristate "Maxim MAX16064"
61 default n 68 default n
diff --git a/drivers/hwmon/pmbus/ltc2978.c b/drivers/hwmon/pmbus/ltc2978.c
index e24ed521051a..0835050ec245 100644
--- a/drivers/hwmon/pmbus/ltc2978.c
+++ b/drivers/hwmon/pmbus/ltc2978.c
@@ -22,6 +22,7 @@
22#include <linux/err.h> 22#include <linux/err.h>
23#include <linux/slab.h> 23#include <linux/slab.h>
24#include <linux/i2c.h> 24#include <linux/i2c.h>
25#include <linux/regulator/driver.h>
25#include "pmbus.h" 26#include "pmbus.h"
26 27
27enum chips { ltc2974, ltc2977, ltc2978, ltc3880, ltc3883, ltm4676 }; 28enum chips { ltc2974, ltc2977, ltc2978, ltc3880, ltc3883, ltm4676 };
@@ -374,6 +375,19 @@ static const struct i2c_device_id ltc2978_id[] = {
374}; 375};
375MODULE_DEVICE_TABLE(i2c, ltc2978_id); 376MODULE_DEVICE_TABLE(i2c, ltc2978_id);
376 377
378#if IS_ENABLED(CONFIG_SENSORS_LTC2978_REGULATOR)
379static const struct regulator_desc ltc2978_reg_desc[] = {
380 PMBUS_REGULATOR("vout", 0),
381 PMBUS_REGULATOR("vout", 1),
382 PMBUS_REGULATOR("vout", 2),
383 PMBUS_REGULATOR("vout", 3),
384 PMBUS_REGULATOR("vout", 4),
385 PMBUS_REGULATOR("vout", 5),
386 PMBUS_REGULATOR("vout", 6),
387 PMBUS_REGULATOR("vout", 7),
388};
389#endif /* CONFIG_SENSORS_LTC2978_REGULATOR */
390
377static int ltc2978_probe(struct i2c_client *client, 391static int ltc2978_probe(struct i2c_client *client,
378 const struct i2c_device_id *id) 392 const struct i2c_device_id *id)
379{ 393{
@@ -487,13 +501,36 @@ static int ltc2978_probe(struct i2c_client *client,
487 default: 501 default:
488 return -ENODEV; 502 return -ENODEV;
489 } 503 }
504
505#if IS_ENABLED(CONFIG_SENSORS_LTC2978_REGULATOR)
506 info->num_regulators = info->pages;
507 info->reg_desc = ltc2978_reg_desc;
508 if (info->num_regulators > ARRAY_SIZE(ltc2978_reg_desc)) {
509 dev_err(&client->dev, "num_regulators too large!");
510 info->num_regulators = ARRAY_SIZE(ltc2978_reg_desc);
511 }
512#endif
513
490 return pmbus_do_probe(client, id, info); 514 return pmbus_do_probe(client, id, info);
491} 515}
492 516
493/* This is the driver that will be inserted */ 517#ifdef CONFIG_OF
518static const struct of_device_id ltc2978_of_match[] = {
519 { .compatible = "lltc,ltc2974" },
520 { .compatible = "lltc,ltc2977" },
521 { .compatible = "lltc,ltc2978" },
522 { .compatible = "lltc,ltc3880" },
523 { .compatible = "lltc,ltc3883" },
524 { .compatible = "lltc,ltm4676" },
525 { }
526};
527MODULE_DEVICE_TABLE(of, ltc2978_of_match);
528#endif
529
494static struct i2c_driver ltc2978_driver = { 530static struct i2c_driver ltc2978_driver = {
495 .driver = { 531 .driver = {
496 .name = "ltc2978", 532 .name = "ltc2978",
533 .of_match_table = of_match_ptr(ltc2978_of_match),
497 }, 534 },
498 .probe = ltc2978_probe, 535 .probe = ltc2978_probe,
499 .remove = pmbus_do_remove, 536 .remove = pmbus_do_remove,
diff --git a/drivers/hwmon/pmbus/pmbus.h b/drivers/hwmon/pmbus/pmbus.h
index fa9beb3eb60c..89a23ff836e7 100644
--- a/drivers/hwmon/pmbus/pmbus.h
+++ b/drivers/hwmon/pmbus/pmbus.h
@@ -19,6 +19,8 @@
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */ 20 */
21 21
22#include <linux/regulator/driver.h>
23
22#ifndef PMBUS_H 24#ifndef PMBUS_H
23#define PMBUS_H 25#define PMBUS_H
24 26
@@ -186,6 +188,11 @@
186#define PMBUS_VIRT_STATUS_VMON (PMBUS_VIRT_BASE + 35) 188#define PMBUS_VIRT_STATUS_VMON (PMBUS_VIRT_BASE + 35)
187 189
188/* 190/*
191 * OPERATION
192 */
193#define PB_OPERATION_CONTROL_ON (1<<7)
194
195/*
189 * CAPABILITY 196 * CAPABILITY
190 */ 197 */
191#define PB_CAPABILITY_SMBALERT (1<<4) 198#define PB_CAPABILITY_SMBALERT (1<<4)
@@ -365,8 +372,27 @@ struct pmbus_driver_info {
365 */ 372 */
366 int (*identify)(struct i2c_client *client, 373 int (*identify)(struct i2c_client *client,
367 struct pmbus_driver_info *info); 374 struct pmbus_driver_info *info);
375
376 /* Regulator functionality, if supported by this chip driver. */
377 int num_regulators;
378 const struct regulator_desc *reg_desc;
368}; 379};
369 380
381/* Regulator ops */
382
383extern struct regulator_ops pmbus_regulator_ops;
384
385/* Macro for filling in array of struct regulator_desc */
386#define PMBUS_REGULATOR(_name, _id) \
387 [_id] = { \
388 .name = (_name # _id), \
389 .id = (_id), \
390 .of_match = of_match_ptr(_name # _id), \
391 .regulators_node = of_match_ptr("regulators"), \
392 .ops = &pmbus_regulator_ops, \
393 .owner = THIS_MODULE, \
394 }
395
370/* Function declarations */ 396/* Function declarations */
371 397
372void pmbus_clear_cache(struct i2c_client *client); 398void pmbus_clear_cache(struct i2c_client *client);
@@ -375,6 +401,10 @@ int pmbus_read_word_data(struct i2c_client *client, u8 page, u8 reg);
375int pmbus_write_word_data(struct i2c_client *client, u8 page, u8 reg, u16 word); 401int pmbus_write_word_data(struct i2c_client *client, u8 page, u8 reg, u16 word);
376int pmbus_read_byte_data(struct i2c_client *client, int page, u8 reg); 402int pmbus_read_byte_data(struct i2c_client *client, int page, u8 reg);
377int pmbus_write_byte(struct i2c_client *client, int page, u8 value); 403int pmbus_write_byte(struct i2c_client *client, int page, u8 value);
404int pmbus_write_byte_data(struct i2c_client *client, int page, u8 reg,
405 u8 value);
406int pmbus_update_byte_data(struct i2c_client *client, int page, u8 reg,
407 u8 mask, u8 value);
378void pmbus_clear_faults(struct i2c_client *client); 408void pmbus_clear_faults(struct i2c_client *client);
379bool pmbus_check_byte_register(struct i2c_client *client, int page, int reg); 409bool pmbus_check_byte_register(struct i2c_client *client, int page, int reg);
380bool pmbus_check_word_register(struct i2c_client *client, int page, int reg); 410bool pmbus_check_word_register(struct i2c_client *client, int page, int reg);
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index 291d11fe93e7..f2e47c7dd808 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -29,6 +29,8 @@
29#include <linux/hwmon-sysfs.h> 29#include <linux/hwmon-sysfs.h>
30#include <linux/jiffies.h> 30#include <linux/jiffies.h>
31#include <linux/i2c/pmbus.h> 31#include <linux/i2c/pmbus.h>
32#include <linux/regulator/driver.h>
33#include <linux/regulator/machine.h>
32#include "pmbus.h" 34#include "pmbus.h"
33 35
34/* 36/*
@@ -253,6 +255,37 @@ int pmbus_read_byte_data(struct i2c_client *client, int page, u8 reg)
253} 255}
254EXPORT_SYMBOL_GPL(pmbus_read_byte_data); 256EXPORT_SYMBOL_GPL(pmbus_read_byte_data);
255 257
258int pmbus_write_byte_data(struct i2c_client *client, int page, u8 reg, u8 value)
259{
260 int rv;
261
262 rv = pmbus_set_page(client, page);
263 if (rv < 0)
264 return rv;
265
266 return i2c_smbus_write_byte_data(client, reg, value);
267}
268EXPORT_SYMBOL_GPL(pmbus_write_byte_data);
269
270int pmbus_update_byte_data(struct i2c_client *client, int page, u8 reg,
271 u8 mask, u8 value)
272{
273 unsigned int tmp;
274 int rv;
275
276 rv = pmbus_read_byte_data(client, page, reg);
277 if (rv < 0)
278 return rv;
279
280 tmp = (rv & ~mask) | (value & mask);
281
282 if (tmp != rv)
283 rv = pmbus_write_byte_data(client, page, reg, tmp);
284
285 return rv;
286}
287EXPORT_SYMBOL_GPL(pmbus_update_byte_data);
288
256/* 289/*
257 * _pmbus_read_byte_data() is similar to pmbus_read_byte_data(), but checks if 290 * _pmbus_read_byte_data() is similar to pmbus_read_byte_data(), but checks if
258 * a device specific mapping function exists and calls it if necessary. 291 * a device specific mapping function exists and calls it if necessary.
@@ -1727,6 +1760,84 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data,
1727 return 0; 1760 return 0;
1728} 1761}
1729 1762
1763#if IS_ENABLED(CONFIG_REGULATOR)
1764static int pmbus_regulator_is_enabled(struct regulator_dev *rdev)
1765{
1766 struct device *dev = rdev_get_dev(rdev);
1767 struct i2c_client *client = to_i2c_client(dev->parent);
1768 u8 page = rdev_get_id(rdev);
1769 int ret;
1770
1771 ret = pmbus_read_byte_data(client, page, PMBUS_OPERATION);
1772 if (ret < 0)
1773 return ret;
1774
1775 return !!(ret & PB_OPERATION_CONTROL_ON);
1776}
1777
1778static int _pmbus_regulator_on_off(struct regulator_dev *rdev, bool enable)
1779{
1780 struct device *dev = rdev_get_dev(rdev);
1781 struct i2c_client *client = to_i2c_client(dev->parent);
1782 u8 page = rdev_get_id(rdev);
1783
1784 return pmbus_update_byte_data(client, page, PMBUS_OPERATION,
1785 PB_OPERATION_CONTROL_ON,
1786 enable ? PB_OPERATION_CONTROL_ON : 0);
1787}
1788
1789static int pmbus_regulator_enable(struct regulator_dev *rdev)
1790{
1791 return _pmbus_regulator_on_off(rdev, 1);
1792}
1793
1794static int pmbus_regulator_disable(struct regulator_dev *rdev)
1795{
1796 return _pmbus_regulator_on_off(rdev, 0);
1797}
1798
1799struct regulator_ops pmbus_regulator_ops = {
1800 .enable = pmbus_regulator_enable,
1801 .disable = pmbus_regulator_disable,
1802 .is_enabled = pmbus_regulator_is_enabled,
1803};
1804EXPORT_SYMBOL_GPL(pmbus_regulator_ops);
1805
1806static int pmbus_regulator_register(struct pmbus_data *data)
1807{
1808 struct device *dev = data->dev;
1809 const struct pmbus_driver_info *info = data->info;
1810 const struct pmbus_platform_data *pdata = dev_get_platdata(dev);
1811 struct regulator_dev *rdev;
1812 int i;
1813
1814 for (i = 0; i < info->num_regulators; i++) {
1815 struct regulator_config config = { };
1816
1817 config.dev = dev;
1818 config.driver_data = data;
1819
1820 if (pdata && pdata->reg_init_data)
1821 config.init_data = &pdata->reg_init_data[i];
1822
1823 rdev = devm_regulator_register(dev, &info->reg_desc[i],
1824 &config);
1825 if (IS_ERR(rdev)) {
1826 dev_err(dev, "Failed to register %s regulator\n",
1827 info->reg_desc[i].name);
1828 return PTR_ERR(rdev);
1829 }
1830 }
1831
1832 return 0;
1833}
1834#else
1835static int pmbus_regulator_register(struct pmbus_data *data)
1836{
1837 return 0;
1838}
1839#endif
1840
1730int pmbus_do_probe(struct i2c_client *client, const struct i2c_device_id *id, 1841int pmbus_do_probe(struct i2c_client *client, const struct i2c_device_id *id,
1731 struct pmbus_driver_info *info) 1842 struct pmbus_driver_info *info)
1732{ 1843{
@@ -1781,8 +1892,15 @@ int pmbus_do_probe(struct i2c_client *client, const struct i2c_device_id *id,
1781 dev_err(dev, "Failed to register hwmon device\n"); 1892 dev_err(dev, "Failed to register hwmon device\n");
1782 goto out_kfree; 1893 goto out_kfree;
1783 } 1894 }
1895
1896 ret = pmbus_regulator_register(data);
1897 if (ret)
1898 goto out_unregister;
1899
1784 return 0; 1900 return 0;
1785 1901
1902out_unregister:
1903 hwmon_device_unregister(data->hwmon_dev);
1786out_kfree: 1904out_kfree:
1787 kfree(data->group.attrs); 1905 kfree(data->group.attrs);
1788 return ret; 1906 return ret;
diff --git a/drivers/hwmon/tmp401.c b/drivers/hwmon/tmp401.c
index 7fa6e7d0b9b6..99664ebc738d 100644
--- a/drivers/hwmon/tmp401.c
+++ b/drivers/hwmon/tmp401.c
@@ -44,9 +44,10 @@
44#include <linux/sysfs.h> 44#include <linux/sysfs.h>
45 45
46/* Addresses to scan */ 46/* Addresses to scan */
47static const unsigned short normal_i2c[] = { 0x4c, 0x4d, 0x4e, I2C_CLIENT_END }; 47static const unsigned short normal_i2c[] = { 0x37, 0x48, 0x49, 0x4a, 0x4c, 0x4d,
48 0x4e, 0x4f, I2C_CLIENT_END };
48 49
49enum chips { tmp401, tmp411, tmp431, tmp432 }; 50enum chips { tmp401, tmp411, tmp431, tmp432, tmp435 };
50 51
51/* 52/*
52 * The TMP401 registers, note some registers have different addresses for 53 * The TMP401 registers, note some registers have different addresses for
@@ -136,6 +137,7 @@ static const u8 TMP432_STATUS_REG[] = {
136#define TMP411C_DEVICE_ID 0x10 137#define TMP411C_DEVICE_ID 0x10
137#define TMP431_DEVICE_ID 0x31 138#define TMP431_DEVICE_ID 0x31
138#define TMP432_DEVICE_ID 0x32 139#define TMP432_DEVICE_ID 0x32
140#define TMP435_DEVICE_ID 0x35
139 141
140/* 142/*
141 * Driver data (common to all clients) 143 * Driver data (common to all clients)
@@ -146,6 +148,7 @@ static const struct i2c_device_id tmp401_id[] = {
146 { "tmp411", tmp411 }, 148 { "tmp411", tmp411 },
147 { "tmp431", tmp431 }, 149 { "tmp431", tmp431 },
148 { "tmp432", tmp432 }, 150 { "tmp432", tmp432 },
151 { "tmp435", tmp435 },
149 { } 152 { }
150}; 153};
151MODULE_DEVICE_TABLE(i2c, tmp401_id); 154MODULE_DEVICE_TABLE(i2c, tmp401_id);
@@ -613,10 +616,10 @@ static const struct attribute_group tmp432_group = {
613 * Begin non sysfs callback code (aka Real code) 616 * Begin non sysfs callback code (aka Real code)
614 */ 617 */
615 618
616static void tmp401_init_client(struct tmp401_data *data, 619static int tmp401_init_client(struct tmp401_data *data,
617 struct i2c_client *client) 620 struct i2c_client *client)
618{ 621{
619 int config, config_orig; 622 int config, config_orig, status = 0;
620 623
621 /* Set the conversion rate to 2 Hz */ 624 /* Set the conversion rate to 2 Hz */
622 i2c_smbus_write_byte_data(client, TMP401_CONVERSION_RATE_WRITE, 5); 625 i2c_smbus_write_byte_data(client, TMP401_CONVERSION_RATE_WRITE, 5);
@@ -624,16 +627,18 @@ static void tmp401_init_client(struct tmp401_data *data,
624 627
625 /* Start conversions (disable shutdown if necessary) */ 628 /* Start conversions (disable shutdown if necessary) */
626 config = i2c_smbus_read_byte_data(client, TMP401_CONFIG_READ); 629 config = i2c_smbus_read_byte_data(client, TMP401_CONFIG_READ);
627 if (config < 0) { 630 if (config < 0)
628 dev_warn(&client->dev, "Initialization failed!\n"); 631 return config;
629 return;
630 }
631 632
632 config_orig = config; 633 config_orig = config;
633 config &= ~TMP401_CONFIG_SHUTDOWN; 634 config &= ~TMP401_CONFIG_SHUTDOWN;
634 635
635 if (config != config_orig) 636 if (config != config_orig)
636 i2c_smbus_write_byte_data(client, TMP401_CONFIG_WRITE, config); 637 status = i2c_smbus_write_byte_data(client,
638 TMP401_CONFIG_WRITE,
639 config);
640
641 return status;
637} 642}
638 643
639static int tmp401_detect(struct i2c_client *client, 644static int tmp401_detect(struct i2c_client *client,
@@ -675,15 +680,18 @@ static int tmp401_detect(struct i2c_client *client,
675 kind = tmp411; 680 kind = tmp411;
676 break; 681 break;
677 case TMP431_DEVICE_ID: 682 case TMP431_DEVICE_ID:
678 if (client->addr == 0x4e) 683 if (client->addr != 0x4c && client->addr != 0x4d)
679 return -ENODEV; 684 return -ENODEV;
680 kind = tmp431; 685 kind = tmp431;
681 break; 686 break;
682 case TMP432_DEVICE_ID: 687 case TMP432_DEVICE_ID:
683 if (client->addr == 0x4e) 688 if (client->addr != 0x4c && client->addr != 0x4d)
684 return -ENODEV; 689 return -ENODEV;
685 kind = tmp432; 690 kind = tmp432;
686 break; 691 break;
692 case TMP435_DEVICE_ID:
693 kind = tmp435;
694 break;
687 default: 695 default:
688 return -ENODEV; 696 return -ENODEV;
689 } 697 }
@@ -705,11 +713,13 @@ static int tmp401_detect(struct i2c_client *client,
705static int tmp401_probe(struct i2c_client *client, 713static int tmp401_probe(struct i2c_client *client,
706 const struct i2c_device_id *id) 714 const struct i2c_device_id *id)
707{ 715{
708 const char *names[] = { "TMP401", "TMP411", "TMP431", "TMP432" }; 716 static const char * const names[] = {
717 "TMP401", "TMP411", "TMP431", "TMP432", "TMP435"
718 };
709 struct device *dev = &client->dev; 719 struct device *dev = &client->dev;
710 struct device *hwmon_dev; 720 struct device *hwmon_dev;
711 struct tmp401_data *data; 721 struct tmp401_data *data;
712 int groups = 0; 722 int groups = 0, status;
713 723
714 data = devm_kzalloc(dev, sizeof(struct tmp401_data), GFP_KERNEL); 724 data = devm_kzalloc(dev, sizeof(struct tmp401_data), GFP_KERNEL);
715 if (!data) 725 if (!data)
@@ -720,7 +730,9 @@ static int tmp401_probe(struct i2c_client *client,
720 data->kind = id->driver_data; 730 data->kind = id->driver_data;
721 731
722 /* Initialize the TMP401 chip */ 732 /* Initialize the TMP401 chip */
723 tmp401_init_client(data, client); 733 status = tmp401_init_client(data, client);
734 if (status < 0)
735 return status;
724 736
725 /* Register sysfs hooks */ 737 /* Register sysfs hooks */
726 data->groups[groups++] = &tmp401_group; 738 data->groups[groups++] = &tmp401_group;
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 917c3585f45b..b4d135cc2f39 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -881,6 +881,16 @@ config I2C_DIOLAN_U2C
881 This driver can also be built as a module. If so, the module 881 This driver can also be built as a module. If so, the module
882 will be called i2c-diolan-u2c. 882 will be called i2c-diolan-u2c.
883 883
884config I2C_DLN2
885 tristate "Diolan DLN-2 USB I2C adapter"
886 depends on MFD_DLN2
887 help
888 If you say yes to this option, support will be included for Diolan
889 DLN2, a USB to I2C interface.
890
891 This driver can also be built as a module. If so, the module
892 will be called i2c-dln2.
893
884config I2C_PARPORT 894config I2C_PARPORT
885 tristate "Parallel port adapter" 895 tristate "Parallel port adapter"
886 depends on PARPORT 896 depends on PARPORT
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 78d56c54ba2b..cdac7f15eab5 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -87,6 +87,7 @@ obj-$(CONFIG_I2C_RCAR) += i2c-rcar.o
87 87
88# External I2C/SMBus adapter drivers 88# External I2C/SMBus adapter drivers
89obj-$(CONFIG_I2C_DIOLAN_U2C) += i2c-diolan-u2c.o 89obj-$(CONFIG_I2C_DIOLAN_U2C) += i2c-diolan-u2c.o
90obj-$(CONFIG_I2C_DLN2) += i2c-dln2.o
90obj-$(CONFIG_I2C_PARPORT) += i2c-parport.o 91obj-$(CONFIG_I2C_PARPORT) += i2c-parport.o
91obj-$(CONFIG_I2C_PARPORT_LIGHT) += i2c-parport-light.o 92obj-$(CONFIG_I2C_PARPORT_LIGHT) += i2c-parport-light.o
92obj-$(CONFIG_I2C_ROBOTFUZZ_OSIF) += i2c-robotfuzz-osif.o 93obj-$(CONFIG_I2C_ROBOTFUZZ_OSIF) += i2c-robotfuzz-osif.o
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
index 63f3f03ecc9b..c604f4c3ac0d 100644
--- a/drivers/i2c/busses/i2c-cadence.c
+++ b/drivers/i2c/busses/i2c-cadence.c
@@ -111,6 +111,8 @@
111#define CDNS_I2C_DIVA_MAX 4 111#define CDNS_I2C_DIVA_MAX 4
112#define CDNS_I2C_DIVB_MAX 64 112#define CDNS_I2C_DIVB_MAX 64
113 113
114#define CDNS_I2C_TIMEOUT_MAX 0xFF
115
114#define cdns_i2c_readreg(offset) readl_relaxed(id->membase + offset) 116#define cdns_i2c_readreg(offset) readl_relaxed(id->membase + offset)
115#define cdns_i2c_writereg(val, offset) writel_relaxed(val, id->membase + offset) 117#define cdns_i2c_writereg(val, offset) writel_relaxed(val, id->membase + offset)
116 118
@@ -852,6 +854,15 @@ static int cdns_i2c_probe(struct platform_device *pdev)
852 goto err_clk_dis; 854 goto err_clk_dis;
853 } 855 }
854 856
857 /*
858 * Cadence I2C controller has a bug wherein it generates
859 * invalid read transaction after HW timeout in master receiver mode.
860 * HW timeout is not used by this driver and the interrupt is disabled.
861 * But the feature itself cannot be disabled. Hence maximum value
862 * is written to this register to reduce the chances of error.
863 */
864 cdns_i2c_writereg(CDNS_I2C_TIMEOUT_MAX, CDNS_I2C_TIME_OUT_OFFSET);
865
855 dev_info(&pdev->dev, "%u kHz mmio %08lx irq %d\n", 866 dev_info(&pdev->dev, "%u kHz mmio %08lx irq %d\n",
856 id->i2c_clk / 1000, (unsigned long)r_mem->start, id->irq); 867 id->i2c_clk / 1000, (unsigned long)r_mem->start, id->irq);
857 868
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index d15b7c9b9219..01f0cd87a4a5 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -407,11 +407,9 @@ i2c_davinci_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop)
407 if (dev->cmd_err & DAVINCI_I2C_STR_NACK) { 407 if (dev->cmd_err & DAVINCI_I2C_STR_NACK) {
408 if (msg->flags & I2C_M_IGNORE_NAK) 408 if (msg->flags & I2C_M_IGNORE_NAK)
409 return msg->len; 409 return msg->len;
410 if (stop) { 410 w = davinci_i2c_read_reg(dev, DAVINCI_I2C_MDR_REG);
411 w = davinci_i2c_read_reg(dev, DAVINCI_I2C_MDR_REG); 411 w |= DAVINCI_I2C_MDR_STP;
412 w |= DAVINCI_I2C_MDR_STP; 412 davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, w);
413 davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, w);
414 }
415 return -EREMOTEIO; 413 return -EREMOTEIO;
416 } 414 }
417 return -EIO; 415 return -EIO;
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index edca99dbba23..23628b7bfb8d 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -359,7 +359,7 @@ int i2c_dw_init(struct dw_i2c_dev *dev)
359 } 359 }
360 360
361 /* Configure Tx/Rx FIFO threshold levels */ 361 /* Configure Tx/Rx FIFO threshold levels */
362 dw_writel(dev, dev->tx_fifo_depth - 1, DW_IC_TX_TL); 362 dw_writel(dev, dev->tx_fifo_depth / 2, DW_IC_TX_TL);
363 dw_writel(dev, 0, DW_IC_RX_TL); 363 dw_writel(dev, 0, DW_IC_RX_TL);
364 364
365 /* configure the i2c master */ 365 /* configure the i2c master */
diff --git a/drivers/i2c/busses/i2c-dln2.c b/drivers/i2c/busses/i2c-dln2.c
new file mode 100644
index 000000000000..b3fb86af4cbb
--- /dev/null
+++ b/drivers/i2c/busses/i2c-dln2.c
@@ -0,0 +1,262 @@
1/*
2 * Driver for the Diolan DLN-2 USB-I2C adapter
3 *
4 * Copyright (c) 2014 Intel Corporation
5 *
6 * Derived from:
7 * i2c-diolan-u2c.c
8 * Copyright (c) 2010-2011 Ericsson AB
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License as
12 * published by the Free Software Foundation, version 2.
13 */
14
15#include <linux/kernel.h>
16#include <linux/module.h>
17#include <linux/types.h>
18#include <linux/slab.h>
19#include <linux/i2c.h>
20#include <linux/platform_device.h>
21#include <linux/mfd/dln2.h>
22
23#define DLN2_I2C_MODULE_ID 0x03
24#define DLN2_I2C_CMD(cmd) DLN2_CMD(cmd, DLN2_I2C_MODULE_ID)
25
26/* I2C commands */
27#define DLN2_I2C_GET_PORT_COUNT DLN2_I2C_CMD(0x00)
28#define DLN2_I2C_ENABLE DLN2_I2C_CMD(0x01)
29#define DLN2_I2C_DISABLE DLN2_I2C_CMD(0x02)
30#define DLN2_I2C_IS_ENABLED DLN2_I2C_CMD(0x03)
31#define DLN2_I2C_WRITE DLN2_I2C_CMD(0x06)
32#define DLN2_I2C_READ DLN2_I2C_CMD(0x07)
33#define DLN2_I2C_SCAN_DEVICES DLN2_I2C_CMD(0x08)
34#define DLN2_I2C_PULLUP_ENABLE DLN2_I2C_CMD(0x09)
35#define DLN2_I2C_PULLUP_DISABLE DLN2_I2C_CMD(0x0A)
36#define DLN2_I2C_PULLUP_IS_ENABLED DLN2_I2C_CMD(0x0B)
37#define DLN2_I2C_TRANSFER DLN2_I2C_CMD(0x0C)
38#define DLN2_I2C_SET_MAX_REPLY_COUNT DLN2_I2C_CMD(0x0D)
39#define DLN2_I2C_GET_MAX_REPLY_COUNT DLN2_I2C_CMD(0x0E)
40
41#define DLN2_I2C_MAX_XFER_SIZE 256
42#define DLN2_I2C_BUF_SIZE (DLN2_I2C_MAX_XFER_SIZE + 16)
43
44struct dln2_i2c {
45 struct platform_device *pdev;
46 struct i2c_adapter adapter;
47 u8 port;
48 /*
49 * Buffer to hold the packet for read or write transfers. One is enough
50 * since we can't have multiple transfers in parallel on the i2c bus.
51 */
52 void *buf;
53};
54
55static int dln2_i2c_enable(struct dln2_i2c *dln2, bool enable)
56{
57 u16 cmd;
58 struct {
59 u8 port;
60 } tx;
61
62 tx.port = dln2->port;
63
64 if (enable)
65 cmd = DLN2_I2C_ENABLE;
66 else
67 cmd = DLN2_I2C_DISABLE;
68
69 return dln2_transfer_tx(dln2->pdev, cmd, &tx, sizeof(tx));
70}
71
72static int dln2_i2c_write(struct dln2_i2c *dln2, u8 addr,
73 u8 *data, u16 data_len)
74{
75 int ret;
76 struct {
77 u8 port;
78 u8 addr;
79 u8 mem_addr_len;
80 __le32 mem_addr;
81 __le16 buf_len;
82 u8 buf[DLN2_I2C_MAX_XFER_SIZE];
83 } __packed *tx = dln2->buf;
84 unsigned len;
85
86 BUILD_BUG_ON(sizeof(*tx) > DLN2_I2C_BUF_SIZE);
87
88 tx->port = dln2->port;
89 tx->addr = addr;
90 tx->mem_addr_len = 0;
91 tx->mem_addr = 0;
92 tx->buf_len = cpu_to_le16(data_len);
93 memcpy(tx->buf, data, data_len);
94
95 len = sizeof(*tx) + data_len - DLN2_I2C_MAX_XFER_SIZE;
96 ret = dln2_transfer_tx(dln2->pdev, DLN2_I2C_WRITE, tx, len);
97 if (ret < 0)
98 return ret;
99
100 return data_len;
101}
102
103static int dln2_i2c_read(struct dln2_i2c *dln2, u16 addr, u8 *data,
104 u16 data_len)
105{
106 int ret;
107 struct {
108 u8 port;
109 u8 addr;
110 u8 mem_addr_len;
111 __le32 mem_addr;
112 __le16 buf_len;
113 } __packed tx;
114 struct {
115 __le16 buf_len;
116 u8 buf[DLN2_I2C_MAX_XFER_SIZE];
117 } __packed *rx = dln2->buf;
118 unsigned rx_len = sizeof(*rx);
119
120 BUILD_BUG_ON(sizeof(*rx) > DLN2_I2C_BUF_SIZE);
121
122 tx.port = dln2->port;
123 tx.addr = addr;
124 tx.mem_addr_len = 0;
125 tx.mem_addr = 0;
126 tx.buf_len = cpu_to_le16(data_len);
127
128 ret = dln2_transfer(dln2->pdev, DLN2_I2C_READ, &tx, sizeof(tx),
129 rx, &rx_len);
130 if (ret < 0)
131 return ret;
132 if (rx_len < sizeof(rx->buf_len) + data_len)
133 return -EPROTO;
134 if (le16_to_cpu(rx->buf_len) != data_len)
135 return -EPROTO;
136
137 memcpy(data, rx->buf, data_len);
138
139 return data_len;
140}
141
142static int dln2_i2c_xfer(struct i2c_adapter *adapter,
143 struct i2c_msg *msgs, int num)
144{
145 struct dln2_i2c *dln2 = i2c_get_adapdata(adapter);
146 struct i2c_msg *pmsg;
147 struct device *dev = &dln2->adapter.dev;
148 int i;
149
150 for (i = 0; i < num; i++) {
151 int ret;
152
153 pmsg = &msgs[i];
154
155 if (pmsg->len > DLN2_I2C_MAX_XFER_SIZE) {
156 dev_warn(dev, "maximum transfer size exceeded\n");
157 return -EOPNOTSUPP;
158 }
159
160 if (pmsg->flags & I2C_M_RD) {
161 ret = dln2_i2c_read(dln2, pmsg->addr, pmsg->buf,
162 pmsg->len);
163 if (ret < 0)
164 return ret;
165
166 pmsg->len = ret;
167 } else {
168 ret = dln2_i2c_write(dln2, pmsg->addr, pmsg->buf,
169 pmsg->len);
170 if (ret != pmsg->len)
171 return -EPROTO;
172 }
173 }
174
175 return num;
176}
177
178static u32 dln2_i2c_func(struct i2c_adapter *a)
179{
180 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_BYTE_DATA |
181 I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
182 I2C_FUNC_SMBUS_I2C_BLOCK;
183}
184
185static const struct i2c_algorithm dln2_i2c_usb_algorithm = {
186 .master_xfer = dln2_i2c_xfer,
187 .functionality = dln2_i2c_func,
188};
189
190static int dln2_i2c_probe(struct platform_device *pdev)
191{
192 int ret;
193 struct dln2_i2c *dln2;
194 struct device *dev = &pdev->dev;
195 struct dln2_platform_data *pdata = dev_get_platdata(&pdev->dev);
196
197 dln2 = devm_kzalloc(dev, sizeof(*dln2), GFP_KERNEL);
198 if (!dln2)
199 return -ENOMEM;
200
201 dln2->buf = devm_kmalloc(dev, DLN2_I2C_BUF_SIZE, GFP_KERNEL);
202 if (!dln2->buf)
203 return -ENOMEM;
204
205 dln2->pdev = pdev;
206 dln2->port = pdata->port;
207
208 /* setup i2c adapter description */
209 dln2->adapter.owner = THIS_MODULE;
210 dln2->adapter.class = I2C_CLASS_HWMON;
211 dln2->adapter.algo = &dln2_i2c_usb_algorithm;
212 dln2->adapter.dev.parent = dev;
213 i2c_set_adapdata(&dln2->adapter, dln2);
214 snprintf(dln2->adapter.name, sizeof(dln2->adapter.name), "%s-%s-%d",
215 "dln2-i2c", dev_name(pdev->dev.parent), dln2->port);
216
217 platform_set_drvdata(pdev, dln2);
218
219 /* initialize the i2c interface */
220 ret = dln2_i2c_enable(dln2, true);
221 if (ret < 0) {
222 dev_err(dev, "failed to initialize adapter: %d\n", ret);
223 return ret;
224 }
225
226 /* and finally attach to i2c layer */
227 ret = i2c_add_adapter(&dln2->adapter);
228 if (ret < 0) {
229 dev_err(dev, "failed to add I2C adapter: %d\n", ret);
230 goto out_disable;
231 }
232
233 return 0;
234
235out_disable:
236 dln2_i2c_enable(dln2, false);
237
238 return ret;
239}
240
241static int dln2_i2c_remove(struct platform_device *pdev)
242{
243 struct dln2_i2c *dln2 = platform_get_drvdata(pdev);
244
245 i2c_del_adapter(&dln2->adapter);
246 dln2_i2c_enable(dln2, false);
247
248 return 0;
249}
250
251static struct platform_driver dln2_i2c_driver = {
252 .driver.name = "dln2-i2c",
253 .probe = dln2_i2c_probe,
254 .remove = dln2_i2c_remove,
255};
256
257module_platform_driver(dln2_i2c_driver);
258
259MODULE_AUTHOR("Laurentiu Palcu <laurentiu.palcu@intel.com>");
260MODULE_DESCRIPTION("Driver for the Diolan DLN2 I2C master interface");
261MODULE_LICENSE("GPL v2");
262MODULE_ALIAS("platform:dln2-i2c");
diff --git a/drivers/i2c/busses/i2c-hix5hd2.c b/drivers/i2c/busses/i2c-hix5hd2.c
index 9490d0f4255c..8fe78d08e01c 100644
--- a/drivers/i2c/busses/i2c-hix5hd2.c
+++ b/drivers/i2c/busses/i2c-hix5hd2.c
@@ -528,7 +528,7 @@ static int hix5hd2_i2c_runtime_resume(struct device *dev)
528#endif 528#endif
529 529
530static const struct dev_pm_ops hix5hd2_i2c_pm_ops = { 530static const struct dev_pm_ops hix5hd2_i2c_pm_ops = {
531 SET_PM_RUNTIME_PM_OPS(hix5hd2_i2c_runtime_suspend, 531 SET_RUNTIME_PM_OPS(hix5hd2_i2c_runtime_suspend,
532 hix5hd2_i2c_runtime_resume, 532 hix5hd2_i2c_runtime_resume,
533 NULL) 533 NULL)
534}; 534};
diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c
index 9ad038d223c4..97998946c4f6 100644
--- a/drivers/i2c/busses/i2c-nomadik.c
+++ b/drivers/i2c/busses/i2c-nomadik.c
@@ -932,7 +932,7 @@ static int nmk_i2c_runtime_resume(struct device *dev)
932 932
933static const struct dev_pm_ops nmk_i2c_pm = { 933static const struct dev_pm_ops nmk_i2c_pm = {
934 SET_LATE_SYSTEM_SLEEP_PM_OPS(nmk_i2c_suspend_late, nmk_i2c_resume_early) 934 SET_LATE_SYSTEM_SLEEP_PM_OPS(nmk_i2c_suspend_late, nmk_i2c_resume_early)
935 SET_PM_RUNTIME_PM_OPS(nmk_i2c_runtime_suspend, 935 SET_RUNTIME_PM_OPS(nmk_i2c_runtime_suspend,
936 nmk_i2c_runtime_resume, 936 nmk_i2c_runtime_resume,
937 NULL) 937 NULL)
938}; 938};
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 26942c159de1..0e650a0d0ad0 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -922,14 +922,12 @@ omap_i2c_isr_thread(int this_irq, void *dev_id)
922 if (stat & OMAP_I2C_STAT_NACK) { 922 if (stat & OMAP_I2C_STAT_NACK) {
923 err |= OMAP_I2C_STAT_NACK; 923 err |= OMAP_I2C_STAT_NACK;
924 omap_i2c_ack_stat(dev, OMAP_I2C_STAT_NACK); 924 omap_i2c_ack_stat(dev, OMAP_I2C_STAT_NACK);
925 break;
926 } 925 }
927 926
928 if (stat & OMAP_I2C_STAT_AL) { 927 if (stat & OMAP_I2C_STAT_AL) {
929 dev_err(dev->dev, "Arbitration lost\n"); 928 dev_err(dev->dev, "Arbitration lost\n");
930 err |= OMAP_I2C_STAT_AL; 929 err |= OMAP_I2C_STAT_AL;
931 omap_i2c_ack_stat(dev, OMAP_I2C_STAT_AL); 930 omap_i2c_ack_stat(dev, OMAP_I2C_STAT_AL);
932 break;
933 } 931 }
934 932
935 /* 933 /*
@@ -954,11 +952,13 @@ omap_i2c_isr_thread(int this_irq, void *dev_id)
954 if (dev->fifo_size) 952 if (dev->fifo_size)
955 num_bytes = dev->buf_len; 953 num_bytes = dev->buf_len;
956 954
957 omap_i2c_receive_data(dev, num_bytes, true); 955 if (dev->errata & I2C_OMAP_ERRATA_I207) {
958
959 if (dev->errata & I2C_OMAP_ERRATA_I207)
960 i2c_omap_errata_i207(dev, stat); 956 i2c_omap_errata_i207(dev, stat);
957 num_bytes = (omap_i2c_read_reg(dev,
958 OMAP_I2C_BUFSTAT_REG) >> 8) & 0x3F;
959 }
961 960
961 omap_i2c_receive_data(dev, num_bytes, true);
962 omap_i2c_ack_stat(dev, OMAP_I2C_STAT_RDR); 962 omap_i2c_ack_stat(dev, OMAP_I2C_STAT_RDR);
963 continue; 963 continue;
964 } 964 }
@@ -1280,7 +1280,6 @@ static int omap_i2c_remove(struct platform_device *pdev)
1280} 1280}
1281 1281
1282#ifdef CONFIG_PM 1282#ifdef CONFIG_PM
1283#ifdef CONFIG_PM_RUNTIME
1284static int omap_i2c_runtime_suspend(struct device *dev) 1283static int omap_i2c_runtime_suspend(struct device *dev)
1285{ 1284{
1286 struct platform_device *pdev = to_platform_device(dev); 1285 struct platform_device *pdev = to_platform_device(dev);
@@ -1318,7 +1317,6 @@ static int omap_i2c_runtime_resume(struct device *dev)
1318 1317
1319 return 0; 1318 return 0;
1320} 1319}
1321#endif /* CONFIG_PM_RUNTIME */
1322 1320
1323static struct dev_pm_ops omap_i2c_pm_ops = { 1321static struct dev_pm_ops omap_i2c_pm_ops = {
1324 SET_RUNTIME_PM_OPS(omap_i2c_runtime_suspend, 1322 SET_RUNTIME_PM_OPS(omap_i2c_runtime_suspend,
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index d8afd3f28ca4..229a89e84b0f 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -404,6 +404,7 @@ static int acpi_i2c_install_space_handler(struct i2c_adapter *adapter)
404 return -ENOMEM; 404 return -ENOMEM;
405 } 405 }
406 406
407 acpi_walk_dep_device_list(handle);
407 return 0; 408 return 0;
408} 409}
409 410
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 9b7ee7e427df..9cceacb92f9d 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -128,28 +128,28 @@ static struct cpuidle_state nehalem_cstates[] = {
128 { 128 {
129 .name = "C1-NHM", 129 .name = "C1-NHM",
130 .desc = "MWAIT 0x00", 130 .desc = "MWAIT 0x00",
131 .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID, 131 .flags = MWAIT2flg(0x00),
132 .exit_latency = 3, 132 .exit_latency = 3,
133 .target_residency = 6, 133 .target_residency = 6,
134 .enter = &intel_idle }, 134 .enter = &intel_idle },
135 { 135 {
136 .name = "C1E-NHM", 136 .name = "C1E-NHM",
137 .desc = "MWAIT 0x01", 137 .desc = "MWAIT 0x01",
138 .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_TIME_VALID, 138 .flags = MWAIT2flg(0x01),
139 .exit_latency = 10, 139 .exit_latency = 10,
140 .target_residency = 20, 140 .target_residency = 20,
141 .enter = &intel_idle }, 141 .enter = &intel_idle },
142 { 142 {
143 .name = "C3-NHM", 143 .name = "C3-NHM",
144 .desc = "MWAIT 0x10", 144 .desc = "MWAIT 0x10",
145 .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, 145 .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
146 .exit_latency = 20, 146 .exit_latency = 20,
147 .target_residency = 80, 147 .target_residency = 80,
148 .enter = &intel_idle }, 148 .enter = &intel_idle },
149 { 149 {
150 .name = "C6-NHM", 150 .name = "C6-NHM",
151 .desc = "MWAIT 0x20", 151 .desc = "MWAIT 0x20",
152 .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, 152 .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
153 .exit_latency = 200, 153 .exit_latency = 200,
154 .target_residency = 800, 154 .target_residency = 800,
155 .enter = &intel_idle }, 155 .enter = &intel_idle },
@@ -161,35 +161,35 @@ static struct cpuidle_state snb_cstates[] = {
161 { 161 {
162 .name = "C1-SNB", 162 .name = "C1-SNB",
163 .desc = "MWAIT 0x00", 163 .desc = "MWAIT 0x00",
164 .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID, 164 .flags = MWAIT2flg(0x00),
165 .exit_latency = 2, 165 .exit_latency = 2,
166 .target_residency = 2, 166 .target_residency = 2,
167 .enter = &intel_idle }, 167 .enter = &intel_idle },
168 { 168 {
169 .name = "C1E-SNB", 169 .name = "C1E-SNB",
170 .desc = "MWAIT 0x01", 170 .desc = "MWAIT 0x01",
171 .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_TIME_VALID, 171 .flags = MWAIT2flg(0x01),
172 .exit_latency = 10, 172 .exit_latency = 10,
173 .target_residency = 20, 173 .target_residency = 20,
174 .enter = &intel_idle }, 174 .enter = &intel_idle },
175 { 175 {
176 .name = "C3-SNB", 176 .name = "C3-SNB",
177 .desc = "MWAIT 0x10", 177 .desc = "MWAIT 0x10",
178 .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, 178 .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
179 .exit_latency = 80, 179 .exit_latency = 80,
180 .target_residency = 211, 180 .target_residency = 211,
181 .enter = &intel_idle }, 181 .enter = &intel_idle },
182 { 182 {
183 .name = "C6-SNB", 183 .name = "C6-SNB",
184 .desc = "MWAIT 0x20", 184 .desc = "MWAIT 0x20",
185 .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, 185 .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
186 .exit_latency = 104, 186 .exit_latency = 104,
187 .target_residency = 345, 187 .target_residency = 345,
188 .enter = &intel_idle }, 188 .enter = &intel_idle },
189 { 189 {
190 .name = "C7-SNB", 190 .name = "C7-SNB",
191 .desc = "MWAIT 0x30", 191 .desc = "MWAIT 0x30",
192 .flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, 192 .flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED,
193 .exit_latency = 109, 193 .exit_latency = 109,
194 .target_residency = 345, 194 .target_residency = 345,
195 .enter = &intel_idle }, 195 .enter = &intel_idle },
@@ -201,42 +201,42 @@ static struct cpuidle_state byt_cstates[] = {
201 { 201 {
202 .name = "C1-BYT", 202 .name = "C1-BYT",
203 .desc = "MWAIT 0x00", 203 .desc = "MWAIT 0x00",
204 .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID, 204 .flags = MWAIT2flg(0x00),
205 .exit_latency = 1, 205 .exit_latency = 1,
206 .target_residency = 1, 206 .target_residency = 1,
207 .enter = &intel_idle }, 207 .enter = &intel_idle },
208 { 208 {
209 .name = "C1E-BYT", 209 .name = "C1E-BYT",
210 .desc = "MWAIT 0x01", 210 .desc = "MWAIT 0x01",
211 .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_TIME_VALID, 211 .flags = MWAIT2flg(0x01),
212 .exit_latency = 15, 212 .exit_latency = 15,
213 .target_residency = 30, 213 .target_residency = 30,
214 .enter = &intel_idle }, 214 .enter = &intel_idle },
215 { 215 {
216 .name = "C6N-BYT", 216 .name = "C6N-BYT",
217 .desc = "MWAIT 0x58", 217 .desc = "MWAIT 0x58",
218 .flags = MWAIT2flg(0x58) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, 218 .flags = MWAIT2flg(0x58) | CPUIDLE_FLAG_TLB_FLUSHED,
219 .exit_latency = 40, 219 .exit_latency = 40,
220 .target_residency = 275, 220 .target_residency = 275,
221 .enter = &intel_idle }, 221 .enter = &intel_idle },
222 { 222 {
223 .name = "C6S-BYT", 223 .name = "C6S-BYT",
224 .desc = "MWAIT 0x52", 224 .desc = "MWAIT 0x52",
225 .flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, 225 .flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED,
226 .exit_latency = 140, 226 .exit_latency = 140,
227 .target_residency = 560, 227 .target_residency = 560,
228 .enter = &intel_idle }, 228 .enter = &intel_idle },
229 { 229 {
230 .name = "C7-BYT", 230 .name = "C7-BYT",
231 .desc = "MWAIT 0x60", 231 .desc = "MWAIT 0x60",
232 .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, 232 .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
233 .exit_latency = 1200, 233 .exit_latency = 1200,
234 .target_residency = 1500, 234 .target_residency = 1500,
235 .enter = &intel_idle }, 235 .enter = &intel_idle },
236 { 236 {
237 .name = "C7S-BYT", 237 .name = "C7S-BYT",
238 .desc = "MWAIT 0x64", 238 .desc = "MWAIT 0x64",
239 .flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, 239 .flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TLB_FLUSHED,
240 .exit_latency = 10000, 240 .exit_latency = 10000,
241 .target_residency = 20000, 241 .target_residency = 20000,
242 .enter = &intel_idle }, 242 .enter = &intel_idle },
@@ -248,35 +248,35 @@ static struct cpuidle_state ivb_cstates[] = {
248 { 248 {
249 .name = "C1-IVB", 249 .name = "C1-IVB",
250 .desc = "MWAIT 0x00", 250 .desc = "MWAIT 0x00",
251 .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID, 251 .flags = MWAIT2flg(0x00),
252 .exit_latency = 1, 252 .exit_latency = 1,
253 .target_residency = 1, 253 .target_residency = 1,
254 .enter = &intel_idle }, 254 .enter = &intel_idle },
255 { 255 {
256 .name = "C1E-IVB", 256 .name = "C1E-IVB",
257 .desc = "MWAIT 0x01", 257 .desc = "MWAIT 0x01",
258 .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_TIME_VALID, 258 .flags = MWAIT2flg(0x01),
259 .exit_latency = 10, 259 .exit_latency = 10,
260 .target_residency = 20, 260 .target_residency = 20,
261 .enter = &intel_idle }, 261 .enter = &intel_idle },
262 { 262 {
263 .name = "C3-IVB", 263 .name = "C3-IVB",
264 .desc = "MWAIT 0x10", 264 .desc = "MWAIT 0x10",
265 .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, 265 .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
266 .exit_latency = 59, 266 .exit_latency = 59,
267 .target_residency = 156, 267 .target_residency = 156,
268 .enter = &intel_idle }, 268 .enter = &intel_idle },
269 { 269 {
270 .name = "C6-IVB", 270 .name = "C6-IVB",
271 .desc = "MWAIT 0x20", 271 .desc = "MWAIT 0x20",
272 .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, 272 .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
273 .exit_latency = 80, 273 .exit_latency = 80,
274 .target_residency = 300, 274 .target_residency = 300,
275 .enter = &intel_idle }, 275 .enter = &intel_idle },
276 { 276 {
277 .name = "C7-IVB", 277 .name = "C7-IVB",
278 .desc = "MWAIT 0x30", 278 .desc = "MWAIT 0x30",
279 .flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, 279 .flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED,
280 .exit_latency = 87, 280 .exit_latency = 87,
281 .target_residency = 300, 281 .target_residency = 300,
282 .enter = &intel_idle }, 282 .enter = &intel_idle },
@@ -288,28 +288,28 @@ static struct cpuidle_state ivt_cstates[] = {
288 { 288 {
289 .name = "C1-IVT", 289 .name = "C1-IVT",
290 .desc = "MWAIT 0x00", 290 .desc = "MWAIT 0x00",
291 .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID, 291 .flags = MWAIT2flg(0x00),
292 .exit_latency = 1, 292 .exit_latency = 1,
293 .target_residency = 1, 293 .target_residency = 1,
294 .enter = &intel_idle }, 294 .enter = &intel_idle },
295 { 295 {
296 .name = "C1E-IVT", 296 .name = "C1E-IVT",
297 .desc = "MWAIT 0x01", 297 .desc = "MWAIT 0x01",
298 .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_TIME_VALID, 298 .flags = MWAIT2flg(0x01),
299 .exit_latency = 10, 299 .exit_latency = 10,
300 .target_residency = 80, 300 .target_residency = 80,
301 .enter = &intel_idle }, 301 .enter = &intel_idle },
302 { 302 {
303 .name = "C3-IVT", 303 .name = "C3-IVT",
304 .desc = "MWAIT 0x10", 304 .desc = "MWAIT 0x10",
305 .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, 305 .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
306 .exit_latency = 59, 306 .exit_latency = 59,
307 .target_residency = 156, 307 .target_residency = 156,
308 .enter = &intel_idle }, 308 .enter = &intel_idle },
309 { 309 {
310 .name = "C6-IVT", 310 .name = "C6-IVT",
311 .desc = "MWAIT 0x20", 311 .desc = "MWAIT 0x20",
312 .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, 312 .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
313 .exit_latency = 82, 313 .exit_latency = 82,
314 .target_residency = 300, 314 .target_residency = 300,
315 .enter = &intel_idle }, 315 .enter = &intel_idle },
@@ -321,28 +321,28 @@ static struct cpuidle_state ivt_cstates_4s[] = {
321 { 321 {
322 .name = "C1-IVT-4S", 322 .name = "C1-IVT-4S",
323 .desc = "MWAIT 0x00", 323 .desc = "MWAIT 0x00",
324 .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID, 324 .flags = MWAIT2flg(0x00),
325 .exit_latency = 1, 325 .exit_latency = 1,
326 .target_residency = 1, 326 .target_residency = 1,
327 .enter = &intel_idle }, 327 .enter = &intel_idle },
328 { 328 {
329 .name = "C1E-IVT-4S", 329 .name = "C1E-IVT-4S",
330 .desc = "MWAIT 0x01", 330 .desc = "MWAIT 0x01",
331 .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_TIME_VALID, 331 .flags = MWAIT2flg(0x01),
332 .exit_latency = 10, 332 .exit_latency = 10,
333 .target_residency = 250, 333 .target_residency = 250,
334 .enter = &intel_idle }, 334 .enter = &intel_idle },
335 { 335 {
336 .name = "C3-IVT-4S", 336 .name = "C3-IVT-4S",
337 .desc = "MWAIT 0x10", 337 .desc = "MWAIT 0x10",
338 .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, 338 .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
339 .exit_latency = 59, 339 .exit_latency = 59,
340 .target_residency = 300, 340 .target_residency = 300,
341 .enter = &intel_idle }, 341 .enter = &intel_idle },
342 { 342 {
343 .name = "C6-IVT-4S", 343 .name = "C6-IVT-4S",
344 .desc = "MWAIT 0x20", 344 .desc = "MWAIT 0x20",
345 .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, 345 .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
346 .exit_latency = 84, 346 .exit_latency = 84,
347 .target_residency = 400, 347 .target_residency = 400,
348 .enter = &intel_idle }, 348 .enter = &intel_idle },
@@ -354,28 +354,28 @@ static struct cpuidle_state ivt_cstates_8s[] = {
354 { 354 {
355 .name = "C1-IVT-8S", 355 .name = "C1-IVT-8S",
356 .desc = "MWAIT 0x00", 356 .desc = "MWAIT 0x00",
357 .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID, 357 .flags = MWAIT2flg(0x00),
358 .exit_latency = 1, 358 .exit_latency = 1,
359 .target_residency = 1, 359 .target_residency = 1,
360 .enter = &intel_idle }, 360 .enter = &intel_idle },
361 { 361 {
362 .name = "C1E-IVT-8S", 362 .name = "C1E-IVT-8S",
363 .desc = "MWAIT 0x01", 363 .desc = "MWAIT 0x01",
364 .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_TIME_VALID, 364 .flags = MWAIT2flg(0x01),
365 .exit_latency = 10, 365 .exit_latency = 10,
366 .target_residency = 500, 366 .target_residency = 500,
367 .enter = &intel_idle }, 367 .enter = &intel_idle },
368 { 368 {
369 .name = "C3-IVT-8S", 369 .name = "C3-IVT-8S",
370 .desc = "MWAIT 0x10", 370 .desc = "MWAIT 0x10",
371 .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, 371 .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
372 .exit_latency = 59, 372 .exit_latency = 59,
373 .target_residency = 600, 373 .target_residency = 600,
374 .enter = &intel_idle }, 374 .enter = &intel_idle },
375 { 375 {
376 .name = "C6-IVT-8S", 376 .name = "C6-IVT-8S",
377 .desc = "MWAIT 0x20", 377 .desc = "MWAIT 0x20",
378 .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, 378 .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
379 .exit_latency = 88, 379 .exit_latency = 88,
380 .target_residency = 700, 380 .target_residency = 700,
381 .enter = &intel_idle }, 381 .enter = &intel_idle },
@@ -387,56 +387,56 @@ static struct cpuidle_state hsw_cstates[] = {
387 { 387 {
388 .name = "C1-HSW", 388 .name = "C1-HSW",
389 .desc = "MWAIT 0x00", 389 .desc = "MWAIT 0x00",
390 .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID, 390 .flags = MWAIT2flg(0x00),
391 .exit_latency = 2, 391 .exit_latency = 2,
392 .target_residency = 2, 392 .target_residency = 2,
393 .enter = &intel_idle }, 393 .enter = &intel_idle },
394 { 394 {
395 .name = "C1E-HSW", 395 .name = "C1E-HSW",
396 .desc = "MWAIT 0x01", 396 .desc = "MWAIT 0x01",
397 .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_TIME_VALID, 397 .flags = MWAIT2flg(0x01),
398 .exit_latency = 10, 398 .exit_latency = 10,
399 .target_residency = 20, 399 .target_residency = 20,
400 .enter = &intel_idle }, 400 .enter = &intel_idle },
401 { 401 {
402 .name = "C3-HSW", 402 .name = "C3-HSW",
403 .desc = "MWAIT 0x10", 403 .desc = "MWAIT 0x10",
404 .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, 404 .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
405 .exit_latency = 33, 405 .exit_latency = 33,
406 .target_residency = 100, 406 .target_residency = 100,
407 .enter = &intel_idle }, 407 .enter = &intel_idle },
408 { 408 {
409 .name = "C6-HSW", 409 .name = "C6-HSW",
410 .desc = "MWAIT 0x20", 410 .desc = "MWAIT 0x20",
411 .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, 411 .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
412 .exit_latency = 133, 412 .exit_latency = 133,
413 .target_residency = 400, 413 .target_residency = 400,
414 .enter = &intel_idle }, 414 .enter = &intel_idle },
415 { 415 {
416 .name = "C7s-HSW", 416 .name = "C7s-HSW",
417 .desc = "MWAIT 0x32", 417 .desc = "MWAIT 0x32",
418 .flags = MWAIT2flg(0x32) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, 418 .flags = MWAIT2flg(0x32) | CPUIDLE_FLAG_TLB_FLUSHED,
419 .exit_latency = 166, 419 .exit_latency = 166,
420 .target_residency = 500, 420 .target_residency = 500,
421 .enter = &intel_idle }, 421 .enter = &intel_idle },
422 { 422 {
423 .name = "C8-HSW", 423 .name = "C8-HSW",
424 .desc = "MWAIT 0x40", 424 .desc = "MWAIT 0x40",
425 .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, 425 .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
426 .exit_latency = 300, 426 .exit_latency = 300,
427 .target_residency = 900, 427 .target_residency = 900,
428 .enter = &intel_idle }, 428 .enter = &intel_idle },
429 { 429 {
430 .name = "C9-HSW", 430 .name = "C9-HSW",
431 .desc = "MWAIT 0x50", 431 .desc = "MWAIT 0x50",
432 .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, 432 .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED,
433 .exit_latency = 600, 433 .exit_latency = 600,
434 .target_residency = 1800, 434 .target_residency = 1800,
435 .enter = &intel_idle }, 435 .enter = &intel_idle },
436 { 436 {
437 .name = "C10-HSW", 437 .name = "C10-HSW",
438 .desc = "MWAIT 0x60", 438 .desc = "MWAIT 0x60",
439 .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, 439 .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
440 .exit_latency = 2600, 440 .exit_latency = 2600,
441 .target_residency = 7700, 441 .target_residency = 7700,
442 .enter = &intel_idle }, 442 .enter = &intel_idle },
@@ -447,56 +447,56 @@ static struct cpuidle_state bdw_cstates[] = {
447 { 447 {
448 .name = "C1-BDW", 448 .name = "C1-BDW",
449 .desc = "MWAIT 0x00", 449 .desc = "MWAIT 0x00",
450 .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID, 450 .flags = MWAIT2flg(0x00),
451 .exit_latency = 2, 451 .exit_latency = 2,
452 .target_residency = 2, 452 .target_residency = 2,
453 .enter = &intel_idle }, 453 .enter = &intel_idle },
454 { 454 {
455 .name = "C1E-BDW", 455 .name = "C1E-BDW",
456 .desc = "MWAIT 0x01", 456 .desc = "MWAIT 0x01",
457 .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_TIME_VALID, 457 .flags = MWAIT2flg(0x01),
458 .exit_latency = 10, 458 .exit_latency = 10,
459 .target_residency = 20, 459 .target_residency = 20,
460 .enter = &intel_idle }, 460 .enter = &intel_idle },
461 { 461 {
462 .name = "C3-BDW", 462 .name = "C3-BDW",
463 .desc = "MWAIT 0x10", 463 .desc = "MWAIT 0x10",
464 .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, 464 .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
465 .exit_latency = 40, 465 .exit_latency = 40,
466 .target_residency = 100, 466 .target_residency = 100,
467 .enter = &intel_idle }, 467 .enter = &intel_idle },
468 { 468 {
469 .name = "C6-BDW", 469 .name = "C6-BDW",
470 .desc = "MWAIT 0x20", 470 .desc = "MWAIT 0x20",
471 .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, 471 .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
472 .exit_latency = 133, 472 .exit_latency = 133,
473 .target_residency = 400, 473 .target_residency = 400,
474 .enter = &intel_idle }, 474 .enter = &intel_idle },
475 { 475 {
476 .name = "C7s-BDW", 476 .name = "C7s-BDW",
477 .desc = "MWAIT 0x32", 477 .desc = "MWAIT 0x32",
478 .flags = MWAIT2flg(0x32) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, 478 .flags = MWAIT2flg(0x32) | CPUIDLE_FLAG_TLB_FLUSHED,
479 .exit_latency = 166, 479 .exit_latency = 166,
480 .target_residency = 500, 480 .target_residency = 500,
481 .enter = &intel_idle }, 481 .enter = &intel_idle },
482 { 482 {
483 .name = "C8-BDW", 483 .name = "C8-BDW",
484 .desc = "MWAIT 0x40", 484 .desc = "MWAIT 0x40",
485 .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, 485 .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
486 .exit_latency = 300, 486 .exit_latency = 300,
487 .target_residency = 900, 487 .target_residency = 900,
488 .enter = &intel_idle }, 488 .enter = &intel_idle },
489 { 489 {
490 .name = "C9-BDW", 490 .name = "C9-BDW",
491 .desc = "MWAIT 0x50", 491 .desc = "MWAIT 0x50",
492 .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, 492 .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED,
493 .exit_latency = 600, 493 .exit_latency = 600,
494 .target_residency = 1800, 494 .target_residency = 1800,
495 .enter = &intel_idle }, 495 .enter = &intel_idle },
496 { 496 {
497 .name = "C10-BDW", 497 .name = "C10-BDW",
498 .desc = "MWAIT 0x60", 498 .desc = "MWAIT 0x60",
499 .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, 499 .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
500 .exit_latency = 2600, 500 .exit_latency = 2600,
501 .target_residency = 7700, 501 .target_residency = 7700,
502 .enter = &intel_idle }, 502 .enter = &intel_idle },
@@ -508,28 +508,28 @@ static struct cpuidle_state atom_cstates[] = {
508 { 508 {
509 .name = "C1E-ATM", 509 .name = "C1E-ATM",
510 .desc = "MWAIT 0x00", 510 .desc = "MWAIT 0x00",
511 .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID, 511 .flags = MWAIT2flg(0x00),
512 .exit_latency = 10, 512 .exit_latency = 10,
513 .target_residency = 20, 513 .target_residency = 20,
514 .enter = &intel_idle }, 514 .enter = &intel_idle },
515 { 515 {
516 .name = "C2-ATM", 516 .name = "C2-ATM",
517 .desc = "MWAIT 0x10", 517 .desc = "MWAIT 0x10",
518 .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TIME_VALID, 518 .flags = MWAIT2flg(0x10),
519 .exit_latency = 20, 519 .exit_latency = 20,
520 .target_residency = 80, 520 .target_residency = 80,
521 .enter = &intel_idle }, 521 .enter = &intel_idle },
522 { 522 {
523 .name = "C4-ATM", 523 .name = "C4-ATM",
524 .desc = "MWAIT 0x30", 524 .desc = "MWAIT 0x30",
525 .flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, 525 .flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED,
526 .exit_latency = 100, 526 .exit_latency = 100,
527 .target_residency = 400, 527 .target_residency = 400,
528 .enter = &intel_idle }, 528 .enter = &intel_idle },
529 { 529 {
530 .name = "C6-ATM", 530 .name = "C6-ATM",
531 .desc = "MWAIT 0x52", 531 .desc = "MWAIT 0x52",
532 .flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, 532 .flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED,
533 .exit_latency = 140, 533 .exit_latency = 140,
534 .target_residency = 560, 534 .target_residency = 560,
535 .enter = &intel_idle }, 535 .enter = &intel_idle },
@@ -540,14 +540,14 @@ static struct cpuidle_state avn_cstates[] = {
540 { 540 {
541 .name = "C1-AVN", 541 .name = "C1-AVN",
542 .desc = "MWAIT 0x00", 542 .desc = "MWAIT 0x00",
543 .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID, 543 .flags = MWAIT2flg(0x00),
544 .exit_latency = 2, 544 .exit_latency = 2,
545 .target_residency = 2, 545 .target_residency = 2,
546 .enter = &intel_idle }, 546 .enter = &intel_idle },
547 { 547 {
548 .name = "C6-AVN", 548 .name = "C6-AVN",
549 .desc = "MWAIT 0x51", 549 .desc = "MWAIT 0x51",
550 .flags = MWAIT2flg(0x51) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, 550 .flags = MWAIT2flg(0x51) | CPUIDLE_FLAG_TLB_FLUSHED,
551 .exit_latency = 15, 551 .exit_latency = 15,
552 .target_residency = 45, 552 .target_residency = 45,
553 .enter = &intel_idle }, 553 .enter = &intel_idle },
diff --git a/drivers/iio/accel/bmc150-accel.c b/drivers/iio/accel/bmc150-accel.c
index 22c096ce39ad..066d0c04072c 100644
--- a/drivers/iio/accel/bmc150-accel.c
+++ b/drivers/iio/accel/bmc150-accel.c
@@ -44,6 +44,9 @@
44 44
45#define BMC150_ACCEL_REG_INT_STATUS_2 0x0B 45#define BMC150_ACCEL_REG_INT_STATUS_2 0x0B
46#define BMC150_ACCEL_ANY_MOTION_MASK 0x07 46#define BMC150_ACCEL_ANY_MOTION_MASK 0x07
47#define BMC150_ACCEL_ANY_MOTION_BIT_X BIT(0)
48#define BMC150_ACCEL_ANY_MOTION_BIT_Y BIT(1)
49#define BMC150_ACCEL_ANY_MOTION_BIT_Z BIT(2)
47#define BMC150_ACCEL_ANY_MOTION_BIT_SIGN BIT(3) 50#define BMC150_ACCEL_ANY_MOTION_BIT_SIGN BIT(3)
48 51
49#define BMC150_ACCEL_REG_PMU_LPW 0x11 52#define BMC150_ACCEL_REG_PMU_LPW 0x11
@@ -92,9 +95,9 @@
92#define BMC150_ACCEL_SLOPE_THRES_MASK 0xFF 95#define BMC150_ACCEL_SLOPE_THRES_MASK 0xFF
93 96
94/* Slope duration in terms of number of samples */ 97/* Slope duration in terms of number of samples */
95#define BMC150_ACCEL_DEF_SLOPE_DURATION 2 98#define BMC150_ACCEL_DEF_SLOPE_DURATION 1
96/* in terms of multiples of g's/LSB, based on range */ 99/* in terms of multiples of g's/LSB, based on range */
97#define BMC150_ACCEL_DEF_SLOPE_THRESHOLD 5 100#define BMC150_ACCEL_DEF_SLOPE_THRESHOLD 1
98 101
99#define BMC150_ACCEL_REG_XOUT_L 0x02 102#define BMC150_ACCEL_REG_XOUT_L 0x02
100 103
@@ -510,7 +513,7 @@ static int bmc150_accel_get_bw(struct bmc150_accel_data *data, int *val,
510 return -EINVAL; 513 return -EINVAL;
511} 514}
512 515
513#ifdef CONFIG_PM_RUNTIME 516#ifdef CONFIG_PM
514static int bmc150_accel_get_startup_times(struct bmc150_accel_data *data) 517static int bmc150_accel_get_startup_times(struct bmc150_accel_data *data)
515{ 518{
516 int i; 519 int i;
@@ -536,6 +539,9 @@ static int bmc150_accel_set_power_state(struct bmc150_accel_data *data, bool on)
536 if (ret < 0) { 539 if (ret < 0) {
537 dev_err(&data->client->dev, 540 dev_err(&data->client->dev,
538 "Failed: bmc150_accel_set_power_state for %d\n", on); 541 "Failed: bmc150_accel_set_power_state for %d\n", on);
542 if (on)
543 pm_runtime_put_noidle(&data->client->dev);
544
539 return ret; 545 return ret;
540 } 546 }
541 547
@@ -811,6 +817,7 @@ static int bmc150_accel_write_event_config(struct iio_dev *indio_dev,
811 817
812 ret = bmc150_accel_setup_any_motion_interrupt(data, state); 818 ret = bmc150_accel_setup_any_motion_interrupt(data, state);
813 if (ret < 0) { 819 if (ret < 0) {
820 bmc150_accel_set_power_state(data, false);
814 mutex_unlock(&data->mutex); 821 mutex_unlock(&data->mutex);
815 return ret; 822 return ret;
816 } 823 }
@@ -846,7 +853,7 @@ static const struct attribute_group bmc150_accel_attrs_group = {
846 853
847static const struct iio_event_spec bmc150_accel_event = { 854static const struct iio_event_spec bmc150_accel_event = {
848 .type = IIO_EV_TYPE_ROC, 855 .type = IIO_EV_TYPE_ROC,
849 .dir = IIO_EV_DIR_RISING | IIO_EV_DIR_FALLING, 856 .dir = IIO_EV_DIR_EITHER,
850 .mask_separate = BIT(IIO_EV_INFO_VALUE) | 857 .mask_separate = BIT(IIO_EV_INFO_VALUE) |
851 BIT(IIO_EV_INFO_ENABLE) | 858 BIT(IIO_EV_INFO_ENABLE) |
852 BIT(IIO_EV_INFO_PERIOD) 859 BIT(IIO_EV_INFO_PERIOD)
@@ -1054,6 +1061,7 @@ static int bmc150_accel_data_rdy_trigger_set_state(struct iio_trigger *trig,
1054 else 1061 else
1055 ret = bmc150_accel_setup_new_data_interrupt(data, state); 1062 ret = bmc150_accel_setup_new_data_interrupt(data, state);
1056 if (ret < 0) { 1063 if (ret < 0) {
1064 bmc150_accel_set_power_state(data, false);
1057 mutex_unlock(&data->mutex); 1065 mutex_unlock(&data->mutex);
1058 return ret; 1066 return ret;
1059 } 1067 }
@@ -1092,12 +1100,26 @@ static irqreturn_t bmc150_accel_event_handler(int irq, void *private)
1092 else 1100 else
1093 dir = IIO_EV_DIR_RISING; 1101 dir = IIO_EV_DIR_RISING;
1094 1102
1095 if (ret & BMC150_ACCEL_ANY_MOTION_MASK) 1103 if (ret & BMC150_ACCEL_ANY_MOTION_BIT_X)
1104 iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ACCEL,
1105 0,
1106 IIO_MOD_X,
1107 IIO_EV_TYPE_ROC,
1108 dir),
1109 data->timestamp);
1110 if (ret & BMC150_ACCEL_ANY_MOTION_BIT_Y)
1096 iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ACCEL, 1111 iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ACCEL,
1097 0, 1112 0,
1098 IIO_MOD_X_OR_Y_OR_Z, 1113 IIO_MOD_Y,
1099 IIO_EV_TYPE_ROC, 1114 IIO_EV_TYPE_ROC,
1100 IIO_EV_DIR_EITHER), 1115 dir),
1116 data->timestamp);
1117 if (ret & BMC150_ACCEL_ANY_MOTION_BIT_Z)
1118 iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ACCEL,
1119 0,
1120 IIO_MOD_Z,
1121 IIO_EV_TYPE_ROC,
1122 dir),
1101 data->timestamp); 1123 data->timestamp);
1102ack_intr_status: 1124ack_intr_status:
1103 if (!data->dready_trigger_on) 1125 if (!data->dready_trigger_on)
@@ -1349,15 +1371,19 @@ static int bmc150_accel_resume(struct device *dev)
1349} 1371}
1350#endif 1372#endif
1351 1373
1352#ifdef CONFIG_PM_RUNTIME 1374#ifdef CONFIG_PM
1353static int bmc150_accel_runtime_suspend(struct device *dev) 1375static int bmc150_accel_runtime_suspend(struct device *dev)
1354{ 1376{
1355 struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev)); 1377 struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
1356 struct bmc150_accel_data *data = iio_priv(indio_dev); 1378 struct bmc150_accel_data *data = iio_priv(indio_dev);
1379 int ret;
1357 1380
1358 dev_dbg(&data->client->dev, __func__); 1381 dev_dbg(&data->client->dev, __func__);
1382 ret = bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_SUSPEND, 0);
1383 if (ret < 0)
1384 return -EAGAIN;
1359 1385
1360 return bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_SUSPEND, 0); 1386 return 0;
1361} 1387}
1362 1388
1363static int bmc150_accel_runtime_resume(struct device *dev) 1389static int bmc150_accel_runtime_resume(struct device *dev)
diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c
index a23e58c4ed99..da2fe93739a2 100644
--- a/drivers/iio/accel/kxcjk-1013.c
+++ b/drivers/iio/accel/kxcjk-1013.c
@@ -269,6 +269,8 @@ static int kxcjk1013_set_range(struct kxcjk1013_data *data, int range_index)
269 return ret; 269 return ret;
270 } 270 }
271 271
272 ret &= ~(KXCJK1013_REG_CTRL1_BIT_GSEL0 |
273 KXCJK1013_REG_CTRL1_BIT_GSEL1);
272 ret |= (KXCJK1013_scale_table[range_index].gsel_0 << 3); 274 ret |= (KXCJK1013_scale_table[range_index].gsel_0 << 3);
273 ret |= (KXCJK1013_scale_table[range_index].gsel_1 << 4); 275 ret |= (KXCJK1013_scale_table[range_index].gsel_1 << 4);
274 276
@@ -358,7 +360,7 @@ static int kxcjk1013_chip_init(struct kxcjk1013_data *data)
358 return 0; 360 return 0;
359} 361}
360 362
361#ifdef CONFIG_PM_RUNTIME 363#ifdef CONFIG_PM
362static int kxcjk1013_get_startup_times(struct kxcjk1013_data *data) 364static int kxcjk1013_get_startup_times(struct kxcjk1013_data *data)
363{ 365{
364 int i; 366 int i;
@@ -1357,7 +1359,7 @@ static int kxcjk1013_resume(struct device *dev)
1357} 1359}
1358#endif 1360#endif
1359 1361
1360#ifdef CONFIG_PM_RUNTIME 1362#ifdef CONFIG_PM
1361static int kxcjk1013_runtime_suspend(struct device *dev) 1363static int kxcjk1013_runtime_suspend(struct device *dev)
1362{ 1364{
1363 struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev)); 1365 struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 88bdc8f612e2..bc4e787096e8 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -127,6 +127,14 @@ config AT91_ADC
127 help 127 help
128 Say yes here to build support for Atmel AT91 ADC. 128 Say yes here to build support for Atmel AT91 ADC.
129 129
130config AXP288_ADC
131 tristate "X-Powers AXP288 ADC driver"
132 depends on MFD_AXP20X
133 help
134 Say yes here to have support for X-Powers power management IC (PMIC) ADC
135 device. Depending on platform configuration, this general purpose ADC can
136 be used for sampling sensors such as thermal resistors.
137
130config EXYNOS_ADC 138config EXYNOS_ADC
131 tristate "Exynos ADC driver support" 139 tristate "Exynos ADC driver support"
132 depends on ARCH_EXYNOS || ARCH_S3C24XX || ARCH_S3C64XX || (OF && COMPILE_TEST) 140 depends on ARCH_EXYNOS || ARCH_S3C24XX || ARCH_S3C64XX || (OF && COMPILE_TEST)
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index cb88a6a23b8f..f30093f5b67a 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_AD7793) += ad7793.o
14obj-$(CONFIG_AD7887) += ad7887.o 14obj-$(CONFIG_AD7887) += ad7887.o
15obj-$(CONFIG_AD799X) += ad799x.o 15obj-$(CONFIG_AD799X) += ad799x.o
16obj-$(CONFIG_AT91_ADC) += at91_adc.o 16obj-$(CONFIG_AT91_ADC) += at91_adc.o
17obj-$(CONFIG_AXP288_ADC) += axp288_adc.o
17obj-$(CONFIG_EXYNOS_ADC) += exynos_adc.o 18obj-$(CONFIG_EXYNOS_ADC) += exynos_adc.o
18obj-$(CONFIG_LP8788_ADC) += lp8788_adc.o 19obj-$(CONFIG_LP8788_ADC) += lp8788_adc.o
19obj-$(CONFIG_MAX1027) += max1027.o 20obj-$(CONFIG_MAX1027) += max1027.o
diff --git a/drivers/iio/adc/axp288_adc.c b/drivers/iio/adc/axp288_adc.c
new file mode 100644
index 000000000000..08bcfb061ca5
--- /dev/null
+++ b/drivers/iio/adc/axp288_adc.c
@@ -0,0 +1,261 @@
1/*
2 * axp288_adc.c - X-Powers AXP288 PMIC ADC Driver
3 *
4 * Copyright (C) 2014 Intel Corporation
5 *
6 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; version 2 of the License.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 */
18
19#include <linux/module.h>
20#include <linux/kernel.h>
21#include <linux/device.h>
22#include <linux/regmap.h>
23#include <linux/mfd/axp20x.h>
24#include <linux/platform_device.h>
25
26#include <linux/iio/iio.h>
27#include <linux/iio/machine.h>
28#include <linux/iio/driver.h>
29
30#define AXP288_ADC_EN_MASK 0xF1
31#define AXP288_ADC_TS_PIN_GPADC 0xF2
32#define AXP288_ADC_TS_PIN_ON 0xF3
33
34enum axp288_adc_id {
35 AXP288_ADC_TS,
36 AXP288_ADC_PMIC,
37 AXP288_ADC_GP,
38 AXP288_ADC_BATT_CHRG_I,
39 AXP288_ADC_BATT_DISCHRG_I,
40 AXP288_ADC_BATT_V,
41 AXP288_ADC_NR_CHAN,
42};
43
44struct axp288_adc_info {
45 int irq;
46 struct regmap *regmap;
47};
48
49static const struct iio_chan_spec const axp288_adc_channels[] = {
50 {
51 .indexed = 1,
52 .type = IIO_TEMP,
53 .channel = 0,
54 .address = AXP288_TS_ADC_H,
55 .datasheet_name = "TS_PIN",
56 }, {
57 .indexed = 1,
58 .type = IIO_TEMP,
59 .channel = 1,
60 .address = AXP288_PMIC_ADC_H,
61 .datasheet_name = "PMIC_TEMP",
62 }, {
63 .indexed = 1,
64 .type = IIO_TEMP,
65 .channel = 2,
66 .address = AXP288_GP_ADC_H,
67 .datasheet_name = "GPADC",
68 }, {
69 .indexed = 1,
70 .type = IIO_CURRENT,
71 .channel = 3,
72 .address = AXP20X_BATT_CHRG_I_H,
73 .datasheet_name = "BATT_CHG_I",
74 .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
75 }, {
76 .indexed = 1,
77 .type = IIO_CURRENT,
78 .channel = 4,
79 .address = AXP20X_BATT_DISCHRG_I_H,
80 .datasheet_name = "BATT_DISCHRG_I",
81 .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
82 }, {
83 .indexed = 1,
84 .type = IIO_VOLTAGE,
85 .channel = 5,
86 .address = AXP20X_BATT_V_H,
87 .datasheet_name = "BATT_V",
88 .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
89 },
90};
91
92#define AXP288_ADC_MAP(_adc_channel_label, _consumer_dev_name, \
93 _consumer_channel) \
94 { \
95 .adc_channel_label = _adc_channel_label, \
96 .consumer_dev_name = _consumer_dev_name, \
97 .consumer_channel = _consumer_channel, \
98 }
99
100/* for consumer drivers */
101static struct iio_map axp288_adc_default_maps[] = {
102 AXP288_ADC_MAP("TS_PIN", "axp288-batt", "axp288-batt-temp"),
103 AXP288_ADC_MAP("PMIC_TEMP", "axp288-pmic", "axp288-pmic-temp"),
104 AXP288_ADC_MAP("GPADC", "axp288-gpadc", "axp288-system-temp"),
105 AXP288_ADC_MAP("BATT_CHG_I", "axp288-chrg", "axp288-chrg-curr"),
106 AXP288_ADC_MAP("BATT_DISCHRG_I", "axp288-chrg", "axp288-chrg-d-curr"),
107 AXP288_ADC_MAP("BATT_V", "axp288-batt", "axp288-batt-volt"),
108 {},
109};
110
111static int axp288_adc_read_channel(int *val, unsigned long address,
112 struct regmap *regmap)
113{
114 u8 buf[2];
115
116 if (regmap_bulk_read(regmap, address, buf, 2))
117 return -EIO;
118 *val = (buf[0] << 4) + ((buf[1] >> 4) & 0x0F);
119
120 return IIO_VAL_INT;
121}
122
123static int axp288_adc_set_ts(struct regmap *regmap, unsigned int mode,
124 unsigned long address)
125{
126 /* channels other than GPADC do not need to switch TS pin */
127 if (address != AXP288_GP_ADC_H)
128 return 0;
129
130 return regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, mode);
131}
132
133static int axp288_adc_read_raw(struct iio_dev *indio_dev,
134 struct iio_chan_spec const *chan,
135 int *val, int *val2, long mask)
136{
137 int ret;
138 struct axp288_adc_info *info = iio_priv(indio_dev);
139
140 mutex_lock(&indio_dev->mlock);
141 switch (mask) {
142 case IIO_CHAN_INFO_RAW:
143 if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_GPADC,
144 chan->address)) {
145 dev_err(&indio_dev->dev, "GPADC mode\n");
146 ret = -EINVAL;
147 break;
148 }
149 ret = axp288_adc_read_channel(val, chan->address, info->regmap);
150 if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_ON,
151 chan->address))
152 dev_err(&indio_dev->dev, "TS pin restore\n");
153 break;
154 case IIO_CHAN_INFO_PROCESSED:
155 ret = axp288_adc_read_channel(val, chan->address, info->regmap);
156 break;
157 default:
158 ret = -EINVAL;
159 }
160 mutex_unlock(&indio_dev->mlock);
161
162 return ret;
163}
164
165static int axp288_adc_set_state(struct regmap *regmap)
166{
167 /* ADC should be always enabled for internal FG to function */
168 if (regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, AXP288_ADC_TS_PIN_ON))
169 return -EIO;
170
171 return regmap_write(regmap, AXP20X_ADC_EN1, AXP288_ADC_EN_MASK);
172}
173
174static const struct iio_info axp288_adc_iio_info = {
175 .read_raw = &axp288_adc_read_raw,
176 .driver_module = THIS_MODULE,
177};
178
179static int axp288_adc_probe(struct platform_device *pdev)
180{
181 int ret;
182 struct axp288_adc_info *info;
183 struct iio_dev *indio_dev;
184 struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent);
185
186 indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*info));
187 if (!indio_dev)
188 return -ENOMEM;
189
190 info = iio_priv(indio_dev);
191 info->irq = platform_get_irq(pdev, 0);
192 if (info->irq < 0) {
193 dev_err(&pdev->dev, "no irq resource?\n");
194 return info->irq;
195 }
196 platform_set_drvdata(pdev, indio_dev);
197 info->regmap = axp20x->regmap;
198 /*
199 * Set ADC to enabled state at all time, including system suspend.
200 * otherwise internal fuel gauge functionality may be affected.
201 */
202 ret = axp288_adc_set_state(axp20x->regmap);
203 if (ret) {
204 dev_err(&pdev->dev, "unable to enable ADC device\n");
205 return ret;
206 }
207
208 indio_dev->dev.parent = &pdev->dev;
209 indio_dev->name = pdev->name;
210 indio_dev->channels = axp288_adc_channels;
211 indio_dev->num_channels = ARRAY_SIZE(axp288_adc_channels);
212 indio_dev->info = &axp288_adc_iio_info;
213 indio_dev->modes = INDIO_DIRECT_MODE;
214 ret = iio_map_array_register(indio_dev, axp288_adc_default_maps);
215 if (ret < 0)
216 return ret;
217
218 ret = iio_device_register(indio_dev);
219 if (ret < 0) {
220 dev_err(&pdev->dev, "unable to register iio device\n");
221 goto err_array_unregister;
222 }
223 return 0;
224
225err_array_unregister:
226 iio_map_array_unregister(indio_dev);
227
228 return ret;
229}
230
231static int axp288_adc_remove(struct platform_device *pdev)
232{
233 struct iio_dev *indio_dev = platform_get_drvdata(pdev);
234
235 iio_device_unregister(indio_dev);
236 iio_map_array_unregister(indio_dev);
237
238 return 0;
239}
240
241static struct platform_device_id axp288_adc_id_table[] = {
242 { .name = "axp288_adc" },
243 {},
244};
245
246static struct platform_driver axp288_adc_driver = {
247 .probe = axp288_adc_probe,
248 .remove = axp288_adc_remove,
249 .id_table = axp288_adc_id_table,
250 .driver = {
251 .name = "axp288_adc",
252 },
253};
254
255MODULE_DEVICE_TABLE(platform, axp288_adc_id_table);
256
257module_platform_driver(axp288_adc_driver);
258
259MODULE_AUTHOR("Jacob Pan <jacob.jun.pan@linux.intel.com>");
260MODULE_DESCRIPTION("X-Powers AXP288 ADC Driver");
261MODULE_LICENSE("GPL");
diff --git a/drivers/iio/adc/men_z188_adc.c b/drivers/iio/adc/men_z188_adc.c
index b58d6302521f..d095efe1ba14 100644
--- a/drivers/iio/adc/men_z188_adc.c
+++ b/drivers/iio/adc/men_z188_adc.c
@@ -152,6 +152,7 @@ static void men_z188_remove(struct mcb_device *dev)
152 152
153static const struct mcb_device_id men_z188_ids[] = { 153static const struct mcb_device_id men_z188_ids[] = {
154 { .device = 0xbc }, 154 { .device = 0xbc },
155 { }
155}; 156};
156MODULE_DEVICE_TABLE(mcb, men_z188_ids); 157MODULE_DEVICE_TABLE(mcb, men_z188_ids);
157 158
diff --git a/drivers/iio/gyro/bmg160.c b/drivers/iio/gyro/bmg160.c
index 1f967e0d688e..60451b328242 100644
--- a/drivers/iio/gyro/bmg160.c
+++ b/drivers/iio/gyro/bmg160.c
@@ -67,6 +67,9 @@
67#define BMG160_REG_INT_EN_0 0x15 67#define BMG160_REG_INT_EN_0 0x15
68#define BMG160_DATA_ENABLE_INT BIT(7) 68#define BMG160_DATA_ENABLE_INT BIT(7)
69 69
70#define BMG160_REG_INT_EN_1 0x16
71#define BMG160_INT1_BIT_OD BIT(1)
72
70#define BMG160_REG_XOUT_L 0x02 73#define BMG160_REG_XOUT_L 0x02
71#define BMG160_AXIS_TO_REG(axis) (BMG160_REG_XOUT_L + (axis * 2)) 74#define BMG160_AXIS_TO_REG(axis) (BMG160_REG_XOUT_L + (axis * 2))
72 75
@@ -82,6 +85,9 @@
82 85
83#define BMG160_REG_INT_STATUS_2 0x0B 86#define BMG160_REG_INT_STATUS_2 0x0B
84#define BMG160_ANY_MOTION_MASK 0x07 87#define BMG160_ANY_MOTION_MASK 0x07
88#define BMG160_ANY_MOTION_BIT_X BIT(0)
89#define BMG160_ANY_MOTION_BIT_Y BIT(1)
90#define BMG160_ANY_MOTION_BIT_Z BIT(2)
85 91
86#define BMG160_REG_TEMP 0x08 92#define BMG160_REG_TEMP 0x08
87#define BMG160_TEMP_CENTER_VAL 23 93#define BMG160_TEMP_CENTER_VAL 23
@@ -222,6 +228,19 @@ static int bmg160_chip_init(struct bmg160_data *data)
222 data->slope_thres = ret; 228 data->slope_thres = ret;
223 229
224 /* Set default interrupt mode */ 230 /* Set default interrupt mode */
231 ret = i2c_smbus_read_byte_data(data->client, BMG160_REG_INT_EN_1);
232 if (ret < 0) {
233 dev_err(&data->client->dev, "Error reading reg_int_en_1\n");
234 return ret;
235 }
236 ret &= ~BMG160_INT1_BIT_OD;
237 ret = i2c_smbus_write_byte_data(data->client,
238 BMG160_REG_INT_EN_1, ret);
239 if (ret < 0) {
240 dev_err(&data->client->dev, "Error writing reg_int_en_1\n");
241 return ret;
242 }
243
225 ret = i2c_smbus_write_byte_data(data->client, 244 ret = i2c_smbus_write_byte_data(data->client,
226 BMG160_REG_INT_RST_LATCH, 245 BMG160_REG_INT_RST_LATCH,
227 BMG160_INT_MODE_LATCH_INT | 246 BMG160_INT_MODE_LATCH_INT |
@@ -237,7 +256,7 @@ static int bmg160_chip_init(struct bmg160_data *data)
237 256
238static int bmg160_set_power_state(struct bmg160_data *data, bool on) 257static int bmg160_set_power_state(struct bmg160_data *data, bool on)
239{ 258{
240#ifdef CONFIG_PM_RUNTIME 259#ifdef CONFIG_PM
241 int ret; 260 int ret;
242 261
243 if (on) 262 if (on)
@@ -250,6 +269,9 @@ static int bmg160_set_power_state(struct bmg160_data *data, bool on)
250 if (ret < 0) { 269 if (ret < 0) {
251 dev_err(&data->client->dev, 270 dev_err(&data->client->dev,
252 "Failed: bmg160_set_power_state for %d\n", on); 271 "Failed: bmg160_set_power_state for %d\n", on);
272 if (on)
273 pm_runtime_put_noidle(&data->client->dev);
274
253 return ret; 275 return ret;
254 } 276 }
255#endif 277#endif
@@ -705,6 +727,7 @@ static int bmg160_write_event_config(struct iio_dev *indio_dev,
705 727
706 ret = bmg160_setup_any_motion_interrupt(data, state); 728 ret = bmg160_setup_any_motion_interrupt(data, state);
707 if (ret < 0) { 729 if (ret < 0) {
730 bmg160_set_power_state(data, false);
708 mutex_unlock(&data->mutex); 731 mutex_unlock(&data->mutex);
709 return ret; 732 return ret;
710 } 733 }
@@ -743,7 +766,7 @@ static const struct attribute_group bmg160_attrs_group = {
743 766
744static const struct iio_event_spec bmg160_event = { 767static const struct iio_event_spec bmg160_event = {
745 .type = IIO_EV_TYPE_ROC, 768 .type = IIO_EV_TYPE_ROC,
746 .dir = IIO_EV_DIR_RISING | IIO_EV_DIR_FALLING, 769 .dir = IIO_EV_DIR_EITHER,
747 .mask_shared_by_type = BIT(IIO_EV_INFO_VALUE) | 770 .mask_shared_by_type = BIT(IIO_EV_INFO_VALUE) |
748 BIT(IIO_EV_INFO_ENABLE) 771 BIT(IIO_EV_INFO_ENABLE)
749}; 772};
@@ -871,6 +894,7 @@ static int bmg160_data_rdy_trigger_set_state(struct iio_trigger *trig,
871 else 894 else
872 ret = bmg160_setup_new_data_interrupt(data, state); 895 ret = bmg160_setup_new_data_interrupt(data, state);
873 if (ret < 0) { 896 if (ret < 0) {
897 bmg160_set_power_state(data, false);
874 mutex_unlock(&data->mutex); 898 mutex_unlock(&data->mutex);
875 return ret; 899 return ret;
876 } 900 }
@@ -908,10 +932,24 @@ static irqreturn_t bmg160_event_handler(int irq, void *private)
908 else 932 else
909 dir = IIO_EV_DIR_FALLING; 933 dir = IIO_EV_DIR_FALLING;
910 934
911 if (ret & BMG160_ANY_MOTION_MASK) 935 if (ret & BMG160_ANY_MOTION_BIT_X)
912 iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ANGL_VEL, 936 iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ANGL_VEL,
913 0, 937 0,
914 IIO_MOD_X_OR_Y_OR_Z, 938 IIO_MOD_X,
939 IIO_EV_TYPE_ROC,
940 dir),
941 data->timestamp);
942 if (ret & BMG160_ANY_MOTION_BIT_Y)
943 iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ANGL_VEL,
944 0,
945 IIO_MOD_Y,
946 IIO_EV_TYPE_ROC,
947 dir),
948 data->timestamp);
949 if (ret & BMG160_ANY_MOTION_BIT_Z)
950 iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ANGL_VEL,
951 0,
952 IIO_MOD_Z,
915 IIO_EV_TYPE_ROC, 953 IIO_EV_TYPE_ROC,
916 dir), 954 dir),
917 data->timestamp); 955 data->timestamp);
@@ -1164,13 +1202,20 @@ static int bmg160_resume(struct device *dev)
1164} 1202}
1165#endif 1203#endif
1166 1204
1167#ifdef CONFIG_PM_RUNTIME 1205#ifdef CONFIG_PM
1168static int bmg160_runtime_suspend(struct device *dev) 1206static int bmg160_runtime_suspend(struct device *dev)
1169{ 1207{
1170 struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev)); 1208 struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
1171 struct bmg160_data *data = iio_priv(indio_dev); 1209 struct bmg160_data *data = iio_priv(indio_dev);
1210 int ret;
1211
1212 ret = bmg160_set_mode(data, BMG160_MODE_SUSPEND);
1213 if (ret < 0) {
1214 dev_err(&data->client->dev, "set mode failed\n");
1215 return -EAGAIN;
1216 }
1172 1217
1173 return bmg160_set_mode(data, BMG160_MODE_SUSPEND); 1218 return 0;
1174} 1219}
1175 1220
1176static int bmg160_runtime_resume(struct device *dev) 1221static int bmg160_runtime_resume(struct device *dev)
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index f42ab14105ac..20ca6a619476 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -911,7 +911,7 @@ static struct scsi_host_template iscsi_iser_sht = {
911 .module = THIS_MODULE, 911 .module = THIS_MODULE,
912 .name = "iSCSI Initiator over iSER", 912 .name = "iSCSI Initiator over iSER",
913 .queuecommand = iscsi_queuecommand, 913 .queuecommand = iscsi_queuecommand,
914 .change_queue_depth = iscsi_change_queue_depth, 914 .change_queue_depth = scsi_change_queue_depth,
915 .sg_tablesize = ISCSI_ISER_SG_TABLESIZE, 915 .sg_tablesize = ISCSI_ISER_SG_TABLESIZE,
916 .max_sectors = 1024, 916 .max_sectors = 1024,
917 .cmd_per_lun = ISER_DEF_CMD_PER_LUN, 917 .cmd_per_lun = ISER_DEF_CMD_PER_LUN,
@@ -922,6 +922,7 @@ static struct scsi_host_template iscsi_iser_sht = {
922 .use_clustering = DISABLE_CLUSTERING, 922 .use_clustering = DISABLE_CLUSTERING,
923 .proc_name = "iscsi_iser", 923 .proc_name = "iscsi_iser",
924 .this_id = -1, 924 .this_id = -1,
925 .track_queue_depth = 1,
925}; 926};
926 927
927static struct iscsi_transport iscsi_iser_transport = { 928static struct iscsi_transport iscsi_iser_transport = {
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 62d2a18e1b41..5461924c9f10 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -123,10 +123,15 @@ MODULE_PARM_DESC(dev_loss_tmo,
123 " if fast_io_fail_tmo has not been set. \"off\" means that" 123 " if fast_io_fail_tmo has not been set. \"off\" means that"
124 " this functionality is disabled."); 124 " this functionality is disabled.");
125 125
126static unsigned ch_count;
127module_param(ch_count, uint, 0444);
128MODULE_PARM_DESC(ch_count,
129 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");
130
126static void srp_add_one(struct ib_device *device); 131static void srp_add_one(struct ib_device *device);
127static void srp_remove_one(struct ib_device *device); 132static void srp_remove_one(struct ib_device *device);
128static void srp_recv_completion(struct ib_cq *cq, void *target_ptr); 133static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr);
129static void srp_send_completion(struct ib_cq *cq, void *target_ptr); 134static void srp_send_completion(struct ib_cq *cq, void *ch_ptr);
130static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event); 135static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
131 136
132static struct scsi_transport_template *ib_srp_transport_template; 137static struct scsi_transport_template *ib_srp_transport_template;
@@ -262,7 +267,7 @@ static int srp_init_qp(struct srp_target_port *target,
262 267
263 ret = ib_find_pkey(target->srp_host->srp_dev->dev, 268 ret = ib_find_pkey(target->srp_host->srp_dev->dev,
264 target->srp_host->port, 269 target->srp_host->port,
265 be16_to_cpu(target->path.pkey), 270 be16_to_cpu(target->pkey),
266 &attr->pkey_index); 271 &attr->pkey_index);
267 if (ret) 272 if (ret)
268 goto out; 273 goto out;
@@ -283,18 +288,23 @@ out:
283 return ret; 288 return ret;
284} 289}
285 290
286static int srp_new_cm_id(struct srp_target_port *target) 291static int srp_new_cm_id(struct srp_rdma_ch *ch)
287{ 292{
293 struct srp_target_port *target = ch->target;
288 struct ib_cm_id *new_cm_id; 294 struct ib_cm_id *new_cm_id;
289 295
290 new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev, 296 new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
291 srp_cm_handler, target); 297 srp_cm_handler, ch);
292 if (IS_ERR(new_cm_id)) 298 if (IS_ERR(new_cm_id))
293 return PTR_ERR(new_cm_id); 299 return PTR_ERR(new_cm_id);
294 300
295 if (target->cm_id) 301 if (ch->cm_id)
296 ib_destroy_cm_id(target->cm_id); 302 ib_destroy_cm_id(ch->cm_id);
297 target->cm_id = new_cm_id; 303 ch->cm_id = new_cm_id;
304 ch->path.sgid = target->sgid;
305 ch->path.dgid = target->orig_dgid;
306 ch->path.pkey = target->pkey;
307 ch->path.service_id = target->service_id;
298 308
299 return 0; 309 return 0;
300} 310}
@@ -443,8 +453,44 @@ static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
443 dev->max_pages_per_mr); 453 dev->max_pages_per_mr);
444} 454}
445 455
446static int srp_create_target_ib(struct srp_target_port *target) 456/**
457 * srp_destroy_qp() - destroy an RDMA queue pair
458 * @ch: SRP RDMA channel.
459 *
460 * Change a queue pair into the error state and wait until all receive
461 * completions have been processed before destroying it. This avoids that
462 * the receive completion handler can access the queue pair while it is
463 * being destroyed.
464 */
465static void srp_destroy_qp(struct srp_rdma_ch *ch)
466{
467 struct srp_target_port *target = ch->target;
468 static struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
469 static struct ib_recv_wr wr = { .wr_id = SRP_LAST_WR_ID };
470 struct ib_recv_wr *bad_wr;
471 int ret;
472
473 /* Destroying a QP and reusing ch->done is only safe if not connected */
474 WARN_ON_ONCE(target->connected);
475
476 ret = ib_modify_qp(ch->qp, &attr, IB_QP_STATE);
477 WARN_ONCE(ret, "ib_cm_init_qp_attr() returned %d\n", ret);
478 if (ret)
479 goto out;
480
481 init_completion(&ch->done);
482 ret = ib_post_recv(ch->qp, &wr, &bad_wr);
483 WARN_ONCE(ret, "ib_post_recv() returned %d\n", ret);
484 if (ret == 0)
485 wait_for_completion(&ch->done);
486
487out:
488 ib_destroy_qp(ch->qp);
489}
490
491static int srp_create_ch_ib(struct srp_rdma_ch *ch)
447{ 492{
493 struct srp_target_port *target = ch->target;
448 struct srp_device *dev = target->srp_host->srp_dev; 494 struct srp_device *dev = target->srp_host->srp_dev;
449 struct ib_qp_init_attr *init_attr; 495 struct ib_qp_init_attr *init_attr;
450 struct ib_cq *recv_cq, *send_cq; 496 struct ib_cq *recv_cq, *send_cq;
@@ -458,15 +504,16 @@ static int srp_create_target_ib(struct srp_target_port *target)
458 if (!init_attr) 504 if (!init_attr)
459 return -ENOMEM; 505 return -ENOMEM;
460 506
461 recv_cq = ib_create_cq(dev->dev, srp_recv_completion, NULL, target, 507 /* + 1 for SRP_LAST_WR_ID */
462 target->queue_size, target->comp_vector); 508 recv_cq = ib_create_cq(dev->dev, srp_recv_completion, NULL, ch,
509 target->queue_size + 1, ch->comp_vector);
463 if (IS_ERR(recv_cq)) { 510 if (IS_ERR(recv_cq)) {
464 ret = PTR_ERR(recv_cq); 511 ret = PTR_ERR(recv_cq);
465 goto err; 512 goto err;
466 } 513 }
467 514
468 send_cq = ib_create_cq(dev->dev, srp_send_completion, NULL, target, 515 send_cq = ib_create_cq(dev->dev, srp_send_completion, NULL, ch,
469 m * target->queue_size, target->comp_vector); 516 m * target->queue_size, ch->comp_vector);
470 if (IS_ERR(send_cq)) { 517 if (IS_ERR(send_cq)) {
471 ret = PTR_ERR(send_cq); 518 ret = PTR_ERR(send_cq);
472 goto err_recv_cq; 519 goto err_recv_cq;
@@ -476,7 +523,7 @@ static int srp_create_target_ib(struct srp_target_port *target)
476 523
477 init_attr->event_handler = srp_qp_event; 524 init_attr->event_handler = srp_qp_event;
478 init_attr->cap.max_send_wr = m * target->queue_size; 525 init_attr->cap.max_send_wr = m * target->queue_size;
479 init_attr->cap.max_recv_wr = target->queue_size; 526 init_attr->cap.max_recv_wr = target->queue_size + 1;
480 init_attr->cap.max_recv_sge = 1; 527 init_attr->cap.max_recv_sge = 1;
481 init_attr->cap.max_send_sge = 1; 528 init_attr->cap.max_send_sge = 1;
482 init_attr->sq_sig_type = IB_SIGNAL_REQ_WR; 529 init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
@@ -502,9 +549,9 @@ static int srp_create_target_ib(struct srp_target_port *target)
502 "FR pool allocation failed (%d)\n", ret); 549 "FR pool allocation failed (%d)\n", ret);
503 goto err_qp; 550 goto err_qp;
504 } 551 }
505 if (target->fr_pool) 552 if (ch->fr_pool)
506 srp_destroy_fr_pool(target->fr_pool); 553 srp_destroy_fr_pool(ch->fr_pool);
507 target->fr_pool = fr_pool; 554 ch->fr_pool = fr_pool;
508 } else if (!dev->use_fast_reg && dev->has_fmr) { 555 } else if (!dev->use_fast_reg && dev->has_fmr) {
509 fmr_pool = srp_alloc_fmr_pool(target); 556 fmr_pool = srp_alloc_fmr_pool(target);
510 if (IS_ERR(fmr_pool)) { 557 if (IS_ERR(fmr_pool)) {
@@ -513,21 +560,21 @@ static int srp_create_target_ib(struct srp_target_port *target)
513 "FMR pool allocation failed (%d)\n", ret); 560 "FMR pool allocation failed (%d)\n", ret);
514 goto err_qp; 561 goto err_qp;
515 } 562 }
516 if (target->fmr_pool) 563 if (ch->fmr_pool)
517 ib_destroy_fmr_pool(target->fmr_pool); 564 ib_destroy_fmr_pool(ch->fmr_pool);
518 target->fmr_pool = fmr_pool; 565 ch->fmr_pool = fmr_pool;
519 } 566 }
520 567
521 if (target->qp) 568 if (ch->qp)
522 ib_destroy_qp(target->qp); 569 srp_destroy_qp(ch);
523 if (target->recv_cq) 570 if (ch->recv_cq)
524 ib_destroy_cq(target->recv_cq); 571 ib_destroy_cq(ch->recv_cq);
525 if (target->send_cq) 572 if (ch->send_cq)
526 ib_destroy_cq(target->send_cq); 573 ib_destroy_cq(ch->send_cq);
527 574
528 target->qp = qp; 575 ch->qp = qp;
529 target->recv_cq = recv_cq; 576 ch->recv_cq = recv_cq;
530 target->send_cq = send_cq; 577 ch->send_cq = send_cq;
531 578
532 kfree(init_attr); 579 kfree(init_attr);
533 return 0; 580 return 0;
@@ -548,93 +595,117 @@ err:
548 595
549/* 596/*
550 * Note: this function may be called without srp_alloc_iu_bufs() having been 597 * Note: this function may be called without srp_alloc_iu_bufs() having been
551 * invoked. Hence the target->[rt]x_ring checks. 598 * invoked. Hence the ch->[rt]x_ring checks.
552 */ 599 */
553static void srp_free_target_ib(struct srp_target_port *target) 600static void srp_free_ch_ib(struct srp_target_port *target,
601 struct srp_rdma_ch *ch)
554{ 602{
555 struct srp_device *dev = target->srp_host->srp_dev; 603 struct srp_device *dev = target->srp_host->srp_dev;
556 int i; 604 int i;
557 605
606 if (!ch->target)
607 return;
608
609 if (ch->cm_id) {
610 ib_destroy_cm_id(ch->cm_id);
611 ch->cm_id = NULL;
612 }
613
614 /* If srp_new_cm_id() succeeded but srp_create_ch_ib() not, return. */
615 if (!ch->qp)
616 return;
617
558 if (dev->use_fast_reg) { 618 if (dev->use_fast_reg) {
559 if (target->fr_pool) 619 if (ch->fr_pool)
560 srp_destroy_fr_pool(target->fr_pool); 620 srp_destroy_fr_pool(ch->fr_pool);
561 } else { 621 } else {
562 if (target->fmr_pool) 622 if (ch->fmr_pool)
563 ib_destroy_fmr_pool(target->fmr_pool); 623 ib_destroy_fmr_pool(ch->fmr_pool);
564 } 624 }
565 ib_destroy_qp(target->qp); 625 srp_destroy_qp(ch);
566 ib_destroy_cq(target->send_cq); 626 ib_destroy_cq(ch->send_cq);
567 ib_destroy_cq(target->recv_cq); 627 ib_destroy_cq(ch->recv_cq);
628
629 /*
630 * Avoid that the SCSI error handler tries to use this channel after
631 * it has been freed. The SCSI error handler can namely continue
632 * trying to perform recovery actions after scsi_remove_host()
633 * returned.
634 */
635 ch->target = NULL;
568 636
569 target->qp = NULL; 637 ch->qp = NULL;
570 target->send_cq = target->recv_cq = NULL; 638 ch->send_cq = ch->recv_cq = NULL;
571 639
572 if (target->rx_ring) { 640 if (ch->rx_ring) {
573 for (i = 0; i < target->queue_size; ++i) 641 for (i = 0; i < target->queue_size; ++i)
574 srp_free_iu(target->srp_host, target->rx_ring[i]); 642 srp_free_iu(target->srp_host, ch->rx_ring[i]);
575 kfree(target->rx_ring); 643 kfree(ch->rx_ring);
576 target->rx_ring = NULL; 644 ch->rx_ring = NULL;
577 } 645 }
578 if (target->tx_ring) { 646 if (ch->tx_ring) {
579 for (i = 0; i < target->queue_size; ++i) 647 for (i = 0; i < target->queue_size; ++i)
580 srp_free_iu(target->srp_host, target->tx_ring[i]); 648 srp_free_iu(target->srp_host, ch->tx_ring[i]);
581 kfree(target->tx_ring); 649 kfree(ch->tx_ring);
582 target->tx_ring = NULL; 650 ch->tx_ring = NULL;
583 } 651 }
584} 652}
585 653
586static void srp_path_rec_completion(int status, 654static void srp_path_rec_completion(int status,
587 struct ib_sa_path_rec *pathrec, 655 struct ib_sa_path_rec *pathrec,
588 void *target_ptr) 656 void *ch_ptr)
589{ 657{
590 struct srp_target_port *target = target_ptr; 658 struct srp_rdma_ch *ch = ch_ptr;
659 struct srp_target_port *target = ch->target;
591 660
592 target->status = status; 661 ch->status = status;
593 if (status) 662 if (status)
594 shost_printk(KERN_ERR, target->scsi_host, 663 shost_printk(KERN_ERR, target->scsi_host,
595 PFX "Got failed path rec status %d\n", status); 664 PFX "Got failed path rec status %d\n", status);
596 else 665 else
597 target->path = *pathrec; 666 ch->path = *pathrec;
598 complete(&target->done); 667 complete(&ch->done);
599} 668}
600 669
601static int srp_lookup_path(struct srp_target_port *target) 670static int srp_lookup_path(struct srp_rdma_ch *ch)
602{ 671{
672 struct srp_target_port *target = ch->target;
603 int ret; 673 int ret;
604 674
605 target->path.numb_path = 1; 675 ch->path.numb_path = 1;
606 676
607 init_completion(&target->done); 677 init_completion(&ch->done);
608 678
609 target->path_query_id = ib_sa_path_rec_get(&srp_sa_client, 679 ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
610 target->srp_host->srp_dev->dev, 680 target->srp_host->srp_dev->dev,
611 target->srp_host->port, 681 target->srp_host->port,
612 &target->path, 682 &ch->path,
613 IB_SA_PATH_REC_SERVICE_ID | 683 IB_SA_PATH_REC_SERVICE_ID |
614 IB_SA_PATH_REC_DGID | 684 IB_SA_PATH_REC_DGID |
615 IB_SA_PATH_REC_SGID | 685 IB_SA_PATH_REC_SGID |
616 IB_SA_PATH_REC_NUMB_PATH | 686 IB_SA_PATH_REC_NUMB_PATH |
617 IB_SA_PATH_REC_PKEY, 687 IB_SA_PATH_REC_PKEY,
618 SRP_PATH_REC_TIMEOUT_MS, 688 SRP_PATH_REC_TIMEOUT_MS,
619 GFP_KERNEL, 689 GFP_KERNEL,
620 srp_path_rec_completion, 690 srp_path_rec_completion,
621 target, &target->path_query); 691 ch, &ch->path_query);
622 if (target->path_query_id < 0) 692 if (ch->path_query_id < 0)
623 return target->path_query_id; 693 return ch->path_query_id;
624 694
625 ret = wait_for_completion_interruptible(&target->done); 695 ret = wait_for_completion_interruptible(&ch->done);
626 if (ret < 0) 696 if (ret < 0)
627 return ret; 697 return ret;
628 698
629 if (target->status < 0) 699 if (ch->status < 0)
630 shost_printk(KERN_WARNING, target->scsi_host, 700 shost_printk(KERN_WARNING, target->scsi_host,
631 PFX "Path record query failed\n"); 701 PFX "Path record query failed\n");
632 702
633 return target->status; 703 return ch->status;
634} 704}
635 705
636static int srp_send_req(struct srp_target_port *target) 706static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
637{ 707{
708 struct srp_target_port *target = ch->target;
638 struct { 709 struct {
639 struct ib_cm_req_param param; 710 struct ib_cm_req_param param;
640 struct srp_login_req priv; 711 struct srp_login_req priv;
@@ -645,11 +716,11 @@ static int srp_send_req(struct srp_target_port *target)
645 if (!req) 716 if (!req)
646 return -ENOMEM; 717 return -ENOMEM;
647 718
648 req->param.primary_path = &target->path; 719 req->param.primary_path = &ch->path;
649 req->param.alternate_path = NULL; 720 req->param.alternate_path = NULL;
650 req->param.service_id = target->service_id; 721 req->param.service_id = target->service_id;
651 req->param.qp_num = target->qp->qp_num; 722 req->param.qp_num = ch->qp->qp_num;
652 req->param.qp_type = target->qp->qp_type; 723 req->param.qp_type = ch->qp->qp_type;
653 req->param.private_data = &req->priv; 724 req->param.private_data = &req->priv;
654 req->param.private_data_len = sizeof req->priv; 725 req->param.private_data_len = sizeof req->priv;
655 req->param.flow_control = 1; 726 req->param.flow_control = 1;
@@ -673,6 +744,8 @@ static int srp_send_req(struct srp_target_port *target)
673 req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len); 744 req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
674 req->priv.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT | 745 req->priv.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
675 SRP_BUF_FORMAT_INDIRECT); 746 SRP_BUF_FORMAT_INDIRECT);
747 req->priv.req_flags = (multich ? SRP_MULTICHAN_MULTI :
748 SRP_MULTICHAN_SINGLE);
676 /* 749 /*
677 * In the published SRP specification (draft rev. 16a), the 750 * In the published SRP specification (draft rev. 16a), the
678 * port identifier format is 8 bytes of ID extension followed 751 * port identifier format is 8 bytes of ID extension followed
@@ -684,7 +757,7 @@ static int srp_send_req(struct srp_target_port *target)
684 */ 757 */
685 if (target->io_class == SRP_REV10_IB_IO_CLASS) { 758 if (target->io_class == SRP_REV10_IB_IO_CLASS) {
686 memcpy(req->priv.initiator_port_id, 759 memcpy(req->priv.initiator_port_id,
687 &target->path.sgid.global.interface_id, 8); 760 &target->sgid.global.interface_id, 8);
688 memcpy(req->priv.initiator_port_id + 8, 761 memcpy(req->priv.initiator_port_id + 8,
689 &target->initiator_ext, 8); 762 &target->initiator_ext, 8);
690 memcpy(req->priv.target_port_id, &target->ioc_guid, 8); 763 memcpy(req->priv.target_port_id, &target->ioc_guid, 8);
@@ -693,7 +766,7 @@ static int srp_send_req(struct srp_target_port *target)
693 memcpy(req->priv.initiator_port_id, 766 memcpy(req->priv.initiator_port_id,
694 &target->initiator_ext, 8); 767 &target->initiator_ext, 8);
695 memcpy(req->priv.initiator_port_id + 8, 768 memcpy(req->priv.initiator_port_id + 8,
696 &target->path.sgid.global.interface_id, 8); 769 &target->sgid.global.interface_id, 8);
697 memcpy(req->priv.target_port_id, &target->id_ext, 8); 770 memcpy(req->priv.target_port_id, &target->id_ext, 8);
698 memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8); 771 memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
699 } 772 }
@@ -713,7 +786,7 @@ static int srp_send_req(struct srp_target_port *target)
713 &target->srp_host->srp_dev->dev->node_guid, 8); 786 &target->srp_host->srp_dev->dev->node_guid, 8);
714 } 787 }
715 788
716 status = ib_send_cm_req(target->cm_id, &req->param); 789 status = ib_send_cm_req(ch->cm_id, &req->param);
717 790
718 kfree(req); 791 kfree(req);
719 792
@@ -754,28 +827,35 @@ static bool srp_change_conn_state(struct srp_target_port *target,
754 827
755static void srp_disconnect_target(struct srp_target_port *target) 828static void srp_disconnect_target(struct srp_target_port *target)
756{ 829{
830 struct srp_rdma_ch *ch;
831 int i;
832
757 if (srp_change_conn_state(target, false)) { 833 if (srp_change_conn_state(target, false)) {
758 /* XXX should send SRP_I_LOGOUT request */ 834 /* XXX should send SRP_I_LOGOUT request */
759 835
760 if (ib_send_cm_dreq(target->cm_id, NULL, 0)) { 836 for (i = 0; i < target->ch_count; i++) {
761 shost_printk(KERN_DEBUG, target->scsi_host, 837 ch = &target->ch[i];
762 PFX "Sending CM DREQ failed\n"); 838 if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
839 shost_printk(KERN_DEBUG, target->scsi_host,
840 PFX "Sending CM DREQ failed\n");
841 }
763 } 842 }
764 } 843 }
765} 844}
766 845
767static void srp_free_req_data(struct srp_target_port *target) 846static void srp_free_req_data(struct srp_target_port *target,
847 struct srp_rdma_ch *ch)
768{ 848{
769 struct srp_device *dev = target->srp_host->srp_dev; 849 struct srp_device *dev = target->srp_host->srp_dev;
770 struct ib_device *ibdev = dev->dev; 850 struct ib_device *ibdev = dev->dev;
771 struct srp_request *req; 851 struct srp_request *req;
772 int i; 852 int i;
773 853
774 if (!target->req_ring) 854 if (!ch->target || !ch->req_ring)
775 return; 855 return;
776 856
777 for (i = 0; i < target->req_ring_size; ++i) { 857 for (i = 0; i < target->req_ring_size; ++i) {
778 req = &target->req_ring[i]; 858 req = &ch->req_ring[i];
779 if (dev->use_fast_reg) 859 if (dev->use_fast_reg)
780 kfree(req->fr_list); 860 kfree(req->fr_list);
781 else 861 else
@@ -789,12 +869,13 @@ static void srp_free_req_data(struct srp_target_port *target)
789 kfree(req->indirect_desc); 869 kfree(req->indirect_desc);
790 } 870 }
791 871
792 kfree(target->req_ring); 872 kfree(ch->req_ring);
793 target->req_ring = NULL; 873 ch->req_ring = NULL;
794} 874}
795 875
796static int srp_alloc_req_data(struct srp_target_port *target) 876static int srp_alloc_req_data(struct srp_rdma_ch *ch)
797{ 877{
878 struct srp_target_port *target = ch->target;
798 struct srp_device *srp_dev = target->srp_host->srp_dev; 879 struct srp_device *srp_dev = target->srp_host->srp_dev;
799 struct ib_device *ibdev = srp_dev->dev; 880 struct ib_device *ibdev = srp_dev->dev;
800 struct srp_request *req; 881 struct srp_request *req;
@@ -802,15 +883,13 @@ static int srp_alloc_req_data(struct srp_target_port *target)
802 dma_addr_t dma_addr; 883 dma_addr_t dma_addr;
803 int i, ret = -ENOMEM; 884 int i, ret = -ENOMEM;
804 885
805 INIT_LIST_HEAD(&target->free_reqs); 886 ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
806 887 GFP_KERNEL);
807 target->req_ring = kzalloc(target->req_ring_size * 888 if (!ch->req_ring)
808 sizeof(*target->req_ring), GFP_KERNEL);
809 if (!target->req_ring)
810 goto out; 889 goto out;
811 890
812 for (i = 0; i < target->req_ring_size; ++i) { 891 for (i = 0; i < target->req_ring_size; ++i) {
813 req = &target->req_ring[i]; 892 req = &ch->req_ring[i];
814 mr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *), 893 mr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
815 GFP_KERNEL); 894 GFP_KERNEL);
816 if (!mr_list) 895 if (!mr_list)
@@ -834,8 +913,6 @@ static int srp_alloc_req_data(struct srp_target_port *target)
834 goto out; 913 goto out;
835 914
836 req->indirect_dma_addr = dma_addr; 915 req->indirect_dma_addr = dma_addr;
837 req->index = i;
838 list_add_tail(&req->list, &target->free_reqs);
839 } 916 }
840 ret = 0; 917 ret = 0;
841 918
@@ -860,6 +937,9 @@ static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
860 937
861static void srp_remove_target(struct srp_target_port *target) 938static void srp_remove_target(struct srp_target_port *target)
862{ 939{
940 struct srp_rdma_ch *ch;
941 int i;
942
863 WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED); 943 WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
864 944
865 srp_del_scsi_host_attr(target->scsi_host); 945 srp_del_scsi_host_attr(target->scsi_host);
@@ -868,11 +948,18 @@ static void srp_remove_target(struct srp_target_port *target)
868 scsi_remove_host(target->scsi_host); 948 scsi_remove_host(target->scsi_host);
869 srp_stop_rport_timers(target->rport); 949 srp_stop_rport_timers(target->rport);
870 srp_disconnect_target(target); 950 srp_disconnect_target(target);
871 ib_destroy_cm_id(target->cm_id); 951 for (i = 0; i < target->ch_count; i++) {
872 srp_free_target_ib(target); 952 ch = &target->ch[i];
953 srp_free_ch_ib(target, ch);
954 }
873 cancel_work_sync(&target->tl_err_work); 955 cancel_work_sync(&target->tl_err_work);
874 srp_rport_put(target->rport); 956 srp_rport_put(target->rport);
875 srp_free_req_data(target); 957 for (i = 0; i < target->ch_count; i++) {
958 ch = &target->ch[i];
959 srp_free_req_data(target, ch);
960 }
961 kfree(target->ch);
962 target->ch = NULL;
876 963
877 spin_lock(&target->srp_host->target_lock); 964 spin_lock(&target->srp_host->target_lock);
878 list_del(&target->list); 965 list_del(&target->list);
@@ -898,25 +985,25 @@ static void srp_rport_delete(struct srp_rport *rport)
898 srp_queue_remove_work(target); 985 srp_queue_remove_work(target);
899} 986}
900 987
901static int srp_connect_target(struct srp_target_port *target) 988static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
902{ 989{
903 int retries = 3; 990 struct srp_target_port *target = ch->target;
904 int ret; 991 int ret;
905 992
906 WARN_ON_ONCE(target->connected); 993 WARN_ON_ONCE(!multich && target->connected);
907 994
908 target->qp_in_error = false; 995 target->qp_in_error = false;
909 996
910 ret = srp_lookup_path(target); 997 ret = srp_lookup_path(ch);
911 if (ret) 998 if (ret)
912 return ret; 999 return ret;
913 1000
914 while (1) { 1001 while (1) {
915 init_completion(&target->done); 1002 init_completion(&ch->done);
916 ret = srp_send_req(target); 1003 ret = srp_send_req(ch, multich);
917 if (ret) 1004 if (ret)
918 return ret; 1005 return ret;
919 ret = wait_for_completion_interruptible(&target->done); 1006 ret = wait_for_completion_interruptible(&ch->done);
920 if (ret < 0) 1007 if (ret < 0)
921 return ret; 1008 return ret;
922 1009
@@ -926,13 +1013,13 @@ static int srp_connect_target(struct srp_target_port *target)
926 * back, or SRP_DLID_REDIRECT if we get a lid/qp 1013 * back, or SRP_DLID_REDIRECT if we get a lid/qp
927 * redirect REJ back. 1014 * redirect REJ back.
928 */ 1015 */
929 switch (target->status) { 1016 switch (ch->status) {
930 case 0: 1017 case 0:
931 srp_change_conn_state(target, true); 1018 srp_change_conn_state(target, true);
932 return 0; 1019 return 0;
933 1020
934 case SRP_PORT_REDIRECT: 1021 case SRP_PORT_REDIRECT:
935 ret = srp_lookup_path(target); 1022 ret = srp_lookup_path(ch);
936 if (ret) 1023 if (ret)
937 return ret; 1024 return ret;
938 break; 1025 break;
@@ -941,27 +1028,18 @@ static int srp_connect_target(struct srp_target_port *target)
941 break; 1028 break;
942 1029
943 case SRP_STALE_CONN: 1030 case SRP_STALE_CONN:
944 /* Our current CM id was stale, and is now in timewait.
945 * Try to reconnect with a new one.
946 */
947 if (!retries-- || srp_new_cm_id(target)) {
948 shost_printk(KERN_ERR, target->scsi_host, PFX
949 "giving up on stale connection\n");
950 target->status = -ECONNRESET;
951 return target->status;
952 }
953
954 shost_printk(KERN_ERR, target->scsi_host, PFX 1031 shost_printk(KERN_ERR, target->scsi_host, PFX
955 "retrying stale connection\n"); 1032 "giving up on stale connection\n");
956 break; 1033 ch->status = -ECONNRESET;
1034 return ch->status;
957 1035
958 default: 1036 default:
959 return target->status; 1037 return ch->status;
960 } 1038 }
961 } 1039 }
962} 1040}
963 1041
964static int srp_inv_rkey(struct srp_target_port *target, u32 rkey) 1042static int srp_inv_rkey(struct srp_rdma_ch *ch, u32 rkey)
965{ 1043{
966 struct ib_send_wr *bad_wr; 1044 struct ib_send_wr *bad_wr;
967 struct ib_send_wr wr = { 1045 struct ib_send_wr wr = {
@@ -973,13 +1051,14 @@ static int srp_inv_rkey(struct srp_target_port *target, u32 rkey)
973 .ex.invalidate_rkey = rkey, 1051 .ex.invalidate_rkey = rkey,
974 }; 1052 };
975 1053
976 return ib_post_send(target->qp, &wr, &bad_wr); 1054 return ib_post_send(ch->qp, &wr, &bad_wr);
977} 1055}
978 1056
979static void srp_unmap_data(struct scsi_cmnd *scmnd, 1057static void srp_unmap_data(struct scsi_cmnd *scmnd,
980 struct srp_target_port *target, 1058 struct srp_rdma_ch *ch,
981 struct srp_request *req) 1059 struct srp_request *req)
982{ 1060{
1061 struct srp_target_port *target = ch->target;
983 struct srp_device *dev = target->srp_host->srp_dev; 1062 struct srp_device *dev = target->srp_host->srp_dev;
984 struct ib_device *ibdev = dev->dev; 1063 struct ib_device *ibdev = dev->dev;
985 int i, res; 1064 int i, res;
@@ -993,7 +1072,7 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd,
993 struct srp_fr_desc **pfr; 1072 struct srp_fr_desc **pfr;
994 1073
995 for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) { 1074 for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
996 res = srp_inv_rkey(target, (*pfr)->mr->rkey); 1075 res = srp_inv_rkey(ch, (*pfr)->mr->rkey);
997 if (res < 0) { 1076 if (res < 0) {
998 shost_printk(KERN_ERR, target->scsi_host, PFX 1077 shost_printk(KERN_ERR, target->scsi_host, PFX
999 "Queueing INV WR for rkey %#x failed (%d)\n", 1078 "Queueing INV WR for rkey %#x failed (%d)\n",
@@ -1003,7 +1082,7 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd,
1003 } 1082 }
1004 } 1083 }
1005 if (req->nmdesc) 1084 if (req->nmdesc)
1006 srp_fr_pool_put(target->fr_pool, req->fr_list, 1085 srp_fr_pool_put(ch->fr_pool, req->fr_list,
1007 req->nmdesc); 1086 req->nmdesc);
1008 } else { 1087 } else {
1009 struct ib_pool_fmr **pfmr; 1088 struct ib_pool_fmr **pfmr;
@@ -1018,7 +1097,7 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd,
1018 1097
1019/** 1098/**
1020 * srp_claim_req - Take ownership of the scmnd associated with a request. 1099 * srp_claim_req - Take ownership of the scmnd associated with a request.
1021 * @target: SRP target port. 1100 * @ch: SRP RDMA channel.
1022 * @req: SRP request. 1101 * @req: SRP request.
1023 * @sdev: If not NULL, only take ownership for this SCSI device. 1102 * @sdev: If not NULL, only take ownership for this SCSI device.
1024 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take 1103 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
@@ -1027,14 +1106,14 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd,
1027 * Return value: 1106 * Return value:
1028 * Either NULL or a pointer to the SCSI command the caller became owner of. 1107 * Either NULL or a pointer to the SCSI command the caller became owner of.
1029 */ 1108 */
1030static struct scsi_cmnd *srp_claim_req(struct srp_target_port *target, 1109static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
1031 struct srp_request *req, 1110 struct srp_request *req,
1032 struct scsi_device *sdev, 1111 struct scsi_device *sdev,
1033 struct scsi_cmnd *scmnd) 1112 struct scsi_cmnd *scmnd)
1034{ 1113{
1035 unsigned long flags; 1114 unsigned long flags;
1036 1115
1037 spin_lock_irqsave(&target->lock, flags); 1116 spin_lock_irqsave(&ch->lock, flags);
1038 if (req->scmnd && 1117 if (req->scmnd &&
1039 (!sdev || req->scmnd->device == sdev) && 1118 (!sdev || req->scmnd->device == sdev) &&
1040 (!scmnd || req->scmnd == scmnd)) { 1119 (!scmnd || req->scmnd == scmnd)) {
@@ -1043,40 +1122,37 @@ static struct scsi_cmnd *srp_claim_req(struct srp_target_port *target,
1043 } else { 1122 } else {
1044 scmnd = NULL; 1123 scmnd = NULL;
1045 } 1124 }
1046 spin_unlock_irqrestore(&target->lock, flags); 1125 spin_unlock_irqrestore(&ch->lock, flags);
1047 1126
1048 return scmnd; 1127 return scmnd;
1049} 1128}
1050 1129
1051/** 1130/**
1052 * srp_free_req() - Unmap data and add request to the free request list. 1131 * srp_free_req() - Unmap data and add request to the free request list.
1053 * @target: SRP target port. 1132 * @ch: SRP RDMA channel.
1054 * @req: Request to be freed. 1133 * @req: Request to be freed.
1055 * @scmnd: SCSI command associated with @req. 1134 * @scmnd: SCSI command associated with @req.
1056 * @req_lim_delta: Amount to be added to @target->req_lim. 1135 * @req_lim_delta: Amount to be added to @target->req_lim.
1057 */ 1136 */
1058static void srp_free_req(struct srp_target_port *target, 1137static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
1059 struct srp_request *req, struct scsi_cmnd *scmnd, 1138 struct scsi_cmnd *scmnd, s32 req_lim_delta)
1060 s32 req_lim_delta)
1061{ 1139{
1062 unsigned long flags; 1140 unsigned long flags;
1063 1141
1064 srp_unmap_data(scmnd, target, req); 1142 srp_unmap_data(scmnd, ch, req);
1065 1143
1066 spin_lock_irqsave(&target->lock, flags); 1144 spin_lock_irqsave(&ch->lock, flags);
1067 target->req_lim += req_lim_delta; 1145 ch->req_lim += req_lim_delta;
1068 list_add_tail(&req->list, &target->free_reqs); 1146 spin_unlock_irqrestore(&ch->lock, flags);
1069 spin_unlock_irqrestore(&target->lock, flags);
1070} 1147}
1071 1148
1072static void srp_finish_req(struct srp_target_port *target, 1149static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
1073 struct srp_request *req, struct scsi_device *sdev, 1150 struct scsi_device *sdev, int result)
1074 int result)
1075{ 1151{
1076 struct scsi_cmnd *scmnd = srp_claim_req(target, req, sdev, NULL); 1152 struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);
1077 1153
1078 if (scmnd) { 1154 if (scmnd) {
1079 srp_free_req(target, req, scmnd, 0); 1155 srp_free_req(ch, req, scmnd, 0);
1080 scmnd->result = result; 1156 scmnd->result = result;
1081 scmnd->scsi_done(scmnd); 1157 scmnd->scsi_done(scmnd);
1082 } 1158 }
@@ -1085,9 +1161,10 @@ static void srp_finish_req(struct srp_target_port *target,
1085static void srp_terminate_io(struct srp_rport *rport) 1161static void srp_terminate_io(struct srp_rport *rport)
1086{ 1162{
1087 struct srp_target_port *target = rport->lld_data; 1163 struct srp_target_port *target = rport->lld_data;
1164 struct srp_rdma_ch *ch;
1088 struct Scsi_Host *shost = target->scsi_host; 1165 struct Scsi_Host *shost = target->scsi_host;
1089 struct scsi_device *sdev; 1166 struct scsi_device *sdev;
1090 int i; 1167 int i, j;
1091 1168
1092 /* 1169 /*
1093 * Invoking srp_terminate_io() while srp_queuecommand() is running 1170 * Invoking srp_terminate_io() while srp_queuecommand() is running
@@ -1096,9 +1173,15 @@ static void srp_terminate_io(struct srp_rport *rport)
1096 shost_for_each_device(sdev, shost) 1173 shost_for_each_device(sdev, shost)
1097 WARN_ON_ONCE(sdev->request_queue->request_fn_active); 1174 WARN_ON_ONCE(sdev->request_queue->request_fn_active);
1098 1175
1099 for (i = 0; i < target->req_ring_size; ++i) { 1176 for (i = 0; i < target->ch_count; i++) {
1100 struct srp_request *req = &target->req_ring[i]; 1177 ch = &target->ch[i];
1101 srp_finish_req(target, req, NULL, DID_TRANSPORT_FAILFAST << 16); 1178
1179 for (j = 0; j < target->req_ring_size; ++j) {
1180 struct srp_request *req = &ch->req_ring[j];
1181
1182 srp_finish_req(ch, req, NULL,
1183 DID_TRANSPORT_FAILFAST << 16);
1184 }
1102 } 1185 }
1103} 1186}
1104 1187
@@ -1114,34 +1197,61 @@ static void srp_terminate_io(struct srp_rport *rport)
1114static int srp_rport_reconnect(struct srp_rport *rport) 1197static int srp_rport_reconnect(struct srp_rport *rport)
1115{ 1198{
1116 struct srp_target_port *target = rport->lld_data; 1199 struct srp_target_port *target = rport->lld_data;
1117 int i, ret; 1200 struct srp_rdma_ch *ch;
1201 int i, j, ret = 0;
1202 bool multich = false;
1118 1203
1119 srp_disconnect_target(target); 1204 srp_disconnect_target(target);
1205
1206 if (target->state == SRP_TARGET_SCANNING)
1207 return -ENODEV;
1208
1120 /* 1209 /*
1121 * Now get a new local CM ID so that we avoid confusing the target in 1210 * Now get a new local CM ID so that we avoid confusing the target in
1122 * case things are really fouled up. Doing so also ensures that all CM 1211 * case things are really fouled up. Doing so also ensures that all CM
1123 * callbacks will have finished before a new QP is allocated. 1212 * callbacks will have finished before a new QP is allocated.
1124 */ 1213 */
1125 ret = srp_new_cm_id(target); 1214 for (i = 0; i < target->ch_count; i++) {
1126 1215 ch = &target->ch[i];
1127 for (i = 0; i < target->req_ring_size; ++i) { 1216 if (!ch->target)
1128 struct srp_request *req = &target->req_ring[i]; 1217 break;
1129 srp_finish_req(target, req, NULL, DID_RESET << 16); 1218 ret += srp_new_cm_id(ch);
1130 } 1219 }
1220 for (i = 0; i < target->ch_count; i++) {
1221 ch = &target->ch[i];
1222 if (!ch->target)
1223 break;
1224 for (j = 0; j < target->req_ring_size; ++j) {
1225 struct srp_request *req = &ch->req_ring[j];
1131 1226
1132 /* 1227 srp_finish_req(ch, req, NULL, DID_RESET << 16);
1133 * Whether or not creating a new CM ID succeeded, create a new 1228 }
1134 * QP. This guarantees that all callback functions for the old QP have 1229 }
1135 * finished before any send requests are posted on the new QP. 1230 for (i = 0; i < target->ch_count; i++) {
1136 */ 1231 ch = &target->ch[i];
1137 ret += srp_create_target_ib(target); 1232 if (!ch->target)
1138 1233 break;
1139 INIT_LIST_HEAD(&target->free_tx); 1234 /*
1140 for (i = 0; i < target->queue_size; ++i) 1235 * Whether or not creating a new CM ID succeeded, create a new
1141 list_add(&target->tx_ring[i]->list, &target->free_tx); 1236 * QP. This guarantees that all completion callback function
1237 * invocations have finished before request resetting starts.
1238 */
1239 ret += srp_create_ch_ib(ch);
1142 1240
1143 if (ret == 0) 1241 INIT_LIST_HEAD(&ch->free_tx);
1144 ret = srp_connect_target(target); 1242 for (j = 0; j < target->queue_size; ++j)
1243 list_add(&ch->tx_ring[j]->list, &ch->free_tx);
1244 }
1245 for (i = 0; i < target->ch_count; i++) {
1246 ch = &target->ch[i];
1247 if (ret || !ch->target) {
1248 if (i > 1)
1249 ret = 0;
1250 break;
1251 }
1252 ret = srp_connect_ch(ch, multich);
1253 multich = true;
1254 }
1145 1255
1146 if (ret == 0) 1256 if (ret == 0)
1147 shost_printk(KERN_INFO, target->scsi_host, 1257 shost_printk(KERN_INFO, target->scsi_host,
@@ -1165,12 +1275,12 @@ static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
1165} 1275}
1166 1276
1167static int srp_map_finish_fmr(struct srp_map_state *state, 1277static int srp_map_finish_fmr(struct srp_map_state *state,
1168 struct srp_target_port *target) 1278 struct srp_rdma_ch *ch)
1169{ 1279{
1170 struct ib_pool_fmr *fmr; 1280 struct ib_pool_fmr *fmr;
1171 u64 io_addr = 0; 1281 u64 io_addr = 0;
1172 1282
1173 fmr = ib_fmr_pool_map_phys(target->fmr_pool, state->pages, 1283 fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
1174 state->npages, io_addr); 1284 state->npages, io_addr);
1175 if (IS_ERR(fmr)) 1285 if (IS_ERR(fmr))
1176 return PTR_ERR(fmr); 1286 return PTR_ERR(fmr);
@@ -1184,15 +1294,16 @@ static int srp_map_finish_fmr(struct srp_map_state *state,
1184} 1294}
1185 1295
1186static int srp_map_finish_fr(struct srp_map_state *state, 1296static int srp_map_finish_fr(struct srp_map_state *state,
1187 struct srp_target_port *target) 1297 struct srp_rdma_ch *ch)
1188{ 1298{
1299 struct srp_target_port *target = ch->target;
1189 struct srp_device *dev = target->srp_host->srp_dev; 1300 struct srp_device *dev = target->srp_host->srp_dev;
1190 struct ib_send_wr *bad_wr; 1301 struct ib_send_wr *bad_wr;
1191 struct ib_send_wr wr; 1302 struct ib_send_wr wr;
1192 struct srp_fr_desc *desc; 1303 struct srp_fr_desc *desc;
1193 u32 rkey; 1304 u32 rkey;
1194 1305
1195 desc = srp_fr_pool_get(target->fr_pool); 1306 desc = srp_fr_pool_get(ch->fr_pool);
1196 if (!desc) 1307 if (!desc)
1197 return -ENOMEM; 1308 return -ENOMEM;
1198 1309
@@ -1221,12 +1332,13 @@ static int srp_map_finish_fr(struct srp_map_state *state,
1221 srp_map_desc(state, state->base_dma_addr, state->dma_len, 1332 srp_map_desc(state, state->base_dma_addr, state->dma_len,
1222 desc->mr->rkey); 1333 desc->mr->rkey);
1223 1334
1224 return ib_post_send(target->qp, &wr, &bad_wr); 1335 return ib_post_send(ch->qp, &wr, &bad_wr);
1225} 1336}
1226 1337
1227static int srp_finish_mapping(struct srp_map_state *state, 1338static int srp_finish_mapping(struct srp_map_state *state,
1228 struct srp_target_port *target) 1339 struct srp_rdma_ch *ch)
1229{ 1340{
1341 struct srp_target_port *target = ch->target;
1230 int ret = 0; 1342 int ret = 0;
1231 1343
1232 if (state->npages == 0) 1344 if (state->npages == 0)
@@ -1237,8 +1349,8 @@ static int srp_finish_mapping(struct srp_map_state *state,
1237 target->rkey); 1349 target->rkey);
1238 else 1350 else
1239 ret = target->srp_host->srp_dev->use_fast_reg ? 1351 ret = target->srp_host->srp_dev->use_fast_reg ?
1240 srp_map_finish_fr(state, target) : 1352 srp_map_finish_fr(state, ch) :
1241 srp_map_finish_fmr(state, target); 1353 srp_map_finish_fmr(state, ch);
1242 1354
1243 if (ret == 0) { 1355 if (ret == 0) {
1244 state->npages = 0; 1356 state->npages = 0;
@@ -1258,10 +1370,11 @@ static void srp_map_update_start(struct srp_map_state *state,
1258} 1370}
1259 1371
1260static int srp_map_sg_entry(struct srp_map_state *state, 1372static int srp_map_sg_entry(struct srp_map_state *state,
1261 struct srp_target_port *target, 1373 struct srp_rdma_ch *ch,
1262 struct scatterlist *sg, int sg_index, 1374 struct scatterlist *sg, int sg_index,
1263 bool use_mr) 1375 bool use_mr)
1264{ 1376{
1377 struct srp_target_port *target = ch->target;
1265 struct srp_device *dev = target->srp_host->srp_dev; 1378 struct srp_device *dev = target->srp_host->srp_dev;
1266 struct ib_device *ibdev = dev->dev; 1379 struct ib_device *ibdev = dev->dev;
1267 dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg); 1380 dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
@@ -1290,7 +1403,7 @@ static int srp_map_sg_entry(struct srp_map_state *state,
1290 */ 1403 */
1291 if ((!dev->use_fast_reg && dma_addr & ~dev->mr_page_mask) || 1404 if ((!dev->use_fast_reg && dma_addr & ~dev->mr_page_mask) ||
1292 dma_len > dev->mr_max_size) { 1405 dma_len > dev->mr_max_size) {
1293 ret = srp_finish_mapping(state, target); 1406 ret = srp_finish_mapping(state, ch);
1294 if (ret) 1407 if (ret)
1295 return ret; 1408 return ret;
1296 1409
@@ -1311,7 +1424,7 @@ static int srp_map_sg_entry(struct srp_map_state *state,
1311 while (dma_len) { 1424 while (dma_len) {
1312 unsigned offset = dma_addr & ~dev->mr_page_mask; 1425 unsigned offset = dma_addr & ~dev->mr_page_mask;
1313 if (state->npages == dev->max_pages_per_mr || offset != 0) { 1426 if (state->npages == dev->max_pages_per_mr || offset != 0) {
1314 ret = srp_finish_mapping(state, target); 1427 ret = srp_finish_mapping(state, ch);
1315 if (ret) 1428 if (ret)
1316 return ret; 1429 return ret;
1317 1430
@@ -1335,17 +1448,18 @@ static int srp_map_sg_entry(struct srp_map_state *state,
1335 */ 1448 */
1336 ret = 0; 1449 ret = 0;
1337 if (len != dev->mr_page_size) { 1450 if (len != dev->mr_page_size) {
1338 ret = srp_finish_mapping(state, target); 1451 ret = srp_finish_mapping(state, ch);
1339 if (!ret) 1452 if (!ret)
1340 srp_map_update_start(state, NULL, 0, 0); 1453 srp_map_update_start(state, NULL, 0, 0);
1341 } 1454 }
1342 return ret; 1455 return ret;
1343} 1456}
1344 1457
1345static int srp_map_sg(struct srp_map_state *state, 1458static int srp_map_sg(struct srp_map_state *state, struct srp_rdma_ch *ch,
1346 struct srp_target_port *target, struct srp_request *req, 1459 struct srp_request *req, struct scatterlist *scat,
1347 struct scatterlist *scat, int count) 1460 int count)
1348{ 1461{
1462 struct srp_target_port *target = ch->target;
1349 struct srp_device *dev = target->srp_host->srp_dev; 1463 struct srp_device *dev = target->srp_host->srp_dev;
1350 struct ib_device *ibdev = dev->dev; 1464 struct ib_device *ibdev = dev->dev;
1351 struct scatterlist *sg; 1465 struct scatterlist *sg;
@@ -1356,14 +1470,14 @@ static int srp_map_sg(struct srp_map_state *state,
1356 state->pages = req->map_page; 1470 state->pages = req->map_page;
1357 if (dev->use_fast_reg) { 1471 if (dev->use_fast_reg) {
1358 state->next_fr = req->fr_list; 1472 state->next_fr = req->fr_list;
1359 use_mr = !!target->fr_pool; 1473 use_mr = !!ch->fr_pool;
1360 } else { 1474 } else {
1361 state->next_fmr = req->fmr_list; 1475 state->next_fmr = req->fmr_list;
1362 use_mr = !!target->fmr_pool; 1476 use_mr = !!ch->fmr_pool;
1363 } 1477 }
1364 1478
1365 for_each_sg(scat, sg, count, i) { 1479 for_each_sg(scat, sg, count, i) {
1366 if (srp_map_sg_entry(state, target, sg, i, use_mr)) { 1480 if (srp_map_sg_entry(state, ch, sg, i, use_mr)) {
1367 /* 1481 /*
1368 * Memory registration failed, so backtrack to the 1482 * Memory registration failed, so backtrack to the
1369 * first unmapped entry and continue on without using 1483 * first unmapped entry and continue on without using
@@ -1385,7 +1499,7 @@ backtrack:
1385 } 1499 }
1386 } 1500 }
1387 1501
1388 if (use_mr && srp_finish_mapping(state, target)) 1502 if (use_mr && srp_finish_mapping(state, ch))
1389 goto backtrack; 1503 goto backtrack;
1390 1504
1391 req->nmdesc = state->nmdesc; 1505 req->nmdesc = state->nmdesc;
@@ -1393,9 +1507,10 @@ backtrack:
1393 return 0; 1507 return 0;
1394} 1508}
1395 1509
1396static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target, 1510static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
1397 struct srp_request *req) 1511 struct srp_request *req)
1398{ 1512{
1513 struct srp_target_port *target = ch->target;
1399 struct scatterlist *scat; 1514 struct scatterlist *scat;
1400 struct srp_cmd *cmd = req->cmd->buf; 1515 struct srp_cmd *cmd = req->cmd->buf;
1401 int len, nents, count; 1516 int len, nents, count;
@@ -1457,7 +1572,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
1457 target->indirect_size, DMA_TO_DEVICE); 1572 target->indirect_size, DMA_TO_DEVICE);
1458 1573
1459 memset(&state, 0, sizeof(state)); 1574 memset(&state, 0, sizeof(state));
1460 srp_map_sg(&state, target, req, scat, count); 1575 srp_map_sg(&state, ch, req, scat, count);
1461 1576
1462 /* We've mapped the request, now pull as much of the indirect 1577 /* We've mapped the request, now pull as much of the indirect
1463 * descriptor table as we can into the command buffer. If this 1578 * descriptor table as we can into the command buffer. If this
@@ -1518,20 +1633,20 @@ map_complete:
1518/* 1633/*
1519 * Return an IU and possible credit to the free pool 1634 * Return an IU and possible credit to the free pool
1520 */ 1635 */
1521static void srp_put_tx_iu(struct srp_target_port *target, struct srp_iu *iu, 1636static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
1522 enum srp_iu_type iu_type) 1637 enum srp_iu_type iu_type)
1523{ 1638{
1524 unsigned long flags; 1639 unsigned long flags;
1525 1640
1526 spin_lock_irqsave(&target->lock, flags); 1641 spin_lock_irqsave(&ch->lock, flags);
1527 list_add(&iu->list, &target->free_tx); 1642 list_add(&iu->list, &ch->free_tx);
1528 if (iu_type != SRP_IU_RSP) 1643 if (iu_type != SRP_IU_RSP)
1529 ++target->req_lim; 1644 ++ch->req_lim;
1530 spin_unlock_irqrestore(&target->lock, flags); 1645 spin_unlock_irqrestore(&ch->lock, flags);
1531} 1646}
1532 1647
1533/* 1648/*
1534 * Must be called with target->lock held to protect req_lim and free_tx. 1649 * Must be called with ch->lock held to protect req_lim and free_tx.
1535 * If IU is not sent, it must be returned using srp_put_tx_iu(). 1650 * If IU is not sent, it must be returned using srp_put_tx_iu().
1536 * 1651 *
1537 * Note: 1652 * Note:
@@ -1543,35 +1658,36 @@ static void srp_put_tx_iu(struct srp_target_port *target, struct srp_iu *iu,
1543 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than 1658 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
1544 * one unanswered SRP request to an initiator. 1659 * one unanswered SRP request to an initiator.
1545 */ 1660 */
1546static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target, 1661static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
1547 enum srp_iu_type iu_type) 1662 enum srp_iu_type iu_type)
1548{ 1663{
1664 struct srp_target_port *target = ch->target;
1549 s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE; 1665 s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
1550 struct srp_iu *iu; 1666 struct srp_iu *iu;
1551 1667
1552 srp_send_completion(target->send_cq, target); 1668 srp_send_completion(ch->send_cq, ch);
1553 1669
1554 if (list_empty(&target->free_tx)) 1670 if (list_empty(&ch->free_tx))
1555 return NULL; 1671 return NULL;
1556 1672
1557 /* Initiator responses to target requests do not consume credits */ 1673 /* Initiator responses to target requests do not consume credits */
1558 if (iu_type != SRP_IU_RSP) { 1674 if (iu_type != SRP_IU_RSP) {
1559 if (target->req_lim <= rsv) { 1675 if (ch->req_lim <= rsv) {
1560 ++target->zero_req_lim; 1676 ++target->zero_req_lim;
1561 return NULL; 1677 return NULL;
1562 } 1678 }
1563 1679
1564 --target->req_lim; 1680 --ch->req_lim;
1565 } 1681 }
1566 1682
1567 iu = list_first_entry(&target->free_tx, struct srp_iu, list); 1683 iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
1568 list_del(&iu->list); 1684 list_del(&iu->list);
1569 return iu; 1685 return iu;
1570} 1686}
1571 1687
1572static int srp_post_send(struct srp_target_port *target, 1688static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
1573 struct srp_iu *iu, int len)
1574{ 1689{
1690 struct srp_target_port *target = ch->target;
1575 struct ib_sge list; 1691 struct ib_sge list;
1576 struct ib_send_wr wr, *bad_wr; 1692 struct ib_send_wr wr, *bad_wr;
1577 1693
@@ -1586,11 +1702,12 @@ static int srp_post_send(struct srp_target_port *target,
1586 wr.opcode = IB_WR_SEND; 1702 wr.opcode = IB_WR_SEND;
1587 wr.send_flags = IB_SEND_SIGNALED; 1703 wr.send_flags = IB_SEND_SIGNALED;
1588 1704
1589 return ib_post_send(target->qp, &wr, &bad_wr); 1705 return ib_post_send(ch->qp, &wr, &bad_wr);
1590} 1706}
1591 1707
1592static int srp_post_recv(struct srp_target_port *target, struct srp_iu *iu) 1708static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
1593{ 1709{
1710 struct srp_target_port *target = ch->target;
1594 struct ib_recv_wr wr, *bad_wr; 1711 struct ib_recv_wr wr, *bad_wr;
1595 struct ib_sge list; 1712 struct ib_sge list;
1596 1713
@@ -1603,35 +1720,39 @@ static int srp_post_recv(struct srp_target_port *target, struct srp_iu *iu)
1603 wr.sg_list = &list; 1720 wr.sg_list = &list;
1604 wr.num_sge = 1; 1721 wr.num_sge = 1;
1605 1722
1606 return ib_post_recv(target->qp, &wr, &bad_wr); 1723 return ib_post_recv(ch->qp, &wr, &bad_wr);
1607} 1724}
1608 1725
1609static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp) 1726static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
1610{ 1727{
1728 struct srp_target_port *target = ch->target;
1611 struct srp_request *req; 1729 struct srp_request *req;
1612 struct scsi_cmnd *scmnd; 1730 struct scsi_cmnd *scmnd;
1613 unsigned long flags; 1731 unsigned long flags;
1614 1732
1615 if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) { 1733 if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
1616 spin_lock_irqsave(&target->lock, flags); 1734 spin_lock_irqsave(&ch->lock, flags);
1617 target->req_lim += be32_to_cpu(rsp->req_lim_delta); 1735 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1618 spin_unlock_irqrestore(&target->lock, flags); 1736 spin_unlock_irqrestore(&ch->lock, flags);
1619 1737
1620 target->tsk_mgmt_status = -1; 1738 ch->tsk_mgmt_status = -1;
1621 if (be32_to_cpu(rsp->resp_data_len) >= 4) 1739 if (be32_to_cpu(rsp->resp_data_len) >= 4)
1622 target->tsk_mgmt_status = rsp->data[3]; 1740 ch->tsk_mgmt_status = rsp->data[3];
1623 complete(&target->tsk_mgmt_done); 1741 complete(&ch->tsk_mgmt_done);
1624 } else { 1742 } else {
1625 req = &target->req_ring[rsp->tag]; 1743 scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
1626 scmnd = srp_claim_req(target, req, NULL, NULL); 1744 if (scmnd) {
1745 req = (void *)scmnd->host_scribble;
1746 scmnd = srp_claim_req(ch, req, NULL, scmnd);
1747 }
1627 if (!scmnd) { 1748 if (!scmnd) {
1628 shost_printk(KERN_ERR, target->scsi_host, 1749 shost_printk(KERN_ERR, target->scsi_host,
1629 "Null scmnd for RSP w/tag %016llx\n", 1750 "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
1630 (unsigned long long) rsp->tag); 1751 rsp->tag, ch - target->ch, ch->qp->qp_num);
1631 1752
1632 spin_lock_irqsave(&target->lock, flags); 1753 spin_lock_irqsave(&ch->lock, flags);
1633 target->req_lim += be32_to_cpu(rsp->req_lim_delta); 1754 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1634 spin_unlock_irqrestore(&target->lock, flags); 1755 spin_unlock_irqrestore(&ch->lock, flags);
1635 1756
1636 return; 1757 return;
1637 } 1758 }
@@ -1653,7 +1774,7 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
1653 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER)) 1774 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
1654 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt)); 1775 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));
1655 1776
1656 srp_free_req(target, req, scmnd, 1777 srp_free_req(ch, req, scmnd,
1657 be32_to_cpu(rsp->req_lim_delta)); 1778 be32_to_cpu(rsp->req_lim_delta));
1658 1779
1659 scmnd->host_scribble = NULL; 1780 scmnd->host_scribble = NULL;
@@ -1661,18 +1782,19 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
1661 } 1782 }
1662} 1783}
1663 1784
1664static int srp_response_common(struct srp_target_port *target, s32 req_delta, 1785static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
1665 void *rsp, int len) 1786 void *rsp, int len)
1666{ 1787{
1788 struct srp_target_port *target = ch->target;
1667 struct ib_device *dev = target->srp_host->srp_dev->dev; 1789 struct ib_device *dev = target->srp_host->srp_dev->dev;
1668 unsigned long flags; 1790 unsigned long flags;
1669 struct srp_iu *iu; 1791 struct srp_iu *iu;
1670 int err; 1792 int err;
1671 1793
1672 spin_lock_irqsave(&target->lock, flags); 1794 spin_lock_irqsave(&ch->lock, flags);
1673 target->req_lim += req_delta; 1795 ch->req_lim += req_delta;
1674 iu = __srp_get_tx_iu(target, SRP_IU_RSP); 1796 iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
1675 spin_unlock_irqrestore(&target->lock, flags); 1797 spin_unlock_irqrestore(&ch->lock, flags);
1676 1798
1677 if (!iu) { 1799 if (!iu) {
1678 shost_printk(KERN_ERR, target->scsi_host, PFX 1800 shost_printk(KERN_ERR, target->scsi_host, PFX
@@ -1684,17 +1806,17 @@ static int srp_response_common(struct srp_target_port *target, s32 req_delta,
1684 memcpy(iu->buf, rsp, len); 1806 memcpy(iu->buf, rsp, len);
1685 ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE); 1807 ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
1686 1808
1687 err = srp_post_send(target, iu, len); 1809 err = srp_post_send(ch, iu, len);
1688 if (err) { 1810 if (err) {
1689 shost_printk(KERN_ERR, target->scsi_host, PFX 1811 shost_printk(KERN_ERR, target->scsi_host, PFX
1690 "unable to post response: %d\n", err); 1812 "unable to post response: %d\n", err);
1691 srp_put_tx_iu(target, iu, SRP_IU_RSP); 1813 srp_put_tx_iu(ch, iu, SRP_IU_RSP);
1692 } 1814 }
1693 1815
1694 return err; 1816 return err;
1695} 1817}
1696 1818
1697static void srp_process_cred_req(struct srp_target_port *target, 1819static void srp_process_cred_req(struct srp_rdma_ch *ch,
1698 struct srp_cred_req *req) 1820 struct srp_cred_req *req)
1699{ 1821{
1700 struct srp_cred_rsp rsp = { 1822 struct srp_cred_rsp rsp = {
@@ -1703,14 +1825,15 @@ static void srp_process_cred_req(struct srp_target_port *target,
1703 }; 1825 };
1704 s32 delta = be32_to_cpu(req->req_lim_delta); 1826 s32 delta = be32_to_cpu(req->req_lim_delta);
1705 1827
1706 if (srp_response_common(target, delta, &rsp, sizeof rsp)) 1828 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
1707 shost_printk(KERN_ERR, target->scsi_host, PFX 1829 shost_printk(KERN_ERR, ch->target->scsi_host, PFX
1708 "problems processing SRP_CRED_REQ\n"); 1830 "problems processing SRP_CRED_REQ\n");
1709} 1831}
1710 1832
1711static void srp_process_aer_req(struct srp_target_port *target, 1833static void srp_process_aer_req(struct srp_rdma_ch *ch,
1712 struct srp_aer_req *req) 1834 struct srp_aer_req *req)
1713{ 1835{
1836 struct srp_target_port *target = ch->target;
1714 struct srp_aer_rsp rsp = { 1837 struct srp_aer_rsp rsp = {
1715 .opcode = SRP_AER_RSP, 1838 .opcode = SRP_AER_RSP,
1716 .tag = req->tag, 1839 .tag = req->tag,
@@ -1720,19 +1843,20 @@ static void srp_process_aer_req(struct srp_target_port *target,
1720 shost_printk(KERN_ERR, target->scsi_host, PFX 1843 shost_printk(KERN_ERR, target->scsi_host, PFX
1721 "ignoring AER for LUN %llu\n", be64_to_cpu(req->lun)); 1844 "ignoring AER for LUN %llu\n", be64_to_cpu(req->lun));
1722 1845
1723 if (srp_response_common(target, delta, &rsp, sizeof rsp)) 1846 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
1724 shost_printk(KERN_ERR, target->scsi_host, PFX 1847 shost_printk(KERN_ERR, target->scsi_host, PFX
1725 "problems processing SRP_AER_REQ\n"); 1848 "problems processing SRP_AER_REQ\n");
1726} 1849}
1727 1850
1728static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc) 1851static void srp_handle_recv(struct srp_rdma_ch *ch, struct ib_wc *wc)
1729{ 1852{
1853 struct srp_target_port *target = ch->target;
1730 struct ib_device *dev = target->srp_host->srp_dev->dev; 1854 struct ib_device *dev = target->srp_host->srp_dev->dev;
1731 struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id; 1855 struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
1732 int res; 1856 int res;
1733 u8 opcode; 1857 u8 opcode;
1734 1858
1735 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len, 1859 ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
1736 DMA_FROM_DEVICE); 1860 DMA_FROM_DEVICE);
1737 1861
1738 opcode = *(u8 *) iu->buf; 1862 opcode = *(u8 *) iu->buf;
@@ -1746,15 +1870,15 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
1746 1870
1747 switch (opcode) { 1871 switch (opcode) {
1748 case SRP_RSP: 1872 case SRP_RSP:
1749 srp_process_rsp(target, iu->buf); 1873 srp_process_rsp(ch, iu->buf);
1750 break; 1874 break;
1751 1875
1752 case SRP_CRED_REQ: 1876 case SRP_CRED_REQ:
1753 srp_process_cred_req(target, iu->buf); 1877 srp_process_cred_req(ch, iu->buf);
1754 break; 1878 break;
1755 1879
1756 case SRP_AER_REQ: 1880 case SRP_AER_REQ:
1757 srp_process_aer_req(target, iu->buf); 1881 srp_process_aer_req(ch, iu->buf);
1758 break; 1882 break;
1759 1883
1760 case SRP_T_LOGOUT: 1884 case SRP_T_LOGOUT:
@@ -1769,10 +1893,10 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
1769 break; 1893 break;
1770 } 1894 }
1771 1895
1772 ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len, 1896 ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
1773 DMA_FROM_DEVICE); 1897 DMA_FROM_DEVICE);
1774 1898
1775 res = srp_post_recv(target, iu); 1899 res = srp_post_recv(ch, iu);
1776 if (res != 0) 1900 if (res != 0)
1777 shost_printk(KERN_ERR, target->scsi_host, 1901 shost_printk(KERN_ERR, target->scsi_host,
1778 PFX "Recv failed with error code %d\n", res); 1902 PFX "Recv failed with error code %d\n", res);
@@ -1795,8 +1919,15 @@ static void srp_tl_err_work(struct work_struct *work)
1795} 1919}
1796 1920
1797static void srp_handle_qp_err(u64 wr_id, enum ib_wc_status wc_status, 1921static void srp_handle_qp_err(u64 wr_id, enum ib_wc_status wc_status,
1798 bool send_err, struct srp_target_port *target) 1922 bool send_err, struct srp_rdma_ch *ch)
1799{ 1923{
1924 struct srp_target_port *target = ch->target;
1925
1926 if (wr_id == SRP_LAST_WR_ID) {
1927 complete(&ch->done);
1928 return;
1929 }
1930
1800 if (target->connected && !target->qp_in_error) { 1931 if (target->connected && !target->qp_in_error) {
1801 if (wr_id & LOCAL_INV_WR_ID_MASK) { 1932 if (wr_id & LOCAL_INV_WR_ID_MASK) {
1802 shost_printk(KERN_ERR, target->scsi_host, PFX 1933 shost_printk(KERN_ERR, target->scsi_host, PFX
@@ -1817,33 +1948,33 @@ static void srp_handle_qp_err(u64 wr_id, enum ib_wc_status wc_status,
1817 target->qp_in_error = true; 1948 target->qp_in_error = true;
1818} 1949}
1819 1950
1820static void srp_recv_completion(struct ib_cq *cq, void *target_ptr) 1951static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr)
1821{ 1952{
1822 struct srp_target_port *target = target_ptr; 1953 struct srp_rdma_ch *ch = ch_ptr;
1823 struct ib_wc wc; 1954 struct ib_wc wc;
1824 1955
1825 ib_req_notify_cq(cq, IB_CQ_NEXT_COMP); 1956 ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
1826 while (ib_poll_cq(cq, 1, &wc) > 0) { 1957 while (ib_poll_cq(cq, 1, &wc) > 0) {
1827 if (likely(wc.status == IB_WC_SUCCESS)) { 1958 if (likely(wc.status == IB_WC_SUCCESS)) {
1828 srp_handle_recv(target, &wc); 1959 srp_handle_recv(ch, &wc);
1829 } else { 1960 } else {
1830 srp_handle_qp_err(wc.wr_id, wc.status, false, target); 1961 srp_handle_qp_err(wc.wr_id, wc.status, false, ch);
1831 } 1962 }
1832 } 1963 }
1833} 1964}
1834 1965
1835static void srp_send_completion(struct ib_cq *cq, void *target_ptr) 1966static void srp_send_completion(struct ib_cq *cq, void *ch_ptr)
1836{ 1967{
1837 struct srp_target_port *target = target_ptr; 1968 struct srp_rdma_ch *ch = ch_ptr;
1838 struct ib_wc wc; 1969 struct ib_wc wc;
1839 struct srp_iu *iu; 1970 struct srp_iu *iu;
1840 1971
1841 while (ib_poll_cq(cq, 1, &wc) > 0) { 1972 while (ib_poll_cq(cq, 1, &wc) > 0) {
1842 if (likely(wc.status == IB_WC_SUCCESS)) { 1973 if (likely(wc.status == IB_WC_SUCCESS)) {
1843 iu = (struct srp_iu *) (uintptr_t) wc.wr_id; 1974 iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
1844 list_add(&iu->list, &target->free_tx); 1975 list_add(&iu->list, &ch->free_tx);
1845 } else { 1976 } else {
1846 srp_handle_qp_err(wc.wr_id, wc.status, true, target); 1977 srp_handle_qp_err(wc.wr_id, wc.status, true, ch);
1847 } 1978 }
1848 } 1979 }
1849} 1980}
@@ -1852,11 +1983,14 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
1852{ 1983{
1853 struct srp_target_port *target = host_to_target(shost); 1984 struct srp_target_port *target = host_to_target(shost);
1854 struct srp_rport *rport = target->rport; 1985 struct srp_rport *rport = target->rport;
1986 struct srp_rdma_ch *ch;
1855 struct srp_request *req; 1987 struct srp_request *req;
1856 struct srp_iu *iu; 1988 struct srp_iu *iu;
1857 struct srp_cmd *cmd; 1989 struct srp_cmd *cmd;
1858 struct ib_device *dev; 1990 struct ib_device *dev;
1859 unsigned long flags; 1991 unsigned long flags;
1992 u32 tag;
1993 u16 idx;
1860 int len, ret; 1994 int len, ret;
1861 const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler; 1995 const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;
1862 1996
@@ -1873,15 +2007,22 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
1873 if (unlikely(scmnd->result)) 2007 if (unlikely(scmnd->result))
1874 goto err; 2008 goto err;
1875 2009
1876 spin_lock_irqsave(&target->lock, flags); 2010 WARN_ON_ONCE(scmnd->request->tag < 0);
1877 iu = __srp_get_tx_iu(target, SRP_IU_CMD); 2011 tag = blk_mq_unique_tag(scmnd->request);
1878 if (!iu) 2012 ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
1879 goto err_unlock; 2013 idx = blk_mq_unique_tag_to_tag(tag);
2014 WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
2015 dev_name(&shost->shost_gendev), tag, idx,
2016 target->req_ring_size);
1880 2017
1881 req = list_first_entry(&target->free_reqs, struct srp_request, list); 2018 spin_lock_irqsave(&ch->lock, flags);
1882 list_del(&req->list); 2019 iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
1883 spin_unlock_irqrestore(&target->lock, flags); 2020 spin_unlock_irqrestore(&ch->lock, flags);
1884 2021
2022 if (!iu)
2023 goto err;
2024
2025 req = &ch->req_ring[idx];
1885 dev = target->srp_host->srp_dev->dev; 2026 dev = target->srp_host->srp_dev->dev;
1886 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len, 2027 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
1887 DMA_TO_DEVICE); 2028 DMA_TO_DEVICE);
@@ -1893,13 +2034,13 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
1893 2034
1894 cmd->opcode = SRP_CMD; 2035 cmd->opcode = SRP_CMD;
1895 cmd->lun = cpu_to_be64((u64) scmnd->device->lun << 48); 2036 cmd->lun = cpu_to_be64((u64) scmnd->device->lun << 48);
1896 cmd->tag = req->index; 2037 cmd->tag = tag;
1897 memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len); 2038 memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
1898 2039
1899 req->scmnd = scmnd; 2040 req->scmnd = scmnd;
1900 req->cmd = iu; 2041 req->cmd = iu;
1901 2042
1902 len = srp_map_data(scmnd, target, req); 2043 len = srp_map_data(scmnd, ch, req);
1903 if (len < 0) { 2044 if (len < 0) {
1904 shost_printk(KERN_ERR, target->scsi_host, 2045 shost_printk(KERN_ERR, target->scsi_host,
1905 PFX "Failed to map data (%d)\n", len); 2046 PFX "Failed to map data (%d)\n", len);
@@ -1917,7 +2058,7 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
1917 ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len, 2058 ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
1918 DMA_TO_DEVICE); 2059 DMA_TO_DEVICE);
1919 2060
1920 if (srp_post_send(target, iu, len)) { 2061 if (srp_post_send(ch, iu, len)) {
1921 shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n"); 2062 shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
1922 goto err_unmap; 2063 goto err_unmap;
1923 } 2064 }
@@ -1931,10 +2072,10 @@ unlock_rport:
1931 return ret; 2072 return ret;
1932 2073
1933err_unmap: 2074err_unmap:
1934 srp_unmap_data(scmnd, target, req); 2075 srp_unmap_data(scmnd, ch, req);
1935 2076
1936err_iu: 2077err_iu:
1937 srp_put_tx_iu(target, iu, SRP_IU_CMD); 2078 srp_put_tx_iu(ch, iu, SRP_IU_CMD);
1938 2079
1939 /* 2080 /*
1940 * Avoid that the loops that iterate over the request ring can 2081 * Avoid that the loops that iterate over the request ring can
@@ -1942,12 +2083,6 @@ err_iu:
1942 */ 2083 */
1943 req->scmnd = NULL; 2084 req->scmnd = NULL;
1944 2085
1945 spin_lock_irqsave(&target->lock, flags);
1946 list_add(&req->list, &target->free_reqs);
1947
1948err_unlock:
1949 spin_unlock_irqrestore(&target->lock, flags);
1950
1951err: 2086err:
1952 if (scmnd->result) { 2087 if (scmnd->result) {
1953 scmnd->scsi_done(scmnd); 2088 scmnd->scsi_done(scmnd);
@@ -1961,53 +2096,54 @@ err:
1961 2096
1962/* 2097/*
1963 * Note: the resources allocated in this function are freed in 2098 * Note: the resources allocated in this function are freed in
1964 * srp_free_target_ib(). 2099 * srp_free_ch_ib().
1965 */ 2100 */
1966static int srp_alloc_iu_bufs(struct srp_target_port *target) 2101static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
1967{ 2102{
2103 struct srp_target_port *target = ch->target;
1968 int i; 2104 int i;
1969 2105
1970 target->rx_ring = kzalloc(target->queue_size * sizeof(*target->rx_ring), 2106 ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
1971 GFP_KERNEL); 2107 GFP_KERNEL);
1972 if (!target->rx_ring) 2108 if (!ch->rx_ring)
1973 goto err_no_ring; 2109 goto err_no_ring;
1974 target->tx_ring = kzalloc(target->queue_size * sizeof(*target->tx_ring), 2110 ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
1975 GFP_KERNEL); 2111 GFP_KERNEL);
1976 if (!target->tx_ring) 2112 if (!ch->tx_ring)
1977 goto err_no_ring; 2113 goto err_no_ring;
1978 2114
1979 for (i = 0; i < target->queue_size; ++i) { 2115 for (i = 0; i < target->queue_size; ++i) {
1980 target->rx_ring[i] = srp_alloc_iu(target->srp_host, 2116 ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
1981 target->max_ti_iu_len, 2117 ch->max_ti_iu_len,
1982 GFP_KERNEL, DMA_FROM_DEVICE); 2118 GFP_KERNEL, DMA_FROM_DEVICE);
1983 if (!target->rx_ring[i]) 2119 if (!ch->rx_ring[i])
1984 goto err; 2120 goto err;
1985 } 2121 }
1986 2122
1987 for (i = 0; i < target->queue_size; ++i) { 2123 for (i = 0; i < target->queue_size; ++i) {
1988 target->tx_ring[i] = srp_alloc_iu(target->srp_host, 2124 ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
1989 target->max_iu_len, 2125 target->max_iu_len,
1990 GFP_KERNEL, DMA_TO_DEVICE); 2126 GFP_KERNEL, DMA_TO_DEVICE);
1991 if (!target->tx_ring[i]) 2127 if (!ch->tx_ring[i])
1992 goto err; 2128 goto err;
1993 2129
1994 list_add(&target->tx_ring[i]->list, &target->free_tx); 2130 list_add(&ch->tx_ring[i]->list, &ch->free_tx);
1995 } 2131 }
1996 2132
1997 return 0; 2133 return 0;
1998 2134
1999err: 2135err:
2000 for (i = 0; i < target->queue_size; ++i) { 2136 for (i = 0; i < target->queue_size; ++i) {
2001 srp_free_iu(target->srp_host, target->rx_ring[i]); 2137 srp_free_iu(target->srp_host, ch->rx_ring[i]);
2002 srp_free_iu(target->srp_host, target->tx_ring[i]); 2138 srp_free_iu(target->srp_host, ch->tx_ring[i]);
2003 } 2139 }
2004 2140
2005 2141
2006err_no_ring: 2142err_no_ring:
2007 kfree(target->tx_ring); 2143 kfree(ch->tx_ring);
2008 target->tx_ring = NULL; 2144 ch->tx_ring = NULL;
2009 kfree(target->rx_ring); 2145 kfree(ch->rx_ring);
2010 target->rx_ring = NULL; 2146 ch->rx_ring = NULL;
2011 2147
2012 return -ENOMEM; 2148 return -ENOMEM;
2013} 2149}
@@ -2041,23 +2177,24 @@ static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
2041 2177
2042static void srp_cm_rep_handler(struct ib_cm_id *cm_id, 2178static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
2043 struct srp_login_rsp *lrsp, 2179 struct srp_login_rsp *lrsp,
2044 struct srp_target_port *target) 2180 struct srp_rdma_ch *ch)
2045{ 2181{
2182 struct srp_target_port *target = ch->target;
2046 struct ib_qp_attr *qp_attr = NULL; 2183 struct ib_qp_attr *qp_attr = NULL;
2047 int attr_mask = 0; 2184 int attr_mask = 0;
2048 int ret; 2185 int ret;
2049 int i; 2186 int i;
2050 2187
2051 if (lrsp->opcode == SRP_LOGIN_RSP) { 2188 if (lrsp->opcode == SRP_LOGIN_RSP) {
2052 target->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len); 2189 ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
2053 target->req_lim = be32_to_cpu(lrsp->req_lim_delta); 2190 ch->req_lim = be32_to_cpu(lrsp->req_lim_delta);
2054 2191
2055 /* 2192 /*
2056 * Reserve credits for task management so we don't 2193 * Reserve credits for task management so we don't
2057 * bounce requests back to the SCSI mid-layer. 2194 * bounce requests back to the SCSI mid-layer.
2058 */ 2195 */
2059 target->scsi_host->can_queue 2196 target->scsi_host->can_queue
2060 = min(target->req_lim - SRP_TSK_MGMT_SQ_SIZE, 2197 = min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
2061 target->scsi_host->can_queue); 2198 target->scsi_host->can_queue);
2062 target->scsi_host->cmd_per_lun 2199 target->scsi_host->cmd_per_lun
2063 = min_t(int, target->scsi_host->can_queue, 2200 = min_t(int, target->scsi_host->can_queue,
@@ -2069,8 +2206,8 @@ static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
2069 goto error; 2206 goto error;
2070 } 2207 }
2071 2208
2072 if (!target->rx_ring) { 2209 if (!ch->rx_ring) {
2073 ret = srp_alloc_iu_bufs(target); 2210 ret = srp_alloc_iu_bufs(ch);
2074 if (ret) 2211 if (ret)
2075 goto error; 2212 goto error;
2076 } 2213 }
@@ -2085,13 +2222,14 @@ static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
2085 if (ret) 2222 if (ret)
2086 goto error_free; 2223 goto error_free;
2087 2224
2088 ret = ib_modify_qp(target->qp, qp_attr, attr_mask); 2225 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2089 if (ret) 2226 if (ret)
2090 goto error_free; 2227 goto error_free;
2091 2228
2092 for (i = 0; i < target->queue_size; i++) { 2229 for (i = 0; i < target->queue_size; i++) {
2093 struct srp_iu *iu = target->rx_ring[i]; 2230 struct srp_iu *iu = ch->rx_ring[i];
2094 ret = srp_post_recv(target, iu); 2231
2232 ret = srp_post_recv(ch, iu);
2095 if (ret) 2233 if (ret)
2096 goto error_free; 2234 goto error_free;
2097 } 2235 }
@@ -2103,7 +2241,7 @@ static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
2103 2241
2104 target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask); 2242 target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
2105 2243
2106 ret = ib_modify_qp(target->qp, qp_attr, attr_mask); 2244 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2107 if (ret) 2245 if (ret)
2108 goto error_free; 2246 goto error_free;
2109 2247
@@ -2113,13 +2251,14 @@ error_free:
2113 kfree(qp_attr); 2251 kfree(qp_attr);
2114 2252
2115error: 2253error:
2116 target->status = ret; 2254 ch->status = ret;
2117} 2255}
2118 2256
2119static void srp_cm_rej_handler(struct ib_cm_id *cm_id, 2257static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
2120 struct ib_cm_event *event, 2258 struct ib_cm_event *event,
2121 struct srp_target_port *target) 2259 struct srp_rdma_ch *ch)
2122{ 2260{
2261 struct srp_target_port *target = ch->target;
2123 struct Scsi_Host *shost = target->scsi_host; 2262 struct Scsi_Host *shost = target->scsi_host;
2124 struct ib_class_port_info *cpi; 2263 struct ib_class_port_info *cpi;
2125 int opcode; 2264 int opcode;
@@ -2127,12 +2266,12 @@ static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
2127 switch (event->param.rej_rcvd.reason) { 2266 switch (event->param.rej_rcvd.reason) {
2128 case IB_CM_REJ_PORT_CM_REDIRECT: 2267 case IB_CM_REJ_PORT_CM_REDIRECT:
2129 cpi = event->param.rej_rcvd.ari; 2268 cpi = event->param.rej_rcvd.ari;
2130 target->path.dlid = cpi->redirect_lid; 2269 ch->path.dlid = cpi->redirect_lid;
2131 target->path.pkey = cpi->redirect_pkey; 2270 ch->path.pkey = cpi->redirect_pkey;
2132 cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff; 2271 cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
2133 memcpy(target->path.dgid.raw, cpi->redirect_gid, 16); 2272 memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16);
2134 2273
2135 target->status = target->path.dlid ? 2274 ch->status = ch->path.dlid ?
2136 SRP_DLID_REDIRECT : SRP_PORT_REDIRECT; 2275 SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
2137 break; 2276 break;
2138 2277
@@ -2143,26 +2282,26 @@ static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
2143 * reject reason code 25 when they mean 24 2282 * reject reason code 25 when they mean 24
2144 * (port redirect). 2283 * (port redirect).
2145 */ 2284 */
2146 memcpy(target->path.dgid.raw, 2285 memcpy(ch->path.dgid.raw,
2147 event->param.rej_rcvd.ari, 16); 2286 event->param.rej_rcvd.ari, 16);
2148 2287
2149 shost_printk(KERN_DEBUG, shost, 2288 shost_printk(KERN_DEBUG, shost,
2150 PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n", 2289 PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
2151 (unsigned long long) be64_to_cpu(target->path.dgid.global.subnet_prefix), 2290 be64_to_cpu(ch->path.dgid.global.subnet_prefix),
2152 (unsigned long long) be64_to_cpu(target->path.dgid.global.interface_id)); 2291 be64_to_cpu(ch->path.dgid.global.interface_id));
2153 2292
2154 target->status = SRP_PORT_REDIRECT; 2293 ch->status = SRP_PORT_REDIRECT;
2155 } else { 2294 } else {
2156 shost_printk(KERN_WARNING, shost, 2295 shost_printk(KERN_WARNING, shost,
2157 " REJ reason: IB_CM_REJ_PORT_REDIRECT\n"); 2296 " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
2158 target->status = -ECONNRESET; 2297 ch->status = -ECONNRESET;
2159 } 2298 }
2160 break; 2299 break;
2161 2300
2162 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID: 2301 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
2163 shost_printk(KERN_WARNING, shost, 2302 shost_printk(KERN_WARNING, shost,
2164 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n"); 2303 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
2165 target->status = -ECONNRESET; 2304 ch->status = -ECONNRESET;
2166 break; 2305 break;
2167 2306
2168 case IB_CM_REJ_CONSUMER_DEFINED: 2307 case IB_CM_REJ_CONSUMER_DEFINED:
@@ -2177,30 +2316,31 @@ static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
2177 else 2316 else
2178 shost_printk(KERN_WARNING, shost, PFX 2317 shost_printk(KERN_WARNING, shost, PFX
2179 "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n", 2318 "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
2180 target->path.sgid.raw, 2319 target->sgid.raw,
2181 target->orig_dgid, reason); 2320 target->orig_dgid.raw, reason);
2182 } else 2321 } else
2183 shost_printk(KERN_WARNING, shost, 2322 shost_printk(KERN_WARNING, shost,
2184 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED," 2323 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
2185 " opcode 0x%02x\n", opcode); 2324 " opcode 0x%02x\n", opcode);
2186 target->status = -ECONNRESET; 2325 ch->status = -ECONNRESET;
2187 break; 2326 break;
2188 2327
2189 case IB_CM_REJ_STALE_CONN: 2328 case IB_CM_REJ_STALE_CONN:
2190 shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n"); 2329 shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n");
2191 target->status = SRP_STALE_CONN; 2330 ch->status = SRP_STALE_CONN;
2192 break; 2331 break;
2193 2332
2194 default: 2333 default:
2195 shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n", 2334 shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
2196 event->param.rej_rcvd.reason); 2335 event->param.rej_rcvd.reason);
2197 target->status = -ECONNRESET; 2336 ch->status = -ECONNRESET;
2198 } 2337 }
2199} 2338}
2200 2339
2201static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event) 2340static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
2202{ 2341{
2203 struct srp_target_port *target = cm_id->context; 2342 struct srp_rdma_ch *ch = cm_id->context;
2343 struct srp_target_port *target = ch->target;
2204 int comp = 0; 2344 int comp = 0;
2205 2345
2206 switch (event->event) { 2346 switch (event->event) {
@@ -2208,19 +2348,19 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
2208 shost_printk(KERN_DEBUG, target->scsi_host, 2348 shost_printk(KERN_DEBUG, target->scsi_host,
2209 PFX "Sending CM REQ failed\n"); 2349 PFX "Sending CM REQ failed\n");
2210 comp = 1; 2350 comp = 1;
2211 target->status = -ECONNRESET; 2351 ch->status = -ECONNRESET;
2212 break; 2352 break;
2213 2353
2214 case IB_CM_REP_RECEIVED: 2354 case IB_CM_REP_RECEIVED:
2215 comp = 1; 2355 comp = 1;
2216 srp_cm_rep_handler(cm_id, event->private_data, target); 2356 srp_cm_rep_handler(cm_id, event->private_data, ch);
2217 break; 2357 break;
2218 2358
2219 case IB_CM_REJ_RECEIVED: 2359 case IB_CM_REJ_RECEIVED:
2220 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n"); 2360 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
2221 comp = 1; 2361 comp = 1;
2222 2362
2223 srp_cm_rej_handler(cm_id, event, target); 2363 srp_cm_rej_handler(cm_id, event, ch);
2224 break; 2364 break;
2225 2365
2226 case IB_CM_DREQ_RECEIVED: 2366 case IB_CM_DREQ_RECEIVED:
@@ -2238,7 +2378,7 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
2238 PFX "connection closed\n"); 2378 PFX "connection closed\n");
2239 comp = 1; 2379 comp = 1;
2240 2380
2241 target->status = 0; 2381 ch->status = 0;
2242 break; 2382 break;
2243 2383
2244 case IB_CM_MRA_RECEIVED: 2384 case IB_CM_MRA_RECEIVED:
@@ -2253,65 +2393,30 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
2253 } 2393 }
2254 2394
2255 if (comp) 2395 if (comp)
2256 complete(&target->done); 2396 complete(&ch->done);
2257 2397
2258 return 0; 2398 return 0;
2259} 2399}
2260 2400
2261/** 2401/**
2262 * srp_change_queue_type - changing device queue tag type
2263 * @sdev: scsi device struct
2264 * @tag_type: requested tag type
2265 *
2266 * Returns queue tag type.
2267 */
2268static int
2269srp_change_queue_type(struct scsi_device *sdev, int tag_type)
2270{
2271 if (sdev->tagged_supported) {
2272 scsi_set_tag_type(sdev, tag_type);
2273 if (tag_type)
2274 scsi_activate_tcq(sdev, sdev->queue_depth);
2275 else
2276 scsi_deactivate_tcq(sdev, sdev->queue_depth);
2277 } else
2278 tag_type = 0;
2279
2280 return tag_type;
2281}
2282
2283/**
2284 * srp_change_queue_depth - setting device queue depth 2402 * srp_change_queue_depth - setting device queue depth
2285 * @sdev: scsi device struct 2403 * @sdev: scsi device struct
2286 * @qdepth: requested queue depth 2404 * @qdepth: requested queue depth
2287 * @reason: SCSI_QDEPTH_DEFAULT/SCSI_QDEPTH_QFULL/SCSI_QDEPTH_RAMP_UP
2288 * (see include/scsi/scsi_host.h for definition)
2289 * 2405 *
2290 * Returns queue depth. 2406 * Returns queue depth.
2291 */ 2407 */
2292static int 2408static int
2293srp_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason) 2409srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
2294{ 2410{
2295 struct Scsi_Host *shost = sdev->host; 2411 if (!sdev->tagged_supported)
2296 int max_depth; 2412 qdepth = 1;
2297 if (reason == SCSI_QDEPTH_DEFAULT || reason == SCSI_QDEPTH_RAMP_UP) { 2413 return scsi_change_queue_depth(sdev, qdepth);
2298 max_depth = shost->can_queue;
2299 if (!sdev->tagged_supported)
2300 max_depth = 1;
2301 if (qdepth > max_depth)
2302 qdepth = max_depth;
2303 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
2304 } else if (reason == SCSI_QDEPTH_QFULL)
2305 scsi_track_queue_full(sdev, qdepth);
2306 else
2307 return -EOPNOTSUPP;
2308
2309 return sdev->queue_depth;
2310} 2414}
2311 2415
2312static int srp_send_tsk_mgmt(struct srp_target_port *target, 2416static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag,
2313 u64 req_tag, unsigned int lun, u8 func) 2417 unsigned int lun, u8 func)
2314{ 2418{
2419 struct srp_target_port *target = ch->target;
2315 struct srp_rport *rport = target->rport; 2420 struct srp_rport *rport = target->rport;
2316 struct ib_device *dev = target->srp_host->srp_dev->dev; 2421 struct ib_device *dev = target->srp_host->srp_dev->dev;
2317 struct srp_iu *iu; 2422 struct srp_iu *iu;
@@ -2320,16 +2425,16 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target,
2320 if (!target->connected || target->qp_in_error) 2425 if (!target->connected || target->qp_in_error)
2321 return -1; 2426 return -1;
2322 2427
2323 init_completion(&target->tsk_mgmt_done); 2428 init_completion(&ch->tsk_mgmt_done);
2324 2429
2325 /* 2430 /*
2326 * Lock the rport mutex to avoid that srp_create_target_ib() is 2431 * Lock the rport mutex to avoid that srp_create_ch_ib() is
2327 * invoked while a task management function is being sent. 2432 * invoked while a task management function is being sent.
2328 */ 2433 */
2329 mutex_lock(&rport->mutex); 2434 mutex_lock(&rport->mutex);
2330 spin_lock_irq(&target->lock); 2435 spin_lock_irq(&ch->lock);
2331 iu = __srp_get_tx_iu(target, SRP_IU_TSK_MGMT); 2436 iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
2332 spin_unlock_irq(&target->lock); 2437 spin_unlock_irq(&ch->lock);
2333 2438
2334 if (!iu) { 2439 if (!iu) {
2335 mutex_unlock(&rport->mutex); 2440 mutex_unlock(&rport->mutex);
@@ -2350,15 +2455,15 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target,
2350 2455
2351 ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt, 2456 ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
2352 DMA_TO_DEVICE); 2457 DMA_TO_DEVICE);
2353 if (srp_post_send(target, iu, sizeof *tsk_mgmt)) { 2458 if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
2354 srp_put_tx_iu(target, iu, SRP_IU_TSK_MGMT); 2459 srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
2355 mutex_unlock(&rport->mutex); 2460 mutex_unlock(&rport->mutex);
2356 2461
2357 return -1; 2462 return -1;
2358 } 2463 }
2359 mutex_unlock(&rport->mutex); 2464 mutex_unlock(&rport->mutex);
2360 2465
2361 if (!wait_for_completion_timeout(&target->tsk_mgmt_done, 2466 if (!wait_for_completion_timeout(&ch->tsk_mgmt_done,
2362 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS))) 2467 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
2363 return -1; 2468 return -1;
2364 2469
@@ -2369,20 +2474,32 @@ static int srp_abort(struct scsi_cmnd *scmnd)
2369{ 2474{
2370 struct srp_target_port *target = host_to_target(scmnd->device->host); 2475 struct srp_target_port *target = host_to_target(scmnd->device->host);
2371 struct srp_request *req = (struct srp_request *) scmnd->host_scribble; 2476 struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
2477 u32 tag;
2478 u16 ch_idx;
2479 struct srp_rdma_ch *ch;
2372 int ret; 2480 int ret;
2373 2481
2374 shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n"); 2482 shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
2375 2483
2376 if (!req || !srp_claim_req(target, req, NULL, scmnd)) 2484 if (!req)
2485 return SUCCESS;
2486 tag = blk_mq_unique_tag(scmnd->request);
2487 ch_idx = blk_mq_unique_tag_to_hwq(tag);
2488 if (WARN_ON_ONCE(ch_idx >= target->ch_count))
2377 return SUCCESS; 2489 return SUCCESS;
2378 if (srp_send_tsk_mgmt(target, req->index, scmnd->device->lun, 2490 ch = &target->ch[ch_idx];
2491 if (!srp_claim_req(ch, req, NULL, scmnd))
2492 return SUCCESS;
2493 shost_printk(KERN_ERR, target->scsi_host,
2494 "Sending SRP abort for tag %#x\n", tag);
2495 if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
2379 SRP_TSK_ABORT_TASK) == 0) 2496 SRP_TSK_ABORT_TASK) == 0)
2380 ret = SUCCESS; 2497 ret = SUCCESS;
2381 else if (target->rport->state == SRP_RPORT_LOST) 2498 else if (target->rport->state == SRP_RPORT_LOST)
2382 ret = FAST_IO_FAIL; 2499 ret = FAST_IO_FAIL;
2383 else 2500 else
2384 ret = FAILED; 2501 ret = FAILED;
2385 srp_free_req(target, req, scmnd, 0); 2502 srp_free_req(ch, req, scmnd, 0);
2386 scmnd->result = DID_ABORT << 16; 2503 scmnd->result = DID_ABORT << 16;
2387 scmnd->scsi_done(scmnd); 2504 scmnd->scsi_done(scmnd);
2388 2505
@@ -2392,19 +2509,25 @@ static int srp_abort(struct scsi_cmnd *scmnd)
2392static int srp_reset_device(struct scsi_cmnd *scmnd) 2509static int srp_reset_device(struct scsi_cmnd *scmnd)
2393{ 2510{
2394 struct srp_target_port *target = host_to_target(scmnd->device->host); 2511 struct srp_target_port *target = host_to_target(scmnd->device->host);
2512 struct srp_rdma_ch *ch;
2395 int i; 2513 int i;
2396 2514
2397 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n"); 2515 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
2398 2516
2399 if (srp_send_tsk_mgmt(target, SRP_TAG_NO_REQ, scmnd->device->lun, 2517 ch = &target->ch[0];
2518 if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
2400 SRP_TSK_LUN_RESET)) 2519 SRP_TSK_LUN_RESET))
2401 return FAILED; 2520 return FAILED;
2402 if (target->tsk_mgmt_status) 2521 if (ch->tsk_mgmt_status)
2403 return FAILED; 2522 return FAILED;
2404 2523
2405 for (i = 0; i < target->req_ring_size; ++i) { 2524 for (i = 0; i < target->ch_count; i++) {
2406 struct srp_request *req = &target->req_ring[i]; 2525 ch = &target->ch[i];
2407 srp_finish_req(target, req, scmnd->device, DID_RESET << 16); 2526 for (i = 0; i < target->req_ring_size; ++i) {
2527 struct srp_request *req = &ch->req_ring[i];
2528
2529 srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
2530 }
2408 } 2531 }
2409 2532
2410 return SUCCESS; 2533 return SUCCESS;
@@ -2466,7 +2589,7 @@ static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
2466{ 2589{
2467 struct srp_target_port *target = host_to_target(class_to_shost(dev)); 2590 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2468 2591
2469 return sprintf(buf, "0x%04x\n", be16_to_cpu(target->path.pkey)); 2592 return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey));
2470} 2593}
2471 2594
2472static ssize_t show_sgid(struct device *dev, struct device_attribute *attr, 2595static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
@@ -2474,15 +2597,16 @@ static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
2474{ 2597{
2475 struct srp_target_port *target = host_to_target(class_to_shost(dev)); 2598 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2476 2599
2477 return sprintf(buf, "%pI6\n", target->path.sgid.raw); 2600 return sprintf(buf, "%pI6\n", target->sgid.raw);
2478} 2601}
2479 2602
2480static ssize_t show_dgid(struct device *dev, struct device_attribute *attr, 2603static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
2481 char *buf) 2604 char *buf)
2482{ 2605{
2483 struct srp_target_port *target = host_to_target(class_to_shost(dev)); 2606 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2607 struct srp_rdma_ch *ch = &target->ch[0];
2484 2608
2485 return sprintf(buf, "%pI6\n", target->path.dgid.raw); 2609 return sprintf(buf, "%pI6\n", ch->path.dgid.raw);
2486} 2610}
2487 2611
2488static ssize_t show_orig_dgid(struct device *dev, 2612static ssize_t show_orig_dgid(struct device *dev,
@@ -2490,15 +2614,21 @@ static ssize_t show_orig_dgid(struct device *dev,
2490{ 2614{
2491 struct srp_target_port *target = host_to_target(class_to_shost(dev)); 2615 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2492 2616
2493 return sprintf(buf, "%pI6\n", target->orig_dgid); 2617 return sprintf(buf, "%pI6\n", target->orig_dgid.raw);
2494} 2618}
2495 2619
2496static ssize_t show_req_lim(struct device *dev, 2620static ssize_t show_req_lim(struct device *dev,
2497 struct device_attribute *attr, char *buf) 2621 struct device_attribute *attr, char *buf)
2498{ 2622{
2499 struct srp_target_port *target = host_to_target(class_to_shost(dev)); 2623 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2624 struct srp_rdma_ch *ch;
2625 int i, req_lim = INT_MAX;
2500 2626
2501 return sprintf(buf, "%d\n", target->req_lim); 2627 for (i = 0; i < target->ch_count; i++) {
2628 ch = &target->ch[i];
2629 req_lim = min(req_lim, ch->req_lim);
2630 }
2631 return sprintf(buf, "%d\n", req_lim);
2502} 2632}
2503 2633
2504static ssize_t show_zero_req_lim(struct device *dev, 2634static ssize_t show_zero_req_lim(struct device *dev,
@@ -2525,6 +2655,14 @@ static ssize_t show_local_ib_device(struct device *dev,
2525 return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name); 2655 return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
2526} 2656}
2527 2657
2658static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
2659 char *buf)
2660{
2661 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2662
2663 return sprintf(buf, "%d\n", target->ch_count);
2664}
2665
2528static ssize_t show_comp_vector(struct device *dev, 2666static ssize_t show_comp_vector(struct device *dev,
2529 struct device_attribute *attr, char *buf) 2667 struct device_attribute *attr, char *buf)
2530{ 2668{
@@ -2568,6 +2706,7 @@ static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL);
2568static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL); 2706static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
2569static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL); 2707static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
2570static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL); 2708static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
2709static DEVICE_ATTR(ch_count, S_IRUGO, show_ch_count, NULL);
2571static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL); 2710static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL);
2572static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL); 2711static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL);
2573static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL); 2712static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL);
@@ -2585,6 +2724,7 @@ static struct device_attribute *srp_host_attrs[] = {
2585 &dev_attr_zero_req_lim, 2724 &dev_attr_zero_req_lim,
2586 &dev_attr_local_ib_port, 2725 &dev_attr_local_ib_port,
2587 &dev_attr_local_ib_device, 2726 &dev_attr_local_ib_device,
2727 &dev_attr_ch_count,
2588 &dev_attr_comp_vector, 2728 &dev_attr_comp_vector,
2589 &dev_attr_tl_retry_count, 2729 &dev_attr_tl_retry_count,
2590 &dev_attr_cmd_sg_entries, 2730 &dev_attr_cmd_sg_entries,
@@ -2600,7 +2740,7 @@ static struct scsi_host_template srp_template = {
2600 .info = srp_target_info, 2740 .info = srp_target_info,
2601 .queuecommand = srp_queuecommand, 2741 .queuecommand = srp_queuecommand,
2602 .change_queue_depth = srp_change_queue_depth, 2742 .change_queue_depth = srp_change_queue_depth,
2603 .change_queue_type = srp_change_queue_type, 2743 .change_queue_type = scsi_change_queue_type,
2604 .eh_abort_handler = srp_abort, 2744 .eh_abort_handler = srp_abort,
2605 .eh_device_reset_handler = srp_reset_device, 2745 .eh_device_reset_handler = srp_reset_device,
2606 .eh_host_reset_handler = srp_reset_host, 2746 .eh_host_reset_handler = srp_reset_host,
@@ -2610,14 +2750,28 @@ static struct scsi_host_template srp_template = {
2610 .this_id = -1, 2750 .this_id = -1,
2611 .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE, 2751 .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE,
2612 .use_clustering = ENABLE_CLUSTERING, 2752 .use_clustering = ENABLE_CLUSTERING,
2613 .shost_attrs = srp_host_attrs 2753 .shost_attrs = srp_host_attrs,
2754 .use_blk_tags = 1,
2755 .track_queue_depth = 1,
2614}; 2756};
2615 2757
2758static int srp_sdev_count(struct Scsi_Host *host)
2759{
2760 struct scsi_device *sdev;
2761 int c = 0;
2762
2763 shost_for_each_device(sdev, host)
2764 c++;
2765
2766 return c;
2767}
2768
2616static int srp_add_target(struct srp_host *host, struct srp_target_port *target) 2769static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
2617{ 2770{
2618 struct srp_rport_identifiers ids; 2771 struct srp_rport_identifiers ids;
2619 struct srp_rport *rport; 2772 struct srp_rport *rport;
2620 2773
2774 target->state = SRP_TARGET_SCANNING;
2621 sprintf(target->target_name, "SRP.T10:%016llX", 2775 sprintf(target->target_name, "SRP.T10:%016llX",
2622 (unsigned long long) be64_to_cpu(target->id_ext)); 2776 (unsigned long long) be64_to_cpu(target->id_ext));
2623 2777
@@ -2640,11 +2794,26 @@ static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
2640 list_add_tail(&target->list, &host->target_list); 2794 list_add_tail(&target->list, &host->target_list);
2641 spin_unlock(&host->target_lock); 2795 spin_unlock(&host->target_lock);
2642 2796
2643 target->state = SRP_TARGET_LIVE;
2644
2645 scsi_scan_target(&target->scsi_host->shost_gendev, 2797 scsi_scan_target(&target->scsi_host->shost_gendev,
2646 0, target->scsi_id, SCAN_WILD_CARD, 0); 2798 0, target->scsi_id, SCAN_WILD_CARD, 0);
2647 2799
2800 if (!target->connected || target->qp_in_error) {
2801 shost_printk(KERN_INFO, target->scsi_host,
2802 PFX "SCSI scan failed - removing SCSI host\n");
2803 srp_queue_remove_work(target);
2804 goto out;
2805 }
2806
2807 pr_debug(PFX "%s: SCSI scan succeeded - detected %d LUNs\n",
2808 dev_name(&target->scsi_host->shost_gendev),
2809 srp_sdev_count(target->scsi_host));
2810
2811 spin_lock_irq(&target->lock);
2812 if (target->state == SRP_TARGET_SCANNING)
2813 target->state = SRP_TARGET_LIVE;
2814 spin_unlock_irq(&target->lock);
2815
2816out:
2648 return 0; 2817 return 0;
2649} 2818}
2650 2819
@@ -2801,11 +2970,15 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
2801 } 2970 }
2802 2971
2803 for (i = 0; i < 16; ++i) { 2972 for (i = 0; i < 16; ++i) {
2804 strlcpy(dgid, p + i * 2, 3); 2973 strlcpy(dgid, p + i * 2, sizeof(dgid));
2805 target->path.dgid.raw[i] = simple_strtoul(dgid, NULL, 16); 2974 if (sscanf(dgid, "%hhx",
2975 &target->orig_dgid.raw[i]) < 1) {
2976 ret = -EINVAL;
2977 kfree(p);
2978 goto out;
2979 }
2806 } 2980 }
2807 kfree(p); 2981 kfree(p);
2808 memcpy(target->orig_dgid, target->path.dgid.raw, 16);
2809 break; 2982 break;
2810 2983
2811 case SRP_OPT_PKEY: 2984 case SRP_OPT_PKEY:
@@ -2813,7 +2986,7 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
2813 pr_warn("bad P_Key parameter '%s'\n", p); 2986 pr_warn("bad P_Key parameter '%s'\n", p);
2814 goto out; 2987 goto out;
2815 } 2988 }
2816 target->path.pkey = cpu_to_be16(token); 2989 target->pkey = cpu_to_be16(token);
2817 break; 2990 break;
2818 2991
2819 case SRP_OPT_SERVICE_ID: 2992 case SRP_OPT_SERVICE_ID:
@@ -2823,7 +2996,6 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
2823 goto out; 2996 goto out;
2824 } 2997 }
2825 target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16)); 2998 target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
2826 target->path.service_id = target->service_id;
2827 kfree(p); 2999 kfree(p);
2828 break; 3000 break;
2829 3001
@@ -2960,9 +3132,11 @@ static ssize_t srp_create_target(struct device *dev,
2960 container_of(dev, struct srp_host, dev); 3132 container_of(dev, struct srp_host, dev);
2961 struct Scsi_Host *target_host; 3133 struct Scsi_Host *target_host;
2962 struct srp_target_port *target; 3134 struct srp_target_port *target;
3135 struct srp_rdma_ch *ch;
2963 struct srp_device *srp_dev = host->srp_dev; 3136 struct srp_device *srp_dev = host->srp_dev;
2964 struct ib_device *ibdev = srp_dev->dev; 3137 struct ib_device *ibdev = srp_dev->dev;
2965 int ret; 3138 int ret, node_idx, node, cpu, i;
3139 bool multich = false;
2966 3140
2967 target_host = scsi_host_alloc(&srp_template, 3141 target_host = scsi_host_alloc(&srp_template,
2968 sizeof (struct srp_target_port)); 3142 sizeof (struct srp_target_port));
@@ -2988,12 +3162,22 @@ static ssize_t srp_create_target(struct device *dev,
2988 target->tl_retry_count = 7; 3162 target->tl_retry_count = 7;
2989 target->queue_size = SRP_DEFAULT_QUEUE_SIZE; 3163 target->queue_size = SRP_DEFAULT_QUEUE_SIZE;
2990 3164
3165 /*
3166 * Avoid that the SCSI host can be removed by srp_remove_target()
3167 * before this function returns.
3168 */
3169 scsi_host_get(target->scsi_host);
3170
2991 mutex_lock(&host->add_target_mutex); 3171 mutex_lock(&host->add_target_mutex);
2992 3172
2993 ret = srp_parse_options(buf, target); 3173 ret = srp_parse_options(buf, target);
2994 if (ret) 3174 if (ret)
2995 goto err; 3175 goto err;
2996 3176
3177 ret = scsi_init_shared_tag_map(target_host, target_host->can_queue);
3178 if (ret)
3179 goto err;
3180
2997 target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE; 3181 target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
2998 3182
2999 if (!srp_conn_unique(target->srp_host, target)) { 3183 if (!srp_conn_unique(target->srp_host, target)) {
@@ -3022,59 +3206,115 @@ static ssize_t srp_create_target(struct device *dev,
3022 INIT_WORK(&target->tl_err_work, srp_tl_err_work); 3206 INIT_WORK(&target->tl_err_work, srp_tl_err_work);
3023 INIT_WORK(&target->remove_work, srp_remove_work); 3207 INIT_WORK(&target->remove_work, srp_remove_work);
3024 spin_lock_init(&target->lock); 3208 spin_lock_init(&target->lock);
3025 INIT_LIST_HEAD(&target->free_tx); 3209 ret = ib_query_gid(ibdev, host->port, 0, &target->sgid);
3026 ret = srp_alloc_req_data(target);
3027 if (ret) 3210 if (ret)
3028 goto err_free_mem; 3211 goto err;
3029 3212
3030 ret = ib_query_gid(ibdev, host->port, 0, &target->path.sgid); 3213 ret = -ENOMEM;
3031 if (ret) 3214 target->ch_count = max_t(unsigned, num_online_nodes(),
3032 goto err_free_mem; 3215 min(ch_count ? :
3216 min(4 * num_online_nodes(),
3217 ibdev->num_comp_vectors),
3218 num_online_cpus()));
3219 target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
3220 GFP_KERNEL);
3221 if (!target->ch)
3222 goto err;
3033 3223
3034 ret = srp_create_target_ib(target); 3224 node_idx = 0;
3035 if (ret) 3225 for_each_online_node(node) {
3036 goto err_free_mem; 3226 const int ch_start = (node_idx * target->ch_count /
3227 num_online_nodes());
3228 const int ch_end = ((node_idx + 1) * target->ch_count /
3229 num_online_nodes());
3230 const int cv_start = (node_idx * ibdev->num_comp_vectors /
3231 num_online_nodes() + target->comp_vector)
3232 % ibdev->num_comp_vectors;
3233 const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors /
3234 num_online_nodes() + target->comp_vector)
3235 % ibdev->num_comp_vectors;
3236 int cpu_idx = 0;
3237
3238 for_each_online_cpu(cpu) {
3239 if (cpu_to_node(cpu) != node)
3240 continue;
3241 if (ch_start + cpu_idx >= ch_end)
3242 continue;
3243 ch = &target->ch[ch_start + cpu_idx];
3244 ch->target = target;
3245 ch->comp_vector = cv_start == cv_end ? cv_start :
3246 cv_start + cpu_idx % (cv_end - cv_start);
3247 spin_lock_init(&ch->lock);
3248 INIT_LIST_HEAD(&ch->free_tx);
3249 ret = srp_new_cm_id(ch);
3250 if (ret)
3251 goto err_disconnect;
3037 3252
3038 ret = srp_new_cm_id(target); 3253 ret = srp_create_ch_ib(ch);
3039 if (ret) 3254 if (ret)
3040 goto err_free_ib; 3255 goto err_disconnect;
3041 3256
3042 ret = srp_connect_target(target); 3257 ret = srp_alloc_req_data(ch);
3043 if (ret) { 3258 if (ret)
3044 shost_printk(KERN_ERR, target->scsi_host, 3259 goto err_disconnect;
3045 PFX "Connection failed\n"); 3260
3046 goto err_cm_id; 3261 ret = srp_connect_ch(ch, multich);
3262 if (ret) {
3263 shost_printk(KERN_ERR, target->scsi_host,
3264 PFX "Connection %d/%d failed\n",
3265 ch_start + cpu_idx,
3266 target->ch_count);
3267 if (node_idx == 0 && cpu_idx == 0) {
3268 goto err_disconnect;
3269 } else {
3270 srp_free_ch_ib(target, ch);
3271 srp_free_req_data(target, ch);
3272 target->ch_count = ch - target->ch;
3273 break;
3274 }
3275 }
3276
3277 multich = true;
3278 cpu_idx++;
3279 }
3280 node_idx++;
3047 } 3281 }
3048 3282
3283 target->scsi_host->nr_hw_queues = target->ch_count;
3284
3049 ret = srp_add_target(host, target); 3285 ret = srp_add_target(host, target);
3050 if (ret) 3286 if (ret)
3051 goto err_disconnect; 3287 goto err_disconnect;
3052 3288
3053 shost_printk(KERN_DEBUG, target->scsi_host, PFX 3289 if (target->state != SRP_TARGET_REMOVED) {
3054 "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n", 3290 shost_printk(KERN_DEBUG, target->scsi_host, PFX
3055 be64_to_cpu(target->id_ext), 3291 "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
3056 be64_to_cpu(target->ioc_guid), 3292 be64_to_cpu(target->id_ext),
3057 be16_to_cpu(target->path.pkey), 3293 be64_to_cpu(target->ioc_guid),
3058 be64_to_cpu(target->service_id), 3294 be16_to_cpu(target->pkey),
3059 target->path.sgid.raw, target->path.dgid.raw); 3295 be64_to_cpu(target->service_id),
3296 target->sgid.raw, target->orig_dgid.raw);
3297 }
3060 3298
3061 ret = count; 3299 ret = count;
3062 3300
3063out: 3301out:
3064 mutex_unlock(&host->add_target_mutex); 3302 mutex_unlock(&host->add_target_mutex);
3303
3304 scsi_host_put(target->scsi_host);
3305
3065 return ret; 3306 return ret;
3066 3307
3067err_disconnect: 3308err_disconnect:
3068 srp_disconnect_target(target); 3309 srp_disconnect_target(target);
3069 3310
3070err_cm_id: 3311 for (i = 0; i < target->ch_count; i++) {
3071 ib_destroy_cm_id(target->cm_id); 3312 ch = &target->ch[i];
3072 3313 srp_free_ch_ib(target, ch);
3073err_free_ib: 3314 srp_free_req_data(target, ch);
3074 srp_free_target_ib(target); 3315 }
3075 3316
3076err_free_mem: 3317 kfree(target->ch);
3077 srp_free_req_data(target);
3078 3318
3079err: 3319err:
3080 scsi_host_put(target_host); 3320 scsi_host_put(target_host);
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index e46ecb15aa0d..a611556406ac 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -70,9 +70,12 @@ enum {
70 70
71 LOCAL_INV_WR_ID_MASK = 1, 71 LOCAL_INV_WR_ID_MASK = 1,
72 FAST_REG_WR_ID_MASK = 2, 72 FAST_REG_WR_ID_MASK = 2,
73
74 SRP_LAST_WR_ID = 0xfffffffcU,
73}; 75};
74 76
75enum srp_target_state { 77enum srp_target_state {
78 SRP_TARGET_SCANNING,
76 SRP_TARGET_LIVE, 79 SRP_TARGET_LIVE,
77 SRP_TARGET_REMOVED, 80 SRP_TARGET_REMOVED,
78}; 81};
@@ -115,7 +118,6 @@ struct srp_host {
115}; 118};
116 119
117struct srp_request { 120struct srp_request {
118 struct list_head list;
119 struct scsi_cmnd *scmnd; 121 struct scsi_cmnd *scmnd;
120 struct srp_iu *cmd; 122 struct srp_iu *cmd;
121 union { 123 union {
@@ -126,24 +128,62 @@ struct srp_request {
126 struct srp_direct_buf *indirect_desc; 128 struct srp_direct_buf *indirect_desc;
127 dma_addr_t indirect_dma_addr; 129 dma_addr_t indirect_dma_addr;
128 short nmdesc; 130 short nmdesc;
129 short index;
130}; 131};
131 132
132struct srp_target_port { 133/**
134 * struct srp_rdma_ch
135 * @comp_vector: Completion vector used by this RDMA channel.
136 */
137struct srp_rdma_ch {
133 /* These are RW in the hot path, and commonly used together */ 138 /* These are RW in the hot path, and commonly used together */
134 struct list_head free_tx; 139 struct list_head free_tx;
135 struct list_head free_reqs;
136 spinlock_t lock; 140 spinlock_t lock;
137 s32 req_lim; 141 s32 req_lim;
138 142
139 /* These are read-only in the hot path */ 143 /* These are read-only in the hot path */
140 struct ib_cq *send_cq ____cacheline_aligned_in_smp; 144 struct srp_target_port *target ____cacheline_aligned_in_smp;
145 struct ib_cq *send_cq;
141 struct ib_cq *recv_cq; 146 struct ib_cq *recv_cq;
142 struct ib_qp *qp; 147 struct ib_qp *qp;
143 union { 148 union {
144 struct ib_fmr_pool *fmr_pool; 149 struct ib_fmr_pool *fmr_pool;
145 struct srp_fr_pool *fr_pool; 150 struct srp_fr_pool *fr_pool;
146 }; 151 };
152
153 /* Everything above this point is used in the hot path of
154 * command processing. Try to keep them packed into cachelines.
155 */
156
157 struct completion done;
158 int status;
159
160 struct ib_sa_path_rec path;
161 struct ib_sa_query *path_query;
162 int path_query_id;
163
164 struct ib_cm_id *cm_id;
165 struct srp_iu **tx_ring;
166 struct srp_iu **rx_ring;
167 struct srp_request *req_ring;
168 int max_ti_iu_len;
169 int comp_vector;
170
171 struct completion tsk_mgmt_done;
172 u8 tsk_mgmt_status;
173};
174
175/**
176 * struct srp_target_port
177 * @comp_vector: Completion vector used by the first RDMA channel created for
178 * this target port.
179 */
180struct srp_target_port {
181 /* read and written in the hot path */
182 spinlock_t lock;
183
184 /* read only in the hot path */
185 struct srp_rdma_ch *ch;
186 u32 ch_count;
147 u32 lkey; 187 u32 lkey;
148 u32 rkey; 188 u32 rkey;
149 enum srp_target_state state; 189 enum srp_target_state state;
@@ -152,10 +192,8 @@ struct srp_target_port {
152 unsigned int indirect_size; 192 unsigned int indirect_size;
153 bool allow_ext_sg; 193 bool allow_ext_sg;
154 194
155 /* Everything above this point is used in the hot path of 195 /* other member variables */
156 * command processing. Try to keep them packed into cachelines. 196 union ib_gid sgid;
157 */
158
159 __be64 id_ext; 197 __be64 id_ext;
160 __be64 ioc_guid; 198 __be64 ioc_guid;
161 __be64 service_id; 199 __be64 service_id;
@@ -172,34 +210,19 @@ struct srp_target_port {
172 int comp_vector; 210 int comp_vector;
173 int tl_retry_count; 211 int tl_retry_count;
174 212
175 struct ib_sa_path_rec path; 213 union ib_gid orig_dgid;
176 __be16 orig_dgid[8]; 214 __be16 pkey;
177 struct ib_sa_query *path_query;
178 int path_query_id;
179 215
180 u32 rq_tmo_jiffies; 216 u32 rq_tmo_jiffies;
181 bool connected; 217 bool connected;
182 218
183 struct ib_cm_id *cm_id;
184
185 int max_ti_iu_len;
186
187 int zero_req_lim; 219 int zero_req_lim;
188 220
189 struct srp_iu **tx_ring;
190 struct srp_iu **rx_ring;
191 struct srp_request *req_ring;
192
193 struct work_struct tl_err_work; 221 struct work_struct tl_err_work;
194 struct work_struct remove_work; 222 struct work_struct remove_work;
195 223
196 struct list_head list; 224 struct list_head list;
197 struct completion done;
198 int status;
199 bool qp_in_error; 225 bool qp_in_error;
200
201 struct completion tsk_mgmt_done;
202 u8 tsk_mgmt_status;
203}; 226};
204 227
205struct srp_iu { 228struct srp_iu {
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index bc203485716d..8afa28e4570e 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -421,7 +421,7 @@ static int evdev_open(struct inode *inode, struct file *file)
421 421
422 err_free_client: 422 err_free_client:
423 evdev_detach_client(evdev, client); 423 evdev_detach_client(evdev, client);
424 kfree(client); 424 kvfree(client);
425 return error; 425 return error;
426} 426}
427 427
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 2ed7905a068f..fc55f0d15b70 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -1179,9 +1179,19 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
1179 } 1179 }
1180 1180
1181 ep_irq_in = &intf->cur_altsetting->endpoint[1].desc; 1181 ep_irq_in = &intf->cur_altsetting->endpoint[1].desc;
1182 usb_fill_bulk_urb(xpad->bulk_out, udev, 1182 if (usb_endpoint_is_bulk_out(ep_irq_in)) {
1183 usb_sndbulkpipe(udev, ep_irq_in->bEndpointAddress), 1183 usb_fill_bulk_urb(xpad->bulk_out, udev,
1184 xpad->bdata, XPAD_PKT_LEN, xpad_bulk_out, xpad); 1184 usb_sndbulkpipe(udev,
1185 ep_irq_in->bEndpointAddress),
1186 xpad->bdata, XPAD_PKT_LEN,
1187 xpad_bulk_out, xpad);
1188 } else {
1189 usb_fill_int_urb(xpad->bulk_out, udev,
1190 usb_sndintpipe(udev,
1191 ep_irq_in->bEndpointAddress),
1192 xpad->bdata, XPAD_PKT_LEN,
1193 xpad_bulk_out, xpad, 0);
1194 }
1185 1195
1186 /* 1196 /*
1187 * Submit the int URB immediately rather than waiting for open 1197 * Submit the int URB immediately rather than waiting for open
diff --git a/drivers/input/keyboard/gpio_keys_polled.c b/drivers/input/keyboard/gpio_keys_polled.c
index 432d36395f35..c9c1c8ca7267 100644
--- a/drivers/input/keyboard/gpio_keys_polled.c
+++ b/drivers/input/keyboard/gpio_keys_polled.c
@@ -23,10 +23,9 @@
23#include <linux/ioport.h> 23#include <linux/ioport.h>
24#include <linux/platform_device.h> 24#include <linux/platform_device.h>
25#include <linux/gpio.h> 25#include <linux/gpio.h>
26#include <linux/gpio/consumer.h>
26#include <linux/gpio_keys.h> 27#include <linux/gpio_keys.h>
27#include <linux/of.h> 28#include <linux/property.h>
28#include <linux/of_platform.h>
29#include <linux/of_gpio.h>
30 29
31#define DRV_NAME "gpio-keys-polled" 30#define DRV_NAME "gpio-keys-polled"
32 31
@@ -51,15 +50,14 @@ static void gpio_keys_polled_check_state(struct input_dev *input,
51 int state; 50 int state;
52 51
53 if (bdata->can_sleep) 52 if (bdata->can_sleep)
54 state = !!gpio_get_value_cansleep(button->gpio); 53 state = !!gpiod_get_value_cansleep(button->gpiod);
55 else 54 else
56 state = !!gpio_get_value(button->gpio); 55 state = !!gpiod_get_value(button->gpiod);
57 56
58 if (state != bdata->last_state) { 57 if (state != bdata->last_state) {
59 unsigned int type = button->type ?: EV_KEY; 58 unsigned int type = button->type ?: EV_KEY;
60 59
61 input_event(input, type, button->code, 60 input_event(input, type, button->code, state);
62 !!(state ^ button->active_low));
63 input_sync(input); 61 input_sync(input);
64 bdata->count = 0; 62 bdata->count = 0;
65 bdata->last_state = state; 63 bdata->last_state = state;
@@ -102,21 +100,15 @@ static void gpio_keys_polled_close(struct input_polled_dev *dev)
102 pdata->disable(bdev->dev); 100 pdata->disable(bdev->dev);
103} 101}
104 102
105#ifdef CONFIG_OF
106static struct gpio_keys_platform_data *gpio_keys_polled_get_devtree_pdata(struct device *dev) 103static struct gpio_keys_platform_data *gpio_keys_polled_get_devtree_pdata(struct device *dev)
107{ 104{
108 struct device_node *node, *pp;
109 struct gpio_keys_platform_data *pdata; 105 struct gpio_keys_platform_data *pdata;
110 struct gpio_keys_button *button; 106 struct gpio_keys_button *button;
107 struct fwnode_handle *child;
111 int error; 108 int error;
112 int nbuttons; 109 int nbuttons;
113 int i;
114
115 node = dev->of_node;
116 if (!node)
117 return NULL;
118 110
119 nbuttons = of_get_child_count(node); 111 nbuttons = device_get_child_node_count(dev);
120 if (nbuttons == 0) 112 if (nbuttons == 0)
121 return NULL; 113 return NULL;
122 114
@@ -126,52 +118,44 @@ static struct gpio_keys_platform_data *gpio_keys_polled_get_devtree_pdata(struct
126 return ERR_PTR(-ENOMEM); 118 return ERR_PTR(-ENOMEM);
127 119
128 pdata->buttons = (struct gpio_keys_button *)(pdata + 1); 120 pdata->buttons = (struct gpio_keys_button *)(pdata + 1);
129 pdata->nbuttons = nbuttons;
130 121
131 pdata->rep = !!of_get_property(node, "autorepeat", NULL); 122 pdata->rep = device_property_present(dev, "autorepeat");
132 of_property_read_u32(node, "poll-interval", &pdata->poll_interval); 123 device_property_read_u32(dev, "poll-interval", &pdata->poll_interval);
133 124
134 i = 0; 125 device_for_each_child_node(dev, child) {
135 for_each_child_of_node(node, pp) { 126 struct gpio_desc *desc;
136 int gpio;
137 enum of_gpio_flags flags;
138 127
139 if (!of_find_property(pp, "gpios", NULL)) { 128 desc = devm_get_gpiod_from_child(dev, child);
140 pdata->nbuttons--; 129 if (IS_ERR(desc)) {
141 dev_warn(dev, "Found button without gpios\n"); 130 error = PTR_ERR(desc);
142 continue;
143 }
144
145 gpio = of_get_gpio_flags(pp, 0, &flags);
146 if (gpio < 0) {
147 error = gpio;
148 if (error != -EPROBE_DEFER) 131 if (error != -EPROBE_DEFER)
149 dev_err(dev, 132 dev_err(dev,
150 "Failed to get gpio flags, error: %d\n", 133 "Failed to get gpio flags, error: %d\n",
151 error); 134 error);
135 fwnode_handle_put(child);
152 return ERR_PTR(error); 136 return ERR_PTR(error);
153 } 137 }
154 138
155 button = &pdata->buttons[i++]; 139 button = &pdata->buttons[pdata->nbuttons++];
156 140 button->gpiod = desc;
157 button->gpio = gpio;
158 button->active_low = flags & OF_GPIO_ACTIVE_LOW;
159 141
160 if (of_property_read_u32(pp, "linux,code", &button->code)) { 142 if (fwnode_property_read_u32(child, "linux,code", &button->code)) {
161 dev_err(dev, "Button without keycode: 0x%x\n", 143 dev_err(dev, "Button without keycode: %d\n",
162 button->gpio); 144 pdata->nbuttons - 1);
145 fwnode_handle_put(child);
163 return ERR_PTR(-EINVAL); 146 return ERR_PTR(-EINVAL);
164 } 147 }
165 148
166 button->desc = of_get_property(pp, "label", NULL); 149 fwnode_property_read_string(child, "label", &button->desc);
167 150
168 if (of_property_read_u32(pp, "linux,input-type", &button->type)) 151 if (fwnode_property_read_u32(child, "linux,input-type",
152 &button->type))
169 button->type = EV_KEY; 153 button->type = EV_KEY;
170 154
171 button->wakeup = !!of_get_property(pp, "gpio-key,wakeup", NULL); 155 button->wakeup = fwnode_property_present(child, "gpio-key,wakeup");
172 156
173 if (of_property_read_u32(pp, "debounce-interval", 157 if (fwnode_property_read_u32(child, "debounce-interval",
174 &button->debounce_interval)) 158 &button->debounce_interval))
175 button->debounce_interval = 5; 159 button->debounce_interval = 5;
176 } 160 }
177 161
@@ -187,15 +171,6 @@ static const struct of_device_id gpio_keys_polled_of_match[] = {
187}; 171};
188MODULE_DEVICE_TABLE(of, gpio_keys_polled_of_match); 172MODULE_DEVICE_TABLE(of, gpio_keys_polled_of_match);
189 173
190#else
191
192static inline struct gpio_keys_platform_data *
193gpio_keys_polled_get_devtree_pdata(struct device *dev)
194{
195 return NULL;
196}
197#endif
198
199static int gpio_keys_polled_probe(struct platform_device *pdev) 174static int gpio_keys_polled_probe(struct platform_device *pdev)
200{ 175{
201 struct device *dev = &pdev->dev; 176 struct device *dev = &pdev->dev;
@@ -259,7 +234,6 @@ static int gpio_keys_polled_probe(struct platform_device *pdev)
259 for (i = 0; i < pdata->nbuttons; i++) { 234 for (i = 0; i < pdata->nbuttons; i++) {
260 struct gpio_keys_button *button = &pdata->buttons[i]; 235 struct gpio_keys_button *button = &pdata->buttons[i];
261 struct gpio_keys_button_data *bdata = &bdev->data[i]; 236 struct gpio_keys_button_data *bdata = &bdev->data[i];
262 unsigned int gpio = button->gpio;
263 unsigned int type = button->type ?: EV_KEY; 237 unsigned int type = button->type ?: EV_KEY;
264 238
265 if (button->wakeup) { 239 if (button->wakeup) {
@@ -267,15 +241,31 @@ static int gpio_keys_polled_probe(struct platform_device *pdev)
267 return -EINVAL; 241 return -EINVAL;
268 } 242 }
269 243
270 error = devm_gpio_request_one(&pdev->dev, gpio, GPIOF_IN, 244 /*
271 button->desc ? : DRV_NAME); 245 * Legacy GPIO number so request the GPIO here and
272 if (error) { 246 * convert it to descriptor.
273 dev_err(dev, "unable to claim gpio %u, err=%d\n", 247 */
274 gpio, error); 248 if (!button->gpiod && gpio_is_valid(button->gpio)) {
275 return error; 249 unsigned flags = 0;
250
251 if (button->active_low)
252 flags |= GPIOF_ACTIVE_LOW;
253
254 error = devm_gpio_request_one(&pdev->dev, button->gpio,
255 flags, button->desc ? : DRV_NAME);
256 if (error) {
257 dev_err(dev, "unable to claim gpio %u, err=%d\n",
258 button->gpio, error);
259 return error;
260 }
261
262 button->gpiod = gpio_to_desc(button->gpio);
276 } 263 }
277 264
278 bdata->can_sleep = gpio_cansleep(gpio); 265 if (IS_ERR(button->gpiod))
266 return PTR_ERR(button->gpiod);
267
268 bdata->can_sleep = gpiod_cansleep(button->gpiod);
279 bdata->last_state = -1; 269 bdata->last_state = -1;
280 bdata->threshold = DIV_ROUND_UP(button->debounce_interval, 270 bdata->threshold = DIV_ROUND_UP(button->debounce_interval,
281 pdata->poll_interval); 271 pdata->poll_interval);
@@ -308,7 +298,7 @@ static struct platform_driver gpio_keys_polled_driver = {
308 .driver = { 298 .driver = {
309 .name = DRV_NAME, 299 .name = DRV_NAME,
310 .owner = THIS_MODULE, 300 .owner = THIS_MODULE,
311 .of_match_table = of_match_ptr(gpio_keys_polled_of_match), 301 .of_match_table = gpio_keys_polled_of_match,
312 }, 302 },
313}; 303};
314module_platform_driver(gpio_keys_polled_driver); 304module_platform_driver(gpio_keys_polled_driver);
diff --git a/drivers/input/keyboard/samsung-keypad.c b/drivers/input/keyboard/samsung-keypad.c
index 5e80fbf7b5ed..c994e3bbd776 100644
--- a/drivers/input/keyboard/samsung-keypad.c
+++ b/drivers/input/keyboard/samsung-keypad.c
@@ -463,7 +463,7 @@ static int samsung_keypad_remove(struct platform_device *pdev)
463 return 0; 463 return 0;
464} 464}
465 465
466#ifdef CONFIG_PM_RUNTIME 466#ifdef CONFIG_PM
467static int samsung_keypad_runtime_suspend(struct device *dev) 467static int samsung_keypad_runtime_suspend(struct device *dev)
468{ 468{
469 struct platform_device *pdev = to_platform_device(dev); 469 struct platform_device *pdev = to_platform_device(dev);
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 3fcb6b3cb0bd..f2b978026407 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -428,14 +428,6 @@ static void elantech_report_trackpoint(struct psmouse *psmouse,
428 int x, y; 428 int x, y;
429 u32 t; 429 u32 t;
430 430
431 if (dev_WARN_ONCE(&psmouse->ps2dev.serio->dev,
432 !tp_dev,
433 psmouse_fmt("Unexpected trackpoint message\n"))) {
434 if (etd->debug == 1)
435 elantech_packet_dump(psmouse);
436 return;
437 }
438
439 t = get_unaligned_le32(&packet[0]); 431 t = get_unaligned_le32(&packet[0]);
440 432
441 switch (t & ~7U) { 433 switch (t & ~7U) {
@@ -793,7 +785,7 @@ static int elantech_packet_check_v4(struct psmouse *psmouse)
793 unsigned char packet_type = packet[3] & 0x03; 785 unsigned char packet_type = packet[3] & 0x03;
794 bool sanity_check; 786 bool sanity_check;
795 787
796 if ((packet[3] & 0x0f) == 0x06) 788 if (etd->tp_dev && (packet[3] & 0x0f) == 0x06)
797 return PACKET_TRACKPOINT; 789 return PACKET_TRACKPOINT;
798 790
799 /* 791 /*
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 2a7a9174c702..f9472920d986 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -143,6 +143,10 @@ static const struct min_max_quirk min_max_pnpid_table[] = {
143 (const char * const []){"LEN2001", NULL}, 143 (const char * const []){"LEN2001", NULL},
144 1024, 5022, 2508, 4832 144 1024, 5022, 2508, 4832
145 }, 145 },
146 {
147 (const char * const []){"LEN2006", NULL},
148 1264, 5675, 1171, 4688
149 },
146 { } 150 { }
147}; 151};
148 152
diff --git a/drivers/input/touchscreen/cyttsp4_core.c b/drivers/input/touchscreen/cyttsp4_core.c
index a035a390f8e2..568a3d340c8a 100644
--- a/drivers/input/touchscreen/cyttsp4_core.c
+++ b/drivers/input/touchscreen/cyttsp4_core.c
@@ -1716,7 +1716,7 @@ static void cyttsp4_free_si_ptrs(struct cyttsp4 *cd)
1716 kfree(si->btn_rec_data); 1716 kfree(si->btn_rec_data);
1717} 1717}
1718 1718
1719#if defined(CONFIG_PM_SLEEP) || defined(CONFIG_PM_RUNTIME) 1719#ifdef CONFIG_PM
1720static int cyttsp4_core_sleep(struct cyttsp4 *cd) 1720static int cyttsp4_core_sleep(struct cyttsp4 *cd)
1721{ 1721{
1722 int rc; 1722 int rc;
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index dd5112265cc9..6dbfbc209491 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -163,14 +163,14 @@ config TEGRA_IOMMU_GART
163 hardware included on Tegra SoCs. 163 hardware included on Tegra SoCs.
164 164
165config TEGRA_IOMMU_SMMU 165config TEGRA_IOMMU_SMMU
166 bool "Tegra SMMU IOMMU Support" 166 bool "NVIDIA Tegra SMMU Support"
167 depends on ARCH_TEGRA && TEGRA_AHB 167 depends on ARCH_TEGRA
168 depends on TEGRA_AHB
169 depends on TEGRA_MC
168 select IOMMU_API 170 select IOMMU_API
169 help 171 help
170 Enables support for remapping discontiguous physical memory 172 This driver supports the IOMMU hardware (SMMU) found on NVIDIA Tegra
171 shared with the operating system into contiguous I/O virtual 173 SoCs (Tegra30 up to Tegra124).
172 space through the SMMU (System Memory Management Unit)
173 hardware included on Tegra SoCs.
174 174
175config EXYNOS_IOMMU 175config EXYNOS_IOMMU
176 bool "Exynos IOMMU Support" 176 bool "Exynos IOMMU Support"
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 505a9adac2d5..2d84c9edf3b8 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -3424,6 +3424,7 @@ static const struct iommu_ops amd_iommu_ops = {
3424 .detach_dev = amd_iommu_detach_device, 3424 .detach_dev = amd_iommu_detach_device,
3425 .map = amd_iommu_map, 3425 .map = amd_iommu_map,
3426 .unmap = amd_iommu_unmap, 3426 .unmap = amd_iommu_unmap,
3427 .map_sg = default_iommu_map_sg,
3427 .iova_to_phys = amd_iommu_iova_to_phys, 3428 .iova_to_phys = amd_iommu_iova_to_phys,
3428 .pgsize_bitmap = AMD_IOMMU_PGSIZES, 3429 .pgsize_bitmap = AMD_IOMMU_PGSIZES,
3429}; 3430};
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 60558f794922..e393ae01b5d2 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -1652,6 +1652,7 @@ static const struct iommu_ops arm_smmu_ops = {
1652 .detach_dev = arm_smmu_detach_dev, 1652 .detach_dev = arm_smmu_detach_dev,
1653 .map = arm_smmu_map, 1653 .map = arm_smmu_map,
1654 .unmap = arm_smmu_unmap, 1654 .unmap = arm_smmu_unmap,
1655 .map_sg = default_iommu_map_sg,
1655 .iova_to_phys = arm_smmu_iova_to_phys, 1656 .iova_to_phys = arm_smmu_iova_to_phys,
1656 .add_device = arm_smmu_add_device, 1657 .add_device = arm_smmu_add_device,
1657 .remove_device = arm_smmu_remove_device, 1658 .remove_device = arm_smmu_remove_device,
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 74233186f6f7..28372b85d8da 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -1178,6 +1178,7 @@ static const struct iommu_ops exynos_iommu_ops = {
1178 .detach_dev = exynos_iommu_detach_device, 1178 .detach_dev = exynos_iommu_detach_device,
1179 .map = exynos_iommu_map, 1179 .map = exynos_iommu_map,
1180 .unmap = exynos_iommu_unmap, 1180 .unmap = exynos_iommu_unmap,
1181 .map_sg = default_iommu_map_sg,
1181 .iova_to_phys = exynos_iommu_iova_to_phys, 1182 .iova_to_phys = exynos_iommu_iova_to_phys,
1182 .add_device = exynos_iommu_add_device, 1183 .add_device = exynos_iommu_add_device,
1183 .remove_device = exynos_iommu_remove_device, 1184 .remove_device = exynos_iommu_remove_device,
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index a27d6cb1a793..02cd26a17fe0 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -4467,6 +4467,7 @@ static const struct iommu_ops intel_iommu_ops = {
4467 .detach_dev = intel_iommu_detach_device, 4467 .detach_dev = intel_iommu_detach_device,
4468 .map = intel_iommu_map, 4468 .map = intel_iommu_map,
4469 .unmap = intel_iommu_unmap, 4469 .unmap = intel_iommu_unmap,
4470 .map_sg = default_iommu_map_sg,
4470 .iova_to_phys = intel_iommu_iova_to_phys, 4471 .iova_to_phys = intel_iommu_iova_to_phys,
4471 .add_device = intel_iommu_add_device, 4472 .add_device = intel_iommu_add_device,
4472 .remove_device = intel_iommu_remove_device, 4473 .remove_device = intel_iommu_remove_device,
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index ed8b04867b1f..02e4313e937c 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -818,7 +818,15 @@ static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
818 kfree(nb); 818 kfree(nb);
819 return err; 819 return err;
820 } 820 }
821 return bus_for_each_dev(bus, NULL, &cb, add_iommu_group); 821
822 err = bus_for_each_dev(bus, NULL, &cb, add_iommu_group);
823 if (err) {
824 bus_unregister_notifier(bus, nb);
825 kfree(nb);
826 return err;
827 }
828
829 return 0;
822} 830}
823 831
824/** 832/**
@@ -836,13 +844,19 @@ static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
836 */ 844 */
837int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops) 845int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops)
838{ 846{
847 int err;
848
839 if (bus->iommu_ops != NULL) 849 if (bus->iommu_ops != NULL)
840 return -EBUSY; 850 return -EBUSY;
841 851
842 bus->iommu_ops = ops; 852 bus->iommu_ops = ops;
843 853
844 /* Do IOMMU specific setup for this bus-type */ 854 /* Do IOMMU specific setup for this bus-type */
845 return iommu_bus_init(bus, ops); 855 err = iommu_bus_init(bus, ops);
856 if (err)
857 bus->iommu_ops = NULL;
858
859 return err;
846} 860}
847EXPORT_SYMBOL_GPL(bus_set_iommu); 861EXPORT_SYMBOL_GPL(bus_set_iommu);
848 862
@@ -1124,6 +1138,38 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
1124} 1138}
1125EXPORT_SYMBOL_GPL(iommu_unmap); 1139EXPORT_SYMBOL_GPL(iommu_unmap);
1126 1140
1141size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
1142 struct scatterlist *sg, unsigned int nents, int prot)
1143{
1144 struct scatterlist *s;
1145 size_t mapped = 0;
1146 unsigned int i;
1147 int ret;
1148
1149 for_each_sg(sg, s, nents, i) {
1150 phys_addr_t phys = page_to_phys(sg_page(s));
1151
1152 /* We are mapping on page boundarys, so offset must be 0 */
1153 if (s->offset)
1154 goto out_err;
1155
1156 ret = iommu_map(domain, iova + mapped, phys, s->length, prot);
1157 if (ret)
1158 goto out_err;
1159
1160 mapped += s->length;
1161 }
1162
1163 return mapped;
1164
1165out_err:
1166 /* undo mappings already done */
1167 iommu_unmap(domain, iova, mapped);
1168
1169 return 0;
1170
1171}
1172EXPORT_SYMBOL_GPL(default_iommu_map_sg);
1127 1173
1128int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr, 1174int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
1129 phys_addr_t paddr, u64 size, int prot) 1175 phys_addr_t paddr, u64 size, int prot)
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index 7dab5cbcc775..e509c58eee92 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -1127,6 +1127,7 @@ static const struct iommu_ops ipmmu_ops = {
1127 .detach_dev = ipmmu_detach_device, 1127 .detach_dev = ipmmu_detach_device,
1128 .map = ipmmu_map, 1128 .map = ipmmu_map,
1129 .unmap = ipmmu_unmap, 1129 .unmap = ipmmu_unmap,
1130 .map_sg = default_iommu_map_sg,
1130 .iova_to_phys = ipmmu_iova_to_phys, 1131 .iova_to_phys = ipmmu_iova_to_phys,
1131 .add_device = ipmmu_add_device, 1132 .add_device = ipmmu_add_device,
1132 .remove_device = ipmmu_remove_device, 1133 .remove_device = ipmmu_remove_device,
diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
index 74a1767c89b5..2c3f5ad01098 100644
--- a/drivers/iommu/irq_remapping.c
+++ b/drivers/iommu/irq_remapping.c
@@ -56,19 +56,13 @@ static int do_setup_msi_irqs(struct pci_dev *dev, int nvec)
56 unsigned int irq; 56 unsigned int irq;
57 struct msi_desc *msidesc; 57 struct msi_desc *msidesc;
58 58
59 WARN_ON(!list_is_singular(&dev->msi_list));
60 msidesc = list_entry(dev->msi_list.next, struct msi_desc, list); 59 msidesc = list_entry(dev->msi_list.next, struct msi_desc, list);
61 WARN_ON(msidesc->irq);
62 WARN_ON(msidesc->msi_attrib.multiple);
63 WARN_ON(msidesc->nvec_used);
64 60
65 irq = irq_alloc_hwirqs(nvec, dev_to_node(&dev->dev)); 61 irq = irq_alloc_hwirqs(nvec, dev_to_node(&dev->dev));
66 if (irq == 0) 62 if (irq == 0)
67 return -ENOSPC; 63 return -ENOSPC;
68 64
69 nvec_pow2 = __roundup_pow_of_two(nvec); 65 nvec_pow2 = __roundup_pow_of_two(nvec);
70 msidesc->nvec_used = nvec;
71 msidesc->msi_attrib.multiple = ilog2(nvec_pow2);
72 for (sub_handle = 0; sub_handle < nvec; sub_handle++) { 66 for (sub_handle = 0; sub_handle < nvec; sub_handle++) {
73 if (!sub_handle) { 67 if (!sub_handle) {
74 index = msi_alloc_remapped_irq(dev, irq, nvec_pow2); 68 index = msi_alloc_remapped_irq(dev, irq, nvec_pow2);
@@ -96,8 +90,6 @@ error:
96 * IRQs from tearing down again in default_teardown_msi_irqs() 90 * IRQs from tearing down again in default_teardown_msi_irqs()
97 */ 91 */
98 msidesc->irq = 0; 92 msidesc->irq = 0;
99 msidesc->nvec_used = 0;
100 msidesc->msi_attrib.multiple = 0;
101 93
102 return ret; 94 return ret;
103} 95}
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index 6e3dcc289d59..1c7b78ecf3e3 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -681,6 +681,7 @@ static const struct iommu_ops msm_iommu_ops = {
681 .detach_dev = msm_iommu_detach_dev, 681 .detach_dev = msm_iommu_detach_dev,
682 .map = msm_iommu_map, 682 .map = msm_iommu_map,
683 .unmap = msm_iommu_unmap, 683 .unmap = msm_iommu_unmap,
684 .map_sg = default_iommu_map_sg,
684 .iova_to_phys = msm_iommu_iova_to_phys, 685 .iova_to_phys = msm_iommu_iova_to_phys,
685 .pgsize_bitmap = MSM_IOMMU_PGSIZES, 686 .pgsize_bitmap = MSM_IOMMU_PGSIZES,
686}; 687};
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index 36278870e84a..18003c044454 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -1288,6 +1288,7 @@ static const struct iommu_ops omap_iommu_ops = {
1288 .detach_dev = omap_iommu_detach_dev, 1288 .detach_dev = omap_iommu_detach_dev,
1289 .map = omap_iommu_map, 1289 .map = omap_iommu_map,
1290 .unmap = omap_iommu_unmap, 1290 .unmap = omap_iommu_unmap,
1291 .map_sg = default_iommu_map_sg,
1291 .iova_to_phys = omap_iommu_iova_to_phys, 1292 .iova_to_phys = omap_iommu_iova_to_phys,
1292 .add_device = omap_iommu_add_device, 1293 .add_device = omap_iommu_add_device,
1293 .remove_device = omap_iommu_remove_device, 1294 .remove_device = omap_iommu_remove_device,
diff --git a/drivers/iommu/shmobile-iommu.c b/drivers/iommu/shmobile-iommu.c
index 1333e6fb3405..f1b00774e4de 100644
--- a/drivers/iommu/shmobile-iommu.c
+++ b/drivers/iommu/shmobile-iommu.c
@@ -361,6 +361,7 @@ static const struct iommu_ops shmobile_iommu_ops = {
361 .detach_dev = shmobile_iommu_detach_device, 361 .detach_dev = shmobile_iommu_detach_device,
362 .map = shmobile_iommu_map, 362 .map = shmobile_iommu_map,
363 .unmap = shmobile_iommu_unmap, 363 .unmap = shmobile_iommu_unmap,
364 .map_sg = default_iommu_map_sg,
364 .iova_to_phys = shmobile_iommu_iova_to_phys, 365 .iova_to_phys = shmobile_iommu_iova_to_phys,
365 .add_device = shmobile_iommu_add_device, 366 .add_device = shmobile_iommu_add_device,
366 .pgsize_bitmap = SZ_1M | SZ_64K | SZ_4K, 367 .pgsize_bitmap = SZ_1M | SZ_64K | SZ_4K,
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 3afdf43f732a..6e134c7c227f 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -1,1295 +1,732 @@
1/* 1/*
2 * IOMMU API for SMMU in Tegra30 2 * Copyright (C) 2011-2014 NVIDIA CORPORATION. All rights reserved.
3 * 3 *
4 * Copyright (c) 2011-2013, NVIDIA CORPORATION. All rights reserved. 4 * This program is free software; you can redistribute it and/or modify
5 * 5 * it under the terms of the GNU General Public License version 2 as
6 * This program is free software; you can redistribute it and/or modify it 6 * published by the Free Software Foundation.
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 */ 7 */
19 8
20#define pr_fmt(fmt) "%s(): " fmt, __func__
21
22#include <linux/err.h> 9#include <linux/err.h>
23#include <linux/module.h>
24#include <linux/platform_device.h>
25#include <linux/spinlock.h>
26#include <linux/slab.h>
27#include <linux/vmalloc.h>
28#include <linux/mm.h>
29#include <linux/pagemap.h>
30#include <linux/device.h>
31#include <linux/sched.h>
32#include <linux/iommu.h> 10#include <linux/iommu.h>
33#include <linux/io.h> 11#include <linux/kernel.h>
34#include <linux/of.h> 12#include <linux/of.h>
35#include <linux/of_iommu.h> 13#include <linux/of_device.h>
36#include <linux/debugfs.h> 14#include <linux/platform_device.h>
37#include <linux/seq_file.h> 15#include <linux/slab.h>
38 16
39#include <soc/tegra/ahb.h> 17#include <soc/tegra/ahb.h>
18#include <soc/tegra/mc.h>
40 19
41#include <asm/page.h> 20struct tegra_smmu {
42#include <asm/cacheflush.h> 21 void __iomem *regs;
43 22 struct device *dev;
44enum smmu_hwgrp {
45 HWGRP_AFI,
46 HWGRP_AVPC,
47 HWGRP_DC,
48 HWGRP_DCB,
49 HWGRP_EPP,
50 HWGRP_G2,
51 HWGRP_HC,
52 HWGRP_HDA,
53 HWGRP_ISP,
54 HWGRP_MPE,
55 HWGRP_NV,
56 HWGRP_NV2,
57 HWGRP_PPCS,
58 HWGRP_SATA,
59 HWGRP_VDE,
60 HWGRP_VI,
61
62 HWGRP_COUNT,
63
64 HWGRP_END = ~0,
65};
66 23
67#define HWG_AFI (1 << HWGRP_AFI) 24 struct tegra_mc *mc;
68#define HWG_AVPC (1 << HWGRP_AVPC) 25 const struct tegra_smmu_soc *soc;
69#define HWG_DC (1 << HWGRP_DC)
70#define HWG_DCB (1 << HWGRP_DCB)
71#define HWG_EPP (1 << HWGRP_EPP)
72#define HWG_G2 (1 << HWGRP_G2)
73#define HWG_HC (1 << HWGRP_HC)
74#define HWG_HDA (1 << HWGRP_HDA)
75#define HWG_ISP (1 << HWGRP_ISP)
76#define HWG_MPE (1 << HWGRP_MPE)
77#define HWG_NV (1 << HWGRP_NV)
78#define HWG_NV2 (1 << HWGRP_NV2)
79#define HWG_PPCS (1 << HWGRP_PPCS)
80#define HWG_SATA (1 << HWGRP_SATA)
81#define HWG_VDE (1 << HWGRP_VDE)
82#define HWG_VI (1 << HWGRP_VI)
83
84/* bitmap of the page sizes currently supported */
85#define SMMU_IOMMU_PGSIZES (SZ_4K)
86
87#define SMMU_CONFIG 0x10
88#define SMMU_CONFIG_DISABLE 0
89#define SMMU_CONFIG_ENABLE 1
90
91/* REVISIT: To support multiple MCs */
92enum {
93 _MC = 0,
94};
95 26
96enum { 27 unsigned long *asids;
97 _TLB = 0, 28 struct mutex lock;
98 _PTC,
99};
100 29
101#define SMMU_CACHE_CONFIG_BASE 0x14 30 struct list_head list;
102#define __SMMU_CACHE_CONFIG(mc, cache) (SMMU_CACHE_CONFIG_BASE + 4 * cache)
103#define SMMU_CACHE_CONFIG(cache) __SMMU_CACHE_CONFIG(_MC, cache)
104
105#define SMMU_CACHE_CONFIG_STATS_SHIFT 31
106#define SMMU_CACHE_CONFIG_STATS_ENABLE (1 << SMMU_CACHE_CONFIG_STATS_SHIFT)
107#define SMMU_CACHE_CONFIG_STATS_TEST_SHIFT 30
108#define SMMU_CACHE_CONFIG_STATS_TEST (1 << SMMU_CACHE_CONFIG_STATS_TEST_SHIFT)
109
110#define SMMU_TLB_CONFIG_HIT_UNDER_MISS__ENABLE (1 << 29)
111#define SMMU_TLB_CONFIG_ACTIVE_LINES__VALUE 0x10
112#define SMMU_TLB_CONFIG_RESET_VAL 0x20000010
113
114#define SMMU_PTC_CONFIG_CACHE__ENABLE (1 << 29)
115#define SMMU_PTC_CONFIG_INDEX_MAP__PATTERN 0x3f
116#define SMMU_PTC_CONFIG_RESET_VAL 0x2000003f
117
118#define SMMU_PTB_ASID 0x1c
119#define SMMU_PTB_ASID_CURRENT_SHIFT 0
120
121#define SMMU_PTB_DATA 0x20
122#define SMMU_PTB_DATA_RESET_VAL 0
123#define SMMU_PTB_DATA_ASID_NONSECURE_SHIFT 29
124#define SMMU_PTB_DATA_ASID_WRITABLE_SHIFT 30
125#define SMMU_PTB_DATA_ASID_READABLE_SHIFT 31
126
127#define SMMU_TLB_FLUSH 0x30
128#define SMMU_TLB_FLUSH_VA_MATCH_ALL 0
129#define SMMU_TLB_FLUSH_VA_MATCH_SECTION 2
130#define SMMU_TLB_FLUSH_VA_MATCH_GROUP 3
131#define SMMU_TLB_FLUSH_ASID_SHIFT 29
132#define SMMU_TLB_FLUSH_ASID_MATCH_DISABLE 0
133#define SMMU_TLB_FLUSH_ASID_MATCH_ENABLE 1
134#define SMMU_TLB_FLUSH_ASID_MATCH_SHIFT 31
135
136#define SMMU_PTC_FLUSH 0x34
137#define SMMU_PTC_FLUSH_TYPE_ALL 0
138#define SMMU_PTC_FLUSH_TYPE_ADR 1
139#define SMMU_PTC_FLUSH_ADR_SHIFT 4
140
141#define SMMU_ASID_SECURITY 0x38
142
143#define SMMU_STATS_CACHE_COUNT_BASE 0x1f0
144
145#define SMMU_STATS_CACHE_COUNT(mc, cache, hitmiss) \
146 (SMMU_STATS_CACHE_COUNT_BASE + 8 * cache + 4 * hitmiss)
147
148#define SMMU_TRANSLATION_ENABLE_0 0x228
149#define SMMU_TRANSLATION_ENABLE_1 0x22c
150#define SMMU_TRANSLATION_ENABLE_2 0x230
151
152#define SMMU_AFI_ASID 0x238 /* PCIE */
153#define SMMU_AVPC_ASID 0x23c /* AVP */
154#define SMMU_DC_ASID 0x240 /* Display controller */
155#define SMMU_DCB_ASID 0x244 /* Display controller B */
156#define SMMU_EPP_ASID 0x248 /* Encoder pre-processor */
157#define SMMU_G2_ASID 0x24c /* 2D engine */
158#define SMMU_HC_ASID 0x250 /* Host1x */
159#define SMMU_HDA_ASID 0x254 /* High-def audio */
160#define SMMU_ISP_ASID 0x258 /* Image signal processor */
161#define SMMU_MPE_ASID 0x264 /* MPEG encoder */
162#define SMMU_NV_ASID 0x268 /* (3D) */
163#define SMMU_NV2_ASID 0x26c /* (3D) */
164#define SMMU_PPCS_ASID 0x270 /* AHB */
165#define SMMU_SATA_ASID 0x278 /* SATA */
166#define SMMU_VDE_ASID 0x27c /* Video decoder */
167#define SMMU_VI_ASID 0x280 /* Video input */
168
169#define SMMU_PDE_NEXT_SHIFT 28
170
171#define SMMU_TLB_FLUSH_VA_SECTION__MASK 0xffc00000
172#define SMMU_TLB_FLUSH_VA_SECTION__SHIFT 12 /* right shift */
173#define SMMU_TLB_FLUSH_VA_GROUP__MASK 0xffffc000
174#define SMMU_TLB_FLUSH_VA_GROUP__SHIFT 12 /* right shift */
175#define SMMU_TLB_FLUSH_VA(iova, which) \
176 ((((iova) & SMMU_TLB_FLUSH_VA_##which##__MASK) >> \
177 SMMU_TLB_FLUSH_VA_##which##__SHIFT) | \
178 SMMU_TLB_FLUSH_VA_MATCH_##which)
179#define SMMU_PTB_ASID_CUR(n) \
180 ((n) << SMMU_PTB_ASID_CURRENT_SHIFT)
181#define SMMU_TLB_FLUSH_ASID_MATCH_disable \
182 (SMMU_TLB_FLUSH_ASID_MATCH_DISABLE << \
183 SMMU_TLB_FLUSH_ASID_MATCH_SHIFT)
184#define SMMU_TLB_FLUSH_ASID_MATCH__ENABLE \
185 (SMMU_TLB_FLUSH_ASID_MATCH_ENABLE << \
186 SMMU_TLB_FLUSH_ASID_MATCH_SHIFT)
187
188#define SMMU_PAGE_SHIFT 12
189#define SMMU_PAGE_SIZE (1 << SMMU_PAGE_SHIFT)
190#define SMMU_PAGE_MASK ((1 << SMMU_PAGE_SHIFT) - 1)
191
192#define SMMU_PDIR_COUNT 1024
193#define SMMU_PDIR_SIZE (sizeof(unsigned long) * SMMU_PDIR_COUNT)
194#define SMMU_PTBL_COUNT 1024
195#define SMMU_PTBL_SIZE (sizeof(unsigned long) * SMMU_PTBL_COUNT)
196#define SMMU_PDIR_SHIFT 12
197#define SMMU_PDE_SHIFT 12
198#define SMMU_PTE_SHIFT 12
199#define SMMU_PFN_MASK 0x000fffff
200
201#define SMMU_ADDR_TO_PFN(addr) ((addr) >> 12)
202#define SMMU_ADDR_TO_PDN(addr) ((addr) >> 22)
203#define SMMU_PDN_TO_ADDR(pdn) ((pdn) << 22)
204
205#define _READABLE (1 << SMMU_PTB_DATA_ASID_READABLE_SHIFT)
206#define _WRITABLE (1 << SMMU_PTB_DATA_ASID_WRITABLE_SHIFT)
207#define _NONSECURE (1 << SMMU_PTB_DATA_ASID_NONSECURE_SHIFT)
208#define _PDE_NEXT (1 << SMMU_PDE_NEXT_SHIFT)
209#define _MASK_ATTR (_READABLE | _WRITABLE | _NONSECURE)
210
211#define _PDIR_ATTR (_READABLE | _WRITABLE | _NONSECURE)
212
213#define _PDE_ATTR (_READABLE | _WRITABLE | _NONSECURE)
214#define _PDE_ATTR_N (_PDE_ATTR | _PDE_NEXT)
215#define _PDE_VACANT(pdn) (((pdn) << 10) | _PDE_ATTR)
216
217#define _PTE_ATTR (_READABLE | _WRITABLE | _NONSECURE)
218#define _PTE_VACANT(addr) (((addr) >> SMMU_PAGE_SHIFT) | _PTE_ATTR)
219
220#define SMMU_MK_PDIR(page, attr) \
221 ((page_to_phys(page) >> SMMU_PDIR_SHIFT) | (attr))
222#define SMMU_MK_PDE(page, attr) \
223 (unsigned long)((page_to_phys(page) >> SMMU_PDE_SHIFT) | (attr))
224#define SMMU_EX_PTBL_PAGE(pde) \
225 pfn_to_page((unsigned long)(pde) & SMMU_PFN_MASK)
226#define SMMU_PFN_TO_PTE(pfn, attr) (unsigned long)((pfn) | (attr))
227
228#define SMMU_ASID_ENABLE(asid) ((asid) | (1 << 31))
229#define SMMU_ASID_DISABLE 0
230#define SMMU_ASID_ASID(n) ((n) & ~SMMU_ASID_ENABLE(0))
231
232#define NUM_SMMU_REG_BANKS 3
233
234#define smmu_client_enable_hwgrp(c, m) smmu_client_set_hwgrp(c, m, 1)
235#define smmu_client_disable_hwgrp(c) smmu_client_set_hwgrp(c, 0, 0)
236#define __smmu_client_enable_hwgrp(c, m) __smmu_client_set_hwgrp(c, m, 1)
237#define __smmu_client_disable_hwgrp(c) __smmu_client_set_hwgrp(c, 0, 0)
238
239#define HWGRP_INIT(client) [HWGRP_##client] = SMMU_##client##_ASID
240
241static const u32 smmu_hwgrp_asid_reg[] = {
242 HWGRP_INIT(AFI),
243 HWGRP_INIT(AVPC),
244 HWGRP_INIT(DC),
245 HWGRP_INIT(DCB),
246 HWGRP_INIT(EPP),
247 HWGRP_INIT(G2),
248 HWGRP_INIT(HC),
249 HWGRP_INIT(HDA),
250 HWGRP_INIT(ISP),
251 HWGRP_INIT(MPE),
252 HWGRP_INIT(NV),
253 HWGRP_INIT(NV2),
254 HWGRP_INIT(PPCS),
255 HWGRP_INIT(SATA),
256 HWGRP_INIT(VDE),
257 HWGRP_INIT(VI),
258}; 31};
259#define HWGRP_ASID_REG(x) (smmu_hwgrp_asid_reg[x])
260 32
261/* 33struct tegra_smmu_as {
262 * Per client for address space 34 struct iommu_domain *domain;
263 */ 35 struct tegra_smmu *smmu;
264struct smmu_client { 36 unsigned int use_count;
265 struct device *dev; 37 struct page *count;
266 struct list_head list; 38 struct page *pd;
267 struct smmu_as *as; 39 unsigned id;
268 u32 hwgrp; 40 u32 attr;
269}; 41};
270 42
271/* 43static inline void smmu_writel(struct tegra_smmu *smmu, u32 value,
272 * Per address space 44 unsigned long offset)
273 */ 45{
274struct smmu_as { 46 writel(value, smmu->regs + offset);
275 struct smmu_device *smmu; /* back pointer to container */ 47}
276 unsigned int asid;
277 spinlock_t lock; /* for pagetable */
278 struct page *pdir_page;
279 unsigned long pdir_attr;
280 unsigned long pde_attr;
281 unsigned long pte_attr;
282 unsigned int *pte_count;
283
284 struct list_head client;
285 spinlock_t client_lock; /* for client list */
286};
287 48
288struct smmu_debugfs_info { 49static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
289 struct smmu_device *smmu; 50{
290 int mc; 51 return readl(smmu->regs + offset);
291 int cache; 52}
292};
293 53
294/* 54#define SMMU_CONFIG 0x010
295 * Per SMMU device - IOMMU device 55#define SMMU_CONFIG_ENABLE (1 << 0)
296 */
297struct smmu_device {
298 void __iomem *regbase; /* register offset base */
299 void __iomem **regs; /* register block start address array */
300 void __iomem **rege; /* register block end address array */
301 int nregs; /* number of register blocks */
302
303 unsigned long iovmm_base; /* remappable base address */
304 unsigned long page_count; /* total remappable size */
305 spinlock_t lock;
306 char *name;
307 struct device *dev;
308 struct page *avp_vector_page; /* dummy page shared by all AS's */
309 56
310 /* 57#define SMMU_TLB_CONFIG 0x14
311 * Register image savers for suspend/resume 58#define SMMU_TLB_CONFIG_HIT_UNDER_MISS (1 << 29)
312 */ 59#define SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION (1 << 28)
313 unsigned long translation_enable_0; 60#define SMMU_TLB_CONFIG_ACTIVE_LINES(x) ((x) & 0x3f)
314 unsigned long translation_enable_1;
315 unsigned long translation_enable_2;
316 unsigned long asid_security;
317 61
318 struct dentry *debugfs_root; 62#define SMMU_PTC_CONFIG 0x18
319 struct smmu_debugfs_info *debugfs_info; 63#define SMMU_PTC_CONFIG_ENABLE (1 << 29)
64#define SMMU_PTC_CONFIG_REQ_LIMIT(x) (((x) & 0x0f) << 24)
65#define SMMU_PTC_CONFIG_INDEX_MAP(x) ((x) & 0x3f)
320 66
321 struct device_node *ahb; 67#define SMMU_PTB_ASID 0x01c
68#define SMMU_PTB_ASID_VALUE(x) ((x) & 0x7f)
322 69
323 int num_as; 70#define SMMU_PTB_DATA 0x020
324 struct smmu_as as[0]; /* Run-time allocated array */ 71#define SMMU_PTB_DATA_VALUE(page, attr) (page_to_phys(page) >> 12 | (attr))
325};
326 72
327static struct smmu_device *smmu_handle; /* unique for a system */ 73#define SMMU_MK_PDE(page, attr) (page_to_phys(page) >> SMMU_PTE_SHIFT | (attr))
328 74
329/* 75#define SMMU_TLB_FLUSH 0x030
330 * SMMU register accessors 76#define SMMU_TLB_FLUSH_VA_MATCH_ALL (0 << 0)
331 */ 77#define SMMU_TLB_FLUSH_VA_MATCH_SECTION (2 << 0)
332static bool inline smmu_valid_reg(struct smmu_device *smmu, 78#define SMMU_TLB_FLUSH_VA_MATCH_GROUP (3 << 0)
333 void __iomem *addr) 79#define SMMU_TLB_FLUSH_ASID(x) (((x) & 0x7f) << 24)
334{ 80#define SMMU_TLB_FLUSH_VA_SECTION(addr) ((((addr) & 0xffc00000) >> 12) | \
335 int i; 81 SMMU_TLB_FLUSH_VA_MATCH_SECTION)
82#define SMMU_TLB_FLUSH_VA_GROUP(addr) ((((addr) & 0xffffc000) >> 12) | \
83 SMMU_TLB_FLUSH_VA_MATCH_GROUP)
84#define SMMU_TLB_FLUSH_ASID_MATCH (1 << 31)
336 85
337 for (i = 0; i < smmu->nregs; i++) { 86#define SMMU_PTC_FLUSH 0x034
338 if (addr < smmu->regs[i]) 87#define SMMU_PTC_FLUSH_TYPE_ALL (0 << 0)
339 break; 88#define SMMU_PTC_FLUSH_TYPE_ADR (1 << 0)
340 if (addr <= smmu->rege[i])
341 return true;
342 }
343 89
344 return false; 90#define SMMU_PTC_FLUSH_HI 0x9b8
345} 91#define SMMU_PTC_FLUSH_HI_MASK 0x3
346 92
347static inline u32 smmu_read(struct smmu_device *smmu, size_t offs) 93/* per-SWGROUP SMMU_*_ASID register */
348{ 94#define SMMU_ASID_ENABLE (1 << 31)
349 void __iomem *addr = smmu->regbase + offs; 95#define SMMU_ASID_MASK 0x7f
96#define SMMU_ASID_VALUE(x) ((x) & SMMU_ASID_MASK)
350 97
351 BUG_ON(!smmu_valid_reg(smmu, addr)); 98/* page table definitions */
99#define SMMU_NUM_PDE 1024
100#define SMMU_NUM_PTE 1024
352 101
353 return readl(addr); 102#define SMMU_SIZE_PD (SMMU_NUM_PDE * 4)
354} 103#define SMMU_SIZE_PT (SMMU_NUM_PTE * 4)
355 104
356static inline void smmu_write(struct smmu_device *smmu, u32 val, size_t offs) 105#define SMMU_PDE_SHIFT 22
357{ 106#define SMMU_PTE_SHIFT 12
358 void __iomem *addr = smmu->regbase + offs;
359 107
360 BUG_ON(!smmu_valid_reg(smmu, addr)); 108#define SMMU_PFN_MASK 0x000fffff
361 109
362 writel(val, addr); 110#define SMMU_PD_READABLE (1 << 31)
363} 111#define SMMU_PD_WRITABLE (1 << 30)
112#define SMMU_PD_NONSECURE (1 << 29)
364 113
365#define VA_PAGE_TO_PA(va, page) \ 114#define SMMU_PDE_READABLE (1 << 31)
366 (page_to_phys(page) + ((unsigned long)(va) & ~PAGE_MASK)) 115#define SMMU_PDE_WRITABLE (1 << 30)
116#define SMMU_PDE_NONSECURE (1 << 29)
117#define SMMU_PDE_NEXT (1 << 28)
367 118
368#define FLUSH_CPU_DCACHE(va, page, size) \ 119#define SMMU_PTE_READABLE (1 << 31)
369 do { \ 120#define SMMU_PTE_WRITABLE (1 << 30)
370 unsigned long _pa_ = VA_PAGE_TO_PA(va, page); \ 121#define SMMU_PTE_NONSECURE (1 << 29)
371 __cpuc_flush_dcache_area((void *)(va), (size_t)(size)); \
372 outer_flush_range(_pa_, _pa_+(size_t)(size)); \
373 } while (0)
374 122
375/* 123#define SMMU_PDE_ATTR (SMMU_PDE_READABLE | SMMU_PDE_WRITABLE | \
376 * Any interaction between any block on PPSB and a block on APB or AHB 124 SMMU_PDE_NONSECURE)
377 * must have these read-back barriers to ensure the APB/AHB bus 125#define SMMU_PTE_ATTR (SMMU_PTE_READABLE | SMMU_PTE_WRITABLE | \
378 * transaction is complete before initiating activity on the PPSB 126 SMMU_PTE_NONSECURE)
379 * block.
380 */
381#define FLUSH_SMMU_REGS(smmu) smmu_read(smmu, SMMU_CONFIG)
382 127
383#define smmu_client_hwgrp(c) (u32)((c)->dev->platform_data) 128static inline void smmu_flush_ptc(struct tegra_smmu *smmu, struct page *page,
384 129 unsigned long offset)
385static int __smmu_client_set_hwgrp(struct smmu_client *c,
386 unsigned long map, int on)
387{ 130{
388 int i; 131 phys_addr_t phys = page ? page_to_phys(page) : 0;
389 struct smmu_as *as = c->as; 132 u32 value;
390 u32 val, offs, mask = SMMU_ASID_ENABLE(as->asid); 133
391 struct smmu_device *smmu = as->smmu; 134 if (page) {
392 135 offset &= ~(smmu->mc->soc->atom_size - 1);
393 WARN_ON(!on && map); 136
394 if (on && !map) 137 if (smmu->mc->soc->num_address_bits > 32) {
395 return -EINVAL; 138#ifdef CONFIG_PHYS_ADDR_T_64BIT
396 if (!on) 139 value = (phys >> 32) & SMMU_PTC_FLUSH_HI_MASK;
397 map = smmu_client_hwgrp(c); 140#else
398 141 value = 0;
399 for_each_set_bit(i, &map, HWGRP_COUNT) { 142#endif
400 offs = HWGRP_ASID_REG(i); 143 smmu_writel(smmu, value, SMMU_PTC_FLUSH_HI);
401 val = smmu_read(smmu, offs);
402 if (on) {
403 if (WARN_ON(val & mask))
404 goto err_hw_busy;
405 val |= mask;
406 } else {
407 WARN_ON((val & mask) == mask);
408 val &= ~mask;
409 } 144 }
410 smmu_write(smmu, val, offs);
411 }
412 FLUSH_SMMU_REGS(smmu);
413 c->hwgrp = map;
414 return 0;
415 145
416err_hw_busy: 146 value = (phys + offset) | SMMU_PTC_FLUSH_TYPE_ADR;
417 for_each_set_bit(i, &map, HWGRP_COUNT) { 147 } else {
418 offs = HWGRP_ASID_REG(i); 148 value = SMMU_PTC_FLUSH_TYPE_ALL;
419 val = smmu_read(smmu, offs);
420 val &= ~mask;
421 smmu_write(smmu, val, offs);
422 } 149 }
423 return -EBUSY; 150
151 smmu_writel(smmu, value, SMMU_PTC_FLUSH);
424} 152}
425 153
426static int smmu_client_set_hwgrp(struct smmu_client *c, u32 map, int on) 154static inline void smmu_flush_tlb(struct tegra_smmu *smmu)
427{ 155{
428 u32 val; 156 smmu_writel(smmu, SMMU_TLB_FLUSH_VA_MATCH_ALL, SMMU_TLB_FLUSH);
429 unsigned long flags;
430 struct smmu_as *as = c->as;
431 struct smmu_device *smmu = as->smmu;
432
433 spin_lock_irqsave(&smmu->lock, flags);
434 val = __smmu_client_set_hwgrp(c, map, on);
435 spin_unlock_irqrestore(&smmu->lock, flags);
436 return val;
437} 157}
438 158
439/* 159static inline void smmu_flush_tlb_asid(struct tegra_smmu *smmu,
440 * Flush all TLB entries and all PTC entries 160 unsigned long asid)
441 * Caller must lock smmu
442 */
443static void smmu_flush_regs(struct smmu_device *smmu, int enable)
444{ 161{
445 u32 val; 162 u32 value;
446
447 smmu_write(smmu, SMMU_PTC_FLUSH_TYPE_ALL, SMMU_PTC_FLUSH);
448 FLUSH_SMMU_REGS(smmu);
449 val = SMMU_TLB_FLUSH_VA_MATCH_ALL |
450 SMMU_TLB_FLUSH_ASID_MATCH_disable;
451 smmu_write(smmu, val, SMMU_TLB_FLUSH);
452 163
453 if (enable) 164 value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
454 smmu_write(smmu, SMMU_CONFIG_ENABLE, SMMU_CONFIG); 165 SMMU_TLB_FLUSH_VA_MATCH_ALL;
455 FLUSH_SMMU_REGS(smmu); 166 smmu_writel(smmu, value, SMMU_TLB_FLUSH);
456} 167}
457 168
458static int smmu_setup_regs(struct smmu_device *smmu) 169static inline void smmu_flush_tlb_section(struct tegra_smmu *smmu,
170 unsigned long asid,
171 unsigned long iova)
459{ 172{
460 int i; 173 u32 value;
461 u32 val;
462 174
463 for (i = 0; i < smmu->num_as; i++) { 175 value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
464 struct smmu_as *as = &smmu->as[i]; 176 SMMU_TLB_FLUSH_VA_SECTION(iova);
465 struct smmu_client *c; 177 smmu_writel(smmu, value, SMMU_TLB_FLUSH);
466
467 smmu_write(smmu, SMMU_PTB_ASID_CUR(as->asid), SMMU_PTB_ASID);
468 val = as->pdir_page ?
469 SMMU_MK_PDIR(as->pdir_page, as->pdir_attr) :
470 SMMU_PTB_DATA_RESET_VAL;
471 smmu_write(smmu, val, SMMU_PTB_DATA);
472
473 list_for_each_entry(c, &as->client, list)
474 __smmu_client_set_hwgrp(c, c->hwgrp, 1);
475 }
476
477 smmu_write(smmu, smmu->translation_enable_0, SMMU_TRANSLATION_ENABLE_0);
478 smmu_write(smmu, smmu->translation_enable_1, SMMU_TRANSLATION_ENABLE_1);
479 smmu_write(smmu, smmu->translation_enable_2, SMMU_TRANSLATION_ENABLE_2);
480 smmu_write(smmu, smmu->asid_security, SMMU_ASID_SECURITY);
481 smmu_write(smmu, SMMU_TLB_CONFIG_RESET_VAL, SMMU_CACHE_CONFIG(_TLB));
482 smmu_write(smmu, SMMU_PTC_CONFIG_RESET_VAL, SMMU_CACHE_CONFIG(_PTC));
483
484 smmu_flush_regs(smmu, 1);
485
486 return tegra_ahb_enable_smmu(smmu->ahb);
487} 178}
488 179
489static void flush_ptc_and_tlb(struct smmu_device *smmu, 180static inline void smmu_flush_tlb_group(struct tegra_smmu *smmu,
490 struct smmu_as *as, dma_addr_t iova, 181 unsigned long asid,
491 unsigned long *pte, struct page *page, int is_pde) 182 unsigned long iova)
492{ 183{
493 u32 val; 184 u32 value;
494 unsigned long tlb_flush_va = is_pde
495 ? SMMU_TLB_FLUSH_VA(iova, SECTION)
496 : SMMU_TLB_FLUSH_VA(iova, GROUP);
497
498 val = SMMU_PTC_FLUSH_TYPE_ADR | VA_PAGE_TO_PA(pte, page);
499 smmu_write(smmu, val, SMMU_PTC_FLUSH);
500 FLUSH_SMMU_REGS(smmu);
501 val = tlb_flush_va |
502 SMMU_TLB_FLUSH_ASID_MATCH__ENABLE |
503 (as->asid << SMMU_TLB_FLUSH_ASID_SHIFT);
504 smmu_write(smmu, val, SMMU_TLB_FLUSH);
505 FLUSH_SMMU_REGS(smmu);
506}
507 185
508static void free_ptbl(struct smmu_as *as, dma_addr_t iova) 186 value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
509{ 187 SMMU_TLB_FLUSH_VA_GROUP(iova);
510 unsigned long pdn = SMMU_ADDR_TO_PDN(iova); 188 smmu_writel(smmu, value, SMMU_TLB_FLUSH);
511 unsigned long *pdir = (unsigned long *)page_address(as->pdir_page);
512
513 if (pdir[pdn] != _PDE_VACANT(pdn)) {
514 dev_dbg(as->smmu->dev, "pdn: %lx\n", pdn);
515
516 ClearPageReserved(SMMU_EX_PTBL_PAGE(pdir[pdn]));
517 __free_page(SMMU_EX_PTBL_PAGE(pdir[pdn]));
518 pdir[pdn] = _PDE_VACANT(pdn);
519 FLUSH_CPU_DCACHE(&pdir[pdn], as->pdir_page, sizeof pdir[pdn]);
520 flush_ptc_and_tlb(as->smmu, as, iova, &pdir[pdn],
521 as->pdir_page, 1);
522 }
523} 189}
524 190
525static void free_pdir(struct smmu_as *as) 191static inline void smmu_flush(struct tegra_smmu *smmu)
526{ 192{
527 unsigned addr; 193 smmu_readl(smmu, SMMU_CONFIG);
528 int count;
529 struct device *dev = as->smmu->dev;
530
531 if (!as->pdir_page)
532 return;
533
534 addr = as->smmu->iovmm_base;
535 count = as->smmu->page_count;
536 while (count-- > 0) {
537 free_ptbl(as, addr);
538 addr += SMMU_PAGE_SIZE * SMMU_PTBL_COUNT;
539 }
540 ClearPageReserved(as->pdir_page);
541 __free_page(as->pdir_page);
542 as->pdir_page = NULL;
543 devm_kfree(dev, as->pte_count);
544 as->pte_count = NULL;
545} 194}
546 195
547/* 196static int tegra_smmu_alloc_asid(struct tegra_smmu *smmu, unsigned int *idp)
548 * Maps PTBL for given iova and returns the PTE address
549 * Caller must unmap the mapped PTBL returned in *ptbl_page_p
550 */
551static unsigned long *locate_pte(struct smmu_as *as,
552 dma_addr_t iova, bool allocate,
553 struct page **ptbl_page_p,
554 unsigned int **count)
555{ 197{
556 unsigned long ptn = SMMU_ADDR_TO_PFN(iova); 198 unsigned long id;
557 unsigned long pdn = SMMU_ADDR_TO_PDN(iova);
558 unsigned long *pdir = page_address(as->pdir_page);
559 unsigned long *ptbl;
560
561 if (pdir[pdn] != _PDE_VACANT(pdn)) {
562 /* Mapped entry table already exists */
563 *ptbl_page_p = SMMU_EX_PTBL_PAGE(pdir[pdn]);
564 ptbl = page_address(*ptbl_page_p);
565 } else if (!allocate) {
566 return NULL;
567 } else {
568 int pn;
569 unsigned long addr = SMMU_PDN_TO_ADDR(pdn);
570 199
571 /* Vacant - allocate a new page table */ 200 mutex_lock(&smmu->lock);
572 dev_dbg(as->smmu->dev, "New PTBL pdn: %lx\n", pdn);
573 201
574 *ptbl_page_p = alloc_page(GFP_ATOMIC); 202 id = find_first_zero_bit(smmu->asids, smmu->soc->num_asids);
575 if (!*ptbl_page_p) { 203 if (id >= smmu->soc->num_asids) {
576 dev_err(as->smmu->dev, 204 mutex_unlock(&smmu->lock);
577 "failed to allocate smmu_device page table\n"); 205 return -ENOSPC;
578 return NULL;
579 }
580 SetPageReserved(*ptbl_page_p);
581 ptbl = (unsigned long *)page_address(*ptbl_page_p);
582 for (pn = 0; pn < SMMU_PTBL_COUNT;
583 pn++, addr += SMMU_PAGE_SIZE) {
584 ptbl[pn] = _PTE_VACANT(addr);
585 }
586 FLUSH_CPU_DCACHE(ptbl, *ptbl_page_p, SMMU_PTBL_SIZE);
587 pdir[pdn] = SMMU_MK_PDE(*ptbl_page_p,
588 as->pde_attr | _PDE_NEXT);
589 FLUSH_CPU_DCACHE(&pdir[pdn], as->pdir_page, sizeof pdir[pdn]);
590 flush_ptc_and_tlb(as->smmu, as, iova, &pdir[pdn],
591 as->pdir_page, 1);
592 } 206 }
593 *count = &as->pte_count[pdn];
594 207
595 return &ptbl[ptn % SMMU_PTBL_COUNT]; 208 set_bit(id, smmu->asids);
209 *idp = id;
210
211 mutex_unlock(&smmu->lock);
212 return 0;
596} 213}
597 214
598#ifdef CONFIG_SMMU_SIG_DEBUG 215static void tegra_smmu_free_asid(struct tegra_smmu *smmu, unsigned int id)
599static void put_signature(struct smmu_as *as,
600 dma_addr_t iova, unsigned long pfn)
601{ 216{
602 struct page *page; 217 mutex_lock(&smmu->lock);
603 unsigned long *vaddr; 218 clear_bit(id, smmu->asids);
604 219 mutex_unlock(&smmu->lock);
605 page = pfn_to_page(pfn);
606 vaddr = page_address(page);
607 if (!vaddr)
608 return;
609
610 vaddr[0] = iova;
611 vaddr[1] = pfn << PAGE_SHIFT;
612 FLUSH_CPU_DCACHE(vaddr, page, sizeof(vaddr[0]) * 2);
613} 220}
614#else 221
615static inline void put_signature(struct smmu_as *as, 222static bool tegra_smmu_capable(enum iommu_cap cap)
616 unsigned long addr, unsigned long pfn)
617{ 223{
224 return false;
618} 225}
619#endif
620 226
621/* 227static int tegra_smmu_domain_init(struct iommu_domain *domain)
622 * Caller must not hold as->lock
623 */
624static int alloc_pdir(struct smmu_as *as)
625{ 228{
626 unsigned long *pdir, flags; 229 struct tegra_smmu_as *as;
627 int pdn, err = 0; 230 unsigned int i;
628 u32 val; 231 uint32_t *pd;
629 struct smmu_device *smmu = as->smmu;
630 struct page *page;
631 unsigned int *cnt;
632 232
633 /* 233 as = kzalloc(sizeof(*as), GFP_KERNEL);
634 * do the allocation, then grab as->lock 234 if (!as)
635 */ 235 return -ENOMEM;
636 cnt = devm_kzalloc(smmu->dev,
637 sizeof(cnt[0]) * SMMU_PDIR_COUNT,
638 GFP_KERNEL);
639 page = alloc_page(GFP_KERNEL | __GFP_DMA);
640 236
641 spin_lock_irqsave(&as->lock, flags); 237 as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE;
238 as->domain = domain;
642 239
643 if (as->pdir_page) { 240 as->pd = alloc_page(GFP_KERNEL | __GFP_DMA);
644 /* We raced, free the redundant */ 241 if (!as->pd) {
645 err = -EAGAIN; 242 kfree(as);
646 goto err_out; 243 return -ENOMEM;
647 } 244 }
648 245
649 if (!page || !cnt) { 246 as->count = alloc_page(GFP_KERNEL);
650 dev_err(smmu->dev, "failed to allocate at %s\n", __func__); 247 if (!as->count) {
651 err = -ENOMEM; 248 __free_page(as->pd);
652 goto err_out; 249 kfree(as);
250 return -ENOMEM;
653 } 251 }
654 252
655 as->pdir_page = page; 253 /* clear PDEs */
656 as->pte_count = cnt; 254 pd = page_address(as->pd);
255 SetPageReserved(as->pd);
657 256
658 SetPageReserved(as->pdir_page); 257 for (i = 0; i < SMMU_NUM_PDE; i++)
659 pdir = page_address(as->pdir_page); 258 pd[i] = 0;
660 259
661 for (pdn = 0; pdn < SMMU_PDIR_COUNT; pdn++) 260 /* clear PDE usage counters */
662 pdir[pdn] = _PDE_VACANT(pdn); 261 pd = page_address(as->count);
663 FLUSH_CPU_DCACHE(pdir, as->pdir_page, SMMU_PDIR_SIZE); 262 SetPageReserved(as->count);
664 val = SMMU_PTC_FLUSH_TYPE_ADR | VA_PAGE_TO_PA(pdir, as->pdir_page);
665 smmu_write(smmu, val, SMMU_PTC_FLUSH);
666 FLUSH_SMMU_REGS(as->smmu);
667 val = SMMU_TLB_FLUSH_VA_MATCH_ALL |
668 SMMU_TLB_FLUSH_ASID_MATCH__ENABLE |
669 (as->asid << SMMU_TLB_FLUSH_ASID_SHIFT);
670 smmu_write(smmu, val, SMMU_TLB_FLUSH);
671 FLUSH_SMMU_REGS(as->smmu);
672 263
673 spin_unlock_irqrestore(&as->lock, flags); 264 for (i = 0; i < SMMU_NUM_PDE; i++)
674 265 pd[i] = 0;
675 return 0;
676 266
677err_out: 267 domain->priv = as;
678 spin_unlock_irqrestore(&as->lock, flags);
679 268
680 devm_kfree(smmu->dev, cnt); 269 return 0;
681 if (page)
682 __free_page(page);
683 return err;
684} 270}
685 271
686static void __smmu_iommu_unmap(struct smmu_as *as, dma_addr_t iova) 272static void tegra_smmu_domain_destroy(struct iommu_domain *domain)
687{ 273{
688 unsigned long *pte; 274 struct tegra_smmu_as *as = domain->priv;
689 struct page *page;
690 unsigned int *count;
691 275
692 pte = locate_pte(as, iova, false, &page, &count); 276 /* TODO: free page directory and page tables */
693 if (WARN_ON(!pte)) 277 ClearPageReserved(as->pd);
694 return;
695 278
696 if (WARN_ON(*pte == _PTE_VACANT(iova))) 279 kfree(as);
697 return;
698
699 *pte = _PTE_VACANT(iova);
700 FLUSH_CPU_DCACHE(pte, page, sizeof(*pte));
701 flush_ptc_and_tlb(as->smmu, as, iova, pte, page, 0);
702 if (!--(*count))
703 free_ptbl(as, iova);
704} 280}
705 281
706static void __smmu_iommu_map_pfn(struct smmu_as *as, dma_addr_t iova, 282static const struct tegra_smmu_swgroup *
707 unsigned long pfn) 283tegra_smmu_find_swgroup(struct tegra_smmu *smmu, unsigned int swgroup)
708{ 284{
709 struct smmu_device *smmu = as->smmu; 285 const struct tegra_smmu_swgroup *group = NULL;
710 unsigned long *pte; 286 unsigned int i;
711 unsigned int *count;
712 struct page *page;
713 287
714 pte = locate_pte(as, iova, true, &page, &count); 288 for (i = 0; i < smmu->soc->num_swgroups; i++) {
715 if (WARN_ON(!pte)) 289 if (smmu->soc->swgroups[i].swgroup == swgroup) {
716 return; 290 group = &smmu->soc->swgroups[i];
291 break;
292 }
293 }
717 294
718 if (*pte == _PTE_VACANT(iova)) 295 return group;
719 (*count)++;
720 *pte = SMMU_PFN_TO_PTE(pfn, as->pte_attr);
721 if (unlikely((*pte == _PTE_VACANT(iova))))
722 (*count)--;
723 FLUSH_CPU_DCACHE(pte, page, sizeof(*pte));
724 flush_ptc_and_tlb(smmu, as, iova, pte, page, 0);
725 put_signature(as, iova, pfn);
726} 296}
727 297
728static int smmu_iommu_map(struct iommu_domain *domain, unsigned long iova, 298static void tegra_smmu_enable(struct tegra_smmu *smmu, unsigned int swgroup,
729 phys_addr_t pa, size_t bytes, int prot) 299 unsigned int asid)
730{ 300{
731 struct smmu_as *as = domain->priv; 301 const struct tegra_smmu_swgroup *group;
732 unsigned long pfn = __phys_to_pfn(pa); 302 unsigned int i;
733 unsigned long flags; 303 u32 value;
734 304
735 dev_dbg(as->smmu->dev, "[%d] %08lx:%pa\n", as->asid, iova, &pa); 305 for (i = 0; i < smmu->soc->num_clients; i++) {
306 const struct tegra_mc_client *client = &smmu->soc->clients[i];
736 307
737 if (!pfn_valid(pfn)) 308 if (client->swgroup != swgroup)
738 return -ENOMEM; 309 continue;
739
740 spin_lock_irqsave(&as->lock, flags);
741 __smmu_iommu_map_pfn(as, iova, pfn);
742 spin_unlock_irqrestore(&as->lock, flags);
743 return 0;
744}
745
746static size_t smmu_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
747 size_t bytes)
748{
749 struct smmu_as *as = domain->priv;
750 unsigned long flags;
751 310
752 dev_dbg(as->smmu->dev, "[%d] %08lx\n", as->asid, iova); 311 value = smmu_readl(smmu, client->smmu.reg);
312 value |= BIT(client->smmu.bit);
313 smmu_writel(smmu, value, client->smmu.reg);
314 }
753 315
754 spin_lock_irqsave(&as->lock, flags); 316 group = tegra_smmu_find_swgroup(smmu, swgroup);
755 __smmu_iommu_unmap(as, iova); 317 if (group) {
756 spin_unlock_irqrestore(&as->lock, flags); 318 value = smmu_readl(smmu, group->reg);
757 return SMMU_PAGE_SIZE; 319 value &= ~SMMU_ASID_MASK;
320 value |= SMMU_ASID_VALUE(asid);
321 value |= SMMU_ASID_ENABLE;
322 smmu_writel(smmu, value, group->reg);
323 }
758} 324}
759 325
760static phys_addr_t smmu_iommu_iova_to_phys(struct iommu_domain *domain, 326static void tegra_smmu_disable(struct tegra_smmu *smmu, unsigned int swgroup,
761 dma_addr_t iova) 327 unsigned int asid)
762{ 328{
763 struct smmu_as *as = domain->priv; 329 const struct tegra_smmu_swgroup *group;
764 unsigned long *pte; 330 unsigned int i;
765 unsigned int *count; 331 u32 value;
766 struct page *page;
767 unsigned long pfn;
768 unsigned long flags;
769 332
770 spin_lock_irqsave(&as->lock, flags); 333 group = tegra_smmu_find_swgroup(smmu, swgroup);
334 if (group) {
335 value = smmu_readl(smmu, group->reg);
336 value &= ~SMMU_ASID_MASK;
337 value |= SMMU_ASID_VALUE(asid);
338 value &= ~SMMU_ASID_ENABLE;
339 smmu_writel(smmu, value, group->reg);
340 }
771 341
772 pte = locate_pte(as, iova, true, &page, &count); 342 for (i = 0; i < smmu->soc->num_clients; i++) {
773 pfn = *pte & SMMU_PFN_MASK; 343 const struct tegra_mc_client *client = &smmu->soc->clients[i];
774 WARN_ON(!pfn_valid(pfn));
775 dev_dbg(as->smmu->dev,
776 "iova:%08llx pfn:%08lx asid:%d\n", (unsigned long long)iova,
777 pfn, as->asid);
778 344
779 spin_unlock_irqrestore(&as->lock, flags); 345 if (client->swgroup != swgroup)
780 return PFN_PHYS(pfn); 346 continue;
781}
782 347
783static bool smmu_iommu_capable(enum iommu_cap cap) 348 value = smmu_readl(smmu, client->smmu.reg);
784{ 349 value &= ~BIT(client->smmu.bit);
785 return false; 350 smmu_writel(smmu, value, client->smmu.reg);
351 }
786} 352}
787 353
788static int smmu_iommu_attach_dev(struct iommu_domain *domain, 354static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
789 struct device *dev) 355 struct tegra_smmu_as *as)
790{ 356{
791 struct smmu_as *as = domain->priv; 357 u32 value;
792 struct smmu_device *smmu = as->smmu;
793 struct smmu_client *client, *c;
794 u32 map;
795 int err; 358 int err;
796 359
797 client = devm_kzalloc(smmu->dev, sizeof(*c), GFP_KERNEL); 360 if (as->use_count > 0) {
798 if (!client) 361 as->use_count++;
799 return -ENOMEM; 362 return 0;
800 client->dev = dev;
801 client->as = as;
802 map = (unsigned long)dev->platform_data;
803 if (!map)
804 return -EINVAL;
805
806 err = smmu_client_enable_hwgrp(client, map);
807 if (err)
808 goto err_hwgrp;
809
810 spin_lock(&as->client_lock);
811 list_for_each_entry(c, &as->client, list) {
812 if (c->dev == dev) {
813 dev_err(smmu->dev,
814 "%s is already attached\n", dev_name(c->dev));
815 err = -EINVAL;
816 goto err_client;
817 }
818 } 363 }
819 list_add(&client->list, &as->client);
820 spin_unlock(&as->client_lock);
821 364
822 /* 365 err = tegra_smmu_alloc_asid(smmu, &as->id);
823 * Reserve "page zero" for AVP vectors using a common dummy 366 if (err < 0)
824 * page. 367 return err;
825 */
826 if (map & HWG_AVPC) {
827 struct page *page;
828 368
829 page = as->smmu->avp_vector_page; 369 smmu->soc->ops->flush_dcache(as->pd, 0, SMMU_SIZE_PD);
830 __smmu_iommu_map_pfn(as, 0, page_to_pfn(page)); 370 smmu_flush_ptc(smmu, as->pd, 0);
371 smmu_flush_tlb_asid(smmu, as->id);
831 372
832 pr_info("Reserve \"page zero\" for AVP vectors using a common dummy\n"); 373 smmu_writel(smmu, as->id & 0x7f, SMMU_PTB_ASID);
833 } 374 value = SMMU_PTB_DATA_VALUE(as->pd, as->attr);
375 smmu_writel(smmu, value, SMMU_PTB_DATA);
376 smmu_flush(smmu);
834 377
835 dev_dbg(smmu->dev, "%s is attached\n", dev_name(dev)); 378 as->smmu = smmu;
836 return 0; 379 as->use_count++;
837 380
838err_client: 381 return 0;
839 smmu_client_disable_hwgrp(client);
840 spin_unlock(&as->client_lock);
841err_hwgrp:
842 devm_kfree(smmu->dev, client);
843 return err;
844} 382}
845 383
846static void smmu_iommu_detach_dev(struct iommu_domain *domain, 384static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
847 struct device *dev) 385 struct tegra_smmu_as *as)
848{ 386{
849 struct smmu_as *as = domain->priv; 387 if (--as->use_count > 0)
850 struct smmu_device *smmu = as->smmu; 388 return;
851 struct smmu_client *c; 389
852 390 tegra_smmu_free_asid(smmu, as->id);
853 spin_lock(&as->client_lock); 391 as->smmu = NULL;
854
855 list_for_each_entry(c, &as->client, list) {
856 if (c->dev == dev) {
857 smmu_client_disable_hwgrp(c);
858 list_del(&c->list);
859 devm_kfree(smmu->dev, c);
860 c->as = NULL;
861 dev_dbg(smmu->dev,
862 "%s is detached\n", dev_name(c->dev));
863 goto out;
864 }
865 }
866 dev_err(smmu->dev, "Couldn't find %s\n", dev_name(dev));
867out:
868 spin_unlock(&as->client_lock);
869} 392}
870 393
871static int smmu_iommu_domain_init(struct iommu_domain *domain) 394static int tegra_smmu_attach_dev(struct iommu_domain *domain,
395 struct device *dev)
872{ 396{
873 int i, err = -EAGAIN; 397 struct tegra_smmu *smmu = dev->archdata.iommu;
874 unsigned long flags; 398 struct tegra_smmu_as *as = domain->priv;
875 struct smmu_as *as; 399 struct device_node *np = dev->of_node;
876 struct smmu_device *smmu = smmu_handle; 400 struct of_phandle_args args;
401 unsigned int index = 0;
402 int err = 0;
877 403
878 /* Look for a free AS with lock held */ 404 while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
879 for (i = 0; i < smmu->num_as; i++) { 405 &args)) {
880 as = &smmu->as[i]; 406 unsigned int swgroup = args.args[0];
881 407
882 if (as->pdir_page) 408 if (args.np != smmu->dev->of_node) {
409 of_node_put(args.np);
883 continue; 410 continue;
411 }
884 412
885 err = alloc_pdir(as); 413 of_node_put(args.np);
886 if (!err)
887 goto found;
888 414
889 if (err != -EAGAIN) 415 err = tegra_smmu_as_prepare(smmu, as);
890 break; 416 if (err < 0)
417 return err;
418
419 tegra_smmu_enable(smmu, swgroup, as->id);
420 index++;
891 } 421 }
892 if (i == smmu->num_as)
893 dev_err(smmu->dev, "no free AS\n");
894 return err;
895 422
896found: 423 if (index == 0)
897 spin_lock_irqsave(&smmu->lock, flags); 424 return -ENODEV;
898 425
899 /* Update PDIR register */ 426 return 0;
900 smmu_write(smmu, SMMU_PTB_ASID_CUR(as->asid), SMMU_PTB_ASID); 427}
901 smmu_write(smmu,
902 SMMU_MK_PDIR(as->pdir_page, as->pdir_attr), SMMU_PTB_DATA);
903 FLUSH_SMMU_REGS(smmu);
904 428
905 spin_unlock_irqrestore(&smmu->lock, flags); 429static void tegra_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
430{
431 struct tegra_smmu_as *as = domain->priv;
432 struct device_node *np = dev->of_node;
433 struct tegra_smmu *smmu = as->smmu;
434 struct of_phandle_args args;
435 unsigned int index = 0;
906 436
907 domain->priv = as; 437 while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
438 &args)) {
439 unsigned int swgroup = args.args[0];
908 440
909 domain->geometry.aperture_start = smmu->iovmm_base; 441 if (args.np != smmu->dev->of_node) {
910 domain->geometry.aperture_end = smmu->iovmm_base + 442 of_node_put(args.np);
911 smmu->page_count * SMMU_PAGE_SIZE - 1; 443 continue;
912 domain->geometry.force_aperture = true; 444 }
913 445
914 dev_dbg(smmu->dev, "smmu_as@%p\n", as); 446 of_node_put(args.np);
915 447
916 return 0; 448 tegra_smmu_disable(smmu, swgroup, as->id);
449 tegra_smmu_as_unprepare(smmu, as);
450 index++;
451 }
917} 452}
918 453
919static void smmu_iommu_domain_destroy(struct iommu_domain *domain) 454static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
455 struct page **pagep)
920{ 456{
921 struct smmu_as *as = domain->priv; 457 u32 *pd = page_address(as->pd), *pt, *count;
922 struct smmu_device *smmu = as->smmu; 458 u32 pde = (iova >> SMMU_PDE_SHIFT) & 0x3ff;
923 unsigned long flags; 459 u32 pte = (iova >> SMMU_PTE_SHIFT) & 0x3ff;
460 struct tegra_smmu *smmu = as->smmu;
461 struct page *page;
462 unsigned int i;
463
464 if (pd[pde] == 0) {
465 page = alloc_page(GFP_KERNEL | __GFP_DMA);
466 if (!page)
467 return NULL;
924 468
925 spin_lock_irqsave(&as->lock, flags); 469 pt = page_address(page);
470 SetPageReserved(page);
926 471
927 if (as->pdir_page) { 472 for (i = 0; i < SMMU_NUM_PTE; i++)
928 spin_lock(&smmu->lock); 473 pt[i] = 0;
929 smmu_write(smmu, SMMU_PTB_ASID_CUR(as->asid), SMMU_PTB_ASID);
930 smmu_write(smmu, SMMU_PTB_DATA_RESET_VAL, SMMU_PTB_DATA);
931 FLUSH_SMMU_REGS(smmu);
932 spin_unlock(&smmu->lock);
933 474
934 free_pdir(as); 475 smmu->soc->ops->flush_dcache(page, 0, SMMU_SIZE_PT);
935 }
936 476
937 if (!list_empty(&as->client)) { 477 pd[pde] = SMMU_MK_PDE(page, SMMU_PDE_ATTR | SMMU_PDE_NEXT);
938 struct smmu_client *c;
939 478
940 list_for_each_entry(c, &as->client, list) 479 smmu->soc->ops->flush_dcache(as->pd, pde << 2, 4);
941 smmu_iommu_detach_dev(domain, c->dev); 480 smmu_flush_ptc(smmu, as->pd, pde << 2);
481 smmu_flush_tlb_section(smmu, as->id, iova);
482 smmu_flush(smmu);
483 } else {
484 page = pfn_to_page(pd[pde] & SMMU_PFN_MASK);
485 pt = page_address(page);
942 } 486 }
943 487
944 spin_unlock_irqrestore(&as->lock, flags); 488 *pagep = page;
945 489
946 domain->priv = NULL; 490 /* Keep track of entries in this page table. */
947 dev_dbg(smmu->dev, "smmu_as@%p\n", as); 491 count = page_address(as->count);
948} 492 if (pt[pte] == 0)
493 count[pde]++;
949 494
950static const struct iommu_ops smmu_iommu_ops = { 495 return &pt[pte];
951 .capable = smmu_iommu_capable, 496}
952 .domain_init = smmu_iommu_domain_init,
953 .domain_destroy = smmu_iommu_domain_destroy,
954 .attach_dev = smmu_iommu_attach_dev,
955 .detach_dev = smmu_iommu_detach_dev,
956 .map = smmu_iommu_map,
957 .unmap = smmu_iommu_unmap,
958 .iova_to_phys = smmu_iommu_iova_to_phys,
959 .pgsize_bitmap = SMMU_IOMMU_PGSIZES,
960};
961
962/* Should be in the order of enum */
963static const char * const smmu_debugfs_mc[] = { "mc", };
964static const char * const smmu_debugfs_cache[] = { "tlb", "ptc", };
965 497
966static ssize_t smmu_debugfs_stats_write(struct file *file, 498static void as_put_pte(struct tegra_smmu_as *as, dma_addr_t iova)
967 const char __user *buffer,
968 size_t count, loff_t *pos)
969{ 499{
970 struct smmu_debugfs_info *info; 500 u32 pde = (iova >> SMMU_PDE_SHIFT) & 0x3ff;
971 struct smmu_device *smmu; 501 u32 pte = (iova >> SMMU_PTE_SHIFT) & 0x3ff;
972 int i; 502 u32 *count = page_address(as->count);
973 enum { 503 u32 *pd = page_address(as->pd), *pt;
974 _OFF = 0, 504 struct page *page;
975 _ON,
976 _RESET,
977 };
978 const char * const command[] = {
979 [_OFF] = "off",
980 [_ON] = "on",
981 [_RESET] = "reset",
982 };
983 char str[] = "reset";
984 u32 val;
985 size_t offs;
986 505
987 count = min_t(size_t, count, sizeof(str)); 506 page = pfn_to_page(pd[pde] & SMMU_PFN_MASK);
988 if (copy_from_user(str, buffer, count)) 507 pt = page_address(page);
989 return -EINVAL;
990 508
991 for (i = 0; i < ARRAY_SIZE(command); i++) 509 /*
992 if (strncmp(str, command[i], 510 * When no entries in this page table are used anymore, return the
993 strlen(command[i])) == 0) 511 * memory page to the system.
994 break; 512 */
513 if (pt[pte] != 0) {
514 if (--count[pde] == 0) {
515 ClearPageReserved(page);
516 __free_page(page);
517 pd[pde] = 0;
518 }
995 519
996 if (i == ARRAY_SIZE(command)) 520 pt[pte] = 0;
997 return -EINVAL;
998
999 info = file_inode(file)->i_private;
1000 smmu = info->smmu;
1001
1002 offs = SMMU_CACHE_CONFIG(info->cache);
1003 val = smmu_read(smmu, offs);
1004 switch (i) {
1005 case _OFF:
1006 val &= ~SMMU_CACHE_CONFIG_STATS_ENABLE;
1007 val &= ~SMMU_CACHE_CONFIG_STATS_TEST;
1008 smmu_write(smmu, val, offs);
1009 break;
1010 case _ON:
1011 val |= SMMU_CACHE_CONFIG_STATS_ENABLE;
1012 val &= ~SMMU_CACHE_CONFIG_STATS_TEST;
1013 smmu_write(smmu, val, offs);
1014 break;
1015 case _RESET:
1016 val |= SMMU_CACHE_CONFIG_STATS_TEST;
1017 smmu_write(smmu, val, offs);
1018 val &= ~SMMU_CACHE_CONFIG_STATS_TEST;
1019 smmu_write(smmu, val, offs);
1020 break;
1021 default:
1022 BUG();
1023 break;
1024 } 521 }
1025
1026 dev_dbg(smmu->dev, "%s() %08x, %08x @%08x\n", __func__,
1027 val, smmu_read(smmu, offs), offs);
1028
1029 return count;
1030} 522}
1031 523
1032static int smmu_debugfs_stats_show(struct seq_file *s, void *v) 524static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
525 phys_addr_t paddr, size_t size, int prot)
1033{ 526{
1034 struct smmu_debugfs_info *info = s->private; 527 struct tegra_smmu_as *as = domain->priv;
1035 struct smmu_device *smmu = info->smmu; 528 struct tegra_smmu *smmu = as->smmu;
1036 int i; 529 unsigned long offset;
1037 const char * const stats[] = { "hit", "miss", }; 530 struct page *page;
531 u32 *pte;
1038 532
533 pte = as_get_pte(as, iova, &page);
534 if (!pte)
535 return -ENOMEM;
1039 536
1040 for (i = 0; i < ARRAY_SIZE(stats); i++) { 537 *pte = __phys_to_pfn(paddr) | SMMU_PTE_ATTR;
1041 u32 val; 538 offset = offset_in_page(pte);
1042 size_t offs;
1043 539
1044 offs = SMMU_STATS_CACHE_COUNT(info->mc, info->cache, i); 540 smmu->soc->ops->flush_dcache(page, offset, 4);
1045 val = smmu_read(smmu, offs); 541 smmu_flush_ptc(smmu, page, offset);
1046 seq_printf(s, "%s:%08x ", stats[i], val); 542 smmu_flush_tlb_group(smmu, as->id, iova);
543 smmu_flush(smmu);
1047 544
1048 dev_dbg(smmu->dev, "%s() %s %08x @%08x\n", __func__,
1049 stats[i], val, offs);
1050 }
1051 seq_printf(s, "\n");
1052 return 0; 545 return 0;
1053} 546}
1054 547
1055static int smmu_debugfs_stats_open(struct inode *inode, struct file *file) 548static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
549 size_t size)
1056{ 550{
1057 return single_open(file, smmu_debugfs_stats_show, inode->i_private); 551 struct tegra_smmu_as *as = domain->priv;
1058} 552 struct tegra_smmu *smmu = as->smmu;
553 unsigned long offset;
554 struct page *page;
555 u32 *pte;
1059 556
1060static const struct file_operations smmu_debugfs_stats_fops = { 557 pte = as_get_pte(as, iova, &page);
1061 .open = smmu_debugfs_stats_open, 558 if (!pte)
1062 .read = seq_read, 559 return 0;
1063 .llseek = seq_lseek,
1064 .release = single_release,
1065 .write = smmu_debugfs_stats_write,
1066};
1067 560
1068static void smmu_debugfs_delete(struct smmu_device *smmu) 561 offset = offset_in_page(pte);
1069{ 562 as_put_pte(as, iova);
1070 debugfs_remove_recursive(smmu->debugfs_root); 563
1071 kfree(smmu->debugfs_info); 564 smmu->soc->ops->flush_dcache(page, offset, 4);
565 smmu_flush_ptc(smmu, page, offset);
566 smmu_flush_tlb_group(smmu, as->id, iova);
567 smmu_flush(smmu);
568
569 return size;
1072} 570}
1073 571
1074static void smmu_debugfs_create(struct smmu_device *smmu) 572static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain,
573 dma_addr_t iova)
1075{ 574{
1076 int i; 575 struct tegra_smmu_as *as = domain->priv;
1077 size_t bytes; 576 struct page *page;
1078 struct dentry *root; 577 unsigned long pfn;
1079 578 u32 *pte;
1080 bytes = ARRAY_SIZE(smmu_debugfs_mc) * ARRAY_SIZE(smmu_debugfs_cache) *
1081 sizeof(*smmu->debugfs_info);
1082 smmu->debugfs_info = kmalloc(bytes, GFP_KERNEL);
1083 if (!smmu->debugfs_info)
1084 return;
1085
1086 root = debugfs_create_dir(dev_name(smmu->dev), NULL);
1087 if (!root)
1088 goto err_out;
1089 smmu->debugfs_root = root;
1090
1091 for (i = 0; i < ARRAY_SIZE(smmu_debugfs_mc); i++) {
1092 int j;
1093 struct dentry *mc;
1094
1095 mc = debugfs_create_dir(smmu_debugfs_mc[i], root);
1096 if (!mc)
1097 goto err_out;
1098
1099 for (j = 0; j < ARRAY_SIZE(smmu_debugfs_cache); j++) {
1100 struct dentry *cache;
1101 struct smmu_debugfs_info *info;
1102
1103 info = smmu->debugfs_info;
1104 info += i * ARRAY_SIZE(smmu_debugfs_mc) + j;
1105 info->smmu = smmu;
1106 info->mc = i;
1107 info->cache = j;
1108
1109 cache = debugfs_create_file(smmu_debugfs_cache[j],
1110 S_IWUGO | S_IRUGO, mc,
1111 (void *)info,
1112 &smmu_debugfs_stats_fops);
1113 if (!cache)
1114 goto err_out;
1115 }
1116 }
1117 579
1118 return; 580 pte = as_get_pte(as, iova, &page);
581 pfn = *pte & SMMU_PFN_MASK;
1119 582
1120err_out: 583 return PFN_PHYS(pfn);
1121 smmu_debugfs_delete(smmu);
1122} 584}
1123 585
1124static int tegra_smmu_suspend(struct device *dev) 586static struct tegra_smmu *tegra_smmu_find(struct device_node *np)
1125{ 587{
1126 struct smmu_device *smmu = dev_get_drvdata(dev); 588 struct platform_device *pdev;
589 struct tegra_mc *mc;
1127 590
1128 smmu->translation_enable_0 = smmu_read(smmu, SMMU_TRANSLATION_ENABLE_0); 591 pdev = of_find_device_by_node(np);
1129 smmu->translation_enable_1 = smmu_read(smmu, SMMU_TRANSLATION_ENABLE_1); 592 if (!pdev)
1130 smmu->translation_enable_2 = smmu_read(smmu, SMMU_TRANSLATION_ENABLE_2); 593 return NULL;
1131 smmu->asid_security = smmu_read(smmu, SMMU_ASID_SECURITY); 594
1132 return 0; 595 mc = platform_get_drvdata(pdev);
596 if (!mc)
597 return NULL;
598
599 return mc->smmu;
1133} 600}
1134 601
1135static int tegra_smmu_resume(struct device *dev) 602static int tegra_smmu_add_device(struct device *dev)
1136{ 603{
1137 struct smmu_device *smmu = dev_get_drvdata(dev); 604 struct device_node *np = dev->of_node;
1138 unsigned long flags; 605 struct of_phandle_args args;
1139 int err; 606 unsigned int index = 0;
1140 607
1141 spin_lock_irqsave(&smmu->lock, flags); 608 while (of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
1142 err = smmu_setup_regs(smmu); 609 &args) == 0) {
1143 spin_unlock_irqrestore(&smmu->lock, flags); 610 struct tegra_smmu *smmu;
1144 return err; 611
612 smmu = tegra_smmu_find(args.np);
613 if (smmu) {
614 /*
615 * Only a single IOMMU master interface is currently
616 * supported by the Linux kernel, so abort after the
617 * first match.
618 */
619 dev->archdata.iommu = smmu;
620 break;
621 }
622
623 index++;
624 }
625
626 return 0;
1145} 627}
1146 628
1147static int tegra_smmu_probe(struct platform_device *pdev) 629static void tegra_smmu_remove_device(struct device *dev)
1148{ 630{
1149 struct smmu_device *smmu; 631 dev->archdata.iommu = NULL;
1150 struct device *dev = &pdev->dev; 632}
1151 int i, asids, err = 0;
1152 dma_addr_t uninitialized_var(base);
1153 size_t bytes, uninitialized_var(size);
1154 633
1155 if (smmu_handle) 634static const struct iommu_ops tegra_smmu_ops = {
1156 return -EIO; 635 .capable = tegra_smmu_capable,
636 .domain_init = tegra_smmu_domain_init,
637 .domain_destroy = tegra_smmu_domain_destroy,
638 .attach_dev = tegra_smmu_attach_dev,
639 .detach_dev = tegra_smmu_detach_dev,
640 .add_device = tegra_smmu_add_device,
641 .remove_device = tegra_smmu_remove_device,
642 .map = tegra_smmu_map,
643 .unmap = tegra_smmu_unmap,
644 .map_sg = default_iommu_map_sg,
645 .iova_to_phys = tegra_smmu_iova_to_phys,
1157 646
1158 BUILD_BUG_ON(PAGE_SHIFT != SMMU_PAGE_SHIFT); 647 .pgsize_bitmap = SZ_4K,
648};
1159 649
1160 if (of_property_read_u32(dev->of_node, "nvidia,#asids", &asids)) 650static void tegra_smmu_ahb_enable(void)
1161 return -ENODEV; 651{
652 static const struct of_device_id ahb_match[] = {
653 { .compatible = "nvidia,tegra30-ahb", },
654 { }
655 };
656 struct device_node *ahb;
1162 657
1163 bytes = sizeof(*smmu) + asids * sizeof(*smmu->as); 658 ahb = of_find_matching_node(NULL, ahb_match);
1164 smmu = devm_kzalloc(dev, bytes, GFP_KERNEL); 659 if (ahb) {
1165 if (!smmu) { 660 tegra_ahb_enable_smmu(ahb);
1166 dev_err(dev, "failed to allocate smmu_device\n"); 661 of_node_put(ahb);
1167 return -ENOMEM;
1168 } 662 }
663}
1169 664
1170 smmu->nregs = pdev->num_resources; 665struct tegra_smmu *tegra_smmu_probe(struct device *dev,
1171 smmu->regs = devm_kzalloc(dev, 2 * smmu->nregs * sizeof(*smmu->regs), 666 const struct tegra_smmu_soc *soc,
1172 GFP_KERNEL); 667 struct tegra_mc *mc)
1173 smmu->rege = smmu->regs + smmu->nregs; 668{
1174 if (!smmu->regs) 669 struct tegra_smmu *smmu;
1175 return -ENOMEM; 670 size_t size;
1176 for (i = 0; i < smmu->nregs; i++) { 671 u32 value;
1177 struct resource *res; 672 int err;
1178
1179 res = platform_get_resource(pdev, IORESOURCE_MEM, i);
1180 smmu->regs[i] = devm_ioremap_resource(&pdev->dev, res);
1181 if (IS_ERR(smmu->regs[i]))
1182 return PTR_ERR(smmu->regs[i]);
1183 smmu->rege[i] = smmu->regs[i] + resource_size(res) - 1;
1184 }
1185 /* Same as "mc" 1st regiter block start address */
1186 smmu->regbase = (void __iomem *)((u32)smmu->regs[0] & PAGE_MASK);
1187 673
1188 err = of_get_dma_window(dev->of_node, NULL, 0, NULL, &base, &size); 674 /* This can happen on Tegra20 which doesn't have an SMMU */
1189 if (err) 675 if (!soc)
1190 return -ENODEV; 676 return NULL;
1191 677
1192 if (size & SMMU_PAGE_MASK) 678 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
1193 return -EINVAL; 679 if (!smmu)
680 return ERR_PTR(-ENOMEM);
1194 681
1195 size >>= SMMU_PAGE_SHIFT; 682 /*
1196 if (!size) 683 * This is a bit of a hack. Ideally we'd want to simply return this
1197 return -EINVAL; 684 * value. However the IOMMU registration process will attempt to add
685 * all devices to the IOMMU when bus_set_iommu() is called. In order
686 * not to rely on global variables to track the IOMMU instance, we
687 * set it here so that it can be looked up from the .add_device()
688 * callback via the IOMMU device's .drvdata field.
689 */
690 mc->smmu = smmu;
1198 691
1199 smmu->ahb = of_parse_phandle(dev->of_node, "nvidia,ahb", 0); 692 size = BITS_TO_LONGS(soc->num_asids) * sizeof(long);
1200 if (!smmu->ahb)
1201 return -ENODEV;
1202 693
1203 smmu->dev = dev; 694 smmu->asids = devm_kzalloc(dev, size, GFP_KERNEL);
1204 smmu->num_as = asids; 695 if (!smmu->asids)
1205 smmu->iovmm_base = base; 696 return ERR_PTR(-ENOMEM);
1206 smmu->page_count = size;
1207
1208 smmu->translation_enable_0 = ~0;
1209 smmu->translation_enable_1 = ~0;
1210 smmu->translation_enable_2 = ~0;
1211 smmu->asid_security = 0;
1212
1213 for (i = 0; i < smmu->num_as; i++) {
1214 struct smmu_as *as = &smmu->as[i];
1215
1216 as->smmu = smmu;
1217 as->asid = i;
1218 as->pdir_attr = _PDIR_ATTR;
1219 as->pde_attr = _PDE_ATTR;
1220 as->pte_attr = _PTE_ATTR;
1221
1222 spin_lock_init(&as->lock);
1223 spin_lock_init(&as->client_lock);
1224 INIT_LIST_HEAD(&as->client);
1225 }
1226 spin_lock_init(&smmu->lock);
1227 err = smmu_setup_regs(smmu);
1228 if (err)
1229 return err;
1230 platform_set_drvdata(pdev, smmu);
1231 697
1232 smmu->avp_vector_page = alloc_page(GFP_KERNEL); 698 mutex_init(&smmu->lock);
1233 if (!smmu->avp_vector_page)
1234 return -ENOMEM;
1235 699
1236 smmu_debugfs_create(smmu); 700 smmu->regs = mc->regs;
1237 smmu_handle = smmu; 701 smmu->soc = soc;
1238 bus_set_iommu(&platform_bus_type, &smmu_iommu_ops); 702 smmu->dev = dev;
1239 return 0; 703 smmu->mc = mc;
1240}
1241 704
1242static int tegra_smmu_remove(struct platform_device *pdev) 705 value = SMMU_PTC_CONFIG_ENABLE | SMMU_PTC_CONFIG_INDEX_MAP(0x3f);
1243{
1244 struct smmu_device *smmu = platform_get_drvdata(pdev);
1245 int i;
1246 706
1247 smmu_debugfs_delete(smmu); 707 if (soc->supports_request_limit)
708 value |= SMMU_PTC_CONFIG_REQ_LIMIT(8);
1248 709
1249 smmu_write(smmu, SMMU_CONFIG_DISABLE, SMMU_CONFIG); 710 smmu_writel(smmu, value, SMMU_PTC_CONFIG);
1250 for (i = 0; i < smmu->num_as; i++)
1251 free_pdir(&smmu->as[i]);
1252 __free_page(smmu->avp_vector_page);
1253 smmu_handle = NULL;
1254 return 0;
1255}
1256 711
1257static const struct dev_pm_ops tegra_smmu_pm_ops = { 712 value = SMMU_TLB_CONFIG_HIT_UNDER_MISS |
1258 .suspend = tegra_smmu_suspend, 713 SMMU_TLB_CONFIG_ACTIVE_LINES(0x20);
1259 .resume = tegra_smmu_resume,
1260};
1261 714
1262static const struct of_device_id tegra_smmu_of_match[] = { 715 if (soc->supports_round_robin_arbitration)
1263 { .compatible = "nvidia,tegra30-smmu", }, 716 value |= SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION;
1264 { },
1265};
1266MODULE_DEVICE_TABLE(of, tegra_smmu_of_match);
1267
1268static struct platform_driver tegra_smmu_driver = {
1269 .probe = tegra_smmu_probe,
1270 .remove = tegra_smmu_remove,
1271 .driver = {
1272 .owner = THIS_MODULE,
1273 .name = "tegra-smmu",
1274 .pm = &tegra_smmu_pm_ops,
1275 .of_match_table = tegra_smmu_of_match,
1276 },
1277};
1278 717
1279static int tegra_smmu_init(void) 718 smmu_writel(smmu, value, SMMU_TLB_CONFIG);
1280{
1281 return platform_driver_register(&tegra_smmu_driver);
1282}
1283 719
1284static void __exit tegra_smmu_exit(void) 720 smmu_flush_ptc(smmu, NULL, 0);
1285{ 721 smmu_flush_tlb(smmu);
1286 platform_driver_unregister(&tegra_smmu_driver); 722 smmu_writel(smmu, SMMU_CONFIG_ENABLE, SMMU_CONFIG);
1287} 723 smmu_flush(smmu);
724
725 tegra_smmu_ahb_enable();
1288 726
1289subsys_initcall(tegra_smmu_init); 727 err = bus_set_iommu(&platform_bus_type, &tegra_smmu_ops);
1290module_exit(tegra_smmu_exit); 728 if (err < 0)
729 return ERR_PTR(err);
1291 730
1292MODULE_DESCRIPTION("IOMMU API for SMMU in Tegra30"); 731 return smmu;
1293MODULE_AUTHOR("Hiroshi DOYU <hdoyu@nvidia.com>"); 732}
1294MODULE_ALIAS("platform:tegra-smmu");
1295MODULE_LICENSE("GPL v2");
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index b21f12f1766d..9efe5f10f97b 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -48,14 +48,19 @@ config ATMEL_AIC5_IRQ
48 select MULTI_IRQ_HANDLER 48 select MULTI_IRQ_HANDLER
49 select SPARSE_IRQ 49 select SPARSE_IRQ
50 50
51config BCM7120_L2_IRQ
52 bool
53 select GENERIC_IRQ_CHIP
54 select IRQ_DOMAIN
55
51config BRCMSTB_L2_IRQ 56config BRCMSTB_L2_IRQ
52 bool 57 bool
53 depends on ARM
54 select GENERIC_IRQ_CHIP 58 select GENERIC_IRQ_CHIP
55 select IRQ_DOMAIN 59 select IRQ_DOMAIN
56 60
57config DW_APB_ICTL 61config DW_APB_ICTL
58 bool 62 bool
63 select GENERIC_IRQ_CHIP
59 select IRQ_DOMAIN 64 select IRQ_DOMAIN
60 65
61config IMGPDC_IRQ 66config IMGPDC_IRQ
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index 173bb5fa2cc9..f0909d05eae3 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -35,6 +35,6 @@ obj-$(CONFIG_TB10X_IRQC) += irq-tb10x.o
35obj-$(CONFIG_XTENSA) += irq-xtensa-pic.o 35obj-$(CONFIG_XTENSA) += irq-xtensa-pic.o
36obj-$(CONFIG_XTENSA_MX) += irq-xtensa-mx.o 36obj-$(CONFIG_XTENSA_MX) += irq-xtensa-mx.o
37obj-$(CONFIG_IRQ_CROSSBAR) += irq-crossbar.o 37obj-$(CONFIG_IRQ_CROSSBAR) += irq-crossbar.o
38obj-$(CONFIG_BRCMSTB_L2_IRQ) += irq-brcmstb-l2.o \ 38obj-$(CONFIG_BCM7120_L2_IRQ) += irq-bcm7120-l2.o
39 irq-bcm7120-l2.o 39obj-$(CONFIG_BRCMSTB_L2_IRQ) += irq-brcmstb-l2.o
40obj-$(CONFIG_KEYSTONE_IRQ) += irq-keystone.o 40obj-$(CONFIG_KEYSTONE_IRQ) += irq-keystone.o
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index 6a2e168c3ab0..463c235acbdc 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -26,6 +26,7 @@
26#include <linux/of_pci.h> 26#include <linux/of_pci.h>
27#include <linux/irqdomain.h> 27#include <linux/irqdomain.h>
28#include <linux/slab.h> 28#include <linux/slab.h>
29#include <linux/syscore_ops.h>
29#include <linux/msi.h> 30#include <linux/msi.h>
30#include <asm/mach/arch.h> 31#include <asm/mach/arch.h>
31#include <asm/exception.h> 32#include <asm/exception.h>
@@ -67,6 +68,7 @@
67static void __iomem *per_cpu_int_base; 68static void __iomem *per_cpu_int_base;
68static void __iomem *main_int_base; 69static void __iomem *main_int_base;
69static struct irq_domain *armada_370_xp_mpic_domain; 70static struct irq_domain *armada_370_xp_mpic_domain;
71static u32 doorbell_mask_reg;
70#ifdef CONFIG_PCI_MSI 72#ifdef CONFIG_PCI_MSI
71static struct irq_domain *armada_370_xp_msi_domain; 73static struct irq_domain *armada_370_xp_msi_domain;
72static DECLARE_BITMAP(msi_used, PCI_MSI_DOORBELL_NR); 74static DECLARE_BITMAP(msi_used, PCI_MSI_DOORBELL_NR);
@@ -130,7 +132,7 @@ static void armada_370_xp_free_msi(int hwirq)
130 mutex_unlock(&msi_used_lock); 132 mutex_unlock(&msi_used_lock);
131} 133}
132 134
133static int armada_370_xp_setup_msi_irq(struct msi_chip *chip, 135static int armada_370_xp_setup_msi_irq(struct msi_controller *chip,
134 struct pci_dev *pdev, 136 struct pci_dev *pdev,
135 struct msi_desc *desc) 137 struct msi_desc *desc)
136{ 138{
@@ -157,11 +159,11 @@ static int armada_370_xp_setup_msi_irq(struct msi_chip *chip,
157 msg.address_hi = 0; 159 msg.address_hi = 0;
158 msg.data = 0xf00 | (hwirq + 16); 160 msg.data = 0xf00 | (hwirq + 16);
159 161
160 write_msi_msg(virq, &msg); 162 pci_write_msi_msg(virq, &msg);
161 return 0; 163 return 0;
162} 164}
163 165
164static void armada_370_xp_teardown_msi_irq(struct msi_chip *chip, 166static void armada_370_xp_teardown_msi_irq(struct msi_controller *chip,
165 unsigned int irq) 167 unsigned int irq)
166{ 168{
167 struct irq_data *d = irq_get_irq_data(irq); 169 struct irq_data *d = irq_get_irq_data(irq);
@@ -173,10 +175,10 @@ static void armada_370_xp_teardown_msi_irq(struct msi_chip *chip,
173 175
174static struct irq_chip armada_370_xp_msi_irq_chip = { 176static struct irq_chip armada_370_xp_msi_irq_chip = {
175 .name = "armada_370_xp_msi_irq", 177 .name = "armada_370_xp_msi_irq",
176 .irq_enable = unmask_msi_irq, 178 .irq_enable = pci_msi_unmask_irq,
177 .irq_disable = mask_msi_irq, 179 .irq_disable = pci_msi_mask_irq,
178 .irq_mask = mask_msi_irq, 180 .irq_mask = pci_msi_mask_irq,
179 .irq_unmask = unmask_msi_irq, 181 .irq_unmask = pci_msi_unmask_irq,
180}; 182};
181 183
182static int armada_370_xp_msi_map(struct irq_domain *domain, unsigned int virq, 184static int armada_370_xp_msi_map(struct irq_domain *domain, unsigned int virq,
@@ -196,7 +198,7 @@ static const struct irq_domain_ops armada_370_xp_msi_irq_ops = {
196static int armada_370_xp_msi_init(struct device_node *node, 198static int armada_370_xp_msi_init(struct device_node *node,
197 phys_addr_t main_int_phys_base) 199 phys_addr_t main_int_phys_base)
198{ 200{
199 struct msi_chip *msi_chip; 201 struct msi_controller *msi_chip;
200 u32 reg; 202 u32 reg;
201 int ret; 203 int ret;
202 204
@@ -266,7 +268,7 @@ static int armada_xp_set_affinity(struct irq_data *d,
266 writel(reg, main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq)); 268 writel(reg, main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
267 raw_spin_unlock(&irq_controller_lock); 269 raw_spin_unlock(&irq_controller_lock);
268 270
269 return 0; 271 return IRQ_SET_MASK_OK;
270} 272}
271#endif 273#endif
272 274
@@ -485,6 +487,54 @@ armada_370_xp_handle_irq(struct pt_regs *regs)
485 } while (1); 487 } while (1);
486} 488}
487 489
490static int armada_370_xp_mpic_suspend(void)
491{
492 doorbell_mask_reg = readl(per_cpu_int_base +
493 ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
494 return 0;
495}
496
497static void armada_370_xp_mpic_resume(void)
498{
499 int nirqs;
500 irq_hw_number_t irq;
501
502 /* Re-enable interrupts */
503 nirqs = (readl(main_int_base + ARMADA_370_XP_INT_CONTROL) >> 2) & 0x3ff;
504 for (irq = 0; irq < nirqs; irq++) {
505 struct irq_data *data;
506 int virq;
507
508 virq = irq_linear_revmap(armada_370_xp_mpic_domain, irq);
509 if (virq == 0)
510 continue;
511
512 if (irq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
513 writel(irq, per_cpu_int_base +
514 ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
515 else
516 writel(irq, main_int_base +
517 ARMADA_370_XP_INT_SET_ENABLE_OFFS);
518
519 data = irq_get_irq_data(virq);
520 if (!irqd_irq_disabled(data))
521 armada_370_xp_irq_unmask(data);
522 }
523
524 /* Reconfigure doorbells for IPIs and MSIs */
525 writel(doorbell_mask_reg,
526 per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
527 if (doorbell_mask_reg & IPI_DOORBELL_MASK)
528 writel(0, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
529 if (doorbell_mask_reg & PCI_MSI_DOORBELL_MASK)
530 writel(1, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
531}
532
533struct syscore_ops armada_370_xp_mpic_syscore_ops = {
534 .suspend = armada_370_xp_mpic_suspend,
535 .resume = armada_370_xp_mpic_resume,
536};
537
488static int __init armada_370_xp_mpic_of_init(struct device_node *node, 538static int __init armada_370_xp_mpic_of_init(struct device_node *node,
489 struct device_node *parent) 539 struct device_node *parent)
490{ 540{
@@ -541,6 +591,8 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
541 armada_370_xp_mpic_handle_cascade_irq); 591 armada_370_xp_mpic_handle_cascade_irq);
542 } 592 }
543 593
594 register_syscore_ops(&armada_370_xp_mpic_syscore_ops);
595
544 return 0; 596 return 0;
545} 597}
546 598
diff --git a/drivers/irqchip/irq-atmel-aic-common.c b/drivers/irqchip/irq-atmel-aic-common.c
index 6ae3cdee0681..d111ac779c40 100644
--- a/drivers/irqchip/irq-atmel-aic-common.c
+++ b/drivers/irqchip/irq-atmel-aic-common.c
@@ -167,6 +167,32 @@ void __init aic_common_rtc_irq_fixup(struct device_node *root)
167 iounmap(regs); 167 iounmap(regs);
168} 168}
169 169
170#define AT91_RTT_MR 0x00 /* Real-time Mode Register */
171#define AT91_RTT_ALMIEN (1 << 16) /* Alarm Interrupt Enable */
172#define AT91_RTT_RTTINCIEN (1 << 17) /* Real Time Timer Increment Interrupt Enable */
173
174void __init aic_common_rtt_irq_fixup(struct device_node *root)
175{
176 struct device_node *np;
177 void __iomem *regs;
178
179 /*
180 * The at91sam9263 SoC has 2 instances of the RTT block, hence we
181 * iterate over the DT to find each occurrence.
182 */
183 for_each_compatible_node(np, NULL, "atmel,at91sam9260-rtt") {
184 regs = of_iomap(np, 0);
185 if (!regs)
186 continue;
187
188 writel(readl(regs + AT91_RTT_MR) &
189 ~(AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN),
190 regs + AT91_RTT_MR);
191
192 iounmap(regs);
193 }
194}
195
170void __init aic_common_irq_fixup(const struct of_device_id *matches) 196void __init aic_common_irq_fixup(const struct of_device_id *matches)
171{ 197{
172 struct device_node *root = of_find_node_by_path("/"); 198 struct device_node *root = of_find_node_by_path("/");
@@ -217,8 +243,9 @@ struct irq_domain *__init aic_common_of_init(struct device_node *node,
217 } 243 }
218 244
219 ret = irq_alloc_domain_generic_chips(domain, 32, 1, name, 245 ret = irq_alloc_domain_generic_chips(domain, 32, 1, name,
220 handle_level_irq, 0, 0, 246 handle_fasteoi_irq,
221 IRQCHIP_SKIP_SET_WAKE); 247 IRQ_NOREQUEST | IRQ_NOPROBE |
248 IRQ_NOAUTOEN, 0, 0);
222 if (ret) 249 if (ret)
223 goto err_domain_remove; 250 goto err_domain_remove;
224 251
@@ -230,7 +257,6 @@ struct irq_domain *__init aic_common_of_init(struct device_node *node,
230 gc->unused = 0; 257 gc->unused = 0;
231 gc->wake_enabled = ~0; 258 gc->wake_enabled = ~0;
232 gc->chip_types[0].type = IRQ_TYPE_SENSE_MASK; 259 gc->chip_types[0].type = IRQ_TYPE_SENSE_MASK;
233 gc->chip_types[0].handler = handle_fasteoi_irq;
234 gc->chip_types[0].chip.irq_eoi = irq_gc_eoi; 260 gc->chip_types[0].chip.irq_eoi = irq_gc_eoi;
235 gc->chip_types[0].chip.irq_set_wake = irq_gc_set_wake; 261 gc->chip_types[0].chip.irq_set_wake = irq_gc_set_wake;
236 gc->chip_types[0].chip.irq_shutdown = aic_common_shutdown; 262 gc->chip_types[0].chip.irq_shutdown = aic_common_shutdown;
diff --git a/drivers/irqchip/irq-atmel-aic-common.h b/drivers/irqchip/irq-atmel-aic-common.h
index 90aa00e918d6..603f0a9d5411 100644
--- a/drivers/irqchip/irq-atmel-aic-common.h
+++ b/drivers/irqchip/irq-atmel-aic-common.h
@@ -34,6 +34,8 @@ struct irq_domain *__init aic_common_of_init(struct device_node *node,
34 34
35void __init aic_common_rtc_irq_fixup(struct device_node *root); 35void __init aic_common_rtc_irq_fixup(struct device_node *root);
36 36
37void __init aic_common_rtt_irq_fixup(struct device_node *root);
38
37void __init aic_common_irq_fixup(const struct of_device_id *matches); 39void __init aic_common_irq_fixup(const struct of_device_id *matches);
38 40
39#endif /* __IRQ_ATMEL_AIC_COMMON_H */ 41#endif /* __IRQ_ATMEL_AIC_COMMON_H */
diff --git a/drivers/irqchip/irq-atmel-aic.c b/drivers/irqchip/irq-atmel-aic.c
index 9a2cf3c1a3a5..dae3604b32a9 100644
--- a/drivers/irqchip/irq-atmel-aic.c
+++ b/drivers/irqchip/irq-atmel-aic.c
@@ -65,11 +65,11 @@ aic_handle(struct pt_regs *regs)
65 u32 irqnr; 65 u32 irqnr;
66 u32 irqstat; 66 u32 irqstat;
67 67
68 irqnr = irq_reg_readl(gc->reg_base + AT91_AIC_IVR); 68 irqnr = irq_reg_readl(gc, AT91_AIC_IVR);
69 irqstat = irq_reg_readl(gc->reg_base + AT91_AIC_ISR); 69 irqstat = irq_reg_readl(gc, AT91_AIC_ISR);
70 70
71 if (!irqstat) 71 if (!irqstat)
72 irq_reg_writel(0, gc->reg_base + AT91_AIC_EOICR); 72 irq_reg_writel(gc, 0, AT91_AIC_EOICR);
73 else 73 else
74 handle_domain_irq(aic_domain, irqnr, regs); 74 handle_domain_irq(aic_domain, irqnr, regs);
75} 75}
@@ -80,7 +80,7 @@ static int aic_retrigger(struct irq_data *d)
80 80
81 /* Enable interrupt on AIC5 */ 81 /* Enable interrupt on AIC5 */
82 irq_gc_lock(gc); 82 irq_gc_lock(gc);
83 irq_reg_writel(d->mask, gc->reg_base + AT91_AIC_ISCR); 83 irq_reg_writel(gc, d->mask, AT91_AIC_ISCR);
84 irq_gc_unlock(gc); 84 irq_gc_unlock(gc);
85 85
86 return 0; 86 return 0;
@@ -92,12 +92,12 @@ static int aic_set_type(struct irq_data *d, unsigned type)
92 unsigned int smr; 92 unsigned int smr;
93 int ret; 93 int ret;
94 94
95 smr = irq_reg_readl(gc->reg_base + AT91_AIC_SMR(d->hwirq)); 95 smr = irq_reg_readl(gc, AT91_AIC_SMR(d->hwirq));
96 ret = aic_common_set_type(d, type, &smr); 96 ret = aic_common_set_type(d, type, &smr);
97 if (ret) 97 if (ret)
98 return ret; 98 return ret;
99 99
100 irq_reg_writel(smr, gc->reg_base + AT91_AIC_SMR(d->hwirq)); 100 irq_reg_writel(gc, smr, AT91_AIC_SMR(d->hwirq));
101 101
102 return 0; 102 return 0;
103} 103}
@@ -108,8 +108,8 @@ static void aic_suspend(struct irq_data *d)
108 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); 108 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
109 109
110 irq_gc_lock(gc); 110 irq_gc_lock(gc);
111 irq_reg_writel(gc->mask_cache, gc->reg_base + AT91_AIC_IDCR); 111 irq_reg_writel(gc, gc->mask_cache, AT91_AIC_IDCR);
112 irq_reg_writel(gc->wake_active, gc->reg_base + AT91_AIC_IECR); 112 irq_reg_writel(gc, gc->wake_active, AT91_AIC_IECR);
113 irq_gc_unlock(gc); 113 irq_gc_unlock(gc);
114} 114}
115 115
@@ -118,8 +118,8 @@ static void aic_resume(struct irq_data *d)
118 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); 118 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
119 119
120 irq_gc_lock(gc); 120 irq_gc_lock(gc);
121 irq_reg_writel(gc->wake_active, gc->reg_base + AT91_AIC_IDCR); 121 irq_reg_writel(gc, gc->wake_active, AT91_AIC_IDCR);
122 irq_reg_writel(gc->mask_cache, gc->reg_base + AT91_AIC_IECR); 122 irq_reg_writel(gc, gc->mask_cache, AT91_AIC_IECR);
123 irq_gc_unlock(gc); 123 irq_gc_unlock(gc);
124} 124}
125 125
@@ -128,8 +128,8 @@ static void aic_pm_shutdown(struct irq_data *d)
128 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); 128 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
129 129
130 irq_gc_lock(gc); 130 irq_gc_lock(gc);
131 irq_reg_writel(0xffffffff, gc->reg_base + AT91_AIC_IDCR); 131 irq_reg_writel(gc, 0xffffffff, AT91_AIC_IDCR);
132 irq_reg_writel(0xffffffff, gc->reg_base + AT91_AIC_ICCR); 132 irq_reg_writel(gc, 0xffffffff, AT91_AIC_ICCR);
133 irq_gc_unlock(gc); 133 irq_gc_unlock(gc);
134} 134}
135#else 135#else
@@ -148,24 +148,24 @@ static void __init aic_hw_init(struct irq_domain *domain)
148 * will not Lock out nIRQ 148 * will not Lock out nIRQ
149 */ 149 */
150 for (i = 0; i < 8; i++) 150 for (i = 0; i < 8; i++)
151 irq_reg_writel(0, gc->reg_base + AT91_AIC_EOICR); 151 irq_reg_writel(gc, 0, AT91_AIC_EOICR);
152 152
153 /* 153 /*
154 * Spurious Interrupt ID in Spurious Vector Register. 154 * Spurious Interrupt ID in Spurious Vector Register.
155 * When there is no current interrupt, the IRQ Vector Register 155 * When there is no current interrupt, the IRQ Vector Register
156 * reads the value stored in AIC_SPU 156 * reads the value stored in AIC_SPU
157 */ 157 */
158 irq_reg_writel(0xffffffff, gc->reg_base + AT91_AIC_SPU); 158 irq_reg_writel(gc, 0xffffffff, AT91_AIC_SPU);
159 159
160 /* No debugging in AIC: Debug (Protect) Control Register */ 160 /* No debugging in AIC: Debug (Protect) Control Register */
161 irq_reg_writel(0, gc->reg_base + AT91_AIC_DCR); 161 irq_reg_writel(gc, 0, AT91_AIC_DCR);
162 162
163 /* Disable and clear all interrupts initially */ 163 /* Disable and clear all interrupts initially */
164 irq_reg_writel(0xffffffff, gc->reg_base + AT91_AIC_IDCR); 164 irq_reg_writel(gc, 0xffffffff, AT91_AIC_IDCR);
165 irq_reg_writel(0xffffffff, gc->reg_base + AT91_AIC_ICCR); 165 irq_reg_writel(gc, 0xffffffff, AT91_AIC_ICCR);
166 166
167 for (i = 0; i < 32; i++) 167 for (i = 0; i < 32; i++)
168 irq_reg_writel(i, gc->reg_base + AT91_AIC_SVR(i)); 168 irq_reg_writel(gc, i, AT91_AIC_SVR(i));
169} 169}
170 170
171static int aic_irq_domain_xlate(struct irq_domain *d, 171static int aic_irq_domain_xlate(struct irq_domain *d,
@@ -195,10 +195,10 @@ static int aic_irq_domain_xlate(struct irq_domain *d,
195 gc = dgc->gc[idx]; 195 gc = dgc->gc[idx];
196 196
197 irq_gc_lock(gc); 197 irq_gc_lock(gc);
198 smr = irq_reg_readl(gc->reg_base + AT91_AIC_SMR(*out_hwirq)); 198 smr = irq_reg_readl(gc, AT91_AIC_SMR(*out_hwirq));
199 ret = aic_common_set_priority(intspec[2], &smr); 199 ret = aic_common_set_priority(intspec[2], &smr);
200 if (!ret) 200 if (!ret)
201 irq_reg_writel(smr, gc->reg_base + AT91_AIC_SMR(*out_hwirq)); 201 irq_reg_writel(gc, smr, AT91_AIC_SMR(*out_hwirq));
202 irq_gc_unlock(gc); 202 irq_gc_unlock(gc);
203 203
204 return ret; 204 return ret;
@@ -209,16 +209,32 @@ static const struct irq_domain_ops aic_irq_ops = {
209 .xlate = aic_irq_domain_xlate, 209 .xlate = aic_irq_domain_xlate,
210}; 210};
211 211
212static void __init at91sam9_aic_irq_fixup(struct device_node *root) 212static void __init at91rm9200_aic_irq_fixup(struct device_node *root)
213{ 213{
214 aic_common_rtc_irq_fixup(root); 214 aic_common_rtc_irq_fixup(root);
215} 215}
216 216
217static void __init at91sam9260_aic_irq_fixup(struct device_node *root)
218{
219 aic_common_rtt_irq_fixup(root);
220}
221
222static void __init at91sam9g45_aic_irq_fixup(struct device_node *root)
223{
224 aic_common_rtc_irq_fixup(root);
225 aic_common_rtt_irq_fixup(root);
226}
227
217static const struct of_device_id __initdata aic_irq_fixups[] = { 228static const struct of_device_id __initdata aic_irq_fixups[] = {
218 { .compatible = "atmel,at91sam9g45", .data = at91sam9_aic_irq_fixup }, 229 { .compatible = "atmel,at91rm9200", .data = at91rm9200_aic_irq_fixup },
219 { .compatible = "atmel,at91sam9n12", .data = at91sam9_aic_irq_fixup }, 230 { .compatible = "atmel,at91sam9g45", .data = at91sam9g45_aic_irq_fixup },
220 { .compatible = "atmel,at91sam9rl", .data = at91sam9_aic_irq_fixup }, 231 { .compatible = "atmel,at91sam9n12", .data = at91rm9200_aic_irq_fixup },
221 { .compatible = "atmel,at91sam9x5", .data = at91sam9_aic_irq_fixup }, 232 { .compatible = "atmel,at91sam9rl", .data = at91sam9g45_aic_irq_fixup },
233 { .compatible = "atmel,at91sam9x5", .data = at91rm9200_aic_irq_fixup },
234 { .compatible = "atmel,at91sam9260", .data = at91sam9260_aic_irq_fixup },
235 { .compatible = "atmel,at91sam9261", .data = at91sam9260_aic_irq_fixup },
236 { .compatible = "atmel,at91sam9263", .data = at91sam9260_aic_irq_fixup },
237 { .compatible = "atmel,at91sam9g20", .data = at91sam9260_aic_irq_fixup },
222 { /* sentinel */ }, 238 { /* sentinel */ },
223}; 239};
224 240
diff --git a/drivers/irqchip/irq-atmel-aic5.c b/drivers/irqchip/irq-atmel-aic5.c
index a11aae8fb006..a2e8c3f876cb 100644
--- a/drivers/irqchip/irq-atmel-aic5.c
+++ b/drivers/irqchip/irq-atmel-aic5.c
@@ -75,11 +75,11 @@ aic5_handle(struct pt_regs *regs)
75 u32 irqnr; 75 u32 irqnr;
76 u32 irqstat; 76 u32 irqstat;
77 77
78 irqnr = irq_reg_readl(gc->reg_base + AT91_AIC5_IVR); 78 irqnr = irq_reg_readl(gc, AT91_AIC5_IVR);
79 irqstat = irq_reg_readl(gc->reg_base + AT91_AIC5_ISR); 79 irqstat = irq_reg_readl(gc, AT91_AIC5_ISR);
80 80
81 if (!irqstat) 81 if (!irqstat)
82 irq_reg_writel(0, gc->reg_base + AT91_AIC5_EOICR); 82 irq_reg_writel(gc, 0, AT91_AIC5_EOICR);
83 else 83 else
84 handle_domain_irq(aic5_domain, irqnr, regs); 84 handle_domain_irq(aic5_domain, irqnr, regs);
85} 85}
@@ -92,8 +92,8 @@ static void aic5_mask(struct irq_data *d)
92 92
93 /* Disable interrupt on AIC5 */ 93 /* Disable interrupt on AIC5 */
94 irq_gc_lock(gc); 94 irq_gc_lock(gc);
95 irq_reg_writel(d->hwirq, gc->reg_base + AT91_AIC5_SSR); 95 irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR);
96 irq_reg_writel(1, gc->reg_base + AT91_AIC5_IDCR); 96 irq_reg_writel(gc, 1, AT91_AIC5_IDCR);
97 gc->mask_cache &= ~d->mask; 97 gc->mask_cache &= ~d->mask;
98 irq_gc_unlock(gc); 98 irq_gc_unlock(gc);
99} 99}
@@ -106,8 +106,8 @@ static void aic5_unmask(struct irq_data *d)
106 106
107 /* Enable interrupt on AIC5 */ 107 /* Enable interrupt on AIC5 */
108 irq_gc_lock(gc); 108 irq_gc_lock(gc);
109 irq_reg_writel(d->hwirq, gc->reg_base + AT91_AIC5_SSR); 109 irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR);
110 irq_reg_writel(1, gc->reg_base + AT91_AIC5_IECR); 110 irq_reg_writel(gc, 1, AT91_AIC5_IECR);
111 gc->mask_cache |= d->mask; 111 gc->mask_cache |= d->mask;
112 irq_gc_unlock(gc); 112 irq_gc_unlock(gc);
113} 113}
@@ -120,8 +120,8 @@ static int aic5_retrigger(struct irq_data *d)
120 120
121 /* Enable interrupt on AIC5 */ 121 /* Enable interrupt on AIC5 */
122 irq_gc_lock(gc); 122 irq_gc_lock(gc);
123 irq_reg_writel(d->hwirq, gc->reg_base + AT91_AIC5_SSR); 123 irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR);
124 irq_reg_writel(1, gc->reg_base + AT91_AIC5_ISCR); 124 irq_reg_writel(gc, 1, AT91_AIC5_ISCR);
125 irq_gc_unlock(gc); 125 irq_gc_unlock(gc);
126 126
127 return 0; 127 return 0;
@@ -136,11 +136,11 @@ static int aic5_set_type(struct irq_data *d, unsigned type)
136 int ret; 136 int ret;
137 137
138 irq_gc_lock(gc); 138 irq_gc_lock(gc);
139 irq_reg_writel(d->hwirq, gc->reg_base + AT91_AIC5_SSR); 139 irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR);
140 smr = irq_reg_readl(gc->reg_base + AT91_AIC5_SMR); 140 smr = irq_reg_readl(gc, AT91_AIC5_SMR);
141 ret = aic_common_set_type(d, type, &smr); 141 ret = aic_common_set_type(d, type, &smr);
142 if (!ret) 142 if (!ret)
143 irq_reg_writel(smr, gc->reg_base + AT91_AIC5_SMR); 143 irq_reg_writel(gc, smr, AT91_AIC5_SMR);
144 irq_gc_unlock(gc); 144 irq_gc_unlock(gc);
145 145
146 return ret; 146 return ret;
@@ -162,12 +162,11 @@ static void aic5_suspend(struct irq_data *d)
162 if ((mask & gc->mask_cache) == (mask & gc->wake_active)) 162 if ((mask & gc->mask_cache) == (mask & gc->wake_active))
163 continue; 163 continue;
164 164
165 irq_reg_writel(i + gc->irq_base, 165 irq_reg_writel(bgc, i + gc->irq_base, AT91_AIC5_SSR);
166 bgc->reg_base + AT91_AIC5_SSR);
167 if (mask & gc->wake_active) 166 if (mask & gc->wake_active)
168 irq_reg_writel(1, bgc->reg_base + AT91_AIC5_IECR); 167 irq_reg_writel(bgc, 1, AT91_AIC5_IECR);
169 else 168 else
170 irq_reg_writel(1, bgc->reg_base + AT91_AIC5_IDCR); 169 irq_reg_writel(bgc, 1, AT91_AIC5_IDCR);
171 } 170 }
172 irq_gc_unlock(bgc); 171 irq_gc_unlock(bgc);
173} 172}
@@ -187,12 +186,11 @@ static void aic5_resume(struct irq_data *d)
187 if ((mask & gc->mask_cache) == (mask & gc->wake_active)) 186 if ((mask & gc->mask_cache) == (mask & gc->wake_active))
188 continue; 187 continue;
189 188
190 irq_reg_writel(i + gc->irq_base, 189 irq_reg_writel(bgc, i + gc->irq_base, AT91_AIC5_SSR);
191 bgc->reg_base + AT91_AIC5_SSR);
192 if (mask & gc->mask_cache) 190 if (mask & gc->mask_cache)
193 irq_reg_writel(1, bgc->reg_base + AT91_AIC5_IECR); 191 irq_reg_writel(bgc, 1, AT91_AIC5_IECR);
194 else 192 else
195 irq_reg_writel(1, bgc->reg_base + AT91_AIC5_IDCR); 193 irq_reg_writel(bgc, 1, AT91_AIC5_IDCR);
196 } 194 }
197 irq_gc_unlock(bgc); 195 irq_gc_unlock(bgc);
198} 196}
@@ -207,10 +205,9 @@ static void aic5_pm_shutdown(struct irq_data *d)
207 205
208 irq_gc_lock(bgc); 206 irq_gc_lock(bgc);
209 for (i = 0; i < dgc->irqs_per_chip; i++) { 207 for (i = 0; i < dgc->irqs_per_chip; i++) {
210 irq_reg_writel(i + gc->irq_base, 208 irq_reg_writel(bgc, i + gc->irq_base, AT91_AIC5_SSR);
211 bgc->reg_base + AT91_AIC5_SSR); 209 irq_reg_writel(bgc, 1, AT91_AIC5_IDCR);
212 irq_reg_writel(1, bgc->reg_base + AT91_AIC5_IDCR); 210 irq_reg_writel(bgc, 1, AT91_AIC5_ICCR);
213 irq_reg_writel(1, bgc->reg_base + AT91_AIC5_ICCR);
214 } 211 }
215 irq_gc_unlock(bgc); 212 irq_gc_unlock(bgc);
216} 213}
@@ -230,24 +227,24 @@ static void __init aic5_hw_init(struct irq_domain *domain)
230 * will not Lock out nIRQ 227 * will not Lock out nIRQ
231 */ 228 */
232 for (i = 0; i < 8; i++) 229 for (i = 0; i < 8; i++)
233 irq_reg_writel(0, gc->reg_base + AT91_AIC5_EOICR); 230 irq_reg_writel(gc, 0, AT91_AIC5_EOICR);
234 231
235 /* 232 /*
236 * Spurious Interrupt ID in Spurious Vector Register. 233 * Spurious Interrupt ID in Spurious Vector Register.
237 * When there is no current interrupt, the IRQ Vector Register 234 * When there is no current interrupt, the IRQ Vector Register
238 * reads the value stored in AIC_SPU 235 * reads the value stored in AIC_SPU
239 */ 236 */
240 irq_reg_writel(0xffffffff, gc->reg_base + AT91_AIC5_SPU); 237 irq_reg_writel(gc, 0xffffffff, AT91_AIC5_SPU);
241 238
242 /* No debugging in AIC: Debug (Protect) Control Register */ 239 /* No debugging in AIC: Debug (Protect) Control Register */
243 irq_reg_writel(0, gc->reg_base + AT91_AIC5_DCR); 240 irq_reg_writel(gc, 0, AT91_AIC5_DCR);
244 241
245 /* Disable and clear all interrupts initially */ 242 /* Disable and clear all interrupts initially */
246 for (i = 0; i < domain->revmap_size; i++) { 243 for (i = 0; i < domain->revmap_size; i++) {
247 irq_reg_writel(i, gc->reg_base + AT91_AIC5_SSR); 244 irq_reg_writel(gc, i, AT91_AIC5_SSR);
248 irq_reg_writel(i, gc->reg_base + AT91_AIC5_SVR); 245 irq_reg_writel(gc, i, AT91_AIC5_SVR);
249 irq_reg_writel(1, gc->reg_base + AT91_AIC5_IDCR); 246 irq_reg_writel(gc, 1, AT91_AIC5_IDCR);
250 irq_reg_writel(1, gc->reg_base + AT91_AIC5_ICCR); 247 irq_reg_writel(gc, 1, AT91_AIC5_ICCR);
251 } 248 }
252} 249}
253 250
@@ -273,11 +270,11 @@ static int aic5_irq_domain_xlate(struct irq_domain *d,
273 gc = dgc->gc[0]; 270 gc = dgc->gc[0];
274 271
275 irq_gc_lock(gc); 272 irq_gc_lock(gc);
276 irq_reg_writel(*out_hwirq, gc->reg_base + AT91_AIC5_SSR); 273 irq_reg_writel(gc, *out_hwirq, AT91_AIC5_SSR);
277 smr = irq_reg_readl(gc->reg_base + AT91_AIC5_SMR); 274 smr = irq_reg_readl(gc, AT91_AIC5_SMR);
278 ret = aic_common_set_priority(intspec[2], &smr); 275 ret = aic_common_set_priority(intspec[2], &smr);
279 if (!ret) 276 if (!ret)
280 irq_reg_writel(intspec[2] | smr, gc->reg_base + AT91_AIC5_SMR); 277 irq_reg_writel(gc, intspec[2] | smr, AT91_AIC5_SMR);
281 irq_gc_unlock(gc); 278 irq_gc_unlock(gc);
282 279
283 return ret; 280 return ret;
diff --git a/drivers/irqchip/irq-bcm7120-l2.c b/drivers/irqchip/irq-bcm7120-l2.c
index b9f4fb808e49..8eec8e1201d9 100644
--- a/drivers/irqchip/irq-bcm7120-l2.c
+++ b/drivers/irqchip/irq-bcm7120-l2.c
@@ -13,6 +13,7 @@
13#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/slab.h> 14#include <linux/slab.h>
15#include <linux/module.h> 15#include <linux/module.h>
16#include <linux/kconfig.h>
16#include <linux/platform_device.h> 17#include <linux/platform_device.h>
17#include <linux/of.h> 18#include <linux/of.h>
18#include <linux/of_irq.h> 19#include <linux/of_irq.h>
@@ -23,47 +24,52 @@
23#include <linux/io.h> 24#include <linux/io.h>
24#include <linux/irqdomain.h> 25#include <linux/irqdomain.h>
25#include <linux/reboot.h> 26#include <linux/reboot.h>
27#include <linux/bitops.h>
26#include <linux/irqchip/chained_irq.h> 28#include <linux/irqchip/chained_irq.h>
27 29
28#include "irqchip.h" 30#include "irqchip.h"
29 31
30#include <asm/mach/irq.h>
31
32/* Register offset in the L2 interrupt controller */ 32/* Register offset in the L2 interrupt controller */
33#define IRQEN 0x00 33#define IRQEN 0x00
34#define IRQSTAT 0x04 34#define IRQSTAT 0x04
35 35
36#define MAX_WORDS 4
37#define IRQS_PER_WORD 32
38
36struct bcm7120_l2_intc_data { 39struct bcm7120_l2_intc_data {
37 void __iomem *base; 40 unsigned int n_words;
41 void __iomem *base[MAX_WORDS];
38 struct irq_domain *domain; 42 struct irq_domain *domain;
39 bool can_wake; 43 bool can_wake;
40 u32 irq_fwd_mask; 44 u32 irq_fwd_mask[MAX_WORDS];
41 u32 irq_map_mask; 45 u32 irq_map_mask[MAX_WORDS];
42 u32 saved_mask;
43}; 46};
44 47
45static void bcm7120_l2_intc_irq_handle(unsigned int irq, struct irq_desc *desc) 48static void bcm7120_l2_intc_irq_handle(unsigned int irq, struct irq_desc *desc)
46{ 49{
47 struct bcm7120_l2_intc_data *b = irq_desc_get_handler_data(desc); 50 struct bcm7120_l2_intc_data *b = irq_desc_get_handler_data(desc);
48 struct irq_chip *chip = irq_desc_get_chip(desc); 51 struct irq_chip *chip = irq_desc_get_chip(desc);
49 u32 status; 52 unsigned int idx;
50 53
51 chained_irq_enter(chip, desc); 54 chained_irq_enter(chip, desc);
52 55
53 status = __raw_readl(b->base + IRQSTAT); 56 for (idx = 0; idx < b->n_words; idx++) {
54 57 int base = idx * IRQS_PER_WORD;
55 if (status == 0) { 58 struct irq_chip_generic *gc =
56 do_bad_IRQ(irq, desc); 59 irq_get_domain_generic_chip(b->domain, base);
57 goto out; 60 unsigned long pending;
61 int hwirq;
62
63 irq_gc_lock(gc);
64 pending = irq_reg_readl(gc, IRQSTAT) & gc->mask_cache;
65 irq_gc_unlock(gc);
66
67 for_each_set_bit(hwirq, &pending, IRQS_PER_WORD) {
68 generic_handle_irq(irq_find_mapping(b->domain,
69 base + hwirq));
70 }
58 } 71 }
59 72
60 do {
61 irq = ffs(status) - 1;
62 status &= ~(1 << irq);
63 generic_handle_irq(irq_find_mapping(b->domain, irq));
64 } while (status);
65
66out:
67 chained_irq_exit(chip, desc); 73 chained_irq_exit(chip, desc);
68} 74}
69 75
@@ -71,26 +77,20 @@ static void bcm7120_l2_intc_suspend(struct irq_data *d)
71{ 77{
72 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); 78 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
73 struct bcm7120_l2_intc_data *b = gc->private; 79 struct bcm7120_l2_intc_data *b = gc->private;
74 u32 reg;
75 80
76 irq_gc_lock(gc); 81 irq_gc_lock(gc);
77 /* Save the current mask and the interrupt forward mask */ 82 if (b->can_wake)
78 b->saved_mask = __raw_readl(b->base) | b->irq_fwd_mask; 83 irq_reg_writel(gc, gc->mask_cache | gc->wake_active, IRQEN);
79 if (b->can_wake) {
80 reg = b->saved_mask | gc->wake_active;
81 __raw_writel(reg, b->base);
82 }
83 irq_gc_unlock(gc); 84 irq_gc_unlock(gc);
84} 85}
85 86
86static void bcm7120_l2_intc_resume(struct irq_data *d) 87static void bcm7120_l2_intc_resume(struct irq_data *d)
87{ 88{
88 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); 89 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
89 struct bcm7120_l2_intc_data *b = gc->private;
90 90
91 /* Restore the saved mask */ 91 /* Restore the saved mask */
92 irq_gc_lock(gc); 92 irq_gc_lock(gc);
93 __raw_writel(b->saved_mask, b->base); 93 irq_reg_writel(gc, gc->mask_cache, IRQEN);
94 irq_gc_unlock(gc); 94 irq_gc_unlock(gc);
95} 95}
96 96
@@ -99,14 +99,20 @@ static int bcm7120_l2_intc_init_one(struct device_node *dn,
99 int irq, const __be32 *map_mask) 99 int irq, const __be32 *map_mask)
100{ 100{
101 int parent_irq; 101 int parent_irq;
102 unsigned int idx;
102 103
103 parent_irq = irq_of_parse_and_map(dn, irq); 104 parent_irq = irq_of_parse_and_map(dn, irq);
104 if (parent_irq < 0) { 105 if (!parent_irq) {
105 pr_err("failed to map interrupt %d\n", irq); 106 pr_err("failed to map interrupt %d\n", irq);
106 return parent_irq; 107 return -EINVAL;
107 } 108 }
108 109
109 data->irq_map_mask |= be32_to_cpup(map_mask + irq); 110 /* For multiple parent IRQs with multiple words, this looks like:
111 * <irq0_w0 irq0_w1 irq1_w0 irq1_w1 ...>
112 */
113 for (idx = 0; idx < data->n_words; idx++)
114 data->irq_map_mask[idx] |=
115 be32_to_cpup(map_mask + irq * data->n_words + idx);
110 116
111 irq_set_handler_data(parent_irq, data); 117 irq_set_handler_data(parent_irq, data);
112 irq_set_chained_handler(parent_irq, bcm7120_l2_intc_irq_handle); 118 irq_set_chained_handler(parent_irq, bcm7120_l2_intc_irq_handle);
@@ -123,26 +129,41 @@ int __init bcm7120_l2_intc_of_init(struct device_node *dn,
123 struct irq_chip_type *ct; 129 struct irq_chip_type *ct;
124 const __be32 *map_mask; 130 const __be32 *map_mask;
125 int num_parent_irqs; 131 int num_parent_irqs;
126 int ret = 0, len, irq; 132 int ret = 0, len;
133 unsigned int idx, irq, flags;
127 134
128 data = kzalloc(sizeof(*data), GFP_KERNEL); 135 data = kzalloc(sizeof(*data), GFP_KERNEL);
129 if (!data) 136 if (!data)
130 return -ENOMEM; 137 return -ENOMEM;
131 138
132 data->base = of_iomap(dn, 0); 139 for (idx = 0; idx < MAX_WORDS; idx++) {
133 if (!data->base) { 140 data->base[idx] = of_iomap(dn, idx);
141 if (!data->base[idx])
142 break;
143 data->n_words = idx + 1;
144 }
145 if (!data->n_words) {
134 pr_err("failed to remap intc L2 registers\n"); 146 pr_err("failed to remap intc L2 registers\n");
135 ret = -ENOMEM; 147 ret = -ENOMEM;
136 goto out_free; 148 goto out_unmap;
137 } 149 }
138 150
139 if (of_property_read_u32(dn, "brcm,int-fwd-mask", &data->irq_fwd_mask)) 151 /* Enable all interrupts specified in the interrupt forward mask;
140 data->irq_fwd_mask = 0; 152 * disable all others. If the property doesn't exist (-EINVAL),
141 153 * assume all zeroes.
142 /* Enable all interrupt specified in the interrupt forward mask and have
143 * the other disabled
144 */ 154 */
145 __raw_writel(data->irq_fwd_mask, data->base + IRQEN); 155 ret = of_property_read_u32_array(dn, "brcm,int-fwd-mask",
156 data->irq_fwd_mask, data->n_words);
157 if (ret == 0 || ret == -EINVAL) {
158 for (idx = 0; idx < data->n_words; idx++)
159 __raw_writel(data->irq_fwd_mask[idx],
160 data->base[idx] + IRQEN);
161 } else {
162 /* property exists but has the wrong number of words */
163 pr_err("invalid int-fwd-mask property\n");
164 ret = -EINVAL;
165 goto out_unmap;
166 }
146 167
147 num_parent_irqs = of_irq_count(dn); 168 num_parent_irqs = of_irq_count(dn);
148 if (num_parent_irqs <= 0) { 169 if (num_parent_irqs <= 0) {
@@ -152,7 +173,8 @@ int __init bcm7120_l2_intc_of_init(struct device_node *dn,
152 } 173 }
153 174
154 map_mask = of_get_property(dn, "brcm,int-map-mask", &len); 175 map_mask = of_get_property(dn, "brcm,int-map-mask", &len);
155 if (!map_mask || (len != (sizeof(*map_mask) * num_parent_irqs))) { 176 if (!map_mask ||
177 (len != (sizeof(*map_mask) * num_parent_irqs * data->n_words))) {
156 pr_err("invalid brcm,int-map-mask property\n"); 178 pr_err("invalid brcm,int-map-mask property\n");
157 ret = -EINVAL; 179 ret = -EINVAL;
158 goto out_unmap; 180 goto out_unmap;
@@ -164,56 +186,70 @@ int __init bcm7120_l2_intc_of_init(struct device_node *dn,
164 goto out_unmap; 186 goto out_unmap;
165 } 187 }
166 188
167 data->domain = irq_domain_add_linear(dn, 32, 189 data->domain = irq_domain_add_linear(dn, IRQS_PER_WORD * data->n_words,
168 &irq_generic_chip_ops, NULL); 190 &irq_generic_chip_ops, NULL);
169 if (!data->domain) { 191 if (!data->domain) {
170 ret = -ENOMEM; 192 ret = -ENOMEM;
171 goto out_unmap; 193 goto out_unmap;
172 } 194 }
173 195
174 ret = irq_alloc_domain_generic_chips(data->domain, 32, 1, 196 /* MIPS chips strapped for BE will automagically configure the
175 dn->full_name, handle_level_irq, clr, 0, 197 * peripheral registers for CPU-native byte order.
176 IRQ_GC_INIT_MASK_CACHE); 198 */
199 flags = IRQ_GC_INIT_MASK_CACHE;
200 if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
201 flags |= IRQ_GC_BE_IO;
202
203 ret = irq_alloc_domain_generic_chips(data->domain, IRQS_PER_WORD, 1,
204 dn->full_name, handle_level_irq, clr, 0, flags);
177 if (ret) { 205 if (ret) {
178 pr_err("failed to allocate generic irq chip\n"); 206 pr_err("failed to allocate generic irq chip\n");
179 goto out_free_domain; 207 goto out_free_domain;
180 } 208 }
181 209
182 gc = irq_get_domain_generic_chip(data->domain, 0); 210 if (of_property_read_bool(dn, "brcm,irq-can-wake"))
183 gc->unused = 0xfffffff & ~data->irq_map_mask;
184 gc->reg_base = data->base;
185 gc->private = data;
186 ct = gc->chip_types;
187
188 ct->regs.mask = IRQEN;
189 ct->chip.irq_mask = irq_gc_mask_clr_bit;
190 ct->chip.irq_unmask = irq_gc_mask_set_bit;
191 ct->chip.irq_ack = irq_gc_noop;
192 ct->chip.irq_suspend = bcm7120_l2_intc_suspend;
193 ct->chip.irq_resume = bcm7120_l2_intc_resume;
194
195 if (of_property_read_bool(dn, "brcm,irq-can-wake")) {
196 data->can_wake = true; 211 data->can_wake = true;
197 /* This IRQ chip can wake the system, set all relevant child 212
198 * interupts in wake_enabled mask 213 for (idx = 0; idx < data->n_words; idx++) {
199 */ 214 irq = idx * IRQS_PER_WORD;
200 gc->wake_enabled = 0xffffffff; 215 gc = irq_get_domain_generic_chip(data->domain, irq);
201 gc->wake_enabled &= ~gc->unused; 216
202 ct->chip.irq_set_wake = irq_gc_set_wake; 217 gc->unused = 0xffffffff & ~data->irq_map_mask[idx];
218 gc->reg_base = data->base[idx];
219 gc->private = data;
220 ct = gc->chip_types;
221
222 ct->regs.mask = IRQEN;
223 ct->chip.irq_mask = irq_gc_mask_clr_bit;
224 ct->chip.irq_unmask = irq_gc_mask_set_bit;
225 ct->chip.irq_ack = irq_gc_noop;
226 ct->chip.irq_suspend = bcm7120_l2_intc_suspend;
227 ct->chip.irq_resume = bcm7120_l2_intc_resume;
228
229 if (data->can_wake) {
230 /* This IRQ chip can wake the system, set all
231 * relevant child interupts in wake_enabled mask
232 */
233 gc->wake_enabled = 0xffffffff;
234 gc->wake_enabled &= ~gc->unused;
235 ct->chip.irq_set_wake = irq_gc_set_wake;
236 }
203 } 237 }
204 238
205 pr_info("registered BCM7120 L2 intc (mem: 0x%p, parent IRQ(s): %d)\n", 239 pr_info("registered BCM7120 L2 intc (mem: 0x%p, parent IRQ(s): %d)\n",
206 data->base, num_parent_irqs); 240 data->base[0], num_parent_irqs);
207 241
208 return 0; 242 return 0;
209 243
210out_free_domain: 244out_free_domain:
211 irq_domain_remove(data->domain); 245 irq_domain_remove(data->domain);
212out_unmap: 246out_unmap:
213 iounmap(data->base); 247 for (idx = 0; idx < MAX_WORDS; idx++) {
214out_free: 248 if (data->base[idx])
249 iounmap(data->base[idx]);
250 }
215 kfree(data); 251 kfree(data);
216 return ret; 252 return ret;
217} 253}
218IRQCHIP_DECLARE(brcmstb_l2_intc, "brcm,bcm7120-l2-intc", 254IRQCHIP_DECLARE(bcm7120_l2_intc, "brcm,bcm7120-l2-intc",
219 bcm7120_l2_intc_of_init); 255 bcm7120_l2_intc_of_init);
diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c
index c15c840987d2..313c2c64498a 100644
--- a/drivers/irqchip/irq-brcmstb-l2.c
+++ b/drivers/irqchip/irq-brcmstb-l2.c
@@ -18,7 +18,9 @@
18#include <linux/init.h> 18#include <linux/init.h>
19#include <linux/slab.h> 19#include <linux/slab.h>
20#include <linux/module.h> 20#include <linux/module.h>
21#include <linux/kconfig.h>
21#include <linux/platform_device.h> 22#include <linux/platform_device.h>
23#include <linux/spinlock.h>
22#include <linux/of.h> 24#include <linux/of.h>
23#include <linux/of_irq.h> 25#include <linux/of_irq.h>
24#include <linux/of_address.h> 26#include <linux/of_address.h>
@@ -30,8 +32,6 @@
30#include <linux/irqchip.h> 32#include <linux/irqchip.h>
31#include <linux/irqchip/chained_irq.h> 33#include <linux/irqchip/chained_irq.h>
32 34
33#include <asm/mach/irq.h>
34
35#include "irqchip.h" 35#include "irqchip.h"
36 36
37/* Register offsets in the L2 interrupt controller */ 37/* Register offsets in the L2 interrupt controller */
@@ -54,23 +54,26 @@ struct brcmstb_l2_intc_data {
54static void brcmstb_l2_intc_irq_handle(unsigned int irq, struct irq_desc *desc) 54static void brcmstb_l2_intc_irq_handle(unsigned int irq, struct irq_desc *desc)
55{ 55{
56 struct brcmstb_l2_intc_data *b = irq_desc_get_handler_data(desc); 56 struct brcmstb_l2_intc_data *b = irq_desc_get_handler_data(desc);
57 struct irq_chip_generic *gc = irq_get_domain_generic_chip(b->domain, 0);
57 struct irq_chip *chip = irq_desc_get_chip(desc); 58 struct irq_chip *chip = irq_desc_get_chip(desc);
58 u32 status; 59 u32 status;
59 60
60 chained_irq_enter(chip, desc); 61 chained_irq_enter(chip, desc);
61 62
62 status = __raw_readl(b->base + CPU_STATUS) & 63 status = irq_reg_readl(gc, CPU_STATUS) &
63 ~(__raw_readl(b->base + CPU_MASK_STATUS)); 64 ~(irq_reg_readl(gc, CPU_MASK_STATUS));
64 65
65 if (status == 0) { 66 if (status == 0) {
66 do_bad_IRQ(irq, desc); 67 raw_spin_lock(&desc->lock);
68 handle_bad_irq(irq, desc);
69 raw_spin_unlock(&desc->lock);
67 goto out; 70 goto out;
68 } 71 }
69 72
70 do { 73 do {
71 irq = ffs(status) - 1; 74 irq = ffs(status) - 1;
72 /* ack at our level */ 75 /* ack at our level */
73 __raw_writel(1 << irq, b->base + CPU_CLEAR); 76 irq_reg_writel(gc, 1 << irq, CPU_CLEAR);
74 status &= ~(1 << irq); 77 status &= ~(1 << irq);
75 generic_handle_irq(irq_find_mapping(b->domain, irq)); 78 generic_handle_irq(irq_find_mapping(b->domain, irq));
76 } while (status); 79 } while (status);
@@ -85,12 +88,12 @@ static void brcmstb_l2_intc_suspend(struct irq_data *d)
85 88
86 irq_gc_lock(gc); 89 irq_gc_lock(gc);
87 /* Save the current mask */ 90 /* Save the current mask */
88 b->saved_mask = __raw_readl(b->base + CPU_MASK_STATUS); 91 b->saved_mask = irq_reg_readl(gc, CPU_MASK_STATUS);
89 92
90 if (b->can_wake) { 93 if (b->can_wake) {
91 /* Program the wakeup mask */ 94 /* Program the wakeup mask */
92 __raw_writel(~gc->wake_active, b->base + CPU_MASK_SET); 95 irq_reg_writel(gc, ~gc->wake_active, CPU_MASK_SET);
93 __raw_writel(gc->wake_active, b->base + CPU_MASK_CLEAR); 96 irq_reg_writel(gc, gc->wake_active, CPU_MASK_CLEAR);
94 } 97 }
95 irq_gc_unlock(gc); 98 irq_gc_unlock(gc);
96} 99}
@@ -102,11 +105,11 @@ static void brcmstb_l2_intc_resume(struct irq_data *d)
102 105
103 irq_gc_lock(gc); 106 irq_gc_lock(gc);
104 /* Clear unmasked non-wakeup interrupts */ 107 /* Clear unmasked non-wakeup interrupts */
105 __raw_writel(~b->saved_mask & ~gc->wake_active, b->base + CPU_CLEAR); 108 irq_reg_writel(gc, ~b->saved_mask & ~gc->wake_active, CPU_CLEAR);
106 109
107 /* Restore the saved mask */ 110 /* Restore the saved mask */
108 __raw_writel(b->saved_mask, b->base + CPU_MASK_SET); 111 irq_reg_writel(gc, b->saved_mask, CPU_MASK_SET);
109 __raw_writel(~b->saved_mask, b->base + CPU_MASK_CLEAR); 112 irq_reg_writel(gc, ~b->saved_mask, CPU_MASK_CLEAR);
110 irq_gc_unlock(gc); 113 irq_gc_unlock(gc);
111} 114}
112 115
@@ -118,6 +121,7 @@ int __init brcmstb_l2_intc_of_init(struct device_node *np,
118 struct irq_chip_generic *gc; 121 struct irq_chip_generic *gc;
119 struct irq_chip_type *ct; 122 struct irq_chip_type *ct;
120 int ret; 123 int ret;
124 unsigned int flags;
121 125
122 data = kzalloc(sizeof(*data), GFP_KERNEL); 126 data = kzalloc(sizeof(*data), GFP_KERNEL);
123 if (!data) 127 if (!data)
@@ -131,13 +135,13 @@ int __init brcmstb_l2_intc_of_init(struct device_node *np,
131 } 135 }
132 136
133 /* Disable all interrupts by default */ 137 /* Disable all interrupts by default */
134 __raw_writel(0xffffffff, data->base + CPU_MASK_SET); 138 writel(0xffffffff, data->base + CPU_MASK_SET);
135 __raw_writel(0xffffffff, data->base + CPU_CLEAR); 139 writel(0xffffffff, data->base + CPU_CLEAR);
136 140
137 data->parent_irq = irq_of_parse_and_map(np, 0); 141 data->parent_irq = irq_of_parse_and_map(np, 0);
138 if (data->parent_irq < 0) { 142 if (!data->parent_irq) {
139 pr_err("failed to find parent interrupt\n"); 143 pr_err("failed to find parent interrupt\n");
140 ret = data->parent_irq; 144 ret = -EINVAL;
141 goto out_unmap; 145 goto out_unmap;
142 } 146 }
143 147
@@ -148,9 +152,16 @@ int __init brcmstb_l2_intc_of_init(struct device_node *np,
148 goto out_unmap; 152 goto out_unmap;
149 } 153 }
150 154
155 /* MIPS chips strapped for BE will automagically configure the
156 * peripheral registers for CPU-native byte order.
157 */
158 flags = 0;
159 if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
160 flags |= IRQ_GC_BE_IO;
161
151 /* Allocate a single Generic IRQ chip for this node */ 162 /* Allocate a single Generic IRQ chip for this node */
152 ret = irq_alloc_domain_generic_chips(data->domain, 32, 1, 163 ret = irq_alloc_domain_generic_chips(data->domain, 32, 1,
153 np->full_name, handle_edge_irq, clr, 0, 0); 164 np->full_name, handle_edge_irq, clr, 0, flags);
154 if (ret) { 165 if (ret) {
155 pr_err("failed to allocate generic irq chip\n"); 166 pr_err("failed to allocate generic irq chip\n");
156 goto out_free_domain; 167 goto out_free_domain;
diff --git a/drivers/irqchip/irq-dw-apb-ictl.c b/drivers/irqchip/irq-dw-apb-ictl.c
index 31e231e1f566..53bb7326a60a 100644
--- a/drivers/irqchip/irq-dw-apb-ictl.c
+++ b/drivers/irqchip/irq-dw-apb-ictl.c
@@ -50,6 +50,21 @@ static void dw_apb_ictl_handler(unsigned int irq, struct irq_desc *desc)
50 chained_irq_exit(chip, desc); 50 chained_irq_exit(chip, desc);
51} 51}
52 52
53#ifdef CONFIG_PM
54static void dw_apb_ictl_resume(struct irq_data *d)
55{
56 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
57 struct irq_chip_type *ct = irq_data_get_chip_type(d);
58
59 irq_gc_lock(gc);
60 writel_relaxed(~0, gc->reg_base + ct->regs.enable);
61 writel_relaxed(*ct->mask_cache, gc->reg_base + ct->regs.mask);
62 irq_gc_unlock(gc);
63}
64#else
65#define dw_apb_ictl_resume NULL
66#endif /* CONFIG_PM */
67
53static int __init dw_apb_ictl_init(struct device_node *np, 68static int __init dw_apb_ictl_init(struct device_node *np,
54 struct device_node *parent) 69 struct device_node *parent)
55{ 70{
@@ -94,16 +109,16 @@ static int __init dw_apb_ictl_init(struct device_node *np,
94 */ 109 */
95 110
96 /* mask and enable all interrupts */ 111 /* mask and enable all interrupts */
97 writel(~0, iobase + APB_INT_MASK_L); 112 writel_relaxed(~0, iobase + APB_INT_MASK_L);
98 writel(~0, iobase + APB_INT_MASK_H); 113 writel_relaxed(~0, iobase + APB_INT_MASK_H);
99 writel(~0, iobase + APB_INT_ENABLE_L); 114 writel_relaxed(~0, iobase + APB_INT_ENABLE_L);
100 writel(~0, iobase + APB_INT_ENABLE_H); 115 writel_relaxed(~0, iobase + APB_INT_ENABLE_H);
101 116
102 reg = readl(iobase + APB_INT_ENABLE_H); 117 reg = readl_relaxed(iobase + APB_INT_ENABLE_H);
103 if (reg) 118 if (reg)
104 nrirqs = 32 + fls(reg); 119 nrirqs = 32 + fls(reg);
105 else 120 else
106 nrirqs = fls(readl(iobase + APB_INT_ENABLE_L)); 121 nrirqs = fls(readl_relaxed(iobase + APB_INT_ENABLE_L));
107 122
108 domain = irq_domain_add_linear(np, nrirqs, 123 domain = irq_domain_add_linear(np, nrirqs,
109 &irq_generic_chip_ops, NULL); 124 &irq_generic_chip_ops, NULL);
@@ -115,6 +130,7 @@ static int __init dw_apb_ictl_init(struct device_node *np,
115 130
116 ret = irq_alloc_domain_generic_chips(domain, 32, (nrirqs > 32) ? 2 : 1, 131 ret = irq_alloc_domain_generic_chips(domain, 32, (nrirqs > 32) ? 2 : 1,
117 np->name, handle_level_irq, clr, 0, 132 np->name, handle_level_irq, clr, 0,
133 IRQ_GC_MASK_CACHE_PER_TYPE |
118 IRQ_GC_INIT_MASK_CACHE); 134 IRQ_GC_INIT_MASK_CACHE);
119 if (ret) { 135 if (ret) {
120 pr_err("%s: unable to alloc irq domain gc\n", np->full_name); 136 pr_err("%s: unable to alloc irq domain gc\n", np->full_name);
@@ -126,13 +142,17 @@ static int __init dw_apb_ictl_init(struct device_node *np,
126 gc->reg_base = iobase; 142 gc->reg_base = iobase;
127 143
128 gc->chip_types[0].regs.mask = APB_INT_MASK_L; 144 gc->chip_types[0].regs.mask = APB_INT_MASK_L;
145 gc->chip_types[0].regs.enable = APB_INT_ENABLE_L;
129 gc->chip_types[0].chip.irq_mask = irq_gc_mask_set_bit; 146 gc->chip_types[0].chip.irq_mask = irq_gc_mask_set_bit;
130 gc->chip_types[0].chip.irq_unmask = irq_gc_mask_clr_bit; 147 gc->chip_types[0].chip.irq_unmask = irq_gc_mask_clr_bit;
148 gc->chip_types[0].chip.irq_resume = dw_apb_ictl_resume;
131 149
132 if (nrirqs > 32) { 150 if (nrirqs > 32) {
133 gc->chip_types[1].regs.mask = APB_INT_MASK_H; 151 gc->chip_types[1].regs.mask = APB_INT_MASK_H;
152 gc->chip_types[1].regs.enable = APB_INT_ENABLE_H;
134 gc->chip_types[1].chip.irq_mask = irq_gc_mask_set_bit; 153 gc->chip_types[1].chip.irq_mask = irq_gc_mask_set_bit;
135 gc->chip_types[1].chip.irq_unmask = irq_gc_mask_clr_bit; 154 gc->chip_types[1].chip.irq_unmask = irq_gc_mask_clr_bit;
155 gc->chip_types[1].chip.irq_resume = dw_apb_ictl_resume;
136 } 156 }
137 157
138 irq_set_handler_data(irq, gc); 158 irq_set_handler_data(irq, gc);
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 38493ff28fa5..7f9be0785c6a 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -1041,6 +1041,8 @@ gic_of_init(struct device_node *node, struct device_node *parent)
1041 return 0; 1041 return 0;
1042} 1042}
1043IRQCHIP_DECLARE(gic_400, "arm,gic-400", gic_of_init); 1043IRQCHIP_DECLARE(gic_400, "arm,gic-400", gic_of_init);
1044IRQCHIP_DECLARE(arm11mp_gic, "arm,arm11mp-gic", gic_of_init);
1045IRQCHIP_DECLARE(arm1176jzf_dc_gic, "arm,arm1176jzf-devchip-gic", gic_of_init);
1044IRQCHIP_DECLARE(cortex_a15_gic, "arm,cortex-a15-gic", gic_of_init); 1046IRQCHIP_DECLARE(cortex_a15_gic, "arm,cortex-a15-gic", gic_of_init);
1045IRQCHIP_DECLARE(cortex_a9_gic, "arm,cortex-a9-gic", gic_of_init); 1047IRQCHIP_DECLARE(cortex_a9_gic, "arm,cortex-a9-gic", gic_of_init);
1046IRQCHIP_DECLARE(cortex_a7_gic, "arm,cortex-a7-gic", gic_of_init); 1048IRQCHIP_DECLARE(cortex_a7_gic, "arm,cortex-a7-gic", gic_of_init);
diff --git a/drivers/irqchip/irq-hip04.c b/drivers/irqchip/irq-hip04.c
index 9c8f833522e6..29b8f21b74d0 100644
--- a/drivers/irqchip/irq-hip04.c
+++ b/drivers/irqchip/irq-hip04.c
@@ -176,8 +176,7 @@ static void __exception_irq_entry hip04_handle_irq(struct pt_regs *regs)
176 irqnr = irqstat & GICC_IAR_INT_ID_MASK; 176 irqnr = irqstat & GICC_IAR_INT_ID_MASK;
177 177
178 if (likely(irqnr > 15 && irqnr <= HIP04_MAX_IRQS)) { 178 if (likely(irqnr > 15 && irqnr <= HIP04_MAX_IRQS)) {
179 irqnr = irq_find_mapping(hip04_data.domain, irqnr); 179 handle_domain_irq(hip04_data.domain, irqnr, regs);
180 handle_IRQ(irqnr, regs);
181 continue; 180 continue;
182 } 181 }
183 if (irqnr < 16) { 182 if (irqnr < 16) {
diff --git a/drivers/irqchip/irq-sunxi-nmi.c b/drivers/irqchip/irq-sunxi-nmi.c
index 12f547a44ae4..4a9ce5b50c5b 100644
--- a/drivers/irqchip/irq-sunxi-nmi.c
+++ b/drivers/irqchip/irq-sunxi-nmi.c
@@ -50,12 +50,12 @@ static struct sunxi_sc_nmi_reg_offs sun6i_reg_offs = {
50static inline void sunxi_sc_nmi_write(struct irq_chip_generic *gc, u32 off, 50static inline void sunxi_sc_nmi_write(struct irq_chip_generic *gc, u32 off,
51 u32 val) 51 u32 val)
52{ 52{
53 irq_reg_writel(val, gc->reg_base + off); 53 irq_reg_writel(gc, val, off);
54} 54}
55 55
56static inline u32 sunxi_sc_nmi_read(struct irq_chip_generic *gc, u32 off) 56static inline u32 sunxi_sc_nmi_read(struct irq_chip_generic *gc, u32 off)
57{ 57{
58 return irq_reg_readl(gc->reg_base + off); 58 return irq_reg_readl(gc, off);
59} 59}
60 60
61static void sunxi_sc_nmi_handle_irq(unsigned int irq, struct irq_desc *desc) 61static void sunxi_sc_nmi_handle_irq(unsigned int irq, struct irq_desc *desc)
diff --git a/drivers/irqchip/irq-tb10x.c b/drivers/irqchip/irq-tb10x.c
index 7c44c99bf1f2..accc20036a3c 100644
--- a/drivers/irqchip/irq-tb10x.c
+++ b/drivers/irqchip/irq-tb10x.c
@@ -43,12 +43,12 @@
43static inline void ab_irqctl_writereg(struct irq_chip_generic *gc, u32 reg, 43static inline void ab_irqctl_writereg(struct irq_chip_generic *gc, u32 reg,
44 u32 val) 44 u32 val)
45{ 45{
46 irq_reg_writel(val, gc->reg_base + reg); 46 irq_reg_writel(gc, val, reg);
47} 47}
48 48
49static inline u32 ab_irqctl_readreg(struct irq_chip_generic *gc, u32 reg) 49static inline u32 ab_irqctl_readreg(struct irq_chip_generic *gc, u32 reg)
50{ 50{
51 return irq_reg_readl(gc->reg_base + reg); 51 return irq_reg_readl(gc, reg);
52} 52}
53 53
54static int tb10x_irq_set_type(struct irq_data *data, unsigned int flow_type) 54static int tb10x_irq_set_type(struct irq_data *data, unsigned int flow_type)
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index b4518c8751c8..868e6fc17cba 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -12,25 +12,23 @@
12 */ 12 */
13#include <linux/err.h> 13#include <linux/err.h>
14#include <linux/gpio.h> 14#include <linux/gpio.h>
15#include <linux/gpio/consumer.h>
15#include <linux/kernel.h> 16#include <linux/kernel.h>
16#include <linux/leds.h> 17#include <linux/leds.h>
17#include <linux/module.h> 18#include <linux/module.h>
18#include <linux/of.h>
19#include <linux/of_gpio.h>
20#include <linux/of_platform.h>
21#include <linux/platform_device.h> 19#include <linux/platform_device.h>
20#include <linux/property.h>
22#include <linux/slab.h> 21#include <linux/slab.h>
23#include <linux/workqueue.h> 22#include <linux/workqueue.h>
24 23
25struct gpio_led_data { 24struct gpio_led_data {
26 struct led_classdev cdev; 25 struct led_classdev cdev;
27 unsigned gpio; 26 struct gpio_desc *gpiod;
28 struct work_struct work; 27 struct work_struct work;
29 u8 new_level; 28 u8 new_level;
30 u8 can_sleep; 29 u8 can_sleep;
31 u8 active_low;
32 u8 blinking; 30 u8 blinking;
33 int (*platform_gpio_blink_set)(unsigned gpio, int state, 31 int (*platform_gpio_blink_set)(struct gpio_desc *desc, int state,
34 unsigned long *delay_on, unsigned long *delay_off); 32 unsigned long *delay_on, unsigned long *delay_off);
35}; 33};
36 34
@@ -40,12 +38,11 @@ static void gpio_led_work(struct work_struct *work)
40 container_of(work, struct gpio_led_data, work); 38 container_of(work, struct gpio_led_data, work);
41 39
42 if (led_dat->blinking) { 40 if (led_dat->blinking) {
43 led_dat->platform_gpio_blink_set(led_dat->gpio, 41 led_dat->platform_gpio_blink_set(led_dat->gpiod,
44 led_dat->new_level, 42 led_dat->new_level, NULL, NULL);
45 NULL, NULL);
46 led_dat->blinking = 0; 43 led_dat->blinking = 0;
47 } else 44 } else
48 gpio_set_value_cansleep(led_dat->gpio, led_dat->new_level); 45 gpiod_set_value_cansleep(led_dat->gpiod, led_dat->new_level);
49} 46}
50 47
51static void gpio_led_set(struct led_classdev *led_cdev, 48static void gpio_led_set(struct led_classdev *led_cdev,
@@ -60,9 +57,6 @@ static void gpio_led_set(struct led_classdev *led_cdev,
60 else 57 else
61 level = 1; 58 level = 1;
62 59
63 if (led_dat->active_low)
64 level = !level;
65
66 /* Setting GPIOs with I2C/etc requires a task context, and we don't 60 /* Setting GPIOs with I2C/etc requires a task context, and we don't
67 * seem to have a reliable way to know if we're already in one; so 61 * seem to have a reliable way to know if we're already in one; so
68 * let's just assume the worst. 62 * let's just assume the worst.
@@ -72,11 +66,11 @@ static void gpio_led_set(struct led_classdev *led_cdev,
72 schedule_work(&led_dat->work); 66 schedule_work(&led_dat->work);
73 } else { 67 } else {
74 if (led_dat->blinking) { 68 if (led_dat->blinking) {
75 led_dat->platform_gpio_blink_set(led_dat->gpio, level, 69 led_dat->platform_gpio_blink_set(led_dat->gpiod, level,
76 NULL, NULL); 70 NULL, NULL);
77 led_dat->blinking = 0; 71 led_dat->blinking = 0;
78 } else 72 } else
79 gpio_set_value(led_dat->gpio, level); 73 gpiod_set_value(led_dat->gpiod, level);
80 } 74 }
81} 75}
82 76
@@ -87,34 +81,49 @@ static int gpio_blink_set(struct led_classdev *led_cdev,
87 container_of(led_cdev, struct gpio_led_data, cdev); 81 container_of(led_cdev, struct gpio_led_data, cdev);
88 82
89 led_dat->blinking = 1; 83 led_dat->blinking = 1;
90 return led_dat->platform_gpio_blink_set(led_dat->gpio, GPIO_LED_BLINK, 84 return led_dat->platform_gpio_blink_set(led_dat->gpiod, GPIO_LED_BLINK,
91 delay_on, delay_off); 85 delay_on, delay_off);
92} 86}
93 87
94static int create_gpio_led(const struct gpio_led *template, 88static int create_gpio_led(const struct gpio_led *template,
95 struct gpio_led_data *led_dat, struct device *parent, 89 struct gpio_led_data *led_dat, struct device *parent,
96 int (*blink_set)(unsigned, int, unsigned long *, unsigned long *)) 90 int (*blink_set)(struct gpio_desc *, int, unsigned long *,
91 unsigned long *))
97{ 92{
98 int ret, state; 93 int ret, state;
99 94
100 led_dat->gpio = -1; 95 led_dat->gpiod = template->gpiod;
96 if (!led_dat->gpiod) {
97 /*
98 * This is the legacy code path for platform code that
99 * still uses GPIO numbers. Ultimately we would like to get
100 * rid of this block completely.
101 */
102 unsigned long flags = 0;
103
104 /* skip leds that aren't available */
105 if (!gpio_is_valid(template->gpio)) {
106 dev_info(parent, "Skipping unavailable LED gpio %d (%s)\n",
107 template->gpio, template->name);
108 return 0;
109 }
101 110
102 /* skip leds that aren't available */ 111 if (template->active_low)
103 if (!gpio_is_valid(template->gpio)) { 112 flags |= GPIOF_ACTIVE_LOW;
104 dev_info(parent, "Skipping unavailable LED gpio %d (%s)\n",
105 template->gpio, template->name);
106 return 0;
107 }
108 113
109 ret = devm_gpio_request(parent, template->gpio, template->name); 114 ret = devm_gpio_request_one(parent, template->gpio, flags,
110 if (ret < 0) 115 template->name);
111 return ret; 116 if (ret < 0)
117 return ret;
118
119 led_dat->gpiod = gpio_to_desc(template->gpio);
120 if (IS_ERR(led_dat->gpiod))
121 return PTR_ERR(led_dat->gpiod);
122 }
112 123
113 led_dat->cdev.name = template->name; 124 led_dat->cdev.name = template->name;
114 led_dat->cdev.default_trigger = template->default_trigger; 125 led_dat->cdev.default_trigger = template->default_trigger;
115 led_dat->gpio = template->gpio; 126 led_dat->can_sleep = gpiod_cansleep(led_dat->gpiod);
116 led_dat->can_sleep = gpio_cansleep(template->gpio);
117 led_dat->active_low = template->active_low;
118 led_dat->blinking = 0; 127 led_dat->blinking = 0;
119 if (blink_set) { 128 if (blink_set) {
120 led_dat->platform_gpio_blink_set = blink_set; 129 led_dat->platform_gpio_blink_set = blink_set;
@@ -122,30 +131,24 @@ static int create_gpio_led(const struct gpio_led *template,
122 } 131 }
123 led_dat->cdev.brightness_set = gpio_led_set; 132 led_dat->cdev.brightness_set = gpio_led_set;
124 if (template->default_state == LEDS_GPIO_DEFSTATE_KEEP) 133 if (template->default_state == LEDS_GPIO_DEFSTATE_KEEP)
125 state = !!gpio_get_value_cansleep(led_dat->gpio) ^ led_dat->active_low; 134 state = !!gpiod_get_value_cansleep(led_dat->gpiod);
126 else 135 else
127 state = (template->default_state == LEDS_GPIO_DEFSTATE_ON); 136 state = (template->default_state == LEDS_GPIO_DEFSTATE_ON);
128 led_dat->cdev.brightness = state ? LED_FULL : LED_OFF; 137 led_dat->cdev.brightness = state ? LED_FULL : LED_OFF;
129 if (!template->retain_state_suspended) 138 if (!template->retain_state_suspended)
130 led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME; 139 led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
131 140
132 ret = gpio_direction_output(led_dat->gpio, led_dat->active_low ^ state); 141 ret = gpiod_direction_output(led_dat->gpiod, state);
133 if (ret < 0) 142 if (ret < 0)
134 return ret; 143 return ret;
135 144
136 INIT_WORK(&led_dat->work, gpio_led_work); 145 INIT_WORK(&led_dat->work, gpio_led_work);
137 146
138 ret = led_classdev_register(parent, &led_dat->cdev); 147 return led_classdev_register(parent, &led_dat->cdev);
139 if (ret < 0)
140 return ret;
141
142 return 0;
143} 148}
144 149
145static void delete_gpio_led(struct gpio_led_data *led) 150static void delete_gpio_led(struct gpio_led_data *led)
146{ 151{
147 if (!gpio_is_valid(led->gpio))
148 return;
149 led_classdev_unregister(&led->cdev); 152 led_classdev_unregister(&led->cdev);
150 cancel_work_sync(&led->work); 153 cancel_work_sync(&led->work);
151} 154}
@@ -161,40 +164,47 @@ static inline int sizeof_gpio_leds_priv(int num_leds)
161 (sizeof(struct gpio_led_data) * num_leds); 164 (sizeof(struct gpio_led_data) * num_leds);
162} 165}
163 166
164/* Code to create from OpenFirmware platform devices */ 167static struct gpio_leds_priv *gpio_leds_create(struct platform_device *pdev)
165#ifdef CONFIG_OF_GPIO
166static struct gpio_leds_priv *gpio_leds_create_of(struct platform_device *pdev)
167{ 168{
168 struct device_node *np = pdev->dev.of_node, *child; 169 struct device *dev = &pdev->dev;
170 struct fwnode_handle *child;
169 struct gpio_leds_priv *priv; 171 struct gpio_leds_priv *priv;
170 int count, ret; 172 int count, ret;
173 struct device_node *np;
171 174
172 /* count LEDs in this device, so we know how much to allocate */ 175 count = device_get_child_node_count(dev);
173 count = of_get_available_child_count(np);
174 if (!count) 176 if (!count)
175 return ERR_PTR(-ENODEV); 177 return ERR_PTR(-ENODEV);
176 178
177 for_each_available_child_of_node(np, child) 179 priv = devm_kzalloc(dev, sizeof_gpio_leds_priv(count), GFP_KERNEL);
178 if (of_get_gpio(child, 0) == -EPROBE_DEFER)
179 return ERR_PTR(-EPROBE_DEFER);
180
181 priv = devm_kzalloc(&pdev->dev, sizeof_gpio_leds_priv(count),
182 GFP_KERNEL);
183 if (!priv) 180 if (!priv)
184 return ERR_PTR(-ENOMEM); 181 return ERR_PTR(-ENOMEM);
185 182
186 for_each_available_child_of_node(np, child) { 183 device_for_each_child_node(dev, child) {
187 struct gpio_led led = {}; 184 struct gpio_led led = {};
188 enum of_gpio_flags flags; 185 const char *state = NULL;
189 const char *state; 186
190 187 led.gpiod = devm_get_gpiod_from_child(dev, child);
191 led.gpio = of_get_gpio_flags(child, 0, &flags); 188 if (IS_ERR(led.gpiod)) {
192 led.active_low = flags & OF_GPIO_ACTIVE_LOW; 189 fwnode_handle_put(child);
193 led.name = of_get_property(child, "label", NULL) ? : child->name; 190 goto err;
194 led.default_trigger = 191 }
195 of_get_property(child, "linux,default-trigger", NULL); 192
196 state = of_get_property(child, "default-state", NULL); 193 np = of_node(child);
197 if (state) { 194
195 if (fwnode_property_present(child, "label")) {
196 fwnode_property_read_string(child, "label", &led.name);
197 } else {
198 if (IS_ENABLED(CONFIG_OF) && !led.name && np)
199 led.name = np->name;
200 if (!led.name)
201 return ERR_PTR(-EINVAL);
202 }
203 fwnode_property_read_string(child, "linux,default-trigger",
204 &led.default_trigger);
205
206 if (!fwnode_property_read_string(child, "linux,default_state",
207 &state)) {
198 if (!strcmp(state, "keep")) 208 if (!strcmp(state, "keep"))
199 led.default_state = LEDS_GPIO_DEFSTATE_KEEP; 209 led.default_state = LEDS_GPIO_DEFSTATE_KEEP;
200 else if (!strcmp(state, "on")) 210 else if (!strcmp(state, "on"))
@@ -203,13 +213,13 @@ static struct gpio_leds_priv *gpio_leds_create_of(struct platform_device *pdev)
203 led.default_state = LEDS_GPIO_DEFSTATE_OFF; 213 led.default_state = LEDS_GPIO_DEFSTATE_OFF;
204 } 214 }
205 215
206 if (of_get_property(child, "retain-state-suspended", NULL)) 216 if (fwnode_property_present(child, "retain-state-suspended"))
207 led.retain_state_suspended = 1; 217 led.retain_state_suspended = 1;
208 218
209 ret = create_gpio_led(&led, &priv->leds[priv->num_leds++], 219 ret = create_gpio_led(&led, &priv->leds[priv->num_leds++],
210 &pdev->dev, NULL); 220 dev, NULL);
211 if (ret < 0) { 221 if (ret < 0) {
212 of_node_put(child); 222 fwnode_handle_put(child);
213 goto err; 223 goto err;
214 } 224 }
215 } 225 }
@@ -228,12 +238,6 @@ static const struct of_device_id of_gpio_leds_match[] = {
228}; 238};
229 239
230MODULE_DEVICE_TABLE(of, of_gpio_leds_match); 240MODULE_DEVICE_TABLE(of, of_gpio_leds_match);
231#else /* CONFIG_OF_GPIO */
232static struct gpio_leds_priv *gpio_leds_create_of(struct platform_device *pdev)
233{
234 return ERR_PTR(-ENODEV);
235}
236#endif /* CONFIG_OF_GPIO */
237 241
238static int gpio_led_probe(struct platform_device *pdev) 242static int gpio_led_probe(struct platform_device *pdev)
239{ 243{
@@ -261,7 +265,7 @@ static int gpio_led_probe(struct platform_device *pdev)
261 } 265 }
262 } 266 }
263 } else { 267 } else {
264 priv = gpio_leds_create_of(pdev); 268 priv = gpio_leds_create(pdev);
265 if (IS_ERR(priv)) 269 if (IS_ERR(priv))
266 return PTR_ERR(priv); 270 return PTR_ERR(priv);
267 } 271 }
@@ -288,7 +292,7 @@ static struct platform_driver gpio_led_driver = {
288 .driver = { 292 .driver = {
289 .name = "leds-gpio", 293 .name = "leds-gpio",
290 .owner = THIS_MODULE, 294 .owner = THIS_MODULE,
291 .of_match_table = of_match_ptr(of_gpio_leds_match), 295 .of_match_table = of_gpio_leds_match,
292 }, 296 },
293}; 297};
294 298
diff --git a/drivers/lguest/lguest_device.c b/drivers/lguest/lguest_device.c
index d0a1d8a45c81..89088d6538fd 100644
--- a/drivers/lguest/lguest_device.c
+++ b/drivers/lguest/lguest_device.c
@@ -94,7 +94,7 @@ static unsigned desc_size(const struct lguest_device_desc *desc)
94} 94}
95 95
96/* This gets the device's feature bits. */ 96/* This gets the device's feature bits. */
97static u32 lg_get_features(struct virtio_device *vdev) 97static u64 lg_get_features(struct virtio_device *vdev)
98{ 98{
99 unsigned int i; 99 unsigned int i;
100 u32 features = 0; 100 u32 features = 0;
@@ -126,7 +126,7 @@ static void status_notify(struct virtio_device *vdev)
126 * sorted out, this routine is called so we can tell the Host which features we 126 * sorted out, this routine is called so we can tell the Host which features we
127 * understand and accept. 127 * understand and accept.
128 */ 128 */
129static void lg_finalize_features(struct virtio_device *vdev) 129static int lg_finalize_features(struct virtio_device *vdev)
130{ 130{
131 unsigned int i, bits; 131 unsigned int i, bits;
132 struct lguest_device_desc *desc = to_lgdev(vdev)->desc; 132 struct lguest_device_desc *desc = to_lgdev(vdev)->desc;
@@ -136,20 +136,25 @@ static void lg_finalize_features(struct virtio_device *vdev)
136 /* Give virtio_ring a chance to accept features. */ 136 /* Give virtio_ring a chance to accept features. */
137 vring_transport_features(vdev); 137 vring_transport_features(vdev);
138 138
139 /* Make sure we don't have any features > 32 bits! */
140 BUG_ON((u32)vdev->features != vdev->features);
141
139 /* 142 /*
140 * The vdev->feature array is a Linux bitmask: this isn't the same as a 143 * Since lguest is currently x86-only, we're little-endian. That
141 * the simple array of bits used by lguest devices for features. So we 144 * means we could just memcpy. But it's not time critical, and in
142 * do this slow, manual conversion which is completely general. 145 * case someone copies this code, we do it the slow, obvious way.
143 */ 146 */
144 memset(out_features, 0, desc->feature_len); 147 memset(out_features, 0, desc->feature_len);
145 bits = min_t(unsigned, desc->feature_len, sizeof(vdev->features)) * 8; 148 bits = min_t(unsigned, desc->feature_len, sizeof(vdev->features)) * 8;
146 for (i = 0; i < bits; i++) { 149 for (i = 0; i < bits; i++) {
147 if (test_bit(i, vdev->features)) 150 if (__virtio_test_bit(vdev, i))
148 out_features[i / 8] |= (1 << (i % 8)); 151 out_features[i / 8] |= (1 << (i % 8));
149 } 152 }
150 153
151 /* Tell Host we've finished with this device's feature negotiation */ 154 /* Tell Host we've finished with this device's feature negotiation */
152 status_notify(vdev); 155 status_notify(vdev);
156
157 return 0;
153} 158}
154 159
155/* Once they've found a field, getting a copy of it is easy. */ 160/* Once they've found a field, getting a copy of it is easy. */
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index 9fd9c6717e0c..c04fed9eb15d 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -33,4 +33,16 @@ config OMAP_MBOX_KFIFO_SIZE
33 Specify the default size of mailbox's kfifo buffers (bytes). 33 Specify the default size of mailbox's kfifo buffers (bytes).
34 This can also be changed at runtime (via the mbox_kfifo_size 34 This can also be changed at runtime (via the mbox_kfifo_size
35 module parameter). 35 module parameter).
36
37config PCC
38 bool "Platform Communication Channel Driver"
39 depends on ACPI
40 help
41 ACPI 5.0+ spec defines a generic mode of communication
42 between the OS and a platform such as the BMC. This medium
43 (PCC) is typically used by CPPC (ACPI CPU Performance management),
44 RAS (ACPI reliability protocol) and MPST (ACPI Memory power
45 states). Select this driver if your platform implements the
46 PCC clients mentioned above.
47
36endif 48endif
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index 94ed7cefb14d..dd412c22208b 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -5,3 +5,5 @@ obj-$(CONFIG_MAILBOX) += mailbox.o
5obj-$(CONFIG_PL320_MBOX) += pl320-ipc.o 5obj-$(CONFIG_PL320_MBOX) += pl320-ipc.o
6 6
7obj-$(CONFIG_OMAP2PLUS_MBOX) += omap-mailbox.o 7obj-$(CONFIG_OMAP2PLUS_MBOX) += omap-mailbox.o
8
9obj-$(CONFIG_PCC) += pcc.o
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
index afcb430508ec..59aad4d5da53 100644
--- a/drivers/mailbox/mailbox.c
+++ b/drivers/mailbox/mailbox.c
@@ -21,13 +21,13 @@
21#include <linux/mailbox_client.h> 21#include <linux/mailbox_client.h>
22#include <linux/mailbox_controller.h> 22#include <linux/mailbox_controller.h>
23 23
24#define TXDONE_BY_IRQ BIT(0) /* controller has remote RTR irq */ 24#include "mailbox.h"
25#define TXDONE_BY_POLL BIT(1) /* controller can read status of last TX */
26#define TXDONE_BY_ACK BIT(2) /* S/W ACK recevied by Client ticks the TX */
27 25
28static LIST_HEAD(mbox_cons); 26static LIST_HEAD(mbox_cons);
29static DEFINE_MUTEX(con_mutex); 27static DEFINE_MUTEX(con_mutex);
30 28
29static void poll_txdone(unsigned long data);
30
31static int add_to_rbuf(struct mbox_chan *chan, void *mssg) 31static int add_to_rbuf(struct mbox_chan *chan, void *mssg)
32{ 32{
33 int idx; 33 int idx;
@@ -60,7 +60,7 @@ static void msg_submit(struct mbox_chan *chan)
60 unsigned count, idx; 60 unsigned count, idx;
61 unsigned long flags; 61 unsigned long flags;
62 void *data; 62 void *data;
63 int err; 63 int err = -EBUSY;
64 64
65 spin_lock_irqsave(&chan->lock, flags); 65 spin_lock_irqsave(&chan->lock, flags);
66 66
@@ -76,6 +76,8 @@ static void msg_submit(struct mbox_chan *chan)
76 76
77 data = chan->msg_data[idx]; 77 data = chan->msg_data[idx];
78 78
79 if (chan->cl->tx_prepare)
80 chan->cl->tx_prepare(chan->cl, data);
79 /* Try to submit a message to the MBOX controller */ 81 /* Try to submit a message to the MBOX controller */
80 err = chan->mbox->ops->send_data(chan, data); 82 err = chan->mbox->ops->send_data(chan, data);
81 if (!err) { 83 if (!err) {
@@ -84,6 +86,9 @@ static void msg_submit(struct mbox_chan *chan)
84 } 86 }
85exit: 87exit:
86 spin_unlock_irqrestore(&chan->lock, flags); 88 spin_unlock_irqrestore(&chan->lock, flags);
89
90 if (!err && chan->txdone_method == TXDONE_BY_POLL)
91 poll_txdone((unsigned long)chan->mbox);
87} 92}
88 93
89static void tx_tick(struct mbox_chan *chan, int r) 94static void tx_tick(struct mbox_chan *chan, int r)
@@ -117,10 +122,11 @@ static void poll_txdone(unsigned long data)
117 struct mbox_chan *chan = &mbox->chans[i]; 122 struct mbox_chan *chan = &mbox->chans[i];
118 123
119 if (chan->active_req && chan->cl) { 124 if (chan->active_req && chan->cl) {
120 resched = true;
121 txdone = chan->mbox->ops->last_tx_done(chan); 125 txdone = chan->mbox->ops->last_tx_done(chan);
122 if (txdone) 126 if (txdone)
123 tx_tick(chan, 0); 127 tx_tick(chan, 0);
128 else
129 resched = true;
124 } 130 }
125 } 131 }
126 132
@@ -252,9 +258,6 @@ int mbox_send_message(struct mbox_chan *chan, void *mssg)
252 258
253 msg_submit(chan); 259 msg_submit(chan);
254 260
255 if (chan->txdone_method == TXDONE_BY_POLL)
256 poll_txdone((unsigned long)chan->mbox);
257
258 if (chan->cl->tx_block && chan->active_req) { 261 if (chan->cl->tx_block && chan->active_req) {
259 unsigned long wait; 262 unsigned long wait;
260 int ret; 263 int ret;
diff --git a/drivers/mailbox/mailbox.h b/drivers/mailbox/mailbox.h
new file mode 100644
index 000000000000..456ba68513bb
--- /dev/null
+++ b/drivers/mailbox/mailbox.h
@@ -0,0 +1,14 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License version 2 as
4 * published by the Free Software Foundation.
5 */
6
7#ifndef __MAILBOX_H
8#define __MAILBOX_H
9
10#define TXDONE_BY_IRQ BIT(0) /* controller has remote RTR irq */
11#define TXDONE_BY_POLL BIT(1) /* controller can read status of last TX */
12#define TXDONE_BY_ACK BIT(2) /* S/W ACK recevied by Client ticks the TX */
13
14#endif /* __MAILBOX_H */
diff --git a/drivers/mailbox/omap-mailbox.c b/drivers/mailbox/omap-mailbox.c
index bcc7ee129276..66b83ca94dcf 100644
--- a/drivers/mailbox/omap-mailbox.c
+++ b/drivers/mailbox/omap-mailbox.c
@@ -29,13 +29,14 @@
29#include <linux/slab.h> 29#include <linux/slab.h>
30#include <linux/kfifo.h> 30#include <linux/kfifo.h>
31#include <linux/err.h> 31#include <linux/err.h>
32#include <linux/notifier.h>
33#include <linux/module.h> 32#include <linux/module.h>
34#include <linux/of_device.h> 33#include <linux/of_device.h>
35#include <linux/platform_device.h> 34#include <linux/platform_device.h>
36#include <linux/pm_runtime.h> 35#include <linux/pm_runtime.h>
37#include <linux/platform_data/mailbox-omap.h> 36#include <linux/platform_data/mailbox-omap.h>
38#include <linux/omap-mailbox.h> 37#include <linux/omap-mailbox.h>
38#include <linux/mailbox_controller.h>
39#include <linux/mailbox_client.h>
39 40
40#define MAILBOX_REVISION 0x000 41#define MAILBOX_REVISION 0x000
41#define MAILBOX_MESSAGE(m) (0x040 + 4 * (m)) 42#define MAILBOX_MESSAGE(m) (0x040 + 4 * (m))
@@ -80,7 +81,6 @@ struct omap_mbox_queue {
80 spinlock_t lock; 81 spinlock_t lock;
81 struct kfifo fifo; 82 struct kfifo fifo;
82 struct work_struct work; 83 struct work_struct work;
83 struct tasklet_struct tasklet;
84 struct omap_mbox *mbox; 84 struct omap_mbox *mbox;
85 bool full; 85 bool full;
86}; 86};
@@ -92,6 +92,7 @@ struct omap_mbox_device {
92 u32 num_users; 92 u32 num_users;
93 u32 num_fifos; 93 u32 num_fifos;
94 struct omap_mbox **mboxes; 94 struct omap_mbox **mboxes;
95 struct mbox_controller controller;
95 struct list_head elem; 96 struct list_head elem;
96}; 97};
97 98
@@ -110,15 +111,14 @@ struct omap_mbox_fifo_info {
110struct omap_mbox { 111struct omap_mbox {
111 const char *name; 112 const char *name;
112 int irq; 113 int irq;
113 struct omap_mbox_queue *txq, *rxq; 114 struct omap_mbox_queue *rxq;
114 struct device *dev; 115 struct device *dev;
115 struct omap_mbox_device *parent; 116 struct omap_mbox_device *parent;
116 struct omap_mbox_fifo tx_fifo; 117 struct omap_mbox_fifo tx_fifo;
117 struct omap_mbox_fifo rx_fifo; 118 struct omap_mbox_fifo rx_fifo;
118 u32 ctx[OMAP4_MBOX_NR_REGS]; 119 u32 ctx[OMAP4_MBOX_NR_REGS];
119 u32 intr_type; 120 u32 intr_type;
120 int use_count; 121 struct mbox_chan *chan;
121 struct blocking_notifier_head notifier;
122}; 122};
123 123
124/* global variables for the mailbox devices */ 124/* global variables for the mailbox devices */
@@ -129,6 +129,14 @@ static unsigned int mbox_kfifo_size = CONFIG_OMAP_MBOX_KFIFO_SIZE;
129module_param(mbox_kfifo_size, uint, S_IRUGO); 129module_param(mbox_kfifo_size, uint, S_IRUGO);
130MODULE_PARM_DESC(mbox_kfifo_size, "Size of omap's mailbox kfifo (bytes)"); 130MODULE_PARM_DESC(mbox_kfifo_size, "Size of omap's mailbox kfifo (bytes)");
131 131
132static struct omap_mbox *mbox_chan_to_omap_mbox(struct mbox_chan *chan)
133{
134 if (!chan || !chan->con_priv)
135 return NULL;
136
137 return (struct omap_mbox *)chan->con_priv;
138}
139
132static inline 140static inline
133unsigned int mbox_read_reg(struct omap_mbox_device *mdev, size_t ofs) 141unsigned int mbox_read_reg(struct omap_mbox_device *mdev, size_t ofs)
134{ 142{
@@ -194,41 +202,14 @@ static int is_mbox_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
194 return (int)(enable & status & bit); 202 return (int)(enable & status & bit);
195} 203}
196 204
197/* 205void omap_mbox_save_ctx(struct mbox_chan *chan)
198 * message sender
199 */
200int omap_mbox_msg_send(struct omap_mbox *mbox, mbox_msg_t msg)
201{
202 struct omap_mbox_queue *mq = mbox->txq;
203 int ret = 0, len;
204
205 spin_lock_bh(&mq->lock);
206
207 if (kfifo_avail(&mq->fifo) < sizeof(msg)) {
208 ret = -ENOMEM;
209 goto out;
210 }
211
212 if (kfifo_is_empty(&mq->fifo) && !mbox_fifo_full(mbox)) {
213 mbox_fifo_write(mbox, msg);
214 goto out;
215 }
216
217 len = kfifo_in(&mq->fifo, (unsigned char *)&msg, sizeof(msg));
218 WARN_ON(len != sizeof(msg));
219
220 tasklet_schedule(&mbox->txq->tasklet);
221
222out:
223 spin_unlock_bh(&mq->lock);
224 return ret;
225}
226EXPORT_SYMBOL(omap_mbox_msg_send);
227
228void omap_mbox_save_ctx(struct omap_mbox *mbox)
229{ 206{
230 int i; 207 int i;
231 int nr_regs; 208 int nr_regs;
209 struct omap_mbox *mbox = mbox_chan_to_omap_mbox(chan);
210
211 if (WARN_ON(!mbox))
212 return;
232 213
233 if (mbox->intr_type) 214 if (mbox->intr_type)
234 nr_regs = OMAP4_MBOX_NR_REGS; 215 nr_regs = OMAP4_MBOX_NR_REGS;
@@ -243,10 +224,14 @@ void omap_mbox_save_ctx(struct omap_mbox *mbox)
243} 224}
244EXPORT_SYMBOL(omap_mbox_save_ctx); 225EXPORT_SYMBOL(omap_mbox_save_ctx);
245 226
246void omap_mbox_restore_ctx(struct omap_mbox *mbox) 227void omap_mbox_restore_ctx(struct mbox_chan *chan)
247{ 228{
248 int i; 229 int i;
249 int nr_regs; 230 int nr_regs;
231 struct omap_mbox *mbox = mbox_chan_to_omap_mbox(chan);
232
233 if (WARN_ON(!mbox))
234 return;
250 235
251 if (mbox->intr_type) 236 if (mbox->intr_type)
252 nr_regs = OMAP4_MBOX_NR_REGS; 237 nr_regs = OMAP4_MBOX_NR_REGS;
@@ -254,14 +239,13 @@ void omap_mbox_restore_ctx(struct omap_mbox *mbox)
254 nr_regs = MBOX_NR_REGS; 239 nr_regs = MBOX_NR_REGS;
255 for (i = 0; i < nr_regs; i++) { 240 for (i = 0; i < nr_regs; i++) {
256 mbox_write_reg(mbox->parent, mbox->ctx[i], i * sizeof(u32)); 241 mbox_write_reg(mbox->parent, mbox->ctx[i], i * sizeof(u32));
257
258 dev_dbg(mbox->dev, "%s: [%02x] %08x\n", __func__, 242 dev_dbg(mbox->dev, "%s: [%02x] %08x\n", __func__,
259 i, mbox->ctx[i]); 243 i, mbox->ctx[i]);
260 } 244 }
261} 245}
262EXPORT_SYMBOL(omap_mbox_restore_ctx); 246EXPORT_SYMBOL(omap_mbox_restore_ctx);
263 247
264void omap_mbox_enable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq) 248static void _omap_mbox_enable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
265{ 249{
266 u32 l; 250 u32 l;
267 struct omap_mbox_fifo *fifo = (irq == IRQ_TX) ? 251 struct omap_mbox_fifo *fifo = (irq == IRQ_TX) ?
@@ -273,9 +257,8 @@ void omap_mbox_enable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
273 l |= bit; 257 l |= bit;
274 mbox_write_reg(mbox->parent, l, irqenable); 258 mbox_write_reg(mbox->parent, l, irqenable);
275} 259}
276EXPORT_SYMBOL(omap_mbox_enable_irq);
277 260
278void omap_mbox_disable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq) 261static void _omap_mbox_disable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
279{ 262{
280 struct omap_mbox_fifo *fifo = (irq == IRQ_TX) ? 263 struct omap_mbox_fifo *fifo = (irq == IRQ_TX) ?
281 &mbox->tx_fifo : &mbox->rx_fifo; 264 &mbox->tx_fifo : &mbox->rx_fifo;
@@ -291,28 +274,28 @@ void omap_mbox_disable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
291 274
292 mbox_write_reg(mbox->parent, bit, irqdisable); 275 mbox_write_reg(mbox->parent, bit, irqdisable);
293} 276}
294EXPORT_SYMBOL(omap_mbox_disable_irq);
295 277
296static void mbox_tx_tasklet(unsigned long tx_data) 278void omap_mbox_enable_irq(struct mbox_chan *chan, omap_mbox_irq_t irq)
297{ 279{
298 struct omap_mbox *mbox = (struct omap_mbox *)tx_data; 280 struct omap_mbox *mbox = mbox_chan_to_omap_mbox(chan);
299 struct omap_mbox_queue *mq = mbox->txq;
300 mbox_msg_t msg;
301 int ret;
302 281
303 while (kfifo_len(&mq->fifo)) { 282 if (WARN_ON(!mbox))
304 if (mbox_fifo_full(mbox)) { 283 return;
305 omap_mbox_enable_irq(mbox, IRQ_TX);
306 break;
307 }
308 284
309 ret = kfifo_out(&mq->fifo, (unsigned char *)&msg, 285 _omap_mbox_enable_irq(mbox, irq);
310 sizeof(msg)); 286}
311 WARN_ON(ret != sizeof(msg)); 287EXPORT_SYMBOL(omap_mbox_enable_irq);
312 288
313 mbox_fifo_write(mbox, msg); 289void omap_mbox_disable_irq(struct mbox_chan *chan, omap_mbox_irq_t irq)
314 } 290{
291 struct omap_mbox *mbox = mbox_chan_to_omap_mbox(chan);
292
293 if (WARN_ON(!mbox))
294 return;
295
296 _omap_mbox_disable_irq(mbox, irq);
315} 297}
298EXPORT_SYMBOL(omap_mbox_disable_irq);
316 299
317/* 300/*
318 * Message receiver(workqueue) 301 * Message receiver(workqueue)
@@ -328,12 +311,11 @@ static void mbox_rx_work(struct work_struct *work)
328 len = kfifo_out(&mq->fifo, (unsigned char *)&msg, sizeof(msg)); 311 len = kfifo_out(&mq->fifo, (unsigned char *)&msg, sizeof(msg));
329 WARN_ON(len != sizeof(msg)); 312 WARN_ON(len != sizeof(msg));
330 313
331 blocking_notifier_call_chain(&mq->mbox->notifier, len, 314 mbox_chan_received_data(mq->mbox->chan, (void *)msg);
332 (void *)msg);
333 spin_lock_irq(&mq->lock); 315 spin_lock_irq(&mq->lock);
334 if (mq->full) { 316 if (mq->full) {
335 mq->full = false; 317 mq->full = false;
336 omap_mbox_enable_irq(mq->mbox, IRQ_RX); 318 _omap_mbox_enable_irq(mq->mbox, IRQ_RX);
337 } 319 }
338 spin_unlock_irq(&mq->lock); 320 spin_unlock_irq(&mq->lock);
339 } 321 }
@@ -344,9 +326,9 @@ static void mbox_rx_work(struct work_struct *work)
344 */ 326 */
345static void __mbox_tx_interrupt(struct omap_mbox *mbox) 327static void __mbox_tx_interrupt(struct omap_mbox *mbox)
346{ 328{
347 omap_mbox_disable_irq(mbox, IRQ_TX); 329 _omap_mbox_disable_irq(mbox, IRQ_TX);
348 ack_mbox_irq(mbox, IRQ_TX); 330 ack_mbox_irq(mbox, IRQ_TX);
349 tasklet_schedule(&mbox->txq->tasklet); 331 mbox_chan_txdone(mbox->chan, 0);
350} 332}
351 333
352static void __mbox_rx_interrupt(struct omap_mbox *mbox) 334static void __mbox_rx_interrupt(struct omap_mbox *mbox)
@@ -357,7 +339,7 @@ static void __mbox_rx_interrupt(struct omap_mbox *mbox)
357 339
358 while (!mbox_fifo_empty(mbox)) { 340 while (!mbox_fifo_empty(mbox)) {
359 if (unlikely(kfifo_avail(&mq->fifo) < sizeof(msg))) { 341 if (unlikely(kfifo_avail(&mq->fifo) < sizeof(msg))) {
360 omap_mbox_disable_irq(mbox, IRQ_RX); 342 _omap_mbox_disable_irq(mbox, IRQ_RX);
361 mq->full = true; 343 mq->full = true;
362 goto nomem; 344 goto nomem;
363 } 345 }
@@ -388,11 +370,13 @@ static irqreturn_t mbox_interrupt(int irq, void *p)
388} 370}
389 371
390static struct omap_mbox_queue *mbox_queue_alloc(struct omap_mbox *mbox, 372static struct omap_mbox_queue *mbox_queue_alloc(struct omap_mbox *mbox,
391 void (*work) (struct work_struct *), 373 void (*work)(struct work_struct *))
392 void (*tasklet)(unsigned long))
393{ 374{
394 struct omap_mbox_queue *mq; 375 struct omap_mbox_queue *mq;
395 376
377 if (!work)
378 return NULL;
379
396 mq = kzalloc(sizeof(struct omap_mbox_queue), GFP_KERNEL); 380 mq = kzalloc(sizeof(struct omap_mbox_queue), GFP_KERNEL);
397 if (!mq) 381 if (!mq)
398 return NULL; 382 return NULL;
@@ -402,12 +386,9 @@ static struct omap_mbox_queue *mbox_queue_alloc(struct omap_mbox *mbox,
402 if (kfifo_alloc(&mq->fifo, mbox_kfifo_size, GFP_KERNEL)) 386 if (kfifo_alloc(&mq->fifo, mbox_kfifo_size, GFP_KERNEL))
403 goto error; 387 goto error;
404 388
405 if (work) 389 INIT_WORK(&mq->work, work);
406 INIT_WORK(&mq->work, work);
407
408 if (tasklet)
409 tasklet_init(&mq->tasklet, tasklet, (unsigned long)mbox);
410 return mq; 390 return mq;
391
411error: 392error:
412 kfree(mq); 393 kfree(mq);
413 return NULL; 394 return NULL;
@@ -423,71 +404,35 @@ static int omap_mbox_startup(struct omap_mbox *mbox)
423{ 404{
424 int ret = 0; 405 int ret = 0;
425 struct omap_mbox_queue *mq; 406 struct omap_mbox_queue *mq;
426 struct omap_mbox_device *mdev = mbox->parent;
427 407
428 mutex_lock(&mdev->cfg_lock); 408 mq = mbox_queue_alloc(mbox, mbox_rx_work);
429 ret = pm_runtime_get_sync(mdev->dev); 409 if (!mq)
430 if (unlikely(ret < 0)) 410 return -ENOMEM;
431 goto fail_startup; 411 mbox->rxq = mq;
432 412 mq->mbox = mbox;
433 if (!mbox->use_count++) { 413
434 mq = mbox_queue_alloc(mbox, NULL, mbox_tx_tasklet); 414 ret = request_irq(mbox->irq, mbox_interrupt, IRQF_SHARED,
435 if (!mq) { 415 mbox->name, mbox);
436 ret = -ENOMEM; 416 if (unlikely(ret)) {
437 goto fail_alloc_txq; 417 pr_err("failed to register mailbox interrupt:%d\n", ret);
438 } 418 goto fail_request_irq;
439 mbox->txq = mq; 419 }
440 420
441 mq = mbox_queue_alloc(mbox, mbox_rx_work, NULL); 421 _omap_mbox_enable_irq(mbox, IRQ_RX);
442 if (!mq) {
443 ret = -ENOMEM;
444 goto fail_alloc_rxq;
445 }
446 mbox->rxq = mq;
447 mq->mbox = mbox;
448 ret = request_irq(mbox->irq, mbox_interrupt, IRQF_SHARED,
449 mbox->name, mbox);
450 if (unlikely(ret)) {
451 pr_err("failed to register mailbox interrupt:%d\n",
452 ret);
453 goto fail_request_irq;
454 }
455 422
456 omap_mbox_enable_irq(mbox, IRQ_RX);
457 }
458 mutex_unlock(&mdev->cfg_lock);
459 return 0; 423 return 0;
460 424
461fail_request_irq: 425fail_request_irq:
462 mbox_queue_free(mbox->rxq); 426 mbox_queue_free(mbox->rxq);
463fail_alloc_rxq:
464 mbox_queue_free(mbox->txq);
465fail_alloc_txq:
466 pm_runtime_put_sync(mdev->dev);
467 mbox->use_count--;
468fail_startup:
469 mutex_unlock(&mdev->cfg_lock);
470 return ret; 427 return ret;
471} 428}
472 429
473static void omap_mbox_fini(struct omap_mbox *mbox) 430static void omap_mbox_fini(struct omap_mbox *mbox)
474{ 431{
475 struct omap_mbox_device *mdev = mbox->parent; 432 _omap_mbox_disable_irq(mbox, IRQ_RX);
476 433 free_irq(mbox->irq, mbox);
477 mutex_lock(&mdev->cfg_lock); 434 flush_work(&mbox->rxq->work);
478 435 mbox_queue_free(mbox->rxq);
479 if (!--mbox->use_count) {
480 omap_mbox_disable_irq(mbox, IRQ_RX);
481 free_irq(mbox->irq, mbox);
482 tasklet_kill(&mbox->txq->tasklet);
483 flush_work(&mbox->rxq->work);
484 mbox_queue_free(mbox->txq);
485 mbox_queue_free(mbox->rxq);
486 }
487
488 pm_runtime_put_sync(mdev->dev);
489
490 mutex_unlock(&mdev->cfg_lock);
491} 436}
492 437
493static struct omap_mbox *omap_mbox_device_find(struct omap_mbox_device *mdev, 438static struct omap_mbox *omap_mbox_device_find(struct omap_mbox_device *mdev,
@@ -509,42 +454,55 @@ static struct omap_mbox *omap_mbox_device_find(struct omap_mbox_device *mdev,
509 return mbox; 454 return mbox;
510} 455}
511 456
512struct omap_mbox *omap_mbox_get(const char *name, struct notifier_block *nb) 457struct mbox_chan *omap_mbox_request_channel(struct mbox_client *cl,
458 const char *chan_name)
513{ 459{
460 struct device *dev = cl->dev;
514 struct omap_mbox *mbox = NULL; 461 struct omap_mbox *mbox = NULL;
515 struct omap_mbox_device *mdev; 462 struct omap_mbox_device *mdev;
463 struct mbox_chan *chan;
464 unsigned long flags;
516 int ret; 465 int ret;
517 466
467 if (!dev)
468 return ERR_PTR(-ENODEV);
469
470 if (dev->of_node) {
471 pr_err("%s: please use mbox_request_channel(), this API is supported only for OMAP non-DT usage\n",
472 __func__);
473 return ERR_PTR(-ENODEV);
474 }
475
518 mutex_lock(&omap_mbox_devices_lock); 476 mutex_lock(&omap_mbox_devices_lock);
519 list_for_each_entry(mdev, &omap_mbox_devices, elem) { 477 list_for_each_entry(mdev, &omap_mbox_devices, elem) {
520 mbox = omap_mbox_device_find(mdev, name); 478 mbox = omap_mbox_device_find(mdev, chan_name);
521 if (mbox) 479 if (mbox)
522 break; 480 break;
523 } 481 }
524 mutex_unlock(&omap_mbox_devices_lock); 482 mutex_unlock(&omap_mbox_devices_lock);
525 483
526 if (!mbox) 484 if (!mbox || !mbox->chan)
527 return ERR_PTR(-ENOENT); 485 return ERR_PTR(-ENOENT);
528 486
529 if (nb) 487 chan = mbox->chan;
530 blocking_notifier_chain_register(&mbox->notifier, nb); 488 spin_lock_irqsave(&chan->lock, flags);
489 chan->msg_free = 0;
490 chan->msg_count = 0;
491 chan->active_req = NULL;
492 chan->cl = cl;
493 init_completion(&chan->tx_complete);
494 spin_unlock_irqrestore(&chan->lock, flags);
531 495
532 ret = omap_mbox_startup(mbox); 496 ret = chan->mbox->ops->startup(chan);
533 if (ret) { 497 if (ret) {
534 blocking_notifier_chain_unregister(&mbox->notifier, nb); 498 pr_err("Unable to startup the chan (%d)\n", ret);
535 return ERR_PTR(-ENODEV); 499 mbox_free_channel(chan);
500 chan = ERR_PTR(ret);
536 } 501 }
537 502
538 return mbox; 503 return chan;
539}
540EXPORT_SYMBOL(omap_mbox_get);
541
542void omap_mbox_put(struct omap_mbox *mbox, struct notifier_block *nb)
543{
544 blocking_notifier_chain_unregister(&mbox->notifier, nb);
545 omap_mbox_fini(mbox);
546} 504}
547EXPORT_SYMBOL(omap_mbox_put); 505EXPORT_SYMBOL(omap_mbox_request_channel);
548 506
549static struct class omap_mbox_class = { .name = "mbox", }; 507static struct class omap_mbox_class = { .name = "mbox", };
550 508
@@ -560,25 +518,25 @@ static int omap_mbox_register(struct omap_mbox_device *mdev)
560 mboxes = mdev->mboxes; 518 mboxes = mdev->mboxes;
561 for (i = 0; mboxes[i]; i++) { 519 for (i = 0; mboxes[i]; i++) {
562 struct omap_mbox *mbox = mboxes[i]; 520 struct omap_mbox *mbox = mboxes[i];
563 mbox->dev = device_create(&omap_mbox_class, 521 mbox->dev = device_create(&omap_mbox_class, mdev->dev,
564 mdev->dev, 0, mbox, "%s", mbox->name); 522 0, mbox, "%s", mbox->name);
565 if (IS_ERR(mbox->dev)) { 523 if (IS_ERR(mbox->dev)) {
566 ret = PTR_ERR(mbox->dev); 524 ret = PTR_ERR(mbox->dev);
567 goto err_out; 525 goto err_out;
568 } 526 }
569
570 BLOCKING_INIT_NOTIFIER_HEAD(&mbox->notifier);
571 } 527 }
572 528
573 mutex_lock(&omap_mbox_devices_lock); 529 mutex_lock(&omap_mbox_devices_lock);
574 list_add(&mdev->elem, &omap_mbox_devices); 530 list_add(&mdev->elem, &omap_mbox_devices);
575 mutex_unlock(&omap_mbox_devices_lock); 531 mutex_unlock(&omap_mbox_devices_lock);
576 532
577 return 0; 533 ret = mbox_controller_register(&mdev->controller);
578 534
579err_out: 535err_out:
580 while (i--) 536 if (ret) {
581 device_unregister(mboxes[i]->dev); 537 while (i--)
538 device_unregister(mboxes[i]->dev);
539 }
582 return ret; 540 return ret;
583} 541}
584 542
@@ -594,12 +552,64 @@ static int omap_mbox_unregister(struct omap_mbox_device *mdev)
594 list_del(&mdev->elem); 552 list_del(&mdev->elem);
595 mutex_unlock(&omap_mbox_devices_lock); 553 mutex_unlock(&omap_mbox_devices_lock);
596 554
555 mbox_controller_unregister(&mdev->controller);
556
597 mboxes = mdev->mboxes; 557 mboxes = mdev->mboxes;
598 for (i = 0; mboxes[i]; i++) 558 for (i = 0; mboxes[i]; i++)
599 device_unregister(mboxes[i]->dev); 559 device_unregister(mboxes[i]->dev);
600 return 0; 560 return 0;
601} 561}
602 562
563static int omap_mbox_chan_startup(struct mbox_chan *chan)
564{
565 struct omap_mbox *mbox = mbox_chan_to_omap_mbox(chan);
566 struct omap_mbox_device *mdev = mbox->parent;
567 int ret = 0;
568
569 mutex_lock(&mdev->cfg_lock);
570 pm_runtime_get_sync(mdev->dev);
571 ret = omap_mbox_startup(mbox);
572 if (ret)
573 pm_runtime_put_sync(mdev->dev);
574 mutex_unlock(&mdev->cfg_lock);
575 return ret;
576}
577
578static void omap_mbox_chan_shutdown(struct mbox_chan *chan)
579{
580 struct omap_mbox *mbox = mbox_chan_to_omap_mbox(chan);
581 struct omap_mbox_device *mdev = mbox->parent;
582
583 mutex_lock(&mdev->cfg_lock);
584 omap_mbox_fini(mbox);
585 pm_runtime_put_sync(mdev->dev);
586 mutex_unlock(&mdev->cfg_lock);
587}
588
589static int omap_mbox_chan_send_data(struct mbox_chan *chan, void *data)
590{
591 struct omap_mbox *mbox = mbox_chan_to_omap_mbox(chan);
592 int ret = -EBUSY;
593
594 if (!mbox)
595 return -EINVAL;
596
597 if (!mbox_fifo_full(mbox)) {
598 mbox_fifo_write(mbox, (mbox_msg_t)data);
599 ret = 0;
600 }
601
602 /* always enable the interrupt */
603 _omap_mbox_enable_irq(mbox, IRQ_TX);
604 return ret;
605}
606
607static struct mbox_chan_ops omap_mbox_chan_ops = {
608 .startup = omap_mbox_chan_startup,
609 .send_data = omap_mbox_chan_send_data,
610 .shutdown = omap_mbox_chan_shutdown,
611};
612
603static const struct of_device_id omap_mailbox_of_match[] = { 613static const struct of_device_id omap_mailbox_of_match[] = {
604 { 614 {
605 .compatible = "ti,omap2-mailbox", 615 .compatible = "ti,omap2-mailbox",
@@ -619,10 +629,35 @@ static const struct of_device_id omap_mailbox_of_match[] = {
619}; 629};
620MODULE_DEVICE_TABLE(of, omap_mailbox_of_match); 630MODULE_DEVICE_TABLE(of, omap_mailbox_of_match);
621 631
632static struct mbox_chan *omap_mbox_of_xlate(struct mbox_controller *controller,
633 const struct of_phandle_args *sp)
634{
635 phandle phandle = sp->args[0];
636 struct device_node *node;
637 struct omap_mbox_device *mdev;
638 struct omap_mbox *mbox;
639
640 mdev = container_of(controller, struct omap_mbox_device, controller);
641 if (WARN_ON(!mdev))
642 return NULL;
643
644 node = of_find_node_by_phandle(phandle);
645 if (!node) {
646 pr_err("%s: could not find node phandle 0x%x\n",
647 __func__, phandle);
648 return NULL;
649 }
650
651 mbox = omap_mbox_device_find(mdev, node->name);
652 of_node_put(node);
653 return mbox ? mbox->chan : NULL;
654}
655
622static int omap_mbox_probe(struct platform_device *pdev) 656static int omap_mbox_probe(struct platform_device *pdev)
623{ 657{
624 struct resource *mem; 658 struct resource *mem;
625 int ret; 659 int ret;
660 struct mbox_chan *chnls;
626 struct omap_mbox **list, *mbox, *mboxblk; 661 struct omap_mbox **list, *mbox, *mboxblk;
627 struct omap_mbox_pdata *pdata = pdev->dev.platform_data; 662 struct omap_mbox_pdata *pdata = pdev->dev.platform_data;
628 struct omap_mbox_dev_info *info = NULL; 663 struct omap_mbox_dev_info *info = NULL;
@@ -727,6 +762,11 @@ static int omap_mbox_probe(struct platform_device *pdev)
727 if (!list) 762 if (!list)
728 return -ENOMEM; 763 return -ENOMEM;
729 764
765 chnls = devm_kzalloc(&pdev->dev, (info_count + 1) * sizeof(*chnls),
766 GFP_KERNEL);
767 if (!chnls)
768 return -ENOMEM;
769
730 mboxblk = devm_kzalloc(&pdev->dev, info_count * sizeof(*mbox), 770 mboxblk = devm_kzalloc(&pdev->dev, info_count * sizeof(*mbox),
731 GFP_KERNEL); 771 GFP_KERNEL);
732 if (!mboxblk) 772 if (!mboxblk)
@@ -758,6 +798,8 @@ static int omap_mbox_probe(struct platform_device *pdev)
758 mbox->irq = platform_get_irq(pdev, finfo->tx_irq); 798 mbox->irq = platform_get_irq(pdev, finfo->tx_irq);
759 if (mbox->irq < 0) 799 if (mbox->irq < 0)
760 return mbox->irq; 800 return mbox->irq;
801 mbox->chan = &chnls[i];
802 chnls[i].con_priv = mbox;
761 list[i] = mbox++; 803 list[i] = mbox++;
762 } 804 }
763 805
@@ -766,6 +808,14 @@ static int omap_mbox_probe(struct platform_device *pdev)
766 mdev->num_users = num_users; 808 mdev->num_users = num_users;
767 mdev->num_fifos = num_fifos; 809 mdev->num_fifos = num_fifos;
768 mdev->mboxes = list; 810 mdev->mboxes = list;
811
812 /* OMAP does not have a Tx-Done IRQ, but rather a Tx-Ready IRQ */
813 mdev->controller.txdone_irq = true;
814 mdev->controller.dev = mdev->dev;
815 mdev->controller.ops = &omap_mbox_chan_ops;
816 mdev->controller.chans = chnls;
817 mdev->controller.num_chans = info_count;
818 mdev->controller.of_xlate = omap_mbox_of_xlate;
769 ret = omap_mbox_register(mdev); 819 ret = omap_mbox_register(mdev);
770 if (ret) 820 if (ret)
771 return ret; 821 return ret;
diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
new file mode 100644
index 000000000000..6dbf6fcbdfaf
--- /dev/null
+++ b/drivers/mailbox/pcc.c
@@ -0,0 +1,403 @@
1/*
2 * Copyright (C) 2014 Linaro Ltd.
3 * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * PCC (Platform Communication Channel) is defined in the ACPI 5.0+
16 * specification. It is a mailbox like mechanism to allow clients
17 * such as CPPC (Collaborative Processor Performance Control), RAS
18 * (Reliability, Availability and Serviceability) and MPST (Memory
19 * Node Power State Table) to talk to the platform (e.g. BMC) through
20 * shared memory regions as defined in the PCC table entries. The PCC
21 * specification supports a Doorbell mechanism for the PCC clients
22 * to notify the platform about new data. This Doorbell information
23 * is also specified in each PCC table entry. See pcc_send_data()
24 * and pcc_tx_done() for basic mode of operation.
25 *
26 * For more details about PCC, please see the ACPI specification from
27 * http://www.uefi.org/ACPIv5.1 Section 14.
28 *
29 * This file implements PCC as a Mailbox controller and allows for PCC
30 * clients to be implemented as its Mailbox Client Channels.
31 */
32
33#include <linux/acpi.h>
34#include <linux/delay.h>
35#include <linux/io.h>
36#include <linux/init.h>
37#include <linux/list.h>
38#include <linux/platform_device.h>
39#include <linux/mailbox_controller.h>
40#include <linux/mailbox_client.h>
41
42#include "mailbox.h"
43
44#define MAX_PCC_SUBSPACES 256
45#define PCCS_SS_SIG_MAGIC 0x50434300
46#define PCC_CMD_COMPLETE 0x1
47
48static struct mbox_chan *pcc_mbox_channels;
49
50static struct mbox_controller pcc_mbox_ctrl = {};
51/**
52 * get_pcc_channel - Given a PCC subspace idx, get
53 * the respective mbox_channel.
54 * @id: PCC subspace index.
55 *
56 * Return: ERR_PTR(errno) if error, else pointer
57 * to mbox channel.
58 */
59static struct mbox_chan *get_pcc_channel(int id)
60{
61 struct mbox_chan *pcc_chan;
62
63 if (id < 0 || id > pcc_mbox_ctrl.num_chans)
64 return ERR_PTR(-ENOENT);
65
66 pcc_chan = (struct mbox_chan *)
67 (unsigned long) pcc_mbox_channels +
68 (id * sizeof(*pcc_chan));
69
70 return pcc_chan;
71}
72
73/**
74 * get_subspace_id - Given a Mailbox channel, find out the
75 * PCC subspace id.
76 * @chan: Pointer to Mailbox Channel from which we want
77 * the index.
78 * Return: Errno if not found, else positive index number.
79 */
80static int get_subspace_id(struct mbox_chan *chan)
81{
82 unsigned int id = chan - pcc_mbox_channels;
83
84 if (id < 0 || id > pcc_mbox_ctrl.num_chans)
85 return -ENOENT;
86
87 return id;
88}
89
90/**
91 * pcc_mbox_request_channel - PCC clients call this function to
92 * request a pointer to their PCC subspace, from which they
93 * can get the details of communicating with the remote.
94 * @cl: Pointer to Mailbox client, so we know where to bind the
95 * Channel.
96 * @subspace_id: The PCC Subspace index as parsed in the PCC client
97 * ACPI package. This is used to lookup the array of PCC
98 * subspaces as parsed by the PCC Mailbox controller.
99 *
100 * Return: Pointer to the Mailbox Channel if successful or
101 * ERR_PTR.
102 */
103struct mbox_chan *pcc_mbox_request_channel(struct mbox_client *cl,
104 int subspace_id)
105{
106 struct device *dev = pcc_mbox_ctrl.dev;
107 struct mbox_chan *chan;
108 unsigned long flags;
109
110 /*
111 * Each PCC Subspace is a Mailbox Channel.
112 * The PCC Clients get their PCC Subspace ID
113 * from their own tables and pass it here.
114 * This returns a pointer to the PCC subspace
115 * for the Client to operate on.
116 */
117 chan = get_pcc_channel(subspace_id);
118
119 if (!chan || chan->cl) {
120 dev_err(dev, "%s: PCC mailbox not free\n", __func__);
121 return ERR_PTR(-EBUSY);
122 }
123
124 spin_lock_irqsave(&chan->lock, flags);
125 chan->msg_free = 0;
126 chan->msg_count = 0;
127 chan->active_req = NULL;
128 chan->cl = cl;
129 init_completion(&chan->tx_complete);
130
131 if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone)
132 chan->txdone_method |= TXDONE_BY_ACK;
133
134 spin_unlock_irqrestore(&chan->lock, flags);
135
136 return chan;
137}
138EXPORT_SYMBOL_GPL(pcc_mbox_request_channel);
139
140/**
141 * pcc_mbox_free_channel - Clients call this to free their Channel.
142 *
143 * @chan: Pointer to the mailbox channel as returned by
144 * pcc_mbox_request_channel()
145 */
146void pcc_mbox_free_channel(struct mbox_chan *chan)
147{
148 unsigned long flags;
149
150 if (!chan || !chan->cl)
151 return;
152
153 spin_lock_irqsave(&chan->lock, flags);
154 chan->cl = NULL;
155 chan->active_req = NULL;
156 if (chan->txdone_method == (TXDONE_BY_POLL | TXDONE_BY_ACK))
157 chan->txdone_method = TXDONE_BY_POLL;
158
159 spin_unlock_irqrestore(&chan->lock, flags);
160}
161EXPORT_SYMBOL_GPL(pcc_mbox_free_channel);
162
163/**
164 * pcc_tx_done - Callback from Mailbox controller code to
165 * check if PCC message transmission completed.
166 * @chan: Pointer to Mailbox channel on which previous
167 * transmission occurred.
168 *
169 * Return: TRUE if succeeded.
170 */
171static bool pcc_tx_done(struct mbox_chan *chan)
172{
173 struct acpi_pcct_hw_reduced *pcct_ss = chan->con_priv;
174 struct acpi_pcct_shared_memory *generic_comm_base =
175 (struct acpi_pcct_shared_memory *) pcct_ss->base_address;
176 u16 cmd_delay = pcct_ss->latency;
177 unsigned int retries = 0;
178
179 /* Try a few times while waiting for platform to consume */
180 while (!(readw_relaxed(&generic_comm_base->status)
181 & PCC_CMD_COMPLETE)) {
182
183 if (retries++ < 5)
184 udelay(cmd_delay);
185 else {
186 /*
187 * If the remote is dead, this will cause the Mbox
188 * controller to timeout after mbox client.tx_tout
189 * msecs.
190 */
191 pr_err("PCC platform did not respond.\n");
192 return false;
193 }
194 }
195 return true;
196}
197
198/**
199 * pcc_send_data - Called from Mailbox Controller code to finally
200 * transmit data over channel.
201 * @chan: Pointer to Mailbox channel over which to send data.
202 * @data: Actual data to be written over channel.
203 *
204 * Return: Err if something failed else 0 for success.
205 */
206static int pcc_send_data(struct mbox_chan *chan, void *data)
207{
208 struct acpi_pcct_hw_reduced *pcct_ss = chan->con_priv;
209 struct acpi_pcct_shared_memory *generic_comm_base =
210 (struct acpi_pcct_shared_memory *) pcct_ss->base_address;
211 struct acpi_generic_address doorbell;
212 u64 doorbell_preserve;
213 u64 doorbell_val;
214 u64 doorbell_write;
215 u16 cmd = *(u16 *) data;
216 u16 ss_idx = -1;
217
218 ss_idx = get_subspace_id(chan);
219
220 if (ss_idx < 0) {
221 pr_err("Invalid Subspace ID from PCC client\n");
222 return -EINVAL;
223 }
224
225 doorbell = pcct_ss->doorbell_register;
226 doorbell_preserve = pcct_ss->preserve_mask;
227 doorbell_write = pcct_ss->write_mask;
228
229 /* Write to the shared comm region. */
230 writew(cmd, &generic_comm_base->command);
231
232 /* Write Subspace MAGIC value so platform can identify destination. */
233 writel((PCCS_SS_SIG_MAGIC | ss_idx), &generic_comm_base->signature);
234
235 /* Flip CMD COMPLETE bit */
236 writew(0, &generic_comm_base->status);
237
238 /* Sync notification from OSPM to Platform. */
239 acpi_read(&doorbell_val, &doorbell);
240 acpi_write((doorbell_val & doorbell_preserve) | doorbell_write,
241 &doorbell);
242
243 return 0;
244}
245
246static struct mbox_chan_ops pcc_chan_ops = {
247 .send_data = pcc_send_data,
248 .last_tx_done = pcc_tx_done,
249};
250
251/**
252 * parse_pcc_subspace - Parse the PCC table and verify PCC subspace
253 * entries. There should be one entry per PCC client.
254 * @header: Pointer to the ACPI subtable header under the PCCT.
255 * @end: End of subtable entry.
256 *
257 * Return: 0 for Success, else errno.
258 *
259 * This gets called for each entry in the PCC table.
260 */
261static int parse_pcc_subspace(struct acpi_subtable_header *header,
262 const unsigned long end)
263{
264 struct acpi_pcct_hw_reduced *pcct_ss;
265
266 if (pcc_mbox_ctrl.num_chans <= MAX_PCC_SUBSPACES) {
267 pcct_ss = (struct acpi_pcct_hw_reduced *) header;
268
269 if (pcct_ss->header.type !=
270 ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE) {
271 pr_err("Incorrect PCC Subspace type detected\n");
272 return -EINVAL;
273 }
274 }
275
276 return 0;
277}
278
279/**
280 * acpi_pcc_probe - Parse the ACPI tree for the PCCT.
281 *
282 * Return: 0 for Success, else errno.
283 */
284static int __init acpi_pcc_probe(void)
285{
286 acpi_size pcct_tbl_header_size;
287 struct acpi_table_header *pcct_tbl;
288 struct acpi_subtable_header *pcct_entry;
289 int count, i;
290 acpi_status status = AE_OK;
291
292 /* Search for PCCT */
293 status = acpi_get_table_with_size(ACPI_SIG_PCCT, 0,
294 &pcct_tbl,
295 &pcct_tbl_header_size);
296
297 if (ACPI_FAILURE(status) || !pcct_tbl) {
298 pr_warn("PCCT header not found.\n");
299 return -ENODEV;
300 }
301
302 count = acpi_table_parse_entries(ACPI_SIG_PCCT,
303 sizeof(struct acpi_table_pcct),
304 ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE,
305 parse_pcc_subspace, MAX_PCC_SUBSPACES);
306
307 if (count <= 0) {
308 pr_err("Error parsing PCC subspaces from PCCT\n");
309 return -EINVAL;
310 }
311
312 pcc_mbox_channels = kzalloc(sizeof(struct mbox_chan) *
313 count, GFP_KERNEL);
314
315 if (!pcc_mbox_channels) {
316 pr_err("Could not allocate space for PCC mbox channels\n");
317 return -ENOMEM;
318 }
319
320 /* Point to the first PCC subspace entry */
321 pcct_entry = (struct acpi_subtable_header *) (
322 (unsigned long) pcct_tbl + sizeof(struct acpi_table_pcct));
323
324 for (i = 0; i < count; i++) {
325 pcc_mbox_channels[i].con_priv = pcct_entry;
326 pcct_entry = (struct acpi_subtable_header *)
327 ((unsigned long) pcct_entry + pcct_entry->length);
328 }
329
330 pcc_mbox_ctrl.num_chans = count;
331
332 pr_info("Detected %d PCC Subspaces\n", pcc_mbox_ctrl.num_chans);
333
334 return 0;
335}
336
337/**
338 * pcc_mbox_probe - Called when we find a match for the
339 * PCCT platform device. This is purely used to represent
340 * the PCCT as a virtual device for registering with the
341 * generic Mailbox framework.
342 *
343 * @pdev: Pointer to platform device returned when a match
344 * is found.
345 *
346 * Return: 0 for Success, else errno.
347 */
348static int pcc_mbox_probe(struct platform_device *pdev)
349{
350 int ret = 0;
351
352 pcc_mbox_ctrl.chans = pcc_mbox_channels;
353 pcc_mbox_ctrl.ops = &pcc_chan_ops;
354 pcc_mbox_ctrl.txdone_poll = true;
355 pcc_mbox_ctrl.txpoll_period = 10;
356 pcc_mbox_ctrl.dev = &pdev->dev;
357
358 pr_info("Registering PCC driver as Mailbox controller\n");
359 ret = mbox_controller_register(&pcc_mbox_ctrl);
360
361 if (ret) {
362 pr_err("Err registering PCC as Mailbox controller: %d\n", ret);
363 ret = -ENODEV;
364 }
365
366 return ret;
367}
368
369struct platform_driver pcc_mbox_driver = {
370 .probe = pcc_mbox_probe,
371 .driver = {
372 .name = "PCCT",
373 .owner = THIS_MODULE,
374 },
375};
376
377static int __init pcc_init(void)
378{
379 int ret;
380 struct platform_device *pcc_pdev;
381
382 if (acpi_disabled)
383 return -ENODEV;
384
385 /* Check if PCC support is available. */
386 ret = acpi_pcc_probe();
387
388 if (ret) {
389 pr_err("ACPI PCC probe failed.\n");
390 return -ENODEV;
391 }
392
393 pcc_pdev = platform_create_bundle(&pcc_mbox_driver,
394 pcc_mbox_probe, NULL, 0, NULL, 0);
395
396 if (!pcc_pdev) {
397 pr_err("Err creating PCC platform bundle\n");
398 return -ENODEV;
399 }
400
401 return 0;
402}
403device_initcall(pcc_init);
diff --git a/drivers/md/dm-bio-prison.c b/drivers/md/dm-bio-prison.c
index f752d12081ff..be065300e93c 100644
--- a/drivers/md/dm-bio-prison.c
+++ b/drivers/md/dm-bio-prison.c
@@ -14,68 +14,38 @@
14 14
15/*----------------------------------------------------------------*/ 15/*----------------------------------------------------------------*/
16 16
17struct bucket { 17#define MIN_CELLS 1024
18 spinlock_t lock;
19 struct hlist_head cells;
20};
21 18
22struct dm_bio_prison { 19struct dm_bio_prison {
20 spinlock_t lock;
23 mempool_t *cell_pool; 21 mempool_t *cell_pool;
24 22 struct rb_root cells;
25 unsigned nr_buckets;
26 unsigned hash_mask;
27 struct bucket *buckets;
28}; 23};
29 24
30/*----------------------------------------------------------------*/
31
32static uint32_t calc_nr_buckets(unsigned nr_cells)
33{
34 uint32_t n = 128;
35
36 nr_cells /= 4;
37 nr_cells = min(nr_cells, 8192u);
38
39 while (n < nr_cells)
40 n <<= 1;
41
42 return n;
43}
44
45static struct kmem_cache *_cell_cache; 25static struct kmem_cache *_cell_cache;
46 26
47static void init_bucket(struct bucket *b) 27/*----------------------------------------------------------------*/
48{
49 spin_lock_init(&b->lock);
50 INIT_HLIST_HEAD(&b->cells);
51}
52 28
53/* 29/*
54 * @nr_cells should be the number of cells you want in use _concurrently_. 30 * @nr_cells should be the number of cells you want in use _concurrently_.
55 * Don't confuse it with the number of distinct keys. 31 * Don't confuse it with the number of distinct keys.
56 */ 32 */
57struct dm_bio_prison *dm_bio_prison_create(unsigned nr_cells) 33struct dm_bio_prison *dm_bio_prison_create(void)
58{ 34{
59 unsigned i; 35 struct dm_bio_prison *prison = kmalloc(sizeof(*prison), GFP_KERNEL);
60 uint32_t nr_buckets = calc_nr_buckets(nr_cells);
61 size_t len = sizeof(struct dm_bio_prison) +
62 (sizeof(struct bucket) * nr_buckets);
63 struct dm_bio_prison *prison = kmalloc(len, GFP_KERNEL);
64 36
65 if (!prison) 37 if (!prison)
66 return NULL; 38 return NULL;
67 39
68 prison->cell_pool = mempool_create_slab_pool(nr_cells, _cell_cache); 40 spin_lock_init(&prison->lock);
41
42 prison->cell_pool = mempool_create_slab_pool(MIN_CELLS, _cell_cache);
69 if (!prison->cell_pool) { 43 if (!prison->cell_pool) {
70 kfree(prison); 44 kfree(prison);
71 return NULL; 45 return NULL;
72 } 46 }
73 47
74 prison->nr_buckets = nr_buckets; 48 prison->cells = RB_ROOT;
75 prison->hash_mask = nr_buckets - 1;
76 prison->buckets = (struct bucket *) (prison + 1);
77 for (i = 0; i < nr_buckets; i++)
78 init_bucket(prison->buckets + i);
79 49
80 return prison; 50 return prison;
81} 51}
@@ -101,68 +71,73 @@ void dm_bio_prison_free_cell(struct dm_bio_prison *prison,
101} 71}
102EXPORT_SYMBOL_GPL(dm_bio_prison_free_cell); 72EXPORT_SYMBOL_GPL(dm_bio_prison_free_cell);
103 73
104static uint32_t hash_key(struct dm_bio_prison *prison, struct dm_cell_key *key) 74static void __setup_new_cell(struct dm_cell_key *key,
75 struct bio *holder,
76 struct dm_bio_prison_cell *cell)
105{ 77{
106 const unsigned long BIG_PRIME = 4294967291UL; 78 memcpy(&cell->key, key, sizeof(cell->key));
107 uint64_t hash = key->block * BIG_PRIME; 79 cell->holder = holder;
108 80 bio_list_init(&cell->bios);
109 return (uint32_t) (hash & prison->hash_mask);
110} 81}
111 82
112static int keys_equal(struct dm_cell_key *lhs, struct dm_cell_key *rhs) 83static int cmp_keys(struct dm_cell_key *lhs,
84 struct dm_cell_key *rhs)
113{ 85{
114 return (lhs->virtual == rhs->virtual) && 86 if (lhs->virtual < rhs->virtual)
115 (lhs->dev == rhs->dev) && 87 return -1;
116 (lhs->block == rhs->block);
117}
118 88
119static struct bucket *get_bucket(struct dm_bio_prison *prison, 89 if (lhs->virtual > rhs->virtual)
120 struct dm_cell_key *key) 90 return 1;
121{
122 return prison->buckets + hash_key(prison, key);
123}
124 91
125static struct dm_bio_prison_cell *__search_bucket(struct bucket *b, 92 if (lhs->dev < rhs->dev)
126 struct dm_cell_key *key) 93 return -1;
127{
128 struct dm_bio_prison_cell *cell;
129 94
130 hlist_for_each_entry(cell, &b->cells, list) 95 if (lhs->dev > rhs->dev)
131 if (keys_equal(&cell->key, key)) 96 return 1;
132 return cell;
133 97
134 return NULL; 98 if (lhs->block_end <= rhs->block_begin)
135} 99 return -1;
136 100
137static void __setup_new_cell(struct bucket *b, 101 if (lhs->block_begin >= rhs->block_end)
138 struct dm_cell_key *key, 102 return 1;
139 struct bio *holder, 103
140 struct dm_bio_prison_cell *cell) 104 return 0;
141{
142 memcpy(&cell->key, key, sizeof(cell->key));
143 cell->holder = holder;
144 bio_list_init(&cell->bios);
145 hlist_add_head(&cell->list, &b->cells);
146} 105}
147 106
148static int __bio_detain(struct bucket *b, 107static int __bio_detain(struct dm_bio_prison *prison,
149 struct dm_cell_key *key, 108 struct dm_cell_key *key,
150 struct bio *inmate, 109 struct bio *inmate,
151 struct dm_bio_prison_cell *cell_prealloc, 110 struct dm_bio_prison_cell *cell_prealloc,
152 struct dm_bio_prison_cell **cell_result) 111 struct dm_bio_prison_cell **cell_result)
153{ 112{
154 struct dm_bio_prison_cell *cell; 113 int r;
155 114 struct rb_node **new = &prison->cells.rb_node, *parent = NULL;
156 cell = __search_bucket(b, key); 115
157 if (cell) { 116 while (*new) {
158 if (inmate) 117 struct dm_bio_prison_cell *cell =
159 bio_list_add(&cell->bios, inmate); 118 container_of(*new, struct dm_bio_prison_cell, node);
160 *cell_result = cell; 119
161 return 1; 120 r = cmp_keys(key, &cell->key);
121
122 parent = *new;
123 if (r < 0)
124 new = &((*new)->rb_left);
125 else if (r > 0)
126 new = &((*new)->rb_right);
127 else {
128 if (inmate)
129 bio_list_add(&cell->bios, inmate);
130 *cell_result = cell;
131 return 1;
132 }
162 } 133 }
163 134
164 __setup_new_cell(b, key, inmate, cell_prealloc); 135 __setup_new_cell(key, inmate, cell_prealloc);
165 *cell_result = cell_prealloc; 136 *cell_result = cell_prealloc;
137
138 rb_link_node(&cell_prealloc->node, parent, new);
139 rb_insert_color(&cell_prealloc->node, &prison->cells);
140
166 return 0; 141 return 0;
167} 142}
168 143
@@ -174,11 +149,10 @@ static int bio_detain(struct dm_bio_prison *prison,
174{ 149{
175 int r; 150 int r;
176 unsigned long flags; 151 unsigned long flags;
177 struct bucket *b = get_bucket(prison, key);
178 152
179 spin_lock_irqsave(&b->lock, flags); 153 spin_lock_irqsave(&prison->lock, flags);
180 r = __bio_detain(b, key, inmate, cell_prealloc, cell_result); 154 r = __bio_detain(prison, key, inmate, cell_prealloc, cell_result);
181 spin_unlock_irqrestore(&b->lock, flags); 155 spin_unlock_irqrestore(&prison->lock, flags);
182 156
183 return r; 157 return r;
184} 158}
@@ -205,10 +179,11 @@ EXPORT_SYMBOL_GPL(dm_get_cell);
205/* 179/*
206 * @inmates must have been initialised prior to this call 180 * @inmates must have been initialised prior to this call
207 */ 181 */
208static void __cell_release(struct dm_bio_prison_cell *cell, 182static void __cell_release(struct dm_bio_prison *prison,
183 struct dm_bio_prison_cell *cell,
209 struct bio_list *inmates) 184 struct bio_list *inmates)
210{ 185{
211 hlist_del(&cell->list); 186 rb_erase(&cell->node, &prison->cells);
212 187
213 if (inmates) { 188 if (inmates) {
214 if (cell->holder) 189 if (cell->holder)
@@ -222,21 +197,21 @@ void dm_cell_release(struct dm_bio_prison *prison,
222 struct bio_list *bios) 197 struct bio_list *bios)
223{ 198{
224 unsigned long flags; 199 unsigned long flags;
225 struct bucket *b = get_bucket(prison, &cell->key);
226 200
227 spin_lock_irqsave(&b->lock, flags); 201 spin_lock_irqsave(&prison->lock, flags);
228 __cell_release(cell, bios); 202 __cell_release(prison, cell, bios);
229 spin_unlock_irqrestore(&b->lock, flags); 203 spin_unlock_irqrestore(&prison->lock, flags);
230} 204}
231EXPORT_SYMBOL_GPL(dm_cell_release); 205EXPORT_SYMBOL_GPL(dm_cell_release);
232 206
233/* 207/*
234 * Sometimes we don't want the holder, just the additional bios. 208 * Sometimes we don't want the holder, just the additional bios.
235 */ 209 */
236static void __cell_release_no_holder(struct dm_bio_prison_cell *cell, 210static void __cell_release_no_holder(struct dm_bio_prison *prison,
211 struct dm_bio_prison_cell *cell,
237 struct bio_list *inmates) 212 struct bio_list *inmates)
238{ 213{
239 hlist_del(&cell->list); 214 rb_erase(&cell->node, &prison->cells);
240 bio_list_merge(inmates, &cell->bios); 215 bio_list_merge(inmates, &cell->bios);
241} 216}
242 217
@@ -245,11 +220,10 @@ void dm_cell_release_no_holder(struct dm_bio_prison *prison,
245 struct bio_list *inmates) 220 struct bio_list *inmates)
246{ 221{
247 unsigned long flags; 222 unsigned long flags;
248 struct bucket *b = get_bucket(prison, &cell->key);
249 223
250 spin_lock_irqsave(&b->lock, flags); 224 spin_lock_irqsave(&prison->lock, flags);
251 __cell_release_no_holder(cell, inmates); 225 __cell_release_no_holder(prison, cell, inmates);
252 spin_unlock_irqrestore(&b->lock, flags); 226 spin_unlock_irqrestore(&prison->lock, flags);
253} 227}
254EXPORT_SYMBOL_GPL(dm_cell_release_no_holder); 228EXPORT_SYMBOL_GPL(dm_cell_release_no_holder);
255 229
@@ -267,6 +241,20 @@ void dm_cell_error(struct dm_bio_prison *prison,
267} 241}
268EXPORT_SYMBOL_GPL(dm_cell_error); 242EXPORT_SYMBOL_GPL(dm_cell_error);
269 243
244void dm_cell_visit_release(struct dm_bio_prison *prison,
245 void (*visit_fn)(void *, struct dm_bio_prison_cell *),
246 void *context,
247 struct dm_bio_prison_cell *cell)
248{
249 unsigned long flags;
250
251 spin_lock_irqsave(&prison->lock, flags);
252 visit_fn(context, cell);
253 rb_erase(&cell->node, &prison->cells);
254 spin_unlock_irqrestore(&prison->lock, flags);
255}
256EXPORT_SYMBOL_GPL(dm_cell_visit_release);
257
270/*----------------------------------------------------------------*/ 258/*----------------------------------------------------------------*/
271 259
272#define DEFERRED_SET_SIZE 64 260#define DEFERRED_SET_SIZE 64
diff --git a/drivers/md/dm-bio-prison.h b/drivers/md/dm-bio-prison.h
index 6805a142b750..74cf01144b1f 100644
--- a/drivers/md/dm-bio-prison.h
+++ b/drivers/md/dm-bio-prison.h
@@ -10,8 +10,8 @@
10#include "persistent-data/dm-block-manager.h" /* FIXME: for dm_block_t */ 10#include "persistent-data/dm-block-manager.h" /* FIXME: for dm_block_t */
11#include "dm-thin-metadata.h" /* FIXME: for dm_thin_id */ 11#include "dm-thin-metadata.h" /* FIXME: for dm_thin_id */
12 12
13#include <linux/list.h>
14#include <linux/bio.h> 13#include <linux/bio.h>
14#include <linux/rbtree.h>
15 15
16/*----------------------------------------------------------------*/ 16/*----------------------------------------------------------------*/
17 17
@@ -23,11 +23,14 @@
23 */ 23 */
24struct dm_bio_prison; 24struct dm_bio_prison;
25 25
26/* FIXME: this needs to be more abstract */ 26/*
27 * Keys define a range of blocks within either a virtual or physical
28 * device.
29 */
27struct dm_cell_key { 30struct dm_cell_key {
28 int virtual; 31 int virtual;
29 dm_thin_id dev; 32 dm_thin_id dev;
30 dm_block_t block; 33 dm_block_t block_begin, block_end;
31}; 34};
32 35
33/* 36/*
@@ -35,13 +38,15 @@ struct dm_cell_key {
35 * themselves. 38 * themselves.
36 */ 39 */
37struct dm_bio_prison_cell { 40struct dm_bio_prison_cell {
38 struct hlist_node list; 41 struct list_head user_list; /* for client use */
42 struct rb_node node;
43
39 struct dm_cell_key key; 44 struct dm_cell_key key;
40 struct bio *holder; 45 struct bio *holder;
41 struct bio_list bios; 46 struct bio_list bios;
42}; 47};
43 48
44struct dm_bio_prison *dm_bio_prison_create(unsigned nr_cells); 49struct dm_bio_prison *dm_bio_prison_create(void);
45void dm_bio_prison_destroy(struct dm_bio_prison *prison); 50void dm_bio_prison_destroy(struct dm_bio_prison *prison);
46 51
47/* 52/*
@@ -57,7 +62,7 @@ void dm_bio_prison_free_cell(struct dm_bio_prison *prison,
57 struct dm_bio_prison_cell *cell); 62 struct dm_bio_prison_cell *cell);
58 63
59/* 64/*
60 * Creates, or retrieves a cell for the given key. 65 * Creates, or retrieves a cell that overlaps the given key.
61 * 66 *
62 * Returns 1 if pre-existing cell returned, zero if new cell created using 67 * Returns 1 if pre-existing cell returned, zero if new cell created using
63 * @cell_prealloc. 68 * @cell_prealloc.
@@ -68,7 +73,8 @@ int dm_get_cell(struct dm_bio_prison *prison,
68 struct dm_bio_prison_cell **cell_result); 73 struct dm_bio_prison_cell **cell_result);
69 74
70/* 75/*
71 * An atomic op that combines retrieving a cell, and adding a bio to it. 76 * An atomic op that combines retrieving or creating a cell, and adding a
77 * bio to it.
72 * 78 *
73 * Returns 1 if the cell was already held, 0 if @inmate is the new holder. 79 * Returns 1 if the cell was already held, 0 if @inmate is the new holder.
74 */ 80 */
@@ -87,6 +93,14 @@ void dm_cell_release_no_holder(struct dm_bio_prison *prison,
87void dm_cell_error(struct dm_bio_prison *prison, 93void dm_cell_error(struct dm_bio_prison *prison,
88 struct dm_bio_prison_cell *cell, int error); 94 struct dm_bio_prison_cell *cell, int error);
89 95
96/*
97 * Visits the cell and then releases. Guarantees no new inmates are
98 * inserted between the visit and release.
99 */
100void dm_cell_visit_release(struct dm_bio_prison *prison,
101 void (*visit_fn)(void *, struct dm_bio_prison_cell *),
102 void *context, struct dm_bio_prison_cell *cell);
103
90/*----------------------------------------------------------------*/ 104/*----------------------------------------------------------------*/
91 105
92/* 106/*
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index afe79719ea32..c33b49792b87 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -14,6 +14,7 @@
14#include <linux/vmalloc.h> 14#include <linux/vmalloc.h>
15#include <linux/shrinker.h> 15#include <linux/shrinker.h>
16#include <linux/module.h> 16#include <linux/module.h>
17#include <linux/rbtree.h>
17 18
18#define DM_MSG_PREFIX "bufio" 19#define DM_MSG_PREFIX "bufio"
19 20
@@ -34,26 +35,23 @@
34/* 35/*
35 * Check buffer ages in this interval (seconds) 36 * Check buffer ages in this interval (seconds)
36 */ 37 */
37#define DM_BUFIO_WORK_TIMER_SECS 10 38#define DM_BUFIO_WORK_TIMER_SECS 30
38 39
39/* 40/*
40 * Free buffers when they are older than this (seconds) 41 * Free buffers when they are older than this (seconds)
41 */ 42 */
42#define DM_BUFIO_DEFAULT_AGE_SECS 60 43#define DM_BUFIO_DEFAULT_AGE_SECS 300
43 44
44/* 45/*
45 * The number of bvec entries that are embedded directly in the buffer. 46 * The nr of bytes of cached data to keep around.
46 * If the chunk size is larger, dm-io is used to do the io.
47 */ 47 */
48#define DM_BUFIO_INLINE_VECS 16 48#define DM_BUFIO_DEFAULT_RETAIN_BYTES (256 * 1024)
49 49
50/* 50/*
51 * Buffer hash 51 * The number of bvec entries that are embedded directly in the buffer.
52 * If the chunk size is larger, dm-io is used to do the io.
52 */ 53 */
53#define DM_BUFIO_HASH_BITS 20 54#define DM_BUFIO_INLINE_VECS 16
54#define DM_BUFIO_HASH(block) \
55 ((((block) >> DM_BUFIO_HASH_BITS) ^ (block)) & \
56 ((1 << DM_BUFIO_HASH_BITS) - 1))
57 55
58/* 56/*
59 * Don't try to use kmem_cache_alloc for blocks larger than this. 57 * Don't try to use kmem_cache_alloc for blocks larger than this.
@@ -106,7 +104,7 @@ struct dm_bufio_client {
106 104
107 unsigned minimum_buffers; 105 unsigned minimum_buffers;
108 106
109 struct hlist_head *cache_hash; 107 struct rb_root buffer_tree;
110 wait_queue_head_t free_buffer_wait; 108 wait_queue_head_t free_buffer_wait;
111 109
112 int async_write_error; 110 int async_write_error;
@@ -135,7 +133,7 @@ enum data_mode {
135}; 133};
136 134
137struct dm_buffer { 135struct dm_buffer {
138 struct hlist_node hash_list; 136 struct rb_node node;
139 struct list_head lru_list; 137 struct list_head lru_list;
140 sector_t block; 138 sector_t block;
141 void *data; 139 void *data;
@@ -223,6 +221,7 @@ static DEFINE_SPINLOCK(param_spinlock);
223 * Buffers are freed after this timeout 221 * Buffers are freed after this timeout
224 */ 222 */
225static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS; 223static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
224static unsigned dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;
226 225
227static unsigned long dm_bufio_peak_allocated; 226static unsigned long dm_bufio_peak_allocated;
228static unsigned long dm_bufio_allocated_kmem_cache; 227static unsigned long dm_bufio_allocated_kmem_cache;
@@ -253,6 +252,53 @@ static LIST_HEAD(dm_bufio_all_clients);
253 */ 252 */
254static DEFINE_MUTEX(dm_bufio_clients_lock); 253static DEFINE_MUTEX(dm_bufio_clients_lock);
255 254
255/*----------------------------------------------------------------
256 * A red/black tree acts as an index for all the buffers.
257 *--------------------------------------------------------------*/
258static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block)
259{
260 struct rb_node *n = c->buffer_tree.rb_node;
261 struct dm_buffer *b;
262
263 while (n) {
264 b = container_of(n, struct dm_buffer, node);
265
266 if (b->block == block)
267 return b;
268
269 n = (b->block < block) ? n->rb_left : n->rb_right;
270 }
271
272 return NULL;
273}
274
275static void __insert(struct dm_bufio_client *c, struct dm_buffer *b)
276{
277 struct rb_node **new = &c->buffer_tree.rb_node, *parent = NULL;
278 struct dm_buffer *found;
279
280 while (*new) {
281 found = container_of(*new, struct dm_buffer, node);
282
283 if (found->block == b->block) {
284 BUG_ON(found != b);
285 return;
286 }
287
288 parent = *new;
289 new = (found->block < b->block) ?
290 &((*new)->rb_left) : &((*new)->rb_right);
291 }
292
293 rb_link_node(&b->node, parent, new);
294 rb_insert_color(&b->node, &c->buffer_tree);
295}
296
297static void __remove(struct dm_bufio_client *c, struct dm_buffer *b)
298{
299 rb_erase(&b->node, &c->buffer_tree);
300}
301
256/*----------------------------------------------------------------*/ 302/*----------------------------------------------------------------*/
257 303
258static void adjust_total_allocated(enum data_mode data_mode, long diff) 304static void adjust_total_allocated(enum data_mode data_mode, long diff)
@@ -434,7 +480,7 @@ static void __link_buffer(struct dm_buffer *b, sector_t block, int dirty)
434 b->block = block; 480 b->block = block;
435 b->list_mode = dirty; 481 b->list_mode = dirty;
436 list_add(&b->lru_list, &c->lru[dirty]); 482 list_add(&b->lru_list, &c->lru[dirty]);
437 hlist_add_head(&b->hash_list, &c->cache_hash[DM_BUFIO_HASH(block)]); 483 __insert(b->c, b);
438 b->last_accessed = jiffies; 484 b->last_accessed = jiffies;
439} 485}
440 486
@@ -448,7 +494,7 @@ static void __unlink_buffer(struct dm_buffer *b)
448 BUG_ON(!c->n_buffers[b->list_mode]); 494 BUG_ON(!c->n_buffers[b->list_mode]);
449 495
450 c->n_buffers[b->list_mode]--; 496 c->n_buffers[b->list_mode]--;
451 hlist_del(&b->hash_list); 497 __remove(b->c, b);
452 list_del(&b->lru_list); 498 list_del(&b->lru_list);
453} 499}
454 500
@@ -532,6 +578,19 @@ static void use_dmio(struct dm_buffer *b, int rw, sector_t block,
532 end_io(&b->bio, r); 578 end_io(&b->bio, r);
533} 579}
534 580
581static void inline_endio(struct bio *bio, int error)
582{
583 bio_end_io_t *end_fn = bio->bi_private;
584
585 /*
586 * Reset the bio to free any attached resources
587 * (e.g. bio integrity profiles).
588 */
589 bio_reset(bio);
590
591 end_fn(bio, error);
592}
593
535static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block, 594static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
536 bio_end_io_t *end_io) 595 bio_end_io_t *end_io)
537{ 596{
@@ -543,7 +602,12 @@ static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
543 b->bio.bi_max_vecs = DM_BUFIO_INLINE_VECS; 602 b->bio.bi_max_vecs = DM_BUFIO_INLINE_VECS;
544 b->bio.bi_iter.bi_sector = block << b->c->sectors_per_block_bits; 603 b->bio.bi_iter.bi_sector = block << b->c->sectors_per_block_bits;
545 b->bio.bi_bdev = b->c->bdev; 604 b->bio.bi_bdev = b->c->bdev;
546 b->bio.bi_end_io = end_io; 605 b->bio.bi_end_io = inline_endio;
606 /*
607 * Use of .bi_private isn't a problem here because
608 * the dm_buffer's inline bio is local to bufio.
609 */
610 b->bio.bi_private = end_io;
547 611
548 /* 612 /*
549 * We assume that if len >= PAGE_SIZE ptr is page-aligned. 613 * We assume that if len >= PAGE_SIZE ptr is page-aligned.
@@ -887,23 +951,6 @@ static void __check_watermark(struct dm_bufio_client *c,
887 __write_dirty_buffers_async(c, 1, write_list); 951 __write_dirty_buffers_async(c, 1, write_list);
888} 952}
889 953
890/*
891 * Find a buffer in the hash.
892 */
893static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block)
894{
895 struct dm_buffer *b;
896
897 hlist_for_each_entry(b, &c->cache_hash[DM_BUFIO_HASH(block)],
898 hash_list) {
899 dm_bufio_cond_resched();
900 if (b->block == block)
901 return b;
902 }
903
904 return NULL;
905}
906
907/*---------------------------------------------------------------- 954/*----------------------------------------------------------------
908 * Getting a buffer 955 * Getting a buffer
909 *--------------------------------------------------------------*/ 956 *--------------------------------------------------------------*/
@@ -1433,45 +1480,52 @@ static void drop_buffers(struct dm_bufio_client *c)
1433} 1480}
1434 1481
1435/* 1482/*
1436 * Test if the buffer is unused and too old, and commit it. 1483 * We may not be able to evict this buffer if IO pending or the client
1484 * is still using it. Caller is expected to know buffer is too old.
1485 *
1437 * And if GFP_NOFS is used, we must not do any I/O because we hold 1486 * And if GFP_NOFS is used, we must not do any I/O because we hold
1438 * dm_bufio_clients_lock and we would risk deadlock if the I/O gets 1487 * dm_bufio_clients_lock and we would risk deadlock if the I/O gets
1439 * rerouted to different bufio client. 1488 * rerouted to different bufio client.
1440 */ 1489 */
1441static int __cleanup_old_buffer(struct dm_buffer *b, gfp_t gfp, 1490static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp)
1442 unsigned long max_jiffies)
1443{ 1491{
1444 if (jiffies - b->last_accessed < max_jiffies)
1445 return 0;
1446
1447 if (!(gfp & __GFP_FS)) { 1492 if (!(gfp & __GFP_FS)) {
1448 if (test_bit(B_READING, &b->state) || 1493 if (test_bit(B_READING, &b->state) ||
1449 test_bit(B_WRITING, &b->state) || 1494 test_bit(B_WRITING, &b->state) ||
1450 test_bit(B_DIRTY, &b->state)) 1495 test_bit(B_DIRTY, &b->state))
1451 return 0; 1496 return false;
1452 } 1497 }
1453 1498
1454 if (b->hold_count) 1499 if (b->hold_count)
1455 return 0; 1500 return false;
1456 1501
1457 __make_buffer_clean(b); 1502 __make_buffer_clean(b);
1458 __unlink_buffer(b); 1503 __unlink_buffer(b);
1459 __free_buffer_wake(b); 1504 __free_buffer_wake(b);
1460 1505
1461 return 1; 1506 return true;
1462} 1507}
1463 1508
1464static long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan, 1509static unsigned get_retain_buffers(struct dm_bufio_client *c)
1465 gfp_t gfp_mask) 1510{
1511 unsigned retain_bytes = ACCESS_ONCE(dm_bufio_retain_bytes);
1512 return retain_bytes / c->block_size;
1513}
1514
1515static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
1516 gfp_t gfp_mask)
1466{ 1517{
1467 int l; 1518 int l;
1468 struct dm_buffer *b, *tmp; 1519 struct dm_buffer *b, *tmp;
1469 long freed = 0; 1520 unsigned long freed = 0;
1521 unsigned long count = nr_to_scan;
1522 unsigned retain_target = get_retain_buffers(c);
1470 1523
1471 for (l = 0; l < LIST_SIZE; l++) { 1524 for (l = 0; l < LIST_SIZE; l++) {
1472 list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) { 1525 list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) {
1473 freed += __cleanup_old_buffer(b, gfp_mask, 0); 1526 if (__try_evict_buffer(b, gfp_mask))
1474 if (!--nr_to_scan) 1527 freed++;
1528 if (!--nr_to_scan || ((count - freed) <= retain_target))
1475 return freed; 1529 return freed;
1476 dm_bufio_cond_resched(); 1530 dm_bufio_cond_resched();
1477 } 1531 }
@@ -1533,11 +1587,7 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
1533 r = -ENOMEM; 1587 r = -ENOMEM;
1534 goto bad_client; 1588 goto bad_client;
1535 } 1589 }
1536 c->cache_hash = vmalloc(sizeof(struct hlist_head) << DM_BUFIO_HASH_BITS); 1590 c->buffer_tree = RB_ROOT;
1537 if (!c->cache_hash) {
1538 r = -ENOMEM;
1539 goto bad_hash;
1540 }
1541 1591
1542 c->bdev = bdev; 1592 c->bdev = bdev;
1543 c->block_size = block_size; 1593 c->block_size = block_size;
@@ -1556,9 +1606,6 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
1556 c->n_buffers[i] = 0; 1606 c->n_buffers[i] = 0;
1557 } 1607 }
1558 1608
1559 for (i = 0; i < 1 << DM_BUFIO_HASH_BITS; i++)
1560 INIT_HLIST_HEAD(&c->cache_hash[i]);
1561
1562 mutex_init(&c->lock); 1609 mutex_init(&c->lock);
1563 INIT_LIST_HEAD(&c->reserved_buffers); 1610 INIT_LIST_HEAD(&c->reserved_buffers);
1564 c->need_reserved_buffers = reserved_buffers; 1611 c->need_reserved_buffers = reserved_buffers;
@@ -1632,8 +1679,6 @@ bad_cache:
1632 } 1679 }
1633 dm_io_client_destroy(c->dm_io); 1680 dm_io_client_destroy(c->dm_io);
1634bad_dm_io: 1681bad_dm_io:
1635 vfree(c->cache_hash);
1636bad_hash:
1637 kfree(c); 1682 kfree(c);
1638bad_client: 1683bad_client:
1639 return ERR_PTR(r); 1684 return ERR_PTR(r);
@@ -1660,9 +1705,7 @@ void dm_bufio_client_destroy(struct dm_bufio_client *c)
1660 1705
1661 mutex_unlock(&dm_bufio_clients_lock); 1706 mutex_unlock(&dm_bufio_clients_lock);
1662 1707
1663 for (i = 0; i < 1 << DM_BUFIO_HASH_BITS; i++) 1708 BUG_ON(!RB_EMPTY_ROOT(&c->buffer_tree));
1664 BUG_ON(!hlist_empty(&c->cache_hash[i]));
1665
1666 BUG_ON(c->need_reserved_buffers); 1709 BUG_ON(c->need_reserved_buffers);
1667 1710
1668 while (!list_empty(&c->reserved_buffers)) { 1711 while (!list_empty(&c->reserved_buffers)) {
@@ -1680,36 +1723,60 @@ void dm_bufio_client_destroy(struct dm_bufio_client *c)
1680 BUG_ON(c->n_buffers[i]); 1723 BUG_ON(c->n_buffers[i]);
1681 1724
1682 dm_io_client_destroy(c->dm_io); 1725 dm_io_client_destroy(c->dm_io);
1683 vfree(c->cache_hash);
1684 kfree(c); 1726 kfree(c);
1685} 1727}
1686EXPORT_SYMBOL_GPL(dm_bufio_client_destroy); 1728EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);
1687 1729
1688static void cleanup_old_buffers(void) 1730static unsigned get_max_age_hz(void)
1689{ 1731{
1690 unsigned long max_age = ACCESS_ONCE(dm_bufio_max_age); 1732 unsigned max_age = ACCESS_ONCE(dm_bufio_max_age);
1691 struct dm_bufio_client *c;
1692 1733
1693 if (max_age > ULONG_MAX / HZ) 1734 if (max_age > UINT_MAX / HZ)
1694 max_age = ULONG_MAX / HZ; 1735 max_age = UINT_MAX / HZ;
1695 1736
1696 mutex_lock(&dm_bufio_clients_lock); 1737 return max_age * HZ;
1697 list_for_each_entry(c, &dm_bufio_all_clients, client_list) { 1738}
1698 if (!dm_bufio_trylock(c))
1699 continue;
1700 1739
1701 while (!list_empty(&c->lru[LIST_CLEAN])) { 1740static bool older_than(struct dm_buffer *b, unsigned long age_hz)
1702 struct dm_buffer *b; 1741{
1703 b = list_entry(c->lru[LIST_CLEAN].prev, 1742 return (jiffies - b->last_accessed) >= age_hz;
1704 struct dm_buffer, lru_list); 1743}
1705 if (!__cleanup_old_buffer(b, 0, max_age * HZ)) 1744
1706 break; 1745static void __evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz)
1707 dm_bufio_cond_resched(); 1746{
1708 } 1747 struct dm_buffer *b, *tmp;
1748 unsigned retain_target = get_retain_buffers(c);
1749 unsigned count;
1750
1751 dm_bufio_lock(c);
1752
1753 count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
1754 list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_CLEAN], lru_list) {
1755 if (count <= retain_target)
1756 break;
1757
1758 if (!older_than(b, age_hz))
1759 break;
1760
1761 if (__try_evict_buffer(b, 0))
1762 count--;
1709 1763
1710 dm_bufio_unlock(c);
1711 dm_bufio_cond_resched(); 1764 dm_bufio_cond_resched();
1712 } 1765 }
1766
1767 dm_bufio_unlock(c);
1768}
1769
1770static void cleanup_old_buffers(void)
1771{
1772 unsigned long max_age_hz = get_max_age_hz();
1773 struct dm_bufio_client *c;
1774
1775 mutex_lock(&dm_bufio_clients_lock);
1776
1777 list_for_each_entry(c, &dm_bufio_all_clients, client_list)
1778 __evict_old_buffers(c, max_age_hz);
1779
1713 mutex_unlock(&dm_bufio_clients_lock); 1780 mutex_unlock(&dm_bufio_clients_lock);
1714} 1781}
1715 1782
@@ -1834,6 +1901,9 @@ MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");
1834module_param_named(max_age_seconds, dm_bufio_max_age, uint, S_IRUGO | S_IWUSR); 1901module_param_named(max_age_seconds, dm_bufio_max_age, uint, S_IRUGO | S_IWUSR);
1835MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds"); 1902MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");
1836 1903
1904module_param_named(retain_bytes, dm_bufio_retain_bytes, uint, S_IRUGO | S_IWUSR);
1905MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory");
1906
1837module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, S_IRUGO | S_IWUSR); 1907module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, S_IRUGO | S_IWUSR);
1838MODULE_PARM_DESC(peak_allocated_bytes, "Tracks the maximum allocated memory"); 1908MODULE_PARM_DESC(peak_allocated_bytes, "Tracks the maximum allocated memory");
1839 1909
diff --git a/drivers/md/dm-cache-block-types.h b/drivers/md/dm-cache-block-types.h
index aac0e2df06be..bed4ad4e1b7c 100644
--- a/drivers/md/dm-cache-block-types.h
+++ b/drivers/md/dm-cache-block-types.h
@@ -19,6 +19,7 @@
19 19
20typedef dm_block_t __bitwise__ dm_oblock_t; 20typedef dm_block_t __bitwise__ dm_oblock_t;
21typedef uint32_t __bitwise__ dm_cblock_t; 21typedef uint32_t __bitwise__ dm_cblock_t;
22typedef dm_block_t __bitwise__ dm_dblock_t;
22 23
23static inline dm_oblock_t to_oblock(dm_block_t b) 24static inline dm_oblock_t to_oblock(dm_block_t b)
24{ 25{
@@ -40,4 +41,14 @@ static inline uint32_t from_cblock(dm_cblock_t b)
40 return (__force uint32_t) b; 41 return (__force uint32_t) b;
41} 42}
42 43
44static inline dm_dblock_t to_dblock(dm_block_t b)
45{
46 return (__force dm_dblock_t) b;
47}
48
49static inline dm_block_t from_dblock(dm_dblock_t b)
50{
51 return (__force dm_block_t) b;
52}
53
43#endif /* DM_CACHE_BLOCK_TYPES_H */ 54#endif /* DM_CACHE_BLOCK_TYPES_H */
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 06709257adde..9fc616c2755e 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -109,7 +109,7 @@ struct dm_cache_metadata {
109 dm_block_t discard_root; 109 dm_block_t discard_root;
110 110
111 sector_t discard_block_size; 111 sector_t discard_block_size;
112 dm_oblock_t discard_nr_blocks; 112 dm_dblock_t discard_nr_blocks;
113 113
114 sector_t data_block_size; 114 sector_t data_block_size;
115 dm_cblock_t cache_blocks; 115 dm_cblock_t cache_blocks;
@@ -329,7 +329,7 @@ static int __write_initial_superblock(struct dm_cache_metadata *cmd)
329 disk_super->hint_root = cpu_to_le64(cmd->hint_root); 329 disk_super->hint_root = cpu_to_le64(cmd->hint_root);
330 disk_super->discard_root = cpu_to_le64(cmd->discard_root); 330 disk_super->discard_root = cpu_to_le64(cmd->discard_root);
331 disk_super->discard_block_size = cpu_to_le64(cmd->discard_block_size); 331 disk_super->discard_block_size = cpu_to_le64(cmd->discard_block_size);
332 disk_super->discard_nr_blocks = cpu_to_le64(from_oblock(cmd->discard_nr_blocks)); 332 disk_super->discard_nr_blocks = cpu_to_le64(from_dblock(cmd->discard_nr_blocks));
333 disk_super->metadata_block_size = cpu_to_le32(DM_CACHE_METADATA_BLOCK_SIZE); 333 disk_super->metadata_block_size = cpu_to_le32(DM_CACHE_METADATA_BLOCK_SIZE);
334 disk_super->data_block_size = cpu_to_le32(cmd->data_block_size); 334 disk_super->data_block_size = cpu_to_le32(cmd->data_block_size);
335 disk_super->cache_blocks = cpu_to_le32(0); 335 disk_super->cache_blocks = cpu_to_le32(0);
@@ -528,7 +528,7 @@ static void read_superblock_fields(struct dm_cache_metadata *cmd,
528 cmd->hint_root = le64_to_cpu(disk_super->hint_root); 528 cmd->hint_root = le64_to_cpu(disk_super->hint_root);
529 cmd->discard_root = le64_to_cpu(disk_super->discard_root); 529 cmd->discard_root = le64_to_cpu(disk_super->discard_root);
530 cmd->discard_block_size = le64_to_cpu(disk_super->discard_block_size); 530 cmd->discard_block_size = le64_to_cpu(disk_super->discard_block_size);
531 cmd->discard_nr_blocks = to_oblock(le64_to_cpu(disk_super->discard_nr_blocks)); 531 cmd->discard_nr_blocks = to_dblock(le64_to_cpu(disk_super->discard_nr_blocks));
532 cmd->data_block_size = le32_to_cpu(disk_super->data_block_size); 532 cmd->data_block_size = le32_to_cpu(disk_super->data_block_size);
533 cmd->cache_blocks = to_cblock(le32_to_cpu(disk_super->cache_blocks)); 533 cmd->cache_blocks = to_cblock(le32_to_cpu(disk_super->cache_blocks));
534 strncpy(cmd->policy_name, disk_super->policy_name, sizeof(cmd->policy_name)); 534 strncpy(cmd->policy_name, disk_super->policy_name, sizeof(cmd->policy_name));
@@ -626,7 +626,7 @@ static int __commit_transaction(struct dm_cache_metadata *cmd,
626 disk_super->hint_root = cpu_to_le64(cmd->hint_root); 626 disk_super->hint_root = cpu_to_le64(cmd->hint_root);
627 disk_super->discard_root = cpu_to_le64(cmd->discard_root); 627 disk_super->discard_root = cpu_to_le64(cmd->discard_root);
628 disk_super->discard_block_size = cpu_to_le64(cmd->discard_block_size); 628 disk_super->discard_block_size = cpu_to_le64(cmd->discard_block_size);
629 disk_super->discard_nr_blocks = cpu_to_le64(from_oblock(cmd->discard_nr_blocks)); 629 disk_super->discard_nr_blocks = cpu_to_le64(from_dblock(cmd->discard_nr_blocks));
630 disk_super->cache_blocks = cpu_to_le32(from_cblock(cmd->cache_blocks)); 630 disk_super->cache_blocks = cpu_to_le32(from_cblock(cmd->cache_blocks));
631 strncpy(disk_super->policy_name, cmd->policy_name, sizeof(disk_super->policy_name)); 631 strncpy(disk_super->policy_name, cmd->policy_name, sizeof(disk_super->policy_name));
632 disk_super->policy_version[0] = cpu_to_le32(cmd->policy_version[0]); 632 disk_super->policy_version[0] = cpu_to_le32(cmd->policy_version[0]);
@@ -797,15 +797,15 @@ out:
797 797
798int dm_cache_discard_bitset_resize(struct dm_cache_metadata *cmd, 798int dm_cache_discard_bitset_resize(struct dm_cache_metadata *cmd,
799 sector_t discard_block_size, 799 sector_t discard_block_size,
800 dm_oblock_t new_nr_entries) 800 dm_dblock_t new_nr_entries)
801{ 801{
802 int r; 802 int r;
803 803
804 down_write(&cmd->root_lock); 804 down_write(&cmd->root_lock);
805 r = dm_bitset_resize(&cmd->discard_info, 805 r = dm_bitset_resize(&cmd->discard_info,
806 cmd->discard_root, 806 cmd->discard_root,
807 from_oblock(cmd->discard_nr_blocks), 807 from_dblock(cmd->discard_nr_blocks),
808 from_oblock(new_nr_entries), 808 from_dblock(new_nr_entries),
809 false, &cmd->discard_root); 809 false, &cmd->discard_root);
810 if (!r) { 810 if (!r) {
811 cmd->discard_block_size = discard_block_size; 811 cmd->discard_block_size = discard_block_size;
@@ -818,28 +818,28 @@ int dm_cache_discard_bitset_resize(struct dm_cache_metadata *cmd,
818 return r; 818 return r;
819} 819}
820 820
821static int __set_discard(struct dm_cache_metadata *cmd, dm_oblock_t b) 821static int __set_discard(struct dm_cache_metadata *cmd, dm_dblock_t b)
822{ 822{
823 return dm_bitset_set_bit(&cmd->discard_info, cmd->discard_root, 823 return dm_bitset_set_bit(&cmd->discard_info, cmd->discard_root,
824 from_oblock(b), &cmd->discard_root); 824 from_dblock(b), &cmd->discard_root);
825} 825}
826 826
827static int __clear_discard(struct dm_cache_metadata *cmd, dm_oblock_t b) 827static int __clear_discard(struct dm_cache_metadata *cmd, dm_dblock_t b)
828{ 828{
829 return dm_bitset_clear_bit(&cmd->discard_info, cmd->discard_root, 829 return dm_bitset_clear_bit(&cmd->discard_info, cmd->discard_root,
830 from_oblock(b), &cmd->discard_root); 830 from_dblock(b), &cmd->discard_root);
831} 831}
832 832
833static int __is_discarded(struct dm_cache_metadata *cmd, dm_oblock_t b, 833static int __is_discarded(struct dm_cache_metadata *cmd, dm_dblock_t b,
834 bool *is_discarded) 834 bool *is_discarded)
835{ 835{
836 return dm_bitset_test_bit(&cmd->discard_info, cmd->discard_root, 836 return dm_bitset_test_bit(&cmd->discard_info, cmd->discard_root,
837 from_oblock(b), &cmd->discard_root, 837 from_dblock(b), &cmd->discard_root,
838 is_discarded); 838 is_discarded);
839} 839}
840 840
841static int __discard(struct dm_cache_metadata *cmd, 841static int __discard(struct dm_cache_metadata *cmd,
842 dm_oblock_t dblock, bool discard) 842 dm_dblock_t dblock, bool discard)
843{ 843{
844 int r; 844 int r;
845 845
@@ -852,7 +852,7 @@ static int __discard(struct dm_cache_metadata *cmd,
852} 852}
853 853
854int dm_cache_set_discard(struct dm_cache_metadata *cmd, 854int dm_cache_set_discard(struct dm_cache_metadata *cmd,
855 dm_oblock_t dblock, bool discard) 855 dm_dblock_t dblock, bool discard)
856{ 856{
857 int r; 857 int r;
858 858
@@ -870,8 +870,8 @@ static int __load_discards(struct dm_cache_metadata *cmd,
870 dm_block_t b; 870 dm_block_t b;
871 bool discard; 871 bool discard;
872 872
873 for (b = 0; b < from_oblock(cmd->discard_nr_blocks); b++) { 873 for (b = 0; b < from_dblock(cmd->discard_nr_blocks); b++) {
874 dm_oblock_t dblock = to_oblock(b); 874 dm_dblock_t dblock = to_dblock(b);
875 875
876 if (cmd->clean_when_opened) { 876 if (cmd->clean_when_opened) {
877 r = __is_discarded(cmd, dblock, &discard); 877 r = __is_discarded(cmd, dblock, &discard);
diff --git a/drivers/md/dm-cache-metadata.h b/drivers/md/dm-cache-metadata.h
index 7383c90ccdb8..4ecc403be283 100644
--- a/drivers/md/dm-cache-metadata.h
+++ b/drivers/md/dm-cache-metadata.h
@@ -70,14 +70,14 @@ dm_cblock_t dm_cache_size(struct dm_cache_metadata *cmd);
70 70
71int dm_cache_discard_bitset_resize(struct dm_cache_metadata *cmd, 71int dm_cache_discard_bitset_resize(struct dm_cache_metadata *cmd,
72 sector_t discard_block_size, 72 sector_t discard_block_size,
73 dm_oblock_t new_nr_entries); 73 dm_dblock_t new_nr_entries);
74 74
75typedef int (*load_discard_fn)(void *context, sector_t discard_block_size, 75typedef int (*load_discard_fn)(void *context, sector_t discard_block_size,
76 dm_oblock_t dblock, bool discarded); 76 dm_dblock_t dblock, bool discarded);
77int dm_cache_load_discards(struct dm_cache_metadata *cmd, 77int dm_cache_load_discards(struct dm_cache_metadata *cmd,
78 load_discard_fn fn, void *context); 78 load_discard_fn fn, void *context);
79 79
80int dm_cache_set_discard(struct dm_cache_metadata *cmd, dm_oblock_t dblock, bool discard); 80int dm_cache_set_discard(struct dm_cache_metadata *cmd, dm_dblock_t dblock, bool discard);
81 81
82int dm_cache_remove_mapping(struct dm_cache_metadata *cmd, dm_cblock_t cblock); 82int dm_cache_remove_mapping(struct dm_cache_metadata *cmd, dm_cblock_t cblock);
83int dm_cache_insert_mapping(struct dm_cache_metadata *cmd, dm_cblock_t cblock, dm_oblock_t oblock); 83int dm_cache_insert_mapping(struct dm_cache_metadata *cmd, dm_cblock_t cblock, dm_oblock_t oblock);
diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c
index 0e385e40909e..13f547a4eeb6 100644
--- a/drivers/md/dm-cache-policy-mq.c
+++ b/drivers/md/dm-cache-policy-mq.c
@@ -181,24 +181,30 @@ static void queue_shift_down(struct queue *q)
181 * Gives us the oldest entry of the lowest populated level. If the first 181 * Gives us the oldest entry of the lowest populated level. If the first
182 * level is emptied then we shift down one level. 182 * level is emptied then we shift down one level.
183 */ 183 */
184static struct list_head *queue_pop(struct queue *q) 184static struct list_head *queue_peek(struct queue *q)
185{ 185{
186 unsigned level; 186 unsigned level;
187 struct list_head *r;
188 187
189 for (level = 0; level < NR_QUEUE_LEVELS; level++) 188 for (level = 0; level < NR_QUEUE_LEVELS; level++)
190 if (!list_empty(q->qs + level)) { 189 if (!list_empty(q->qs + level))
191 r = q->qs[level].next; 190 return q->qs[level].next;
192 list_del(r);
193 191
194 /* have we just emptied the bottom level? */ 192 return NULL;
195 if (level == 0 && list_empty(q->qs)) 193}
196 queue_shift_down(q);
197 194
198 return r; 195static struct list_head *queue_pop(struct queue *q)
199 } 196{
197 struct list_head *r = queue_peek(q);
200 198
201 return NULL; 199 if (r) {
200 list_del(r);
201
202 /* have we just emptied the bottom level? */
203 if (list_empty(q->qs))
204 queue_shift_down(q);
205 }
206
207 return r;
202} 208}
203 209
204static struct list_head *list_pop(struct list_head *lh) 210static struct list_head *list_pop(struct list_head *lh)
@@ -383,13 +389,6 @@ struct mq_policy {
383 unsigned generation; 389 unsigned generation;
384 unsigned generation_period; /* in lookups (will probably change) */ 390 unsigned generation_period; /* in lookups (will probably change) */
385 391
386 /*
387 * Entries in the pre_cache whose hit count passes the promotion
388 * threshold move to the cache proper. Working out the correct
389 * value for the promotion_threshold is crucial to this policy.
390 */
391 unsigned promote_threshold;
392
393 unsigned discard_promote_adjustment; 392 unsigned discard_promote_adjustment;
394 unsigned read_promote_adjustment; 393 unsigned read_promote_adjustment;
395 unsigned write_promote_adjustment; 394 unsigned write_promote_adjustment;
@@ -406,6 +405,7 @@ struct mq_policy {
406#define DEFAULT_DISCARD_PROMOTE_ADJUSTMENT 1 405#define DEFAULT_DISCARD_PROMOTE_ADJUSTMENT 1
407#define DEFAULT_READ_PROMOTE_ADJUSTMENT 4 406#define DEFAULT_READ_PROMOTE_ADJUSTMENT 4
408#define DEFAULT_WRITE_PROMOTE_ADJUSTMENT 8 407#define DEFAULT_WRITE_PROMOTE_ADJUSTMENT 8
408#define DISCOURAGE_DEMOTING_DIRTY_THRESHOLD 128
409 409
410/*----------------------------------------------------------------*/ 410/*----------------------------------------------------------------*/
411 411
@@ -518,6 +518,12 @@ static struct entry *pop(struct mq_policy *mq, struct queue *q)
518 return e; 518 return e;
519} 519}
520 520
521static struct entry *peek(struct queue *q)
522{
523 struct list_head *h = queue_peek(q);
524 return h ? container_of(h, struct entry, list) : NULL;
525}
526
521/* 527/*
522 * Has this entry already been updated? 528 * Has this entry already been updated?
523 */ 529 */
@@ -570,10 +576,6 @@ static void check_generation(struct mq_policy *mq)
570 break; 576 break;
571 } 577 }
572 } 578 }
573
574 mq->promote_threshold = nr ? total / nr : 1;
575 if (mq->promote_threshold * nr < total)
576 mq->promote_threshold++;
577 } 579 }
578} 580}
579 581
@@ -641,6 +643,30 @@ static int demote_cblock(struct mq_policy *mq, dm_oblock_t *oblock)
641} 643}
642 644
643/* 645/*
646 * Entries in the pre_cache whose hit count passes the promotion
647 * threshold move to the cache proper. Working out the correct
648 * value for the promotion_threshold is crucial to this policy.
649 */
650static unsigned promote_threshold(struct mq_policy *mq)
651{
652 struct entry *e;
653
654 if (any_free_cblocks(mq))
655 return 0;
656
657 e = peek(&mq->cache_clean);
658 if (e)
659 return e->hit_count;
660
661 e = peek(&mq->cache_dirty);
662 if (e)
663 return e->hit_count + DISCOURAGE_DEMOTING_DIRTY_THRESHOLD;
664
665 /* This should never happen */
666 return 0;
667}
668
669/*
644 * We modify the basic promotion_threshold depending on the specific io. 670 * We modify the basic promotion_threshold depending on the specific io.
645 * 671 *
646 * If the origin block has been discarded then there's no cost to copy it 672 * If the origin block has been discarded then there's no cost to copy it
@@ -653,7 +679,7 @@ static unsigned adjusted_promote_threshold(struct mq_policy *mq,
653 bool discarded_oblock, int data_dir) 679 bool discarded_oblock, int data_dir)
654{ 680{
655 if (data_dir == READ) 681 if (data_dir == READ)
656 return mq->promote_threshold + mq->read_promote_adjustment; 682 return promote_threshold(mq) + mq->read_promote_adjustment;
657 683
658 if (discarded_oblock && (any_free_cblocks(mq) || any_clean_cblocks(mq))) { 684 if (discarded_oblock && (any_free_cblocks(mq) || any_clean_cblocks(mq))) {
659 /* 685 /*
@@ -663,7 +689,7 @@ static unsigned adjusted_promote_threshold(struct mq_policy *mq,
663 return mq->discard_promote_adjustment; 689 return mq->discard_promote_adjustment;
664 } 690 }
665 691
666 return mq->promote_threshold + mq->write_promote_adjustment; 692 return promote_threshold(mq) + mq->write_promote_adjustment;
667} 693}
668 694
669static bool should_promote(struct mq_policy *mq, struct entry *e, 695static bool should_promote(struct mq_policy *mq, struct entry *e,
@@ -839,7 +865,8 @@ static int map(struct mq_policy *mq, dm_oblock_t oblock,
839 if (e && in_cache(mq, e)) 865 if (e && in_cache(mq, e))
840 r = cache_entry_found(mq, e, result); 866 r = cache_entry_found(mq, e, result);
841 867
842 else if (iot_pattern(&mq->tracker) == PATTERN_SEQUENTIAL) 868 else if (mq->tracker.thresholds[PATTERN_SEQUENTIAL] &&
869 iot_pattern(&mq->tracker) == PATTERN_SEQUENTIAL)
843 result->op = POLICY_MISS; 870 result->op = POLICY_MISS;
844 871
845 else if (e) 872 else if (e)
@@ -1230,7 +1257,6 @@ static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,
1230 mq->tick = 0; 1257 mq->tick = 0;
1231 mq->hit_count = 0; 1258 mq->hit_count = 0;
1232 mq->generation = 0; 1259 mq->generation = 0;
1233 mq->promote_threshold = 0;
1234 mq->discard_promote_adjustment = DEFAULT_DISCARD_PROMOTE_ADJUSTMENT; 1260 mq->discard_promote_adjustment = DEFAULT_DISCARD_PROMOTE_ADJUSTMENT;
1235 mq->read_promote_adjustment = DEFAULT_READ_PROMOTE_ADJUSTMENT; 1261 mq->read_promote_adjustment = DEFAULT_READ_PROMOTE_ADJUSTMENT;
1236 mq->write_promote_adjustment = DEFAULT_WRITE_PROMOTE_ADJUSTMENT; 1262 mq->write_promote_adjustment = DEFAULT_WRITE_PROMOTE_ADJUSTMENT;
@@ -1265,7 +1291,7 @@ bad_pre_cache_init:
1265 1291
1266static struct dm_cache_policy_type mq_policy_type = { 1292static struct dm_cache_policy_type mq_policy_type = {
1267 .name = "mq", 1293 .name = "mq",
1268 .version = {1, 2, 0}, 1294 .version = {1, 3, 0},
1269 .hint_size = 4, 1295 .hint_size = 4,
1270 .owner = THIS_MODULE, 1296 .owner = THIS_MODULE,
1271 .create = mq_create 1297 .create = mq_create
@@ -1273,7 +1299,7 @@ static struct dm_cache_policy_type mq_policy_type = {
1273 1299
1274static struct dm_cache_policy_type default_policy_type = { 1300static struct dm_cache_policy_type default_policy_type = {
1275 .name = "default", 1301 .name = "default",
1276 .version = {1, 2, 0}, 1302 .version = {1, 3, 0},
1277 .hint_size = 4, 1303 .hint_size = 4,
1278 .owner = THIS_MODULE, 1304 .owner = THIS_MODULE,
1279 .create = mq_create, 1305 .create = mq_create,
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 7130505c2425..1e96d7889f51 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -95,7 +95,6 @@ static void dm_unhook_bio(struct dm_hook_info *h, struct bio *bio)
95 95
96/*----------------------------------------------------------------*/ 96/*----------------------------------------------------------------*/
97 97
98#define PRISON_CELLS 1024
99#define MIGRATION_POOL_SIZE 128 98#define MIGRATION_POOL_SIZE 128
100#define COMMIT_PERIOD HZ 99#define COMMIT_PERIOD HZ
101#define MIGRATION_COUNT_WINDOW 10 100#define MIGRATION_COUNT_WINDOW 10
@@ -237,8 +236,9 @@ struct cache {
237 /* 236 /*
238 * origin_blocks entries, discarded if set. 237 * origin_blocks entries, discarded if set.
239 */ 238 */
240 dm_oblock_t discard_nr_blocks; 239 dm_dblock_t discard_nr_blocks;
241 unsigned long *discard_bitset; 240 unsigned long *discard_bitset;
241 uint32_t discard_block_size; /* a power of 2 times sectors per block */
242 242
243 /* 243 /*
244 * Rather than reconstructing the table line for the status we just 244 * Rather than reconstructing the table line for the status we just
@@ -310,6 +310,7 @@ struct dm_cache_migration {
310 dm_cblock_t cblock; 310 dm_cblock_t cblock;
311 311
312 bool err:1; 312 bool err:1;
313 bool discard:1;
313 bool writeback:1; 314 bool writeback:1;
314 bool demote:1; 315 bool demote:1;
315 bool promote:1; 316 bool promote:1;
@@ -433,11 +434,12 @@ static void prealloc_put_cell(struct prealloc *p, struct dm_bio_prison_cell *cel
433 434
434/*----------------------------------------------------------------*/ 435/*----------------------------------------------------------------*/
435 436
436static void build_key(dm_oblock_t oblock, struct dm_cell_key *key) 437static void build_key(dm_oblock_t begin, dm_oblock_t end, struct dm_cell_key *key)
437{ 438{
438 key->virtual = 0; 439 key->virtual = 0;
439 key->dev = 0; 440 key->dev = 0;
440 key->block = from_oblock(oblock); 441 key->block_begin = from_oblock(begin);
442 key->block_end = from_oblock(end);
441} 443}
442 444
443/* 445/*
@@ -447,15 +449,15 @@ static void build_key(dm_oblock_t oblock, struct dm_cell_key *key)
447 */ 449 */
448typedef void (*cell_free_fn)(void *context, struct dm_bio_prison_cell *cell); 450typedef void (*cell_free_fn)(void *context, struct dm_bio_prison_cell *cell);
449 451
450static int bio_detain(struct cache *cache, dm_oblock_t oblock, 452static int bio_detain_range(struct cache *cache, dm_oblock_t oblock_begin, dm_oblock_t oblock_end,
451 struct bio *bio, struct dm_bio_prison_cell *cell_prealloc, 453 struct bio *bio, struct dm_bio_prison_cell *cell_prealloc,
452 cell_free_fn free_fn, void *free_context, 454 cell_free_fn free_fn, void *free_context,
453 struct dm_bio_prison_cell **cell_result) 455 struct dm_bio_prison_cell **cell_result)
454{ 456{
455 int r; 457 int r;
456 struct dm_cell_key key; 458 struct dm_cell_key key;
457 459
458 build_key(oblock, &key); 460 build_key(oblock_begin, oblock_end, &key);
459 r = dm_bio_detain(cache->prison, &key, bio, cell_prealloc, cell_result); 461 r = dm_bio_detain(cache->prison, &key, bio, cell_prealloc, cell_result);
460 if (r) 462 if (r)
461 free_fn(free_context, cell_prealloc); 463 free_fn(free_context, cell_prealloc);
@@ -463,6 +465,16 @@ static int bio_detain(struct cache *cache, dm_oblock_t oblock,
463 return r; 465 return r;
464} 466}
465 467
468static int bio_detain(struct cache *cache, dm_oblock_t oblock,
469 struct bio *bio, struct dm_bio_prison_cell *cell_prealloc,
470 cell_free_fn free_fn, void *free_context,
471 struct dm_bio_prison_cell **cell_result)
472{
473 dm_oblock_t end = to_oblock(from_oblock(oblock) + 1ULL);
474 return bio_detain_range(cache, oblock, end, bio,
475 cell_prealloc, free_fn, free_context, cell_result);
476}
477
466static int get_cell(struct cache *cache, 478static int get_cell(struct cache *cache,
467 dm_oblock_t oblock, 479 dm_oblock_t oblock,
468 struct prealloc *structs, 480 struct prealloc *structs,
@@ -474,7 +486,7 @@ static int get_cell(struct cache *cache,
474 486
475 cell_prealloc = prealloc_get_cell(structs); 487 cell_prealloc = prealloc_get_cell(structs);
476 488
477 build_key(oblock, &key); 489 build_key(oblock, to_oblock(from_oblock(oblock) + 1ULL), &key);
478 r = dm_get_cell(cache->prison, &key, cell_prealloc, cell_result); 490 r = dm_get_cell(cache->prison, &key, cell_prealloc, cell_result);
479 if (r) 491 if (r)
480 prealloc_put_cell(structs, cell_prealloc); 492 prealloc_put_cell(structs, cell_prealloc);
@@ -524,33 +536,57 @@ static dm_block_t block_div(dm_block_t b, uint32_t n)
524 return b; 536 return b;
525} 537}
526 538
527static void set_discard(struct cache *cache, dm_oblock_t b) 539static dm_block_t oblocks_per_dblock(struct cache *cache)
540{
541 dm_block_t oblocks = cache->discard_block_size;
542
543 if (block_size_is_power_of_two(cache))
544 oblocks >>= cache->sectors_per_block_shift;
545 else
546 oblocks = block_div(oblocks, cache->sectors_per_block);
547
548 return oblocks;
549}
550
551static dm_dblock_t oblock_to_dblock(struct cache *cache, dm_oblock_t oblock)
552{
553 return to_dblock(block_div(from_oblock(oblock),
554 oblocks_per_dblock(cache)));
555}
556
557static dm_oblock_t dblock_to_oblock(struct cache *cache, dm_dblock_t dblock)
558{
559 return to_oblock(from_dblock(dblock) * oblocks_per_dblock(cache));
560}
561
562static void set_discard(struct cache *cache, dm_dblock_t b)
528{ 563{
529 unsigned long flags; 564 unsigned long flags;
530 565
566 BUG_ON(from_dblock(b) >= from_dblock(cache->discard_nr_blocks));
531 atomic_inc(&cache->stats.discard_count); 567 atomic_inc(&cache->stats.discard_count);
532 568
533 spin_lock_irqsave(&cache->lock, flags); 569 spin_lock_irqsave(&cache->lock, flags);
534 set_bit(from_oblock(b), cache->discard_bitset); 570 set_bit(from_dblock(b), cache->discard_bitset);
535 spin_unlock_irqrestore(&cache->lock, flags); 571 spin_unlock_irqrestore(&cache->lock, flags);
536} 572}
537 573
538static void clear_discard(struct cache *cache, dm_oblock_t b) 574static void clear_discard(struct cache *cache, dm_dblock_t b)
539{ 575{
540 unsigned long flags; 576 unsigned long flags;
541 577
542 spin_lock_irqsave(&cache->lock, flags); 578 spin_lock_irqsave(&cache->lock, flags);
543 clear_bit(from_oblock(b), cache->discard_bitset); 579 clear_bit(from_dblock(b), cache->discard_bitset);
544 spin_unlock_irqrestore(&cache->lock, flags); 580 spin_unlock_irqrestore(&cache->lock, flags);
545} 581}
546 582
547static bool is_discarded(struct cache *cache, dm_oblock_t b) 583static bool is_discarded(struct cache *cache, dm_dblock_t b)
548{ 584{
549 int r; 585 int r;
550 unsigned long flags; 586 unsigned long flags;
551 587
552 spin_lock_irqsave(&cache->lock, flags); 588 spin_lock_irqsave(&cache->lock, flags);
553 r = test_bit(from_oblock(b), cache->discard_bitset); 589 r = test_bit(from_dblock(b), cache->discard_bitset);
554 spin_unlock_irqrestore(&cache->lock, flags); 590 spin_unlock_irqrestore(&cache->lock, flags);
555 591
556 return r; 592 return r;
@@ -562,7 +598,8 @@ static bool is_discarded_oblock(struct cache *cache, dm_oblock_t b)
562 unsigned long flags; 598 unsigned long flags;
563 599
564 spin_lock_irqsave(&cache->lock, flags); 600 spin_lock_irqsave(&cache->lock, flags);
565 r = test_bit(from_oblock(b), cache->discard_bitset); 601 r = test_bit(from_dblock(oblock_to_dblock(cache, b)),
602 cache->discard_bitset);
566 spin_unlock_irqrestore(&cache->lock, flags); 603 spin_unlock_irqrestore(&cache->lock, flags);
567 604
568 return r; 605 return r;
@@ -687,7 +724,7 @@ static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
687 check_if_tick_bio_needed(cache, bio); 724 check_if_tick_bio_needed(cache, bio);
688 remap_to_origin(cache, bio); 725 remap_to_origin(cache, bio);
689 if (bio_data_dir(bio) == WRITE) 726 if (bio_data_dir(bio) == WRITE)
690 clear_discard(cache, oblock); 727 clear_discard(cache, oblock_to_dblock(cache, oblock));
691} 728}
692 729
693static void remap_to_cache_dirty(struct cache *cache, struct bio *bio, 730static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
@@ -697,7 +734,7 @@ static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
697 remap_to_cache(cache, bio, cblock); 734 remap_to_cache(cache, bio, cblock);
698 if (bio_data_dir(bio) == WRITE) { 735 if (bio_data_dir(bio) == WRITE) {
699 set_dirty(cache, oblock, cblock); 736 set_dirty(cache, oblock, cblock);
700 clear_discard(cache, oblock); 737 clear_discard(cache, oblock_to_dblock(cache, oblock));
701 } 738 }
702} 739}
703 740
@@ -951,10 +988,14 @@ static void migration_success_post_commit(struct dm_cache_migration *mg)
951 } 988 }
952 989
953 } else { 990 } else {
954 clear_dirty(cache, mg->new_oblock, mg->cblock); 991 if (mg->requeue_holder) {
955 if (mg->requeue_holder) 992 clear_dirty(cache, mg->new_oblock, mg->cblock);
956 cell_defer(cache, mg->new_ocell, true); 993 cell_defer(cache, mg->new_ocell, true);
957 else { 994 } else {
995 /*
996 * The block was promoted via an overwrite, so it's dirty.
997 */
998 set_dirty(cache, mg->new_oblock, mg->cblock);
958 bio_endio(mg->new_ocell->holder, 0); 999 bio_endio(mg->new_ocell->holder, 0);
959 cell_defer(cache, mg->new_ocell, false); 1000 cell_defer(cache, mg->new_ocell, false);
960 } 1001 }
@@ -978,7 +1019,7 @@ static void copy_complete(int read_err, unsigned long write_err, void *context)
978 wake_worker(cache); 1019 wake_worker(cache);
979} 1020}
980 1021
981static void issue_copy_real(struct dm_cache_migration *mg) 1022static void issue_copy(struct dm_cache_migration *mg)
982{ 1023{
983 int r; 1024 int r;
984 struct dm_io_region o_region, c_region; 1025 struct dm_io_region o_region, c_region;
@@ -1057,11 +1098,46 @@ static void avoid_copy(struct dm_cache_migration *mg)
1057 migration_success_pre_commit(mg); 1098 migration_success_pre_commit(mg);
1058} 1099}
1059 1100
1060static void issue_copy(struct dm_cache_migration *mg) 1101static void calc_discard_block_range(struct cache *cache, struct bio *bio,
1102 dm_dblock_t *b, dm_dblock_t *e)
1103{
1104 sector_t sb = bio->bi_iter.bi_sector;
1105 sector_t se = bio_end_sector(bio);
1106
1107 *b = to_dblock(dm_sector_div_up(sb, cache->discard_block_size));
1108
1109 if (se - sb < cache->discard_block_size)
1110 *e = *b;
1111 else
1112 *e = to_dblock(block_div(se, cache->discard_block_size));
1113}
1114
1115static void issue_discard(struct dm_cache_migration *mg)
1116{
1117 dm_dblock_t b, e;
1118 struct bio *bio = mg->new_ocell->holder;
1119
1120 calc_discard_block_range(mg->cache, bio, &b, &e);
1121 while (b != e) {
1122 set_discard(mg->cache, b);
1123 b = to_dblock(from_dblock(b) + 1);
1124 }
1125
1126 bio_endio(bio, 0);
1127 cell_defer(mg->cache, mg->new_ocell, false);
1128 free_migration(mg);
1129}
1130
1131static void issue_copy_or_discard(struct dm_cache_migration *mg)
1061{ 1132{
1062 bool avoid; 1133 bool avoid;
1063 struct cache *cache = mg->cache; 1134 struct cache *cache = mg->cache;
1064 1135
1136 if (mg->discard) {
1137 issue_discard(mg);
1138 return;
1139 }
1140
1065 if (mg->writeback || mg->demote) 1141 if (mg->writeback || mg->demote)
1066 avoid = !is_dirty(cache, mg->cblock) || 1142 avoid = !is_dirty(cache, mg->cblock) ||
1067 is_discarded_oblock(cache, mg->old_oblock); 1143 is_discarded_oblock(cache, mg->old_oblock);
@@ -1070,13 +1146,14 @@ static void issue_copy(struct dm_cache_migration *mg)
1070 1146
1071 avoid = is_discarded_oblock(cache, mg->new_oblock); 1147 avoid = is_discarded_oblock(cache, mg->new_oblock);
1072 1148
1073 if (!avoid && bio_writes_complete_block(cache, bio)) { 1149 if (writeback_mode(&cache->features) &&
1150 !avoid && bio_writes_complete_block(cache, bio)) {
1074 issue_overwrite(mg, bio); 1151 issue_overwrite(mg, bio);
1075 return; 1152 return;
1076 } 1153 }
1077 } 1154 }
1078 1155
1079 avoid ? avoid_copy(mg) : issue_copy_real(mg); 1156 avoid ? avoid_copy(mg) : issue_copy(mg);
1080} 1157}
1081 1158
1082static void complete_migration(struct dm_cache_migration *mg) 1159static void complete_migration(struct dm_cache_migration *mg)
@@ -1161,6 +1238,7 @@ static void promote(struct cache *cache, struct prealloc *structs,
1161 struct dm_cache_migration *mg = prealloc_get_migration(structs); 1238 struct dm_cache_migration *mg = prealloc_get_migration(structs);
1162 1239
1163 mg->err = false; 1240 mg->err = false;
1241 mg->discard = false;
1164 mg->writeback = false; 1242 mg->writeback = false;
1165 mg->demote = false; 1243 mg->demote = false;
1166 mg->promote = true; 1244 mg->promote = true;
@@ -1184,6 +1262,7 @@ static void writeback(struct cache *cache, struct prealloc *structs,
1184 struct dm_cache_migration *mg = prealloc_get_migration(structs); 1262 struct dm_cache_migration *mg = prealloc_get_migration(structs);
1185 1263
1186 mg->err = false; 1264 mg->err = false;
1265 mg->discard = false;
1187 mg->writeback = true; 1266 mg->writeback = true;
1188 mg->demote = false; 1267 mg->demote = false;
1189 mg->promote = false; 1268 mg->promote = false;
@@ -1209,6 +1288,7 @@ static void demote_then_promote(struct cache *cache, struct prealloc *structs,
1209 struct dm_cache_migration *mg = prealloc_get_migration(structs); 1288 struct dm_cache_migration *mg = prealloc_get_migration(structs);
1210 1289
1211 mg->err = false; 1290 mg->err = false;
1291 mg->discard = false;
1212 mg->writeback = false; 1292 mg->writeback = false;
1213 mg->demote = true; 1293 mg->demote = true;
1214 mg->promote = true; 1294 mg->promote = true;
@@ -1237,6 +1317,7 @@ static void invalidate(struct cache *cache, struct prealloc *structs,
1237 struct dm_cache_migration *mg = prealloc_get_migration(structs); 1317 struct dm_cache_migration *mg = prealloc_get_migration(structs);
1238 1318
1239 mg->err = false; 1319 mg->err = false;
1320 mg->discard = false;
1240 mg->writeback = false; 1321 mg->writeback = false;
1241 mg->demote = true; 1322 mg->demote = true;
1242 mg->promote = false; 1323 mg->promote = false;
@@ -1253,6 +1334,26 @@ static void invalidate(struct cache *cache, struct prealloc *structs,
1253 quiesce_migration(mg); 1334 quiesce_migration(mg);
1254} 1335}
1255 1336
1337static void discard(struct cache *cache, struct prealloc *structs,
1338 struct dm_bio_prison_cell *cell)
1339{
1340 struct dm_cache_migration *mg = prealloc_get_migration(structs);
1341
1342 mg->err = false;
1343 mg->discard = true;
1344 mg->writeback = false;
1345 mg->demote = false;
1346 mg->promote = false;
1347 mg->requeue_holder = false;
1348 mg->invalidate = false;
1349 mg->cache = cache;
1350 mg->old_ocell = NULL;
1351 mg->new_ocell = cell;
1352 mg->start_jiffies = jiffies;
1353
1354 quiesce_migration(mg);
1355}
1356
1256/*---------------------------------------------------------------- 1357/*----------------------------------------------------------------
1257 * bio processing 1358 * bio processing
1258 *--------------------------------------------------------------*/ 1359 *--------------------------------------------------------------*/
@@ -1286,31 +1387,27 @@ static void process_flush_bio(struct cache *cache, struct bio *bio)
1286 issue(cache, bio); 1387 issue(cache, bio);
1287} 1388}
1288 1389
1289/* 1390static void process_discard_bio(struct cache *cache, struct prealloc *structs,
1290 * People generally discard large parts of a device, eg, the whole device 1391 struct bio *bio)
1291 * when formatting. Splitting these large discards up into cache block
1292 * sized ios and then quiescing (always necessary for discard) takes too
1293 * long.
1294 *
1295 * We keep it simple, and allow any size of discard to come in, and just
1296 * mark off blocks on the discard bitset. No passdown occurs!
1297 *
1298 * To implement passdown we need to change the bio_prison such that a cell
1299 * can have a key that spans many blocks.
1300 */
1301static void process_discard_bio(struct cache *cache, struct bio *bio)
1302{ 1392{
1303 dm_block_t start_block = dm_sector_div_up(bio->bi_iter.bi_sector, 1393 int r;
1304 cache->sectors_per_block); 1394 dm_dblock_t b, e;
1305 dm_block_t end_block = bio_end_sector(bio); 1395 struct dm_bio_prison_cell *cell_prealloc, *new_ocell;
1306 dm_block_t b;
1307 1396
1308 end_block = block_div(end_block, cache->sectors_per_block); 1397 calc_discard_block_range(cache, bio, &b, &e);
1398 if (b == e) {
1399 bio_endio(bio, 0);
1400 return;
1401 }
1309 1402
1310 for (b = start_block; b < end_block; b++) 1403 cell_prealloc = prealloc_get_cell(structs);
1311 set_discard(cache, to_oblock(b)); 1404 r = bio_detain_range(cache, dblock_to_oblock(cache, b), dblock_to_oblock(cache, e), bio, cell_prealloc,
1405 (cell_free_fn) prealloc_put_cell,
1406 structs, &new_ocell);
1407 if (r > 0)
1408 return;
1312 1409
1313 bio_endio(bio, 0); 1410 discard(cache, structs, new_ocell);
1314} 1411}
1315 1412
1316static bool spare_migration_bandwidth(struct cache *cache) 1413static bool spare_migration_bandwidth(struct cache *cache)
@@ -1340,9 +1437,8 @@ static void process_bio(struct cache *cache, struct prealloc *structs,
1340 dm_oblock_t block = get_bio_block(cache, bio); 1437 dm_oblock_t block = get_bio_block(cache, bio);
1341 struct dm_bio_prison_cell *cell_prealloc, *old_ocell, *new_ocell; 1438 struct dm_bio_prison_cell *cell_prealloc, *old_ocell, *new_ocell;
1342 struct policy_result lookup_result; 1439 struct policy_result lookup_result;
1343 bool discarded_block = is_discarded_oblock(cache, block);
1344 bool passthrough = passthrough_mode(&cache->features); 1440 bool passthrough = passthrough_mode(&cache->features);
1345 bool can_migrate = !passthrough && (discarded_block || spare_migration_bandwidth(cache)); 1441 bool discarded_block, can_migrate;
1346 1442
1347 /* 1443 /*
1348 * Check to see if that block is currently migrating. 1444 * Check to see if that block is currently migrating.
@@ -1354,6 +1450,9 @@ static void process_bio(struct cache *cache, struct prealloc *structs,
1354 if (r > 0) 1450 if (r > 0)
1355 return; 1451 return;
1356 1452
1453 discarded_block = is_discarded_oblock(cache, block);
1454 can_migrate = !passthrough && (discarded_block || spare_migration_bandwidth(cache));
1455
1357 r = policy_map(cache->policy, block, true, can_migrate, discarded_block, 1456 r = policy_map(cache->policy, block, true, can_migrate, discarded_block,
1358 bio, &lookup_result); 1457 bio, &lookup_result);
1359 1458
@@ -1500,7 +1599,7 @@ static void process_deferred_bios(struct cache *cache)
1500 if (bio->bi_rw & REQ_FLUSH) 1599 if (bio->bi_rw & REQ_FLUSH)
1501 process_flush_bio(cache, bio); 1600 process_flush_bio(cache, bio);
1502 else if (bio->bi_rw & REQ_DISCARD) 1601 else if (bio->bi_rw & REQ_DISCARD)
1503 process_discard_bio(cache, bio); 1602 process_discard_bio(cache, &structs, bio);
1504 else 1603 else
1505 process_bio(cache, &structs, bio); 1604 process_bio(cache, &structs, bio);
1506 } 1605 }
@@ -1715,7 +1814,7 @@ static void do_worker(struct work_struct *ws)
1715 process_invalidation_requests(cache); 1814 process_invalidation_requests(cache);
1716 } 1815 }
1717 1816
1718 process_migrations(cache, &cache->quiesced_migrations, issue_copy); 1817 process_migrations(cache, &cache->quiesced_migrations, issue_copy_or_discard);
1719 process_migrations(cache, &cache->completed_migrations, complete_migration); 1818 process_migrations(cache, &cache->completed_migrations, complete_migration);
1720 1819
1721 if (commit_if_needed(cache)) { 1820 if (commit_if_needed(cache)) {
@@ -2180,6 +2279,45 @@ static int create_cache_policy(struct cache *cache, struct cache_args *ca,
2180 return 0; 2279 return 0;
2181} 2280}
2182 2281
2282/*
2283 * We want the discard block size to be at least the size of the cache
2284 * block size and have no more than 2^14 discard blocks across the origin.
2285 */
2286#define MAX_DISCARD_BLOCKS (1 << 14)
2287
2288static bool too_many_discard_blocks(sector_t discard_block_size,
2289 sector_t origin_size)
2290{
2291 (void) sector_div(origin_size, discard_block_size);
2292
2293 return origin_size > MAX_DISCARD_BLOCKS;
2294}
2295
2296static sector_t calculate_discard_block_size(sector_t cache_block_size,
2297 sector_t origin_size)
2298{
2299 sector_t discard_block_size = cache_block_size;
2300
2301 if (origin_size)
2302 while (too_many_discard_blocks(discard_block_size, origin_size))
2303 discard_block_size *= 2;
2304
2305 return discard_block_size;
2306}
2307
2308static void set_cache_size(struct cache *cache, dm_cblock_t size)
2309{
2310 dm_block_t nr_blocks = from_cblock(size);
2311
2312 if (nr_blocks > (1 << 20) && cache->cache_size != size)
2313 DMWARN_LIMIT("You have created a cache device with a lot of individual cache blocks (%llu)\n"
2314 "All these mappings can consume a lot of kernel memory, and take some time to read/write.\n"
2315 "Please consider increasing the cache block size to reduce the overall cache block count.",
2316 (unsigned long long) nr_blocks);
2317
2318 cache->cache_size = size;
2319}
2320
2183#define DEFAULT_MIGRATION_THRESHOLD 2048 2321#define DEFAULT_MIGRATION_THRESHOLD 2048
2184 2322
2185static int cache_create(struct cache_args *ca, struct cache **result) 2323static int cache_create(struct cache_args *ca, struct cache **result)
@@ -2204,8 +2342,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
2204 ti->num_discard_bios = 1; 2342 ti->num_discard_bios = 1;
2205 ti->discards_supported = true; 2343 ti->discards_supported = true;
2206 ti->discard_zeroes_data_unsupported = true; 2344 ti->discard_zeroes_data_unsupported = true;
2207 /* Discard bios must be split on a block boundary */ 2345 ti->split_discard_bios = false;
2208 ti->split_discard_bios = true;
2209 2346
2210 cache->features = ca->features; 2347 cache->features = ca->features;
2211 ti->per_bio_data_size = get_per_bio_data_size(cache); 2348 ti->per_bio_data_size = get_per_bio_data_size(cache);
@@ -2235,10 +2372,10 @@ static int cache_create(struct cache_args *ca, struct cache **result)
2235 2372
2236 cache->sectors_per_block_shift = -1; 2373 cache->sectors_per_block_shift = -1;
2237 cache_size = block_div(cache_size, ca->block_size); 2374 cache_size = block_div(cache_size, ca->block_size);
2238 cache->cache_size = to_cblock(cache_size); 2375 set_cache_size(cache, to_cblock(cache_size));
2239 } else { 2376 } else {
2240 cache->sectors_per_block_shift = __ffs(ca->block_size); 2377 cache->sectors_per_block_shift = __ffs(ca->block_size);
2241 cache->cache_size = to_cblock(ca->cache_sectors >> cache->sectors_per_block_shift); 2378 set_cache_size(cache, to_cblock(ca->cache_sectors >> cache->sectors_per_block_shift));
2242 } 2379 }
2243 2380
2244 r = create_cache_policy(cache, ca, error); 2381 r = create_cache_policy(cache, ca, error);
@@ -2303,13 +2440,17 @@ static int cache_create(struct cache_args *ca, struct cache **result)
2303 } 2440 }
2304 clear_bitset(cache->dirty_bitset, from_cblock(cache->cache_size)); 2441 clear_bitset(cache->dirty_bitset, from_cblock(cache->cache_size));
2305 2442
2306 cache->discard_nr_blocks = cache->origin_blocks; 2443 cache->discard_block_size =
2307 cache->discard_bitset = alloc_bitset(from_oblock(cache->discard_nr_blocks)); 2444 calculate_discard_block_size(cache->sectors_per_block,
2445 cache->origin_sectors);
2446 cache->discard_nr_blocks = to_dblock(dm_sector_div_up(cache->origin_sectors,
2447 cache->discard_block_size));
2448 cache->discard_bitset = alloc_bitset(from_dblock(cache->discard_nr_blocks));
2308 if (!cache->discard_bitset) { 2449 if (!cache->discard_bitset) {
2309 *error = "could not allocate discard bitset"; 2450 *error = "could not allocate discard bitset";
2310 goto bad; 2451 goto bad;
2311 } 2452 }
2312 clear_bitset(cache->discard_bitset, from_oblock(cache->discard_nr_blocks)); 2453 clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks));
2313 2454
2314 cache->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle); 2455 cache->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle);
2315 if (IS_ERR(cache->copier)) { 2456 if (IS_ERR(cache->copier)) {
@@ -2327,7 +2468,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
2327 INIT_DELAYED_WORK(&cache->waker, do_waker); 2468 INIT_DELAYED_WORK(&cache->waker, do_waker);
2328 cache->last_commit_jiffies = jiffies; 2469 cache->last_commit_jiffies = jiffies;
2329 2470
2330 cache->prison = dm_bio_prison_create(PRISON_CELLS); 2471 cache->prison = dm_bio_prison_create();
2331 if (!cache->prison) { 2472 if (!cache->prison) {
2332 *error = "could not create bio prison"; 2473 *error = "could not create bio prison";
2333 goto bad; 2474 goto bad;
@@ -2549,11 +2690,11 @@ static int __cache_map(struct cache *cache, struct bio *bio, struct dm_bio_priso
2549static int cache_map(struct dm_target *ti, struct bio *bio) 2690static int cache_map(struct dm_target *ti, struct bio *bio)
2550{ 2691{
2551 int r; 2692 int r;
2552 struct dm_bio_prison_cell *cell; 2693 struct dm_bio_prison_cell *cell = NULL;
2553 struct cache *cache = ti->private; 2694 struct cache *cache = ti->private;
2554 2695
2555 r = __cache_map(cache, bio, &cell); 2696 r = __cache_map(cache, bio, &cell);
2556 if (r == DM_MAPIO_REMAPPED) { 2697 if (r == DM_MAPIO_REMAPPED && cell) {
2557 inc_ds(cache, bio, cell); 2698 inc_ds(cache, bio, cell);
2558 cell_defer(cache, cell, false); 2699 cell_defer(cache, cell, false);
2559 } 2700 }
@@ -2599,16 +2740,16 @@ static int write_discard_bitset(struct cache *cache)
2599{ 2740{
2600 unsigned i, r; 2741 unsigned i, r;
2601 2742
2602 r = dm_cache_discard_bitset_resize(cache->cmd, cache->sectors_per_block, 2743 r = dm_cache_discard_bitset_resize(cache->cmd, cache->discard_block_size,
2603 cache->origin_blocks); 2744 cache->discard_nr_blocks);
2604 if (r) { 2745 if (r) {
2605 DMERR("could not resize on-disk discard bitset"); 2746 DMERR("could not resize on-disk discard bitset");
2606 return r; 2747 return r;
2607 } 2748 }
2608 2749
2609 for (i = 0; i < from_oblock(cache->discard_nr_blocks); i++) { 2750 for (i = 0; i < from_dblock(cache->discard_nr_blocks); i++) {
2610 r = dm_cache_set_discard(cache->cmd, to_oblock(i), 2751 r = dm_cache_set_discard(cache->cmd, to_dblock(i),
2611 is_discarded(cache, to_oblock(i))); 2752 is_discarded(cache, to_dblock(i)));
2612 if (r) 2753 if (r)
2613 return r; 2754 return r;
2614 } 2755 }
@@ -2680,15 +2821,86 @@ static int load_mapping(void *context, dm_oblock_t oblock, dm_cblock_t cblock,
2680 return 0; 2821 return 0;
2681} 2822}
2682 2823
2824/*
2825 * The discard block size in the on disk metadata is not
2826 * neccessarily the same as we're currently using. So we have to
2827 * be careful to only set the discarded attribute if we know it
2828 * covers a complete block of the new size.
2829 */
2830struct discard_load_info {
2831 struct cache *cache;
2832
2833 /*
2834 * These blocks are sized using the on disk dblock size, rather
2835 * than the current one.
2836 */
2837 dm_block_t block_size;
2838 dm_block_t discard_begin, discard_end;
2839};
2840
2841static void discard_load_info_init(struct cache *cache,
2842 struct discard_load_info *li)
2843{
2844 li->cache = cache;
2845 li->discard_begin = li->discard_end = 0;
2846}
2847
2848static void set_discard_range(struct discard_load_info *li)
2849{
2850 sector_t b, e;
2851
2852 if (li->discard_begin == li->discard_end)
2853 return;
2854
2855 /*
2856 * Convert to sectors.
2857 */
2858 b = li->discard_begin * li->block_size;
2859 e = li->discard_end * li->block_size;
2860
2861 /*
2862 * Then convert back to the current dblock size.
2863 */
2864 b = dm_sector_div_up(b, li->cache->discard_block_size);
2865 sector_div(e, li->cache->discard_block_size);
2866
2867 /*
2868 * The origin may have shrunk, so we need to check we're still in
2869 * bounds.
2870 */
2871 if (e > from_dblock(li->cache->discard_nr_blocks))
2872 e = from_dblock(li->cache->discard_nr_blocks);
2873
2874 for (; b < e; b++)
2875 set_discard(li->cache, to_dblock(b));
2876}
2877
2683static int load_discard(void *context, sector_t discard_block_size, 2878static int load_discard(void *context, sector_t discard_block_size,
2684 dm_oblock_t oblock, bool discard) 2879 dm_dblock_t dblock, bool discard)
2685{ 2880{
2686 struct cache *cache = context; 2881 struct discard_load_info *li = context;
2687 2882
2688 if (discard) 2883 li->block_size = discard_block_size;
2689 set_discard(cache, oblock); 2884
2690 else 2885 if (discard) {
2691 clear_discard(cache, oblock); 2886 if (from_dblock(dblock) == li->discard_end)
2887 /*
2888 * We're already in a discard range, just extend it.
2889 */
2890 li->discard_end = li->discard_end + 1ULL;
2891
2892 else {
2893 /*
2894 * Emit the old range and start a new one.
2895 */
2896 set_discard_range(li);
2897 li->discard_begin = from_dblock(dblock);
2898 li->discard_end = li->discard_begin + 1ULL;
2899 }
2900 } else {
2901 set_discard_range(li);
2902 li->discard_begin = li->discard_end = 0;
2903 }
2692 2904
2693 return 0; 2905 return 0;
2694} 2906}
@@ -2730,7 +2942,7 @@ static int resize_cache_dev(struct cache *cache, dm_cblock_t new_size)
2730 return r; 2942 return r;
2731 } 2943 }
2732 2944
2733 cache->cache_size = new_size; 2945 set_cache_size(cache, new_size);
2734 2946
2735 return 0; 2947 return 0;
2736} 2948}
@@ -2772,11 +2984,22 @@ static int cache_preresume(struct dm_target *ti)
2772 } 2984 }
2773 2985
2774 if (!cache->loaded_discards) { 2986 if (!cache->loaded_discards) {
2775 r = dm_cache_load_discards(cache->cmd, load_discard, cache); 2987 struct discard_load_info li;
2988
2989 /*
2990 * The discard bitset could have been resized, or the
2991 * discard block size changed. To be safe we start by
2992 * setting every dblock to not discarded.
2993 */
2994 clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks));
2995
2996 discard_load_info_init(cache, &li);
2997 r = dm_cache_load_discards(cache->cmd, load_discard, &li);
2776 if (r) { 2998 if (r) {
2777 DMERR("could not load origin discards"); 2999 DMERR("could not load origin discards");
2778 return r; 3000 return r;
2779 } 3001 }
3002 set_discard_range(&li);
2780 3003
2781 cache->loaded_discards = true; 3004 cache->loaded_discards = true;
2782 } 3005 }
@@ -3079,8 +3302,9 @@ static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
3079 /* 3302 /*
3080 * FIXME: these limits may be incompatible with the cache device 3303 * FIXME: these limits may be incompatible with the cache device
3081 */ 3304 */
3082 limits->max_discard_sectors = cache->sectors_per_block; 3305 limits->max_discard_sectors = min_t(sector_t, cache->discard_block_size * 1024,
3083 limits->discard_granularity = cache->sectors_per_block << SECTOR_SHIFT; 3306 cache->origin_sectors);
3307 limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT;
3084} 3308}
3085 3309
3086static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits) 3310static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
@@ -3104,7 +3328,7 @@ static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
3104 3328
3105static struct target_type cache_target = { 3329static struct target_type cache_target = {
3106 .name = "cache", 3330 .name = "cache",
3107 .version = {1, 5, 0}, 3331 .version = {1, 6, 0},
3108 .module = THIS_MODULE, 3332 .module = THIS_MODULE,
3109 .ctr = cache_ctr, 3333 .ctr = cache_ctr,
3110 .dtr = cache_dtr, 3334 .dtr = cache_dtr,
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index fc93b9330af4..08981be7baa1 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -705,7 +705,7 @@ static int crypt_iv_tcw_whitening(struct crypt_config *cc,
705 for (i = 0; i < ((1 << SECTOR_SHIFT) / 8); i++) 705 for (i = 0; i < ((1 << SECTOR_SHIFT) / 8); i++)
706 crypto_xor(data + i * 8, buf, 8); 706 crypto_xor(data + i * 8, buf, 8);
707out: 707out:
708 memset(buf, 0, sizeof(buf)); 708 memzero_explicit(buf, sizeof(buf));
709 return r; 709 return r;
710} 710}
711 711
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 0be9381365d7..73f791bb9ea4 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -684,11 +684,14 @@ static void __dev_status(struct mapped_device *md, struct dm_ioctl *param)
684 int srcu_idx; 684 int srcu_idx;
685 685
686 param->flags &= ~(DM_SUSPEND_FLAG | DM_READONLY_FLAG | 686 param->flags &= ~(DM_SUSPEND_FLAG | DM_READONLY_FLAG |
687 DM_ACTIVE_PRESENT_FLAG); 687 DM_ACTIVE_PRESENT_FLAG | DM_INTERNAL_SUSPEND_FLAG);
688 688
689 if (dm_suspended_md(md)) 689 if (dm_suspended_md(md))
690 param->flags |= DM_SUSPEND_FLAG; 690 param->flags |= DM_SUSPEND_FLAG;
691 691
692 if (dm_suspended_internally_md(md))
693 param->flags |= DM_INTERNAL_SUSPEND_FLAG;
694
692 if (dm_test_deferred_remove_flag(md)) 695 if (dm_test_deferred_remove_flag(md))
693 param->flags |= DM_DEFERRED_REMOVE; 696 param->flags |= DM_DEFERRED_REMOVE;
694 697
diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
index 87f86c77b094..f478a4c96d2f 100644
--- a/drivers/md/dm-stats.c
+++ b/drivers/md/dm-stats.c
@@ -824,7 +824,7 @@ static int message_stats_create(struct mapped_device *md,
824 return 1; 824 return 1;
825 825
826 id = dm_stats_create(dm_get_stats(md), start, end, step, program_id, aux_data, 826 id = dm_stats_create(dm_get_stats(md), start, end, step, program_id, aux_data,
827 dm_internal_suspend, dm_internal_resume, md); 827 dm_internal_suspend_fast, dm_internal_resume_fast, md);
828 if (id < 0) 828 if (id < 0)
829 return id; 829 return id;
830 830
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index b2bd1ebf4562..3afae9e062f8 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1521,18 +1521,32 @@ fmode_t dm_table_get_mode(struct dm_table *t)
1521} 1521}
1522EXPORT_SYMBOL(dm_table_get_mode); 1522EXPORT_SYMBOL(dm_table_get_mode);
1523 1523
1524static void suspend_targets(struct dm_table *t, unsigned postsuspend) 1524enum suspend_mode {
1525 PRESUSPEND,
1526 PRESUSPEND_UNDO,
1527 POSTSUSPEND,
1528};
1529
1530static void suspend_targets(struct dm_table *t, enum suspend_mode mode)
1525{ 1531{
1526 int i = t->num_targets; 1532 int i = t->num_targets;
1527 struct dm_target *ti = t->targets; 1533 struct dm_target *ti = t->targets;
1528 1534
1529 while (i--) { 1535 while (i--) {
1530 if (postsuspend) { 1536 switch (mode) {
1537 case PRESUSPEND:
1538 if (ti->type->presuspend)
1539 ti->type->presuspend(ti);
1540 break;
1541 case PRESUSPEND_UNDO:
1542 if (ti->type->presuspend_undo)
1543 ti->type->presuspend_undo(ti);
1544 break;
1545 case POSTSUSPEND:
1531 if (ti->type->postsuspend) 1546 if (ti->type->postsuspend)
1532 ti->type->postsuspend(ti); 1547 ti->type->postsuspend(ti);
1533 } else if (ti->type->presuspend) 1548 break;
1534 ti->type->presuspend(ti); 1549 }
1535
1536 ti++; 1550 ti++;
1537 } 1551 }
1538} 1552}
@@ -1542,7 +1556,15 @@ void dm_table_presuspend_targets(struct dm_table *t)
1542 if (!t) 1556 if (!t)
1543 return; 1557 return;
1544 1558
1545 suspend_targets(t, 0); 1559 suspend_targets(t, PRESUSPEND);
1560}
1561
1562void dm_table_presuspend_undo_targets(struct dm_table *t)
1563{
1564 if (!t)
1565 return;
1566
1567 suspend_targets(t, PRESUSPEND_UNDO);
1546} 1568}
1547 1569
1548void dm_table_postsuspend_targets(struct dm_table *t) 1570void dm_table_postsuspend_targets(struct dm_table *t)
@@ -1550,7 +1572,7 @@ void dm_table_postsuspend_targets(struct dm_table *t)
1550 if (!t) 1572 if (!t)
1551 return; 1573 return;
1552 1574
1553 suspend_targets(t, 1); 1575 suspend_targets(t, POSTSUSPEND);
1554} 1576}
1555 1577
1556int dm_table_resume_targets(struct dm_table *t) 1578int dm_table_resume_targets(struct dm_table *t)
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index e9d33ad59df5..43adbb863f5a 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -1384,42 +1384,38 @@ static bool __snapshotted_since(struct dm_thin_device *td, uint32_t time)
1384} 1384}
1385 1385
1386int dm_thin_find_block(struct dm_thin_device *td, dm_block_t block, 1386int dm_thin_find_block(struct dm_thin_device *td, dm_block_t block,
1387 int can_block, struct dm_thin_lookup_result *result) 1387 int can_issue_io, struct dm_thin_lookup_result *result)
1388{ 1388{
1389 int r = -EINVAL; 1389 int r;
1390 uint64_t block_time = 0;
1391 __le64 value; 1390 __le64 value;
1392 struct dm_pool_metadata *pmd = td->pmd; 1391 struct dm_pool_metadata *pmd = td->pmd;
1393 dm_block_t keys[2] = { td->id, block }; 1392 dm_block_t keys[2] = { td->id, block };
1394 struct dm_btree_info *info; 1393 struct dm_btree_info *info;
1395 1394
1396 if (can_block) {
1397 down_read(&pmd->root_lock);
1398 info = &pmd->info;
1399 } else if (down_read_trylock(&pmd->root_lock))
1400 info = &pmd->nb_info;
1401 else
1402 return -EWOULDBLOCK;
1403
1404 if (pmd->fail_io) 1395 if (pmd->fail_io)
1405 goto out; 1396 return -EINVAL;
1406 1397
1407 r = dm_btree_lookup(info, pmd->root, keys, &value); 1398 down_read(&pmd->root_lock);
1408 if (!r)
1409 block_time = le64_to_cpu(value);
1410 1399
1411out: 1400 if (can_issue_io) {
1412 up_read(&pmd->root_lock); 1401 info = &pmd->info;
1402 } else
1403 info = &pmd->nb_info;
1413 1404
1405 r = dm_btree_lookup(info, pmd->root, keys, &value);
1414 if (!r) { 1406 if (!r) {
1407 uint64_t block_time = 0;
1415 dm_block_t exception_block; 1408 dm_block_t exception_block;
1416 uint32_t exception_time; 1409 uint32_t exception_time;
1410
1411 block_time = le64_to_cpu(value);
1417 unpack_block_time(block_time, &exception_block, 1412 unpack_block_time(block_time, &exception_block,
1418 &exception_time); 1413 &exception_time);
1419 result->block = exception_block; 1414 result->block = exception_block;
1420 result->shared = __snapshotted_since(td, exception_time); 1415 result->shared = __snapshotted_since(td, exception_time);
1421 } 1416 }
1422 1417
1418 up_read(&pmd->root_lock);
1423 return r; 1419 return r;
1424} 1420}
1425 1421
@@ -1813,3 +1809,8 @@ bool dm_pool_metadata_needs_check(struct dm_pool_metadata *pmd)
1813 1809
1814 return needs_check; 1810 return needs_check;
1815} 1811}
1812
1813void dm_pool_issue_prefetches(struct dm_pool_metadata *pmd)
1814{
1815 dm_tm_issue_prefetches(pmd->tm);
1816}
diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h
index e3c857db195a..921d15ee56a0 100644
--- a/drivers/md/dm-thin-metadata.h
+++ b/drivers/md/dm-thin-metadata.h
@@ -139,12 +139,12 @@ struct dm_thin_lookup_result {
139 139
140/* 140/*
141 * Returns: 141 * Returns:
142 * -EWOULDBLOCK iff @can_block is set and would block. 142 * -EWOULDBLOCK iff @can_issue_io is set and would issue IO
143 * -ENODATA iff that mapping is not present. 143 * -ENODATA iff that mapping is not present.
144 * 0 success 144 * 0 success
145 */ 145 */
146int dm_thin_find_block(struct dm_thin_device *td, dm_block_t block, 146int dm_thin_find_block(struct dm_thin_device *td, dm_block_t block,
147 int can_block, struct dm_thin_lookup_result *result); 147 int can_issue_io, struct dm_thin_lookup_result *result);
148 148
149/* 149/*
150 * Obtain an unused block. 150 * Obtain an unused block.
@@ -213,6 +213,11 @@ int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,
213int dm_pool_metadata_set_needs_check(struct dm_pool_metadata *pmd); 213int dm_pool_metadata_set_needs_check(struct dm_pool_metadata *pmd);
214bool dm_pool_metadata_needs_check(struct dm_pool_metadata *pmd); 214bool dm_pool_metadata_needs_check(struct dm_pool_metadata *pmd);
215 215
216/*
217 * Issue any prefetches that may be useful.
218 */
219void dm_pool_issue_prefetches(struct dm_pool_metadata *pmd);
220
216/*----------------------------------------------------------------*/ 221/*----------------------------------------------------------------*/
217 222
218#endif 223#endif
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 0f86d802b533..8735543eacdb 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -11,11 +11,13 @@
11#include <linux/device-mapper.h> 11#include <linux/device-mapper.h>
12#include <linux/dm-io.h> 12#include <linux/dm-io.h>
13#include <linux/dm-kcopyd.h> 13#include <linux/dm-kcopyd.h>
14#include <linux/log2.h>
14#include <linux/list.h> 15#include <linux/list.h>
15#include <linux/rculist.h> 16#include <linux/rculist.h>
16#include <linux/init.h> 17#include <linux/init.h>
17#include <linux/module.h> 18#include <linux/module.h>
18#include <linux/slab.h> 19#include <linux/slab.h>
20#include <linux/sort.h>
19#include <linux/rbtree.h> 21#include <linux/rbtree.h>
20 22
21#define DM_MSG_PREFIX "thin" 23#define DM_MSG_PREFIX "thin"
@@ -25,7 +27,6 @@
25 */ 27 */
26#define ENDIO_HOOK_POOL_SIZE 1024 28#define ENDIO_HOOK_POOL_SIZE 1024
27#define MAPPING_POOL_SIZE 1024 29#define MAPPING_POOL_SIZE 1024
28#define PRISON_CELLS 1024
29#define COMMIT_PERIOD HZ 30#define COMMIT_PERIOD HZ
30#define NO_SPACE_TIMEOUT_SECS 60 31#define NO_SPACE_TIMEOUT_SECS 60
31 32
@@ -114,7 +115,8 @@ static void build_data_key(struct dm_thin_device *td,
114{ 115{
115 key->virtual = 0; 116 key->virtual = 0;
116 key->dev = dm_thin_dev_id(td); 117 key->dev = dm_thin_dev_id(td);
117 key->block = b; 118 key->block_begin = b;
119 key->block_end = b + 1ULL;
118} 120}
119 121
120static void build_virtual_key(struct dm_thin_device *td, dm_block_t b, 122static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
@@ -122,7 +124,55 @@ static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
122{ 124{
123 key->virtual = 1; 125 key->virtual = 1;
124 key->dev = dm_thin_dev_id(td); 126 key->dev = dm_thin_dev_id(td);
125 key->block = b; 127 key->block_begin = b;
128 key->block_end = b + 1ULL;
129}
130
131/*----------------------------------------------------------------*/
132
133#define THROTTLE_THRESHOLD (1 * HZ)
134
135struct throttle {
136 struct rw_semaphore lock;
137 unsigned long threshold;
138 bool throttle_applied;
139};
140
141static void throttle_init(struct throttle *t)
142{
143 init_rwsem(&t->lock);
144 t->throttle_applied = false;
145}
146
147static void throttle_work_start(struct throttle *t)
148{
149 t->threshold = jiffies + THROTTLE_THRESHOLD;
150}
151
152static void throttle_work_update(struct throttle *t)
153{
154 if (!t->throttle_applied && jiffies > t->threshold) {
155 down_write(&t->lock);
156 t->throttle_applied = true;
157 }
158}
159
160static void throttle_work_complete(struct throttle *t)
161{
162 if (t->throttle_applied) {
163 t->throttle_applied = false;
164 up_write(&t->lock);
165 }
166}
167
168static void throttle_lock(struct throttle *t)
169{
170 down_read(&t->lock);
171}
172
173static void throttle_unlock(struct throttle *t)
174{
175 up_read(&t->lock);
126} 176}
127 177
128/*----------------------------------------------------------------*/ 178/*----------------------------------------------------------------*/
@@ -155,8 +205,11 @@ struct pool_features {
155 205
156struct thin_c; 206struct thin_c;
157typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio); 207typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio);
208typedef void (*process_cell_fn)(struct thin_c *tc, struct dm_bio_prison_cell *cell);
158typedef void (*process_mapping_fn)(struct dm_thin_new_mapping *m); 209typedef void (*process_mapping_fn)(struct dm_thin_new_mapping *m);
159 210
211#define CELL_SORT_ARRAY_SIZE 8192
212
160struct pool { 213struct pool {
161 struct list_head list; 214 struct list_head list;
162 struct dm_target *ti; /* Only set if a pool target is bound */ 215 struct dm_target *ti; /* Only set if a pool target is bound */
@@ -171,11 +224,13 @@ struct pool {
171 224
172 struct pool_features pf; 225 struct pool_features pf;
173 bool low_water_triggered:1; /* A dm event has been sent */ 226 bool low_water_triggered:1; /* A dm event has been sent */
227 bool suspended:1;
174 228
175 struct dm_bio_prison *prison; 229 struct dm_bio_prison *prison;
176 struct dm_kcopyd_client *copier; 230 struct dm_kcopyd_client *copier;
177 231
178 struct workqueue_struct *wq; 232 struct workqueue_struct *wq;
233 struct throttle throttle;
179 struct work_struct worker; 234 struct work_struct worker;
180 struct delayed_work waker; 235 struct delayed_work waker;
181 struct delayed_work no_space_timeout; 236 struct delayed_work no_space_timeout;
@@ -198,8 +253,13 @@ struct pool {
198 process_bio_fn process_bio; 253 process_bio_fn process_bio;
199 process_bio_fn process_discard; 254 process_bio_fn process_discard;
200 255
256 process_cell_fn process_cell;
257 process_cell_fn process_discard_cell;
258
201 process_mapping_fn process_prepared_mapping; 259 process_mapping_fn process_prepared_mapping;
202 process_mapping_fn process_prepared_discard; 260 process_mapping_fn process_prepared_discard;
261
262 struct dm_bio_prison_cell *cell_sort_array[CELL_SORT_ARRAY_SIZE];
203}; 263};
204 264
205static enum pool_mode get_pool_mode(struct pool *pool); 265static enum pool_mode get_pool_mode(struct pool *pool);
@@ -232,8 +292,11 @@ struct thin_c {
232 292
233 struct pool *pool; 293 struct pool *pool;
234 struct dm_thin_device *td; 294 struct dm_thin_device *td;
295 struct mapped_device *thin_md;
296
235 bool requeue_mode:1; 297 bool requeue_mode:1;
236 spinlock_t lock; 298 spinlock_t lock;
299 struct list_head deferred_cells;
237 struct bio_list deferred_bio_list; 300 struct bio_list deferred_bio_list;
238 struct bio_list retry_on_resume_list; 301 struct bio_list retry_on_resume_list;
239 struct rb_root sort_bio_list; /* sorted list of deferred bios */ 302 struct rb_root sort_bio_list; /* sorted list of deferred bios */
@@ -290,6 +353,15 @@ static void cell_release(struct pool *pool,
290 dm_bio_prison_free_cell(pool->prison, cell); 353 dm_bio_prison_free_cell(pool->prison, cell);
291} 354}
292 355
356static void cell_visit_release(struct pool *pool,
357 void (*fn)(void *, struct dm_bio_prison_cell *),
358 void *context,
359 struct dm_bio_prison_cell *cell)
360{
361 dm_cell_visit_release(pool->prison, fn, context, cell);
362 dm_bio_prison_free_cell(pool->prison, cell);
363}
364
293static void cell_release_no_holder(struct pool *pool, 365static void cell_release_no_holder(struct pool *pool,
294 struct dm_bio_prison_cell *cell, 366 struct dm_bio_prison_cell *cell,
295 struct bio_list *bios) 367 struct bio_list *bios)
@@ -298,19 +370,6 @@ static void cell_release_no_holder(struct pool *pool,
298 dm_bio_prison_free_cell(pool->prison, cell); 370 dm_bio_prison_free_cell(pool->prison, cell);
299} 371}
300 372
301static void cell_defer_no_holder_no_free(struct thin_c *tc,
302 struct dm_bio_prison_cell *cell)
303{
304 struct pool *pool = tc->pool;
305 unsigned long flags;
306
307 spin_lock_irqsave(&tc->lock, flags);
308 dm_cell_release_no_holder(pool->prison, cell, &tc->deferred_bio_list);
309 spin_unlock_irqrestore(&tc->lock, flags);
310
311 wake_worker(pool);
312}
313
314static void cell_error_with_code(struct pool *pool, 373static void cell_error_with_code(struct pool *pool,
315 struct dm_bio_prison_cell *cell, int error_code) 374 struct dm_bio_prison_cell *cell, int error_code)
316{ 375{
@@ -323,6 +382,16 @@ static void cell_error(struct pool *pool, struct dm_bio_prison_cell *cell)
323 cell_error_with_code(pool, cell, -EIO); 382 cell_error_with_code(pool, cell, -EIO);
324} 383}
325 384
385static void cell_success(struct pool *pool, struct dm_bio_prison_cell *cell)
386{
387 cell_error_with_code(pool, cell, 0);
388}
389
390static void cell_requeue(struct pool *pool, struct dm_bio_prison_cell *cell)
391{
392 cell_error_with_code(pool, cell, DM_ENDIO_REQUEUE);
393}
394
326/*----------------------------------------------------------------*/ 395/*----------------------------------------------------------------*/
327 396
328/* 397/*
@@ -393,44 +462,65 @@ struct dm_thin_endio_hook {
393 struct rb_node rb_node; 462 struct rb_node rb_node;
394}; 463};
395 464
396static void requeue_bio_list(struct thin_c *tc, struct bio_list *master) 465static void __merge_bio_list(struct bio_list *bios, struct bio_list *master)
466{
467 bio_list_merge(bios, master);
468 bio_list_init(master);
469}
470
471static void error_bio_list(struct bio_list *bios, int error)
397{ 472{
398 struct bio *bio; 473 struct bio *bio;
474
475 while ((bio = bio_list_pop(bios)))
476 bio_endio(bio, error);
477}
478
479static void error_thin_bio_list(struct thin_c *tc, struct bio_list *master, int error)
480{
399 struct bio_list bios; 481 struct bio_list bios;
400 unsigned long flags; 482 unsigned long flags;
401 483
402 bio_list_init(&bios); 484 bio_list_init(&bios);
403 485
404 spin_lock_irqsave(&tc->lock, flags); 486 spin_lock_irqsave(&tc->lock, flags);
405 bio_list_merge(&bios, master); 487 __merge_bio_list(&bios, master);
406 bio_list_init(master);
407 spin_unlock_irqrestore(&tc->lock, flags); 488 spin_unlock_irqrestore(&tc->lock, flags);
408 489
409 while ((bio = bio_list_pop(&bios))) 490 error_bio_list(&bios, error);
410 bio_endio(bio, DM_ENDIO_REQUEUE);
411} 491}
412 492
413static void requeue_io(struct thin_c *tc) 493static void requeue_deferred_cells(struct thin_c *tc)
414{ 494{
415 requeue_bio_list(tc, &tc->deferred_bio_list); 495 struct pool *pool = tc->pool;
416 requeue_bio_list(tc, &tc->retry_on_resume_list); 496 unsigned long flags;
497 struct list_head cells;
498 struct dm_bio_prison_cell *cell, *tmp;
499
500 INIT_LIST_HEAD(&cells);
501
502 spin_lock_irqsave(&tc->lock, flags);
503 list_splice_init(&tc->deferred_cells, &cells);
504 spin_unlock_irqrestore(&tc->lock, flags);
505
506 list_for_each_entry_safe(cell, tmp, &cells, user_list)
507 cell_requeue(pool, cell);
417} 508}
418 509
419static void error_thin_retry_list(struct thin_c *tc) 510static void requeue_io(struct thin_c *tc)
420{ 511{
421 struct bio *bio;
422 unsigned long flags;
423 struct bio_list bios; 512 struct bio_list bios;
513 unsigned long flags;
424 514
425 bio_list_init(&bios); 515 bio_list_init(&bios);
426 516
427 spin_lock_irqsave(&tc->lock, flags); 517 spin_lock_irqsave(&tc->lock, flags);
428 bio_list_merge(&bios, &tc->retry_on_resume_list); 518 __merge_bio_list(&bios, &tc->deferred_bio_list);
429 bio_list_init(&tc->retry_on_resume_list); 519 __merge_bio_list(&bios, &tc->retry_on_resume_list);
430 spin_unlock_irqrestore(&tc->lock, flags); 520 spin_unlock_irqrestore(&tc->lock, flags);
431 521
432 while ((bio = bio_list_pop(&bios))) 522 error_bio_list(&bios, DM_ENDIO_REQUEUE);
433 bio_io_error(bio); 523 requeue_deferred_cells(tc);
434} 524}
435 525
436static void error_retry_list(struct pool *pool) 526static void error_retry_list(struct pool *pool)
@@ -439,7 +529,7 @@ static void error_retry_list(struct pool *pool)
439 529
440 rcu_read_lock(); 530 rcu_read_lock();
441 list_for_each_entry_rcu(tc, &pool->active_thins, list) 531 list_for_each_entry_rcu(tc, &pool->active_thins, list)
442 error_thin_retry_list(tc); 532 error_thin_bio_list(tc, &tc->retry_on_resume_list, -EIO);
443 rcu_read_unlock(); 533 rcu_read_unlock();
444} 534}
445 535
@@ -629,33 +719,75 @@ static void overwrite_endio(struct bio *bio, int err)
629 */ 719 */
630 720
631/* 721/*
632 * This sends the bios in the cell back to the deferred_bios list. 722 * This sends the bios in the cell, except the original holder, back
723 * to the deferred_bios list.
633 */ 724 */
634static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell) 725static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *cell)
635{ 726{
636 struct pool *pool = tc->pool; 727 struct pool *pool = tc->pool;
637 unsigned long flags; 728 unsigned long flags;
638 729
639 spin_lock_irqsave(&tc->lock, flags); 730 spin_lock_irqsave(&tc->lock, flags);
640 cell_release(pool, cell, &tc->deferred_bio_list); 731 cell_release_no_holder(pool, cell, &tc->deferred_bio_list);
641 spin_unlock_irqrestore(&tc->lock, flags); 732 spin_unlock_irqrestore(&tc->lock, flags);
642 733
643 wake_worker(pool); 734 wake_worker(pool);
644} 735}
645 736
646/* 737static void thin_defer_bio(struct thin_c *tc, struct bio *bio);
647 * Same as cell_defer above, except it omits the original holder of the cell. 738
648 */ 739struct remap_info {
649static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *cell) 740 struct thin_c *tc;
741 struct bio_list defer_bios;
742 struct bio_list issue_bios;
743};
744
745static void __inc_remap_and_issue_cell(void *context,
746 struct dm_bio_prison_cell *cell)
650{ 747{
651 struct pool *pool = tc->pool; 748 struct remap_info *info = context;
652 unsigned long flags; 749 struct bio *bio;
653 750
654 spin_lock_irqsave(&tc->lock, flags); 751 while ((bio = bio_list_pop(&cell->bios))) {
655 cell_release_no_holder(pool, cell, &tc->deferred_bio_list); 752 if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA))
656 spin_unlock_irqrestore(&tc->lock, flags); 753 bio_list_add(&info->defer_bios, bio);
754 else {
755 inc_all_io_entry(info->tc->pool, bio);
657 756
658 wake_worker(pool); 757 /*
758 * We can't issue the bios with the bio prison lock
759 * held, so we add them to a list to issue on
760 * return from this function.
761 */
762 bio_list_add(&info->issue_bios, bio);
763 }
764 }
765}
766
767static void inc_remap_and_issue_cell(struct thin_c *tc,
768 struct dm_bio_prison_cell *cell,
769 dm_block_t block)
770{
771 struct bio *bio;
772 struct remap_info info;
773
774 info.tc = tc;
775 bio_list_init(&info.defer_bios);
776 bio_list_init(&info.issue_bios);
777
778 /*
779 * We have to be careful to inc any bios we're about to issue
780 * before the cell is released, and avoid a race with new bios
781 * being added to the cell.
782 */
783 cell_visit_release(tc->pool, __inc_remap_and_issue_cell,
784 &info, cell);
785
786 while ((bio = bio_list_pop(&info.defer_bios)))
787 thin_defer_bio(tc, bio);
788
789 while ((bio = bio_list_pop(&info.issue_bios)))
790 remap_and_issue(info.tc, bio, block);
659} 791}
660 792
661static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m) 793static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
@@ -706,10 +838,13 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
706 * the bios in the cell. 838 * the bios in the cell.
707 */ 839 */
708 if (bio) { 840 if (bio) {
709 cell_defer_no_holder(tc, m->cell); 841 inc_remap_and_issue_cell(tc, m->cell, m->data_block);
710 bio_endio(bio, 0); 842 bio_endio(bio, 0);
711 } else 843 } else {
712 cell_defer(tc, m->cell); 844 inc_all_io_entry(tc->pool, m->cell->holder);
845 remap_and_issue(tc, m->cell->holder, m->data_block);
846 inc_remap_and_issue_cell(tc, m->cell, m->data_block);
847 }
713 848
714out: 849out:
715 list_del(&m->list); 850 list_del(&m->list);
@@ -842,6 +977,20 @@ static void ll_zero(struct thin_c *tc, struct dm_thin_new_mapping *m,
842 } 977 }
843} 978}
844 979
980static void remap_and_issue_overwrite(struct thin_c *tc, struct bio *bio,
981 dm_block_t data_block,
982 struct dm_thin_new_mapping *m)
983{
984 struct pool *pool = tc->pool;
985 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
986
987 h->overwrite_mapping = m;
988 m->bio = bio;
989 save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
990 inc_all_io_entry(pool, bio);
991 remap_and_issue(tc, bio, data_block);
992}
993
845/* 994/*
846 * A partial copy also needs to zero the uncopied region. 995 * A partial copy also needs to zero the uncopied region.
847 */ 996 */
@@ -876,15 +1025,9 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
876 * If the whole block of data is being overwritten, we can issue the 1025 * If the whole block of data is being overwritten, we can issue the
877 * bio immediately. Otherwise we use kcopyd to clone the data first. 1026 * bio immediately. Otherwise we use kcopyd to clone the data first.
878 */ 1027 */
879 if (io_overwrites_block(pool, bio)) { 1028 if (io_overwrites_block(pool, bio))
880 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); 1029 remap_and_issue_overwrite(tc, bio, data_dest, m);
881 1030 else {
882 h->overwrite_mapping = m;
883 m->bio = bio;
884 save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
885 inc_all_io_entry(pool, bio);
886 remap_and_issue(tc, bio, data_dest);
887 } else {
888 struct dm_io_region from, to; 1031 struct dm_io_region from, to;
889 1032
890 from.bdev = origin->bdev; 1033 from.bdev = origin->bdev;
@@ -953,16 +1096,10 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
953 if (!pool->pf.zero_new_blocks) 1096 if (!pool->pf.zero_new_blocks)
954 process_prepared_mapping(m); 1097 process_prepared_mapping(m);
955 1098
956 else if (io_overwrites_block(pool, bio)) { 1099 else if (io_overwrites_block(pool, bio))
957 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); 1100 remap_and_issue_overwrite(tc, bio, data_block, m);
958
959 h->overwrite_mapping = m;
960 m->bio = bio;
961 save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
962 inc_all_io_entry(pool, bio);
963 remap_and_issue(tc, bio, data_block);
964 1101
965 } else 1102 else
966 ll_zero(tc, m, 1103 ll_zero(tc, m,
967 data_block * pool->sectors_per_block, 1104 data_block * pool->sectors_per_block,
968 (data_block + 1) * pool->sectors_per_block); 1105 (data_block + 1) * pool->sectors_per_block);
@@ -1134,29 +1271,25 @@ static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *c
1134 bio_list_init(&bios); 1271 bio_list_init(&bios);
1135 cell_release(pool, cell, &bios); 1272 cell_release(pool, cell, &bios);
1136 1273
1137 error = should_error_unserviceable_bio(pool); 1274 while ((bio = bio_list_pop(&bios)))
1138 if (error) 1275 retry_on_resume(bio);
1139 while ((bio = bio_list_pop(&bios)))
1140 bio_endio(bio, error);
1141 else
1142 while ((bio = bio_list_pop(&bios)))
1143 retry_on_resume(bio);
1144} 1276}
1145 1277
1146static void process_discard(struct thin_c *tc, struct bio *bio) 1278static void process_discard_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell)
1147{ 1279{
1148 int r; 1280 int r;
1149 unsigned long flags; 1281 struct bio *bio = cell->holder;
1150 struct pool *pool = tc->pool; 1282 struct pool *pool = tc->pool;
1151 struct dm_bio_prison_cell *cell, *cell2; 1283 struct dm_bio_prison_cell *cell2;
1152 struct dm_cell_key key, key2; 1284 struct dm_cell_key key2;
1153 dm_block_t block = get_bio_block(tc, bio); 1285 dm_block_t block = get_bio_block(tc, bio);
1154 struct dm_thin_lookup_result lookup_result; 1286 struct dm_thin_lookup_result lookup_result;
1155 struct dm_thin_new_mapping *m; 1287 struct dm_thin_new_mapping *m;
1156 1288
1157 build_virtual_key(tc->td, block, &key); 1289 if (tc->requeue_mode) {
1158 if (bio_detain(tc->pool, &key, bio, &cell)) 1290 cell_requeue(pool, cell);
1159 return; 1291 return;
1292 }
1160 1293
1161 r = dm_thin_find_block(tc->td, block, 1, &lookup_result); 1294 r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
1162 switch (r) { 1295 switch (r) {
@@ -1187,12 +1320,9 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
1187 m->cell2 = cell2; 1320 m->cell2 = cell2;
1188 m->bio = bio; 1321 m->bio = bio;
1189 1322
1190 if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list)) { 1323 if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list))
1191 spin_lock_irqsave(&pool->lock, flags); 1324 pool->process_prepared_discard(m);
1192 list_add_tail(&m->list, &pool->prepared_discards); 1325
1193 spin_unlock_irqrestore(&pool->lock, flags);
1194 wake_worker(pool);
1195 }
1196 } else { 1326 } else {
1197 inc_all_io_entry(pool, bio); 1327 inc_all_io_entry(pool, bio);
1198 cell_defer_no_holder(tc, cell); 1328 cell_defer_no_holder(tc, cell);
@@ -1227,6 +1357,19 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
1227 } 1357 }
1228} 1358}
1229 1359
1360static void process_discard_bio(struct thin_c *tc, struct bio *bio)
1361{
1362 struct dm_bio_prison_cell *cell;
1363 struct dm_cell_key key;
1364 dm_block_t block = get_bio_block(tc, bio);
1365
1366 build_virtual_key(tc->td, block, &key);
1367 if (bio_detain(tc->pool, &key, bio, &cell))
1368 return;
1369
1370 process_discard_cell(tc, cell);
1371}
1372
1230static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block, 1373static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
1231 struct dm_cell_key *key, 1374 struct dm_cell_key *key,
1232 struct dm_thin_lookup_result *lookup_result, 1375 struct dm_thin_lookup_result *lookup_result,
@@ -1255,11 +1398,53 @@ static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
1255 } 1398 }
1256} 1399}
1257 1400
1401static void __remap_and_issue_shared_cell(void *context,
1402 struct dm_bio_prison_cell *cell)
1403{
1404 struct remap_info *info = context;
1405 struct bio *bio;
1406
1407 while ((bio = bio_list_pop(&cell->bios))) {
1408 if ((bio_data_dir(bio) == WRITE) ||
1409 (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA)))
1410 bio_list_add(&info->defer_bios, bio);
1411 else {
1412 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));;
1413
1414 h->shared_read_entry = dm_deferred_entry_inc(info->tc->pool->shared_read_ds);
1415 inc_all_io_entry(info->tc->pool, bio);
1416 bio_list_add(&info->issue_bios, bio);
1417 }
1418 }
1419}
1420
1421static void remap_and_issue_shared_cell(struct thin_c *tc,
1422 struct dm_bio_prison_cell *cell,
1423 dm_block_t block)
1424{
1425 struct bio *bio;
1426 struct remap_info info;
1427
1428 info.tc = tc;
1429 bio_list_init(&info.defer_bios);
1430 bio_list_init(&info.issue_bios);
1431
1432 cell_visit_release(tc->pool, __remap_and_issue_shared_cell,
1433 &info, cell);
1434
1435 while ((bio = bio_list_pop(&info.defer_bios)))
1436 thin_defer_bio(tc, bio);
1437
1438 while ((bio = bio_list_pop(&info.issue_bios)))
1439 remap_and_issue(tc, bio, block);
1440}
1441
1258static void process_shared_bio(struct thin_c *tc, struct bio *bio, 1442static void process_shared_bio(struct thin_c *tc, struct bio *bio,
1259 dm_block_t block, 1443 dm_block_t block,
1260 struct dm_thin_lookup_result *lookup_result) 1444 struct dm_thin_lookup_result *lookup_result,
1445 struct dm_bio_prison_cell *virt_cell)
1261{ 1446{
1262 struct dm_bio_prison_cell *cell; 1447 struct dm_bio_prison_cell *data_cell;
1263 struct pool *pool = tc->pool; 1448 struct pool *pool = tc->pool;
1264 struct dm_cell_key key; 1449 struct dm_cell_key key;
1265 1450
@@ -1268,19 +1453,23 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio,
1268 * of being broken so we have nothing further to do here. 1453 * of being broken so we have nothing further to do here.
1269 */ 1454 */
1270 build_data_key(tc->td, lookup_result->block, &key); 1455 build_data_key(tc->td, lookup_result->block, &key);
1271 if (bio_detain(pool, &key, bio, &cell)) 1456 if (bio_detain(pool, &key, bio, &data_cell)) {
1457 cell_defer_no_holder(tc, virt_cell);
1272 return; 1458 return;
1459 }
1273 1460
1274 if (bio_data_dir(bio) == WRITE && bio->bi_iter.bi_size) 1461 if (bio_data_dir(bio) == WRITE && bio->bi_iter.bi_size) {
1275 break_sharing(tc, bio, block, &key, lookup_result, cell); 1462 break_sharing(tc, bio, block, &key, lookup_result, data_cell);
1276 else { 1463 cell_defer_no_holder(tc, virt_cell);
1464 } else {
1277 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); 1465 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
1278 1466
1279 h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds); 1467 h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds);
1280 inc_all_io_entry(pool, bio); 1468 inc_all_io_entry(pool, bio);
1281 cell_defer_no_holder(tc, cell);
1282
1283 remap_and_issue(tc, bio, lookup_result->block); 1469 remap_and_issue(tc, bio, lookup_result->block);
1470
1471 remap_and_issue_shared_cell(tc, data_cell, lookup_result->block);
1472 remap_and_issue_shared_cell(tc, virt_cell, lookup_result->block);
1284 } 1473 }
1285} 1474}
1286 1475
@@ -1333,34 +1522,28 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
1333 } 1522 }
1334} 1523}
1335 1524
1336static void process_bio(struct thin_c *tc, struct bio *bio) 1525static void process_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell)
1337{ 1526{
1338 int r; 1527 int r;
1339 struct pool *pool = tc->pool; 1528 struct pool *pool = tc->pool;
1529 struct bio *bio = cell->holder;
1340 dm_block_t block = get_bio_block(tc, bio); 1530 dm_block_t block = get_bio_block(tc, bio);
1341 struct dm_bio_prison_cell *cell;
1342 struct dm_cell_key key;
1343 struct dm_thin_lookup_result lookup_result; 1531 struct dm_thin_lookup_result lookup_result;
1344 1532
1345 /* 1533 if (tc->requeue_mode) {
1346 * If cell is already occupied, then the block is already 1534 cell_requeue(pool, cell);
1347 * being provisioned so we have nothing further to do here.
1348 */
1349 build_virtual_key(tc->td, block, &key);
1350 if (bio_detain(pool, &key, bio, &cell))
1351 return; 1535 return;
1536 }
1352 1537
1353 r = dm_thin_find_block(tc->td, block, 1, &lookup_result); 1538 r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
1354 switch (r) { 1539 switch (r) {
1355 case 0: 1540 case 0:
1356 if (lookup_result.shared) { 1541 if (lookup_result.shared)
1357 process_shared_bio(tc, bio, block, &lookup_result); 1542 process_shared_bio(tc, bio, block, &lookup_result, cell);
1358 cell_defer_no_holder(tc, cell); /* FIXME: pass this cell into process_shared? */ 1543 else {
1359 } else {
1360 inc_all_io_entry(pool, bio); 1544 inc_all_io_entry(pool, bio);
1361 cell_defer_no_holder(tc, cell);
1362
1363 remap_and_issue(tc, bio, lookup_result.block); 1545 remap_and_issue(tc, bio, lookup_result.block);
1546 inc_remap_and_issue_cell(tc, cell, lookup_result.block);
1364 } 1547 }
1365 break; 1548 break;
1366 1549
@@ -1394,7 +1577,26 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
1394 } 1577 }
1395} 1578}
1396 1579
1397static void process_bio_read_only(struct thin_c *tc, struct bio *bio) 1580static void process_bio(struct thin_c *tc, struct bio *bio)
1581{
1582 struct pool *pool = tc->pool;
1583 dm_block_t block = get_bio_block(tc, bio);
1584 struct dm_bio_prison_cell *cell;
1585 struct dm_cell_key key;
1586
1587 /*
1588 * If cell is already occupied, then the block is already
1589 * being provisioned so we have nothing further to do here.
1590 */
1591 build_virtual_key(tc->td, block, &key);
1592 if (bio_detain(pool, &key, bio, &cell))
1593 return;
1594
1595 process_cell(tc, cell);
1596}
1597
1598static void __process_bio_read_only(struct thin_c *tc, struct bio *bio,
1599 struct dm_bio_prison_cell *cell)
1398{ 1600{
1399 int r; 1601 int r;
1400 int rw = bio_data_dir(bio); 1602 int rw = bio_data_dir(bio);
@@ -1404,15 +1606,21 @@ static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
1404 r = dm_thin_find_block(tc->td, block, 1, &lookup_result); 1606 r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
1405 switch (r) { 1607 switch (r) {
1406 case 0: 1608 case 0:
1407 if (lookup_result.shared && (rw == WRITE) && bio->bi_iter.bi_size) 1609 if (lookup_result.shared && (rw == WRITE) && bio->bi_iter.bi_size) {
1408 handle_unserviceable_bio(tc->pool, bio); 1610 handle_unserviceable_bio(tc->pool, bio);
1409 else { 1611 if (cell)
1612 cell_defer_no_holder(tc, cell);
1613 } else {
1410 inc_all_io_entry(tc->pool, bio); 1614 inc_all_io_entry(tc->pool, bio);
1411 remap_and_issue(tc, bio, lookup_result.block); 1615 remap_and_issue(tc, bio, lookup_result.block);
1616 if (cell)
1617 inc_remap_and_issue_cell(tc, cell, lookup_result.block);
1412 } 1618 }
1413 break; 1619 break;
1414 1620
1415 case -ENODATA: 1621 case -ENODATA:
1622 if (cell)
1623 cell_defer_no_holder(tc, cell);
1416 if (rw != READ) { 1624 if (rw != READ) {
1417 handle_unserviceable_bio(tc->pool, bio); 1625 handle_unserviceable_bio(tc->pool, bio);
1418 break; 1626 break;
@@ -1431,11 +1639,23 @@ static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
1431 default: 1639 default:
1432 DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d", 1640 DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
1433 __func__, r); 1641 __func__, r);
1642 if (cell)
1643 cell_defer_no_holder(tc, cell);
1434 bio_io_error(bio); 1644 bio_io_error(bio);
1435 break; 1645 break;
1436 } 1646 }
1437} 1647}
1438 1648
1649static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
1650{
1651 __process_bio_read_only(tc, bio, NULL);
1652}
1653
1654static void process_cell_read_only(struct thin_c *tc, struct dm_bio_prison_cell *cell)
1655{
1656 __process_bio_read_only(tc, cell->holder, cell);
1657}
1658
1439static void process_bio_success(struct thin_c *tc, struct bio *bio) 1659static void process_bio_success(struct thin_c *tc, struct bio *bio)
1440{ 1660{
1441 bio_endio(bio, 0); 1661 bio_endio(bio, 0);
@@ -1446,6 +1666,16 @@ static void process_bio_fail(struct thin_c *tc, struct bio *bio)
1446 bio_io_error(bio); 1666 bio_io_error(bio);
1447} 1667}
1448 1668
1669static void process_cell_success(struct thin_c *tc, struct dm_bio_prison_cell *cell)
1670{
1671 cell_success(tc->pool, cell);
1672}
1673
1674static void process_cell_fail(struct thin_c *tc, struct dm_bio_prison_cell *cell)
1675{
1676 cell_error(tc->pool, cell);
1677}
1678
1449/* 1679/*
1450 * FIXME: should we also commit due to size of transaction, measured in 1680 * FIXME: should we also commit due to size of transaction, measured in
1451 * metadata blocks? 1681 * metadata blocks?
@@ -1527,9 +1757,10 @@ static void process_thin_deferred_bios(struct thin_c *tc)
1527 struct bio *bio; 1757 struct bio *bio;
1528 struct bio_list bios; 1758 struct bio_list bios;
1529 struct blk_plug plug; 1759 struct blk_plug plug;
1760 unsigned count = 0;
1530 1761
1531 if (tc->requeue_mode) { 1762 if (tc->requeue_mode) {
1532 requeue_bio_list(tc, &tc->deferred_bio_list); 1763 error_thin_bio_list(tc, &tc->deferred_bio_list, DM_ENDIO_REQUEUE);
1533 return; 1764 return;
1534 } 1765 }
1535 1766
@@ -1568,10 +1799,97 @@ static void process_thin_deferred_bios(struct thin_c *tc)
1568 pool->process_discard(tc, bio); 1799 pool->process_discard(tc, bio);
1569 else 1800 else
1570 pool->process_bio(tc, bio); 1801 pool->process_bio(tc, bio);
1802
1803 if ((count++ & 127) == 0) {
1804 throttle_work_update(&pool->throttle);
1805 dm_pool_issue_prefetches(pool->pmd);
1806 }
1571 } 1807 }
1572 blk_finish_plug(&plug); 1808 blk_finish_plug(&plug);
1573} 1809}
1574 1810
1811static int cmp_cells(const void *lhs, const void *rhs)
1812{
1813 struct dm_bio_prison_cell *lhs_cell = *((struct dm_bio_prison_cell **) lhs);
1814 struct dm_bio_prison_cell *rhs_cell = *((struct dm_bio_prison_cell **) rhs);
1815
1816 BUG_ON(!lhs_cell->holder);
1817 BUG_ON(!rhs_cell->holder);
1818
1819 if (lhs_cell->holder->bi_iter.bi_sector < rhs_cell->holder->bi_iter.bi_sector)
1820 return -1;
1821
1822 if (lhs_cell->holder->bi_iter.bi_sector > rhs_cell->holder->bi_iter.bi_sector)
1823 return 1;
1824
1825 return 0;
1826}
1827
1828static unsigned sort_cells(struct pool *pool, struct list_head *cells)
1829{
1830 unsigned count = 0;
1831 struct dm_bio_prison_cell *cell, *tmp;
1832
1833 list_for_each_entry_safe(cell, tmp, cells, user_list) {
1834 if (count >= CELL_SORT_ARRAY_SIZE)
1835 break;
1836
1837 pool->cell_sort_array[count++] = cell;
1838 list_del(&cell->user_list);
1839 }
1840
1841 sort(pool->cell_sort_array, count, sizeof(cell), cmp_cells, NULL);
1842
1843 return count;
1844}
1845
1846static void process_thin_deferred_cells(struct thin_c *tc)
1847{
1848 struct pool *pool = tc->pool;
1849 unsigned long flags;
1850 struct list_head cells;
1851 struct dm_bio_prison_cell *cell;
1852 unsigned i, j, count;
1853
1854 INIT_LIST_HEAD(&cells);
1855
1856 spin_lock_irqsave(&tc->lock, flags);
1857 list_splice_init(&tc->deferred_cells, &cells);
1858 spin_unlock_irqrestore(&tc->lock, flags);
1859
1860 if (list_empty(&cells))
1861 return;
1862
1863 do {
1864 count = sort_cells(tc->pool, &cells);
1865
1866 for (i = 0; i < count; i++) {
1867 cell = pool->cell_sort_array[i];
1868 BUG_ON(!cell->holder);
1869
1870 /*
1871 * If we've got no free new_mapping structs, and processing
1872 * this bio might require one, we pause until there are some
1873 * prepared mappings to process.
1874 */
1875 if (ensure_next_mapping(pool)) {
1876 for (j = i; j < count; j++)
1877 list_add(&pool->cell_sort_array[j]->user_list, &cells);
1878
1879 spin_lock_irqsave(&tc->lock, flags);
1880 list_splice(&cells, &tc->deferred_cells);
1881 spin_unlock_irqrestore(&tc->lock, flags);
1882 return;
1883 }
1884
1885 if (cell->holder->bi_rw & REQ_DISCARD)
1886 pool->process_discard_cell(tc, cell);
1887 else
1888 pool->process_cell(tc, cell);
1889 }
1890 } while (!list_empty(&cells));
1891}
1892
1575static void thin_get(struct thin_c *tc); 1893static void thin_get(struct thin_c *tc);
1576static void thin_put(struct thin_c *tc); 1894static void thin_put(struct thin_c *tc);
1577 1895
@@ -1620,6 +1938,7 @@ static void process_deferred_bios(struct pool *pool)
1620 1938
1621 tc = get_first_thin(pool); 1939 tc = get_first_thin(pool);
1622 while (tc) { 1940 while (tc) {
1941 process_thin_deferred_cells(tc);
1623 process_thin_deferred_bios(tc); 1942 process_thin_deferred_bios(tc);
1624 tc = get_next_thin(pool, tc); 1943 tc = get_next_thin(pool, tc);
1625 } 1944 }
@@ -1653,9 +1972,15 @@ static void do_worker(struct work_struct *ws)
1653{ 1972{
1654 struct pool *pool = container_of(ws, struct pool, worker); 1973 struct pool *pool = container_of(ws, struct pool, worker);
1655 1974
1975 throttle_work_start(&pool->throttle);
1976 dm_pool_issue_prefetches(pool->pmd);
1977 throttle_work_update(&pool->throttle);
1656 process_prepared(pool, &pool->prepared_mappings, &pool->process_prepared_mapping); 1978 process_prepared(pool, &pool->prepared_mappings, &pool->process_prepared_mapping);
1979 throttle_work_update(&pool->throttle);
1657 process_prepared(pool, &pool->prepared_discards, &pool->process_prepared_discard); 1980 process_prepared(pool, &pool->prepared_discards, &pool->process_prepared_discard);
1981 throttle_work_update(&pool->throttle);
1658 process_deferred_bios(pool); 1982 process_deferred_bios(pool);
1983 throttle_work_complete(&pool->throttle);
1659} 1984}
1660 1985
1661/* 1986/*
@@ -1792,6 +2117,8 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
1792 dm_pool_metadata_read_only(pool->pmd); 2117 dm_pool_metadata_read_only(pool->pmd);
1793 pool->process_bio = process_bio_fail; 2118 pool->process_bio = process_bio_fail;
1794 pool->process_discard = process_bio_fail; 2119 pool->process_discard = process_bio_fail;
2120 pool->process_cell = process_cell_fail;
2121 pool->process_discard_cell = process_cell_fail;
1795 pool->process_prepared_mapping = process_prepared_mapping_fail; 2122 pool->process_prepared_mapping = process_prepared_mapping_fail;
1796 pool->process_prepared_discard = process_prepared_discard_fail; 2123 pool->process_prepared_discard = process_prepared_discard_fail;
1797 2124
@@ -1804,6 +2131,8 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
1804 dm_pool_metadata_read_only(pool->pmd); 2131 dm_pool_metadata_read_only(pool->pmd);
1805 pool->process_bio = process_bio_read_only; 2132 pool->process_bio = process_bio_read_only;
1806 pool->process_discard = process_bio_success; 2133 pool->process_discard = process_bio_success;
2134 pool->process_cell = process_cell_read_only;
2135 pool->process_discard_cell = process_cell_success;
1807 pool->process_prepared_mapping = process_prepared_mapping_fail; 2136 pool->process_prepared_mapping = process_prepared_mapping_fail;
1808 pool->process_prepared_discard = process_prepared_discard_passdown; 2137 pool->process_prepared_discard = process_prepared_discard_passdown;
1809 2138
@@ -1822,7 +2151,9 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
1822 if (old_mode != new_mode) 2151 if (old_mode != new_mode)
1823 notify_of_pool_mode_change(pool, "out-of-data-space"); 2152 notify_of_pool_mode_change(pool, "out-of-data-space");
1824 pool->process_bio = process_bio_read_only; 2153 pool->process_bio = process_bio_read_only;
1825 pool->process_discard = process_discard; 2154 pool->process_discard = process_discard_bio;
2155 pool->process_cell = process_cell_read_only;
2156 pool->process_discard_cell = process_discard_cell;
1826 pool->process_prepared_mapping = process_prepared_mapping; 2157 pool->process_prepared_mapping = process_prepared_mapping;
1827 pool->process_prepared_discard = process_prepared_discard_passdown; 2158 pool->process_prepared_discard = process_prepared_discard_passdown;
1828 2159
@@ -1835,7 +2166,9 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
1835 notify_of_pool_mode_change(pool, "write"); 2166 notify_of_pool_mode_change(pool, "write");
1836 dm_pool_metadata_read_write(pool->pmd); 2167 dm_pool_metadata_read_write(pool->pmd);
1837 pool->process_bio = process_bio; 2168 pool->process_bio = process_bio;
1838 pool->process_discard = process_discard; 2169 pool->process_discard = process_discard_bio;
2170 pool->process_cell = process_cell;
2171 pool->process_discard_cell = process_discard_cell;
1839 pool->process_prepared_mapping = process_prepared_mapping; 2172 pool->process_prepared_mapping = process_prepared_mapping;
1840 pool->process_prepared_discard = process_prepared_discard; 2173 pool->process_prepared_discard = process_prepared_discard;
1841 break; 2174 break;
@@ -1895,6 +2228,29 @@ static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
1895 wake_worker(pool); 2228 wake_worker(pool);
1896} 2229}
1897 2230
2231static void thin_defer_bio_with_throttle(struct thin_c *tc, struct bio *bio)
2232{
2233 struct pool *pool = tc->pool;
2234
2235 throttle_lock(&pool->throttle);
2236 thin_defer_bio(tc, bio);
2237 throttle_unlock(&pool->throttle);
2238}
2239
2240static void thin_defer_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell)
2241{
2242 unsigned long flags;
2243 struct pool *pool = tc->pool;
2244
2245 throttle_lock(&pool->throttle);
2246 spin_lock_irqsave(&tc->lock, flags);
2247 list_add_tail(&cell->user_list, &tc->deferred_cells);
2248 spin_unlock_irqrestore(&tc->lock, flags);
2249 throttle_unlock(&pool->throttle);
2250
2251 wake_worker(pool);
2252}
2253
1898static void thin_hook_bio(struct thin_c *tc, struct bio *bio) 2254static void thin_hook_bio(struct thin_c *tc, struct bio *bio)
1899{ 2255{
1900 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); 2256 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
@@ -1915,8 +2271,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
1915 dm_block_t block = get_bio_block(tc, bio); 2271 dm_block_t block = get_bio_block(tc, bio);
1916 struct dm_thin_device *td = tc->td; 2272 struct dm_thin_device *td = tc->td;
1917 struct dm_thin_lookup_result result; 2273 struct dm_thin_lookup_result result;
1918 struct dm_bio_prison_cell cell1, cell2; 2274 struct dm_bio_prison_cell *virt_cell, *data_cell;
1919 struct dm_bio_prison_cell *cell_result;
1920 struct dm_cell_key key; 2275 struct dm_cell_key key;
1921 2276
1922 thin_hook_bio(tc, bio); 2277 thin_hook_bio(tc, bio);
@@ -1932,7 +2287,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
1932 } 2287 }
1933 2288
1934 if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA)) { 2289 if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA)) {
1935 thin_defer_bio(tc, bio); 2290 thin_defer_bio_with_throttle(tc, bio);
1936 return DM_MAPIO_SUBMITTED; 2291 return DM_MAPIO_SUBMITTED;
1937 } 2292 }
1938 2293
@@ -1941,7 +2296,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
1941 * there's a race with discard. 2296 * there's a race with discard.
1942 */ 2297 */
1943 build_virtual_key(tc->td, block, &key); 2298 build_virtual_key(tc->td, block, &key);
1944 if (dm_bio_detain(tc->pool->prison, &key, bio, &cell1, &cell_result)) 2299 if (bio_detain(tc->pool, &key, bio, &virt_cell))
1945 return DM_MAPIO_SUBMITTED; 2300 return DM_MAPIO_SUBMITTED;
1946 2301
1947 r = dm_thin_find_block(td, block, 0, &result); 2302 r = dm_thin_find_block(td, block, 0, &result);
@@ -1966,20 +2321,19 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
1966 * More distant ancestors are irrelevant. The 2321 * More distant ancestors are irrelevant. The
1967 * shared flag will be set in their case. 2322 * shared flag will be set in their case.
1968 */ 2323 */
1969 thin_defer_bio(tc, bio); 2324 thin_defer_cell(tc, virt_cell);
1970 cell_defer_no_holder_no_free(tc, &cell1);
1971 return DM_MAPIO_SUBMITTED; 2325 return DM_MAPIO_SUBMITTED;
1972 } 2326 }
1973 2327
1974 build_data_key(tc->td, result.block, &key); 2328 build_data_key(tc->td, result.block, &key);
1975 if (dm_bio_detain(tc->pool->prison, &key, bio, &cell2, &cell_result)) { 2329 if (bio_detain(tc->pool, &key, bio, &data_cell)) {
1976 cell_defer_no_holder_no_free(tc, &cell1); 2330 cell_defer_no_holder(tc, virt_cell);
1977 return DM_MAPIO_SUBMITTED; 2331 return DM_MAPIO_SUBMITTED;
1978 } 2332 }
1979 2333
1980 inc_all_io_entry(tc->pool, bio); 2334 inc_all_io_entry(tc->pool, bio);
1981 cell_defer_no_holder_no_free(tc, &cell2); 2335 cell_defer_no_holder(tc, data_cell);
1982 cell_defer_no_holder_no_free(tc, &cell1); 2336 cell_defer_no_holder(tc, virt_cell);
1983 2337
1984 remap(tc, bio, result.block); 2338 remap(tc, bio, result.block);
1985 return DM_MAPIO_REMAPPED; 2339 return DM_MAPIO_REMAPPED;
@@ -1991,18 +2345,13 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
1991 * of doing so. 2345 * of doing so.
1992 */ 2346 */
1993 handle_unserviceable_bio(tc->pool, bio); 2347 handle_unserviceable_bio(tc->pool, bio);
1994 cell_defer_no_holder_no_free(tc, &cell1); 2348 cell_defer_no_holder(tc, virt_cell);
1995 return DM_MAPIO_SUBMITTED; 2349 return DM_MAPIO_SUBMITTED;
1996 } 2350 }
1997 /* fall through */ 2351 /* fall through */
1998 2352
1999 case -EWOULDBLOCK: 2353 case -EWOULDBLOCK:
2000 /* 2354 thin_defer_cell(tc, virt_cell);
2001 * In future, the failed dm_thin_find_block above could
2002 * provide the hint to load the metadata into cache.
2003 */
2004 thin_defer_bio(tc, bio);
2005 cell_defer_no_holder_no_free(tc, &cell1);
2006 return DM_MAPIO_SUBMITTED; 2355 return DM_MAPIO_SUBMITTED;
2007 2356
2008 default: 2357 default:
@@ -2012,7 +2361,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
2012 * pool is switched to fail-io mode. 2361 * pool is switched to fail-io mode.
2013 */ 2362 */
2014 bio_io_error(bio); 2363 bio_io_error(bio);
2015 cell_defer_no_holder_no_free(tc, &cell1); 2364 cell_defer_no_holder(tc, virt_cell);
2016 return DM_MAPIO_SUBMITTED; 2365 return DM_MAPIO_SUBMITTED;
2017 } 2366 }
2018} 2367}
@@ -2193,7 +2542,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
2193 pool->sectors_per_block_shift = __ffs(block_size); 2542 pool->sectors_per_block_shift = __ffs(block_size);
2194 pool->low_water_blocks = 0; 2543 pool->low_water_blocks = 0;
2195 pool_features_init(&pool->pf); 2544 pool_features_init(&pool->pf);
2196 pool->prison = dm_bio_prison_create(PRISON_CELLS); 2545 pool->prison = dm_bio_prison_create();
2197 if (!pool->prison) { 2546 if (!pool->prison) {
2198 *error = "Error creating pool's bio prison"; 2547 *error = "Error creating pool's bio prison";
2199 err_p = ERR_PTR(-ENOMEM); 2548 err_p = ERR_PTR(-ENOMEM);
@@ -2219,6 +2568,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
2219 goto bad_wq; 2568 goto bad_wq;
2220 } 2569 }
2221 2570
2571 throttle_init(&pool->throttle);
2222 INIT_WORK(&pool->worker, do_worker); 2572 INIT_WORK(&pool->worker, do_worker);
2223 INIT_DELAYED_WORK(&pool->waker, do_waker); 2573 INIT_DELAYED_WORK(&pool->waker, do_waker);
2224 INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout); 2574 INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout);
@@ -2228,6 +2578,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
2228 INIT_LIST_HEAD(&pool->prepared_discards); 2578 INIT_LIST_HEAD(&pool->prepared_discards);
2229 INIT_LIST_HEAD(&pool->active_thins); 2579 INIT_LIST_HEAD(&pool->active_thins);
2230 pool->low_water_triggered = false; 2580 pool->low_water_triggered = false;
2581 pool->suspended = true;
2231 2582
2232 pool->shared_read_ds = dm_deferred_set_create(); 2583 pool->shared_read_ds = dm_deferred_set_create();
2233 if (!pool->shared_read_ds) { 2584 if (!pool->shared_read_ds) {
@@ -2764,20 +3115,77 @@ static int pool_preresume(struct dm_target *ti)
2764 return 0; 3115 return 0;
2765} 3116}
2766 3117
3118static void pool_suspend_active_thins(struct pool *pool)
3119{
3120 struct thin_c *tc;
3121
3122 /* Suspend all active thin devices */
3123 tc = get_first_thin(pool);
3124 while (tc) {
3125 dm_internal_suspend_noflush(tc->thin_md);
3126 tc = get_next_thin(pool, tc);
3127 }
3128}
3129
3130static void pool_resume_active_thins(struct pool *pool)
3131{
3132 struct thin_c *tc;
3133
3134 /* Resume all active thin devices */
3135 tc = get_first_thin(pool);
3136 while (tc) {
3137 dm_internal_resume(tc->thin_md);
3138 tc = get_next_thin(pool, tc);
3139 }
3140}
3141
2767static void pool_resume(struct dm_target *ti) 3142static void pool_resume(struct dm_target *ti)
2768{ 3143{
2769 struct pool_c *pt = ti->private; 3144 struct pool_c *pt = ti->private;
2770 struct pool *pool = pt->pool; 3145 struct pool *pool = pt->pool;
2771 unsigned long flags; 3146 unsigned long flags;
2772 3147
3148 /*
3149 * Must requeue active_thins' bios and then resume
3150 * active_thins _before_ clearing 'suspend' flag.
3151 */
3152 requeue_bios(pool);
3153 pool_resume_active_thins(pool);
3154
2773 spin_lock_irqsave(&pool->lock, flags); 3155 spin_lock_irqsave(&pool->lock, flags);
2774 pool->low_water_triggered = false; 3156 pool->low_water_triggered = false;
3157 pool->suspended = false;
2775 spin_unlock_irqrestore(&pool->lock, flags); 3158 spin_unlock_irqrestore(&pool->lock, flags);
2776 requeue_bios(pool);
2777 3159
2778 do_waker(&pool->waker.work); 3160 do_waker(&pool->waker.work);
2779} 3161}
2780 3162
3163static void pool_presuspend(struct dm_target *ti)
3164{
3165 struct pool_c *pt = ti->private;
3166 struct pool *pool = pt->pool;
3167 unsigned long flags;
3168
3169 spin_lock_irqsave(&pool->lock, flags);
3170 pool->suspended = true;
3171 spin_unlock_irqrestore(&pool->lock, flags);
3172
3173 pool_suspend_active_thins(pool);
3174}
3175
3176static void pool_presuspend_undo(struct dm_target *ti)
3177{
3178 struct pool_c *pt = ti->private;
3179 struct pool *pool = pt->pool;
3180 unsigned long flags;
3181
3182 pool_resume_active_thins(pool);
3183
3184 spin_lock_irqsave(&pool->lock, flags);
3185 pool->suspended = false;
3186 spin_unlock_irqrestore(&pool->lock, flags);
3187}
3188
2781static void pool_postsuspend(struct dm_target *ti) 3189static void pool_postsuspend(struct dm_target *ti)
2782{ 3190{
2783 struct pool_c *pt = ti->private; 3191 struct pool_c *pt = ti->private;
@@ -2949,7 +3357,6 @@ static int process_release_metadata_snap_mesg(unsigned argc, char **argv, struct
2949 * create_thin <dev_id> 3357 * create_thin <dev_id>
2950 * create_snap <dev_id> <origin_id> 3358 * create_snap <dev_id> <origin_id>
2951 * delete <dev_id> 3359 * delete <dev_id>
2952 * trim <dev_id> <new_size_in_sectors>
2953 * set_transaction_id <current_trans_id> <new_trans_id> 3360 * set_transaction_id <current_trans_id> <new_trans_id>
2954 * reserve_metadata_snap 3361 * reserve_metadata_snap
2955 * release_metadata_snap 3362 * release_metadata_snap
@@ -3177,15 +3584,35 @@ static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
3177{ 3584{
3178 struct pool_c *pt = ti->private; 3585 struct pool_c *pt = ti->private;
3179 struct pool *pool = pt->pool; 3586 struct pool *pool = pt->pool;
3180 uint64_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT; 3587 sector_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;
3588
3589 /*
3590 * If max_sectors is smaller than pool->sectors_per_block adjust it
3591 * to the highest possible power-of-2 factor of pool->sectors_per_block.
3592 * This is especially beneficial when the pool's data device is a RAID
3593 * device that has a full stripe width that matches pool->sectors_per_block
3594 * -- because even though partial RAID stripe-sized IOs will be issued to a
3595 * single RAID stripe; when aggregated they will end on a full RAID stripe
3596 * boundary.. which avoids additional partial RAID stripe writes cascading
3597 */
3598 if (limits->max_sectors < pool->sectors_per_block) {
3599 while (!is_factor(pool->sectors_per_block, limits->max_sectors)) {
3600 if ((limits->max_sectors & (limits->max_sectors - 1)) == 0)
3601 limits->max_sectors--;
3602 limits->max_sectors = rounddown_pow_of_two(limits->max_sectors);
3603 }
3604 }
3181 3605
3182 /* 3606 /*
3183 * If the system-determined stacked limits are compatible with the 3607 * If the system-determined stacked limits are compatible with the
3184 * pool's blocksize (io_opt is a factor) do not override them. 3608 * pool's blocksize (io_opt is a factor) do not override them.
3185 */ 3609 */
3186 if (io_opt_sectors < pool->sectors_per_block || 3610 if (io_opt_sectors < pool->sectors_per_block ||
3187 do_div(io_opt_sectors, pool->sectors_per_block)) { 3611 !is_factor(io_opt_sectors, pool->sectors_per_block)) {
3188 blk_limits_io_min(limits, pool->sectors_per_block << SECTOR_SHIFT); 3612 if (is_factor(pool->sectors_per_block, limits->max_sectors))
3613 blk_limits_io_min(limits, limits->max_sectors << SECTOR_SHIFT);
3614 else
3615 blk_limits_io_min(limits, pool->sectors_per_block << SECTOR_SHIFT);
3189 blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT); 3616 blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
3190 } 3617 }
3191 3618
@@ -3214,11 +3641,13 @@ static struct target_type pool_target = {
3214 .name = "thin-pool", 3641 .name = "thin-pool",
3215 .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE | 3642 .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
3216 DM_TARGET_IMMUTABLE, 3643 DM_TARGET_IMMUTABLE,
3217 .version = {1, 13, 0}, 3644 .version = {1, 14, 0},
3218 .module = THIS_MODULE, 3645 .module = THIS_MODULE,
3219 .ctr = pool_ctr, 3646 .ctr = pool_ctr,
3220 .dtr = pool_dtr, 3647 .dtr = pool_dtr,
3221 .map = pool_map, 3648 .map = pool_map,
3649 .presuspend = pool_presuspend,
3650 .presuspend_undo = pool_presuspend_undo,
3222 .postsuspend = pool_postsuspend, 3651 .postsuspend = pool_postsuspend,
3223 .preresume = pool_preresume, 3652 .preresume = pool_preresume,
3224 .resume = pool_resume, 3653 .resume = pool_resume,
@@ -3248,14 +3677,14 @@ static void thin_dtr(struct dm_target *ti)
3248 struct thin_c *tc = ti->private; 3677 struct thin_c *tc = ti->private;
3249 unsigned long flags; 3678 unsigned long flags;
3250 3679
3251 thin_put(tc);
3252 wait_for_completion(&tc->can_destroy);
3253
3254 spin_lock_irqsave(&tc->pool->lock, flags); 3680 spin_lock_irqsave(&tc->pool->lock, flags);
3255 list_del_rcu(&tc->list); 3681 list_del_rcu(&tc->list);
3256 spin_unlock_irqrestore(&tc->pool->lock, flags); 3682 spin_unlock_irqrestore(&tc->pool->lock, flags);
3257 synchronize_rcu(); 3683 synchronize_rcu();
3258 3684
3685 thin_put(tc);
3686 wait_for_completion(&tc->can_destroy);
3687
3259 mutex_lock(&dm_thin_pool_table.mutex); 3688 mutex_lock(&dm_thin_pool_table.mutex);
3260 3689
3261 __pool_dec(tc->pool); 3690 __pool_dec(tc->pool);
@@ -3302,7 +3731,9 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
3302 r = -ENOMEM; 3731 r = -ENOMEM;
3303 goto out_unlock; 3732 goto out_unlock;
3304 } 3733 }
3734 tc->thin_md = dm_table_get_md(ti->table);
3305 spin_lock_init(&tc->lock); 3735 spin_lock_init(&tc->lock);
3736 INIT_LIST_HEAD(&tc->deferred_cells);
3306 bio_list_init(&tc->deferred_bio_list); 3737 bio_list_init(&tc->deferred_bio_list);
3307 bio_list_init(&tc->retry_on_resume_list); 3738 bio_list_init(&tc->retry_on_resume_list);
3308 tc->sort_bio_list = RB_ROOT; 3739 tc->sort_bio_list = RB_ROOT;
@@ -3347,18 +3778,18 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
3347 if (get_pool_mode(tc->pool) == PM_FAIL) { 3778 if (get_pool_mode(tc->pool) == PM_FAIL) {
3348 ti->error = "Couldn't open thin device, Pool is in fail mode"; 3779 ti->error = "Couldn't open thin device, Pool is in fail mode";
3349 r = -EINVAL; 3780 r = -EINVAL;
3350 goto bad_thin_open; 3781 goto bad_pool;
3351 } 3782 }
3352 3783
3353 r = dm_pool_open_thin_device(tc->pool->pmd, tc->dev_id, &tc->td); 3784 r = dm_pool_open_thin_device(tc->pool->pmd, tc->dev_id, &tc->td);
3354 if (r) { 3785 if (r) {
3355 ti->error = "Couldn't open thin internal device"; 3786 ti->error = "Couldn't open thin internal device";
3356 goto bad_thin_open; 3787 goto bad_pool;
3357 } 3788 }
3358 3789
3359 r = dm_set_target_max_io_len(ti, tc->pool->sectors_per_block); 3790 r = dm_set_target_max_io_len(ti, tc->pool->sectors_per_block);
3360 if (r) 3791 if (r)
3361 goto bad_target_max_io_len; 3792 goto bad;
3362 3793
3363 ti->num_flush_bios = 1; 3794 ti->num_flush_bios = 1;
3364 ti->flush_supported = true; 3795 ti->flush_supported = true;
@@ -3373,14 +3804,16 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
3373 ti->split_discard_bios = true; 3804 ti->split_discard_bios = true;
3374 } 3805 }
3375 3806
3376 dm_put(pool_md);
3377
3378 mutex_unlock(&dm_thin_pool_table.mutex); 3807 mutex_unlock(&dm_thin_pool_table.mutex);
3379 3808
3380 atomic_set(&tc->refcount, 1);
3381 init_completion(&tc->can_destroy);
3382
3383 spin_lock_irqsave(&tc->pool->lock, flags); 3809 spin_lock_irqsave(&tc->pool->lock, flags);
3810 if (tc->pool->suspended) {
3811 spin_unlock_irqrestore(&tc->pool->lock, flags);
3812 mutex_lock(&dm_thin_pool_table.mutex); /* reacquire for __pool_dec */
3813 ti->error = "Unable to activate thin device while pool is suspended";
3814 r = -EINVAL;
3815 goto bad;
3816 }
3384 list_add_tail_rcu(&tc->list, &tc->pool->active_thins); 3817 list_add_tail_rcu(&tc->list, &tc->pool->active_thins);
3385 spin_unlock_irqrestore(&tc->pool->lock, flags); 3818 spin_unlock_irqrestore(&tc->pool->lock, flags);
3386 /* 3819 /*
@@ -3391,11 +3824,16 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
3391 */ 3824 */
3392 synchronize_rcu(); 3825 synchronize_rcu();
3393 3826
3827 dm_put(pool_md);
3828
3829 atomic_set(&tc->refcount, 1);
3830 init_completion(&tc->can_destroy);
3831
3394 return 0; 3832 return 0;
3395 3833
3396bad_target_max_io_len: 3834bad:
3397 dm_pool_close_thin_device(tc->td); 3835 dm_pool_close_thin_device(tc->td);
3398bad_thin_open: 3836bad_pool:
3399 __pool_dec(tc->pool); 3837 __pool_dec(tc->pool);
3400bad_pool_lookup: 3838bad_pool_lookup:
3401 dm_put(pool_md); 3839 dm_put(pool_md);
@@ -3541,6 +3979,21 @@ err:
3541 DMEMIT("Error"); 3979 DMEMIT("Error");
3542} 3980}
3543 3981
3982static int thin_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
3983 struct bio_vec *biovec, int max_size)
3984{
3985 struct thin_c *tc = ti->private;
3986 struct request_queue *q = bdev_get_queue(tc->pool_dev->bdev);
3987
3988 if (!q->merge_bvec_fn)
3989 return max_size;
3990
3991 bvm->bi_bdev = tc->pool_dev->bdev;
3992 bvm->bi_sector = dm_target_offset(ti, bvm->bi_sector);
3993
3994 return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
3995}
3996
3544static int thin_iterate_devices(struct dm_target *ti, 3997static int thin_iterate_devices(struct dm_target *ti,
3545 iterate_devices_callout_fn fn, void *data) 3998 iterate_devices_callout_fn fn, void *data)
3546{ 3999{
@@ -3565,7 +4018,7 @@ static int thin_iterate_devices(struct dm_target *ti,
3565 4018
3566static struct target_type thin_target = { 4019static struct target_type thin_target = {
3567 .name = "thin", 4020 .name = "thin",
3568 .version = {1, 13, 0}, 4021 .version = {1, 14, 0},
3569 .module = THIS_MODULE, 4022 .module = THIS_MODULE,
3570 .ctr = thin_ctr, 4023 .ctr = thin_ctr,
3571 .dtr = thin_dtr, 4024 .dtr = thin_dtr,
@@ -3575,6 +4028,7 @@ static struct target_type thin_target = {
3575 .presuspend = thin_presuspend, 4028 .presuspend = thin_presuspend,
3576 .postsuspend = thin_postsuspend, 4029 .postsuspend = thin_postsuspend,
3577 .status = thin_status, 4030 .status = thin_status,
4031 .merge = thin_merge,
3578 .iterate_devices = thin_iterate_devices, 4032 .iterate_devices = thin_iterate_devices,
3579}; 4033};
3580 4034
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 58f3927fd7cc..8f37ed215b19 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -19,6 +19,7 @@
19#include <linux/idr.h> 19#include <linux/idr.h>
20#include <linux/hdreg.h> 20#include <linux/hdreg.h>
21#include <linux/delay.h> 21#include <linux/delay.h>
22#include <linux/wait.h>
22 23
23#include <trace/events/block.h> 24#include <trace/events/block.h>
24 25
@@ -117,6 +118,7 @@ EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);
117#define DMF_NOFLUSH_SUSPENDING 5 118#define DMF_NOFLUSH_SUSPENDING 5
118#define DMF_MERGE_IS_OPTIONAL 6 119#define DMF_MERGE_IS_OPTIONAL 6
119#define DMF_DEFERRED_REMOVE 7 120#define DMF_DEFERRED_REMOVE 7
121#define DMF_SUSPENDED_INTERNALLY 8
120 122
121/* 123/*
122 * A dummy definition to make RCU happy. 124 * A dummy definition to make RCU happy.
@@ -140,7 +142,7 @@ struct mapped_device {
140 * Use dm_get_live_table{_fast} or take suspend_lock for 142 * Use dm_get_live_table{_fast} or take suspend_lock for
141 * dereference. 143 * dereference.
142 */ 144 */
143 struct dm_table *map; 145 struct dm_table __rcu *map;
144 146
145 struct list_head table_devices; 147 struct list_head table_devices;
146 struct mutex table_devices_lock; 148 struct mutex table_devices_lock;
@@ -525,14 +527,15 @@ retry:
525 goto out; 527 goto out;
526 528
527 tgt = dm_table_get_target(map, 0); 529 tgt = dm_table_get_target(map, 0);
530 if (!tgt->type->ioctl)
531 goto out;
528 532
529 if (dm_suspended_md(md)) { 533 if (dm_suspended_md(md)) {
530 r = -EAGAIN; 534 r = -EAGAIN;
531 goto out; 535 goto out;
532 } 536 }
533 537
534 if (tgt->type->ioctl) 538 r = tgt->type->ioctl(tgt, cmd, arg);
535 r = tgt->type->ioctl(tgt, cmd, arg);
536 539
537out: 540out:
538 dm_put_live_table(md, srcu_idx); 541 dm_put_live_table(md, srcu_idx);
@@ -1607,9 +1610,9 @@ static int dm_merge_bvec(struct request_queue *q,
1607 * Find maximum amount of I/O that won't need splitting 1610 * Find maximum amount of I/O that won't need splitting
1608 */ 1611 */
1609 max_sectors = min(max_io_len(bvm->bi_sector, ti), 1612 max_sectors = min(max_io_len(bvm->bi_sector, ti),
1610 (sector_t) BIO_MAX_SECTORS); 1613 (sector_t) queue_max_sectors(q));
1611 max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size; 1614 max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
1612 if (max_size < 0) 1615 if (unlikely(max_size < 0)) /* this shouldn't _ever_ happen */
1613 max_size = 0; 1616 max_size = 0;
1614 1617
1615 /* 1618 /*
@@ -1621,10 +1624,10 @@ static int dm_merge_bvec(struct request_queue *q,
1621 max_size = ti->type->merge(ti, bvm, biovec, max_size); 1624 max_size = ti->type->merge(ti, bvm, biovec, max_size);
1622 /* 1625 /*
1623 * If the target doesn't support merge method and some of the devices 1626 * If the target doesn't support merge method and some of the devices
1624 * provided their merge_bvec method (we know this by looking at 1627 * provided their merge_bvec method (we know this by looking for the
1625 * queue_max_hw_sectors), then we can't allow bios with multiple vector 1628 * max_hw_sectors that dm_set_device_limits may set), then we can't
1626 * entries. So always set max_size to 0, and the code below allows 1629 * allow bios with multiple vector entries. So always set max_size
1627 * just one page. 1630 * to 0, and the code below allows just one page.
1628 */ 1631 */
1629 else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9) 1632 else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9)
1630 max_size = 0; 1633 max_size = 0;
@@ -2332,7 +2335,7 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
2332 2335
2333 merge_is_optional = dm_table_merge_is_optional(t); 2336 merge_is_optional = dm_table_merge_is_optional(t);
2334 2337
2335 old_map = md->map; 2338 old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2336 rcu_assign_pointer(md->map, t); 2339 rcu_assign_pointer(md->map, t);
2337 md->immutable_target_type = dm_table_get_immutable_target_type(t); 2340 md->immutable_target_type = dm_table_get_immutable_target_type(t);
2338 2341
@@ -2341,7 +2344,8 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
2341 set_bit(DMF_MERGE_IS_OPTIONAL, &md->flags); 2344 set_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
2342 else 2345 else
2343 clear_bit(DMF_MERGE_IS_OPTIONAL, &md->flags); 2346 clear_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
2344 dm_sync_table(md); 2347 if (old_map)
2348 dm_sync_table(md);
2345 2349
2346 return old_map; 2350 return old_map;
2347} 2351}
@@ -2351,7 +2355,7 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
2351 */ 2355 */
2352static struct dm_table *__unbind(struct mapped_device *md) 2356static struct dm_table *__unbind(struct mapped_device *md)
2353{ 2357{
2354 struct dm_table *map = md->map; 2358 struct dm_table *map = rcu_dereference_protected(md->map, 1);
2355 2359
2356 if (!map) 2360 if (!map)
2357 return NULL; 2361 return NULL;
@@ -2716,36 +2720,18 @@ static void unlock_fs(struct mapped_device *md)
2716} 2720}
2717 2721
2718/* 2722/*
2719 * We need to be able to change a mapping table under a mounted 2723 * If __dm_suspend returns 0, the device is completely quiescent
2720 * filesystem. For example we might want to move some data in 2724 * now. There is no request-processing activity. All new requests
2721 * the background. Before the table can be swapped with 2725 * are being added to md->deferred list.
2722 * dm_bind_table, dm_suspend must be called to flush any in
2723 * flight bios and ensure that any further io gets deferred.
2724 */
2725/*
2726 * Suspend mechanism in request-based dm.
2727 * 2726 *
2728 * 1. Flush all I/Os by lock_fs() if needed. 2727 * Caller must hold md->suspend_lock
2729 * 2. Stop dispatching any I/O by stopping the request_queue.
2730 * 3. Wait for all in-flight I/Os to be completed or requeued.
2731 *
2732 * To abort suspend, start the request_queue.
2733 */ 2728 */
2734int dm_suspend(struct mapped_device *md, unsigned suspend_flags) 2729static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
2730 unsigned suspend_flags, int interruptible)
2735{ 2731{
2736 struct dm_table *map = NULL; 2732 bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
2737 int r = 0; 2733 bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG;
2738 int do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG ? 1 : 0; 2734 int r;
2739 int noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG ? 1 : 0;
2740
2741 mutex_lock(&md->suspend_lock);
2742
2743 if (dm_suspended_md(md)) {
2744 r = -EINVAL;
2745 goto out_unlock;
2746 }
2747
2748 map = md->map;
2749 2735
2750 /* 2736 /*
2751 * DMF_NOFLUSH_SUSPENDING must be set before presuspend. 2737 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
@@ -2754,7 +2740,10 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
2754 if (noflush) 2740 if (noflush)
2755 set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); 2741 set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
2756 2742
2757 /* This does not get reverted if there's an error later. */ 2743 /*
2744 * This gets reverted if there's an error later and the targets
2745 * provide the .presuspend_undo hook.
2746 */
2758 dm_table_presuspend_targets(map); 2747 dm_table_presuspend_targets(map);
2759 2748
2760 /* 2749 /*
@@ -2765,8 +2754,10 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
2765 */ 2754 */
2766 if (!noflush && do_lockfs) { 2755 if (!noflush && do_lockfs) {
2767 r = lock_fs(md); 2756 r = lock_fs(md);
2768 if (r) 2757 if (r) {
2769 goto out_unlock; 2758 dm_table_presuspend_undo_targets(map);
2759 return r;
2760 }
2770 } 2761 }
2771 2762
2772 /* 2763 /*
@@ -2782,7 +2773,8 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
2782 * flush_workqueue(md->wq). 2773 * flush_workqueue(md->wq).
2783 */ 2774 */
2784 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 2775 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2785 synchronize_srcu(&md->io_barrier); 2776 if (map)
2777 synchronize_srcu(&md->io_barrier);
2786 2778
2787 /* 2779 /*
2788 * Stop md->queue before flushing md->wq in case request-based 2780 * Stop md->queue before flushing md->wq in case request-based
@@ -2798,11 +2790,12 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
2798 * We call dm_wait_for_completion to wait for all existing requests 2790 * We call dm_wait_for_completion to wait for all existing requests
2799 * to finish. 2791 * to finish.
2800 */ 2792 */
2801 r = dm_wait_for_completion(md, TASK_INTERRUPTIBLE); 2793 r = dm_wait_for_completion(md, interruptible);
2802 2794
2803 if (noflush) 2795 if (noflush)
2804 clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); 2796 clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
2805 synchronize_srcu(&md->io_barrier); 2797 if (map)
2798 synchronize_srcu(&md->io_barrier);
2806 2799
2807 /* were we interrupted ? */ 2800 /* were we interrupted ? */
2808 if (r < 0) { 2801 if (r < 0) {
@@ -2812,14 +2805,56 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
2812 start_queue(md->queue); 2805 start_queue(md->queue);
2813 2806
2814 unlock_fs(md); 2807 unlock_fs(md);
2815 goto out_unlock; /* pushback list is already flushed, so skip flush */ 2808 dm_table_presuspend_undo_targets(map);
2809 /* pushback list is already flushed, so skip flush */
2816 } 2810 }
2817 2811
2818 /* 2812 return r;
2819 * If dm_wait_for_completion returned 0, the device is completely 2813}
2820 * quiescent now. There is no request-processing activity. All new 2814
2821 * requests are being added to md->deferred list. 2815/*
2822 */ 2816 * We need to be able to change a mapping table under a mounted
2817 * filesystem. For example we might want to move some data in
2818 * the background. Before the table can be swapped with
2819 * dm_bind_table, dm_suspend must be called to flush any in
2820 * flight bios and ensure that any further io gets deferred.
2821 */
2822/*
2823 * Suspend mechanism in request-based dm.
2824 *
2825 * 1. Flush all I/Os by lock_fs() if needed.
2826 * 2. Stop dispatching any I/O by stopping the request_queue.
2827 * 3. Wait for all in-flight I/Os to be completed or requeued.
2828 *
2829 * To abort suspend, start the request_queue.
2830 */
2831int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
2832{
2833 struct dm_table *map = NULL;
2834 int r = 0;
2835
2836retry:
2837 mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);
2838
2839 if (dm_suspended_md(md)) {
2840 r = -EINVAL;
2841 goto out_unlock;
2842 }
2843
2844 if (dm_suspended_internally_md(md)) {
2845 /* already internally suspended, wait for internal resume */
2846 mutex_unlock(&md->suspend_lock);
2847 r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
2848 if (r)
2849 return r;
2850 goto retry;
2851 }
2852
2853 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2854
2855 r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE);
2856 if (r)
2857 goto out_unlock;
2823 2858
2824 set_bit(DMF_SUSPENDED, &md->flags); 2859 set_bit(DMF_SUSPENDED, &md->flags);
2825 2860
@@ -2830,22 +2865,13 @@ out_unlock:
2830 return r; 2865 return r;
2831} 2866}
2832 2867
2833int dm_resume(struct mapped_device *md) 2868static int __dm_resume(struct mapped_device *md, struct dm_table *map)
2834{ 2869{
2835 int r = -EINVAL; 2870 if (map) {
2836 struct dm_table *map = NULL; 2871 int r = dm_table_resume_targets(map);
2837 2872 if (r)
2838 mutex_lock(&md->suspend_lock); 2873 return r;
2839 if (!dm_suspended_md(md)) 2874 }
2840 goto out;
2841
2842 map = md->map;
2843 if (!map || !dm_table_get_size(map))
2844 goto out;
2845
2846 r = dm_table_resume_targets(map);
2847 if (r)
2848 goto out;
2849 2875
2850 dm_queue_flush(md); 2876 dm_queue_flush(md);
2851 2877
@@ -2859,6 +2885,37 @@ int dm_resume(struct mapped_device *md)
2859 2885
2860 unlock_fs(md); 2886 unlock_fs(md);
2861 2887
2888 return 0;
2889}
2890
2891int dm_resume(struct mapped_device *md)
2892{
2893 int r = -EINVAL;
2894 struct dm_table *map = NULL;
2895
2896retry:
2897 mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);
2898
2899 if (!dm_suspended_md(md))
2900 goto out;
2901
2902 if (dm_suspended_internally_md(md)) {
2903 /* already internally suspended, wait for internal resume */
2904 mutex_unlock(&md->suspend_lock);
2905 r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
2906 if (r)
2907 return r;
2908 goto retry;
2909 }
2910
2911 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2912 if (!map || !dm_table_get_size(map))
2913 goto out;
2914
2915 r = __dm_resume(md, map);
2916 if (r)
2917 goto out;
2918
2862 clear_bit(DMF_SUSPENDED, &md->flags); 2919 clear_bit(DMF_SUSPENDED, &md->flags);
2863 2920
2864 r = 0; 2921 r = 0;
@@ -2872,15 +2929,80 @@ out:
2872 * Internal suspend/resume works like userspace-driven suspend. It waits 2929 * Internal suspend/resume works like userspace-driven suspend. It waits
2873 * until all bios finish and prevents issuing new bios to the target drivers. 2930 * until all bios finish and prevents issuing new bios to the target drivers.
2874 * It may be used only from the kernel. 2931 * It may be used only from the kernel.
2875 *
2876 * Internal suspend holds md->suspend_lock, which prevents interaction with
2877 * userspace-driven suspend.
2878 */ 2932 */
2879 2933
2880void dm_internal_suspend(struct mapped_device *md) 2934static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags)
2881{ 2935{
2882 mutex_lock(&md->suspend_lock); 2936 struct dm_table *map = NULL;
2937
2938 if (dm_suspended_internally_md(md))
2939 return; /* nested internal suspend */
2940
2941 if (dm_suspended_md(md)) {
2942 set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
2943 return; /* nest suspend */
2944 }
2945
2946 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2947
2948 /*
2949 * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is
2950 * supported. Properly supporting a TASK_INTERRUPTIBLE internal suspend
2951 * would require changing .presuspend to return an error -- avoid this
2952 * until there is a need for more elaborate variants of internal suspend.
2953 */
2954 (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE);
2955
2956 set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
2957
2958 dm_table_postsuspend_targets(map);
2959}
2960
2961static void __dm_internal_resume(struct mapped_device *md)
2962{
2963 if (!dm_suspended_internally_md(md))
2964 return; /* resume from nested internal suspend */
2965
2883 if (dm_suspended_md(md)) 2966 if (dm_suspended_md(md))
2967 goto done; /* resume from nested suspend */
2968
2969 /*
2970 * NOTE: existing callers don't need to call dm_table_resume_targets
2971 * (which may fail -- so best to avoid it for now by passing NULL map)
2972 */
2973 (void) __dm_resume(md, NULL);
2974
2975done:
2976 clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
2977 smp_mb__after_atomic();
2978 wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY);
2979}
2980
2981void dm_internal_suspend_noflush(struct mapped_device *md)
2982{
2983 mutex_lock(&md->suspend_lock);
2984 __dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG);
2985 mutex_unlock(&md->suspend_lock);
2986}
2987EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush);
2988
2989void dm_internal_resume(struct mapped_device *md)
2990{
2991 mutex_lock(&md->suspend_lock);
2992 __dm_internal_resume(md);
2993 mutex_unlock(&md->suspend_lock);
2994}
2995EXPORT_SYMBOL_GPL(dm_internal_resume);
2996
2997/*
2998 * Fast variants of internal suspend/resume hold md->suspend_lock,
2999 * which prevents interaction with userspace-driven suspend.
3000 */
3001
3002void dm_internal_suspend_fast(struct mapped_device *md)
3003{
3004 mutex_lock(&md->suspend_lock);
3005 if (dm_suspended_md(md) || dm_suspended_internally_md(md))
2884 return; 3006 return;
2885 3007
2886 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 3008 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
@@ -2889,9 +3011,9 @@ void dm_internal_suspend(struct mapped_device *md)
2889 dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE); 3011 dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
2890} 3012}
2891 3013
2892void dm_internal_resume(struct mapped_device *md) 3014void dm_internal_resume_fast(struct mapped_device *md)
2893{ 3015{
2894 if (dm_suspended_md(md)) 3016 if (dm_suspended_md(md) || dm_suspended_internally_md(md))
2895 goto done; 3017 goto done;
2896 3018
2897 dm_queue_flush(md); 3019 dm_queue_flush(md);
@@ -2977,6 +3099,11 @@ int dm_suspended_md(struct mapped_device *md)
2977 return test_bit(DMF_SUSPENDED, &md->flags); 3099 return test_bit(DMF_SUSPENDED, &md->flags);
2978} 3100}
2979 3101
3102int dm_suspended_internally_md(struct mapped_device *md)
3103{
3104 return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
3105}
3106
2980int dm_test_deferred_remove_flag(struct mapped_device *md) 3107int dm_test_deferred_remove_flag(struct mapped_device *md)
2981{ 3108{
2982 return test_bit(DMF_DEFERRED_REMOVE, &md->flags); 3109 return test_bit(DMF_DEFERRED_REMOVE, &md->flags);
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 988c7fb7b145..84b0f9e4ba6c 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -65,6 +65,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
65 struct queue_limits *limits); 65 struct queue_limits *limits);
66struct list_head *dm_table_get_devices(struct dm_table *t); 66struct list_head *dm_table_get_devices(struct dm_table *t);
67void dm_table_presuspend_targets(struct dm_table *t); 67void dm_table_presuspend_targets(struct dm_table *t);
68void dm_table_presuspend_undo_targets(struct dm_table *t);
68void dm_table_postsuspend_targets(struct dm_table *t); 69void dm_table_postsuspend_targets(struct dm_table *t);
69int dm_table_resume_targets(struct dm_table *t); 70int dm_table_resume_targets(struct dm_table *t);
70int dm_table_any_congested(struct dm_table *t, int bdi_bits); 71int dm_table_any_congested(struct dm_table *t, int bdi_bits);
@@ -129,6 +130,15 @@ int dm_deleting_md(struct mapped_device *md);
129int dm_suspended_md(struct mapped_device *md); 130int dm_suspended_md(struct mapped_device *md);
130 131
131/* 132/*
133 * Internal suspend and resume methods.
134 */
135int dm_suspended_internally_md(struct mapped_device *md);
136void dm_internal_suspend_fast(struct mapped_device *md);
137void dm_internal_resume_fast(struct mapped_device *md);
138void dm_internal_suspend_noflush(struct mapped_device *md);
139void dm_internal_resume(struct mapped_device *md);
140
141/*
132 * Test if the device is scheduled for deferred remove. 142 * Test if the device is scheduled for deferred remove.
133 */ 143 */
134int dm_test_deferred_remove_flag(struct mapped_device *md); 144int dm_test_deferred_remove_flag(struct mapped_device *md);
diff --git a/drivers/md/persistent-data/dm-array.c b/drivers/md/persistent-data/dm-array.c
index 1d75b1dc1e2e..e64b61ad0ef3 100644
--- a/drivers/md/persistent-data/dm-array.c
+++ b/drivers/md/persistent-data/dm-array.c
@@ -645,8 +645,10 @@ static int array_resize(struct dm_array_info *info, dm_block_t root,
645 int r; 645 int r;
646 struct resize resize; 646 struct resize resize;
647 647
648 if (old_size == new_size) 648 if (old_size == new_size) {
649 *new_root = root;
649 return 0; 650 return 0;
651 }
650 652
651 resize.info = info; 653 resize.info = info;
652 resize.root = root; 654 resize.root = root;
diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
index 786b689bdfc7..e8a904298887 100644
--- a/drivers/md/persistent-data/dm-space-map-metadata.c
+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
@@ -564,7 +564,9 @@ static int sm_bootstrap_get_nr_blocks(struct dm_space_map *sm, dm_block_t *count
564{ 564{
565 struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm); 565 struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
566 566
567 return smm->ll.nr_blocks; 567 *count = smm->ll.nr_blocks;
568
569 return 0;
568} 570}
569 571
570static int sm_bootstrap_get_nr_free(struct dm_space_map *sm, dm_block_t *count) 572static int sm_bootstrap_get_nr_free(struct dm_space_map *sm, dm_block_t *count)
@@ -581,7 +583,9 @@ static int sm_bootstrap_get_count(struct dm_space_map *sm, dm_block_t b,
581{ 583{
582 struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm); 584 struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
583 585
584 return b < smm->begin ? 1 : 0; 586 *result = (b < smm->begin) ? 1 : 0;
587
588 return 0;
585} 589}
586 590
587static int sm_bootstrap_count_is_more_than_one(struct dm_space_map *sm, 591static int sm_bootstrap_count_is_more_than_one(struct dm_space_map *sm,
diff --git a/drivers/md/persistent-data/dm-transaction-manager.c b/drivers/md/persistent-data/dm-transaction-manager.c
index 3bc30a0ae3d6..9cb797d800cf 100644
--- a/drivers/md/persistent-data/dm-transaction-manager.c
+++ b/drivers/md/persistent-data/dm-transaction-manager.c
@@ -10,6 +10,8 @@
10#include "dm-persistent-data-internal.h" 10#include "dm-persistent-data-internal.h"
11 11
12#include <linux/export.h> 12#include <linux/export.h>
13#include <linux/mutex.h>
14#include <linux/hash.h>
13#include <linux/slab.h> 15#include <linux/slab.h>
14#include <linux/device-mapper.h> 16#include <linux/device-mapper.h>
15 17
@@ -17,6 +19,61 @@
17 19
18/*----------------------------------------------------------------*/ 20/*----------------------------------------------------------------*/
19 21
22#define PREFETCH_SIZE 128
23#define PREFETCH_BITS 7
24#define PREFETCH_SENTINEL ((dm_block_t) -1ULL)
25
26struct prefetch_set {
27 struct mutex lock;
28 dm_block_t blocks[PREFETCH_SIZE];
29};
30
31static unsigned prefetch_hash(dm_block_t b)
32{
33 return hash_64(b, PREFETCH_BITS);
34}
35
36static void prefetch_wipe(struct prefetch_set *p)
37{
38 unsigned i;
39 for (i = 0; i < PREFETCH_SIZE; i++)
40 p->blocks[i] = PREFETCH_SENTINEL;
41}
42
43static void prefetch_init(struct prefetch_set *p)
44{
45 mutex_init(&p->lock);
46 prefetch_wipe(p);
47}
48
49static void prefetch_add(struct prefetch_set *p, dm_block_t b)
50{
51 unsigned h = prefetch_hash(b);
52
53 mutex_lock(&p->lock);
54 if (p->blocks[h] == PREFETCH_SENTINEL)
55 p->blocks[h] = b;
56
57 mutex_unlock(&p->lock);
58}
59
60static void prefetch_issue(struct prefetch_set *p, struct dm_block_manager *bm)
61{
62 unsigned i;
63
64 mutex_lock(&p->lock);
65
66 for (i = 0; i < PREFETCH_SIZE; i++)
67 if (p->blocks[i] != PREFETCH_SENTINEL) {
68 dm_bm_prefetch(bm, p->blocks[i]);
69 p->blocks[i] = PREFETCH_SENTINEL;
70 }
71
72 mutex_unlock(&p->lock);
73}
74
75/*----------------------------------------------------------------*/
76
20struct shadow_info { 77struct shadow_info {
21 struct hlist_node hlist; 78 struct hlist_node hlist;
22 dm_block_t where; 79 dm_block_t where;
@@ -37,6 +94,8 @@ struct dm_transaction_manager {
37 94
38 spinlock_t lock; 95 spinlock_t lock;
39 struct hlist_head buckets[DM_HASH_SIZE]; 96 struct hlist_head buckets[DM_HASH_SIZE];
97
98 struct prefetch_set prefetches;
40}; 99};
41 100
42/*----------------------------------------------------------------*/ 101/*----------------------------------------------------------------*/
@@ -117,6 +176,8 @@ static struct dm_transaction_manager *dm_tm_create(struct dm_block_manager *bm,
117 for (i = 0; i < DM_HASH_SIZE; i++) 176 for (i = 0; i < DM_HASH_SIZE; i++)
118 INIT_HLIST_HEAD(tm->buckets + i); 177 INIT_HLIST_HEAD(tm->buckets + i);
119 178
179 prefetch_init(&tm->prefetches);
180
120 return tm; 181 return tm;
121} 182}
122 183
@@ -268,8 +329,14 @@ int dm_tm_read_lock(struct dm_transaction_manager *tm, dm_block_t b,
268 struct dm_block_validator *v, 329 struct dm_block_validator *v,
269 struct dm_block **blk) 330 struct dm_block **blk)
270{ 331{
271 if (tm->is_clone) 332 if (tm->is_clone) {
272 return dm_bm_read_try_lock(tm->real->bm, b, v, blk); 333 int r = dm_bm_read_try_lock(tm->real->bm, b, v, blk);
334
335 if (r == -EWOULDBLOCK)
336 prefetch_add(&tm->real->prefetches, b);
337
338 return r;
339 }
273 340
274 return dm_bm_read_lock(tm->bm, b, v, blk); 341 return dm_bm_read_lock(tm->bm, b, v, blk);
275} 342}
@@ -317,6 +384,12 @@ struct dm_block_manager *dm_tm_get_bm(struct dm_transaction_manager *tm)
317 return tm->bm; 384 return tm->bm;
318} 385}
319 386
387void dm_tm_issue_prefetches(struct dm_transaction_manager *tm)
388{
389 prefetch_issue(&tm->prefetches, tm->bm);
390}
391EXPORT_SYMBOL_GPL(dm_tm_issue_prefetches);
392
320/*----------------------------------------------------------------*/ 393/*----------------------------------------------------------------*/
321 394
322static int dm_tm_create_internal(struct dm_block_manager *bm, 395static int dm_tm_create_internal(struct dm_block_manager *bm,
diff --git a/drivers/md/persistent-data/dm-transaction-manager.h b/drivers/md/persistent-data/dm-transaction-manager.h
index 2772ed2a781a..2e0d4d66fb1b 100644
--- a/drivers/md/persistent-data/dm-transaction-manager.h
+++ b/drivers/md/persistent-data/dm-transaction-manager.h
@@ -109,6 +109,13 @@ int dm_tm_ref(struct dm_transaction_manager *tm, dm_block_t b,
109struct dm_block_manager *dm_tm_get_bm(struct dm_transaction_manager *tm); 109struct dm_block_manager *dm_tm_get_bm(struct dm_transaction_manager *tm);
110 110
111/* 111/*
112 * If you're using a non-blocking clone the tm will build up a list of
113 * requested blocks that weren't in core. This call will request those
114 * blocks to be prefetched.
115 */
116void dm_tm_issue_prefetches(struct dm_transaction_manager *tm);
117
118/*
112 * A little utility that ties the knot by producing a transaction manager 119 * A little utility that ties the knot by producing a transaction manager
113 * that has a space map managed by the transaction manager... 120 * that has a space map managed by the transaction manager...
114 * 121 *
diff --git a/drivers/media/common/cx2341x.c b/drivers/media/common/cx2341x.c
index be763150b8aa..c07b9db51b05 100644
--- a/drivers/media/common/cx2341x.c
+++ b/drivers/media/common/cx2341x.c
@@ -931,6 +931,35 @@ static void cx2341x_calc_audio_properties(struct cx2341x_mpeg_params *params)
931 } 931 }
932} 932}
933 933
934/* Check for correctness of the ctrl's value based on the data from
935 struct v4l2_queryctrl and the available menu items. Note that
936 menu_items may be NULL, in that case it is ignored. */
937static int v4l2_ctrl_check(struct v4l2_ext_control *ctrl, struct v4l2_queryctrl *qctrl,
938 const char * const *menu_items)
939{
940 if (qctrl->flags & V4L2_CTRL_FLAG_DISABLED)
941 return -EINVAL;
942 if (qctrl->flags & V4L2_CTRL_FLAG_GRABBED)
943 return -EBUSY;
944 if (qctrl->type == V4L2_CTRL_TYPE_STRING)
945 return 0;
946 if (qctrl->type == V4L2_CTRL_TYPE_BUTTON ||
947 qctrl->type == V4L2_CTRL_TYPE_INTEGER64 ||
948 qctrl->type == V4L2_CTRL_TYPE_CTRL_CLASS)
949 return 0;
950 if (ctrl->value < qctrl->minimum || ctrl->value > qctrl->maximum)
951 return -ERANGE;
952 if (qctrl->type == V4L2_CTRL_TYPE_MENU && menu_items != NULL) {
953 if (menu_items[ctrl->value] == NULL ||
954 menu_items[ctrl->value][0] == '\0')
955 return -EINVAL;
956 }
957 if (qctrl->type == V4L2_CTRL_TYPE_BITMASK &&
958 (ctrl->value & ~qctrl->maximum))
959 return -ERANGE;
960 return 0;
961}
962
934int cx2341x_ext_ctrls(struct cx2341x_mpeg_params *params, int busy, 963int cx2341x_ext_ctrls(struct cx2341x_mpeg_params *params, int busy,
935 struct v4l2_ext_controls *ctrls, unsigned int cmd) 964 struct v4l2_ext_controls *ctrls, unsigned int cmd)
936{ 965{
diff --git a/drivers/media/common/saa7146/saa7146_core.c b/drivers/media/common/saa7146/saa7146_core.c
index 4418119cf707..1ff9f5323bc3 100644
--- a/drivers/media/common/saa7146/saa7146_core.c
+++ b/drivers/media/common/saa7146/saa7146_core.c
@@ -71,7 +71,7 @@ static inline int saa7146_wait_for_debi_done_sleep(struct saa7146_dev *dev,
71 if (saa7146_read(dev, MC2) & 2) 71 if (saa7146_read(dev, MC2) & 2)
72 break; 72 break;
73 if (err) { 73 if (err) {
74 pr_err("%s: %s timed out while waiting for registers getting programmed\n", 74 pr_debug("%s: %s timed out while waiting for registers getting programmed\n",
75 dev->name, __func__); 75 dev->name, __func__);
76 return -ETIMEDOUT; 76 return -ETIMEDOUT;
77 } 77 }
diff --git a/drivers/media/common/siano/smsir.c b/drivers/media/common/siano/smsir.c
index 273043ea8f47..35d0e887bd65 100644
--- a/drivers/media/common/siano/smsir.c
+++ b/drivers/media/common/siano/smsir.c
@@ -107,8 +107,7 @@ int sms_ir_init(struct smscore_device_t *coredev)
107 107
108void sms_ir_exit(struct smscore_device_t *coredev) 108void sms_ir_exit(struct smscore_device_t *coredev)
109{ 109{
110 if (coredev->ir.dev) 110 rc_unregister_device(coredev->ir.dev);
111 rc_unregister_device(coredev->ir.dev);
112 111
113 sms_log(""); 112 sms_log("");
114} 113}
diff --git a/drivers/media/common/tveeprom.c b/drivers/media/common/tveeprom.c
index c7dace671a9d..47da0378cad8 100644
--- a/drivers/media/common/tveeprom.c
+++ b/drivers/media/common/tveeprom.c
@@ -286,9 +286,17 @@ static const struct {
286 { TUNER_ABSENT, "Xceive XC5200C"}, 286 { TUNER_ABSENT, "Xceive XC5200C"},
287 { TUNER_ABSENT, "NXP 18273"}, 287 { TUNER_ABSENT, "NXP 18273"},
288 { TUNER_ABSENT, "Montage M88TS2022"}, 288 { TUNER_ABSENT, "Montage M88TS2022"},
289 /* 180-189 */ 289 /* 180-188 */
290 { TUNER_ABSENT, "NXP 18272M"}, 290 { TUNER_ABSENT, "NXP 18272M"},
291 { TUNER_ABSENT, "NXP 18272S"}, 291 { TUNER_ABSENT, "NXP 18272S"},
292
293 { TUNER_ABSENT, "Mirics MSi003"},
294 { TUNER_ABSENT, "MaxLinear MxL256"},
295 { TUNER_ABSENT, "SiLabs Si2158"},
296 { TUNER_ABSENT, "SiLabs Si2178"},
297 { TUNER_ABSENT, "SiLabs Si2157"},
298 { TUNER_ABSENT, "SiLabs Si2177"},
299 { TUNER_ABSENT, "ITE IT9137FN"},
292}; 300};
293 301
294/* Use TVEEPROM_AUDPROC_INTERNAL for those audio 'chips' that are 302/* Use TVEEPROM_AUDPROC_INTERNAL for those audio 'chips' that are
@@ -351,6 +359,16 @@ static const struct {
351 { TVEEPROM_AUDPROC_INTERNAL, "CX23887" }, 359 { TVEEPROM_AUDPROC_INTERNAL, "CX23887" },
352 { TVEEPROM_AUDPROC_INTERNAL, "SAA7164" }, 360 { TVEEPROM_AUDPROC_INTERNAL, "SAA7164" },
353 { TVEEPROM_AUDPROC_INTERNAL, "AU8522" }, 361 { TVEEPROM_AUDPROC_INTERNAL, "AU8522" },
362 /* 45-49 */
363 { TVEEPROM_AUDPROC_INTERNAL, "AVF4910B" },
364 { TVEEPROM_AUDPROC_INTERNAL, "SAA7231" },
365 { TVEEPROM_AUDPROC_INTERNAL, "CX23102" },
366 { TVEEPROM_AUDPROC_INTERNAL, "SAA7163" },
367 { TVEEPROM_AUDPROC_OTHER, "AK4113" },
368 /* 50-52 */
369 { TVEEPROM_AUDPROC_OTHER, "CS5340" },
370 { TVEEPROM_AUDPROC_OTHER, "CS8416" },
371 { TVEEPROM_AUDPROC_OTHER, "CX20810" },
354}; 372};
355 373
356/* This list is supplied by Hauppauge. Thanks! */ 374/* This list is supplied by Hauppauge. Thanks! */
@@ -371,8 +389,12 @@ static const char *decoderIC[] = {
371 "CX25843", "CX23418", "NEC61153", "CX23885", "CX23888", 389 "CX25843", "CX23418", "NEC61153", "CX23885", "CX23888",
372 /* 35-39 */ 390 /* 35-39 */
373 "SAA7131", "CX25837", "CX23887", "CX23885A", "CX23887A", 391 "SAA7131", "CX25837", "CX23887", "CX23885A", "CX23887A",
374 /* 40-42 */ 392 /* 40-44 */
375 "SAA7164", "CX23885B", "AU8522" 393 "SAA7164", "CX23885B", "AU8522", "ADV7401", "AVF4910B",
394 /* 45-49 */
395 "SAA7231", "CX23102", "SAA7163", "ADV7441A", "ADV7181C",
396 /* 50-53 */
397 "CX25836", "TDA9955", "TDA19977", "ADV7842"
376}; 398};
377 399
378static int hasRadioTuner(int tunerType) 400static int hasRadioTuner(int tunerType)
@@ -548,10 +570,10 @@ void tveeprom_hauppauge_analog(struct i2c_client *c, struct tveeprom *tvee,
548 tvee->serial_number = 570 tvee->serial_number =
549 eeprom_data[i+5] + 571 eeprom_data[i+5] +
550 (eeprom_data[i+6] << 8) + 572 (eeprom_data[i+6] << 8) +
551 (eeprom_data[i+7] << 16); 573 (eeprom_data[i+7] << 16)+
574 (eeprom_data[i+8] << 24);
552 575
553 if ((eeprom_data[i + 8] & 0xf0) && 576 if (eeprom_data[i + 8] == 0xf0) {
554 (tvee->serial_number < 0xffffff)) {
555 tvee->MAC_address[0] = 0x00; 577 tvee->MAC_address[0] = 0x00;
556 tvee->MAC_address[1] = 0x0D; 578 tvee->MAC_address[1] = 0x0D;
557 tvee->MAC_address[2] = 0xFE; 579 tvee->MAC_address[2] = 0xFE;
@@ -696,7 +718,7 @@ void tveeprom_hauppauge_analog(struct i2c_client *c, struct tveeprom *tvee,
696 } 718 }
697 } 719 }
698 720
699 tveeprom_info("Hauppauge model %d, rev %s, serial# %d\n", 721 tveeprom_info("Hauppauge model %d, rev %s, serial# %u\n",
700 tvee->model, tvee->rev_str, tvee->serial_number); 722 tvee->model, tvee->rev_str, tvee->serial_number);
701 if (tvee->has_MAC_address == 1) 723 if (tvee->has_MAC_address == 1)
702 tveeprom_info("MAC address is %pM\n", tvee->MAC_address); 724 tveeprom_info("MAC address is %pM\n", tvee->MAC_address);
diff --git a/drivers/media/dvb-core/dvb-usb-ids.h b/drivers/media/dvb-core/dvb-usb-ids.h
index e07a84e7bc56..80ab8d0ff6e0 100644
--- a/drivers/media/dvb-core/dvb-usb-ids.h
+++ b/drivers/media/dvb-core/dvb-usb-ids.h
@@ -356,6 +356,7 @@
356#define USB_PID_MSI_DIGI_VOX_MINI_III 0x8807 356#define USB_PID_MSI_DIGI_VOX_MINI_III 0x8807
357#define USB_PID_SONY_PLAYTV 0x0003 357#define USB_PID_SONY_PLAYTV 0x0003
358#define USB_PID_MYGICA_D689 0xd811 358#define USB_PID_MYGICA_D689 0xd811
359#define USB_PID_MYGICA_T230 0xc688
359#define USB_PID_ELGATO_EYETV_DIVERSITY 0x0011 360#define USB_PID_ELGATO_EYETV_DIVERSITY 0x0011
360#define USB_PID_ELGATO_EYETV_DTT 0x0021 361#define USB_PID_ELGATO_EYETV_DTT 0x0021
361#define USB_PID_ELGATO_EYETV_DTT_2 0x003f 362#define USB_PID_ELGATO_EYETV_DTT_2 0x003f
diff --git a/drivers/media/dvb-core/dvb_net.c b/drivers/media/dvb-core/dvb_net.c
index 059e6117f22b..e4041f074909 100644
--- a/drivers/media/dvb-core/dvb_net.c
+++ b/drivers/media/dvb-core/dvb_net.c
@@ -379,7 +379,9 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len )
379 /* Check TS error conditions: sync_byte, transport_error_indicator, scrambling_control . */ 379 /* Check TS error conditions: sync_byte, transport_error_indicator, scrambling_control . */
380 if ((ts[0] != TS_SYNC) || (ts[1] & TS_TEI) || ((ts[3] & TS_SC) != 0)) { 380 if ((ts[0] != TS_SYNC) || (ts[1] & TS_TEI) || ((ts[3] & TS_SC) != 0)) {
381 printk(KERN_WARNING "%lu: Invalid TS cell: SYNC %#x, TEI %u, SC %#x.\n", 381 printk(KERN_WARNING "%lu: Invalid TS cell: SYNC %#x, TEI %u, SC %#x.\n",
382 priv->ts_count, ts[0], ts[1] & TS_TEI >> 7, ts[3] & 0xC0 >> 6); 382 priv->ts_count, ts[0],
383 (ts[1] & TS_TEI) >> 7,
384 (ts[3] & TS_SC) >> 6);
383 385
384 /* Drop partly decoded SNDU, reset state, resync on PUSI. */ 386 /* Drop partly decoded SNDU, reset state, resync on PUSI. */
385 if (priv->ule_skb) { 387 if (priv->ule_skb) {
diff --git a/drivers/media/dvb-frontends/Kconfig b/drivers/media/dvb-frontends/Kconfig
index 5a134547e325..6c75418222e2 100644
--- a/drivers/media/dvb-frontends/Kconfig
+++ b/drivers/media/dvb-frontends/Kconfig
@@ -648,12 +648,15 @@ config DVB_MB86A20S
648 A driver for Fujitsu mb86a20s ISDB-T/ISDB-Tsb demodulator. 648 A driver for Fujitsu mb86a20s ISDB-T/ISDB-Tsb demodulator.
649 Say Y when you want to support this frontend. 649 Say Y when you want to support this frontend.
650 650
651comment "ISDB-S (satellite) & ISDB-T (terrestrial) frontends"
652 depends on DVB_CORE
653
651config DVB_TC90522 654config DVB_TC90522
652 tristate "Toshiba TC90522" 655 tristate "Toshiba TC90522"
653 depends on DVB_CORE && I2C 656 depends on DVB_CORE && I2C
654 default m if !MEDIA_SUBDRV_AUTOSELECT 657 default m if !MEDIA_SUBDRV_AUTOSELECT
655 help 658 help
656 A Toshiba TC90522 2xISDB-T + 2xISDB-S demodulator. 659 Toshiba TC90522 2xISDB-S 8PSK + 2xISDB-T OFDM demodulator.
657 Say Y when you want to support this frontend. 660 Say Y when you want to support this frontend.
658 661
659comment "Digital terrestrial only tuners/PLL" 662comment "Digital terrestrial only tuners/PLL"
diff --git a/drivers/media/dvb-frontends/af9033.c b/drivers/media/dvb-frontends/af9033.c
index 63a89c1c59ff..82ce47bdf5dc 100644
--- a/drivers/media/dvb-frontends/af9033.c
+++ b/drivers/media/dvb-frontends/af9033.c
@@ -291,6 +291,12 @@ static int af9033_init(struct dvb_frontend *fe)
291 if (clock_adc_lut[i].clock == dev->cfg.clock) 291 if (clock_adc_lut[i].clock == dev->cfg.clock)
292 break; 292 break;
293 } 293 }
294 if (i == ARRAY_SIZE(clock_adc_lut)) {
295 dev_err(&dev->client->dev,
296 "Couldn't find ADC config for clock=%d\n",
297 dev->cfg.clock);
298 goto err;
299 }
294 300
295 adc_cw = af9033_div(dev, clock_adc_lut[i].adc, 1000000ul, 19ul); 301 adc_cw = af9033_div(dev, clock_adc_lut[i].adc, 1000000ul, 19ul);
296 buf[0] = (adc_cw >> 0) & 0xff; 302 buf[0] = (adc_cw >> 0) & 0xff;
@@ -580,7 +586,15 @@ static int af9033_set_frontend(struct dvb_frontend *fe)
580 break; 586 break;
581 } 587 }
582 } 588 }
583 ret = af9033_wr_regs(dev, 0x800001, 589 if (i == ARRAY_SIZE(coeff_lut)) {
590 dev_err(&dev->client->dev,
591 "Couldn't find LUT config for clock=%d\n",
592 dev->cfg.clock);
593 ret = -EINVAL;
594 goto err;
595 }
596
597 ret = af9033_wr_regs(dev, 0x800001,
584 coeff_lut[i].val, sizeof(coeff_lut[i].val)); 598 coeff_lut[i].val, sizeof(coeff_lut[i].val));
585 } 599 }
586 600
@@ -592,6 +606,13 @@ static int af9033_set_frontend(struct dvb_frontend *fe)
592 if (clock_adc_lut[i].clock == dev->cfg.clock) 606 if (clock_adc_lut[i].clock == dev->cfg.clock)
593 break; 607 break;
594 } 608 }
609 if (i == ARRAY_SIZE(clock_adc_lut)) {
610 dev_err(&dev->client->dev,
611 "Couldn't find ADC clock for clock=%d\n",
612 dev->cfg.clock);
613 ret = -EINVAL;
614 goto err;
615 }
595 adc_freq = clock_adc_lut[i].adc; 616 adc_freq = clock_adc_lut[i].adc;
596 617
597 /* get used IF frequency */ 618 /* get used IF frequency */
@@ -849,29 +870,97 @@ static int af9033_read_snr(struct dvb_frontend *fe, u16 *snr)
849{ 870{
850 struct af9033_dev *dev = fe->demodulator_priv; 871 struct af9033_dev *dev = fe->demodulator_priv;
851 struct dtv_frontend_properties *c = &dev->fe.dtv_property_cache; 872 struct dtv_frontend_properties *c = &dev->fe.dtv_property_cache;
873 int ret;
874 u8 u8tmp;
852 875
853 /* use DVBv5 CNR */ 876 /* use DVBv5 CNR */
854 if (c->cnr.stat[0].scale == FE_SCALE_DECIBEL) 877 if (c->cnr.stat[0].scale == FE_SCALE_DECIBEL) {
855 *snr = div_s64(c->cnr.stat[0].svalue, 100); /* 1000x => 10x */ 878 /* Return 0.1 dB for AF9030 and 0-0xffff for IT9130. */
856 else 879 if (dev->is_af9035) {
880 /* 1000x => 10x (0.1 dB) */
881 *snr = div_s64(c->cnr.stat[0].svalue, 100);
882 } else {
883 /* 1000x => 1x (1 dB) */
884 *snr = div_s64(c->cnr.stat[0].svalue, 1000);
885
886 /* read current modulation */
887 ret = af9033_rd_reg(dev, 0x80f903, &u8tmp);
888 if (ret)
889 goto err;
890
891 /* scale value to 0x0000-0xffff */
892 switch ((u8tmp >> 0) & 3) {
893 case 0:
894 *snr = *snr * 0xffff / 23;
895 break;
896 case 1:
897 *snr = *snr * 0xffff / 26;
898 break;
899 case 2:
900 *snr = *snr * 0xffff / 32;
901 break;
902 default:
903 goto err;
904 }
905 }
906 } else {
857 *snr = 0; 907 *snr = 0;
908 }
858 909
859 return 0; 910 return 0;
911
912err:
913 dev_dbg(&dev->client->dev, "failed=%d\n", ret);
914
915 return ret;
860} 916}
861 917
862static int af9033_read_signal_strength(struct dvb_frontend *fe, u16 *strength) 918static int af9033_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
863{ 919{
864 struct af9033_dev *dev = fe->demodulator_priv; 920 struct af9033_dev *dev = fe->demodulator_priv;
865 int ret; 921 struct dtv_frontend_properties *c = &dev->fe.dtv_property_cache;
866 u8 strength2; 922 int ret, tmp, power_real;
923 u8 u8tmp, gain_offset, buf[7];
867 924
868 /* read signal strength of 0-100 scale */ 925 if (dev->is_af9035) {
869 ret = af9033_rd_reg(dev, 0x800048, &strength2); 926 /* read signal strength of 0-100 scale */
870 if (ret < 0) 927 ret = af9033_rd_reg(dev, 0x800048, &u8tmp);
871 goto err; 928 if (ret < 0)
929 goto err;
930
931 /* scale value to 0x0000-0xffff */
932 *strength = u8tmp * 0xffff / 100;
933 } else {
934 ret = af9033_rd_reg(dev, 0x8000f7, &u8tmp);
935 if (ret < 0)
936 goto err;
937
938 ret = af9033_rd_regs(dev, 0x80f900, buf, 7);
939 if (ret < 0)
940 goto err;
941
942 if (c->frequency <= 300000000)
943 gain_offset = 7; /* VHF */
944 else
945 gain_offset = 4; /* UHF */
946
947 power_real = (u8tmp - 100 - gain_offset) -
948 power_reference[((buf[3] >> 0) & 3)][((buf[6] >> 0) & 7)];
949
950 if (power_real < -15)
951 tmp = 0;
952 else if ((power_real >= -15) && (power_real < 0))
953 tmp = (2 * (power_real + 15)) / 3;
954 else if ((power_real >= 0) && (power_real < 20))
955 tmp = 4 * power_real + 10;
956 else if ((power_real >= 20) && (power_real < 35))
957 tmp = (2 * (power_real - 20)) / 3 + 90;
958 else
959 tmp = 100;
872 960
873 /* scale value to 0x0000-0xffff */ 961 /* scale value to 0x0000-0xffff */
874 *strength = strength2 * 0xffff / 100; 962 *strength = tmp * 0xffff / 100;
963 }
875 964
876 return 0; 965 return 0;
877 966
@@ -1011,6 +1100,33 @@ static void af9033_stat_work(struct work_struct *work)
1011 1100
1012 snr_val = (buf[2] << 16) | (buf[1] << 8) | (buf[0] << 0); 1101 snr_val = (buf[2] << 16) | (buf[1] << 8) | (buf[0] << 0);
1013 1102
1103 /* read superframe number */
1104 ret = af9033_rd_reg(dev, 0x80f78b, &u8tmp);
1105 if (ret)
1106 goto err;
1107
1108 if (u8tmp)
1109 snr_val /= u8tmp;
1110
1111 /* read current transmission mode */
1112 ret = af9033_rd_reg(dev, 0x80f900, &u8tmp);
1113 if (ret)
1114 goto err;
1115
1116 switch ((u8tmp >> 0) & 3) {
1117 case 0:
1118 snr_val *= 4;
1119 break;
1120 case 1:
1121 snr_val *= 1;
1122 break;
1123 case 2:
1124 snr_val *= 2;
1125 break;
1126 default:
1127 goto err_schedule_delayed_work;
1128 }
1129
1014 /* read current modulation */ 1130 /* read current modulation */
1015 ret = af9033_rd_reg(dev, 0x80f903, &u8tmp); 1131 ret = af9033_rd_reg(dev, 0x80f903, &u8tmp);
1016 if (ret) 1132 if (ret)
diff --git a/drivers/media/dvb-frontends/af9033_priv.h b/drivers/media/dvb-frontends/af9033_priv.h
index c12c92cb5855..8e23275148ed 100644
--- a/drivers/media/dvb-frontends/af9033_priv.h
+++ b/drivers/media/dvb-frontends/af9033_priv.h
@@ -181,7 +181,10 @@ static const struct val_snr qam64_snr_lut[] = {
181 { 0x05570d, 26 }, 181 { 0x05570d, 26 },
182 { 0x059feb, 27 }, 182 { 0x059feb, 27 },
183 { 0x05bf38, 28 }, 183 { 0x05bf38, 28 },
184 { 0xffffff, 29 }, 184 { 0x05f78f, 29 },
185 { 0x0612c3, 30 },
186 { 0x0626be, 31 },
187 { 0xffffff, 32 },
185}; 188};
186 189
187static const struct reg_val ofsm_init[] = { 190static const struct reg_val ofsm_init[] = {
@@ -2051,4 +2054,10 @@ static const struct reg_val tuner_init_it9135_62[] = {
2051 { 0x80fd8b, 0x00 }, 2054 { 0x80fd8b, 0x00 },
2052}; 2055};
2053 2056
2057/* NorDig power reference table */
2058static const int power_reference[][5] = {
2059 {-93, -91, -90, -89, -88}, /* QPSK 1/2 ~ 7/8 */
2060 {-87, -85, -84, -83, -82}, /* 16QAM 1/2 ~ 7/8 */
2061 {-82, -80, -78, -77, -76}, /* 64QAM 1/2 ~ 7/8 */
2062};
2054#endif /* AF9033_PRIV_H */ 2063#endif /* AF9033_PRIV_H */
diff --git a/drivers/media/dvb-frontends/au8522_dig.c b/drivers/media/dvb-frontends/au8522_dig.c
index a68974f6d708..5d06c99b0e97 100644
--- a/drivers/media/dvb-frontends/au8522_dig.c
+++ b/drivers/media/dvb-frontends/au8522_dig.c
@@ -29,6 +29,7 @@
29#include "au8522_priv.h" 29#include "au8522_priv.h"
30 30
31static int debug; 31static int debug;
32static int zv_mode = 1; /* default to on */
32 33
33#define dprintk(arg...)\ 34#define dprintk(arg...)\
34 do { if (debug)\ 35 do { if (debug)\
@@ -469,6 +470,87 @@ static struct {
469 { 0x8526, 0x01 }, 470 { 0x8526, 0x01 },
470}; 471};
471 472
473static struct {
474 u16 reg;
475 u16 data;
476} QAM256_mod_tab_zv_mode[] = {
477 { 0x80a3, 0x09 },
478 { 0x80a4, 0x00 },
479 { 0x8081, 0xc4 },
480 { 0x80a5, 0x40 },
481 { 0x80b5, 0xfb },
482 { 0x80b6, 0x8e },
483 { 0x80b7, 0x39 },
484 { 0x80aa, 0x77 },
485 { 0x80ad, 0x77 },
486 { 0x80a6, 0x67 },
487 { 0x8262, 0x20 },
488 { 0x821c, 0x30 },
489 { 0x80b8, 0x3e },
490 { 0x80b9, 0xf0 },
491 { 0x80ba, 0x01 },
492 { 0x80bb, 0x18 },
493 { 0x80bc, 0x50 },
494 { 0x80bd, 0x00 },
495 { 0x80be, 0xea },
496 { 0x80bf, 0xef },
497 { 0x80c0, 0xfc },
498 { 0x80c1, 0xbd },
499 { 0x80c2, 0x1f },
500 { 0x80c3, 0xfc },
501 { 0x80c4, 0xdd },
502 { 0x80c5, 0xaf },
503 { 0x80c6, 0x00 },
504 { 0x80c7, 0x38 },
505 { 0x80c8, 0x30 },
506 { 0x80c9, 0x05 },
507 { 0x80ca, 0x4a },
508 { 0x80cb, 0xd0 },
509 { 0x80cc, 0x01 },
510 { 0x80cd, 0xd9 },
511 { 0x80ce, 0x6f },
512 { 0x80cf, 0xf9 },
513 { 0x80d0, 0x70 },
514 { 0x80d1, 0xdf },
515 { 0x80d2, 0xf7 },
516 { 0x80d3, 0xc2 },
517 { 0x80d4, 0xdf },
518 { 0x80d5, 0x02 },
519 { 0x80d6, 0x9a },
520 { 0x80d7, 0xd0 },
521 { 0x8250, 0x0d },
522 { 0x8251, 0xcd },
523 { 0x8252, 0xe0 },
524 { 0x8253, 0x05 },
525 { 0x8254, 0xa7 },
526 { 0x8255, 0xff },
527 { 0x8256, 0xed },
528 { 0x8257, 0x5b },
529 { 0x8258, 0xae },
530 { 0x8259, 0xe6 },
531 { 0x825a, 0x3d },
532 { 0x825b, 0x0f },
533 { 0x825c, 0x0d },
534 { 0x825d, 0xea },
535 { 0x825e, 0xf2 },
536 { 0x825f, 0x51 },
537 { 0x8260, 0xf5 },
538 { 0x8261, 0x06 },
539 { 0x821a, 0x01 },
540 { 0x8546, 0x40 },
541 { 0x8210, 0x26 },
542 { 0x8211, 0xf6 },
543 { 0x8212, 0x84 },
544 { 0x8213, 0x02 },
545 { 0x8502, 0x01 },
546 { 0x8121, 0x04 },
547 { 0x8122, 0x04 },
548 { 0x852e, 0x10 },
549 { 0x80a4, 0xca },
550 { 0x80a7, 0x40 },
551 { 0x8526, 0x01 },
552};
553
472static int au8522_enable_modulation(struct dvb_frontend *fe, 554static int au8522_enable_modulation(struct dvb_frontend *fe,
473 fe_modulation_t m) 555 fe_modulation_t m)
474{ 556{
@@ -495,12 +577,23 @@ static int au8522_enable_modulation(struct dvb_frontend *fe,
495 au8522_set_if(fe, state->config->qam_if); 577 au8522_set_if(fe, state->config->qam_if);
496 break; 578 break;
497 case QAM_256: 579 case QAM_256:
498 dprintk("%s() QAM 256\n", __func__); 580 if (zv_mode) {
499 for (i = 0; i < ARRAY_SIZE(QAM256_mod_tab); i++) 581 dprintk("%s() QAM 256 (zv_mode)\n", __func__);
500 au8522_writereg(state, 582 for (i = 0; i < ARRAY_SIZE(QAM256_mod_tab_zv_mode); i++)
501 QAM256_mod_tab[i].reg, 583 au8522_writereg(state,
502 QAM256_mod_tab[i].data); 584 QAM256_mod_tab_zv_mode[i].reg,
503 au8522_set_if(fe, state->config->qam_if); 585 QAM256_mod_tab_zv_mode[i].data);
586 au8522_set_if(fe, state->config->qam_if);
587 msleep(100);
588 au8522_writereg(state, 0x821a, 0x00);
589 } else {
590 dprintk("%s() QAM 256\n", __func__);
591 for (i = 0; i < ARRAY_SIZE(QAM256_mod_tab); i++)
592 au8522_writereg(state,
593 QAM256_mod_tab[i].reg,
594 QAM256_mod_tab[i].data);
595 au8522_set_if(fe, state->config->qam_if);
596 }
504 break; 597 break;
505 default: 598 default:
506 dprintk("%s() Invalid modulation\n", __func__); 599 dprintk("%s() Invalid modulation\n", __func__);
@@ -537,7 +630,12 @@ static int au8522_set_frontend(struct dvb_frontend *fe)
537 return ret; 630 return ret;
538 631
539 /* Allow the tuner to settle */ 632 /* Allow the tuner to settle */
540 msleep(100); 633 if (zv_mode) {
634 dprintk("%s() increase tuner settling time for zv_mode\n",
635 __func__);
636 msleep(250);
637 } else
638 msleep(100);
541 639
542 au8522_enable_modulation(fe, c->modulation); 640 au8522_enable_modulation(fe, c->modulation);
543 641
@@ -823,6 +921,11 @@ static struct dvb_frontend_ops au8522_ops = {
823module_param(debug, int, 0644); 921module_param(debug, int, 0644);
824MODULE_PARM_DESC(debug, "Enable verbose debug messages"); 922MODULE_PARM_DESC(debug, "Enable verbose debug messages");
825 923
924module_param(zv_mode, int, 0644);
925MODULE_PARM_DESC(zv_mode, "Turn on/off ZeeVee modulator compatability mode (default:on).\n"
926 "\t\ton - modified AU8522 QAM256 initialization.\n"
927 "\t\tProvides faster lock when using ZeeVee modulator based sources");
928
826MODULE_DESCRIPTION("Auvitek AU8522 QAM-B/ATSC Demodulator driver"); 929MODULE_DESCRIPTION("Auvitek AU8522 QAM-B/ATSC Demodulator driver");
827MODULE_AUTHOR("Steven Toth"); 930MODULE_AUTHOR("Steven Toth");
828MODULE_LICENSE("GPL"); 931MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb-frontends/cx22700.c b/drivers/media/dvb-frontends/cx22700.c
index 3d399d9a6343..86563260d0f2 100644
--- a/drivers/media/dvb-frontends/cx22700.c
+++ b/drivers/media/dvb-frontends/cx22700.c
@@ -169,6 +169,9 @@ static int cx22700_set_tps(struct cx22700_state *state,
169 169
170 cx22700_writereg (state, 0x04, val); 170 cx22700_writereg (state, 0x04, val);
171 171
172 if (p->code_rate_HP - FEC_1_2 >= sizeof(fec_tab) ||
173 p->code_rate_LP - FEC_1_2 >= sizeof(fec_tab))
174 return -EINVAL;
172 val = fec_tab[p->code_rate_HP - FEC_1_2] << 3; 175 val = fec_tab[p->code_rate_HP - FEC_1_2] << 3;
173 val |= fec_tab[p->code_rate_LP - FEC_1_2]; 176 val |= fec_tab[p->code_rate_LP - FEC_1_2];
174 177
diff --git a/drivers/media/dvb-frontends/cx24110.c b/drivers/media/dvb-frontends/cx24110.c
index 95b981cd7115..7b510f2ae20f 100644
--- a/drivers/media/dvb-frontends/cx24110.c
+++ b/drivers/media/dvb-frontends/cx24110.c
@@ -177,47 +177,45 @@ static int cx24110_set_inversion (struct cx24110_state* state, fe_spectral_inver
177 return 0; 177 return 0;
178} 178}
179 179
180static int cx24110_set_fec (struct cx24110_state* state, fe_code_rate_t fec) 180static int cx24110_set_fec(struct cx24110_state* state, fe_code_rate_t fec)
181{ 181{
182/* fixme (low): error handling */ 182 static const int rate[FEC_AUTO] = {-1, 1, 2, 3, 5, 7, -1};
183 183 static const int g1[FEC_AUTO] = {-1, 0x01, 0x02, 0x05, 0x15, 0x45, -1};
184 static const int rate[]={-1,1,2,3,5,7,-1}; 184 static const int g2[FEC_AUTO] = {-1, 0x01, 0x03, 0x06, 0x1a, 0x7a, -1};
185 static const int g1[]={-1,0x01,0x02,0x05,0x15,0x45,-1};
186 static const int g2[]={-1,0x01,0x03,0x06,0x1a,0x7a,-1};
187 185
188 /* Well, the AutoAcq engine of the cx24106 and 24110 automatically 186 /* Well, the AutoAcq engine of the cx24106 and 24110 automatically
189 searches all enabled viterbi rates, and can handle non-standard 187 searches all enabled viterbi rates, and can handle non-standard
190 rates as well. */ 188 rates as well. */
191 189
192 if (fec>FEC_AUTO) 190 if (fec > FEC_AUTO)
193 fec=FEC_AUTO; 191 fec = FEC_AUTO;
194 192
195 if (fec==FEC_AUTO) { /* (re-)establish AutoAcq behaviour */ 193 if (fec == FEC_AUTO) { /* (re-)establish AutoAcq behaviour */
196 cx24110_writereg(state,0x37,cx24110_readreg(state,0x37)&0xdf); 194 cx24110_writereg(state, 0x37, cx24110_readreg(state, 0x37) & 0xdf);
197 /* clear AcqVitDis bit */ 195 /* clear AcqVitDis bit */
198 cx24110_writereg(state,0x18,0xae); 196 cx24110_writereg(state, 0x18, 0xae);
199 /* allow all DVB standard code rates */ 197 /* allow all DVB standard code rates */
200 cx24110_writereg(state,0x05,(cx24110_readreg(state,0x05)&0xf0)|0x3); 198 cx24110_writereg(state, 0x05, (cx24110_readreg(state, 0x05) & 0xf0) | 0x3);
201 /* set nominal Viterbi rate 3/4 */ 199 /* set nominal Viterbi rate 3/4 */
202 cx24110_writereg(state,0x22,(cx24110_readreg(state,0x22)&0xf0)|0x3); 200 cx24110_writereg(state, 0x22, (cx24110_readreg(state, 0x22) & 0xf0) | 0x3);
203 /* set current Viterbi rate 3/4 */ 201 /* set current Viterbi rate 3/4 */
204 cx24110_writereg(state,0x1a,0x05); cx24110_writereg(state,0x1b,0x06); 202 cx24110_writereg(state, 0x1a, 0x05);
203 cx24110_writereg(state, 0x1b, 0x06);
205 /* set the puncture registers for code rate 3/4 */ 204 /* set the puncture registers for code rate 3/4 */
206 return 0; 205 return 0;
207 } else { 206 } else {
208 cx24110_writereg(state,0x37,cx24110_readreg(state,0x37)|0x20); 207 cx24110_writereg(state, 0x37, cx24110_readreg(state, 0x37) | 0x20);
209 /* set AcqVitDis bit */ 208 /* set AcqVitDis bit */
210 if(rate[fec]>0) { 209 if (rate[fec] < 0)
211 cx24110_writereg(state,0x05,(cx24110_readreg(state,0x05)&0xf0)|rate[fec]); 210 return -EINVAL;
212 /* set nominal Viterbi rate */ 211
213 cx24110_writereg(state,0x22,(cx24110_readreg(state,0x22)&0xf0)|rate[fec]); 212 cx24110_writereg(state, 0x05, (cx24110_readreg(state, 0x05) & 0xf0) | rate[fec]);
214 /* set current Viterbi rate */ 213 /* set nominal Viterbi rate */
215 cx24110_writereg(state,0x1a,g1[fec]); 214 cx24110_writereg(state, 0x22, (cx24110_readreg(state, 0x22) & 0xf0) | rate[fec]);
216 cx24110_writereg(state,0x1b,g2[fec]); 215 /* set current Viterbi rate */
217 /* not sure if this is the right way: I always used AutoAcq mode */ 216 cx24110_writereg(state, 0x1a, g1[fec]);
218 } else 217 cx24110_writereg(state, 0x1b, g2[fec]);
219 return -EOPNOTSUPP; 218 /* not sure if this is the right way: I always used AutoAcq mode */
220/* fixme (low): which is the correct return code? */
221 } 219 }
222 return 0; 220 return 0;
223} 221}
diff --git a/drivers/media/dvb-frontends/cx24117.c b/drivers/media/dvb-frontends/cx24117.c
index a6c3c9e2e897..acb965ce0358 100644
--- a/drivers/media/dvb-frontends/cx24117.c
+++ b/drivers/media/dvb-frontends/cx24117.c
@@ -459,7 +459,7 @@ static int cx24117_firmware_ondemand(struct dvb_frontend *fe)
459 if (state->priv->skip_fw_load) 459 if (state->priv->skip_fw_load)
460 return 0; 460 return 0;
461 461
462 /* check if firmware if already running */ 462 /* check if firmware is already running */
463 if (cx24117_readreg(state, 0xeb) != 0xa) { 463 if (cx24117_readreg(state, 0xeb) != 0xa) {
464 /* Load firmware */ 464 /* Load firmware */
465 /* request the firmware, this will block until loaded */ 465 /* request the firmware, this will block until loaded */
diff --git a/drivers/media/dvb-frontends/dib7000p.c b/drivers/media/dvb-frontends/dib7000p.c
index 589134e95175..c505d696f92d 100644
--- a/drivers/media/dvb-frontends/dib7000p.c
+++ b/drivers/media/dvb-frontends/dib7000p.c
@@ -1780,7 +1780,7 @@ static u32 interpolate_value(u32 value, struct linear_segments *segments,
1780} 1780}
1781 1781
1782/* FIXME: may require changes - this one was borrowed from dib8000 */ 1782/* FIXME: may require changes - this one was borrowed from dib8000 */
1783static u32 dib7000p_get_time_us(struct dvb_frontend *demod, int layer) 1783static u32 dib7000p_get_time_us(struct dvb_frontend *demod)
1784{ 1784{
1785 struct dtv_frontend_properties *c = &demod->dtv_property_cache; 1785 struct dtv_frontend_properties *c = &demod->dtv_property_cache;
1786 u64 time_us, tmp64; 1786 u64 time_us, tmp64;
@@ -1881,7 +1881,6 @@ static int dib7000p_get_stats(struct dvb_frontend *demod, fe_status_t stat)
1881{ 1881{
1882 struct dib7000p_state *state = demod->demodulator_priv; 1882 struct dib7000p_state *state = demod->demodulator_priv;
1883 struct dtv_frontend_properties *c = &demod->dtv_property_cache; 1883 struct dtv_frontend_properties *c = &demod->dtv_property_cache;
1884 int i;
1885 int show_per_stats = 0; 1884 int show_per_stats = 0;
1886 u32 time_us = 0, val, snr; 1885 u32 time_us = 0, val, snr;
1887 u64 blocks, ucb; 1886 u64 blocks, ucb;
@@ -1935,7 +1934,7 @@ static int dib7000p_get_stats(struct dvb_frontend *demod, fe_status_t stat)
1935 1934
1936 /* Estimate the number of packets based on bitrate */ 1935 /* Estimate the number of packets based on bitrate */
1937 if (!time_us) 1936 if (!time_us)
1938 time_us = dib7000p_get_time_us(demod, -1); 1937 time_us = dib7000p_get_time_us(demod);
1939 1938
1940 if (time_us) { 1939 if (time_us) {
1941 blocks = 1250000ULL * 1000000ULL; 1940 blocks = 1250000ULL * 1000000ULL;
@@ -1949,7 +1948,7 @@ static int dib7000p_get_stats(struct dvb_frontend *demod, fe_status_t stat)
1949 1948
1950 /* Get post-BER measures */ 1949 /* Get post-BER measures */
1951 if (time_after(jiffies, state->ber_jiffies_stats)) { 1950 if (time_after(jiffies, state->ber_jiffies_stats)) {
1952 time_us = dib7000p_get_time_us(demod, -1); 1951 time_us = dib7000p_get_time_us(demod);
1953 state->ber_jiffies_stats = jiffies + msecs_to_jiffies((time_us + 500) / 1000); 1952 state->ber_jiffies_stats = jiffies + msecs_to_jiffies((time_us + 500) / 1000);
1954 1953
1955 dprintk("Next all layers stats available in %u us.", time_us); 1954 dprintk("Next all layers stats available in %u us.", time_us);
@@ -1969,7 +1968,7 @@ static int dib7000p_get_stats(struct dvb_frontend *demod, fe_status_t stat)
1969 c->block_error.stat[0].scale = FE_SCALE_COUNTER; 1968 c->block_error.stat[0].scale = FE_SCALE_COUNTER;
1970 c->block_error.stat[0].uvalue += val; 1969 c->block_error.stat[0].uvalue += val;
1971 1970
1972 time_us = dib7000p_get_time_us(demod, i); 1971 time_us = dib7000p_get_time_us(demod);
1973 if (time_us) { 1972 if (time_us) {
1974 blocks = 1250000ULL * 1000000ULL; 1973 blocks = 1250000ULL * 1000000ULL;
1975 do_div(blocks, time_us * 8 * 204); 1974 do_div(blocks, time_us * 8 * 204);
diff --git a/drivers/media/dvb-frontends/drx39xyj/drxj.c b/drivers/media/dvb-frontends/drx39xyj/drxj.c
index 5ec221ffdfca..2bfa7a435974 100644
--- a/drivers/media/dvb-frontends/drx39xyj/drxj.c
+++ b/drivers/media/dvb-frontends/drx39xyj/drxj.c
@@ -12255,8 +12255,7 @@ static void drx39xxj_release(struct dvb_frontend *fe)
12255 kfree(demod->my_ext_attr); 12255 kfree(demod->my_ext_attr);
12256 kfree(demod->my_common_attr); 12256 kfree(demod->my_common_attr);
12257 kfree(demod->my_i2c_dev_addr); 12257 kfree(demod->my_i2c_dev_addr);
12258 if (demod->firmware) 12258 release_firmware(demod->firmware);
12259 release_firmware(demod->firmware);
12260 kfree(demod); 12259 kfree(demod);
12261 kfree(state); 12260 kfree(state);
12262} 12261}
diff --git a/drivers/media/dvb-frontends/drxk_hard.c b/drivers/media/dvb-frontends/drxk_hard.c
index 672195147d01..d46cf5f7cd2e 100644
--- a/drivers/media/dvb-frontends/drxk_hard.c
+++ b/drivers/media/dvb-frontends/drxk_hard.c
@@ -166,9 +166,9 @@ static unsigned int debug;
166module_param(debug, int, 0644); 166module_param(debug, int, 0644);
167MODULE_PARM_DESC(debug, "enable debug messages"); 167MODULE_PARM_DESC(debug, "enable debug messages");
168 168
169#define dprintk(level, fmt, arg...) do { \ 169#define dprintk(level, fmt, arg...) do { \
170if (debug >= level) \ 170if (debug >= level) \
171 pr_debug(fmt, ##arg); \ 171 printk(KERN_DEBUG KBUILD_MODNAME ": %s " fmt, __func__, ##arg); \
172} while (0) 172} while (0)
173 173
174 174
@@ -6310,8 +6310,7 @@ static void drxk_release(struct dvb_frontend *fe)
6310 struct drxk_state *state = fe->demodulator_priv; 6310 struct drxk_state *state = fe->demodulator_priv;
6311 6311
6312 dprintk(1, "\n"); 6312 dprintk(1, "\n");
6313 if (state->fw) 6313 release_firmware(state->fw);
6314 release_firmware(state->fw);
6315 6314
6316 kfree(state); 6315 kfree(state);
6317} 6316}
diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c
index 81657e94c5a4..ba4ee0b48834 100644
--- a/drivers/media/dvb-frontends/m88ds3103.c
+++ b/drivers/media/dvb-frontends/m88ds3103.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Montage M88DS3103 demodulator driver 2 * Montage M88DS3103/M88RS6000 demodulator driver
3 * 3 *
4 * Copyright (C) 2013 Antti Palosaari <crope@iki.fi> 4 * Copyright (C) 2013 Antti Palosaari <crope@iki.fi>
5 * 5 *
@@ -162,7 +162,7 @@ static int m88ds3103_wr_reg_val_tab(struct m88ds3103_priv *priv,
162 162
163 dev_dbg(&priv->i2c->dev, "%s: tab_len=%d\n", __func__, tab_len); 163 dev_dbg(&priv->i2c->dev, "%s: tab_len=%d\n", __func__, tab_len);
164 164
165 if (tab_len > 83) { 165 if (tab_len > 86) {
166 ret = -EINVAL; 166 ret = -EINVAL;
167 goto err; 167 goto err;
168 } 168 }
@@ -245,9 +245,9 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
245 struct dtv_frontend_properties *c = &fe->dtv_property_cache; 245 struct dtv_frontend_properties *c = &fe->dtv_property_cache;
246 int ret, len; 246 int ret, len;
247 const struct m88ds3103_reg_val *init; 247 const struct m88ds3103_reg_val *init;
248 u8 u8tmp, u8tmp1, u8tmp2; 248 u8 u8tmp, u8tmp1 = 0, u8tmp2 = 0; /* silence compiler warning */
249 u8 buf[2]; 249 u8 buf[3];
250 u16 u16tmp, divide_ratio; 250 u16 u16tmp, divide_ratio = 0;
251 u32 tuner_frequency, target_mclk; 251 u32 tuner_frequency, target_mclk;
252 s32 s32tmp; 252 s32 s32tmp;
253 253
@@ -262,6 +262,22 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
262 goto err; 262 goto err;
263 } 263 }
264 264
265 /* reset */
266 ret = m88ds3103_wr_reg(priv, 0x07, 0x80);
267 if (ret)
268 goto err;
269
270 ret = m88ds3103_wr_reg(priv, 0x07, 0x00);
271 if (ret)
272 goto err;
273
274 /* Disable demod clock path */
275 if (priv->chip_id == M88RS6000_CHIP_ID) {
276 ret = m88ds3103_wr_reg(priv, 0x06, 0xe0);
277 if (ret)
278 goto err;
279 }
280
265 /* program tuner */ 281 /* program tuner */
266 if (fe->ops.tuner_ops.set_params) { 282 if (fe->ops.tuner_ops.set_params) {
267 ret = fe->ops.tuner_ops.set_params(fe); 283 ret = fe->ops.tuner_ops.set_params(fe);
@@ -282,49 +298,44 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
282 tuner_frequency = c->frequency; 298 tuner_frequency = c->frequency;
283 } 299 }
284 300
285 /* reset */ 301 /* select M88RS6000 demod main mclk and ts mclk from tuner die. */
286 ret = m88ds3103_wr_reg(priv, 0x07, 0x80); 302 if (priv->chip_id == M88RS6000_CHIP_ID) {
287 if (ret) 303 if (c->symbol_rate > 45010000)
288 goto err; 304 priv->mclk_khz = 110250;
289 305 else
290 ret = m88ds3103_wr_reg(priv, 0x07, 0x00); 306 priv->mclk_khz = 96000;
291 if (ret)
292 goto err;
293
294 ret = m88ds3103_wr_reg(priv, 0xb2, 0x01);
295 if (ret)
296 goto err;
297 307
298 ret = m88ds3103_wr_reg(priv, 0x00, 0x01); 308 if (c->delivery_system == SYS_DVBS)
299 if (ret) 309 target_mclk = 96000;
300 goto err; 310 else
311 target_mclk = 144000;
301 312
302 switch (c->delivery_system) { 313 /* Enable demod clock path */
303 case SYS_DVBS: 314 ret = m88ds3103_wr_reg(priv, 0x06, 0x00);
304 len = ARRAY_SIZE(m88ds3103_dvbs_init_reg_vals); 315 if (ret)
305 init = m88ds3103_dvbs_init_reg_vals; 316 goto err;
306 target_mclk = 96000; 317 usleep_range(10000, 20000);
307 break; 318 } else {
308 case SYS_DVBS2: 319 /* set M88DS3103 mclk and ts mclk. */
309 len = ARRAY_SIZE(m88ds3103_dvbs2_init_reg_vals); 320 priv->mclk_khz = 96000;
310 init = m88ds3103_dvbs2_init_reg_vals;
311 321
312 switch (priv->cfg->ts_mode) { 322 switch (priv->cfg->ts_mode) {
313 case M88DS3103_TS_SERIAL: 323 case M88DS3103_TS_SERIAL:
314 case M88DS3103_TS_SERIAL_D7: 324 case M88DS3103_TS_SERIAL_D7:
315 if (c->symbol_rate < 18000000) 325 target_mclk = priv->cfg->ts_clk;
316 target_mclk = 96000;
317 else
318 target_mclk = 144000;
319 break; 326 break;
320 case M88DS3103_TS_PARALLEL: 327 case M88DS3103_TS_PARALLEL:
321 case M88DS3103_TS_CI: 328 case M88DS3103_TS_CI:
322 if (c->symbol_rate < 18000000) 329 if (c->delivery_system == SYS_DVBS)
323 target_mclk = 96000; 330 target_mclk = 96000;
324 else if (c->symbol_rate < 28000000) 331 else {
325 target_mclk = 144000; 332 if (c->symbol_rate < 18000000)
326 else 333 target_mclk = 96000;
327 target_mclk = 192000; 334 else if (c->symbol_rate < 28000000)
335 target_mclk = 144000;
336 else
337 target_mclk = 192000;
338 }
328 break; 339 break;
329 default: 340 default:
330 dev_dbg(&priv->i2c->dev, "%s: invalid ts_mode\n", 341 dev_dbg(&priv->i2c->dev, "%s: invalid ts_mode\n",
@@ -332,6 +343,55 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
332 ret = -EINVAL; 343 ret = -EINVAL;
333 goto err; 344 goto err;
334 } 345 }
346
347 switch (target_mclk) {
348 case 96000:
349 u8tmp1 = 0x02; /* 0b10 */
350 u8tmp2 = 0x01; /* 0b01 */
351 break;
352 case 144000:
353 u8tmp1 = 0x00; /* 0b00 */
354 u8tmp2 = 0x01; /* 0b01 */
355 break;
356 case 192000:
357 u8tmp1 = 0x03; /* 0b11 */
358 u8tmp2 = 0x00; /* 0b00 */
359 break;
360 }
361 ret = m88ds3103_wr_reg_mask(priv, 0x22, u8tmp1 << 6, 0xc0);
362 if (ret)
363 goto err;
364 ret = m88ds3103_wr_reg_mask(priv, 0x24, u8tmp2 << 6, 0xc0);
365 if (ret)
366 goto err;
367 }
368
369 ret = m88ds3103_wr_reg(priv, 0xb2, 0x01);
370 if (ret)
371 goto err;
372
373 ret = m88ds3103_wr_reg(priv, 0x00, 0x01);
374 if (ret)
375 goto err;
376
377 switch (c->delivery_system) {
378 case SYS_DVBS:
379 if (priv->chip_id == M88RS6000_CHIP_ID) {
380 len = ARRAY_SIZE(m88rs6000_dvbs_init_reg_vals);
381 init = m88rs6000_dvbs_init_reg_vals;
382 } else {
383 len = ARRAY_SIZE(m88ds3103_dvbs_init_reg_vals);
384 init = m88ds3103_dvbs_init_reg_vals;
385 }
386 break;
387 case SYS_DVBS2:
388 if (priv->chip_id == M88RS6000_CHIP_ID) {
389 len = ARRAY_SIZE(m88rs6000_dvbs2_init_reg_vals);
390 init = m88rs6000_dvbs2_init_reg_vals;
391 } else {
392 len = ARRAY_SIZE(m88ds3103_dvbs2_init_reg_vals);
393 init = m88ds3103_dvbs2_init_reg_vals;
394 }
335 break; 395 break;
336 default: 396 default:
337 dev_dbg(&priv->i2c->dev, "%s: invalid delivery_system\n", 397 dev_dbg(&priv->i2c->dev, "%s: invalid delivery_system\n",
@@ -347,7 +407,30 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
347 goto err; 407 goto err;
348 } 408 }
349 409
350 u8tmp1 = 0; /* silence compiler warning */ 410 if (priv->chip_id == M88RS6000_CHIP_ID) {
411 if ((c->delivery_system == SYS_DVBS2)
412 && ((c->symbol_rate / 1000) <= 5000)) {
413 ret = m88ds3103_wr_reg(priv, 0xc0, 0x04);
414 if (ret)
415 goto err;
416 buf[0] = 0x09;
417 buf[1] = 0x22;
418 buf[2] = 0x88;
419 ret = m88ds3103_wr_regs(priv, 0x8a, buf, 3);
420 if (ret)
421 goto err;
422 }
423 ret = m88ds3103_wr_reg_mask(priv, 0x9d, 0x08, 0x08);
424 if (ret)
425 goto err;
426 ret = m88ds3103_wr_reg(priv, 0xf1, 0x01);
427 if (ret)
428 goto err;
429 ret = m88ds3103_wr_reg_mask(priv, 0x30, 0x80, 0x80);
430 if (ret)
431 goto err;
432 }
433
351 switch (priv->cfg->ts_mode) { 434 switch (priv->cfg->ts_mode) {
352 case M88DS3103_TS_SERIAL: 435 case M88DS3103_TS_SERIAL:
353 u8tmp1 = 0x00; 436 u8tmp1 = 0x00;
@@ -383,16 +466,15 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
383 ret = m88ds3103_wr_reg_mask(priv, 0x29, u8tmp1, 0x20); 466 ret = m88ds3103_wr_reg_mask(priv, 0x29, u8tmp1, 0x20);
384 if (ret) 467 if (ret)
385 goto err; 468 goto err;
386 }
387
388 if (priv->cfg->ts_clk) {
389 divide_ratio = DIV_ROUND_UP(target_mclk, priv->cfg->ts_clk);
390 u8tmp1 = divide_ratio / 2;
391 u8tmp2 = DIV_ROUND_UP(divide_ratio, 2);
392 } else {
393 divide_ratio = 0;
394 u8tmp1 = 0; 469 u8tmp1 = 0;
395 u8tmp2 = 0; 470 u8tmp2 = 0;
471 break;
472 default:
473 if (priv->cfg->ts_clk) {
474 divide_ratio = DIV_ROUND_UP(target_mclk, priv->cfg->ts_clk);
475 u8tmp1 = divide_ratio / 2;
476 u8tmp2 = DIV_ROUND_UP(divide_ratio, 2);
477 }
396 } 478 }
397 479
398 dev_dbg(&priv->i2c->dev, 480 dev_dbg(&priv->i2c->dev,
@@ -420,29 +502,6 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
420 if (ret) 502 if (ret)
421 goto err; 503 goto err;
422 504
423 switch (target_mclk) {
424 case 96000:
425 u8tmp1 = 0x02; /* 0b10 */
426 u8tmp2 = 0x01; /* 0b01 */
427 break;
428 case 144000:
429 u8tmp1 = 0x00; /* 0b00 */
430 u8tmp2 = 0x01; /* 0b01 */
431 break;
432 case 192000:
433 u8tmp1 = 0x03; /* 0b11 */
434 u8tmp2 = 0x00; /* 0b00 */
435 break;
436 }
437
438 ret = m88ds3103_wr_reg_mask(priv, 0x22, u8tmp1 << 6, 0xc0);
439 if (ret)
440 goto err;
441
442 ret = m88ds3103_wr_reg_mask(priv, 0x24, u8tmp2 << 6, 0xc0);
443 if (ret)
444 goto err;
445
446 if (c->symbol_rate <= 3000000) 505 if (c->symbol_rate <= 3000000)
447 u8tmp = 0x20; 506 u8tmp = 0x20;
448 else if (c->symbol_rate <= 10000000) 507 else if (c->symbol_rate <= 10000000)
@@ -466,7 +525,7 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
466 if (ret) 525 if (ret)
467 goto err; 526 goto err;
468 527
469 u16tmp = DIV_ROUND_CLOSEST((c->symbol_rate / 1000) << 15, M88DS3103_MCLK_KHZ / 2); 528 u16tmp = DIV_ROUND_CLOSEST((c->symbol_rate / 1000) << 15, priv->mclk_khz / 2);
470 buf[0] = (u16tmp >> 0) & 0xff; 529 buf[0] = (u16tmp >> 0) & 0xff;
471 buf[1] = (u16tmp >> 8) & 0xff; 530 buf[1] = (u16tmp >> 8) & 0xff;
472 ret = m88ds3103_wr_regs(priv, 0x61, buf, 2); 531 ret = m88ds3103_wr_regs(priv, 0x61, buf, 2);
@@ -489,7 +548,7 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
489 (tuner_frequency - c->frequency)); 548 (tuner_frequency - c->frequency));
490 549
491 s32tmp = 0x10000 * (tuner_frequency - c->frequency); 550 s32tmp = 0x10000 * (tuner_frequency - c->frequency);
492 s32tmp = DIV_ROUND_CLOSEST(s32tmp, M88DS3103_MCLK_KHZ); 551 s32tmp = DIV_ROUND_CLOSEST(s32tmp, priv->mclk_khz);
493 if (s32tmp < 0) 552 if (s32tmp < 0)
494 s32tmp += 0x10000; 553 s32tmp += 0x10000;
495 554
@@ -520,7 +579,7 @@ static int m88ds3103_init(struct dvb_frontend *fe)
520 struct m88ds3103_priv *priv = fe->demodulator_priv; 579 struct m88ds3103_priv *priv = fe->demodulator_priv;
521 int ret, len, remaining; 580 int ret, len, remaining;
522 const struct firmware *fw = NULL; 581 const struct firmware *fw = NULL;
523 u8 *fw_file = M88DS3103_FIRMWARE; 582 u8 *fw_file;
524 u8 u8tmp; 583 u8 u8tmp;
525 584
526 dev_dbg(&priv->i2c->dev, "%s:\n", __func__); 585 dev_dbg(&priv->i2c->dev, "%s:\n", __func__);
@@ -541,15 +600,6 @@ static int m88ds3103_init(struct dvb_frontend *fe)
541 if (ret) 600 if (ret)
542 goto err; 601 goto err;
543 602
544 /* reset */
545 ret = m88ds3103_wr_reg(priv, 0x07, 0x60);
546 if (ret)
547 goto err;
548
549 ret = m88ds3103_wr_reg(priv, 0x07, 0x00);
550 if (ret)
551 goto err;
552
553 /* firmware status */ 603 /* firmware status */
554 ret = m88ds3103_rd_reg(priv, 0xb9, &u8tmp); 604 ret = m88ds3103_rd_reg(priv, 0xb9, &u8tmp);
555 if (ret) 605 if (ret)
@@ -560,10 +610,23 @@ static int m88ds3103_init(struct dvb_frontend *fe)
560 if (u8tmp) 610 if (u8tmp)
561 goto skip_fw_download; 611 goto skip_fw_download;
562 612
613 /* global reset, global diseqc reset, golbal fec reset */
614 ret = m88ds3103_wr_reg(priv, 0x07, 0xe0);
615 if (ret)
616 goto err;
617
618 ret = m88ds3103_wr_reg(priv, 0x07, 0x00);
619 if (ret)
620 goto err;
621
563 /* cold state - try to download firmware */ 622 /* cold state - try to download firmware */
564 dev_info(&priv->i2c->dev, "%s: found a '%s' in cold state\n", 623 dev_info(&priv->i2c->dev, "%s: found a '%s' in cold state\n",
565 KBUILD_MODNAME, m88ds3103_ops.info.name); 624 KBUILD_MODNAME, m88ds3103_ops.info.name);
566 625
626 if (priv->chip_id == M88RS6000_CHIP_ID)
627 fw_file = M88RS6000_FIRMWARE;
628 else
629 fw_file = M88DS3103_FIRMWARE;
567 /* request the firmware, this will block and timeout */ 630 /* request the firmware, this will block and timeout */
568 ret = request_firmware(&fw, fw_file, priv->i2c->dev.parent); 631 ret = request_firmware(&fw, fw_file, priv->i2c->dev.parent);
569 if (ret) { 632 if (ret) {
@@ -577,7 +640,7 @@ static int m88ds3103_init(struct dvb_frontend *fe)
577 640
578 ret = m88ds3103_wr_reg(priv, 0xb2, 0x01); 641 ret = m88ds3103_wr_reg(priv, 0xb2, 0x01);
579 if (ret) 642 if (ret)
580 goto err; 643 goto error_fw_release;
581 644
582 for (remaining = fw->size; remaining > 0; 645 for (remaining = fw->size; remaining > 0;
583 remaining -= (priv->cfg->i2c_wr_max - 1)) { 646 remaining -= (priv->cfg->i2c_wr_max - 1)) {
@@ -591,13 +654,13 @@ static int m88ds3103_init(struct dvb_frontend *fe)
591 dev_err(&priv->i2c->dev, 654 dev_err(&priv->i2c->dev,
592 "%s: firmware download failed=%d\n", 655 "%s: firmware download failed=%d\n",
593 KBUILD_MODNAME, ret); 656 KBUILD_MODNAME, ret);
594 goto err; 657 goto error_fw_release;
595 } 658 }
596 } 659 }
597 660
598 ret = m88ds3103_wr_reg(priv, 0xb2, 0x00); 661 ret = m88ds3103_wr_reg(priv, 0xb2, 0x00);
599 if (ret) 662 if (ret)
600 goto err; 663 goto error_fw_release;
601 664
602 release_firmware(fw); 665 release_firmware(fw);
603 fw = NULL; 666 fw = NULL;
@@ -623,10 +686,10 @@ skip_fw_download:
623 priv->warm = true; 686 priv->warm = true;
624 687
625 return 0; 688 return 0;
626err:
627 if (fw)
628 release_firmware(fw);
629 689
690error_fw_release:
691 release_firmware(fw);
692err:
630 dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret); 693 dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
631 return ret; 694 return ret;
632} 695}
@@ -635,13 +698,18 @@ static int m88ds3103_sleep(struct dvb_frontend *fe)
635{ 698{
636 struct m88ds3103_priv *priv = fe->demodulator_priv; 699 struct m88ds3103_priv *priv = fe->demodulator_priv;
637 int ret; 700 int ret;
701 u8 u8tmp;
638 702
639 dev_dbg(&priv->i2c->dev, "%s:\n", __func__); 703 dev_dbg(&priv->i2c->dev, "%s:\n", __func__);
640 704
641 priv->delivery_system = SYS_UNDEFINED; 705 priv->delivery_system = SYS_UNDEFINED;
642 706
643 /* TS Hi-Z */ 707 /* TS Hi-Z */
644 ret = m88ds3103_wr_reg_mask(priv, 0x27, 0x00, 0x01); 708 if (priv->chip_id == M88RS6000_CHIP_ID)
709 u8tmp = 0x29;
710 else
711 u8tmp = 0x27;
712 ret = m88ds3103_wr_reg_mask(priv, u8tmp, 0x00, 0x01);
645 if (ret) 713 if (ret)
646 goto err; 714 goto err;
647 715
@@ -830,7 +898,7 @@ static int m88ds3103_get_frontend(struct dvb_frontend *fe)
830 goto err; 898 goto err;
831 899
832 c->symbol_rate = 1ull * ((buf[1] << 8) | (buf[0] << 0)) * 900 c->symbol_rate = 1ull * ((buf[1] << 8) | (buf[0] << 0)) *
833 M88DS3103_MCLK_KHZ * 1000 / 0x10000; 901 priv->mclk_khz * 1000 / 0x10000;
834 902
835 return 0; 903 return 0;
836err: 904err:
@@ -1310,18 +1378,22 @@ struct dvb_frontend *m88ds3103_attach(const struct m88ds3103_config *cfg,
1310 priv->i2c = i2c; 1378 priv->i2c = i2c;
1311 mutex_init(&priv->i2c_mutex); 1379 mutex_init(&priv->i2c_mutex);
1312 1380
1313 ret = m88ds3103_rd_reg(priv, 0x01, &chip_id); 1381 /* 0x00: chip id[6:0], 0x01: chip ver[7:0], 0x02: chip ver[15:8] */
1382 ret = m88ds3103_rd_reg(priv, 0x00, &chip_id);
1314 if (ret) 1383 if (ret)
1315 goto err; 1384 goto err;
1316 1385
1317 dev_dbg(&priv->i2c->dev, "%s: chip_id=%02x\n", __func__, chip_id); 1386 chip_id >>= 1;
1387 dev_info(&priv->i2c->dev, "%s: chip_id=%02x\n", __func__, chip_id);
1318 1388
1319 switch (chip_id) { 1389 switch (chip_id) {
1320 case 0xd0: 1390 case M88RS6000_CHIP_ID:
1391 case M88DS3103_CHIP_ID:
1321 break; 1392 break;
1322 default: 1393 default:
1323 goto err; 1394 goto err;
1324 } 1395 }
1396 priv->chip_id = chip_id;
1325 1397
1326 switch (priv->cfg->clock_out) { 1398 switch (priv->cfg->clock_out) {
1327 case M88DS3103_CLOCK_OUT_DISABLED: 1399 case M88DS3103_CLOCK_OUT_DISABLED:
@@ -1337,6 +1409,11 @@ struct dvb_frontend *m88ds3103_attach(const struct m88ds3103_config *cfg,
1337 goto err; 1409 goto err;
1338 } 1410 }
1339 1411
1412 /* 0x29 register is defined differently for m88rs6000. */
1413 /* set internal tuner address to 0x21 */
1414 if (chip_id == M88RS6000_CHIP_ID)
1415 u8tmp = 0x00;
1416
1340 ret = m88ds3103_wr_reg(priv, 0x29, u8tmp); 1417 ret = m88ds3103_wr_reg(priv, 0x29, u8tmp);
1341 if (ret) 1418 if (ret)
1342 goto err; 1419 goto err;
@@ -1364,6 +1441,9 @@ struct dvb_frontend *m88ds3103_attach(const struct m88ds3103_config *cfg,
1364 1441
1365 /* create dvb_frontend */ 1442 /* create dvb_frontend */
1366 memcpy(&priv->fe.ops, &m88ds3103_ops, sizeof(struct dvb_frontend_ops)); 1443 memcpy(&priv->fe.ops, &m88ds3103_ops, sizeof(struct dvb_frontend_ops));
1444 if (priv->chip_id == M88RS6000_CHIP_ID)
1445 strncpy(priv->fe.ops.info.name,
1446 "Montage M88RS6000", sizeof(priv->fe.ops.info.name));
1367 priv->fe.demodulator_priv = priv; 1447 priv->fe.demodulator_priv = priv;
1368 1448
1369 return &priv->fe; 1449 return &priv->fe;
@@ -1423,3 +1503,4 @@ MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
1423MODULE_DESCRIPTION("Montage M88DS3103 DVB-S/S2 demodulator driver"); 1503MODULE_DESCRIPTION("Montage M88DS3103 DVB-S/S2 demodulator driver");
1424MODULE_LICENSE("GPL"); 1504MODULE_LICENSE("GPL");
1425MODULE_FIRMWARE(M88DS3103_FIRMWARE); 1505MODULE_FIRMWARE(M88DS3103_FIRMWARE);
1506MODULE_FIRMWARE(M88RS6000_FIRMWARE);
diff --git a/drivers/media/dvb-frontends/m88ds3103_priv.h b/drivers/media/dvb-frontends/m88ds3103_priv.h
index 9169fdd143cf..a2c0958111f8 100644
--- a/drivers/media/dvb-frontends/m88ds3103_priv.h
+++ b/drivers/media/dvb-frontends/m88ds3103_priv.h
@@ -25,7 +25,10 @@
25#include <linux/math64.h> 25#include <linux/math64.h>
26 26
27#define M88DS3103_FIRMWARE "dvb-demod-m88ds3103.fw" 27#define M88DS3103_FIRMWARE "dvb-demod-m88ds3103.fw"
28#define M88RS6000_FIRMWARE "dvb-demod-m88rs6000.fw"
28#define M88DS3103_MCLK_KHZ 96000 29#define M88DS3103_MCLK_KHZ 96000
30#define M88RS6000_CHIP_ID 0x74
31#define M88DS3103_CHIP_ID 0x70
29 32
30struct m88ds3103_priv { 33struct m88ds3103_priv {
31 struct i2c_adapter *i2c; 34 struct i2c_adapter *i2c;
@@ -38,6 +41,10 @@ struct m88ds3103_priv {
38 u32 ber; 41 u32 ber;
39 bool warm; /* FW running */ 42 bool warm; /* FW running */
40 struct i2c_adapter *i2c_adapter; 43 struct i2c_adapter *i2c_adapter;
44 /* auto detect chip id to do different config */
45 u8 chip_id;
46 /* main mclk is calculated for M88RS6000 dynamically */
47 u32 mclk_khz;
41}; 48};
42 49
43struct m88ds3103_reg_val { 50struct m88ds3103_reg_val {
@@ -214,4 +221,178 @@ static const struct m88ds3103_reg_val m88ds3103_dvbs2_init_reg_vals[] = {
214 {0xb8, 0x00}, 221 {0xb8, 0x00},
215}; 222};
216 223
224static const struct m88ds3103_reg_val m88rs6000_dvbs_init_reg_vals[] = {
225 {0x23, 0x07},
226 {0x08, 0x03},
227 {0x0c, 0x02},
228 {0x20, 0x00},
229 {0x21, 0x54},
230 {0x25, 0x82},
231 {0x27, 0x31},
232 {0x30, 0x08},
233 {0x31, 0x40},
234 {0x32, 0x32},
235 {0x33, 0x35},
236 {0x35, 0xff},
237 {0x3a, 0x00},
238 {0x37, 0x10},
239 {0x38, 0x10},
240 {0x39, 0x02},
241 {0x42, 0x60},
242 {0x4a, 0x80},
243 {0x4b, 0x04},
244 {0x4d, 0x91},
245 {0x5d, 0xc8},
246 {0x50, 0x36},
247 {0x51, 0x36},
248 {0x52, 0x36},
249 {0x53, 0x36},
250 {0x63, 0x0f},
251 {0x64, 0x30},
252 {0x65, 0x40},
253 {0x68, 0x26},
254 {0x69, 0x4c},
255 {0x70, 0x20},
256 {0x71, 0x70},
257 {0x72, 0x04},
258 {0x73, 0x00},
259 {0x70, 0x40},
260 {0x71, 0x70},
261 {0x72, 0x04},
262 {0x73, 0x00},
263 {0x70, 0x60},
264 {0x71, 0x70},
265 {0x72, 0x04},
266 {0x73, 0x00},
267 {0x70, 0x80},
268 {0x71, 0x70},
269 {0x72, 0x04},
270 {0x73, 0x00},
271 {0x70, 0xa0},
272 {0x71, 0x70},
273 {0x72, 0x04},
274 {0x73, 0x00},
275 {0x70, 0x1f},
276 {0x76, 0x38},
277 {0x77, 0xa6},
278 {0x78, 0x0c},
279 {0x79, 0x80},
280 {0x7f, 0x14},
281 {0x7c, 0x00},
282 {0xae, 0x82},
283 {0x80, 0x64},
284 {0x81, 0x66},
285 {0x82, 0x44},
286 {0x85, 0x04},
287 {0xcd, 0xf4},
288 {0x90, 0x33},
289 {0xa0, 0x44},
290 {0xbe, 0x00},
291 {0xc0, 0x08},
292 {0xc3, 0x10},
293 {0xc4, 0x08},
294 {0xc5, 0xf0},
295 {0xc6, 0xff},
296 {0xc7, 0x00},
297 {0xc8, 0x1a},
298 {0xc9, 0x80},
299 {0xe0, 0xf8},
300 {0xe6, 0x8b},
301 {0xd0, 0x40},
302 {0xf8, 0x20},
303 {0xfa, 0x0f},
304 {0x00, 0x00},
305 {0xbd, 0x01},
306 {0xb8, 0x00},
307 {0x29, 0x11},
308};
309
310static const struct m88ds3103_reg_val m88rs6000_dvbs2_init_reg_vals[] = {
311 {0x23, 0x07},
312 {0x08, 0x07},
313 {0x0c, 0x02},
314 {0x20, 0x00},
315 {0x21, 0x54},
316 {0x25, 0x82},
317 {0x27, 0x31},
318 {0x30, 0x08},
319 {0x32, 0x32},
320 {0x33, 0x35},
321 {0x35, 0xff},
322 {0x3a, 0x00},
323 {0x37, 0x10},
324 {0x38, 0x10},
325 {0x39, 0x02},
326 {0x42, 0x60},
327 {0x4a, 0x80},
328 {0x4b, 0x04},
329 {0x4d, 0x91},
330 {0x5d, 0xc8},
331 {0x50, 0x36},
332 {0x51, 0x36},
333 {0x52, 0x36},
334 {0x53, 0x36},
335 {0x63, 0x0f},
336 {0x64, 0x10},
337 {0x65, 0x20},
338 {0x68, 0x46},
339 {0x69, 0xcd},
340 {0x70, 0x20},
341 {0x71, 0x70},
342 {0x72, 0x04},
343 {0x73, 0x00},
344 {0x70, 0x40},
345 {0x71, 0x70},
346 {0x72, 0x04},
347 {0x73, 0x00},
348 {0x70, 0x60},
349 {0x71, 0x70},
350 {0x72, 0x04},
351 {0x73, 0x00},
352 {0x70, 0x80},
353 {0x71, 0x70},
354 {0x72, 0x04},
355 {0x73, 0x00},
356 {0x70, 0xa0},
357 {0x71, 0x70},
358 {0x72, 0x04},
359 {0x73, 0x00},
360 {0x70, 0x1f},
361 {0x76, 0x38},
362 {0x77, 0xa6},
363 {0x78, 0x0c},
364 {0x79, 0x80},
365 {0x7f, 0x14},
366 {0x85, 0x08},
367 {0xcd, 0xf4},
368 {0x90, 0x33},
369 {0x86, 0x00},
370 {0x87, 0x0f},
371 {0x89, 0x00},
372 {0x8b, 0x44},
373 {0x8c, 0x66},
374 {0x9d, 0xc1},
375 {0x8a, 0x10},
376 {0xad, 0x40},
377 {0xa0, 0x44},
378 {0xbe, 0x00},
379 {0xc0, 0x08},
380 {0xc1, 0x10},
381 {0xc2, 0x08},
382 {0xc3, 0x10},
383 {0xc4, 0x08},
384 {0xc5, 0xf0},
385 {0xc6, 0xff},
386 {0xc7, 0x00},
387 {0xc8, 0x1a},
388 {0xc9, 0x80},
389 {0xca, 0x23},
390 {0xcb, 0x24},
391 {0xcc, 0xf4},
392 {0xce, 0x74},
393 {0x00, 0x00},
394 {0xbd, 0x01},
395 {0xb8, 0x00},
396 {0x29, 0x01},
397};
217#endif 398#endif
diff --git a/drivers/media/dvb-frontends/mn88472.h b/drivers/media/dvb-frontends/mn88472.h
new file mode 100644
index 000000000000..da4558bce60f
--- /dev/null
+++ b/drivers/media/dvb-frontends/mn88472.h
@@ -0,0 +1,38 @@
1/*
2 * Panasonic MN88472 DVB-T/T2/C demodulator driver
3 *
4 * Copyright (C) 2013 Antti Palosaari <crope@iki.fi>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16
17#ifndef MN88472_H
18#define MN88472_H
19
20#include <linux/dvb/frontend.h>
21
22struct mn88472_config {
23 /*
24 * Max num of bytes given I2C adapter could write at once.
25 * Default: none
26 */
27 u16 i2c_wr_max;
28
29
30 /* Everything after that is returned by the driver. */
31
32 /*
33 * DVB frontend.
34 */
35 struct dvb_frontend **fe;
36};
37
38#endif
diff --git a/drivers/media/dvb-frontends/mn88473.h b/drivers/media/dvb-frontends/mn88473.h
new file mode 100644
index 000000000000..a373ec93cbe0
--- /dev/null
+++ b/drivers/media/dvb-frontends/mn88473.h
@@ -0,0 +1,38 @@
1/*
2 * Panasonic MN88473 DVB-T/T2/C demodulator driver
3 *
4 * Copyright (C) 2014 Antti Palosaari <crope@iki.fi>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16
17#ifndef MN88473_H
18#define MN88473_H
19
20#include <linux/dvb/frontend.h>
21
22struct mn88473_config {
23 /*
24 * Max num of bytes given I2C adapter could write at once.
25 * Default: none
26 */
27 u16 i2c_wr_max;
28
29
30 /* Everything after that is returned by the driver. */
31
32 /*
33 * DVB frontend.
34 */
35 struct dvb_frontend **fe;
36};
37
38#endif
diff --git a/drivers/media/dvb-frontends/rtl2832.c b/drivers/media/dvb-frontends/rtl2832.c
index eb737cf29a36..9026e1aee163 100644
--- a/drivers/media/dvb-frontends/rtl2832.c
+++ b/drivers/media/dvb-frontends/rtl2832.c
@@ -258,13 +258,11 @@ static int rtl2832_rd_regs(struct rtl2832_priv *priv, u8 reg, u8 page, u8 *val,
258 return rtl2832_rd(priv, reg, val, len); 258 return rtl2832_rd(priv, reg, val, len);
259} 259}
260 260
261#if 0 /* currently not used */
262/* write single register */ 261/* write single register */
263static int rtl2832_wr_reg(struct rtl2832_priv *priv, u8 reg, u8 page, u8 val) 262static int rtl2832_wr_reg(struct rtl2832_priv *priv, u8 reg, u8 page, u8 val)
264{ 263{
265 return rtl2832_wr_regs(priv, reg, page, &val, 1); 264 return rtl2832_wr_regs(priv, reg, page, &val, 1);
266} 265}
267#endif
268 266
269/* read single register */ 267/* read single register */
270static int rtl2832_rd_reg(struct rtl2832_priv *priv, u8 reg, u8 page, u8 *val) 268static int rtl2832_rd_reg(struct rtl2832_priv *priv, u8 reg, u8 page, u8 *val)
@@ -599,6 +597,11 @@ static int rtl2832_set_frontend(struct dvb_frontend *fe)
599 if (fe->ops.tuner_ops.set_params) 597 if (fe->ops.tuner_ops.set_params)
600 fe->ops.tuner_ops.set_params(fe); 598 fe->ops.tuner_ops.set_params(fe);
601 599
600 /* PIP mode related */
601 ret = rtl2832_wr_regs(priv, 0x92, 1, "\x00\x0f\xff", 3);
602 if (ret)
603 goto err;
604
602 /* If the frontend has get_if_frequency(), use it */ 605 /* If the frontend has get_if_frequency(), use it */
603 if (fe->ops.tuner_ops.get_if_frequency) { 606 if (fe->ops.tuner_ops.get_if_frequency) {
604 u32 if_freq; 607 u32 if_freq;
@@ -661,7 +664,6 @@ static int rtl2832_set_frontend(struct dvb_frontend *fe)
661 if (ret) 664 if (ret)
662 goto err; 665 goto err;
663 666
664
665 /* soft reset */ 667 /* soft reset */
666 ret = rtl2832_wr_demod_reg(priv, DVBT_SOFT_RST, 0x1); 668 ret = rtl2832_wr_demod_reg(priv, DVBT_SOFT_RST, 0x1);
667 if (ret) 669 if (ret)
@@ -1020,6 +1022,58 @@ static int rtl2832_deselect(struct i2c_adapter *adap, void *mux_priv,
1020 return 0; 1022 return 0;
1021} 1023}
1022 1024
1025int rtl2832_enable_external_ts_if(struct dvb_frontend *fe)
1026{
1027 struct rtl2832_priv *priv = fe->demodulator_priv;
1028 int ret;
1029
1030 dev_dbg(&priv->i2c->dev, "%s: setting PIP mode\n", __func__);
1031
1032 ret = rtl2832_wr_regs(priv, 0x0c, 1, "\x5f\xff", 2);
1033 if (ret)
1034 goto err;
1035
1036 ret = rtl2832_wr_demod_reg(priv, DVBT_PIP_ON, 0x1);
1037 if (ret)
1038 goto err;
1039
1040 ret = rtl2832_wr_reg(priv, 0xbc, 0, 0x18);
1041 if (ret)
1042 goto err;
1043
1044 ret = rtl2832_wr_reg(priv, 0x22, 0, 0x01);
1045 if (ret)
1046 goto err;
1047
1048 ret = rtl2832_wr_reg(priv, 0x26, 0, 0x1f);
1049 if (ret)
1050 goto err;
1051
1052 ret = rtl2832_wr_reg(priv, 0x27, 0, 0xff);
1053 if (ret)
1054 goto err;
1055
1056 ret = rtl2832_wr_regs(priv, 0x92, 1, "\x7f\xf7\xff", 3);
1057 if (ret)
1058 goto err;
1059
1060 /* soft reset */
1061 ret = rtl2832_wr_demod_reg(priv, DVBT_SOFT_RST, 0x1);
1062 if (ret)
1063 goto err;
1064
1065 ret = rtl2832_wr_demod_reg(priv, DVBT_SOFT_RST, 0x0);
1066 if (ret)
1067 goto err;
1068
1069 return 0;
1070err:
1071 dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
1072 return ret;
1073
1074}
1075EXPORT_SYMBOL(rtl2832_enable_external_ts_if);
1076
1023struct i2c_adapter *rtl2832_get_i2c_adapter(struct dvb_frontend *fe) 1077struct i2c_adapter *rtl2832_get_i2c_adapter(struct dvb_frontend *fe)
1024{ 1078{
1025 struct rtl2832_priv *priv = fe->demodulator_priv; 1079 struct rtl2832_priv *priv = fe->demodulator_priv;
diff --git a/drivers/media/dvb-frontends/rtl2832.h b/drivers/media/dvb-frontends/rtl2832.h
index cb3b6b0775b8..5254c1dfc8de 100644
--- a/drivers/media/dvb-frontends/rtl2832.h
+++ b/drivers/media/dvb-frontends/rtl2832.h
@@ -64,6 +64,10 @@ extern struct i2c_adapter *rtl2832_get_private_i2c_adapter(
64 struct dvb_frontend *fe 64 struct dvb_frontend *fe
65); 65);
66 66
67extern int rtl2832_enable_external_ts_if(
68 struct dvb_frontend *fe
69);
70
67#else 71#else
68 72
69static inline struct dvb_frontend *rtl2832_attach( 73static inline struct dvb_frontend *rtl2832_attach(
@@ -89,6 +93,13 @@ static inline struct i2c_adapter *rtl2832_get_private_i2c_adapter(
89 return NULL; 93 return NULL;
90} 94}
91 95
96static inline int rtl2832_enable_external_ts_if(
97 struct dvb_frontend *fe
98)
99{
100 return -ENODEV;
101}
102
92#endif 103#endif
93 104
94 105
diff --git a/drivers/media/dvb-frontends/rtl2832_sdr.c b/drivers/media/dvb-frontends/rtl2832_sdr.c
index 7bf98cf6bbe1..2896b47c29d8 100644
--- a/drivers/media/dvb-frontends/rtl2832_sdr.c
+++ b/drivers/media/dvb-frontends/rtl2832_sdr.c
@@ -1013,6 +1013,10 @@ static int rtl2832_sdr_start_streaming(struct vb2_queue *vq, unsigned int count)
1013 if (s->d->props->power_ctrl) 1013 if (s->d->props->power_ctrl)
1014 s->d->props->power_ctrl(s->d, 1); 1014 s->d->props->power_ctrl(s->d, 1);
1015 1015
1016 /* enable ADC */
1017 if (s->d->props->frontend_ctrl)
1018 s->d->props->frontend_ctrl(s->fe, 1);
1019
1016 set_bit(POWER_ON, &s->flags); 1020 set_bit(POWER_ON, &s->flags);
1017 1021
1018 ret = rtl2832_sdr_set_tuner(s); 1022 ret = rtl2832_sdr_set_tuner(s);
@@ -1064,6 +1068,10 @@ static void rtl2832_sdr_stop_streaming(struct vb2_queue *vq)
1064 1068
1065 clear_bit(POWER_ON, &s->flags); 1069 clear_bit(POWER_ON, &s->flags);
1066 1070
1071 /* disable ADC */
1072 if (s->d->props->frontend_ctrl)
1073 s->d->props->frontend_ctrl(s->fe, 0);
1074
1067 if (s->d->props->power_ctrl) 1075 if (s->d->props->power_ctrl)
1068 s->d->props->power_ctrl(s->d, 0); 1076 s->d->props->power_ctrl(s->d, 0);
1069 1077
diff --git a/drivers/media/dvb-frontends/si2168.c b/drivers/media/dvb-frontends/si2168.c
index 1cd93be281ed..ce9ab442b4b6 100644
--- a/drivers/media/dvb-frontends/si2168.c
+++ b/drivers/media/dvb-frontends/si2168.c
@@ -308,14 +308,16 @@ static int si2168_set_frontend(struct dvb_frontend *fe)
308 if (ret) 308 if (ret)
309 goto err; 309 goto err;
310 310
311 memcpy(cmd.args, "\x14\x00\x09\x10\xe3\x18", 6); 311 memcpy(cmd.args, "\x14\x00\x09\x10\xe3\x08", 6);
312 cmd.args[5] |= s->ts_clock_inv ? 0x00 : 0x10;
312 cmd.wlen = 6; 313 cmd.wlen = 6;
313 cmd.rlen = 4; 314 cmd.rlen = 4;
314 ret = si2168_cmd_execute(s, &cmd); 315 ret = si2168_cmd_execute(s, &cmd);
315 if (ret) 316 if (ret)
316 goto err; 317 goto err;
317 318
318 memcpy(cmd.args, "\x14\x00\x08\x10\xd7\x15", 6); 319 memcpy(cmd.args, "\x14\x00\x08\x10\xd7\x05", 6);
320 cmd.args[5] |= s->ts_clock_inv ? 0x00 : 0x10;
319 cmd.wlen = 6; 321 cmd.wlen = 6;
320 cmd.rlen = 4; 322 cmd.rlen = 4;
321 ret = si2168_cmd_execute(s, &cmd); 323 ret = si2168_cmd_execute(s, &cmd);
@@ -453,27 +455,45 @@ static int si2168_init(struct dvb_frontend *fe)
453 dev_err(&s->client->dev, 455 dev_err(&s->client->dev,
454 "firmware file '%s' not found\n", 456 "firmware file '%s' not found\n",
455 fw_file); 457 fw_file);
456 goto err; 458 goto error_fw_release;
457 } 459 }
458 } 460 }
459 461
460 dev_info(&s->client->dev, "downloading firmware from file '%s'\n", 462 dev_info(&s->client->dev, "downloading firmware from file '%s'\n",
461 fw_file); 463 fw_file);
462 464
463 for (remaining = fw->size; remaining > 0; remaining -= i2c_wr_max) { 465 if ((fw->size % 17 == 0) && (fw->data[0] > 5)) {
464 len = remaining; 466 /* firmware is in the new format */
465 if (len > i2c_wr_max) 467 for (remaining = fw->size; remaining > 0; remaining -= 17) {
466 len = i2c_wr_max; 468 len = fw->data[fw->size - remaining];
467 469 memcpy(cmd.args, &fw->data[(fw->size - remaining) + 1], len);
468 memcpy(cmd.args, &fw->data[fw->size - remaining], len); 470 cmd.wlen = len;
469 cmd.wlen = len; 471 cmd.rlen = 1;
470 cmd.rlen = 1; 472 ret = si2168_cmd_execute(s, &cmd);
471 ret = si2168_cmd_execute(s, &cmd); 473 if (ret) {
472 if (ret) { 474 dev_err(&s->client->dev,
473 dev_err(&s->client->dev, 475 "firmware download failed=%d\n",
474 "firmware download failed=%d\n", 476 ret);
475 ret); 477 goto error_fw_release;
476 goto err; 478 }
479 }
480 } else {
481 /* firmware is in the old format */
482 for (remaining = fw->size; remaining > 0; remaining -= i2c_wr_max) {
483 len = remaining;
484 if (len > i2c_wr_max)
485 len = i2c_wr_max;
486
487 memcpy(cmd.args, &fw->data[fw->size - remaining], len);
488 cmd.wlen = len;
489 cmd.rlen = 1;
490 ret = si2168_cmd_execute(s, &cmd);
491 if (ret) {
492 dev_err(&s->client->dev,
493 "firmware download failed=%d\n",
494 ret);
495 goto error_fw_release;
496 }
477 } 497 }
478 } 498 }
479 499
@@ -487,6 +507,17 @@ static int si2168_init(struct dvb_frontend *fe)
487 if (ret) 507 if (ret)
488 goto err; 508 goto err;
489 509
510 /* query firmware version */
511 memcpy(cmd.args, "\x11", 1);
512 cmd.wlen = 1;
513 cmd.rlen = 10;
514 ret = si2168_cmd_execute(s, &cmd);
515 if (ret)
516 goto err;
517
518 dev_dbg(&s->client->dev, "firmware version: %c.%c.%d\n",
519 cmd.args[6], cmd.args[7], cmd.args[8]);
520
490 /* set ts mode */ 521 /* set ts mode */
491 memcpy(cmd.args, "\x14\x00\x01\x10\x10\x00", 6); 522 memcpy(cmd.args, "\x14\x00\x01\x10\x10\x00", 6);
492 cmd.args[4] |= s->ts_mode; 523 cmd.args[4] |= s->ts_mode;
@@ -498,17 +529,16 @@ static int si2168_init(struct dvb_frontend *fe)
498 529
499 s->fw_loaded = true; 530 s->fw_loaded = true;
500 531
501warm:
502 dev_info(&s->client->dev, "found a '%s' in warm state\n", 532 dev_info(&s->client->dev, "found a '%s' in warm state\n",
503 si2168_ops.info.name); 533 si2168_ops.info.name);
504 534warm:
505 s->active = true; 535 s->active = true;
506 536
507 return 0; 537 return 0;
508err:
509 if (fw)
510 release_firmware(fw);
511 538
539error_fw_release:
540 release_firmware(fw);
541err:
512 dev_dbg(&s->client->dev, "failed=%d\n", ret); 542 dev_dbg(&s->client->dev, "failed=%d\n", ret);
513 return ret; 543 return ret;
514} 544}
@@ -670,6 +700,7 @@ static int si2168_probe(struct i2c_client *client,
670 *config->i2c_adapter = s->adapter; 700 *config->i2c_adapter = s->adapter;
671 *config->fe = &s->fe; 701 *config->fe = &s->fe;
672 s->ts_mode = config->ts_mode; 702 s->ts_mode = config->ts_mode;
703 s->ts_clock_inv = config->ts_clock_inv;
673 s->fw_loaded = false; 704 s->fw_loaded = false;
674 705
675 i2c_set_clientdata(client, s); 706 i2c_set_clientdata(client, s);
diff --git a/drivers/media/dvb-frontends/si2168.h b/drivers/media/dvb-frontends/si2168.h
index e086d6719451..87bc12146667 100644
--- a/drivers/media/dvb-frontends/si2168.h
+++ b/drivers/media/dvb-frontends/si2168.h
@@ -37,6 +37,10 @@ struct si2168_config {
37 37
38 /* TS mode */ 38 /* TS mode */
39 u8 ts_mode; 39 u8 ts_mode;
40
41 /* TS clock inverted */
42 bool ts_clock_inv;
43
40}; 44};
41 45
42#define SI2168_TS_PARALLEL 0x06 46#define SI2168_TS_PARALLEL 0x06
diff --git a/drivers/media/dvb-frontends/si2168_priv.h b/drivers/media/dvb-frontends/si2168_priv.h
index e13983ed4be1..60bc3349b6c3 100644
--- a/drivers/media/dvb-frontends/si2168_priv.h
+++ b/drivers/media/dvb-frontends/si2168_priv.h
@@ -38,6 +38,7 @@ struct si2168 {
38 bool active; 38 bool active;
39 bool fw_loaded; 39 bool fw_loaded;
40 u8 ts_mode; 40 u8 ts_mode;
41 bool ts_clock_inv;
41}; 42};
42 43
43/* firmare command struct */ 44/* firmare command struct */
diff --git a/drivers/media/dvb-frontends/sp2.c b/drivers/media/dvb-frontends/sp2.c
index 15bf4318cb74..cc1ef966f99f 100644
--- a/drivers/media/dvb-frontends/sp2.c
+++ b/drivers/media/dvb-frontends/sp2.c
@@ -92,6 +92,9 @@ static int sp2_write_i2c(struct sp2 *s, u8 reg, u8 *buf, int len)
92 return -EIO; 92 return -EIO;
93 } 93 }
94 94
95 dev_dbg(&s->client->dev, "addr=0x%04x, reg = 0x%02x, data = %*ph\n",
96 client->addr, reg, len, buf);
97
95 return 0; 98 return 0;
96} 99}
97 100
@@ -103,9 +106,6 @@ static int sp2_ci_op_cam(struct dvb_ca_en50221 *en50221, int slot, u8 acs,
103 int mem, ret; 106 int mem, ret;
104 int (*ci_op_cam)(void*, u8, int, u8, int*) = s->ci_control; 107 int (*ci_op_cam)(void*, u8, int, u8, int*) = s->ci_control;
105 108
106 dev_dbg(&s->client->dev, "slot=%d, acs=0x%02x, addr=0x%04x, data = 0x%02x",
107 slot, acs, addr, data);
108
109 if (slot != 0) 109 if (slot != 0)
110 return -EINVAL; 110 return -EINVAL;
111 111
@@ -140,13 +140,16 @@ static int sp2_ci_op_cam(struct dvb_ca_en50221 *en50221, int slot, u8 acs,
140 if (ret) 140 if (ret)
141 return ret; 141 return ret;
142 142
143 if (read) { 143 dev_dbg(&s->client->dev, "%s: slot=%d, addr=0x%04x, %s, data=%x",
144 dev_dbg(&s->client->dev, "cam read, addr=0x%04x, data = 0x%04x", 144 (read) ? "read" : "write", slot, addr,
145 addr, mem); 145 (acs == SP2_CI_ATTR_ACS) ? "attr" : "io",
146 (read) ? mem : data);
147
148 if (read)
146 return mem; 149 return mem;
147 } else { 150 else
148 return 0; 151 return 0;
149 } 152
150} 153}
151 154
152int sp2_ci_read_attribute_mem(struct dvb_ca_en50221 *en50221, 155int sp2_ci_read_attribute_mem(struct dvb_ca_en50221 *en50221,
@@ -407,7 +410,7 @@ err:
407 410
408static int sp2_remove(struct i2c_client *client) 411static int sp2_remove(struct i2c_client *client)
409{ 412{
410 struct si2157 *s = i2c_get_clientdata(client); 413 struct sp2 *s = i2c_get_clientdata(client);
411 414
412 dev_dbg(&client->dev, "\n"); 415 dev_dbg(&client->dev, "\n");
413 416
diff --git a/drivers/media/dvb-frontends/stb0899_drv.c b/drivers/media/dvb-frontends/stb0899_drv.c
index 07cd5ea7a038..19646fbb061d 100644
--- a/drivers/media/dvb-frontends/stb0899_drv.c
+++ b/drivers/media/dvb-frontends/stb0899_drv.c
@@ -705,7 +705,7 @@ static int stb0899_send_diseqc_msg(struct dvb_frontend *fe, struct dvb_diseqc_ma
705 struct stb0899_state *state = fe->demodulator_priv; 705 struct stb0899_state *state = fe->demodulator_priv;
706 u8 reg, i; 706 u8 reg, i;
707 707
708 if (cmd->msg_len > 8) 708 if (cmd->msg_len > sizeof(cmd->msg))
709 return -EINVAL; 709 return -EINVAL;
710 710
711 /* enable FIFO precharge */ 711 /* enable FIFO precharge */
diff --git a/drivers/media/dvb-frontends/stv090x.c b/drivers/media/dvb-frontends/stv090x.c
index 23e872f84742..0b2a934f53e5 100644
--- a/drivers/media/dvb-frontends/stv090x.c
+++ b/drivers/media/dvb-frontends/stv090x.c
@@ -2146,7 +2146,7 @@ static int stv090x_get_coldlock(struct stv090x_state *state, s32 timeout_dmd)
2146 2146
2147 u32 reg; 2147 u32 reg;
2148 s32 car_step, steps, cur_step, dir, freq, timeout_lock; 2148 s32 car_step, steps, cur_step, dir, freq, timeout_lock;
2149 int lock = 0; 2149 int lock;
2150 2150
2151 if (state->srate >= 10000000) 2151 if (state->srate >= 10000000)
2152 timeout_lock = timeout_dmd / 3; 2152 timeout_lock = timeout_dmd / 3;
@@ -2154,98 +2154,96 @@ static int stv090x_get_coldlock(struct stv090x_state *state, s32 timeout_dmd)
2154 timeout_lock = timeout_dmd / 2; 2154 timeout_lock = timeout_dmd / 2;
2155 2155
2156 lock = stv090x_get_dmdlock(state, timeout_lock); /* cold start wait */ 2156 lock = stv090x_get_dmdlock(state, timeout_lock); /* cold start wait */
2157 if (!lock) { 2157 if (lock)
2158 if (state->srate >= 10000000) { 2158 return lock;
2159 if (stv090x_chk_tmg(state)) {
2160 if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x1f) < 0)
2161 goto err;
2162 if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x15) < 0)
2163 goto err;
2164 lock = stv090x_get_dmdlock(state, timeout_dmd);
2165 } else {
2166 lock = 0;
2167 }
2168 } else {
2169 if (state->srate <= 4000000)
2170 car_step = 1000;
2171 else if (state->srate <= 7000000)
2172 car_step = 2000;
2173 else if (state->srate <= 10000000)
2174 car_step = 3000;
2175 else
2176 car_step = 5000;
2177
2178 steps = (state->search_range / 1000) / car_step;
2179 steps /= 2;
2180 steps = 2 * (steps + 1);
2181 if (steps < 0)
2182 steps = 2;
2183 else if (steps > 12)
2184 steps = 12;
2185
2186 cur_step = 1;
2187 dir = 1;
2188
2189 if (!lock) {
2190 freq = state->frequency;
2191 state->tuner_bw = stv090x_car_width(state->srate, state->rolloff) + state->srate;
2192 while ((cur_step <= steps) && (!lock)) {
2193 if (dir > 0)
2194 freq += cur_step * car_step;
2195 else
2196 freq -= cur_step * car_step;
2197
2198 /* Setup tuner */
2199 if (stv090x_i2c_gate_ctrl(state, 1) < 0)
2200 goto err;
2201 2159
2202 if (state->config->tuner_set_frequency) { 2160 if (state->srate >= 10000000) {
2203 if (state->config->tuner_set_frequency(fe, freq) < 0) 2161 if (stv090x_chk_tmg(state)) {
2204 goto err_gateoff; 2162 if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x1f) < 0)
2205 } 2163 goto err;
2164 if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x15) < 0)
2165 goto err;
2166 return stv090x_get_dmdlock(state, timeout_dmd);
2167 }
2168 return 0;
2169 }
2206 2170
2207 if (state->config->tuner_set_bandwidth) { 2171 if (state->srate <= 4000000)
2208 if (state->config->tuner_set_bandwidth(fe, state->tuner_bw) < 0) 2172 car_step = 1000;
2209 goto err_gateoff; 2173 else if (state->srate <= 7000000)
2210 } 2174 car_step = 2000;
2175 else if (state->srate <= 10000000)
2176 car_step = 3000;
2177 else
2178 car_step = 5000;
2211 2179
2212 if (stv090x_i2c_gate_ctrl(state, 0) < 0) 2180 steps = (state->search_range / 1000) / car_step;
2213 goto err; 2181 steps /= 2;
2182 steps = 2 * (steps + 1);
2183 if (steps < 0)
2184 steps = 2;
2185 else if (steps > 12)
2186 steps = 12;
2214 2187
2215 msleep(50); 2188 cur_step = 1;
2189 dir = 1;
2216 2190
2217 if (stv090x_i2c_gate_ctrl(state, 1) < 0) 2191 freq = state->frequency;
2218 goto err; 2192 state->tuner_bw = stv090x_car_width(state->srate, state->rolloff) + state->srate;
2193 while ((cur_step <= steps) && (!lock)) {
2194 if (dir > 0)
2195 freq += cur_step * car_step;
2196 else
2197 freq -= cur_step * car_step;
2219 2198
2220 if (state->config->tuner_get_status) { 2199 /* Setup tuner */
2221 if (state->config->tuner_get_status(fe, &reg) < 0) 2200 if (stv090x_i2c_gate_ctrl(state, 1) < 0)
2222 goto err_gateoff; 2201 goto err;
2223 }
2224 2202
2225 if (reg) 2203 if (state->config->tuner_set_frequency) {
2226 dprintk(FE_DEBUG, 1, "Tuner phase locked"); 2204 if (state->config->tuner_set_frequency(fe, freq) < 0)
2227 else 2205 goto err_gateoff;
2228 dprintk(FE_DEBUG, 1, "Tuner unlocked"); 2206 }
2229 2207
2230 if (stv090x_i2c_gate_ctrl(state, 0) < 0) 2208 if (state->config->tuner_set_bandwidth) {
2231 goto err; 2209 if (state->config->tuner_set_bandwidth(fe, state->tuner_bw) < 0)
2210 goto err_gateoff;
2211 }
2232 2212
2233 STV090x_WRITE_DEMOD(state, DMDISTATE, 0x1c); 2213 if (stv090x_i2c_gate_ctrl(state, 0) < 0)
2234 if (STV090x_WRITE_DEMOD(state, CFRINIT1, 0x00) < 0) 2214 goto err;
2235 goto err;
2236 if (STV090x_WRITE_DEMOD(state, CFRINIT0, 0x00) < 0)
2237 goto err;
2238 if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x1f) < 0)
2239 goto err;
2240 if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x15) < 0)
2241 goto err;
2242 lock = stv090x_get_dmdlock(state, (timeout_dmd / 3));
2243 2215
2244 dir *= -1; 2216 msleep(50);
2245 cur_step++; 2217
2246 } 2218 if (stv090x_i2c_gate_ctrl(state, 1) < 0)
2247 } 2219 goto err;
2220
2221 if (state->config->tuner_get_status) {
2222 if (state->config->tuner_get_status(fe, &reg) < 0)
2223 goto err_gateoff;
2248 } 2224 }
2225
2226 if (reg)
2227 dprintk(FE_DEBUG, 1, "Tuner phase locked");
2228 else
2229 dprintk(FE_DEBUG, 1, "Tuner unlocked");
2230
2231 if (stv090x_i2c_gate_ctrl(state, 0) < 0)
2232 goto err;
2233
2234 STV090x_WRITE_DEMOD(state, DMDISTATE, 0x1c);
2235 if (STV090x_WRITE_DEMOD(state, CFRINIT1, 0x00) < 0)
2236 goto err;
2237 if (STV090x_WRITE_DEMOD(state, CFRINIT0, 0x00) < 0)
2238 goto err;
2239 if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x1f) < 0)
2240 goto err;
2241 if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x15) < 0)
2242 goto err;
2243 lock = stv090x_get_dmdlock(state, (timeout_dmd / 3));
2244
2245 dir *= -1;
2246 cur_step++;
2249 } 2247 }
2250 2248
2251 return lock; 2249 return lock;
@@ -2663,13 +2661,9 @@ static enum stv090x_signal_state stv090x_get_sig_params(struct stv090x_state *st
2663 return STV090x_RANGEOK; 2661 return STV090x_RANGEOK;
2664 else if (abs(offst_freq) <= (stv090x_car_width(state->srate, state->rolloff) / 2000)) 2662 else if (abs(offst_freq) <= (stv090x_car_width(state->srate, state->rolloff) / 2000))
2665 return STV090x_RANGEOK; 2663 return STV090x_RANGEOK;
2666 else
2667 return STV090x_OUTOFRANGE; /* Out of Range */
2668 } else { 2664 } else {
2669 if (abs(offst_freq) <= ((state->search_range / 2000) + 500)) 2665 if (abs(offst_freq) <= ((state->search_range / 2000) + 500))
2670 return STV090x_RANGEOK; 2666 return STV090x_RANGEOK;
2671 else
2672 return STV090x_OUTOFRANGE;
2673 } 2667 }
2674 2668
2675 return STV090x_OUTOFRANGE; 2669 return STV090x_OUTOFRANGE;
@@ -2789,6 +2783,12 @@ static u8 stv090x_optimize_carloop(struct stv090x_state *state, enum stv090x_mod
2789 aclc = car_loop[i].crl_pilots_off_30; 2783 aclc = car_loop[i].crl_pilots_off_30;
2790 } 2784 }
2791 } else { /* 16APSK and 32APSK */ 2785 } else { /* 16APSK and 32APSK */
2786 /*
2787 * This should never happen in practice, except if
2788 * something is really wrong at the car_loop table.
2789 */
2790 if (i >= 11)
2791 i = 10;
2792 if (state->srate <= 3000000) 2792 if (state->srate <= 3000000)
2793 aclc = car_loop_apsk_low[i].crl_pilots_on_2; 2793 aclc = car_loop_apsk_low[i].crl_pilots_on_2;
2794 else if (state->srate <= 7000000) 2794 else if (state->srate <= 7000000)
@@ -3470,7 +3470,20 @@ static enum dvbfe_search stv090x_search(struct dvb_frontend *fe)
3470 if (props->frequency == 0) 3470 if (props->frequency == 0)
3471 return DVBFE_ALGO_SEARCH_INVALID; 3471 return DVBFE_ALGO_SEARCH_INVALID;
3472 3472
3473 state->delsys = props->delivery_system; 3473 switch (props->delivery_system) {
3474 case SYS_DSS:
3475 state->delsys = STV090x_DSS;
3476 break;
3477 case SYS_DVBS:
3478 state->delsys = STV090x_DVBS1;
3479 break;
3480 case SYS_DVBS2:
3481 state->delsys = STV090x_DVBS2;
3482 break;
3483 default:
3484 return DVBFE_ALGO_SEARCH_INVALID;
3485 }
3486
3474 state->frequency = props->frequency; 3487 state->frequency = props->frequency;
3475 state->srate = props->symbol_rate; 3488 state->srate = props->symbol_rate;
3476 state->search_mode = STV090x_SEARCH_AUTO; 3489 state->search_mode = STV090x_SEARCH_AUTO;
@@ -4859,8 +4872,8 @@ err:
4859 return -1; 4872 return -1;
4860} 4873}
4861 4874
4862int stv090x_set_gpio(struct dvb_frontend *fe, u8 gpio, u8 dir, u8 value, 4875static int stv090x_set_gpio(struct dvb_frontend *fe, u8 gpio, u8 dir,
4863 u8 xor_value) 4876 u8 value, u8 xor_value)
4864{ 4877{
4865 struct stv090x_state *state = fe->demodulator_priv; 4878 struct stv090x_state *state = fe->demodulator_priv;
4866 u8 reg = 0; 4879 u8 reg = 0;
@@ -4871,7 +4884,6 @@ int stv090x_set_gpio(struct dvb_frontend *fe, u8 gpio, u8 dir, u8 value,
4871 4884
4872 return stv090x_write_reg(state, STV090x_GPIOxCFG(gpio), reg); 4885 return stv090x_write_reg(state, STV090x_GPIOxCFG(gpio), reg);
4873} 4886}
4874EXPORT_SYMBOL(stv090x_set_gpio);
4875 4887
4876static struct dvb_frontend_ops stv090x_ops = { 4888static struct dvb_frontend_ops stv090x_ops = {
4877 .delsys = { SYS_DVBS, SYS_DVBS2, SYS_DSS }, 4889 .delsys = { SYS_DVBS, SYS_DVBS2, SYS_DSS },
@@ -4908,7 +4920,7 @@ static struct dvb_frontend_ops stv090x_ops = {
4908}; 4920};
4909 4921
4910 4922
4911struct dvb_frontend *stv090x_attach(const struct stv090x_config *config, 4923struct dvb_frontend *stv090x_attach(struct stv090x_config *config,
4912 struct i2c_adapter *i2c, 4924 struct i2c_adapter *i2c,
4913 enum stv090x_demodulator demod) 4925 enum stv090x_demodulator demod)
4914{ 4926{
@@ -4969,6 +4981,8 @@ struct dvb_frontend *stv090x_attach(const struct stv090x_config *config,
4969 if (config->diseqc_envelope_mode) 4981 if (config->diseqc_envelope_mode)
4970 stv090x_send_diseqc_burst(&state->frontend, SEC_MINI_A); 4982 stv090x_send_diseqc_burst(&state->frontend, SEC_MINI_A);
4971 4983
4984 config->set_gpio = stv090x_set_gpio;
4985
4972 dprintk(FE_ERROR, 1, "Attaching %s demodulator(%d) Cut=0x%02x", 4986 dprintk(FE_ERROR, 1, "Attaching %s demodulator(%d) Cut=0x%02x",
4973 state->device == STV0900 ? "STV0900" : "STV0903", 4987 state->device == STV0900 ? "STV0900" : "STV0903",
4974 demod, 4988 demod,
diff --git a/drivers/media/dvb-frontends/stv090x.h b/drivers/media/dvb-frontends/stv090x.h
index 0bd6adcfee8a..742eeda99000 100644
--- a/drivers/media/dvb-frontends/stv090x.h
+++ b/drivers/media/dvb-frontends/stv090x.h
@@ -89,29 +89,29 @@ struct stv090x_config {
89 89
90 bool diseqc_envelope_mode; 90 bool diseqc_envelope_mode;
91 91
92 int (*tuner_init) (struct dvb_frontend *fe); 92 int (*tuner_init)(struct dvb_frontend *fe);
93 int (*tuner_sleep) (struct dvb_frontend *fe); 93 int (*tuner_sleep)(struct dvb_frontend *fe);
94 int (*tuner_set_mode) (struct dvb_frontend *fe, enum tuner_mode mode); 94 int (*tuner_set_mode)(struct dvb_frontend *fe, enum tuner_mode mode);
95 int (*tuner_set_frequency) (struct dvb_frontend *fe, u32 frequency); 95 int (*tuner_set_frequency)(struct dvb_frontend *fe, u32 frequency);
96 int (*tuner_get_frequency) (struct dvb_frontend *fe, u32 *frequency); 96 int (*tuner_get_frequency)(struct dvb_frontend *fe, u32 *frequency);
97 int (*tuner_set_bandwidth) (struct dvb_frontend *fe, u32 bandwidth); 97 int (*tuner_set_bandwidth)(struct dvb_frontend *fe, u32 bandwidth);
98 int (*tuner_get_bandwidth) (struct dvb_frontend *fe, u32 *bandwidth); 98 int (*tuner_get_bandwidth)(struct dvb_frontend *fe, u32 *bandwidth);
99 int (*tuner_set_bbgain) (struct dvb_frontend *fe, u32 gain); 99 int (*tuner_set_bbgain)(struct dvb_frontend *fe, u32 gain);
100 int (*tuner_get_bbgain) (struct dvb_frontend *fe, u32 *gain); 100 int (*tuner_get_bbgain)(struct dvb_frontend *fe, u32 *gain);
101 int (*tuner_set_refclk) (struct dvb_frontend *fe, u32 refclk); 101 int (*tuner_set_refclk)(struct dvb_frontend *fe, u32 refclk);
102 int (*tuner_get_status) (struct dvb_frontend *fe, u32 *status); 102 int (*tuner_get_status)(struct dvb_frontend *fe, u32 *status);
103 void (*tuner_i2c_lock) (struct dvb_frontend *fe, int lock); 103 void (*tuner_i2c_lock)(struct dvb_frontend *fe, int lock);
104
105 /* dir = 0 -> output, dir = 1 -> input/open-drain */
106 int (*set_gpio)(struct dvb_frontend *fe, u8 gpio, u8 dir, u8 value,
107 u8 xor_value);
104}; 108};
105 109
106#if IS_ENABLED(CONFIG_DVB_STV090x) 110#if IS_ENABLED(CONFIG_DVB_STV090x)
107 111
108extern struct dvb_frontend *stv090x_attach(const struct stv090x_config *config, 112struct dvb_frontend *stv090x_attach(struct stv090x_config *config,
109 struct i2c_adapter *i2c, 113 struct i2c_adapter *i2c,
110 enum stv090x_demodulator demod); 114 enum stv090x_demodulator demod);
111
112/* dir = 0 -> output, dir = 1 -> input/open-drain */
113extern int stv090x_set_gpio(struct dvb_frontend *fe, u8 gpio,
114 u8 dir, u8 value, u8 xor_value);
115 115
116#else 116#else
117 117
@@ -123,12 +123,6 @@ static inline struct dvb_frontend *stv090x_attach(const struct stv090x_config *c
123 return NULL; 123 return NULL;
124} 124}
125 125
126static inline int stv090x_set_gpio(struct dvb_frontend *fe, u8 gpio,
127 u8 opd, u8 value, u8 xor_value)
128{
129 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
130 return -ENODEV;
131}
132#endif /* CONFIG_DVB_STV090x */ 126#endif /* CONFIG_DVB_STV090x */
133 127
134#endif /* __STV090x_H */ 128#endif /* __STV090x_H */
diff --git a/drivers/media/firewire/firedtv-ci.c b/drivers/media/firewire/firedtv-ci.c
index e5ebdbfe8c19..e63f582378bf 100644
--- a/drivers/media/firewire/firedtv-ci.c
+++ b/drivers/media/firewire/firedtv-ci.c
@@ -253,6 +253,5 @@ int fdtv_ca_register(struct firedtv *fdtv)
253 253
254void fdtv_ca_release(struct firedtv *fdtv) 254void fdtv_ca_release(struct firedtv *fdtv)
255{ 255{
256 if (fdtv->cadev) 256 dvb_unregister_device(fdtv->cadev);
257 dvb_unregister_device(fdtv->cadev);
258} 257}
diff --git a/drivers/media/firewire/firedtv.h b/drivers/media/firewire/firedtv.h
index c2ba085e0d20..346a85be6de2 100644
--- a/drivers/media/firewire/firedtv.h
+++ b/drivers/media/firewire/firedtv.h
@@ -96,7 +96,7 @@ struct firedtv {
96 96
97 enum model_type type; 97 enum model_type type;
98 char subunit; 98 char subunit;
99 char isochannel; 99 s8 isochannel;
100 struct fdtv_ir_context *ir_context; 100 struct fdtv_ir_context *ir_context;
101 101
102 fe_sec_voltage_t voltage; 102 fe_sec_voltage_t voltage;
diff --git a/drivers/media/i2c/adv7170.c b/drivers/media/i2c/adv7170.c
index 04bb29720aaf..40a1a95c7ce9 100644
--- a/drivers/media/i2c/adv7170.c
+++ b/drivers/media/i2c/adv7170.c
@@ -63,9 +63,9 @@ static inline struct adv7170 *to_adv7170(struct v4l2_subdev *sd)
63 63
64static char *inputs[] = { "pass_through", "play_back" }; 64static char *inputs[] = { "pass_through", "play_back" };
65 65
66static enum v4l2_mbus_pixelcode adv7170_codes[] = { 66static u32 adv7170_codes[] = {
67 V4L2_MBUS_FMT_UYVY8_2X8, 67 MEDIA_BUS_FMT_UYVY8_2X8,
68 V4L2_MBUS_FMT_UYVY8_1X16, 68 MEDIA_BUS_FMT_UYVY8_1X16,
69}; 69};
70 70
71/* ----------------------------------------------------------------------- */ 71/* ----------------------------------------------------------------------- */
@@ -263,7 +263,7 @@ static int adv7170_s_routing(struct v4l2_subdev *sd,
263} 263}
264 264
265static int adv7170_enum_fmt(struct v4l2_subdev *sd, unsigned int index, 265static int adv7170_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
266 enum v4l2_mbus_pixelcode *code) 266 u32 *code)
267{ 267{
268 if (index >= ARRAY_SIZE(adv7170_codes)) 268 if (index >= ARRAY_SIZE(adv7170_codes))
269 return -EINVAL; 269 return -EINVAL;
@@ -278,9 +278,9 @@ static int adv7170_g_fmt(struct v4l2_subdev *sd,
278 u8 val = adv7170_read(sd, 0x7); 278 u8 val = adv7170_read(sd, 0x7);
279 279
280 if ((val & 0x40) == (1 << 6)) 280 if ((val & 0x40) == (1 << 6))
281 mf->code = V4L2_MBUS_FMT_UYVY8_1X16; 281 mf->code = MEDIA_BUS_FMT_UYVY8_1X16;
282 else 282 else
283 mf->code = V4L2_MBUS_FMT_UYVY8_2X8; 283 mf->code = MEDIA_BUS_FMT_UYVY8_2X8;
284 284
285 mf->colorspace = V4L2_COLORSPACE_SMPTE170M; 285 mf->colorspace = V4L2_COLORSPACE_SMPTE170M;
286 mf->width = 0; 286 mf->width = 0;
@@ -297,11 +297,11 @@ static int adv7170_s_fmt(struct v4l2_subdev *sd,
297 int ret; 297 int ret;
298 298
299 switch (mf->code) { 299 switch (mf->code) {
300 case V4L2_MBUS_FMT_UYVY8_2X8: 300 case MEDIA_BUS_FMT_UYVY8_2X8:
301 val &= ~0x40; 301 val &= ~0x40;
302 break; 302 break;
303 303
304 case V4L2_MBUS_FMT_UYVY8_1X16: 304 case MEDIA_BUS_FMT_UYVY8_1X16:
305 val |= 0x40; 305 val |= 0x40;
306 break; 306 break;
307 307
diff --git a/drivers/media/i2c/adv7175.c b/drivers/media/i2c/adv7175.c
index b88f3b3d5ed9..d220af579a64 100644
--- a/drivers/media/i2c/adv7175.c
+++ b/drivers/media/i2c/adv7175.c
@@ -60,9 +60,9 @@ static inline struct adv7175 *to_adv7175(struct v4l2_subdev *sd)
60 60
61static char *inputs[] = { "pass_through", "play_back", "color_bar" }; 61static char *inputs[] = { "pass_through", "play_back", "color_bar" };
62 62
63static enum v4l2_mbus_pixelcode adv7175_codes[] = { 63static u32 adv7175_codes[] = {
64 V4L2_MBUS_FMT_UYVY8_2X8, 64 MEDIA_BUS_FMT_UYVY8_2X8,
65 V4L2_MBUS_FMT_UYVY8_1X16, 65 MEDIA_BUS_FMT_UYVY8_1X16,
66}; 66};
67 67
68/* ----------------------------------------------------------------------- */ 68/* ----------------------------------------------------------------------- */
@@ -301,7 +301,7 @@ static int adv7175_s_routing(struct v4l2_subdev *sd,
301} 301}
302 302
303static int adv7175_enum_fmt(struct v4l2_subdev *sd, unsigned int index, 303static int adv7175_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
304 enum v4l2_mbus_pixelcode *code) 304 u32 *code)
305{ 305{
306 if (index >= ARRAY_SIZE(adv7175_codes)) 306 if (index >= ARRAY_SIZE(adv7175_codes))
307 return -EINVAL; 307 return -EINVAL;
@@ -316,9 +316,9 @@ static int adv7175_g_fmt(struct v4l2_subdev *sd,
316 u8 val = adv7175_read(sd, 0x7); 316 u8 val = adv7175_read(sd, 0x7);
317 317
318 if ((val & 0x40) == (1 << 6)) 318 if ((val & 0x40) == (1 << 6))
319 mf->code = V4L2_MBUS_FMT_UYVY8_1X16; 319 mf->code = MEDIA_BUS_FMT_UYVY8_1X16;
320 else 320 else
321 mf->code = V4L2_MBUS_FMT_UYVY8_2X8; 321 mf->code = MEDIA_BUS_FMT_UYVY8_2X8;
322 322
323 mf->colorspace = V4L2_COLORSPACE_SMPTE170M; 323 mf->colorspace = V4L2_COLORSPACE_SMPTE170M;
324 mf->width = 0; 324 mf->width = 0;
@@ -335,11 +335,11 @@ static int adv7175_s_fmt(struct v4l2_subdev *sd,
335 int ret; 335 int ret;
336 336
337 switch (mf->code) { 337 switch (mf->code) {
338 case V4L2_MBUS_FMT_UYVY8_2X8: 338 case MEDIA_BUS_FMT_UYVY8_2X8:
339 val &= ~0x40; 339 val &= ~0x40;
340 break; 340 break;
341 341
342 case V4L2_MBUS_FMT_UYVY8_1X16: 342 case MEDIA_BUS_FMT_UYVY8_1X16:
343 val |= 0x40; 343 val |= 0x40;
344 break; 344 break;
345 345
diff --git a/drivers/media/i2c/adv7180.c b/drivers/media/i2c/adv7180.c
index 821178dcb08e..bffe6eb528a3 100644
--- a/drivers/media/i2c/adv7180.c
+++ b/drivers/media/i2c/adv7180.c
@@ -422,12 +422,12 @@ static void adv7180_exit_controls(struct adv7180_state *state)
422} 422}
423 423
424static int adv7180_enum_mbus_fmt(struct v4l2_subdev *sd, unsigned int index, 424static int adv7180_enum_mbus_fmt(struct v4l2_subdev *sd, unsigned int index,
425 enum v4l2_mbus_pixelcode *code) 425 u32 *code)
426{ 426{
427 if (index > 0) 427 if (index > 0)
428 return -EINVAL; 428 return -EINVAL;
429 429
430 *code = V4L2_MBUS_FMT_YUYV8_2X8; 430 *code = MEDIA_BUS_FMT_YUYV8_2X8;
431 431
432 return 0; 432 return 0;
433} 433}
@@ -437,7 +437,7 @@ static int adv7180_mbus_fmt(struct v4l2_subdev *sd,
437{ 437{
438 struct adv7180_state *state = to_state(sd); 438 struct adv7180_state *state = to_state(sd);
439 439
440 fmt->code = V4L2_MBUS_FMT_YUYV8_2X8; 440 fmt->code = MEDIA_BUS_FMT_YUYV8_2X8;
441 fmt->colorspace = V4L2_COLORSPACE_SMPTE170M; 441 fmt->colorspace = V4L2_COLORSPACE_SMPTE170M;
442 fmt->field = V4L2_FIELD_INTERLACED; 442 fmt->field = V4L2_FIELD_INTERLACED;
443 fmt->width = 720; 443 fmt->width = 720;
diff --git a/drivers/media/i2c/adv7183.c b/drivers/media/i2c/adv7183.c
index df461b07b2f7..28940cc3a766 100644
--- a/drivers/media/i2c/adv7183.c
+++ b/drivers/media/i2c/adv7183.c
@@ -421,12 +421,12 @@ static int adv7183_g_input_status(struct v4l2_subdev *sd, u32 *status)
421} 421}
422 422
423static int adv7183_enum_mbus_fmt(struct v4l2_subdev *sd, unsigned index, 423static int adv7183_enum_mbus_fmt(struct v4l2_subdev *sd, unsigned index,
424 enum v4l2_mbus_pixelcode *code) 424 u32 *code)
425{ 425{
426 if (index > 0) 426 if (index > 0)
427 return -EINVAL; 427 return -EINVAL;
428 428
429 *code = V4L2_MBUS_FMT_UYVY8_2X8; 429 *code = MEDIA_BUS_FMT_UYVY8_2X8;
430 return 0; 430 return 0;
431} 431}
432 432
@@ -435,7 +435,7 @@ static int adv7183_try_mbus_fmt(struct v4l2_subdev *sd,
435{ 435{
436 struct adv7183 *decoder = to_adv7183(sd); 436 struct adv7183 *decoder = to_adv7183(sd);
437 437
438 fmt->code = V4L2_MBUS_FMT_UYVY8_2X8; 438 fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
439 fmt->colorspace = V4L2_COLORSPACE_SMPTE170M; 439 fmt->colorspace = V4L2_COLORSPACE_SMPTE170M;
440 if (decoder->std & V4L2_STD_525_60) { 440 if (decoder->std & V4L2_STD_525_60) {
441 fmt->field = V4L2_FIELD_SEQ_TB; 441 fmt->field = V4L2_FIELD_SEQ_TB;
diff --git a/drivers/media/i2c/adv7511.c b/drivers/media/i2c/adv7511.c
index f98acf4aafd4..81736aaf0f31 100644
--- a/drivers/media/i2c/adv7511.c
+++ b/drivers/media/i2c/adv7511.c
@@ -26,6 +26,7 @@
26#include <linux/videodev2.h> 26#include <linux/videodev2.h>
27#include <linux/gpio.h> 27#include <linux/gpio.h>
28#include <linux/workqueue.h> 28#include <linux/workqueue.h>
29#include <linux/hdmi.h>
29#include <linux/v4l2-dv-timings.h> 30#include <linux/v4l2-dv-timings.h>
30#include <media/v4l2-device.h> 31#include <media/v4l2-device.h>
31#include <media/v4l2-common.h> 32#include <media/v4l2-common.h>
@@ -96,6 +97,10 @@ struct adv7511_state {
96 bool have_monitor; 97 bool have_monitor;
97 /* timings from s_dv_timings */ 98 /* timings from s_dv_timings */
98 struct v4l2_dv_timings dv_timings; 99 struct v4l2_dv_timings dv_timings;
100 u32 fmt_code;
101 u32 colorspace;
102 u32 ycbcr_enc;
103 u32 quantization;
99 /* controls */ 104 /* controls */
100 struct v4l2_ctrl *hdmi_mode_ctrl; 105 struct v4l2_ctrl *hdmi_mode_ctrl;
101 struct v4l2_ctrl *hotplug_ctrl; 106 struct v4l2_ctrl *hotplug_ctrl;
@@ -779,26 +784,234 @@ static int adv7511_get_edid(struct v4l2_subdev *sd, struct v4l2_edid *edid)
779{ 784{
780 struct adv7511_state *state = get_adv7511_state(sd); 785 struct adv7511_state *state = get_adv7511_state(sd);
781 786
787 memset(edid->reserved, 0, sizeof(edid->reserved));
788
782 if (edid->pad != 0) 789 if (edid->pad != 0)
783 return -EINVAL; 790 return -EINVAL;
784 if ((edid->blocks == 0) || (edid->blocks > 256)) 791
785 return -EINVAL; 792 if (edid->start_block == 0 && edid->blocks == 0) {
786 if (!state->edid.segments) { 793 edid->blocks = state->edid.segments * 2;
787 v4l2_dbg(1, debug, sd, "EDID segment 0 not found\n"); 794 return 0;
788 return -ENODATA;
789 } 795 }
796
797 if (state->edid.segments == 0)
798 return -ENODATA;
799
790 if (edid->start_block >= state->edid.segments * 2) 800 if (edid->start_block >= state->edid.segments * 2)
791 return -E2BIG; 801 return -EINVAL;
792 if ((edid->blocks + edid->start_block) >= state->edid.segments * 2) 802
803 if (edid->start_block + edid->blocks > state->edid.segments * 2)
793 edid->blocks = state->edid.segments * 2 - edid->start_block; 804 edid->blocks = state->edid.segments * 2 - edid->start_block;
794 805
795 memcpy(edid->edid, &state->edid.data[edid->start_block * 128], 806 memcpy(edid->edid, &state->edid.data[edid->start_block * 128],
796 128 * edid->blocks); 807 128 * edid->blocks);
808
809 return 0;
810}
811
812static int adv7511_enum_mbus_code(struct v4l2_subdev *sd,
813 struct v4l2_subdev_fh *fh,
814 struct v4l2_subdev_mbus_code_enum *code)
815{
816 if (code->pad != 0)
817 return -EINVAL;
818
819 switch (code->index) {
820 case 0:
821 code->code = MEDIA_BUS_FMT_RGB888_1X24;
822 break;
823 case 1:
824 code->code = MEDIA_BUS_FMT_YUYV8_1X16;
825 break;
826 case 2:
827 code->code = MEDIA_BUS_FMT_UYVY8_1X16;
828 break;
829 default:
830 return -EINVAL;
831 }
832 return 0;
833}
834
835static void adv7511_fill_format(struct adv7511_state *state,
836 struct v4l2_mbus_framefmt *format)
837{
838 memset(format, 0, sizeof(*format));
839
840 format->width = state->dv_timings.bt.width;
841 format->height = state->dv_timings.bt.height;
842 format->field = V4L2_FIELD_NONE;
843}
844
845static int adv7511_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
846 struct v4l2_subdev_format *format)
847{
848 struct adv7511_state *state = get_adv7511_state(sd);
849
850 if (format->pad != 0)
851 return -EINVAL;
852
853 adv7511_fill_format(state, &format->format);
854
855 if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
856 struct v4l2_mbus_framefmt *fmt;
857
858 fmt = v4l2_subdev_get_try_format(fh, format->pad);
859 format->format.code = fmt->code;
860 format->format.colorspace = fmt->colorspace;
861 format->format.ycbcr_enc = fmt->ycbcr_enc;
862 format->format.quantization = fmt->quantization;
863 } else {
864 format->format.code = state->fmt_code;
865 format->format.colorspace = state->colorspace;
866 format->format.ycbcr_enc = state->ycbcr_enc;
867 format->format.quantization = state->quantization;
868 }
869
870 return 0;
871}
872
873static int adv7511_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
874 struct v4l2_subdev_format *format)
875{
876 struct adv7511_state *state = get_adv7511_state(sd);
877 /*
878 * Bitfield namings come the CEA-861-F standard, table 8 "Auxiliary
879 * Video Information (AVI) InfoFrame Format"
880 *
881 * c = Colorimetry
882 * ec = Extended Colorimetry
883 * y = RGB or YCbCr
884 * q = RGB Quantization Range
885 * yq = YCC Quantization Range
886 */
887 u8 c = HDMI_COLORIMETRY_NONE;
888 u8 ec = HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
889 u8 y = HDMI_COLORSPACE_RGB;
890 u8 q = HDMI_QUANTIZATION_RANGE_DEFAULT;
891 u8 yq = HDMI_YCC_QUANTIZATION_RANGE_LIMITED;
892
893 if (format->pad != 0)
894 return -EINVAL;
895 switch (format->format.code) {
896 case MEDIA_BUS_FMT_UYVY8_1X16:
897 case MEDIA_BUS_FMT_YUYV8_1X16:
898 case MEDIA_BUS_FMT_RGB888_1X24:
899 break;
900 default:
901 return -EINVAL;
902 }
903
904 adv7511_fill_format(state, &format->format);
905 if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
906 struct v4l2_mbus_framefmt *fmt;
907
908 fmt = v4l2_subdev_get_try_format(fh, format->pad);
909 fmt->code = format->format.code;
910 fmt->colorspace = format->format.colorspace;
911 fmt->ycbcr_enc = format->format.ycbcr_enc;
912 fmt->quantization = format->format.quantization;
913 return 0;
914 }
915
916 switch (format->format.code) {
917 case MEDIA_BUS_FMT_UYVY8_1X16:
918 adv7511_wr_and_or(sd, 0x15, 0xf0, 0x01);
919 adv7511_wr_and_or(sd, 0x16, 0x03, 0xb8);
920 y = HDMI_COLORSPACE_YUV422;
921 break;
922 case MEDIA_BUS_FMT_YUYV8_1X16:
923 adv7511_wr_and_or(sd, 0x15, 0xf0, 0x01);
924 adv7511_wr_and_or(sd, 0x16, 0x03, 0xbc);
925 y = HDMI_COLORSPACE_YUV422;
926 break;
927 case MEDIA_BUS_FMT_RGB888_1X24:
928 default:
929 adv7511_wr_and_or(sd, 0x15, 0xf0, 0x00);
930 adv7511_wr_and_or(sd, 0x16, 0x03, 0x00);
931 break;
932 }
933 state->fmt_code = format->format.code;
934 state->colorspace = format->format.colorspace;
935 state->ycbcr_enc = format->format.ycbcr_enc;
936 state->quantization = format->format.quantization;
937
938 switch (format->format.colorspace) {
939 case V4L2_COLORSPACE_ADOBERGB:
940 c = HDMI_COLORIMETRY_EXTENDED;
941 ec = y ? HDMI_EXTENDED_COLORIMETRY_ADOBE_YCC_601 :
942 HDMI_EXTENDED_COLORIMETRY_ADOBE_RGB;
943 break;
944 case V4L2_COLORSPACE_SMPTE170M:
945 c = y ? HDMI_COLORIMETRY_ITU_601 : HDMI_COLORIMETRY_NONE;
946 if (y && format->format.ycbcr_enc == V4L2_YCBCR_ENC_XV601) {
947 c = HDMI_COLORIMETRY_EXTENDED;
948 ec = HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
949 }
950 break;
951 case V4L2_COLORSPACE_REC709:
952 c = y ? HDMI_COLORIMETRY_ITU_709 : HDMI_COLORIMETRY_NONE;
953 if (y && format->format.ycbcr_enc == V4L2_YCBCR_ENC_XV709) {
954 c = HDMI_COLORIMETRY_EXTENDED;
955 ec = HDMI_EXTENDED_COLORIMETRY_XV_YCC_709;
956 }
957 break;
958 case V4L2_COLORSPACE_SRGB:
959 c = y ? HDMI_COLORIMETRY_EXTENDED : HDMI_COLORIMETRY_NONE;
960 ec = y ? HDMI_EXTENDED_COLORIMETRY_S_YCC_601 :
961 HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
962 break;
963 case V4L2_COLORSPACE_BT2020:
964 c = HDMI_COLORIMETRY_EXTENDED;
965 if (y && format->format.ycbcr_enc == V4L2_YCBCR_ENC_BT2020_CONST_LUM)
966 ec = 5; /* Not yet available in hdmi.h */
967 else
968 ec = 6; /* Not yet available in hdmi.h */
969 break;
970 default:
971 break;
972 }
973
974 /*
975 * CEA-861-F says that for RGB formats the YCC range must match the
976 * RGB range, although sources should ignore the YCC range.
977 *
978 * The RGB quantization range shouldn't be non-zero if the EDID doesn't
979 * have the Q bit set in the Video Capabilities Data Block, however this
980 * isn't checked at the moment. The assumption is that the application
981 * knows the EDID and can detect this.
982 *
983 * The same is true for the YCC quantization range: non-standard YCC
984 * quantization ranges should only be sent if the EDID has the YQ bit
985 * set in the Video Capabilities Data Block.
986 */
987 switch (format->format.quantization) {
988 case V4L2_QUANTIZATION_FULL_RANGE:
989 q = y ? HDMI_QUANTIZATION_RANGE_DEFAULT :
990 HDMI_QUANTIZATION_RANGE_FULL;
991 yq = q ? q - 1 : HDMI_YCC_QUANTIZATION_RANGE_FULL;
992 break;
993 case V4L2_QUANTIZATION_LIM_RANGE:
994 q = y ? HDMI_QUANTIZATION_RANGE_DEFAULT :
995 HDMI_QUANTIZATION_RANGE_LIMITED;
996 yq = q ? q - 1 : HDMI_YCC_QUANTIZATION_RANGE_LIMITED;
997 break;
998 }
999
1000 adv7511_wr_and_or(sd, 0x4a, 0xbf, 0);
1001 adv7511_wr_and_or(sd, 0x55, 0x9f, y << 5);
1002 adv7511_wr_and_or(sd, 0x56, 0x3f, c << 6);
1003 adv7511_wr_and_or(sd, 0x57, 0x83, (ec << 4) | (q << 2));
1004 adv7511_wr_and_or(sd, 0x59, 0x0f, yq << 4);
1005 adv7511_wr_and_or(sd, 0x4a, 0xff, 1);
1006
797 return 0; 1007 return 0;
798} 1008}
799 1009
800static const struct v4l2_subdev_pad_ops adv7511_pad_ops = { 1010static const struct v4l2_subdev_pad_ops adv7511_pad_ops = {
801 .get_edid = adv7511_get_edid, 1011 .get_edid = adv7511_get_edid,
1012 .enum_mbus_code = adv7511_enum_mbus_code,
1013 .get_fmt = adv7511_get_fmt,
1014 .set_fmt = adv7511_set_fmt,
802 .enum_dv_timings = adv7511_enum_dv_timings, 1015 .enum_dv_timings = adv7511_enum_dv_timings,
803 .dv_timings_cap = adv7511_dv_timings_cap, 1016 .dv_timings_cap = adv7511_dv_timings_cap,
804}; 1017};
@@ -1116,6 +1329,8 @@ static int adv7511_probe(struct i2c_client *client, const struct i2c_device_id *
1116 return -ENODEV; 1329 return -ENODEV;
1117 } 1330 }
1118 memcpy(&state->pdata, pdata, sizeof(state->pdata)); 1331 memcpy(&state->pdata, pdata, sizeof(state->pdata));
1332 state->fmt_code = MEDIA_BUS_FMT_RGB888_1X24;
1333 state->colorspace = V4L2_COLORSPACE_SRGB;
1119 1334
1120 sd = &state->sd; 1335 sd = &state->sd;
1121 1336
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index 47795ff71688..e43dd2e2a38a 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -88,7 +88,7 @@ struct adv7604_reg_seq {
88}; 88};
89 89
90struct adv7604_format_info { 90struct adv7604_format_info {
91 enum v4l2_mbus_pixelcode code; 91 u32 code;
92 u8 op_ch_sel; 92 u8 op_ch_sel;
93 bool rgb_out; 93 bool rgb_out;
94 bool swap_cb_cr; 94 bool swap_cb_cr;
@@ -749,77 +749,77 @@ static void adv7604_write_reg_seq(struct v4l2_subdev *sd,
749 */ 749 */
750 750
751static const struct adv7604_format_info adv7604_formats[] = { 751static const struct adv7604_format_info adv7604_formats[] = {
752 { V4L2_MBUS_FMT_RGB888_1X24, ADV7604_OP_CH_SEL_RGB, true, false, 752 { MEDIA_BUS_FMT_RGB888_1X24, ADV7604_OP_CH_SEL_RGB, true, false,
753 ADV7604_OP_MODE_SEL_SDR_444 | ADV7604_OP_FORMAT_SEL_8BIT }, 753 ADV7604_OP_MODE_SEL_SDR_444 | ADV7604_OP_FORMAT_SEL_8BIT },
754 { V4L2_MBUS_FMT_YUYV8_2X8, ADV7604_OP_CH_SEL_RGB, false, false, 754 { MEDIA_BUS_FMT_YUYV8_2X8, ADV7604_OP_CH_SEL_RGB, false, false,
755 ADV7604_OP_MODE_SEL_SDR_422 | ADV7604_OP_FORMAT_SEL_8BIT }, 755 ADV7604_OP_MODE_SEL_SDR_422 | ADV7604_OP_FORMAT_SEL_8BIT },
756 { V4L2_MBUS_FMT_YVYU8_2X8, ADV7604_OP_CH_SEL_RGB, false, true, 756 { MEDIA_BUS_FMT_YVYU8_2X8, ADV7604_OP_CH_SEL_RGB, false, true,
757 ADV7604_OP_MODE_SEL_SDR_422 | ADV7604_OP_FORMAT_SEL_8BIT }, 757 ADV7604_OP_MODE_SEL_SDR_422 | ADV7604_OP_FORMAT_SEL_8BIT },
758 { V4L2_MBUS_FMT_YUYV10_2X10, ADV7604_OP_CH_SEL_RGB, false, false, 758 { MEDIA_BUS_FMT_YUYV10_2X10, ADV7604_OP_CH_SEL_RGB, false, false,
759 ADV7604_OP_MODE_SEL_SDR_422 | ADV7604_OP_FORMAT_SEL_10BIT }, 759 ADV7604_OP_MODE_SEL_SDR_422 | ADV7604_OP_FORMAT_SEL_10BIT },
760 { V4L2_MBUS_FMT_YVYU10_2X10, ADV7604_OP_CH_SEL_RGB, false, true, 760 { MEDIA_BUS_FMT_YVYU10_2X10, ADV7604_OP_CH_SEL_RGB, false, true,
761 ADV7604_OP_MODE_SEL_SDR_422 | ADV7604_OP_FORMAT_SEL_10BIT }, 761 ADV7604_OP_MODE_SEL_SDR_422 | ADV7604_OP_FORMAT_SEL_10BIT },
762 { V4L2_MBUS_FMT_YUYV12_2X12, ADV7604_OP_CH_SEL_RGB, false, false, 762 { MEDIA_BUS_FMT_YUYV12_2X12, ADV7604_OP_CH_SEL_RGB, false, false,
763 ADV7604_OP_MODE_SEL_SDR_422 | ADV7604_OP_FORMAT_SEL_12BIT }, 763 ADV7604_OP_MODE_SEL_SDR_422 | ADV7604_OP_FORMAT_SEL_12BIT },
764 { V4L2_MBUS_FMT_YVYU12_2X12, ADV7604_OP_CH_SEL_RGB, false, true, 764 { MEDIA_BUS_FMT_YVYU12_2X12, ADV7604_OP_CH_SEL_RGB, false, true,
765 ADV7604_OP_MODE_SEL_SDR_422 | ADV7604_OP_FORMAT_SEL_12BIT }, 765 ADV7604_OP_MODE_SEL_SDR_422 | ADV7604_OP_FORMAT_SEL_12BIT },
766 { V4L2_MBUS_FMT_UYVY8_1X16, ADV7604_OP_CH_SEL_RBG, false, false, 766 { MEDIA_BUS_FMT_UYVY8_1X16, ADV7604_OP_CH_SEL_RBG, false, false,
767 ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_8BIT }, 767 ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_8BIT },
768 { V4L2_MBUS_FMT_VYUY8_1X16, ADV7604_OP_CH_SEL_RBG, false, true, 768 { MEDIA_BUS_FMT_VYUY8_1X16, ADV7604_OP_CH_SEL_RBG, false, true,
769 ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_8BIT }, 769 ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_8BIT },
770 { V4L2_MBUS_FMT_YUYV8_1X16, ADV7604_OP_CH_SEL_RGB, false, false, 770 { MEDIA_BUS_FMT_YUYV8_1X16, ADV7604_OP_CH_SEL_RGB, false, false,
771 ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_8BIT }, 771 ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_8BIT },
772 { V4L2_MBUS_FMT_YVYU8_1X16, ADV7604_OP_CH_SEL_RGB, false, true, 772 { MEDIA_BUS_FMT_YVYU8_1X16, ADV7604_OP_CH_SEL_RGB, false, true,
773 ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_8BIT }, 773 ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_8BIT },
774 { V4L2_MBUS_FMT_UYVY10_1X20, ADV7604_OP_CH_SEL_RBG, false, false, 774 { MEDIA_BUS_FMT_UYVY10_1X20, ADV7604_OP_CH_SEL_RBG, false, false,
775 ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_10BIT }, 775 ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_10BIT },
776 { V4L2_MBUS_FMT_VYUY10_1X20, ADV7604_OP_CH_SEL_RBG, false, true, 776 { MEDIA_BUS_FMT_VYUY10_1X20, ADV7604_OP_CH_SEL_RBG, false, true,
777 ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_10BIT }, 777 ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_10BIT },
778 { V4L2_MBUS_FMT_YUYV10_1X20, ADV7604_OP_CH_SEL_RGB, false, false, 778 { MEDIA_BUS_FMT_YUYV10_1X20, ADV7604_OP_CH_SEL_RGB, false, false,
779 ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_10BIT }, 779 ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_10BIT },
780 { V4L2_MBUS_FMT_YVYU10_1X20, ADV7604_OP_CH_SEL_RGB, false, true, 780 { MEDIA_BUS_FMT_YVYU10_1X20, ADV7604_OP_CH_SEL_RGB, false, true,
781 ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_10BIT }, 781 ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_10BIT },
782 { V4L2_MBUS_FMT_UYVY12_1X24, ADV7604_OP_CH_SEL_RBG, false, false, 782 { MEDIA_BUS_FMT_UYVY12_1X24, ADV7604_OP_CH_SEL_RBG, false, false,
783 ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_12BIT }, 783 ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_12BIT },
784 { V4L2_MBUS_FMT_VYUY12_1X24, ADV7604_OP_CH_SEL_RBG, false, true, 784 { MEDIA_BUS_FMT_VYUY12_1X24, ADV7604_OP_CH_SEL_RBG, false, true,
785 ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_12BIT }, 785 ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_12BIT },
786 { V4L2_MBUS_FMT_YUYV12_1X24, ADV7604_OP_CH_SEL_RGB, false, false, 786 { MEDIA_BUS_FMT_YUYV12_1X24, ADV7604_OP_CH_SEL_RGB, false, false,
787 ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_12BIT }, 787 ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_12BIT },
788 { V4L2_MBUS_FMT_YVYU12_1X24, ADV7604_OP_CH_SEL_RGB, false, true, 788 { MEDIA_BUS_FMT_YVYU12_1X24, ADV7604_OP_CH_SEL_RGB, false, true,
789 ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_12BIT }, 789 ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_12BIT },
790}; 790};
791 791
792static const struct adv7604_format_info adv7611_formats[] = { 792static const struct adv7604_format_info adv7611_formats[] = {
793 { V4L2_MBUS_FMT_RGB888_1X24, ADV7604_OP_CH_SEL_RGB, true, false, 793 { MEDIA_BUS_FMT_RGB888_1X24, ADV7604_OP_CH_SEL_RGB, true, false,
794 ADV7604_OP_MODE_SEL_SDR_444 | ADV7604_OP_FORMAT_SEL_8BIT }, 794 ADV7604_OP_MODE_SEL_SDR_444 | ADV7604_OP_FORMAT_SEL_8BIT },
795 { V4L2_MBUS_FMT_YUYV8_2X8, ADV7604_OP_CH_SEL_RGB, false, false, 795 { MEDIA_BUS_FMT_YUYV8_2X8, ADV7604_OP_CH_SEL_RGB, false, false,
796 ADV7604_OP_MODE_SEL_SDR_422 | ADV7604_OP_FORMAT_SEL_8BIT }, 796 ADV7604_OP_MODE_SEL_SDR_422 | ADV7604_OP_FORMAT_SEL_8BIT },
797 { V4L2_MBUS_FMT_YVYU8_2X8, ADV7604_OP_CH_SEL_RGB, false, true, 797 { MEDIA_BUS_FMT_YVYU8_2X8, ADV7604_OP_CH_SEL_RGB, false, true,
798 ADV7604_OP_MODE_SEL_SDR_422 | ADV7604_OP_FORMAT_SEL_8BIT }, 798 ADV7604_OP_MODE_SEL_SDR_422 | ADV7604_OP_FORMAT_SEL_8BIT },
799 { V4L2_MBUS_FMT_YUYV12_2X12, ADV7604_OP_CH_SEL_RGB, false, false, 799 { MEDIA_BUS_FMT_YUYV12_2X12, ADV7604_OP_CH_SEL_RGB, false, false,
800 ADV7604_OP_MODE_SEL_SDR_422 | ADV7604_OP_FORMAT_SEL_12BIT }, 800 ADV7604_OP_MODE_SEL_SDR_422 | ADV7604_OP_FORMAT_SEL_12BIT },
801 { V4L2_MBUS_FMT_YVYU12_2X12, ADV7604_OP_CH_SEL_RGB, false, true, 801 { MEDIA_BUS_FMT_YVYU12_2X12, ADV7604_OP_CH_SEL_RGB, false, true,
802 ADV7604_OP_MODE_SEL_SDR_422 | ADV7604_OP_FORMAT_SEL_12BIT }, 802 ADV7604_OP_MODE_SEL_SDR_422 | ADV7604_OP_FORMAT_SEL_12BIT },
803 { V4L2_MBUS_FMT_UYVY8_1X16, ADV7604_OP_CH_SEL_RBG, false, false, 803 { MEDIA_BUS_FMT_UYVY8_1X16, ADV7604_OP_CH_SEL_RBG, false, false,
804 ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_8BIT }, 804 ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_8BIT },
805 { V4L2_MBUS_FMT_VYUY8_1X16, ADV7604_OP_CH_SEL_RBG, false, true, 805 { MEDIA_BUS_FMT_VYUY8_1X16, ADV7604_OP_CH_SEL_RBG, false, true,
806 ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_8BIT }, 806 ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_8BIT },
807 { V4L2_MBUS_FMT_YUYV8_1X16, ADV7604_OP_CH_SEL_RGB, false, false, 807 { MEDIA_BUS_FMT_YUYV8_1X16, ADV7604_OP_CH_SEL_RGB, false, false,
808 ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_8BIT }, 808 ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_8BIT },
809 { V4L2_MBUS_FMT_YVYU8_1X16, ADV7604_OP_CH_SEL_RGB, false, true, 809 { MEDIA_BUS_FMT_YVYU8_1X16, ADV7604_OP_CH_SEL_RGB, false, true,
810 ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_8BIT }, 810 ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_8BIT },
811 { V4L2_MBUS_FMT_UYVY12_1X24, ADV7604_OP_CH_SEL_RBG, false, false, 811 { MEDIA_BUS_FMT_UYVY12_1X24, ADV7604_OP_CH_SEL_RBG, false, false,
812 ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_12BIT }, 812 ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_12BIT },
813 { V4L2_MBUS_FMT_VYUY12_1X24, ADV7604_OP_CH_SEL_RBG, false, true, 813 { MEDIA_BUS_FMT_VYUY12_1X24, ADV7604_OP_CH_SEL_RBG, false, true,
814 ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_12BIT }, 814 ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_12BIT },
815 { V4L2_MBUS_FMT_YUYV12_1X24, ADV7604_OP_CH_SEL_RGB, false, false, 815 { MEDIA_BUS_FMT_YUYV12_1X24, ADV7604_OP_CH_SEL_RGB, false, false,
816 ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_12BIT }, 816 ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_12BIT },
817 { V4L2_MBUS_FMT_YVYU12_1X24, ADV7604_OP_CH_SEL_RGB, false, true, 817 { MEDIA_BUS_FMT_YVYU12_1X24, ADV7604_OP_CH_SEL_RGB, false, true,
818 ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_12BIT }, 818 ADV7604_OP_MODE_SEL_SDR_422_2X | ADV7604_OP_FORMAT_SEL_12BIT },
819}; 819};
820 820
821static const struct adv7604_format_info * 821static const struct adv7604_format_info *
822adv7604_format_info(struct adv7604_state *state, enum v4l2_mbus_pixelcode code) 822adv7604_format_info(struct adv7604_state *state, u32 code)
823{ 823{
824 unsigned int i; 824 unsigned int i;
825 825
@@ -1917,7 +1917,7 @@ static int adv7604_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
1917 1917
1918 info = adv7604_format_info(state, format->format.code); 1918 info = adv7604_format_info(state, format->format.code);
1919 if (info == NULL) 1919 if (info == NULL)
1920 info = adv7604_format_info(state, V4L2_MBUS_FMT_YUYV8_2X8); 1920 info = adv7604_format_info(state, MEDIA_BUS_FMT_YUYV8_2X8);
1921 1921
1922 adv7604_fill_format(state, &format->format); 1922 adv7604_fill_format(state, &format->format);
1923 format->format.code = info->code; 1923 format->format.code = info->code;
@@ -1997,19 +1997,7 @@ static int adv7604_get_edid(struct v4l2_subdev *sd, struct v4l2_edid *edid)
1997 struct adv7604_state *state = to_state(sd); 1997 struct adv7604_state *state = to_state(sd);
1998 u8 *data = NULL; 1998 u8 *data = NULL;
1999 1999
2000 if (edid->pad > ADV7604_PAD_HDMI_PORT_D) 2000 memset(edid->reserved, 0, sizeof(edid->reserved));
2001 return -EINVAL;
2002 if (edid->blocks == 0)
2003 return -EINVAL;
2004 if (edid->blocks > 2)
2005 return -EINVAL;
2006 if (edid->start_block > 1)
2007 return -EINVAL;
2008 if (edid->start_block == 1)
2009 edid->blocks = 1;
2010
2011 if (edid->blocks > state->edid.blocks)
2012 edid->blocks = state->edid.blocks;
2013 2001
2014 switch (edid->pad) { 2002 switch (edid->pad) {
2015 case ADV7604_PAD_HDMI_PORT_A: 2003 case ADV7604_PAD_HDMI_PORT_A:
@@ -2021,14 +2009,24 @@ static int adv7604_get_edid(struct v4l2_subdev *sd, struct v4l2_edid *edid)
2021 break; 2009 break;
2022 default: 2010 default:
2023 return -EINVAL; 2011 return -EINVAL;
2024 break;
2025 } 2012 }
2026 if (!data) 2013
2014 if (edid->start_block == 0 && edid->blocks == 0) {
2015 edid->blocks = data ? state->edid.blocks : 0;
2016 return 0;
2017 }
2018
2019 if (data == NULL)
2027 return -ENODATA; 2020 return -ENODATA;
2028 2021
2029 memcpy(edid->edid, 2022 if (edid->start_block >= state->edid.blocks)
2030 data + edid->start_block * 128, 2023 return -EINVAL;
2031 edid->blocks * 128); 2024
2025 if (edid->start_block + edid->blocks > state->edid.blocks)
2026 edid->blocks = state->edid.blocks - edid->start_block;
2027
2028 memcpy(edid->edid, data + edid->start_block * 128, edid->blocks * 128);
2029
2032 return 0; 2030 return 0;
2033} 2031}
2034 2032
@@ -2068,6 +2066,8 @@ static int adv7604_set_edid(struct v4l2_subdev *sd, struct v4l2_edid *edid)
2068 int err; 2066 int err;
2069 int i; 2067 int i;
2070 2068
2069 memset(edid->reserved, 0, sizeof(edid->reserved));
2070
2071 if (edid->pad > ADV7604_PAD_HDMI_PORT_D) 2071 if (edid->pad > ADV7604_PAD_HDMI_PORT_D)
2072 return -EINVAL; 2072 return -EINVAL;
2073 if (edid->start_block != 0) 2073 if (edid->start_block != 0)
@@ -2164,7 +2164,6 @@ static int adv7604_set_edid(struct v4l2_subdev *sd, struct v4l2_edid *edid)
2164 return -EIO; 2164 return -EIO;
2165 } 2165 }
2166 2166
2167
2168 /* enable hotplug after 100 ms */ 2167 /* enable hotplug after 100 ms */
2169 queue_delayed_work(state->work_queues, 2168 queue_delayed_work(state->work_queues,
2170 &state->delayed_work_enable_hotplug, HZ / 10); 2169 &state->delayed_work_enable_hotplug, HZ / 10);
@@ -2807,7 +2806,7 @@ static int adv7604_probe(struct i2c_client *client,
2807 } 2806 }
2808 2807
2809 state->timings = cea640x480; 2808 state->timings = cea640x480;
2810 state->format = adv7604_format_info(state, V4L2_MBUS_FMT_YUYV8_2X8); 2809 state->format = adv7604_format_info(state, MEDIA_BUS_FMT_YUYV8_2X8);
2811 2810
2812 sd = &state->sd; 2811 sd = &state->sd;
2813 v4l2_i2c_subdev_init(sd, client, &adv7604_ops); 2812 v4l2_i2c_subdev_init(sd, client, &adv7604_ops);
diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
index 48b628bc6714..75d26dfd0939 100644
--- a/drivers/media/i2c/adv7842.c
+++ b/drivers/media/i2c/adv7842.c
@@ -1877,12 +1877,12 @@ static int adv7842_s_routing(struct v4l2_subdev *sd,
1877} 1877}
1878 1878
1879static int adv7842_enum_mbus_fmt(struct v4l2_subdev *sd, unsigned int index, 1879static int adv7842_enum_mbus_fmt(struct v4l2_subdev *sd, unsigned int index,
1880 enum v4l2_mbus_pixelcode *code) 1880 u32 *code)
1881{ 1881{
1882 if (index) 1882 if (index)
1883 return -EINVAL; 1883 return -EINVAL;
1884 /* Good enough for now */ 1884 /* Good enough for now */
1885 *code = V4L2_MBUS_FMT_FIXED; 1885 *code = MEDIA_BUS_FMT_FIXED;
1886 return 0; 1886 return 0;
1887} 1887}
1888 1888
@@ -1893,7 +1893,7 @@ static int adv7842_g_mbus_fmt(struct v4l2_subdev *sd,
1893 1893
1894 fmt->width = state->timings.bt.width; 1894 fmt->width = state->timings.bt.width;
1895 fmt->height = state->timings.bt.height; 1895 fmt->height = state->timings.bt.height;
1896 fmt->code = V4L2_MBUS_FMT_FIXED; 1896 fmt->code = MEDIA_BUS_FMT_FIXED;
1897 fmt->field = V4L2_FIELD_NONE; 1897 fmt->field = V4L2_FIELD_NONE;
1898 1898
1899 if (state->mode == ADV7842_MODE_SDP) { 1899 if (state->mode == ADV7842_MODE_SDP) {
@@ -2028,16 +2028,7 @@ static int adv7842_get_edid(struct v4l2_subdev *sd, struct v4l2_edid *edid)
2028 struct adv7842_state *state = to_state(sd); 2028 struct adv7842_state *state = to_state(sd);
2029 u8 *data = NULL; 2029 u8 *data = NULL;
2030 2030
2031 if (edid->pad > ADV7842_EDID_PORT_VGA) 2031 memset(edid->reserved, 0, sizeof(edid->reserved));
2032 return -EINVAL;
2033 if (edid->blocks == 0)
2034 return -EINVAL;
2035 if (edid->blocks > 2)
2036 return -EINVAL;
2037 if (edid->start_block > 1)
2038 return -EINVAL;
2039 if (edid->start_block == 1)
2040 edid->blocks = 1;
2041 2032
2042 switch (edid->pad) { 2033 switch (edid->pad) {
2043 case ADV7842_EDID_PORT_A: 2034 case ADV7842_EDID_PORT_A:
@@ -2052,12 +2043,23 @@ static int adv7842_get_edid(struct v4l2_subdev *sd, struct v4l2_edid *edid)
2052 default: 2043 default:
2053 return -EINVAL; 2044 return -EINVAL;
2054 } 2045 }
2046
2047 if (edid->start_block == 0 && edid->blocks == 0) {
2048 edid->blocks = data ? 2 : 0;
2049 return 0;
2050 }
2051
2055 if (!data) 2052 if (!data)
2056 return -ENODATA; 2053 return -ENODATA;
2057 2054
2058 memcpy(edid->edid, 2055 if (edid->start_block >= 2)
2059 data + edid->start_block * 128, 2056 return -EINVAL;
2060 edid->blocks * 128); 2057
2058 if (edid->start_block + edid->blocks > 2)
2059 edid->blocks = 2 - edid->start_block;
2060
2061 memcpy(edid->edid, data + edid->start_block * 128, edid->blocks * 128);
2062
2061 return 0; 2063 return 0;
2062} 2064}
2063 2065
@@ -2066,12 +2068,16 @@ static int adv7842_set_edid(struct v4l2_subdev *sd, struct v4l2_edid *e)
2066 struct adv7842_state *state = to_state(sd); 2068 struct adv7842_state *state = to_state(sd);
2067 int err = 0; 2069 int err = 0;
2068 2070
2071 memset(e->reserved, 0, sizeof(e->reserved));
2072
2069 if (e->pad > ADV7842_EDID_PORT_VGA) 2073 if (e->pad > ADV7842_EDID_PORT_VGA)
2070 return -EINVAL; 2074 return -EINVAL;
2071 if (e->start_block != 0) 2075 if (e->start_block != 0)
2072 return -EINVAL; 2076 return -EINVAL;
2073 if (e->blocks > 2) 2077 if (e->blocks > 2) {
2078 e->blocks = 2;
2074 return -E2BIG; 2079 return -E2BIG;
2080 }
2075 2081
2076 /* todo, per edid */ 2082 /* todo, per edid */
2077 state->aspect_ratio = v4l2_calc_aspect_ratio(e->edid[0x15], 2083 state->aspect_ratio = v4l2_calc_aspect_ratio(e->edid[0x15],
diff --git a/drivers/media/i2c/ak881x.c b/drivers/media/i2c/ak881x.c
index c14e66756b98..69aeaf397624 100644
--- a/drivers/media/i2c/ak881x.c
+++ b/drivers/media/i2c/ak881x.c
@@ -102,7 +102,7 @@ static int ak881x_try_g_mbus_fmt(struct v4l2_subdev *sd,
102 v4l_bound_align_image(&mf->width, 0, 720, 2, 102 v4l_bound_align_image(&mf->width, 0, 720, 2,
103 &mf->height, 0, ak881x->lines, 1, 0); 103 &mf->height, 0, ak881x->lines, 1, 0);
104 mf->field = V4L2_FIELD_INTERLACED; 104 mf->field = V4L2_FIELD_INTERLACED;
105 mf->code = V4L2_MBUS_FMT_YUYV8_2X8; 105 mf->code = MEDIA_BUS_FMT_YUYV8_2X8;
106 mf->colorspace = V4L2_COLORSPACE_SMPTE170M; 106 mf->colorspace = V4L2_COLORSPACE_SMPTE170M;
107 107
108 return 0; 108 return 0;
@@ -112,19 +112,19 @@ static int ak881x_s_mbus_fmt(struct v4l2_subdev *sd,
112 struct v4l2_mbus_framefmt *mf) 112 struct v4l2_mbus_framefmt *mf)
113{ 113{
114 if (mf->field != V4L2_FIELD_INTERLACED || 114 if (mf->field != V4L2_FIELD_INTERLACED ||
115 mf->code != V4L2_MBUS_FMT_YUYV8_2X8) 115 mf->code != MEDIA_BUS_FMT_YUYV8_2X8)
116 return -EINVAL; 116 return -EINVAL;
117 117
118 return ak881x_try_g_mbus_fmt(sd, mf); 118 return ak881x_try_g_mbus_fmt(sd, mf);
119} 119}
120 120
121static int ak881x_enum_mbus_fmt(struct v4l2_subdev *sd, unsigned int index, 121static int ak881x_enum_mbus_fmt(struct v4l2_subdev *sd, unsigned int index,
122 enum v4l2_mbus_pixelcode *code) 122 u32 *code)
123{ 123{
124 if (index) 124 if (index)
125 return -EINVAL; 125 return -EINVAL;
126 126
127 *code = V4L2_MBUS_FMT_YUYV8_2X8; 127 *code = MEDIA_BUS_FMT_YUYV8_2X8;
128 return 0; 128 return 0;
129} 129}
130 130
diff --git a/drivers/media/i2c/cx25840/cx25840-core.c b/drivers/media/i2c/cx25840/cx25840-core.c
index e453a3ffe7d1..573e08826b9b 100644
--- a/drivers/media/i2c/cx25840/cx25840-core.c
+++ b/drivers/media/i2c/cx25840/cx25840-core.c
@@ -879,7 +879,7 @@ void cx25840_std_setup(struct i2c_client *client)
879 /* Sets horizontal blanking delay and active lines */ 879 /* Sets horizontal blanking delay and active lines */
880 cx25840_write(client, 0x470, hblank); 880 cx25840_write(client, 0x470, hblank);
881 cx25840_write(client, 0x471, 881 cx25840_write(client, 0x471,
882 0xff & (((hblank >> 8) & 0x3) | (hactive << 4))); 882 (((hblank >> 8) & 0x3) | (hactive << 4)) & 0xff);
883 cx25840_write(client, 0x472, hactive >> 4); 883 cx25840_write(client, 0x472, hactive >> 4);
884 884
885 /* Sets burst gate delay */ 885 /* Sets burst gate delay */
@@ -888,13 +888,13 @@ void cx25840_std_setup(struct i2c_client *client)
888 /* Sets vertical blanking delay and active duration */ 888 /* Sets vertical blanking delay and active duration */
889 cx25840_write(client, 0x474, vblank); 889 cx25840_write(client, 0x474, vblank);
890 cx25840_write(client, 0x475, 890 cx25840_write(client, 0x475,
891 0xff & (((vblank >> 8) & 0x3) | (vactive << 4))); 891 (((vblank >> 8) & 0x3) | (vactive << 4)) & 0xff);
892 cx25840_write(client, 0x476, vactive >> 4); 892 cx25840_write(client, 0x476, vactive >> 4);
893 cx25840_write(client, 0x477, vblank656); 893 cx25840_write(client, 0x477, vblank656);
894 894
895 /* Sets src decimation rate */ 895 /* Sets src decimation rate */
896 cx25840_write(client, 0x478, 0xff & src_decimation); 896 cx25840_write(client, 0x478, src_decimation & 0xff);
897 cx25840_write(client, 0x479, 0xff & (src_decimation >> 8)); 897 cx25840_write(client, 0x479, (src_decimation >> 8) & 0xff);
898 898
899 /* Sets Luma and UV Low pass filters */ 899 /* Sets Luma and UV Low pass filters */
900 cx25840_write(client, 0x47a, luma_lpf << 6 | ((uv_lpf << 4) & 0x30)); 900 cx25840_write(client, 0x47a, luma_lpf << 6 | ((uv_lpf << 4) & 0x30));
@@ -904,8 +904,8 @@ void cx25840_std_setup(struct i2c_client *client)
904 904
905 /* Sets SC Step*/ 905 /* Sets SC Step*/
906 cx25840_write(client, 0x47c, sc); 906 cx25840_write(client, 0x47c, sc);
907 cx25840_write(client, 0x47d, 0xff & sc >> 8); 907 cx25840_write(client, 0x47d, (sc >> 8) & 0xff);
908 cx25840_write(client, 0x47e, 0xff & sc >> 16); 908 cx25840_write(client, 0x47e, (sc >> 16) & 0xff);
909 909
910 /* Sets VBI parameters */ 910 /* Sets VBI parameters */
911 if (std & V4L2_STD_625_50) { 911 if (std & V4L2_STD_625_50) {
@@ -1373,7 +1373,7 @@ static int cx25840_s_mbus_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt
1373 int HSC, VSC, Vsrc, Hsrc, filter, Vlines; 1373 int HSC, VSC, Vsrc, Hsrc, filter, Vlines;
1374 int is_50Hz = !(state->std & V4L2_STD_525_60); 1374 int is_50Hz = !(state->std & V4L2_STD_525_60);
1375 1375
1376 if (fmt->code != V4L2_MBUS_FMT_FIXED) 1376 if (fmt->code != MEDIA_BUS_FMT_FIXED)
1377 return -EINVAL; 1377 return -EINVAL;
1378 1378
1379 fmt->field = V4L2_FIELD_INTERLACED; 1379 fmt->field = V4L2_FIELD_INTERLACED;
diff --git a/drivers/media/i2c/cx25840/cx25840-firmware.c b/drivers/media/i2c/cx25840/cx25840-firmware.c
index b3169f94ece8..9bbb31adc29d 100644
--- a/drivers/media/i2c/cx25840/cx25840-firmware.c
+++ b/drivers/media/i2c/cx25840/cx25840-firmware.c
@@ -113,7 +113,7 @@ int cx25840_loadfw(struct i2c_client *client)
113 const u8 *ptr; 113 const u8 *ptr;
114 const char *fwname = get_fw_name(client); 114 const char *fwname = get_fw_name(client);
115 int size, retval; 115 int size, retval;
116 int MAX_BUF_SIZE = FWSEND; 116 int max_buf_size = FWSEND;
117 u32 gpio_oe = 0, gpio_da = 0; 117 u32 gpio_oe = 0, gpio_da = 0;
118 118
119 if (is_cx2388x(state)) { 119 if (is_cx2388x(state)) {
@@ -122,10 +122,9 @@ int cx25840_loadfw(struct i2c_client *client)
122 gpio_da = cx25840_read(client, 0x164); 122 gpio_da = cx25840_read(client, 0x164);
123 } 123 }
124 124
125 if (is_cx231xx(state) && MAX_BUF_SIZE > 16) { 125 /* cx231xx cannot accept more than 16 bytes at a time */
126 v4l_err(client, " Firmware download size changed to 16 bytes max length\n"); 126 if (is_cx231xx(state) && max_buf_size > 16)
127 MAX_BUF_SIZE = 16; /* cx231xx cannot accept more than 16 bytes at a time */ 127 max_buf_size = 16;
128 }
129 128
130 if (request_firmware(&fw, fwname, FWDEV(client)) != 0) { 129 if (request_firmware(&fw, fwname, FWDEV(client)) != 0) {
131 v4l_err(client, "unable to open firmware %s\n", fwname); 130 v4l_err(client, "unable to open firmware %s\n", fwname);
@@ -140,7 +139,7 @@ int cx25840_loadfw(struct i2c_client *client)
140 size = fw->size; 139 size = fw->size;
141 ptr = fw->data; 140 ptr = fw->data;
142 while (size > 0) { 141 while (size > 0) {
143 int len = min(MAX_BUF_SIZE - 2, size); 142 int len = min(max_buf_size - 2, size);
144 143
145 memcpy(buffer + 2, ptr, len); 144 memcpy(buffer + 2, ptr, len);
146 145
diff --git a/drivers/media/i2c/ir-kbd-i2c.c b/drivers/media/i2c/ir-kbd-i2c.c
index 8311f1a9a38e..175a76114953 100644
--- a/drivers/media/i2c/ir-kbd-i2c.c
+++ b/drivers/media/i2c/ir-kbd-i2c.c
@@ -464,8 +464,7 @@ static int ir_remove(struct i2c_client *client)
464 cancel_delayed_work_sync(&ir->work); 464 cancel_delayed_work_sync(&ir->work);
465 465
466 /* unregister device */ 466 /* unregister device */
467 if (ir->rc) 467 rc_unregister_device(ir->rc);
468 rc_unregister_device(ir->rc);
469 468
470 /* free memory */ 469 /* free memory */
471 return 0; 470 return 0;
diff --git a/drivers/media/i2c/m5mols/m5mols_core.c b/drivers/media/i2c/m5mols/m5mols_core.c
index 8d870b7b43ff..2820f7c38cba 100644
--- a/drivers/media/i2c/m5mols/m5mols_core.c
+++ b/drivers/media/i2c/m5mols/m5mols_core.c
@@ -57,14 +57,14 @@ static struct v4l2_mbus_framefmt m5mols_default_ffmt[M5MOLS_RESTYPE_MAX] = {
57 [M5MOLS_RESTYPE_MONITOR] = { 57 [M5MOLS_RESTYPE_MONITOR] = {
58 .width = 1920, 58 .width = 1920,
59 .height = 1080, 59 .height = 1080,
60 .code = V4L2_MBUS_FMT_VYUY8_2X8, 60 .code = MEDIA_BUS_FMT_VYUY8_2X8,
61 .field = V4L2_FIELD_NONE, 61 .field = V4L2_FIELD_NONE,
62 .colorspace = V4L2_COLORSPACE_JPEG, 62 .colorspace = V4L2_COLORSPACE_JPEG,
63 }, 63 },
64 [M5MOLS_RESTYPE_CAPTURE] = { 64 [M5MOLS_RESTYPE_CAPTURE] = {
65 .width = 1920, 65 .width = 1920,
66 .height = 1080, 66 .height = 1080,
67 .code = V4L2_MBUS_FMT_JPEG_1X8, 67 .code = MEDIA_BUS_FMT_JPEG_1X8,
68 .field = V4L2_FIELD_NONE, 68 .field = V4L2_FIELD_NONE,
69 .colorspace = V4L2_COLORSPACE_JPEG, 69 .colorspace = V4L2_COLORSPACE_JPEG,
70 }, 70 },
@@ -479,7 +479,7 @@ static int m5mols_get_version(struct v4l2_subdev *sd)
479 * __find_restype - Lookup M-5MOLS resolution type according to pixel code 479 * __find_restype - Lookup M-5MOLS resolution type according to pixel code
480 * @code: pixel code 480 * @code: pixel code
481 */ 481 */
482static enum m5mols_restype __find_restype(enum v4l2_mbus_pixelcode code) 482static enum m5mols_restype __find_restype(u32 code)
483{ 483{
484 enum m5mols_restype type = M5MOLS_RESTYPE_MONITOR; 484 enum m5mols_restype type = M5MOLS_RESTYPE_MONITOR;
485 485
diff --git a/drivers/media/i2c/ml86v7667.c b/drivers/media/i2c/ml86v7667.c
index 2cace7313a22..d7307862c2c5 100644
--- a/drivers/media/i2c/ml86v7667.c
+++ b/drivers/media/i2c/ml86v7667.c
@@ -192,12 +192,12 @@ static int ml86v7667_g_input_status(struct v4l2_subdev *sd, u32 *status)
192} 192}
193 193
194static int ml86v7667_enum_mbus_fmt(struct v4l2_subdev *sd, unsigned int index, 194static int ml86v7667_enum_mbus_fmt(struct v4l2_subdev *sd, unsigned int index,
195 enum v4l2_mbus_pixelcode *code) 195 u32 *code)
196{ 196{
197 if (index > 0) 197 if (index > 0)
198 return -EINVAL; 198 return -EINVAL;
199 199
200 *code = V4L2_MBUS_FMT_YUYV8_2X8; 200 *code = MEDIA_BUS_FMT_YUYV8_2X8;
201 201
202 return 0; 202 return 0;
203} 203}
@@ -207,7 +207,7 @@ static int ml86v7667_mbus_fmt(struct v4l2_subdev *sd,
207{ 207{
208 struct ml86v7667_priv *priv = to_ml86v7667(sd); 208 struct ml86v7667_priv *priv = to_ml86v7667(sd);
209 209
210 fmt->code = V4L2_MBUS_FMT_YUYV8_2X8; 210 fmt->code = MEDIA_BUS_FMT_YUYV8_2X8;
211 fmt->colorspace = V4L2_COLORSPACE_SMPTE170M; 211 fmt->colorspace = V4L2_COLORSPACE_SMPTE170M;
212 /* The top field is always transferred first by the chip */ 212 /* The top field is always transferred first by the chip */
213 fmt->field = V4L2_FIELD_INTERLACED_TB; 213 fmt->field = V4L2_FIELD_INTERLACED_TB;
diff --git a/drivers/media/i2c/mt9m032.c b/drivers/media/i2c/mt9m032.c
index 85ec3bacdf1c..45b3fca188ca 100644
--- a/drivers/media/i2c/mt9m032.c
+++ b/drivers/media/i2c/mt9m032.c
@@ -323,7 +323,7 @@ static int mt9m032_enum_mbus_code(struct v4l2_subdev *subdev,
323 if (code->index != 0) 323 if (code->index != 0)
324 return -EINVAL; 324 return -EINVAL;
325 325
326 code->code = V4L2_MBUS_FMT_Y8_1X8; 326 code->code = MEDIA_BUS_FMT_Y8_1X8;
327 return 0; 327 return 0;
328} 328}
329 329
@@ -331,7 +331,7 @@ static int mt9m032_enum_frame_size(struct v4l2_subdev *subdev,
331 struct v4l2_subdev_fh *fh, 331 struct v4l2_subdev_fh *fh,
332 struct v4l2_subdev_frame_size_enum *fse) 332 struct v4l2_subdev_frame_size_enum *fse)
333{ 333{
334 if (fse->index != 0 || fse->code != V4L2_MBUS_FMT_Y8_1X8) 334 if (fse->index != 0 || fse->code != MEDIA_BUS_FMT_Y8_1X8)
335 return -EINVAL; 335 return -EINVAL;
336 336
337 fse->min_width = MT9M032_COLUMN_SIZE_DEF; 337 fse->min_width = MT9M032_COLUMN_SIZE_DEF;
@@ -759,7 +759,7 @@ static int mt9m032_probe(struct i2c_client *client,
759 759
760 sensor->format.width = sensor->crop.width; 760 sensor->format.width = sensor->crop.width;
761 sensor->format.height = sensor->crop.height; 761 sensor->format.height = sensor->crop.height;
762 sensor->format.code = V4L2_MBUS_FMT_Y8_1X8; 762 sensor->format.code = MEDIA_BUS_FMT_Y8_1X8;
763 sensor->format.field = V4L2_FIELD_NONE; 763 sensor->format.field = V4L2_FIELD_NONE;
764 sensor->format.colorspace = V4L2_COLORSPACE_SRGB; 764 sensor->format.colorspace = V4L2_COLORSPACE_SRGB;
765 765
diff --git a/drivers/media/i2c/mt9p031.c b/drivers/media/i2c/mt9p031.c
index e18797ff7faf..edb76bd33d16 100644
--- a/drivers/media/i2c/mt9p031.c
+++ b/drivers/media/i2c/mt9p031.c
@@ -950,9 +950,9 @@ static int mt9p031_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
950 format = v4l2_subdev_get_try_format(fh, 0); 950 format = v4l2_subdev_get_try_format(fh, 0);
951 951
952 if (mt9p031->model == MT9P031_MODEL_MONOCHROME) 952 if (mt9p031->model == MT9P031_MODEL_MONOCHROME)
953 format->code = V4L2_MBUS_FMT_Y12_1X12; 953 format->code = MEDIA_BUS_FMT_Y12_1X12;
954 else 954 else
955 format->code = V4L2_MBUS_FMT_SGRBG12_1X12; 955 format->code = MEDIA_BUS_FMT_SGRBG12_1X12;
956 956
957 format->width = MT9P031_WINDOW_WIDTH_DEF; 957 format->width = MT9P031_WINDOW_WIDTH_DEF;
958 format->height = MT9P031_WINDOW_HEIGHT_DEF; 958 format->height = MT9P031_WINDOW_HEIGHT_DEF;
@@ -1120,9 +1120,9 @@ static int mt9p031_probe(struct i2c_client *client,
1120 mt9p031->crop.top = MT9P031_ROW_START_DEF; 1120 mt9p031->crop.top = MT9P031_ROW_START_DEF;
1121 1121
1122 if (mt9p031->model == MT9P031_MODEL_MONOCHROME) 1122 if (mt9p031->model == MT9P031_MODEL_MONOCHROME)
1123 mt9p031->format.code = V4L2_MBUS_FMT_Y12_1X12; 1123 mt9p031->format.code = MEDIA_BUS_FMT_Y12_1X12;
1124 else 1124 else
1125 mt9p031->format.code = V4L2_MBUS_FMT_SGRBG12_1X12; 1125 mt9p031->format.code = MEDIA_BUS_FMT_SGRBG12_1X12;
1126 1126
1127 mt9p031->format.width = MT9P031_WINDOW_WIDTH_DEF; 1127 mt9p031->format.width = MT9P031_WINDOW_WIDTH_DEF;
1128 mt9p031->format.height = MT9P031_WINDOW_HEIGHT_DEF; 1128 mt9p031->format.height = MT9P031_WINDOW_HEIGHT_DEF;
diff --git a/drivers/media/i2c/mt9t001.c b/drivers/media/i2c/mt9t001.c
index 422e068f5f1b..d9e9889b579f 100644
--- a/drivers/media/i2c/mt9t001.c
+++ b/drivers/media/i2c/mt9t001.c
@@ -333,7 +333,7 @@ static int mt9t001_enum_mbus_code(struct v4l2_subdev *subdev,
333 if (code->index > 0) 333 if (code->index > 0)
334 return -EINVAL; 334 return -EINVAL;
335 335
336 code->code = V4L2_MBUS_FMT_SGRBG10_1X10; 336 code->code = MEDIA_BUS_FMT_SGRBG10_1X10;
337 return 0; 337 return 0;
338} 338}
339 339
@@ -341,7 +341,7 @@ static int mt9t001_enum_frame_size(struct v4l2_subdev *subdev,
341 struct v4l2_subdev_fh *fh, 341 struct v4l2_subdev_fh *fh,
342 struct v4l2_subdev_frame_size_enum *fse) 342 struct v4l2_subdev_frame_size_enum *fse)
343{ 343{
344 if (fse->index >= 8 || fse->code != V4L2_MBUS_FMT_SGRBG10_1X10) 344 if (fse->index >= 8 || fse->code != MEDIA_BUS_FMT_SGRBG10_1X10)
345 return -EINVAL; 345 return -EINVAL;
346 346
347 fse->min_width = (MT9T001_WINDOW_WIDTH_DEF + 1) / fse->index; 347 fse->min_width = (MT9T001_WINDOW_WIDTH_DEF + 1) / fse->index;
@@ -792,7 +792,7 @@ static int mt9t001_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
792 crop->height = MT9T001_WINDOW_HEIGHT_DEF + 1; 792 crop->height = MT9T001_WINDOW_HEIGHT_DEF + 1;
793 793
794 format = v4l2_subdev_get_try_format(fh, 0); 794 format = v4l2_subdev_get_try_format(fh, 0);
795 format->code = V4L2_MBUS_FMT_SGRBG10_1X10; 795 format->code = MEDIA_BUS_FMT_SGRBG10_1X10;
796 format->width = MT9T001_WINDOW_WIDTH_DEF + 1; 796 format->width = MT9T001_WINDOW_WIDTH_DEF + 1;
797 format->height = MT9T001_WINDOW_HEIGHT_DEF + 1; 797 format->height = MT9T001_WINDOW_HEIGHT_DEF + 1;
798 format->field = V4L2_FIELD_NONE; 798 format->field = V4L2_FIELD_NONE;
@@ -917,7 +917,7 @@ static int mt9t001_probe(struct i2c_client *client,
917 mt9t001->crop.width = MT9T001_WINDOW_WIDTH_DEF + 1; 917 mt9t001->crop.width = MT9T001_WINDOW_WIDTH_DEF + 1;
918 mt9t001->crop.height = MT9T001_WINDOW_HEIGHT_DEF + 1; 918 mt9t001->crop.height = MT9T001_WINDOW_HEIGHT_DEF + 1;
919 919
920 mt9t001->format.code = V4L2_MBUS_FMT_SGRBG10_1X10; 920 mt9t001->format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
921 mt9t001->format.width = MT9T001_WINDOW_WIDTH_DEF + 1; 921 mt9t001->format.width = MT9T001_WINDOW_WIDTH_DEF + 1;
922 mt9t001->format.height = MT9T001_WINDOW_HEIGHT_DEF + 1; 922 mt9t001->format.height = MT9T001_WINDOW_HEIGHT_DEF + 1;
923 mt9t001->format.field = V4L2_FIELD_NONE; 923 mt9t001->format.field = V4L2_FIELD_NONE;
diff --git a/drivers/media/i2c/mt9v011.c b/drivers/media/i2c/mt9v011.c
index 47e475319a24..a10f7f8f0558 100644
--- a/drivers/media/i2c/mt9v011.c
+++ b/drivers/media/i2c/mt9v011.c
@@ -325,18 +325,18 @@ static int mt9v011_reset(struct v4l2_subdev *sd, u32 val)
325} 325}
326 326
327static int mt9v011_enum_mbus_fmt(struct v4l2_subdev *sd, unsigned index, 327static int mt9v011_enum_mbus_fmt(struct v4l2_subdev *sd, unsigned index,
328 enum v4l2_mbus_pixelcode *code) 328 u32 *code)
329{ 329{
330 if (index > 0) 330 if (index > 0)
331 return -EINVAL; 331 return -EINVAL;
332 332
333 *code = V4L2_MBUS_FMT_SGRBG8_1X8; 333 *code = MEDIA_BUS_FMT_SGRBG8_1X8;
334 return 0; 334 return 0;
335} 335}
336 336
337static int mt9v011_try_mbus_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *fmt) 337static int mt9v011_try_mbus_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *fmt)
338{ 338{
339 if (fmt->code != V4L2_MBUS_FMT_SGRBG8_1X8) 339 if (fmt->code != MEDIA_BUS_FMT_SGRBG8_1X8)
340 return -EINVAL; 340 return -EINVAL;
341 341
342 v4l_bound_align_image(&fmt->width, 48, 639, 1, 342 v4l_bound_align_image(&fmt->width, 48, 639, 1,
diff --git a/drivers/media/i2c/mt9v032.c b/drivers/media/i2c/mt9v032.c
index d044bce312e0..93687c1e4097 100644
--- a/drivers/media/i2c/mt9v032.c
+++ b/drivers/media/i2c/mt9v032.c
@@ -454,7 +454,7 @@ static int mt9v032_enum_mbus_code(struct v4l2_subdev *subdev,
454 if (code->index > 0) 454 if (code->index > 0)
455 return -EINVAL; 455 return -EINVAL;
456 456
457 code->code = V4L2_MBUS_FMT_SGRBG10_1X10; 457 code->code = MEDIA_BUS_FMT_SGRBG10_1X10;
458 return 0; 458 return 0;
459} 459}
460 460
@@ -462,7 +462,7 @@ static int mt9v032_enum_frame_size(struct v4l2_subdev *subdev,
462 struct v4l2_subdev_fh *fh, 462 struct v4l2_subdev_fh *fh,
463 struct v4l2_subdev_frame_size_enum *fse) 463 struct v4l2_subdev_frame_size_enum *fse)
464{ 464{
465 if (fse->index >= 3 || fse->code != V4L2_MBUS_FMT_SGRBG10_1X10) 465 if (fse->index >= 3 || fse->code != MEDIA_BUS_FMT_SGRBG10_1X10)
466 return -EINVAL; 466 return -EINVAL;
467 467
468 fse->min_width = MT9V032_WINDOW_WIDTH_DEF / (1 << fse->index); 468 fse->min_width = MT9V032_WINDOW_WIDTH_DEF / (1 << fse->index);
@@ -814,9 +814,9 @@ static int mt9v032_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
814 format = v4l2_subdev_get_try_format(fh, 0); 814 format = v4l2_subdev_get_try_format(fh, 0);
815 815
816 if (mt9v032->model->color) 816 if (mt9v032->model->color)
817 format->code = V4L2_MBUS_FMT_SGRBG10_1X10; 817 format->code = MEDIA_BUS_FMT_SGRBG10_1X10;
818 else 818 else
819 format->code = V4L2_MBUS_FMT_Y10_1X10; 819 format->code = MEDIA_BUS_FMT_Y10_1X10;
820 820
821 format->width = MT9V032_WINDOW_WIDTH_DEF; 821 format->width = MT9V032_WINDOW_WIDTH_DEF;
822 format->height = MT9V032_WINDOW_HEIGHT_DEF; 822 format->height = MT9V032_WINDOW_HEIGHT_DEF;
@@ -966,9 +966,9 @@ static int mt9v032_probe(struct i2c_client *client,
966 mt9v032->crop.height = MT9V032_WINDOW_HEIGHT_DEF; 966 mt9v032->crop.height = MT9V032_WINDOW_HEIGHT_DEF;
967 967
968 if (mt9v032->model->color) 968 if (mt9v032->model->color)
969 mt9v032->format.code = V4L2_MBUS_FMT_SGRBG10_1X10; 969 mt9v032->format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
970 else 970 else
971 mt9v032->format.code = V4L2_MBUS_FMT_Y10_1X10; 971 mt9v032->format.code = MEDIA_BUS_FMT_Y10_1X10;
972 972
973 mt9v032->format.width = MT9V032_WINDOW_WIDTH_DEF; 973 mt9v032->format.width = MT9V032_WINDOW_WIDTH_DEF;
974 mt9v032->format.height = MT9V032_WINDOW_HEIGHT_DEF; 974 mt9v032->format.height = MT9V032_WINDOW_HEIGHT_DEF;
diff --git a/drivers/media/i2c/noon010pc30.c b/drivers/media/i2c/noon010pc30.c
index 7eae48766e2b..00c7b26f4823 100644
--- a/drivers/media/i2c/noon010pc30.c
+++ b/drivers/media/i2c/noon010pc30.c
@@ -112,7 +112,7 @@ MODULE_PARM_DESC(debug, "Enable module debug trace. Set to 1 to enable.");
112#define REG_TERM 0xFFFF 112#define REG_TERM 0xFFFF
113 113
114struct noon010_format { 114struct noon010_format {
115 enum v4l2_mbus_pixelcode code; 115 u32 code;
116 enum v4l2_colorspace colorspace; 116 enum v4l2_colorspace colorspace;
117 u16 ispctl1_reg; 117 u16 ispctl1_reg;
118}; 118};
@@ -175,23 +175,23 @@ static const struct noon010_frmsize noon010_sizes[] = {
175/* Supported pixel formats. */ 175/* Supported pixel formats. */
176static const struct noon010_format noon010_formats[] = { 176static const struct noon010_format noon010_formats[] = {
177 { 177 {
178 .code = V4L2_MBUS_FMT_YUYV8_2X8, 178 .code = MEDIA_BUS_FMT_YUYV8_2X8,
179 .colorspace = V4L2_COLORSPACE_JPEG, 179 .colorspace = V4L2_COLORSPACE_JPEG,
180 .ispctl1_reg = 0x03, 180 .ispctl1_reg = 0x03,
181 }, { 181 }, {
182 .code = V4L2_MBUS_FMT_YVYU8_2X8, 182 .code = MEDIA_BUS_FMT_YVYU8_2X8,
183 .colorspace = V4L2_COLORSPACE_JPEG, 183 .colorspace = V4L2_COLORSPACE_JPEG,
184 .ispctl1_reg = 0x02, 184 .ispctl1_reg = 0x02,
185 }, { 185 }, {
186 .code = V4L2_MBUS_FMT_VYUY8_2X8, 186 .code = MEDIA_BUS_FMT_VYUY8_2X8,
187 .colorspace = V4L2_COLORSPACE_JPEG, 187 .colorspace = V4L2_COLORSPACE_JPEG,
188 .ispctl1_reg = 0, 188 .ispctl1_reg = 0,
189 }, { 189 }, {
190 .code = V4L2_MBUS_FMT_UYVY8_2X8, 190 .code = MEDIA_BUS_FMT_UYVY8_2X8,
191 .colorspace = V4L2_COLORSPACE_JPEG, 191 .colorspace = V4L2_COLORSPACE_JPEG,
192 .ispctl1_reg = 0x01, 192 .ispctl1_reg = 0x01,
193 }, { 193 }, {
194 .code = V4L2_MBUS_FMT_RGB565_2X8_BE, 194 .code = MEDIA_BUS_FMT_RGB565_2X8_BE,
195 .colorspace = V4L2_COLORSPACE_JPEG, 195 .colorspace = V4L2_COLORSPACE_JPEG,
196 .ispctl1_reg = 0x40, 196 .ispctl1_reg = 0x40,
197 }, 197 },
diff --git a/drivers/media/i2c/ov7670.c b/drivers/media/i2c/ov7670.c
index dd3db2458a4f..957927f7a353 100644
--- a/drivers/media/i2c/ov7670.c
+++ b/drivers/media/i2c/ov7670.c
@@ -632,31 +632,31 @@ static int ov7670_detect(struct v4l2_subdev *sd)
632 * The magic matrix numbers come from OmniVision. 632 * The magic matrix numbers come from OmniVision.
633 */ 633 */
634static struct ov7670_format_struct { 634static struct ov7670_format_struct {
635 enum v4l2_mbus_pixelcode mbus_code; 635 u32 mbus_code;
636 enum v4l2_colorspace colorspace; 636 enum v4l2_colorspace colorspace;
637 struct regval_list *regs; 637 struct regval_list *regs;
638 int cmatrix[CMATRIX_LEN]; 638 int cmatrix[CMATRIX_LEN];
639} ov7670_formats[] = { 639} ov7670_formats[] = {
640 { 640 {
641 .mbus_code = V4L2_MBUS_FMT_YUYV8_2X8, 641 .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
642 .colorspace = V4L2_COLORSPACE_JPEG, 642 .colorspace = V4L2_COLORSPACE_JPEG,
643 .regs = ov7670_fmt_yuv422, 643 .regs = ov7670_fmt_yuv422,
644 .cmatrix = { 128, -128, 0, -34, -94, 128 }, 644 .cmatrix = { 128, -128, 0, -34, -94, 128 },
645 }, 645 },
646 { 646 {
647 .mbus_code = V4L2_MBUS_FMT_RGB444_2X8_PADHI_LE, 647 .mbus_code = MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE,
648 .colorspace = V4L2_COLORSPACE_SRGB, 648 .colorspace = V4L2_COLORSPACE_SRGB,
649 .regs = ov7670_fmt_rgb444, 649 .regs = ov7670_fmt_rgb444,
650 .cmatrix = { 179, -179, 0, -61, -176, 228 }, 650 .cmatrix = { 179, -179, 0, -61, -176, 228 },
651 }, 651 },
652 { 652 {
653 .mbus_code = V4L2_MBUS_FMT_RGB565_2X8_LE, 653 .mbus_code = MEDIA_BUS_FMT_RGB565_2X8_LE,
654 .colorspace = V4L2_COLORSPACE_SRGB, 654 .colorspace = V4L2_COLORSPACE_SRGB,
655 .regs = ov7670_fmt_rgb565, 655 .regs = ov7670_fmt_rgb565,
656 .cmatrix = { 179, -179, 0, -61, -176, 228 }, 656 .cmatrix = { 179, -179, 0, -61, -176, 228 },
657 }, 657 },
658 { 658 {
659 .mbus_code = V4L2_MBUS_FMT_SBGGR8_1X8, 659 .mbus_code = MEDIA_BUS_FMT_SBGGR8_1X8,
660 .colorspace = V4L2_COLORSPACE_SRGB, 660 .colorspace = V4L2_COLORSPACE_SRGB,
661 .regs = ov7670_fmt_raw, 661 .regs = ov7670_fmt_raw,
662 .cmatrix = { 0, 0, 0, 0, 0, 0 }, 662 .cmatrix = { 0, 0, 0, 0, 0, 0 },
@@ -772,7 +772,7 @@ static void ov7675_get_framerate(struct v4l2_subdev *sd,
772 pll_factor = PLL_FACTOR; 772 pll_factor = PLL_FACTOR;
773 773
774 clkrc++; 774 clkrc++;
775 if (info->fmt->mbus_code == V4L2_MBUS_FMT_SBGGR8_1X8) 775 if (info->fmt->mbus_code == MEDIA_BUS_FMT_SBGGR8_1X8)
776 clkrc = (clkrc >> 1); 776 clkrc = (clkrc >> 1);
777 777
778 tpf->numerator = 1; 778 tpf->numerator = 1;
@@ -810,7 +810,7 @@ static int ov7675_set_framerate(struct v4l2_subdev *sd,
810 } else { 810 } else {
811 clkrc = (5 * pll_factor * info->clock_speed * tpf->numerator) / 811 clkrc = (5 * pll_factor * info->clock_speed * tpf->numerator) /
812 (4 * tpf->denominator); 812 (4 * tpf->denominator);
813 if (info->fmt->mbus_code == V4L2_MBUS_FMT_SBGGR8_1X8) 813 if (info->fmt->mbus_code == MEDIA_BUS_FMT_SBGGR8_1X8)
814 clkrc = (clkrc << 1); 814 clkrc = (clkrc << 1);
815 clkrc--; 815 clkrc--;
816 } 816 }
@@ -900,7 +900,7 @@ static int ov7670_set_hw(struct v4l2_subdev *sd, int hstart, int hstop,
900 900
901 901
902static int ov7670_enum_mbus_fmt(struct v4l2_subdev *sd, unsigned index, 902static int ov7670_enum_mbus_fmt(struct v4l2_subdev *sd, unsigned index,
903 enum v4l2_mbus_pixelcode *code) 903 u32 *code)
904{ 904{
905 if (index >= N_OV7670_FMTS) 905 if (index >= N_OV7670_FMTS)
906 return -EINVAL; 906 return -EINVAL;
diff --git a/drivers/media/i2c/ov9650.c b/drivers/media/i2c/ov9650.c
index 4da90c621f7e..2246bd5436ad 100644
--- a/drivers/media/i2c/ov9650.c
+++ b/drivers/media/i2c/ov9650.c
@@ -384,17 +384,17 @@ static const struct ov965x_framesize ov965x_framesizes[] = {
384}; 384};
385 385
386struct ov965x_pixfmt { 386struct ov965x_pixfmt {
387 enum v4l2_mbus_pixelcode code; 387 u32 code;
388 u32 colorspace; 388 u32 colorspace;
389 /* REG_TSLB value, only bits [3:2] may be set. */ 389 /* REG_TSLB value, only bits [3:2] may be set. */
390 u8 tslb_reg; 390 u8 tslb_reg;
391}; 391};
392 392
393static const struct ov965x_pixfmt ov965x_formats[] = { 393static const struct ov965x_pixfmt ov965x_formats[] = {
394 { V4L2_MBUS_FMT_YUYV8_2X8, V4L2_COLORSPACE_JPEG, 0x00}, 394 { MEDIA_BUS_FMT_YUYV8_2X8, V4L2_COLORSPACE_JPEG, 0x00},
395 { V4L2_MBUS_FMT_YVYU8_2X8, V4L2_COLORSPACE_JPEG, 0x04}, 395 { MEDIA_BUS_FMT_YVYU8_2X8, V4L2_COLORSPACE_JPEG, 0x04},
396 { V4L2_MBUS_FMT_UYVY8_2X8, V4L2_COLORSPACE_JPEG, 0x0c}, 396 { MEDIA_BUS_FMT_UYVY8_2X8, V4L2_COLORSPACE_JPEG, 0x0c},
397 { V4L2_MBUS_FMT_VYUY8_2X8, V4L2_COLORSPACE_JPEG, 0x08}, 397 { MEDIA_BUS_FMT_VYUY8_2X8, V4L2_COLORSPACE_JPEG, 0x08},
398}; 398};
399 399
400/* 400/*
diff --git a/drivers/media/i2c/s5c73m3/s5c73m3.h b/drivers/media/i2c/s5c73m3/s5c73m3.h
index 9656b6723dc6..13aed59f0f5d 100644
--- a/drivers/media/i2c/s5c73m3/s5c73m3.h
+++ b/drivers/media/i2c/s5c73m3/s5c73m3.h
@@ -27,8 +27,8 @@
27 27
28#define DRIVER_NAME "S5C73M3" 28#define DRIVER_NAME "S5C73M3"
29 29
30#define S5C73M3_ISP_FMT V4L2_MBUS_FMT_VYUY8_2X8 30#define S5C73M3_ISP_FMT MEDIA_BUS_FMT_VYUY8_2X8
31#define S5C73M3_JPEG_FMT V4L2_MBUS_FMT_S5C_UYVY_JPEG_1X8 31#define S5C73M3_JPEG_FMT MEDIA_BUS_FMT_S5C_UYVY_JPEG_1X8
32 32
33/* Subdevs pad index definitions */ 33/* Subdevs pad index definitions */
34enum s5c73m3_pads { 34enum s5c73m3_pads {
@@ -402,7 +402,7 @@ struct s5c73m3 {
402 402
403 const struct s5c73m3_frame_size *sensor_pix_size[2]; 403 const struct s5c73m3_frame_size *sensor_pix_size[2];
404 const struct s5c73m3_frame_size *oif_pix_size[2]; 404 const struct s5c73m3_frame_size *oif_pix_size[2];
405 enum v4l2_mbus_pixelcode mbus_code; 405 u32 mbus_code;
406 406
407 const struct s5c73m3_interval *fiv; 407 const struct s5c73m3_interval *fiv;
408 408
diff --git a/drivers/media/i2c/s5k4ecgx.c b/drivers/media/i2c/s5k4ecgx.c
index 1fcc76fd1bbf..d1c50c9d43ae 100644
--- a/drivers/media/i2c/s5k4ecgx.c
+++ b/drivers/media/i2c/s5k4ecgx.c
@@ -151,7 +151,7 @@ static const struct s5k4ecgx_frmsize s5k4ecgx_prev_sizes[] = {
151#define S5K4ECGX_NUM_PREV ARRAY_SIZE(s5k4ecgx_prev_sizes) 151#define S5K4ECGX_NUM_PREV ARRAY_SIZE(s5k4ecgx_prev_sizes)
152 152
153struct s5k4ecgx_pixfmt { 153struct s5k4ecgx_pixfmt {
154 enum v4l2_mbus_pixelcode code; 154 u32 code;
155 u32 colorspace; 155 u32 colorspace;
156 /* REG_TC_PCFG_Format register value */ 156 /* REG_TC_PCFG_Format register value */
157 u16 reg_p_format; 157 u16 reg_p_format;
@@ -159,7 +159,7 @@ struct s5k4ecgx_pixfmt {
159 159
160/* By default value, output from sensor will be YUV422 0-255 */ 160/* By default value, output from sensor will be YUV422 0-255 */
161static const struct s5k4ecgx_pixfmt s5k4ecgx_formats[] = { 161static const struct s5k4ecgx_pixfmt s5k4ecgx_formats[] = {
162 { V4L2_MBUS_FMT_YUYV8_2X8, V4L2_COLORSPACE_JPEG, 5 }, 162 { MEDIA_BUS_FMT_YUYV8_2X8, V4L2_COLORSPACE_JPEG, 5 },
163}; 163};
164 164
165static const char * const s5k4ecgx_supply_names[] = { 165static const char * const s5k4ecgx_supply_names[] = {
diff --git a/drivers/media/i2c/s5k5baf.c b/drivers/media/i2c/s5k5baf.c
index 0e461a6fd065..60a74d8d38d5 100644
--- a/drivers/media/i2c/s5k5baf.c
+++ b/drivers/media/i2c/s5k5baf.c
@@ -248,7 +248,7 @@ enum s5k5baf_gpio_id {
248#define NUM_ISP_PADS 2 248#define NUM_ISP_PADS 2
249 249
250struct s5k5baf_pixfmt { 250struct s5k5baf_pixfmt {
251 enum v4l2_mbus_pixelcode code; 251 u32 code;
252 u32 colorspace; 252 u32 colorspace;
253 /* REG_P_FMT(x) register value */ 253 /* REG_P_FMT(x) register value */
254 u16 reg_p_fmt; 254 u16 reg_p_fmt;
@@ -331,10 +331,10 @@ struct s5k5baf {
331}; 331};
332 332
333static const struct s5k5baf_pixfmt s5k5baf_formats[] = { 333static const struct s5k5baf_pixfmt s5k5baf_formats[] = {
334 { V4L2_MBUS_FMT_VYUY8_2X8, V4L2_COLORSPACE_JPEG, 5 }, 334 { MEDIA_BUS_FMT_VYUY8_2X8, V4L2_COLORSPACE_JPEG, 5 },
335 /* range 16-240 */ 335 /* range 16-240 */
336 { V4L2_MBUS_FMT_VYUY8_2X8, V4L2_COLORSPACE_REC709, 6 }, 336 { MEDIA_BUS_FMT_VYUY8_2X8, V4L2_COLORSPACE_REC709, 6 },
337 { V4L2_MBUS_FMT_RGB565_2X8_BE, V4L2_COLORSPACE_JPEG, 0 }, 337 { MEDIA_BUS_FMT_RGB565_2X8_BE, V4L2_COLORSPACE_JPEG, 0 },
338}; 338};
339 339
340static struct v4l2_rect s5k5baf_cis_rect = { 340static struct v4l2_rect s5k5baf_cis_rect = {
@@ -1206,7 +1206,7 @@ static int s5k5baf_enum_mbus_code(struct v4l2_subdev *sd,
1206 if (code->pad == PAD_CIS) { 1206 if (code->pad == PAD_CIS) {
1207 if (code->index > 0) 1207 if (code->index > 0)
1208 return -EINVAL; 1208 return -EINVAL;
1209 code->code = V4L2_MBUS_FMT_FIXED; 1209 code->code = MEDIA_BUS_FMT_FIXED;
1210 return 0; 1210 return 0;
1211 } 1211 }
1212 1212
@@ -1227,7 +1227,7 @@ static int s5k5baf_enum_frame_size(struct v4l2_subdev *sd,
1227 return -EINVAL; 1227 return -EINVAL;
1228 1228
1229 if (fse->pad == PAD_CIS) { 1229 if (fse->pad == PAD_CIS) {
1230 fse->code = V4L2_MBUS_FMT_FIXED; 1230 fse->code = MEDIA_BUS_FMT_FIXED;
1231 fse->min_width = S5K5BAF_CIS_WIDTH; 1231 fse->min_width = S5K5BAF_CIS_WIDTH;
1232 fse->max_width = S5K5BAF_CIS_WIDTH; 1232 fse->max_width = S5K5BAF_CIS_WIDTH;
1233 fse->min_height = S5K5BAF_CIS_HEIGHT; 1233 fse->min_height = S5K5BAF_CIS_HEIGHT;
@@ -1252,7 +1252,7 @@ static void s5k5baf_try_cis_format(struct v4l2_mbus_framefmt *mf)
1252{ 1252{
1253 mf->width = S5K5BAF_CIS_WIDTH; 1253 mf->width = S5K5BAF_CIS_WIDTH;
1254 mf->height = S5K5BAF_CIS_HEIGHT; 1254 mf->height = S5K5BAF_CIS_HEIGHT;
1255 mf->code = V4L2_MBUS_FMT_FIXED; 1255 mf->code = MEDIA_BUS_FMT_FIXED;
1256 mf->colorspace = V4L2_COLORSPACE_JPEG; 1256 mf->colorspace = V4L2_COLORSPACE_JPEG;
1257 mf->field = V4L2_FIELD_NONE; 1257 mf->field = V4L2_FIELD_NONE;
1258} 1258}
diff --git a/drivers/media/i2c/s5k6a3.c b/drivers/media/i2c/s5k6a3.c
index c11a40850ed1..91b841a1b850 100644
--- a/drivers/media/i2c/s5k6a3.c
+++ b/drivers/media/i2c/s5k6a3.c
@@ -80,7 +80,7 @@ static inline struct s5k6a3 *sd_to_s5k6a3(struct v4l2_subdev *sd)
80 80
81static const struct v4l2_mbus_framefmt s5k6a3_formats[] = { 81static const struct v4l2_mbus_framefmt s5k6a3_formats[] = {
82 { 82 {
83 .code = V4L2_MBUS_FMT_SGRBG10_1X10, 83 .code = MEDIA_BUS_FMT_SGRBG10_1X10,
84 .colorspace = V4L2_COLORSPACE_SRGB, 84 .colorspace = V4L2_COLORSPACE_SRGB,
85 .field = V4L2_FIELD_NONE, 85 .field = V4L2_FIELD_NONE,
86 } 86 }
diff --git a/drivers/media/i2c/s5k6aa.c b/drivers/media/i2c/s5k6aa.c
index 629a5cdadd3a..2851581e0061 100644
--- a/drivers/media/i2c/s5k6aa.c
+++ b/drivers/media/i2c/s5k6aa.c
@@ -191,7 +191,7 @@ struct s5k6aa_regval {
191}; 191};
192 192
193struct s5k6aa_pixfmt { 193struct s5k6aa_pixfmt {
194 enum v4l2_mbus_pixelcode code; 194 u32 code;
195 u32 colorspace; 195 u32 colorspace;
196 /* REG_P_FMT(x) register value */ 196 /* REG_P_FMT(x) register value */
197 u16 reg_p_fmt; 197 u16 reg_p_fmt;
@@ -285,10 +285,10 @@ static struct s5k6aa_regval s5k6aa_analog_config[] = {
285 285
286/* TODO: Add RGB888 and Bayer format */ 286/* TODO: Add RGB888 and Bayer format */
287static const struct s5k6aa_pixfmt s5k6aa_formats[] = { 287static const struct s5k6aa_pixfmt s5k6aa_formats[] = {
288 { V4L2_MBUS_FMT_YUYV8_2X8, V4L2_COLORSPACE_JPEG, 5 }, 288 { MEDIA_BUS_FMT_YUYV8_2X8, V4L2_COLORSPACE_JPEG, 5 },
289 /* range 16-240 */ 289 /* range 16-240 */
290 { V4L2_MBUS_FMT_YUYV8_2X8, V4L2_COLORSPACE_REC709, 6 }, 290 { MEDIA_BUS_FMT_YUYV8_2X8, V4L2_COLORSPACE_REC709, 6 },
291 { V4L2_MBUS_FMT_RGB565_2X8_BE, V4L2_COLORSPACE_JPEG, 0 }, 291 { MEDIA_BUS_FMT_RGB565_2X8_BE, V4L2_COLORSPACE_JPEG, 0 },
292}; 292};
293 293
294static const struct s5k6aa_interval s5k6aa_intervals[] = { 294static const struct s5k6aa_interval s5k6aa_intervals[] = {
diff --git a/drivers/media/i2c/saa6752hs.c b/drivers/media/i2c/saa6752hs.c
index 4024ea6f1371..f14c0e6435a3 100644
--- a/drivers/media/i2c/saa6752hs.c
+++ b/drivers/media/i2c/saa6752hs.c
@@ -562,7 +562,7 @@ static int saa6752hs_g_mbus_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefm
562 h->video_format = SAA6752HS_VF_D1; 562 h->video_format = SAA6752HS_VF_D1;
563 f->width = v4l2_format_table[h->video_format].fmt.pix.width; 563 f->width = v4l2_format_table[h->video_format].fmt.pix.width;
564 f->height = v4l2_format_table[h->video_format].fmt.pix.height; 564 f->height = v4l2_format_table[h->video_format].fmt.pix.height;
565 f->code = V4L2_MBUS_FMT_FIXED; 565 f->code = MEDIA_BUS_FMT_FIXED;
566 f->field = V4L2_FIELD_INTERLACED; 566 f->field = V4L2_FIELD_INTERLACED;
567 f->colorspace = V4L2_COLORSPACE_SMPTE170M; 567 f->colorspace = V4L2_COLORSPACE_SMPTE170M;
568 return 0; 568 return 0;
@@ -572,7 +572,7 @@ static int saa6752hs_try_mbus_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_frame
572{ 572{
573 int dist_352, dist_480, dist_720; 573 int dist_352, dist_480, dist_720;
574 574
575 f->code = V4L2_MBUS_FMT_FIXED; 575 f->code = MEDIA_BUS_FMT_FIXED;
576 576
577 dist_352 = abs(f->width - 352); 577 dist_352 = abs(f->width - 352);
578 dist_480 = abs(f->width - 480); 578 dist_480 = abs(f->width - 480);
@@ -599,7 +599,7 @@ static int saa6752hs_s_mbus_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefm
599{ 599{
600 struct saa6752hs_state *h = to_state(sd); 600 struct saa6752hs_state *h = to_state(sd);
601 601
602 if (f->code != V4L2_MBUS_FMT_FIXED) 602 if (f->code != MEDIA_BUS_FMT_FIXED)
603 return -EINVAL; 603 return -EINVAL;
604 604
605 /* 605 /*
diff --git a/drivers/media/i2c/saa7115.c b/drivers/media/i2c/saa7115.c
index 35a44648150e..7147c8b68fac 100644
--- a/drivers/media/i2c/saa7115.c
+++ b/drivers/media/i2c/saa7115.c
@@ -1172,7 +1172,7 @@ static int saa711x_s_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_f
1172 1172
1173static int saa711x_s_mbus_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *fmt) 1173static int saa711x_s_mbus_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *fmt)
1174{ 1174{
1175 if (fmt->code != V4L2_MBUS_FMT_FIXED) 1175 if (fmt->code != MEDIA_BUS_FMT_FIXED)
1176 return -EINVAL; 1176 return -EINVAL;
1177 fmt->field = V4L2_FIELD_INTERLACED; 1177 fmt->field = V4L2_FIELD_INTERLACED;
1178 fmt->colorspace = V4L2_COLORSPACE_SMPTE170M; 1178 fmt->colorspace = V4L2_COLORSPACE_SMPTE170M;
diff --git a/drivers/media/i2c/saa717x.c b/drivers/media/i2c/saa717x.c
index 6922a9f9a5cd..0d0f9a917cd3 100644
--- a/drivers/media/i2c/saa717x.c
+++ b/drivers/media/i2c/saa717x.c
@@ -998,7 +998,7 @@ static int saa717x_s_mbus_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt
998 998
999 v4l2_dbg(1, debug, sd, "decoder set size\n"); 999 v4l2_dbg(1, debug, sd, "decoder set size\n");
1000 1000
1001 if (fmt->code != V4L2_MBUS_FMT_FIXED) 1001 if (fmt->code != MEDIA_BUS_FMT_FIXED)
1002 return -EINVAL; 1002 return -EINVAL;
1003 1003
1004 /* FIXME need better bounds checking here */ 1004 /* FIXME need better bounds checking here */
diff --git a/drivers/media/i2c/smiapp-pll.c b/drivers/media/i2c/smiapp-pll.c
index 2335529b195c..e40d9027df3d 100644
--- a/drivers/media/i2c/smiapp-pll.c
+++ b/drivers/media/i2c/smiapp-pll.c
@@ -65,26 +65,89 @@ static int bounds_check(struct device *dev, uint32_t val,
65 65
66static void print_pll(struct device *dev, struct smiapp_pll *pll) 66static void print_pll(struct device *dev, struct smiapp_pll *pll)
67{ 67{
68 dev_dbg(dev, "pre_pll_clk_div\t%d\n", pll->pre_pll_clk_div); 68 dev_dbg(dev, "pre_pll_clk_div\t%u\n", pll->pre_pll_clk_div);
69 dev_dbg(dev, "pll_multiplier \t%d\n", pll->pll_multiplier); 69 dev_dbg(dev, "pll_multiplier \t%u\n", pll->pll_multiplier);
70 if (pll->flags != SMIAPP_PLL_FLAG_NO_OP_CLOCKS) { 70 if (!(pll->flags & SMIAPP_PLL_FLAG_NO_OP_CLOCKS)) {
71 dev_dbg(dev, "op_sys_clk_div \t%d\n", pll->op_sys_clk_div); 71 dev_dbg(dev, "op_sys_clk_div \t%u\n", pll->op.sys_clk_div);
72 dev_dbg(dev, "op_pix_clk_div \t%d\n", pll->op_pix_clk_div); 72 dev_dbg(dev, "op_pix_clk_div \t%u\n", pll->op.pix_clk_div);
73 } 73 }
74 dev_dbg(dev, "vt_sys_clk_div \t%d\n", pll->vt_sys_clk_div); 74 dev_dbg(dev, "vt_sys_clk_div \t%u\n", pll->vt.sys_clk_div);
75 dev_dbg(dev, "vt_pix_clk_div \t%d\n", pll->vt_pix_clk_div); 75 dev_dbg(dev, "vt_pix_clk_div \t%u\n", pll->vt.pix_clk_div);
76 76
77 dev_dbg(dev, "ext_clk_freq_hz \t%d\n", pll->ext_clk_freq_hz); 77 dev_dbg(dev, "ext_clk_freq_hz \t%u\n", pll->ext_clk_freq_hz);
78 dev_dbg(dev, "pll_ip_clk_freq_hz \t%d\n", pll->pll_ip_clk_freq_hz); 78 dev_dbg(dev, "pll_ip_clk_freq_hz \t%u\n", pll->pll_ip_clk_freq_hz);
79 dev_dbg(dev, "pll_op_clk_freq_hz \t%d\n", pll->pll_op_clk_freq_hz); 79 dev_dbg(dev, "pll_op_clk_freq_hz \t%u\n", pll->pll_op_clk_freq_hz);
80 if (pll->flags & SMIAPP_PLL_FLAG_NO_OP_CLOCKS) { 80 if (!(pll->flags & SMIAPP_PLL_FLAG_NO_OP_CLOCKS)) {
81 dev_dbg(dev, "op_sys_clk_freq_hz \t%d\n", 81 dev_dbg(dev, "op_sys_clk_freq_hz \t%u\n",
82 pll->op_sys_clk_freq_hz); 82 pll->op.sys_clk_freq_hz);
83 dev_dbg(dev, "op_pix_clk_freq_hz \t%d\n", 83 dev_dbg(dev, "op_pix_clk_freq_hz \t%u\n",
84 pll->op_pix_clk_freq_hz); 84 pll->op.pix_clk_freq_hz);
85 } 85 }
86 dev_dbg(dev, "vt_sys_clk_freq_hz \t%d\n", pll->vt_sys_clk_freq_hz); 86 dev_dbg(dev, "vt_sys_clk_freq_hz \t%u\n", pll->vt.sys_clk_freq_hz);
87 dev_dbg(dev, "vt_pix_clk_freq_hz \t%d\n", pll->vt_pix_clk_freq_hz); 87 dev_dbg(dev, "vt_pix_clk_freq_hz \t%u\n", pll->vt.pix_clk_freq_hz);
88}
89
90static int check_all_bounds(struct device *dev,
91 const struct smiapp_pll_limits *limits,
92 const struct smiapp_pll_branch_limits *op_limits,
93 struct smiapp_pll *pll,
94 struct smiapp_pll_branch *op_pll)
95{
96 int rval;
97
98 rval = bounds_check(dev, pll->pll_ip_clk_freq_hz,
99 limits->min_pll_ip_freq_hz,
100 limits->max_pll_ip_freq_hz,
101 "pll_ip_clk_freq_hz");
102 if (!rval)
103 rval = bounds_check(
104 dev, pll->pll_multiplier,
105 limits->min_pll_multiplier, limits->max_pll_multiplier,
106 "pll_multiplier");
107 if (!rval)
108 rval = bounds_check(
109 dev, pll->pll_op_clk_freq_hz,
110 limits->min_pll_op_freq_hz, limits->max_pll_op_freq_hz,
111 "pll_op_clk_freq_hz");
112 if (!rval)
113 rval = bounds_check(
114 dev, op_pll->sys_clk_div,
115 op_limits->min_sys_clk_div, op_limits->max_sys_clk_div,
116 "op_sys_clk_div");
117 if (!rval)
118 rval = bounds_check(
119 dev, op_pll->sys_clk_freq_hz,
120 op_limits->min_sys_clk_freq_hz,
121 op_limits->max_sys_clk_freq_hz,
122 "op_sys_clk_freq_hz");
123 if (!rval)
124 rval = bounds_check(
125 dev, op_pll->pix_clk_freq_hz,
126 op_limits->min_pix_clk_freq_hz,
127 op_limits->max_pix_clk_freq_hz,
128 "op_pix_clk_freq_hz");
129
130 /*
131 * If there are no OP clocks, the VT clocks are contained in
132 * the OP clock struct.
133 */
134 if (pll->flags & SMIAPP_PLL_FLAG_NO_OP_CLOCKS)
135 return rval;
136
137 if (!rval)
138 rval = bounds_check(
139 dev, pll->vt.sys_clk_freq_hz,
140 limits->vt.min_sys_clk_freq_hz,
141 limits->vt.max_sys_clk_freq_hz,
142 "vt_sys_clk_freq_hz");
143 if (!rval)
144 rval = bounds_check(
145 dev, pll->vt.pix_clk_freq_hz,
146 limits->vt.min_pix_clk_freq_hz,
147 limits->vt.max_pix_clk_freq_hz,
148 "vt_pix_clk_freq_hz");
149
150 return rval;
88} 151}
89 152
90/* 153/*
@@ -98,10 +161,11 @@ static void print_pll(struct device *dev, struct smiapp_pll *pll)
98 * 161 *
99 * @return Zero on success, error code on error. 162 * @return Zero on success, error code on error.
100 */ 163 */
101static int __smiapp_pll_calculate(struct device *dev, 164static int __smiapp_pll_calculate(
102 const struct smiapp_pll_limits *limits, 165 struct device *dev, const struct smiapp_pll_limits *limits,
103 struct smiapp_pll *pll, uint32_t mul, 166 const struct smiapp_pll_branch_limits *op_limits,
104 uint32_t div, uint32_t lane_op_clock_ratio) 167 struct smiapp_pll *pll, struct smiapp_pll_branch *op_pll, uint32_t mul,
168 uint32_t div, uint32_t lane_op_clock_ratio)
105{ 169{
106 uint32_t sys_div; 170 uint32_t sys_div;
107 uint32_t best_pix_div = INT_MAX >> 1; 171 uint32_t best_pix_div = INT_MAX >> 1;
@@ -117,17 +181,16 @@ static int __smiapp_pll_calculate(struct device *dev,
117 uint32_t min_vt_div, max_vt_div, vt_div; 181 uint32_t min_vt_div, max_vt_div, vt_div;
118 uint32_t min_sys_div, max_sys_div; 182 uint32_t min_sys_div, max_sys_div;
119 unsigned int i; 183 unsigned int i;
120 int rval;
121 184
122 /* 185 /*
123 * Get pre_pll_clk_div so that our pll_op_clk_freq_hz won't be 186 * Get pre_pll_clk_div so that our pll_op_clk_freq_hz won't be
124 * too high. 187 * too high.
125 */ 188 */
126 dev_dbg(dev, "pre_pll_clk_div %d\n", pll->pre_pll_clk_div); 189 dev_dbg(dev, "pre_pll_clk_div %u\n", pll->pre_pll_clk_div);
127 190
128 /* Don't go above max pll multiplier. */ 191 /* Don't go above max pll multiplier. */
129 more_mul_max = limits->max_pll_multiplier / mul; 192 more_mul_max = limits->max_pll_multiplier / mul;
130 dev_dbg(dev, "more_mul_max: max_pll_multiplier check: %d\n", 193 dev_dbg(dev, "more_mul_max: max_pll_multiplier check: %u\n",
131 more_mul_max); 194 more_mul_max);
132 /* Don't go above max pll op frequency. */ 195 /* Don't go above max pll op frequency. */
133 more_mul_max = 196 more_mul_max =
@@ -135,30 +198,30 @@ static int __smiapp_pll_calculate(struct device *dev,
135 more_mul_max, 198 more_mul_max,
136 limits->max_pll_op_freq_hz 199 limits->max_pll_op_freq_hz
137 / (pll->ext_clk_freq_hz / pll->pre_pll_clk_div * mul)); 200 / (pll->ext_clk_freq_hz / pll->pre_pll_clk_div * mul));
138 dev_dbg(dev, "more_mul_max: max_pll_op_freq_hz check: %d\n", 201 dev_dbg(dev, "more_mul_max: max_pll_op_freq_hz check: %u\n",
139 more_mul_max); 202 more_mul_max);
140 /* Don't go above the division capability of op sys clock divider. */ 203 /* Don't go above the division capability of op sys clock divider. */
141 more_mul_max = min(more_mul_max, 204 more_mul_max = min(more_mul_max,
142 limits->op.max_sys_clk_div * pll->pre_pll_clk_div 205 op_limits->max_sys_clk_div * pll->pre_pll_clk_div
143 / div); 206 / div);
144 dev_dbg(dev, "more_mul_max: max_op_sys_clk_div check: %d\n", 207 dev_dbg(dev, "more_mul_max: max_op_sys_clk_div check: %u\n",
145 more_mul_max); 208 more_mul_max);
146 /* Ensure we won't go above min_pll_multiplier. */ 209 /* Ensure we won't go above min_pll_multiplier. */
147 more_mul_max = min(more_mul_max, 210 more_mul_max = min(more_mul_max,
148 DIV_ROUND_UP(limits->max_pll_multiplier, mul)); 211 DIV_ROUND_UP(limits->max_pll_multiplier, mul));
149 dev_dbg(dev, "more_mul_max: min_pll_multiplier check: %d\n", 212 dev_dbg(dev, "more_mul_max: min_pll_multiplier check: %u\n",
150 more_mul_max); 213 more_mul_max);
151 214
152 /* Ensure we won't go below min_pll_op_freq_hz. */ 215 /* Ensure we won't go below min_pll_op_freq_hz. */
153 more_mul_min = DIV_ROUND_UP(limits->min_pll_op_freq_hz, 216 more_mul_min = DIV_ROUND_UP(limits->min_pll_op_freq_hz,
154 pll->ext_clk_freq_hz / pll->pre_pll_clk_div 217 pll->ext_clk_freq_hz / pll->pre_pll_clk_div
155 * mul); 218 * mul);
156 dev_dbg(dev, "more_mul_min: min_pll_op_freq_hz check: %d\n", 219 dev_dbg(dev, "more_mul_min: min_pll_op_freq_hz check: %u\n",
157 more_mul_min); 220 more_mul_min);
158 /* Ensure we won't go below min_pll_multiplier. */ 221 /* Ensure we won't go below min_pll_multiplier. */
159 more_mul_min = max(more_mul_min, 222 more_mul_min = max(more_mul_min,
160 DIV_ROUND_UP(limits->min_pll_multiplier, mul)); 223 DIV_ROUND_UP(limits->min_pll_multiplier, mul));
161 dev_dbg(dev, "more_mul_min: min_pll_multiplier check: %d\n", 224 dev_dbg(dev, "more_mul_min: min_pll_multiplier check: %u\n",
162 more_mul_min); 225 more_mul_min);
163 226
164 if (more_mul_min > more_mul_max) { 227 if (more_mul_min > more_mul_max) {
@@ -168,23 +231,23 @@ static int __smiapp_pll_calculate(struct device *dev,
168 } 231 }
169 232
170 more_mul_factor = lcm(div, pll->pre_pll_clk_div) / div; 233 more_mul_factor = lcm(div, pll->pre_pll_clk_div) / div;
171 dev_dbg(dev, "more_mul_factor: %d\n", more_mul_factor); 234 dev_dbg(dev, "more_mul_factor: %u\n", more_mul_factor);
172 more_mul_factor = lcm(more_mul_factor, limits->op.min_sys_clk_div); 235 more_mul_factor = lcm(more_mul_factor, op_limits->min_sys_clk_div);
173 dev_dbg(dev, "more_mul_factor: min_op_sys_clk_div: %d\n", 236 dev_dbg(dev, "more_mul_factor: min_op_sys_clk_div: %d\n",
174 more_mul_factor); 237 more_mul_factor);
175 i = roundup(more_mul_min, more_mul_factor); 238 i = roundup(more_mul_min, more_mul_factor);
176 if (!is_one_or_even(i)) 239 if (!is_one_or_even(i))
177 i <<= 1; 240 i <<= 1;
178 241
179 dev_dbg(dev, "final more_mul: %d\n", i); 242 dev_dbg(dev, "final more_mul: %u\n", i);
180 if (i > more_mul_max) { 243 if (i > more_mul_max) {
181 dev_dbg(dev, "final more_mul is bad, max %d\n", more_mul_max); 244 dev_dbg(dev, "final more_mul is bad, max %u\n", more_mul_max);
182 return -EINVAL; 245 return -EINVAL;
183 } 246 }
184 247
185 pll->pll_multiplier = mul * i; 248 pll->pll_multiplier = mul * i;
186 pll->op_sys_clk_div = div * i / pll->pre_pll_clk_div; 249 op_pll->sys_clk_div = div * i / pll->pre_pll_clk_div;
187 dev_dbg(dev, "op_sys_clk_div: %d\n", pll->op_sys_clk_div); 250 dev_dbg(dev, "op_sys_clk_div: %u\n", op_pll->sys_clk_div);
188 251
189 pll->pll_ip_clk_freq_hz = pll->ext_clk_freq_hz 252 pll->pll_ip_clk_freq_hz = pll->ext_clk_freq_hz
190 / pll->pre_pll_clk_div; 253 / pll->pre_pll_clk_div;
@@ -193,14 +256,19 @@ static int __smiapp_pll_calculate(struct device *dev,
193 * pll->pll_multiplier; 256 * pll->pll_multiplier;
194 257
195 /* Derive pll_op_clk_freq_hz. */ 258 /* Derive pll_op_clk_freq_hz. */
196 pll->op_sys_clk_freq_hz = 259 op_pll->sys_clk_freq_hz =
197 pll->pll_op_clk_freq_hz / pll->op_sys_clk_div; 260 pll->pll_op_clk_freq_hz / op_pll->sys_clk_div;
261
262 op_pll->pix_clk_div = pll->bits_per_pixel;
263 dev_dbg(dev, "op_pix_clk_div: %u\n", op_pll->pix_clk_div);
198 264
199 pll->op_pix_clk_div = pll->bits_per_pixel; 265 op_pll->pix_clk_freq_hz =
200 dev_dbg(dev, "op_pix_clk_div: %d\n", pll->op_pix_clk_div); 266 op_pll->sys_clk_freq_hz / op_pll->pix_clk_div;
201 267
202 pll->op_pix_clk_freq_hz = 268 if (pll->flags & SMIAPP_PLL_FLAG_NO_OP_CLOCKS) {
203 pll->op_sys_clk_freq_hz / pll->op_pix_clk_div; 269 /* No OP clocks --- VT clocks are used instead. */
270 goto out_skip_vt_calc;
271 }
204 272
205 /* 273 /*
206 * Some sensors perform analogue binning and some do this 274 * Some sensors perform analogue binning and some do this
@@ -214,7 +282,7 @@ static int __smiapp_pll_calculate(struct device *dev,
214 vt_op_binning_div = pll->binning_horizontal; 282 vt_op_binning_div = pll->binning_horizontal;
215 else 283 else
216 vt_op_binning_div = 1; 284 vt_op_binning_div = 1;
217 dev_dbg(dev, "vt_op_binning_div: %d\n", vt_op_binning_div); 285 dev_dbg(dev, "vt_op_binning_div: %u\n", vt_op_binning_div);
218 286
219 /* 287 /*
220 * Profile 2 supports vt_pix_clk_div E [4, 10] 288 * Profile 2 supports vt_pix_clk_div E [4, 10]
@@ -227,30 +295,30 @@ static int __smiapp_pll_calculate(struct device *dev,
227 * 295 *
228 * Find absolute limits for the factor of vt divider. 296 * Find absolute limits for the factor of vt divider.
229 */ 297 */
230 dev_dbg(dev, "scale_m: %d\n", pll->scale_m); 298 dev_dbg(dev, "scale_m: %u\n", pll->scale_m);
231 min_vt_div = DIV_ROUND_UP(pll->op_pix_clk_div * pll->op_sys_clk_div 299 min_vt_div = DIV_ROUND_UP(op_pll->pix_clk_div * op_pll->sys_clk_div
232 * pll->scale_n, 300 * pll->scale_n,
233 lane_op_clock_ratio * vt_op_binning_div 301 lane_op_clock_ratio * vt_op_binning_div
234 * pll->scale_m); 302 * pll->scale_m);
235 303
236 /* Find smallest and biggest allowed vt divisor. */ 304 /* Find smallest and biggest allowed vt divisor. */
237 dev_dbg(dev, "min_vt_div: %d\n", min_vt_div); 305 dev_dbg(dev, "min_vt_div: %u\n", min_vt_div);
238 min_vt_div = max(min_vt_div, 306 min_vt_div = max(min_vt_div,
239 DIV_ROUND_UP(pll->pll_op_clk_freq_hz, 307 DIV_ROUND_UP(pll->pll_op_clk_freq_hz,
240 limits->vt.max_pix_clk_freq_hz)); 308 limits->vt.max_pix_clk_freq_hz));
241 dev_dbg(dev, "min_vt_div: max_vt_pix_clk_freq_hz: %d\n", 309 dev_dbg(dev, "min_vt_div: max_vt_pix_clk_freq_hz: %u\n",
242 min_vt_div); 310 min_vt_div);
243 min_vt_div = max_t(uint32_t, min_vt_div, 311 min_vt_div = max_t(uint32_t, min_vt_div,
244 limits->vt.min_pix_clk_div 312 limits->vt.min_pix_clk_div
245 * limits->vt.min_sys_clk_div); 313 * limits->vt.min_sys_clk_div);
246 dev_dbg(dev, "min_vt_div: min_vt_clk_div: %d\n", min_vt_div); 314 dev_dbg(dev, "min_vt_div: min_vt_clk_div: %u\n", min_vt_div);
247 315
248 max_vt_div = limits->vt.max_sys_clk_div * limits->vt.max_pix_clk_div; 316 max_vt_div = limits->vt.max_sys_clk_div * limits->vt.max_pix_clk_div;
249 dev_dbg(dev, "max_vt_div: %d\n", max_vt_div); 317 dev_dbg(dev, "max_vt_div: %u\n", max_vt_div);
250 max_vt_div = min(max_vt_div, 318 max_vt_div = min(max_vt_div,
251 DIV_ROUND_UP(pll->pll_op_clk_freq_hz, 319 DIV_ROUND_UP(pll->pll_op_clk_freq_hz,
252 limits->vt.min_pix_clk_freq_hz)); 320 limits->vt.min_pix_clk_freq_hz));
253 dev_dbg(dev, "max_vt_div: min_vt_pix_clk_freq_hz: %d\n", 321 dev_dbg(dev, "max_vt_div: min_vt_pix_clk_freq_hz: %u\n",
254 max_vt_div); 322 max_vt_div);
255 323
256 /* 324 /*
@@ -258,28 +326,28 @@ static int __smiapp_pll_calculate(struct device *dev,
258 * with all values of pix_clk_div. 326 * with all values of pix_clk_div.
259 */ 327 */
260 min_sys_div = limits->vt.min_sys_clk_div; 328 min_sys_div = limits->vt.min_sys_clk_div;
261 dev_dbg(dev, "min_sys_div: %d\n", min_sys_div); 329 dev_dbg(dev, "min_sys_div: %u\n", min_sys_div);
262 min_sys_div = max(min_sys_div, 330 min_sys_div = max(min_sys_div,
263 DIV_ROUND_UP(min_vt_div, 331 DIV_ROUND_UP(min_vt_div,
264 limits->vt.max_pix_clk_div)); 332 limits->vt.max_pix_clk_div));
265 dev_dbg(dev, "min_sys_div: max_vt_pix_clk_div: %d\n", min_sys_div); 333 dev_dbg(dev, "min_sys_div: max_vt_pix_clk_div: %u\n", min_sys_div);
266 min_sys_div = max(min_sys_div, 334 min_sys_div = max(min_sys_div,
267 pll->pll_op_clk_freq_hz 335 pll->pll_op_clk_freq_hz
268 / limits->vt.max_sys_clk_freq_hz); 336 / limits->vt.max_sys_clk_freq_hz);
269 dev_dbg(dev, "min_sys_div: max_pll_op_clk_freq_hz: %d\n", min_sys_div); 337 dev_dbg(dev, "min_sys_div: max_pll_op_clk_freq_hz: %u\n", min_sys_div);
270 min_sys_div = clk_div_even_up(min_sys_div); 338 min_sys_div = clk_div_even_up(min_sys_div);
271 dev_dbg(dev, "min_sys_div: one or even: %d\n", min_sys_div); 339 dev_dbg(dev, "min_sys_div: one or even: %u\n", min_sys_div);
272 340
273 max_sys_div = limits->vt.max_sys_clk_div; 341 max_sys_div = limits->vt.max_sys_clk_div;
274 dev_dbg(dev, "max_sys_div: %d\n", max_sys_div); 342 dev_dbg(dev, "max_sys_div: %u\n", max_sys_div);
275 max_sys_div = min(max_sys_div, 343 max_sys_div = min(max_sys_div,
276 DIV_ROUND_UP(max_vt_div, 344 DIV_ROUND_UP(max_vt_div,
277 limits->vt.min_pix_clk_div)); 345 limits->vt.min_pix_clk_div));
278 dev_dbg(dev, "max_sys_div: min_vt_pix_clk_div: %d\n", max_sys_div); 346 dev_dbg(dev, "max_sys_div: min_vt_pix_clk_div: %u\n", max_sys_div);
279 max_sys_div = min(max_sys_div, 347 max_sys_div = min(max_sys_div,
280 DIV_ROUND_UP(pll->pll_op_clk_freq_hz, 348 DIV_ROUND_UP(pll->pll_op_clk_freq_hz,
281 limits->vt.min_pix_clk_freq_hz)); 349 limits->vt.min_pix_clk_freq_hz));
282 dev_dbg(dev, "max_sys_div: min_vt_pix_clk_freq_hz: %d\n", max_sys_div); 350 dev_dbg(dev, "max_sys_div: min_vt_pix_clk_freq_hz: %u\n", max_sys_div);
283 351
284 /* 352 /*
285 * Find pix_div such that a legal pix_div * sys_div results 353 * Find pix_div such that a legal pix_div * sys_div results
@@ -296,7 +364,7 @@ static int __smiapp_pll_calculate(struct device *dev,
296 if (pix_div < limits->vt.min_pix_clk_div 364 if (pix_div < limits->vt.min_pix_clk_div
297 || pix_div > limits->vt.max_pix_clk_div) { 365 || pix_div > limits->vt.max_pix_clk_div) {
298 dev_dbg(dev, 366 dev_dbg(dev,
299 "pix_div %d too small or too big (%d--%d)\n", 367 "pix_div %u too small or too big (%u--%u)\n",
300 pix_div, 368 pix_div,
301 limits->vt.min_pix_clk_div, 369 limits->vt.min_pix_clk_div,
302 limits->vt.max_pix_clk_div); 370 limits->vt.max_pix_clk_div);
@@ -312,73 +380,28 @@ static int __smiapp_pll_calculate(struct device *dev,
312 break; 380 break;
313 } 381 }
314 382
315 pll->vt_sys_clk_div = DIV_ROUND_UP(min_vt_div, best_pix_div); 383 pll->vt.sys_clk_div = DIV_ROUND_UP(min_vt_div, best_pix_div);
316 pll->vt_pix_clk_div = best_pix_div; 384 pll->vt.pix_clk_div = best_pix_div;
317 385
318 pll->vt_sys_clk_freq_hz = 386 pll->vt.sys_clk_freq_hz =
319 pll->pll_op_clk_freq_hz / pll->vt_sys_clk_div; 387 pll->pll_op_clk_freq_hz / pll->vt.sys_clk_div;
320 pll->vt_pix_clk_freq_hz = 388 pll->vt.pix_clk_freq_hz =
321 pll->vt_sys_clk_freq_hz / pll->vt_pix_clk_div; 389 pll->vt.sys_clk_freq_hz / pll->vt.pix_clk_div;
322 390
391out_skip_vt_calc:
323 pll->pixel_rate_csi = 392 pll->pixel_rate_csi =
324 pll->op_pix_clk_freq_hz * lane_op_clock_ratio; 393 op_pll->pix_clk_freq_hz * lane_op_clock_ratio;
394 pll->pixel_rate_pixel_array = pll->vt.pix_clk_freq_hz;
325 395
326 rval = bounds_check(dev, pll->pll_ip_clk_freq_hz, 396 return check_all_bounds(dev, limits, op_limits, pll, op_pll);
327 limits->min_pll_ip_freq_hz,
328 limits->max_pll_ip_freq_hz,
329 "pll_ip_clk_freq_hz");
330 if (!rval)
331 rval = bounds_check(
332 dev, pll->pll_multiplier,
333 limits->min_pll_multiplier, limits->max_pll_multiplier,
334 "pll_multiplier");
335 if (!rval)
336 rval = bounds_check(
337 dev, pll->pll_op_clk_freq_hz,
338 limits->min_pll_op_freq_hz, limits->max_pll_op_freq_hz,
339 "pll_op_clk_freq_hz");
340 if (!rval)
341 rval = bounds_check(
342 dev, pll->op_sys_clk_div,
343 limits->op.min_sys_clk_div, limits->op.max_sys_clk_div,
344 "op_sys_clk_div");
345 if (!rval)
346 rval = bounds_check(
347 dev, pll->op_pix_clk_div,
348 limits->op.min_pix_clk_div, limits->op.max_pix_clk_div,
349 "op_pix_clk_div");
350 if (!rval)
351 rval = bounds_check(
352 dev, pll->op_sys_clk_freq_hz,
353 limits->op.min_sys_clk_freq_hz,
354 limits->op.max_sys_clk_freq_hz,
355 "op_sys_clk_freq_hz");
356 if (!rval)
357 rval = bounds_check(
358 dev, pll->op_pix_clk_freq_hz,
359 limits->op.min_pix_clk_freq_hz,
360 limits->op.max_pix_clk_freq_hz,
361 "op_pix_clk_freq_hz");
362 if (!rval)
363 rval = bounds_check(
364 dev, pll->vt_sys_clk_freq_hz,
365 limits->vt.min_sys_clk_freq_hz,
366 limits->vt.max_sys_clk_freq_hz,
367 "vt_sys_clk_freq_hz");
368 if (!rval)
369 rval = bounds_check(
370 dev, pll->vt_pix_clk_freq_hz,
371 limits->vt.min_pix_clk_freq_hz,
372 limits->vt.max_pix_clk_freq_hz,
373 "vt_pix_clk_freq_hz");
374
375 return rval;
376} 397}
377 398
378int smiapp_pll_calculate(struct device *dev, 399int smiapp_pll_calculate(struct device *dev,
379 const struct smiapp_pll_limits *limits, 400 const struct smiapp_pll_limits *limits,
380 struct smiapp_pll *pll) 401 struct smiapp_pll *pll)
381{ 402{
403 const struct smiapp_pll_branch_limits *op_limits = &limits->op;
404 struct smiapp_pll_branch *op_pll = &pll->op;
382 uint16_t min_pre_pll_clk_div; 405 uint16_t min_pre_pll_clk_div;
383 uint16_t max_pre_pll_clk_div; 406 uint16_t max_pre_pll_clk_div;
384 uint32_t lane_op_clock_ratio; 407 uint32_t lane_op_clock_ratio;
@@ -386,13 +409,23 @@ int smiapp_pll_calculate(struct device *dev,
386 unsigned int i; 409 unsigned int i;
387 int rval = -EINVAL; 410 int rval = -EINVAL;
388 411
412 if (pll->flags & SMIAPP_PLL_FLAG_NO_OP_CLOCKS) {
413 /*
414 * If there's no OP PLL at all, use the VT values
415 * instead. The OP values are ignored for the rest of
416 * the PLL calculation.
417 */
418 op_limits = &limits->vt;
419 op_pll = &pll->vt;
420 }
421
389 if (pll->flags & SMIAPP_PLL_FLAG_OP_PIX_CLOCK_PER_LANE) 422 if (pll->flags & SMIAPP_PLL_FLAG_OP_PIX_CLOCK_PER_LANE)
390 lane_op_clock_ratio = pll->csi2.lanes; 423 lane_op_clock_ratio = pll->csi2.lanes;
391 else 424 else
392 lane_op_clock_ratio = 1; 425 lane_op_clock_ratio = 1;
393 dev_dbg(dev, "lane_op_clock_ratio: %d\n", lane_op_clock_ratio); 426 dev_dbg(dev, "lane_op_clock_ratio: %u\n", lane_op_clock_ratio);
394 427
395 dev_dbg(dev, "binning: %dx%d\n", pll->binning_horizontal, 428 dev_dbg(dev, "binning: %ux%u\n", pll->binning_horizontal,
396 pll->binning_vertical); 429 pll->binning_vertical);
397 430
398 switch (pll->bus_type) { 431 switch (pll->bus_type) {
@@ -411,7 +444,7 @@ int smiapp_pll_calculate(struct device *dev,
411 } 444 }
412 445
413 /* Figure out limits for pre-pll divider based on extclk */ 446 /* Figure out limits for pre-pll divider based on extclk */
414 dev_dbg(dev, "min / max pre_pll_clk_div: %d / %d\n", 447 dev_dbg(dev, "min / max pre_pll_clk_div: %u / %u\n",
415 limits->min_pre_pll_clk_div, limits->max_pre_pll_clk_div); 448 limits->min_pre_pll_clk_div, limits->max_pre_pll_clk_div);
416 max_pre_pll_clk_div = 449 max_pre_pll_clk_div =
417 min_t(uint16_t, limits->max_pre_pll_clk_div, 450 min_t(uint16_t, limits->max_pre_pll_clk_div,
@@ -422,26 +455,27 @@ int smiapp_pll_calculate(struct device *dev,
422 clk_div_even_up( 455 clk_div_even_up(
423 DIV_ROUND_UP(pll->ext_clk_freq_hz, 456 DIV_ROUND_UP(pll->ext_clk_freq_hz,
424 limits->max_pll_ip_freq_hz))); 457 limits->max_pll_ip_freq_hz)));
425 dev_dbg(dev, "pre-pll check: min / max pre_pll_clk_div: %d / %d\n", 458 dev_dbg(dev, "pre-pll check: min / max pre_pll_clk_div: %u / %u\n",
426 min_pre_pll_clk_div, max_pre_pll_clk_div); 459 min_pre_pll_clk_div, max_pre_pll_clk_div);
427 460
428 i = gcd(pll->pll_op_clk_freq_hz, pll->ext_clk_freq_hz); 461 i = gcd(pll->pll_op_clk_freq_hz, pll->ext_clk_freq_hz);
429 mul = div_u64(pll->pll_op_clk_freq_hz, i); 462 mul = div_u64(pll->pll_op_clk_freq_hz, i);
430 div = pll->ext_clk_freq_hz / i; 463 div = pll->ext_clk_freq_hz / i;
431 dev_dbg(dev, "mul %d / div %d\n", mul, div); 464 dev_dbg(dev, "mul %u / div %u\n", mul, div);
432 465
433 min_pre_pll_clk_div = 466 min_pre_pll_clk_div =
434 max_t(uint16_t, min_pre_pll_clk_div, 467 max_t(uint16_t, min_pre_pll_clk_div,
435 clk_div_even_up( 468 clk_div_even_up(
436 DIV_ROUND_UP(mul * pll->ext_clk_freq_hz, 469 DIV_ROUND_UP(mul * pll->ext_clk_freq_hz,
437 limits->max_pll_op_freq_hz))); 470 limits->max_pll_op_freq_hz)));
438 dev_dbg(dev, "pll_op check: min / max pre_pll_clk_div: %d / %d\n", 471 dev_dbg(dev, "pll_op check: min / max pre_pll_clk_div: %u / %u\n",
439 min_pre_pll_clk_div, max_pre_pll_clk_div); 472 min_pre_pll_clk_div, max_pre_pll_clk_div);
440 473
441 for (pll->pre_pll_clk_div = min_pre_pll_clk_div; 474 for (pll->pre_pll_clk_div = min_pre_pll_clk_div;
442 pll->pre_pll_clk_div <= max_pre_pll_clk_div; 475 pll->pre_pll_clk_div <= max_pre_pll_clk_div;
443 pll->pre_pll_clk_div += 2 - (pll->pre_pll_clk_div & 1)) { 476 pll->pre_pll_clk_div += 2 - (pll->pre_pll_clk_div & 1)) {
444 rval = __smiapp_pll_calculate(dev, limits, pll, mul, div, 477 rval = __smiapp_pll_calculate(dev, limits, op_limits, pll,
478 op_pll, mul, div,
445 lane_op_clock_ratio); 479 lane_op_clock_ratio);
446 if (rval) 480 if (rval)
447 continue; 481 continue;
diff --git a/drivers/media/i2c/smiapp-pll.h b/drivers/media/i2c/smiapp-pll.h
index 5ce2b61da3c5..e8f035a50c76 100644
--- a/drivers/media/i2c/smiapp-pll.h
+++ b/drivers/media/i2c/smiapp-pll.h
@@ -35,6 +35,13 @@
35#define SMIAPP_PLL_FLAG_OP_PIX_CLOCK_PER_LANE (1 << 0) 35#define SMIAPP_PLL_FLAG_OP_PIX_CLOCK_PER_LANE (1 << 0)
36#define SMIAPP_PLL_FLAG_NO_OP_CLOCKS (1 << 1) 36#define SMIAPP_PLL_FLAG_NO_OP_CLOCKS (1 << 1)
37 37
38struct smiapp_pll_branch {
39 uint16_t sys_clk_div;
40 uint16_t pix_clk_div;
41 uint32_t sys_clk_freq_hz;
42 uint32_t pix_clk_freq_hz;
43};
44
38struct smiapp_pll { 45struct smiapp_pll {
39 /* input values */ 46 /* input values */
40 uint8_t bus_type; 47 uint8_t bus_type;
@@ -53,24 +60,18 @@ struct smiapp_pll {
53 uint8_t scale_n; 60 uint8_t scale_n;
54 uint8_t bits_per_pixel; 61 uint8_t bits_per_pixel;
55 uint32_t link_freq; 62 uint32_t link_freq;
63 uint32_t ext_clk_freq_hz;
56 64
57 /* output values */ 65 /* output values */
58 uint16_t pre_pll_clk_div; 66 uint16_t pre_pll_clk_div;
59 uint16_t pll_multiplier; 67 uint16_t pll_multiplier;
60 uint16_t op_sys_clk_div;
61 uint16_t op_pix_clk_div;
62 uint16_t vt_sys_clk_div;
63 uint16_t vt_pix_clk_div;
64
65 uint32_t ext_clk_freq_hz;
66 uint32_t pll_ip_clk_freq_hz; 68 uint32_t pll_ip_clk_freq_hz;
67 uint32_t pll_op_clk_freq_hz; 69 uint32_t pll_op_clk_freq_hz;
68 uint32_t op_sys_clk_freq_hz; 70 struct smiapp_pll_branch vt;
69 uint32_t op_pix_clk_freq_hz; 71 struct smiapp_pll_branch op;
70 uint32_t vt_sys_clk_freq_hz;
71 uint32_t vt_pix_clk_freq_hz;
72 72
73 uint32_t pixel_rate_csi; 73 uint32_t pixel_rate_csi;
74 uint32_t pixel_rate_pixel_array;
74}; 75};
75 76
76struct smiapp_pll_branch_limits { 77struct smiapp_pll_branch_limits {
diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c
index 932ed9be9ff3..0df5070e73c7 100644
--- a/drivers/media/i2c/smiapp/smiapp-core.c
+++ b/drivers/media/i2c/smiapp/smiapp-core.c
@@ -205,12 +205,12 @@ static int smiapp_pll_configure(struct smiapp_sensor *sensor)
205 int rval; 205 int rval;
206 206
207 rval = smiapp_write( 207 rval = smiapp_write(
208 sensor, SMIAPP_REG_U16_VT_PIX_CLK_DIV, pll->vt_pix_clk_div); 208 sensor, SMIAPP_REG_U16_VT_PIX_CLK_DIV, pll->vt.pix_clk_div);
209 if (rval < 0) 209 if (rval < 0)
210 return rval; 210 return rval;
211 211
212 rval = smiapp_write( 212 rval = smiapp_write(
213 sensor, SMIAPP_REG_U16_VT_SYS_CLK_DIV, pll->vt_sys_clk_div); 213 sensor, SMIAPP_REG_U16_VT_SYS_CLK_DIV, pll->vt.sys_clk_div);
214 if (rval < 0) 214 if (rval < 0)
215 return rval; 215 return rval;
216 216
@@ -227,20 +227,21 @@ static int smiapp_pll_configure(struct smiapp_sensor *sensor)
227 /* Lane op clock ratio does not apply here. */ 227 /* Lane op clock ratio does not apply here. */
228 rval = smiapp_write( 228 rval = smiapp_write(
229 sensor, SMIAPP_REG_U32_REQUESTED_LINK_BIT_RATE_MBPS, 229 sensor, SMIAPP_REG_U32_REQUESTED_LINK_BIT_RATE_MBPS,
230 DIV_ROUND_UP(pll->op_sys_clk_freq_hz, 1000000 / 256 / 256)); 230 DIV_ROUND_UP(pll->op.sys_clk_freq_hz, 1000000 / 256 / 256));
231 if (rval < 0 || sensor->minfo.smiapp_profile == SMIAPP_PROFILE_0) 231 if (rval < 0 || sensor->minfo.smiapp_profile == SMIAPP_PROFILE_0)
232 return rval; 232 return rval;
233 233
234 rval = smiapp_write( 234 rval = smiapp_write(
235 sensor, SMIAPP_REG_U16_OP_PIX_CLK_DIV, pll->op_pix_clk_div); 235 sensor, SMIAPP_REG_U16_OP_PIX_CLK_DIV, pll->op.pix_clk_div);
236 if (rval < 0) 236 if (rval < 0)
237 return rval; 237 return rval;
238 238
239 return smiapp_write( 239 return smiapp_write(
240 sensor, SMIAPP_REG_U16_OP_SYS_CLK_DIV, pll->op_sys_clk_div); 240 sensor, SMIAPP_REG_U16_OP_SYS_CLK_DIV, pll->op.sys_clk_div);
241} 241}
242 242
243static int smiapp_pll_update(struct smiapp_sensor *sensor) 243static int smiapp_pll_try(struct smiapp_sensor *sensor,
244 struct smiapp_pll *pll)
244{ 245{
245 struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); 246 struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
246 struct smiapp_pll_limits lim = { 247 struct smiapp_pll_limits lim = {
@@ -274,19 +275,15 @@ static int smiapp_pll_update(struct smiapp_sensor *sensor)
274 .min_line_length_pck_bin = sensor->limits[SMIAPP_LIMIT_MIN_LINE_LENGTH_PCK_BIN], 275 .min_line_length_pck_bin = sensor->limits[SMIAPP_LIMIT_MIN_LINE_LENGTH_PCK_BIN],
275 .min_line_length_pck = sensor->limits[SMIAPP_LIMIT_MIN_LINE_LENGTH_PCK], 276 .min_line_length_pck = sensor->limits[SMIAPP_LIMIT_MIN_LINE_LENGTH_PCK],
276 }; 277 };
278
279 return smiapp_pll_calculate(&client->dev, &lim, pll);
280}
281
282static int smiapp_pll_update(struct smiapp_sensor *sensor)
283{
277 struct smiapp_pll *pll = &sensor->pll; 284 struct smiapp_pll *pll = &sensor->pll;
278 int rval; 285 int rval;
279 286
280 if (sensor->minfo.smiapp_profile == SMIAPP_PROFILE_0) {
281 /*
282 * Fill in operational clock divisors limits from the
283 * video timing ones. On profile 0 sensors the
284 * requirements regarding them are essentially the
285 * same as on VT ones.
286 */
287 lim.op = lim.vt;
288 }
289
290 pll->binning_horizontal = sensor->binning_horizontal; 287 pll->binning_horizontal = sensor->binning_horizontal;
291 pll->binning_vertical = sensor->binning_vertical; 288 pll->binning_vertical = sensor->binning_vertical;
292 pll->link_freq = 289 pll->link_freq =
@@ -294,12 +291,12 @@ static int smiapp_pll_update(struct smiapp_sensor *sensor)
294 pll->scale_m = sensor->scale_m; 291 pll->scale_m = sensor->scale_m;
295 pll->bits_per_pixel = sensor->csi_format->compressed; 292 pll->bits_per_pixel = sensor->csi_format->compressed;
296 293
297 rval = smiapp_pll_calculate(&client->dev, &lim, pll); 294 rval = smiapp_pll_try(sensor, pll);
298 if (rval < 0) 295 if (rval < 0)
299 return rval; 296 return rval;
300 297
301 __v4l2_ctrl_s_ctrl_int64(sensor->pixel_rate_parray, 298 __v4l2_ctrl_s_ctrl_int64(sensor->pixel_rate_parray,
302 pll->vt_pix_clk_freq_hz); 299 pll->pixel_rate_pixel_array);
303 __v4l2_ctrl_s_ctrl_int64(sensor->pixel_rate_csi, pll->pixel_rate_csi); 300 __v4l2_ctrl_s_ctrl_int64(sensor->pixel_rate_csi, pll->pixel_rate_csi);
304 301
305 return 0; 302 return 0;
@@ -333,22 +330,22 @@ static void __smiapp_update_exposure_limits(struct smiapp_sensor *sensor)
333 * orders must be defined. 330 * orders must be defined.
334 */ 331 */
335static const struct smiapp_csi_data_format smiapp_csi_data_formats[] = { 332static const struct smiapp_csi_data_format smiapp_csi_data_formats[] = {
336 { V4L2_MBUS_FMT_SGRBG12_1X12, 12, 12, SMIAPP_PIXEL_ORDER_GRBG, }, 333 { MEDIA_BUS_FMT_SGRBG12_1X12, 12, 12, SMIAPP_PIXEL_ORDER_GRBG, },
337 { V4L2_MBUS_FMT_SRGGB12_1X12, 12, 12, SMIAPP_PIXEL_ORDER_RGGB, }, 334 { MEDIA_BUS_FMT_SRGGB12_1X12, 12, 12, SMIAPP_PIXEL_ORDER_RGGB, },
338 { V4L2_MBUS_FMT_SBGGR12_1X12, 12, 12, SMIAPP_PIXEL_ORDER_BGGR, }, 335 { MEDIA_BUS_FMT_SBGGR12_1X12, 12, 12, SMIAPP_PIXEL_ORDER_BGGR, },
339 { V4L2_MBUS_FMT_SGBRG12_1X12, 12, 12, SMIAPP_PIXEL_ORDER_GBRG, }, 336 { MEDIA_BUS_FMT_SGBRG12_1X12, 12, 12, SMIAPP_PIXEL_ORDER_GBRG, },
340 { V4L2_MBUS_FMT_SGRBG10_1X10, 10, 10, SMIAPP_PIXEL_ORDER_GRBG, }, 337 { MEDIA_BUS_FMT_SGRBG10_1X10, 10, 10, SMIAPP_PIXEL_ORDER_GRBG, },
341 { V4L2_MBUS_FMT_SRGGB10_1X10, 10, 10, SMIAPP_PIXEL_ORDER_RGGB, }, 338 { MEDIA_BUS_FMT_SRGGB10_1X10, 10, 10, SMIAPP_PIXEL_ORDER_RGGB, },
342 { V4L2_MBUS_FMT_SBGGR10_1X10, 10, 10, SMIAPP_PIXEL_ORDER_BGGR, }, 339 { MEDIA_BUS_FMT_SBGGR10_1X10, 10, 10, SMIAPP_PIXEL_ORDER_BGGR, },
343 { V4L2_MBUS_FMT_SGBRG10_1X10, 10, 10, SMIAPP_PIXEL_ORDER_GBRG, }, 340 { MEDIA_BUS_FMT_SGBRG10_1X10, 10, 10, SMIAPP_PIXEL_ORDER_GBRG, },
344 { V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8, 10, 8, SMIAPP_PIXEL_ORDER_GRBG, }, 341 { MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8, 10, 8, SMIAPP_PIXEL_ORDER_GRBG, },
345 { V4L2_MBUS_FMT_SRGGB10_DPCM8_1X8, 10, 8, SMIAPP_PIXEL_ORDER_RGGB, }, 342 { MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8, 10, 8, SMIAPP_PIXEL_ORDER_RGGB, },
346 { V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8, 10, 8, SMIAPP_PIXEL_ORDER_BGGR, }, 343 { MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8, 10, 8, SMIAPP_PIXEL_ORDER_BGGR, },
347 { V4L2_MBUS_FMT_SGBRG10_DPCM8_1X8, 10, 8, SMIAPP_PIXEL_ORDER_GBRG, }, 344 { MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8, 10, 8, SMIAPP_PIXEL_ORDER_GBRG, },
348 { V4L2_MBUS_FMT_SGRBG8_1X8, 8, 8, SMIAPP_PIXEL_ORDER_GRBG, }, 345 { MEDIA_BUS_FMT_SGRBG8_1X8, 8, 8, SMIAPP_PIXEL_ORDER_GRBG, },
349 { V4L2_MBUS_FMT_SRGGB8_1X8, 8, 8, SMIAPP_PIXEL_ORDER_RGGB, }, 346 { MEDIA_BUS_FMT_SRGGB8_1X8, 8, 8, SMIAPP_PIXEL_ORDER_RGGB, },
350 { V4L2_MBUS_FMT_SBGGR8_1X8, 8, 8, SMIAPP_PIXEL_ORDER_BGGR, }, 347 { MEDIA_BUS_FMT_SBGGR8_1X8, 8, 8, SMIAPP_PIXEL_ORDER_BGGR, },
351 { V4L2_MBUS_FMT_SGBRG8_1X8, 8, 8, SMIAPP_PIXEL_ORDER_GBRG, }, 348 { MEDIA_BUS_FMT_SGBRG8_1X8, 8, 8, SMIAPP_PIXEL_ORDER_GBRG, },
352}; 349};
353 350
354const char *pixel_order_str[] = { "GRBG", "RGGB", "BGGR", "GBRG" }; 351const char *pixel_order_str[] = { "GRBG", "RGGB", "BGGR", "GBRG" };
@@ -526,6 +523,8 @@ static const struct v4l2_ctrl_ops smiapp_ctrl_ops = {
526static int smiapp_init_controls(struct smiapp_sensor *sensor) 523static int smiapp_init_controls(struct smiapp_sensor *sensor)
527{ 524{
528 struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); 525 struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
526 unsigned long *valid_link_freqs = &sensor->valid_link_freqs[
527 sensor->csi_format->compressed - SMIAPP_COMPRESSED_BASE];
529 unsigned int max, i; 528 unsigned int max, i;
530 int rval; 529 int rval;
531 530
@@ -608,8 +607,8 @@ static int smiapp_init_controls(struct smiapp_sensor *sensor)
608 607
609 sensor->link_freq = v4l2_ctrl_new_int_menu( 608 sensor->link_freq = v4l2_ctrl_new_int_menu(
610 &sensor->src->ctrl_handler, &smiapp_ctrl_ops, 609 &sensor->src->ctrl_handler, &smiapp_ctrl_ops,
611 V4L2_CID_LINK_FREQ, max, 0, 610 V4L2_CID_LINK_FREQ, __fls(*valid_link_freqs),
612 sensor->platform_data->op_sys_clock); 611 __ffs(*valid_link_freqs), sensor->platform_data->op_sys_clock);
613 612
614 sensor->pixel_rate_csi = v4l2_ctrl_new_std( 613 sensor->pixel_rate_csi = v4l2_ctrl_new_std(
615 &sensor->src->ctrl_handler, &smiapp_ctrl_ops, 614 &sensor->src->ctrl_handler, &smiapp_ctrl_ops,
@@ -745,6 +744,7 @@ static int smiapp_get_limits_binning(struct smiapp_sensor *sensor)
745static int smiapp_get_mbus_formats(struct smiapp_sensor *sensor) 744static int smiapp_get_mbus_formats(struct smiapp_sensor *sensor)
746{ 745{
747 struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); 746 struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
747 struct smiapp_pll *pll = &sensor->pll;
748 unsigned int type, n; 748 unsigned int type, n;
749 unsigned int i, pixel_order; 749 unsigned int i, pixel_order;
750 int rval; 750 int rval;
@@ -808,14 +808,57 @@ static int smiapp_get_mbus_formats(struct smiapp_sensor *sensor)
808 dev_dbg(&client->dev, "jolly good! %d\n", j); 808 dev_dbg(&client->dev, "jolly good! %d\n", j);
809 809
810 sensor->default_mbus_frame_fmts |= 1 << j; 810 sensor->default_mbus_frame_fmts |= 1 << j;
811 if (!sensor->csi_format 811 }
812 || f->width > sensor->csi_format->width 812 }
813 || (f->width == sensor->csi_format->width 813
814 && f->compressed 814 /* Figure out which BPP values can be used with which formats. */
815 > sensor->csi_format->compressed)) { 815 pll->binning_horizontal = 1;
816 sensor->csi_format = f; 816 pll->binning_vertical = 1;
817 sensor->internal_csi_format = f; 817 pll->scale_m = sensor->scale_m;
818 } 818
819 for (i = 0; i < ARRAY_SIZE(smiapp_csi_data_formats); i++) {
820 const struct smiapp_csi_data_format *f =
821 &smiapp_csi_data_formats[i];
822 unsigned long *valid_link_freqs =
823 &sensor->valid_link_freqs[
824 f->compressed - SMIAPP_COMPRESSED_BASE];
825 unsigned int j;
826
827 BUG_ON(f->compressed < SMIAPP_COMPRESSED_BASE);
828 BUG_ON(f->compressed > SMIAPP_COMPRESSED_MAX);
829
830 if (!(sensor->default_mbus_frame_fmts & 1 << i))
831 continue;
832
833 pll->bits_per_pixel = f->compressed;
834
835 for (j = 0; sensor->platform_data->op_sys_clock[j]; j++) {
836 pll->link_freq = sensor->platform_data->op_sys_clock[j];
837
838 rval = smiapp_pll_try(sensor, pll);
839 dev_dbg(&client->dev, "link freq %u Hz, bpp %u %s\n",
840 pll->link_freq, pll->bits_per_pixel,
841 rval ? "not ok" : "ok");
842 if (rval)
843 continue;
844
845 set_bit(j, valid_link_freqs);
846 }
847
848 if (!*valid_link_freqs) {
849 dev_info(&client->dev,
850 "no valid link frequencies for %u bpp\n",
851 f->compressed);
852 sensor->default_mbus_frame_fmts &= ~BIT(i);
853 continue;
854 }
855
856 if (!sensor->csi_format
857 || f->width > sensor->csi_format->width
858 || (f->width == sensor->csi_format->width
859 && f->compressed > sensor->csi_format->compressed)) {
860 sensor->csi_format = f;
861 sensor->internal_csi_format = f;
819 } 862 }
820 } 863 }
821 864
@@ -904,7 +947,7 @@ static int smiapp_update_mode(struct smiapp_sensor *sensor)
904 dev_dbg(&client->dev, "hblank\t\t%d\n", sensor->hblank->val); 947 dev_dbg(&client->dev, "hblank\t\t%d\n", sensor->hblank->val);
905 948
906 dev_dbg(&client->dev, "real timeperframe\t100/%d\n", 949 dev_dbg(&client->dev, "real timeperframe\t100/%d\n",
907 sensor->pll.vt_pix_clk_freq_hz / 950 sensor->pll.pixel_rate_pixel_array /
908 ((sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].width 951 ((sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].width
909 + sensor->hblank->val) * 952 + sensor->hblank->val) *
910 (sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].height 953 (sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].height
@@ -1687,51 +1730,77 @@ static const struct smiapp_csi_data_format
1687 return csi_format; 1730 return csi_format;
1688} 1731}
1689 1732
1690static int smiapp_set_format(struct v4l2_subdev *subdev, 1733static int smiapp_set_format_source(struct v4l2_subdev *subdev,
1691 struct v4l2_subdev_fh *fh, 1734 struct v4l2_subdev_fh *fh,
1692 struct v4l2_subdev_format *fmt) 1735 struct v4l2_subdev_format *fmt)
1693{ 1736{
1694 struct smiapp_sensor *sensor = to_smiapp_sensor(subdev); 1737 struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
1695 struct smiapp_subdev *ssd = to_smiapp_subdev(subdev); 1738 const struct smiapp_csi_data_format *csi_format,
1696 struct v4l2_rect *crops[SMIAPP_PADS]; 1739 *old_csi_format = sensor->csi_format;
1740 unsigned long *valid_link_freqs;
1741 u32 code = fmt->format.code;
1742 unsigned int i;
1743 int rval;
1697 1744
1698 mutex_lock(&sensor->mutex); 1745 rval = __smiapp_get_format(subdev, fh, fmt);
1746 if (rval)
1747 return rval;
1699 1748
1700 /* 1749 /*
1701 * Media bus code is changeable on src subdev's source pad. On 1750 * Media bus code is changeable on src subdev's source pad. On
1702 * other source pads we just get format here. 1751 * other source pads we just get format here.
1703 */ 1752 */
1704 if (fmt->pad == ssd->source_pad) { 1753 if (subdev != &sensor->src->sd)
1705 u32 code = fmt->format.code; 1754 return 0;
1706 int rval = __smiapp_get_format(subdev, fh, fmt);
1707 bool range_changed = false;
1708 unsigned int i;
1709
1710 if (!rval && subdev == &sensor->src->sd) {
1711 const struct smiapp_csi_data_format *csi_format =
1712 smiapp_validate_csi_data_format(sensor, code);
1713 1755
1714 if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) { 1756 csi_format = smiapp_validate_csi_data_format(sensor, code);
1715 if (csi_format->width !=
1716 sensor->csi_format->width)
1717 range_changed = true;
1718 1757
1719 sensor->csi_format = csi_format; 1758 fmt->format.code = csi_format->code;
1720 }
1721 1759
1722 fmt->format.code = csi_format->code; 1760 if (fmt->which != V4L2_SUBDEV_FORMAT_ACTIVE)
1723 } 1761 return 0;
1724 1762
1725 mutex_unlock(&sensor->mutex); 1763 sensor->csi_format = csi_format;
1726 if (rval || !range_changed)
1727 return rval;
1728 1764
1765 if (csi_format->width != old_csi_format->width)
1729 for (i = 0; i < ARRAY_SIZE(sensor->test_data); i++) 1766 for (i = 0; i < ARRAY_SIZE(sensor->test_data); i++)
1730 v4l2_ctrl_modify_range( 1767 __v4l2_ctrl_modify_range(
1731 sensor->test_data[i], 1768 sensor->test_data[i], 0,
1732 0, (1 << sensor->csi_format->width) - 1, 1, 0); 1769 (1 << csi_format->width) - 1, 1, 0);
1733 1770
1771 if (csi_format->compressed == old_csi_format->compressed)
1734 return 0; 1772 return 0;
1773
1774 valid_link_freqs =
1775 &sensor->valid_link_freqs[sensor->csi_format->compressed
1776 - SMIAPP_COMPRESSED_BASE];
1777
1778 __v4l2_ctrl_modify_range(
1779 sensor->link_freq, 0,
1780 __fls(*valid_link_freqs), ~*valid_link_freqs,
1781 __ffs(*valid_link_freqs));
1782
1783 return smiapp_pll_update(sensor);
1784}
1785
1786static int smiapp_set_format(struct v4l2_subdev *subdev,
1787 struct v4l2_subdev_fh *fh,
1788 struct v4l2_subdev_format *fmt)
1789{
1790 struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
1791 struct smiapp_subdev *ssd = to_smiapp_subdev(subdev);
1792 struct v4l2_rect *crops[SMIAPP_PADS];
1793
1794 mutex_lock(&sensor->mutex);
1795
1796 if (fmt->pad == ssd->source_pad) {
1797 int rval;
1798
1799 rval = smiapp_set_format_source(subdev, fh, fmt);
1800
1801 mutex_unlock(&sensor->mutex);
1802
1803 return rval;
1735 } 1804 }
1736 1805
1737 /* Sink pad. Width and height are changeable here. */ 1806 /* Sink pad. Width and height are changeable here. */
@@ -2023,6 +2092,11 @@ static int __smiapp_sel_supported(struct v4l2_subdev *subdev,
2023 == SMIAPP_DIGITAL_CROP_CAPABILITY_INPUT_CROP) 2092 == SMIAPP_DIGITAL_CROP_CAPABILITY_INPUT_CROP)
2024 return 0; 2093 return 0;
2025 return -EINVAL; 2094 return -EINVAL;
2095 case V4L2_SEL_TGT_NATIVE_SIZE:
2096 if (ssd == sensor->pixel_array
2097 && sel->pad == SMIAPP_PA_PAD_SRC)
2098 return 0;
2099 return -EINVAL;
2026 case V4L2_SEL_TGT_COMPOSE: 2100 case V4L2_SEL_TGT_COMPOSE:
2027 case V4L2_SEL_TGT_COMPOSE_BOUNDS: 2101 case V4L2_SEL_TGT_COMPOSE_BOUNDS:
2028 if (sel->pad == ssd->source_pad) 2102 if (sel->pad == ssd->source_pad)
@@ -2121,7 +2195,9 @@ static int __smiapp_get_selection(struct v4l2_subdev *subdev,
2121 2195
2122 switch (sel->target) { 2196 switch (sel->target) {
2123 case V4L2_SEL_TGT_CROP_BOUNDS: 2197 case V4L2_SEL_TGT_CROP_BOUNDS:
2198 case V4L2_SEL_TGT_NATIVE_SIZE:
2124 if (ssd == sensor->pixel_array) { 2199 if (ssd == sensor->pixel_array) {
2200 sel->r.left = sel->r.top = 0;
2125 sel->r.width = 2201 sel->r.width =
2126 sensor->limits[SMIAPP_LIMIT_X_ADDR_MAX] + 1; 2202 sensor->limits[SMIAPP_LIMIT_X_ADDR_MAX] + 1;
2127 sel->r.height = 2203 sel->r.height =
@@ -2190,7 +2266,7 @@ static int smiapp_set_selection(struct v4l2_subdev *subdev,
2190 ret = smiapp_set_compose(subdev, fh, sel); 2266 ret = smiapp_set_compose(subdev, fh, sel);
2191 break; 2267 break;
2192 default: 2268 default:
2193 BUG(); 2269 ret = -EINVAL;
2194 } 2270 }
2195 2271
2196 mutex_unlock(&sensor->mutex); 2272 mutex_unlock(&sensor->mutex);
@@ -2482,12 +2558,6 @@ static int smiapp_registered(struct v4l2_subdev *subdev)
2482 goto out_power_off; 2558 goto out_power_off;
2483 } 2559 }
2484 2560
2485 rval = smiapp_get_mbus_formats(sensor);
2486 if (rval) {
2487 rval = -ENODEV;
2488 goto out_power_off;
2489 }
2490
2491 if (sensor->limits[SMIAPP_LIMIT_BINNING_CAPABILITY]) { 2561 if (sensor->limits[SMIAPP_LIMIT_BINNING_CAPABILITY]) {
2492 u32 val; 2562 u32 val;
2493 2563
@@ -2569,6 +2639,22 @@ static int smiapp_registered(struct v4l2_subdev *subdev)
2569 2639
2570 sensor->scale_m = sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN]; 2640 sensor->scale_m = sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN];
2571 2641
2642 /* prepare PLL configuration input values */
2643 pll->bus_type = SMIAPP_PLL_BUS_TYPE_CSI2;
2644 pll->csi2.lanes = sensor->platform_data->lanes;
2645 pll->ext_clk_freq_hz = sensor->platform_data->ext_clk;
2646 pll->flags = smiapp_call_quirk(sensor, pll_flags);
2647 pll->scale_n = sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN];
2648 /* Profile 0 sensors have no separate OP clock branch. */
2649 if (sensor->minfo.smiapp_profile == SMIAPP_PROFILE_0)
2650 pll->flags |= SMIAPP_PLL_FLAG_NO_OP_CLOCKS;
2651
2652 rval = smiapp_get_mbus_formats(sensor);
2653 if (rval) {
2654 rval = -ENODEV;
2655 goto out_nvm_release;
2656 }
2657
2572 for (i = 0; i < SMIAPP_SUBDEVS; i++) { 2658 for (i = 0; i < SMIAPP_SUBDEVS; i++) {
2573 struct { 2659 struct {
2574 struct smiapp_subdev *ssd; 2660 struct smiapp_subdev *ssd;
@@ -2666,18 +2752,9 @@ static int smiapp_registered(struct v4l2_subdev *subdev)
2666 if (rval < 0) 2752 if (rval < 0)
2667 goto out_nvm_release; 2753 goto out_nvm_release;
2668 2754
2669 /* prepare PLL configuration input values */ 2755 mutex_lock(&sensor->mutex);
2670 pll->bus_type = SMIAPP_PLL_BUS_TYPE_CSI2;
2671 pll->csi2.lanes = sensor->platform_data->lanes;
2672 pll->ext_clk_freq_hz = sensor->platform_data->ext_clk;
2673 pll->flags = smiapp_call_quirk(sensor, pll_flags);
2674
2675 /* Profile 0 sensors have no separate OP clock branch. */
2676 if (sensor->minfo.smiapp_profile == SMIAPP_PROFILE_0)
2677 pll->flags |= SMIAPP_PLL_FLAG_NO_OP_CLOCKS;
2678 pll->scale_n = sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN];
2679
2680 rval = smiapp_update_mode(sensor); 2756 rval = smiapp_update_mode(sensor);
2757 mutex_unlock(&sensor->mutex);
2681 if (rval) { 2758 if (rval) {
2682 dev_err(&client->dev, "update mode failed\n"); 2759 dev_err(&client->dev, "update mode failed\n");
2683 goto out_nvm_release; 2760 goto out_nvm_release;
diff --git a/drivers/media/i2c/smiapp/smiapp.h b/drivers/media/i2c/smiapp/smiapp.h
index 874b49ffd88f..f88f8ec344d3 100644
--- a/drivers/media/i2c/smiapp/smiapp.h
+++ b/drivers/media/i2c/smiapp/smiapp.h
@@ -156,6 +156,11 @@ struct smiapp_csi_data_format {
156#define SMIAPP_PAD_SRC 1 156#define SMIAPP_PAD_SRC 1
157#define SMIAPP_PADS 2 157#define SMIAPP_PADS 2
158 158
159#define SMIAPP_COMPRESSED_BASE 8
160#define SMIAPP_COMPRESSED_MAX 12
161#define SMIAPP_NR_OF_COMPRESSED (SMIAPP_COMPRESSED_MAX - \
162 SMIAPP_COMPRESSED_BASE + 1)
163
159struct smiapp_binning_subtype { 164struct smiapp_binning_subtype {
160 u8 horizontal:4; 165 u8 horizontal:4;
161 u8 vertical:4; 166 u8 vertical:4;
@@ -232,6 +237,9 @@ struct smiapp_sensor {
232 237
233 struct smiapp_pll pll; 238 struct smiapp_pll pll;
234 239
240 /* Is a default format supported for a given BPP? */
241 unsigned long valid_link_freqs[SMIAPP_NR_OF_COMPRESSED];
242
235 /* Pixel array controls */ 243 /* Pixel array controls */
236 struct v4l2_ctrl *analog_gain; 244 struct v4l2_ctrl *analog_gain;
237 struct v4l2_ctrl *exposure; 245 struct v4l2_ctrl *exposure;
diff --git a/drivers/media/i2c/soc_camera/imx074.c b/drivers/media/i2c/soc_camera/imx074.c
index 5b915936c3f3..ec89cfa927a2 100644
--- a/drivers/media/i2c/soc_camera/imx074.c
+++ b/drivers/media/i2c/soc_camera/imx074.c
@@ -71,7 +71,7 @@
71 71
72/* IMX074 has only one fixed colorspace per pixelcode */ 72/* IMX074 has only one fixed colorspace per pixelcode */
73struct imx074_datafmt { 73struct imx074_datafmt {
74 enum v4l2_mbus_pixelcode code; 74 u32 code;
75 enum v4l2_colorspace colorspace; 75 enum v4l2_colorspace colorspace;
76}; 76};
77 77
@@ -82,7 +82,7 @@ struct imx074 {
82}; 82};
83 83
84static const struct imx074_datafmt imx074_colour_fmts[] = { 84static const struct imx074_datafmt imx074_colour_fmts[] = {
85 {V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_COLORSPACE_SRGB}, 85 {MEDIA_BUS_FMT_SBGGR8_1X8, V4L2_COLORSPACE_SRGB},
86}; 86};
87 87
88static struct imx074 *to_imx074(const struct i2c_client *client) 88static struct imx074 *to_imx074(const struct i2c_client *client)
@@ -91,7 +91,7 @@ static struct imx074 *to_imx074(const struct i2c_client *client)
91} 91}
92 92
93/* Find a data format by a pixel code in an array */ 93/* Find a data format by a pixel code in an array */
94static const struct imx074_datafmt *imx074_find_datafmt(enum v4l2_mbus_pixelcode code) 94static const struct imx074_datafmt *imx074_find_datafmt(u32 code)
95{ 95{
96 int i; 96 int i;
97 97
@@ -236,7 +236,7 @@ static int imx074_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
236} 236}
237 237
238static int imx074_enum_fmt(struct v4l2_subdev *sd, unsigned int index, 238static int imx074_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
239 enum v4l2_mbus_pixelcode *code) 239 u32 *code)
240{ 240{
241 if ((unsigned int)index >= ARRAY_SIZE(imx074_colour_fmts)) 241 if ((unsigned int)index >= ARRAY_SIZE(imx074_colour_fmts))
242 return -EINVAL; 242 return -EINVAL;
diff --git a/drivers/media/i2c/soc_camera/mt9m001.c b/drivers/media/i2c/soc_camera/mt9m001.c
index dbd8c142d6ef..2e9a53502551 100644
--- a/drivers/media/i2c/soc_camera/mt9m001.c
+++ b/drivers/media/i2c/soc_camera/mt9m001.c
@@ -53,13 +53,13 @@
53 53
54/* MT9M001 has only one fixed colorspace per pixelcode */ 54/* MT9M001 has only one fixed colorspace per pixelcode */
55struct mt9m001_datafmt { 55struct mt9m001_datafmt {
56 enum v4l2_mbus_pixelcode code; 56 u32 code;
57 enum v4l2_colorspace colorspace; 57 enum v4l2_colorspace colorspace;
58}; 58};
59 59
60/* Find a data format by a pixel code in an array */ 60/* Find a data format by a pixel code in an array */
61static const struct mt9m001_datafmt *mt9m001_find_datafmt( 61static const struct mt9m001_datafmt *mt9m001_find_datafmt(
62 enum v4l2_mbus_pixelcode code, const struct mt9m001_datafmt *fmt, 62 u32 code, const struct mt9m001_datafmt *fmt,
63 int n) 63 int n)
64{ 64{
65 int i; 65 int i;
@@ -75,14 +75,14 @@ static const struct mt9m001_datafmt mt9m001_colour_fmts[] = {
75 * Order important: first natively supported, 75 * Order important: first natively supported,
76 * second supported with a GPIO extender 76 * second supported with a GPIO extender
77 */ 77 */
78 {V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_COLORSPACE_SRGB}, 78 {MEDIA_BUS_FMT_SBGGR10_1X10, V4L2_COLORSPACE_SRGB},
79 {V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_COLORSPACE_SRGB}, 79 {MEDIA_BUS_FMT_SBGGR8_1X8, V4L2_COLORSPACE_SRGB},
80}; 80};
81 81
82static const struct mt9m001_datafmt mt9m001_monochrome_fmts[] = { 82static const struct mt9m001_datafmt mt9m001_monochrome_fmts[] = {
83 /* Order important - see above */ 83 /* Order important - see above */
84 {V4L2_MBUS_FMT_Y10_1X10, V4L2_COLORSPACE_JPEG}, 84 {MEDIA_BUS_FMT_Y10_1X10, V4L2_COLORSPACE_JPEG},
85 {V4L2_MBUS_FMT_Y8_1X8, V4L2_COLORSPACE_JPEG}, 85 {MEDIA_BUS_FMT_Y8_1X8, V4L2_COLORSPACE_JPEG},
86}; 86};
87 87
88struct mt9m001 { 88struct mt9m001 {
@@ -563,7 +563,7 @@ static struct v4l2_subdev_core_ops mt9m001_subdev_core_ops = {
563}; 563};
564 564
565static int mt9m001_enum_fmt(struct v4l2_subdev *sd, unsigned int index, 565static int mt9m001_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
566 enum v4l2_mbus_pixelcode *code) 566 u32 *code)
567{ 567{
568 struct i2c_client *client = v4l2_get_subdevdata(sd); 568 struct i2c_client *client = v4l2_get_subdevdata(sd);
569 struct mt9m001 *mt9m001 = to_mt9m001(client); 569 struct mt9m001 *mt9m001 = to_mt9m001(client);
diff --git a/drivers/media/i2c/soc_camera/mt9m111.c b/drivers/media/i2c/soc_camera/mt9m111.c
index b51e8562e775..5992ea93257a 100644
--- a/drivers/media/i2c/soc_camera/mt9m111.c
+++ b/drivers/media/i2c/soc_camera/mt9m111.c
@@ -182,23 +182,23 @@ static struct mt9m111_context context_b = {
182 182
183/* MT9M111 has only one fixed colorspace per pixelcode */ 183/* MT9M111 has only one fixed colorspace per pixelcode */
184struct mt9m111_datafmt { 184struct mt9m111_datafmt {
185 enum v4l2_mbus_pixelcode code; 185 u32 code;
186 enum v4l2_colorspace colorspace; 186 enum v4l2_colorspace colorspace;
187}; 187};
188 188
189static const struct mt9m111_datafmt mt9m111_colour_fmts[] = { 189static const struct mt9m111_datafmt mt9m111_colour_fmts[] = {
190 {V4L2_MBUS_FMT_YUYV8_2X8, V4L2_COLORSPACE_JPEG}, 190 {MEDIA_BUS_FMT_YUYV8_2X8, V4L2_COLORSPACE_JPEG},
191 {V4L2_MBUS_FMT_YVYU8_2X8, V4L2_COLORSPACE_JPEG}, 191 {MEDIA_BUS_FMT_YVYU8_2X8, V4L2_COLORSPACE_JPEG},
192 {V4L2_MBUS_FMT_UYVY8_2X8, V4L2_COLORSPACE_JPEG}, 192 {MEDIA_BUS_FMT_UYVY8_2X8, V4L2_COLORSPACE_JPEG},
193 {V4L2_MBUS_FMT_VYUY8_2X8, V4L2_COLORSPACE_JPEG}, 193 {MEDIA_BUS_FMT_VYUY8_2X8, V4L2_COLORSPACE_JPEG},
194 {V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE, V4L2_COLORSPACE_SRGB}, 194 {MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE, V4L2_COLORSPACE_SRGB},
195 {V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE, V4L2_COLORSPACE_SRGB}, 195 {MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE, V4L2_COLORSPACE_SRGB},
196 {V4L2_MBUS_FMT_RGB565_2X8_LE, V4L2_COLORSPACE_SRGB}, 196 {MEDIA_BUS_FMT_RGB565_2X8_LE, V4L2_COLORSPACE_SRGB},
197 {V4L2_MBUS_FMT_RGB565_2X8_BE, V4L2_COLORSPACE_SRGB}, 197 {MEDIA_BUS_FMT_RGB565_2X8_BE, V4L2_COLORSPACE_SRGB},
198 {V4L2_MBUS_FMT_BGR565_2X8_LE, V4L2_COLORSPACE_SRGB}, 198 {MEDIA_BUS_FMT_BGR565_2X8_LE, V4L2_COLORSPACE_SRGB},
199 {V4L2_MBUS_FMT_BGR565_2X8_BE, V4L2_COLORSPACE_SRGB}, 199 {MEDIA_BUS_FMT_BGR565_2X8_BE, V4L2_COLORSPACE_SRGB},
200 {V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_COLORSPACE_SRGB}, 200 {MEDIA_BUS_FMT_SBGGR8_1X8, V4L2_COLORSPACE_SRGB},
201 {V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE, V4L2_COLORSPACE_SRGB}, 201 {MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE, V4L2_COLORSPACE_SRGB},
202}; 202};
203 203
204struct mt9m111 { 204struct mt9m111 {
@@ -218,7 +218,7 @@ struct mt9m111 {
218 218
219/* Find a data format by a pixel code */ 219/* Find a data format by a pixel code */
220static const struct mt9m111_datafmt *mt9m111_find_datafmt(struct mt9m111 *mt9m111, 220static const struct mt9m111_datafmt *mt9m111_find_datafmt(struct mt9m111 *mt9m111,
221 enum v4l2_mbus_pixelcode code) 221 u32 code)
222{ 222{
223 int i; 223 int i;
224 for (i = 0; i < ARRAY_SIZE(mt9m111_colour_fmts); i++) 224 for (i = 0; i < ARRAY_SIZE(mt9m111_colour_fmts); i++)
@@ -331,7 +331,7 @@ static int mt9m111_setup_rect_ctx(struct mt9m111 *mt9m111,
331} 331}
332 332
333static int mt9m111_setup_geometry(struct mt9m111 *mt9m111, struct v4l2_rect *rect, 333static int mt9m111_setup_geometry(struct mt9m111 *mt9m111, struct v4l2_rect *rect,
334 int width, int height, enum v4l2_mbus_pixelcode code) 334 int width, int height, u32 code)
335{ 335{
336 struct i2c_client *client = v4l2_get_subdevdata(&mt9m111->subdev); 336 struct i2c_client *client = v4l2_get_subdevdata(&mt9m111->subdev);
337 int ret; 337 int ret;
@@ -345,7 +345,7 @@ static int mt9m111_setup_geometry(struct mt9m111 *mt9m111, struct v4l2_rect *rec
345 if (!ret) 345 if (!ret)
346 ret = reg_write(WINDOW_HEIGHT, rect->height); 346 ret = reg_write(WINDOW_HEIGHT, rect->height);
347 347
348 if (code != V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE) { 348 if (code != MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE) {
349 /* IFP in use, down-scaling possible */ 349 /* IFP in use, down-scaling possible */
350 if (!ret) 350 if (!ret)
351 ret = mt9m111_setup_rect_ctx(mt9m111, &context_b, 351 ret = mt9m111_setup_rect_ctx(mt9m111, &context_b,
@@ -393,8 +393,8 @@ static int mt9m111_s_crop(struct v4l2_subdev *sd, const struct v4l2_crop *a)
393 if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) 393 if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
394 return -EINVAL; 394 return -EINVAL;
395 395
396 if (mt9m111->fmt->code == V4L2_MBUS_FMT_SBGGR8_1X8 || 396 if (mt9m111->fmt->code == MEDIA_BUS_FMT_SBGGR8_1X8 ||
397 mt9m111->fmt->code == V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE) { 397 mt9m111->fmt->code == MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE) {
398 /* Bayer format - even size lengths */ 398 /* Bayer format - even size lengths */
399 rect.width = ALIGN(rect.width, 2); 399 rect.width = ALIGN(rect.width, 2);
400 rect.height = ALIGN(rect.height, 2); 400 rect.height = ALIGN(rect.height, 2);
@@ -462,7 +462,7 @@ static int mt9m111_g_fmt(struct v4l2_subdev *sd,
462} 462}
463 463
464static int mt9m111_set_pixfmt(struct mt9m111 *mt9m111, 464static int mt9m111_set_pixfmt(struct mt9m111 *mt9m111,
465 enum v4l2_mbus_pixelcode code) 465 u32 code)
466{ 466{
467 struct i2c_client *client = v4l2_get_subdevdata(&mt9m111->subdev); 467 struct i2c_client *client = v4l2_get_subdevdata(&mt9m111->subdev);
468 u16 data_outfmt2, mask_outfmt2 = MT9M111_OUTFMT_PROCESSED_BAYER | 468 u16 data_outfmt2, mask_outfmt2 = MT9M111_OUTFMT_PROCESSED_BAYER |
@@ -474,46 +474,46 @@ static int mt9m111_set_pixfmt(struct mt9m111 *mt9m111,
474 int ret; 474 int ret;
475 475
476 switch (code) { 476 switch (code) {
477 case V4L2_MBUS_FMT_SBGGR8_1X8: 477 case MEDIA_BUS_FMT_SBGGR8_1X8:
478 data_outfmt2 = MT9M111_OUTFMT_PROCESSED_BAYER | 478 data_outfmt2 = MT9M111_OUTFMT_PROCESSED_BAYER |
479 MT9M111_OUTFMT_RGB; 479 MT9M111_OUTFMT_RGB;
480 break; 480 break;
481 case V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE: 481 case MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE:
482 data_outfmt2 = MT9M111_OUTFMT_BYPASS_IFP | MT9M111_OUTFMT_RGB; 482 data_outfmt2 = MT9M111_OUTFMT_BYPASS_IFP | MT9M111_OUTFMT_RGB;
483 break; 483 break;
484 case V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE: 484 case MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE:
485 data_outfmt2 = MT9M111_OUTFMT_RGB | MT9M111_OUTFMT_RGB555 | 485 data_outfmt2 = MT9M111_OUTFMT_RGB | MT9M111_OUTFMT_RGB555 |
486 MT9M111_OUTFMT_SWAP_YCbCr_C_Y_RGB_EVEN; 486 MT9M111_OUTFMT_SWAP_YCbCr_C_Y_RGB_EVEN;
487 break; 487 break;
488 case V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE: 488 case MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE:
489 data_outfmt2 = MT9M111_OUTFMT_RGB | MT9M111_OUTFMT_RGB555; 489 data_outfmt2 = MT9M111_OUTFMT_RGB | MT9M111_OUTFMT_RGB555;
490 break; 490 break;
491 case V4L2_MBUS_FMT_RGB565_2X8_LE: 491 case MEDIA_BUS_FMT_RGB565_2X8_LE:
492 data_outfmt2 = MT9M111_OUTFMT_RGB | MT9M111_OUTFMT_RGB565 | 492 data_outfmt2 = MT9M111_OUTFMT_RGB | MT9M111_OUTFMT_RGB565 |
493 MT9M111_OUTFMT_SWAP_YCbCr_C_Y_RGB_EVEN; 493 MT9M111_OUTFMT_SWAP_YCbCr_C_Y_RGB_EVEN;
494 break; 494 break;
495 case V4L2_MBUS_FMT_RGB565_2X8_BE: 495 case MEDIA_BUS_FMT_RGB565_2X8_BE:
496 data_outfmt2 = MT9M111_OUTFMT_RGB | MT9M111_OUTFMT_RGB565; 496 data_outfmt2 = MT9M111_OUTFMT_RGB | MT9M111_OUTFMT_RGB565;
497 break; 497 break;
498 case V4L2_MBUS_FMT_BGR565_2X8_BE: 498 case MEDIA_BUS_FMT_BGR565_2X8_BE:
499 data_outfmt2 = MT9M111_OUTFMT_RGB | MT9M111_OUTFMT_RGB565 | 499 data_outfmt2 = MT9M111_OUTFMT_RGB | MT9M111_OUTFMT_RGB565 |
500 MT9M111_OUTFMT_SWAP_YCbCr_Cb_Cr_RGB_R_B; 500 MT9M111_OUTFMT_SWAP_YCbCr_Cb_Cr_RGB_R_B;
501 break; 501 break;
502 case V4L2_MBUS_FMT_BGR565_2X8_LE: 502 case MEDIA_BUS_FMT_BGR565_2X8_LE:
503 data_outfmt2 = MT9M111_OUTFMT_RGB | MT9M111_OUTFMT_RGB565 | 503 data_outfmt2 = MT9M111_OUTFMT_RGB | MT9M111_OUTFMT_RGB565 |
504 MT9M111_OUTFMT_SWAP_YCbCr_C_Y_RGB_EVEN | 504 MT9M111_OUTFMT_SWAP_YCbCr_C_Y_RGB_EVEN |
505 MT9M111_OUTFMT_SWAP_YCbCr_Cb_Cr_RGB_R_B; 505 MT9M111_OUTFMT_SWAP_YCbCr_Cb_Cr_RGB_R_B;
506 break; 506 break;
507 case V4L2_MBUS_FMT_UYVY8_2X8: 507 case MEDIA_BUS_FMT_UYVY8_2X8:
508 data_outfmt2 = 0; 508 data_outfmt2 = 0;
509 break; 509 break;
510 case V4L2_MBUS_FMT_VYUY8_2X8: 510 case MEDIA_BUS_FMT_VYUY8_2X8:
511 data_outfmt2 = MT9M111_OUTFMT_SWAP_YCbCr_Cb_Cr_RGB_R_B; 511 data_outfmt2 = MT9M111_OUTFMT_SWAP_YCbCr_Cb_Cr_RGB_R_B;
512 break; 512 break;
513 case V4L2_MBUS_FMT_YUYV8_2X8: 513 case MEDIA_BUS_FMT_YUYV8_2X8:
514 data_outfmt2 = MT9M111_OUTFMT_SWAP_YCbCr_C_Y_RGB_EVEN; 514 data_outfmt2 = MT9M111_OUTFMT_SWAP_YCbCr_C_Y_RGB_EVEN;
515 break; 515 break;
516 case V4L2_MBUS_FMT_YVYU8_2X8: 516 case MEDIA_BUS_FMT_YVYU8_2X8:
517 data_outfmt2 = MT9M111_OUTFMT_SWAP_YCbCr_C_Y_RGB_EVEN | 517 data_outfmt2 = MT9M111_OUTFMT_SWAP_YCbCr_C_Y_RGB_EVEN |
518 MT9M111_OUTFMT_SWAP_YCbCr_Cb_Cr_RGB_R_B; 518 MT9M111_OUTFMT_SWAP_YCbCr_Cb_Cr_RGB_R_B;
519 break; 519 break;
@@ -542,8 +542,8 @@ static int mt9m111_try_fmt(struct v4l2_subdev *sd,
542 542
543 fmt = mt9m111_find_datafmt(mt9m111, mf->code); 543 fmt = mt9m111_find_datafmt(mt9m111, mf->code);
544 544
545 bayer = fmt->code == V4L2_MBUS_FMT_SBGGR8_1X8 || 545 bayer = fmt->code == MEDIA_BUS_FMT_SBGGR8_1X8 ||
546 fmt->code == V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE; 546 fmt->code == MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE;
547 547
548 /* 548 /*
549 * With Bayer format enforce even side lengths, but let the user play 549 * With Bayer format enforce even side lengths, but let the user play
@@ -554,7 +554,7 @@ static int mt9m111_try_fmt(struct v4l2_subdev *sd,
554 rect->height = ALIGN(rect->height, 2); 554 rect->height = ALIGN(rect->height, 2);
555 } 555 }
556 556
557 if (fmt->code == V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE) { 557 if (fmt->code == MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE) {
558 /* IFP bypass mode, no scaling */ 558 /* IFP bypass mode, no scaling */
559 mf->width = rect->width; 559 mf->width = rect->width;
560 mf->height = rect->height; 560 mf->height = rect->height;
@@ -840,7 +840,7 @@ static struct v4l2_subdev_core_ops mt9m111_subdev_core_ops = {
840}; 840};
841 841
842static int mt9m111_enum_fmt(struct v4l2_subdev *sd, unsigned int index, 842static int mt9m111_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
843 enum v4l2_mbus_pixelcode *code) 843 u32 *code)
844{ 844{
845 if (index >= ARRAY_SIZE(mt9m111_colour_fmts)) 845 if (index >= ARRAY_SIZE(mt9m111_colour_fmts))
846 return -EINVAL; 846 return -EINVAL;
diff --git a/drivers/media/i2c/soc_camera/mt9t031.c b/drivers/media/i2c/soc_camera/mt9t031.c
index f8358c4071a9..35d9c8d25589 100644
--- a/drivers/media/i2c/soc_camera/mt9t031.c
+++ b/drivers/media/i2c/soc_camera/mt9t031.c
@@ -345,7 +345,7 @@ static int mt9t031_g_fmt(struct v4l2_subdev *sd,
345 345
346 mf->width = mt9t031->rect.width / mt9t031->xskip; 346 mf->width = mt9t031->rect.width / mt9t031->xskip;
347 mf->height = mt9t031->rect.height / mt9t031->yskip; 347 mf->height = mt9t031->rect.height / mt9t031->yskip;
348 mf->code = V4L2_MBUS_FMT_SBGGR10_1X10; 348 mf->code = MEDIA_BUS_FMT_SBGGR10_1X10;
349 mf->colorspace = V4L2_COLORSPACE_SRGB; 349 mf->colorspace = V4L2_COLORSPACE_SRGB;
350 mf->field = V4L2_FIELD_NONE; 350 mf->field = V4L2_FIELD_NONE;
351 351
@@ -367,7 +367,7 @@ static int mt9t031_s_fmt(struct v4l2_subdev *sd,
367 xskip = mt9t031_skip(&rect.width, mf->width, MT9T031_MAX_WIDTH); 367 xskip = mt9t031_skip(&rect.width, mf->width, MT9T031_MAX_WIDTH);
368 yskip = mt9t031_skip(&rect.height, mf->height, MT9T031_MAX_HEIGHT); 368 yskip = mt9t031_skip(&rect.height, mf->height, MT9T031_MAX_HEIGHT);
369 369
370 mf->code = V4L2_MBUS_FMT_SBGGR10_1X10; 370 mf->code = MEDIA_BUS_FMT_SBGGR10_1X10;
371 mf->colorspace = V4L2_COLORSPACE_SRGB; 371 mf->colorspace = V4L2_COLORSPACE_SRGB;
372 372
373 /* mt9t031_set_params() doesn't change width and height */ 373 /* mt9t031_set_params() doesn't change width and height */
@@ -385,7 +385,7 @@ static int mt9t031_try_fmt(struct v4l2_subdev *sd,
385 &mf->width, MT9T031_MIN_WIDTH, MT9T031_MAX_WIDTH, 1, 385 &mf->width, MT9T031_MIN_WIDTH, MT9T031_MAX_WIDTH, 1,
386 &mf->height, MT9T031_MIN_HEIGHT, MT9T031_MAX_HEIGHT, 1, 0); 386 &mf->height, MT9T031_MIN_HEIGHT, MT9T031_MAX_HEIGHT, 1, 0);
387 387
388 mf->code = V4L2_MBUS_FMT_SBGGR10_1X10; 388 mf->code = MEDIA_BUS_FMT_SBGGR10_1X10;
389 mf->colorspace = V4L2_COLORSPACE_SRGB; 389 mf->colorspace = V4L2_COLORSPACE_SRGB;
390 390
391 return 0; 391 return 0;
@@ -673,12 +673,12 @@ static struct v4l2_subdev_core_ops mt9t031_subdev_core_ops = {
673}; 673};
674 674
675static int mt9t031_enum_fmt(struct v4l2_subdev *sd, unsigned int index, 675static int mt9t031_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
676 enum v4l2_mbus_pixelcode *code) 676 u32 *code)
677{ 677{
678 if (index) 678 if (index)
679 return -EINVAL; 679 return -EINVAL;
680 680
681 *code = V4L2_MBUS_FMT_SBGGR10_1X10; 681 *code = MEDIA_BUS_FMT_SBGGR10_1X10;
682 return 0; 682 return 0;
683} 683}
684 684
diff --git a/drivers/media/i2c/soc_camera/mt9t112.c b/drivers/media/i2c/soc_camera/mt9t112.c
index 996d7b4007a5..64f08365e6b2 100644
--- a/drivers/media/i2c/soc_camera/mt9t112.c
+++ b/drivers/media/i2c/soc_camera/mt9t112.c
@@ -77,7 +77,7 @@
77 struct 77 struct
78************************************************************************/ 78************************************************************************/
79struct mt9t112_format { 79struct mt9t112_format {
80 enum v4l2_mbus_pixelcode code; 80 u32 code;
81 enum v4l2_colorspace colorspace; 81 enum v4l2_colorspace colorspace;
82 u16 fmt; 82 u16 fmt;
83 u16 order; 83 u16 order;
@@ -103,32 +103,32 @@ struct mt9t112_priv {
103 103
104static const struct mt9t112_format mt9t112_cfmts[] = { 104static const struct mt9t112_format mt9t112_cfmts[] = {
105 { 105 {
106 .code = V4L2_MBUS_FMT_UYVY8_2X8, 106 .code = MEDIA_BUS_FMT_UYVY8_2X8,
107 .colorspace = V4L2_COLORSPACE_JPEG, 107 .colorspace = V4L2_COLORSPACE_JPEG,
108 .fmt = 1, 108 .fmt = 1,
109 .order = 0, 109 .order = 0,
110 }, { 110 }, {
111 .code = V4L2_MBUS_FMT_VYUY8_2X8, 111 .code = MEDIA_BUS_FMT_VYUY8_2X8,
112 .colorspace = V4L2_COLORSPACE_JPEG, 112 .colorspace = V4L2_COLORSPACE_JPEG,
113 .fmt = 1, 113 .fmt = 1,
114 .order = 1, 114 .order = 1,
115 }, { 115 }, {
116 .code = V4L2_MBUS_FMT_YUYV8_2X8, 116 .code = MEDIA_BUS_FMT_YUYV8_2X8,
117 .colorspace = V4L2_COLORSPACE_JPEG, 117 .colorspace = V4L2_COLORSPACE_JPEG,
118 .fmt = 1, 118 .fmt = 1,
119 .order = 2, 119 .order = 2,
120 }, { 120 }, {
121 .code = V4L2_MBUS_FMT_YVYU8_2X8, 121 .code = MEDIA_BUS_FMT_YVYU8_2X8,
122 .colorspace = V4L2_COLORSPACE_JPEG, 122 .colorspace = V4L2_COLORSPACE_JPEG,
123 .fmt = 1, 123 .fmt = 1,
124 .order = 3, 124 .order = 3,
125 }, { 125 }, {
126 .code = V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE, 126 .code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE,
127 .colorspace = V4L2_COLORSPACE_SRGB, 127 .colorspace = V4L2_COLORSPACE_SRGB,
128 .fmt = 8, 128 .fmt = 8,
129 .order = 2, 129 .order = 2,
130 }, { 130 }, {
131 .code = V4L2_MBUS_FMT_RGB565_2X8_LE, 131 .code = MEDIA_BUS_FMT_RGB565_2X8_LE,
132 .colorspace = V4L2_COLORSPACE_SRGB, 132 .colorspace = V4L2_COLORSPACE_SRGB,
133 .fmt = 4, 133 .fmt = 4,
134 .order = 2, 134 .order = 2,
@@ -840,7 +840,7 @@ static int mt9t112_s_stream(struct v4l2_subdev *sd, int enable)
840 840
841static int mt9t112_set_params(struct mt9t112_priv *priv, 841static int mt9t112_set_params(struct mt9t112_priv *priv,
842 const struct v4l2_rect *rect, 842 const struct v4l2_rect *rect,
843 enum v4l2_mbus_pixelcode code) 843 u32 code)
844{ 844{
845 int i; 845 int i;
846 846
@@ -953,7 +953,7 @@ static int mt9t112_try_fmt(struct v4l2_subdev *sd,
953 break; 953 break;
954 954
955 if (i == priv->num_formats) { 955 if (i == priv->num_formats) {
956 mf->code = V4L2_MBUS_FMT_UYVY8_2X8; 956 mf->code = MEDIA_BUS_FMT_UYVY8_2X8;
957 mf->colorspace = V4L2_COLORSPACE_JPEG; 957 mf->colorspace = V4L2_COLORSPACE_JPEG;
958 } else { 958 } else {
959 mf->colorspace = mt9t112_cfmts[i].colorspace; 959 mf->colorspace = mt9t112_cfmts[i].colorspace;
@@ -967,7 +967,7 @@ static int mt9t112_try_fmt(struct v4l2_subdev *sd,
967} 967}
968 968
969static int mt9t112_enum_fmt(struct v4l2_subdev *sd, unsigned int index, 969static int mt9t112_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
970 enum v4l2_mbus_pixelcode *code) 970 u32 *code)
971{ 971{
972 struct i2c_client *client = v4l2_get_subdevdata(sd); 972 struct i2c_client *client = v4l2_get_subdevdata(sd);
973 struct mt9t112_priv *priv = to_mt9t112(client); 973 struct mt9t112_priv *priv = to_mt9t112(client);
@@ -1101,7 +1101,7 @@ static int mt9t112_probe(struct i2c_client *client,
1101 1101
1102 /* Cannot fail: using the default supported pixel code */ 1102 /* Cannot fail: using the default supported pixel code */
1103 if (!ret) 1103 if (!ret)
1104 mt9t112_set_params(priv, &rect, V4L2_MBUS_FMT_UYVY8_2X8); 1104 mt9t112_set_params(priv, &rect, MEDIA_BUS_FMT_UYVY8_2X8);
1105 else 1105 else
1106 v4l2_clk_put(priv->clk); 1106 v4l2_clk_put(priv->clk);
1107 1107
diff --git a/drivers/media/i2c/soc_camera/mt9v022.c b/drivers/media/i2c/soc_camera/mt9v022.c
index 99022c8d76eb..a246d4d64b8b 100644
--- a/drivers/media/i2c/soc_camera/mt9v022.c
+++ b/drivers/media/i2c/soc_camera/mt9v022.c
@@ -85,13 +85,13 @@ MODULE_PARM_DESC(sensor_type, "Sensor type: \"colour\" or \"monochrome\"");
85 85
86/* MT9V022 has only one fixed colorspace per pixelcode */ 86/* MT9V022 has only one fixed colorspace per pixelcode */
87struct mt9v022_datafmt { 87struct mt9v022_datafmt {
88 enum v4l2_mbus_pixelcode code; 88 u32 code;
89 enum v4l2_colorspace colorspace; 89 enum v4l2_colorspace colorspace;
90}; 90};
91 91
92/* Find a data format by a pixel code in an array */ 92/* Find a data format by a pixel code in an array */
93static const struct mt9v022_datafmt *mt9v022_find_datafmt( 93static const struct mt9v022_datafmt *mt9v022_find_datafmt(
94 enum v4l2_mbus_pixelcode code, const struct mt9v022_datafmt *fmt, 94 u32 code, const struct mt9v022_datafmt *fmt,
95 int n) 95 int n)
96{ 96{
97 int i; 97 int i;
@@ -107,14 +107,14 @@ static const struct mt9v022_datafmt mt9v022_colour_fmts[] = {
107 * Order important: first natively supported, 107 * Order important: first natively supported,
108 * second supported with a GPIO extender 108 * second supported with a GPIO extender
109 */ 109 */
110 {V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_COLORSPACE_SRGB}, 110 {MEDIA_BUS_FMT_SBGGR10_1X10, V4L2_COLORSPACE_SRGB},
111 {V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_COLORSPACE_SRGB}, 111 {MEDIA_BUS_FMT_SBGGR8_1X8, V4L2_COLORSPACE_SRGB},
112}; 112};
113 113
114static const struct mt9v022_datafmt mt9v022_monochrome_fmts[] = { 114static const struct mt9v022_datafmt mt9v022_monochrome_fmts[] = {
115 /* Order important - see above */ 115 /* Order important - see above */
116 {V4L2_MBUS_FMT_Y10_1X10, V4L2_COLORSPACE_JPEG}, 116 {MEDIA_BUS_FMT_Y10_1X10, V4L2_COLORSPACE_JPEG},
117 {V4L2_MBUS_FMT_Y8_1X8, V4L2_COLORSPACE_JPEG}, 117 {MEDIA_BUS_FMT_Y8_1X8, V4L2_COLORSPACE_JPEG},
118}; 118};
119 119
120/* only registers with different addresses on different mt9v02x sensors */ 120/* only registers with different addresses on different mt9v02x sensors */
@@ -410,13 +410,13 @@ static int mt9v022_s_fmt(struct v4l2_subdev *sd,
410 * .try_mbus_fmt(), datawidth is from our supported format list 410 * .try_mbus_fmt(), datawidth is from our supported format list
411 */ 411 */
412 switch (mf->code) { 412 switch (mf->code) {
413 case V4L2_MBUS_FMT_Y8_1X8: 413 case MEDIA_BUS_FMT_Y8_1X8:
414 case V4L2_MBUS_FMT_Y10_1X10: 414 case MEDIA_BUS_FMT_Y10_1X10:
415 if (mt9v022->model != MT9V022IX7ATM) 415 if (mt9v022->model != MT9V022IX7ATM)
416 return -EINVAL; 416 return -EINVAL;
417 break; 417 break;
418 case V4L2_MBUS_FMT_SBGGR8_1X8: 418 case MEDIA_BUS_FMT_SBGGR8_1X8:
419 case V4L2_MBUS_FMT_SBGGR10_1X10: 419 case MEDIA_BUS_FMT_SBGGR10_1X10:
420 if (mt9v022->model != MT9V022IX7ATC) 420 if (mt9v022->model != MT9V022IX7ATC)
421 return -EINVAL; 421 return -EINVAL;
422 break; 422 break;
@@ -443,8 +443,8 @@ static int mt9v022_try_fmt(struct v4l2_subdev *sd,
443 struct i2c_client *client = v4l2_get_subdevdata(sd); 443 struct i2c_client *client = v4l2_get_subdevdata(sd);
444 struct mt9v022 *mt9v022 = to_mt9v022(client); 444 struct mt9v022 *mt9v022 = to_mt9v022(client);
445 const struct mt9v022_datafmt *fmt; 445 const struct mt9v022_datafmt *fmt;
446 int align = mf->code == V4L2_MBUS_FMT_SBGGR8_1X8 || 446 int align = mf->code == MEDIA_BUS_FMT_SBGGR8_1X8 ||
447 mf->code == V4L2_MBUS_FMT_SBGGR10_1X10; 447 mf->code == MEDIA_BUS_FMT_SBGGR10_1X10;
448 448
449 v4l_bound_align_image(&mf->width, MT9V022_MIN_WIDTH, 449 v4l_bound_align_image(&mf->width, MT9V022_MIN_WIDTH,
450 MT9V022_MAX_WIDTH, align, 450 MT9V022_MAX_WIDTH, align,
@@ -759,7 +759,7 @@ static struct v4l2_subdev_core_ops mt9v022_subdev_core_ops = {
759}; 759};
760 760
761static int mt9v022_enum_fmt(struct v4l2_subdev *sd, unsigned int index, 761static int mt9v022_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
762 enum v4l2_mbus_pixelcode *code) 762 u32 *code)
763{ 763{
764 struct i2c_client *client = v4l2_get_subdevdata(sd); 764 struct i2c_client *client = v4l2_get_subdevdata(sd);
765 struct mt9v022 *mt9v022 = to_mt9v022(client); 765 struct mt9v022 *mt9v022 = to_mt9v022(client);
diff --git a/drivers/media/i2c/soc_camera/ov2640.c b/drivers/media/i2c/soc_camera/ov2640.c
index 6c6b1c3b45e3..6f2dd9093d94 100644
--- a/drivers/media/i2c/soc_camera/ov2640.c
+++ b/drivers/media/i2c/soc_camera/ov2640.c
@@ -302,7 +302,7 @@ struct ov2640_win_size {
302struct ov2640_priv { 302struct ov2640_priv {
303 struct v4l2_subdev subdev; 303 struct v4l2_subdev subdev;
304 struct v4l2_ctrl_handler hdl; 304 struct v4l2_ctrl_handler hdl;
305 enum v4l2_mbus_pixelcode cfmt_code; 305 u32 cfmt_code;
306 struct v4l2_clk *clk; 306 struct v4l2_clk *clk;
307 const struct ov2640_win_size *win; 307 const struct ov2640_win_size *win;
308}; 308};
@@ -623,11 +623,11 @@ static const struct regval_list ov2640_rgb565_le_regs[] = {
623 ENDMARKER, 623 ENDMARKER,
624}; 624};
625 625
626static enum v4l2_mbus_pixelcode ov2640_codes[] = { 626static u32 ov2640_codes[] = {
627 V4L2_MBUS_FMT_YUYV8_2X8, 627 MEDIA_BUS_FMT_YUYV8_2X8,
628 V4L2_MBUS_FMT_UYVY8_2X8, 628 MEDIA_BUS_FMT_UYVY8_2X8,
629 V4L2_MBUS_FMT_RGB565_2X8_BE, 629 MEDIA_BUS_FMT_RGB565_2X8_BE,
630 V4L2_MBUS_FMT_RGB565_2X8_LE, 630 MEDIA_BUS_FMT_RGB565_2X8_LE,
631}; 631};
632 632
633/* 633/*
@@ -785,7 +785,7 @@ static const struct ov2640_win_size *ov2640_select_win(u32 *width, u32 *height)
785} 785}
786 786
787static int ov2640_set_params(struct i2c_client *client, u32 *width, u32 *height, 787static int ov2640_set_params(struct i2c_client *client, u32 *width, u32 *height,
788 enum v4l2_mbus_pixelcode code) 788 u32 code)
789{ 789{
790 struct ov2640_priv *priv = to_ov2640(client); 790 struct ov2640_priv *priv = to_ov2640(client);
791 const struct regval_list *selected_cfmt_regs; 791 const struct regval_list *selected_cfmt_regs;
@@ -797,20 +797,20 @@ static int ov2640_set_params(struct i2c_client *client, u32 *width, u32 *height,
797 /* select format */ 797 /* select format */
798 priv->cfmt_code = 0; 798 priv->cfmt_code = 0;
799 switch (code) { 799 switch (code) {
800 case V4L2_MBUS_FMT_RGB565_2X8_BE: 800 case MEDIA_BUS_FMT_RGB565_2X8_BE:
801 dev_dbg(&client->dev, "%s: Selected cfmt RGB565 BE", __func__); 801 dev_dbg(&client->dev, "%s: Selected cfmt RGB565 BE", __func__);
802 selected_cfmt_regs = ov2640_rgb565_be_regs; 802 selected_cfmt_regs = ov2640_rgb565_be_regs;
803 break; 803 break;
804 case V4L2_MBUS_FMT_RGB565_2X8_LE: 804 case MEDIA_BUS_FMT_RGB565_2X8_LE:
805 dev_dbg(&client->dev, "%s: Selected cfmt RGB565 LE", __func__); 805 dev_dbg(&client->dev, "%s: Selected cfmt RGB565 LE", __func__);
806 selected_cfmt_regs = ov2640_rgb565_le_regs; 806 selected_cfmt_regs = ov2640_rgb565_le_regs;
807 break; 807 break;
808 case V4L2_MBUS_FMT_YUYV8_2X8: 808 case MEDIA_BUS_FMT_YUYV8_2X8:
809 dev_dbg(&client->dev, "%s: Selected cfmt YUYV (YUV422)", __func__); 809 dev_dbg(&client->dev, "%s: Selected cfmt YUYV (YUV422)", __func__);
810 selected_cfmt_regs = ov2640_yuyv_regs; 810 selected_cfmt_regs = ov2640_yuyv_regs;
811 break; 811 break;
812 default: 812 default:
813 case V4L2_MBUS_FMT_UYVY8_2X8: 813 case MEDIA_BUS_FMT_UYVY8_2X8:
814 dev_dbg(&client->dev, "%s: Selected cfmt UYVY", __func__); 814 dev_dbg(&client->dev, "%s: Selected cfmt UYVY", __func__);
815 selected_cfmt_regs = ov2640_uyvy_regs; 815 selected_cfmt_regs = ov2640_uyvy_regs;
816 } 816 }
@@ -869,7 +869,7 @@ static int ov2640_g_fmt(struct v4l2_subdev *sd,
869 if (!priv->win) { 869 if (!priv->win) {
870 u32 width = W_SVGA, height = H_SVGA; 870 u32 width = W_SVGA, height = H_SVGA;
871 priv->win = ov2640_select_win(&width, &height); 871 priv->win = ov2640_select_win(&width, &height);
872 priv->cfmt_code = V4L2_MBUS_FMT_UYVY8_2X8; 872 priv->cfmt_code = MEDIA_BUS_FMT_UYVY8_2X8;
873 } 873 }
874 874
875 mf->width = priv->win->width; 875 mf->width = priv->win->width;
@@ -877,13 +877,13 @@ static int ov2640_g_fmt(struct v4l2_subdev *sd,
877 mf->code = priv->cfmt_code; 877 mf->code = priv->cfmt_code;
878 878
879 switch (mf->code) { 879 switch (mf->code) {
880 case V4L2_MBUS_FMT_RGB565_2X8_BE: 880 case MEDIA_BUS_FMT_RGB565_2X8_BE:
881 case V4L2_MBUS_FMT_RGB565_2X8_LE: 881 case MEDIA_BUS_FMT_RGB565_2X8_LE:
882 mf->colorspace = V4L2_COLORSPACE_SRGB; 882 mf->colorspace = V4L2_COLORSPACE_SRGB;
883 break; 883 break;
884 default: 884 default:
885 case V4L2_MBUS_FMT_YUYV8_2X8: 885 case MEDIA_BUS_FMT_YUYV8_2X8:
886 case V4L2_MBUS_FMT_UYVY8_2X8: 886 case MEDIA_BUS_FMT_UYVY8_2X8:
887 mf->colorspace = V4L2_COLORSPACE_JPEG; 887 mf->colorspace = V4L2_COLORSPACE_JPEG;
888 } 888 }
889 mf->field = V4L2_FIELD_NONE; 889 mf->field = V4L2_FIELD_NONE;
@@ -899,14 +899,14 @@ static int ov2640_s_fmt(struct v4l2_subdev *sd,
899 899
900 900
901 switch (mf->code) { 901 switch (mf->code) {
902 case V4L2_MBUS_FMT_RGB565_2X8_BE: 902 case MEDIA_BUS_FMT_RGB565_2X8_BE:
903 case V4L2_MBUS_FMT_RGB565_2X8_LE: 903 case MEDIA_BUS_FMT_RGB565_2X8_LE:
904 mf->colorspace = V4L2_COLORSPACE_SRGB; 904 mf->colorspace = V4L2_COLORSPACE_SRGB;
905 break; 905 break;
906 default: 906 default:
907 mf->code = V4L2_MBUS_FMT_UYVY8_2X8; 907 mf->code = MEDIA_BUS_FMT_UYVY8_2X8;
908 case V4L2_MBUS_FMT_YUYV8_2X8: 908 case MEDIA_BUS_FMT_YUYV8_2X8:
909 case V4L2_MBUS_FMT_UYVY8_2X8: 909 case MEDIA_BUS_FMT_UYVY8_2X8:
910 mf->colorspace = V4L2_COLORSPACE_JPEG; 910 mf->colorspace = V4L2_COLORSPACE_JPEG;
911 } 911 }
912 912
@@ -926,14 +926,14 @@ static int ov2640_try_fmt(struct v4l2_subdev *sd,
926 mf->field = V4L2_FIELD_NONE; 926 mf->field = V4L2_FIELD_NONE;
927 927
928 switch (mf->code) { 928 switch (mf->code) {
929 case V4L2_MBUS_FMT_RGB565_2X8_BE: 929 case MEDIA_BUS_FMT_RGB565_2X8_BE:
930 case V4L2_MBUS_FMT_RGB565_2X8_LE: 930 case MEDIA_BUS_FMT_RGB565_2X8_LE:
931 mf->colorspace = V4L2_COLORSPACE_SRGB; 931 mf->colorspace = V4L2_COLORSPACE_SRGB;
932 break; 932 break;
933 default: 933 default:
934 mf->code = V4L2_MBUS_FMT_UYVY8_2X8; 934 mf->code = MEDIA_BUS_FMT_UYVY8_2X8;
935 case V4L2_MBUS_FMT_YUYV8_2X8: 935 case MEDIA_BUS_FMT_YUYV8_2X8:
936 case V4L2_MBUS_FMT_UYVY8_2X8: 936 case MEDIA_BUS_FMT_UYVY8_2X8:
937 mf->colorspace = V4L2_COLORSPACE_JPEG; 937 mf->colorspace = V4L2_COLORSPACE_JPEG;
938 } 938 }
939 939
@@ -941,7 +941,7 @@ static int ov2640_try_fmt(struct v4l2_subdev *sd,
941} 941}
942 942
943static int ov2640_enum_fmt(struct v4l2_subdev *sd, unsigned int index, 943static int ov2640_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
944 enum v4l2_mbus_pixelcode *code) 944 u32 *code)
945{ 945{
946 if (index >= ARRAY_SIZE(ov2640_codes)) 946 if (index >= ARRAY_SIZE(ov2640_codes))
947 return -EINVAL; 947 return -EINVAL;
diff --git a/drivers/media/i2c/soc_camera/ov5642.c b/drivers/media/i2c/soc_camera/ov5642.c
index d2daa6a8f272..93ae031bdafb 100644
--- a/drivers/media/i2c/soc_camera/ov5642.c
+++ b/drivers/media/i2c/soc_camera/ov5642.c
@@ -602,7 +602,7 @@ static struct regval_list ov5642_default_regs_finalise[] = {
602}; 602};
603 603
604struct ov5642_datafmt { 604struct ov5642_datafmt {
605 enum v4l2_mbus_pixelcode code; 605 u32 code;
606 enum v4l2_colorspace colorspace; 606 enum v4l2_colorspace colorspace;
607}; 607};
608 608
@@ -618,7 +618,7 @@ struct ov5642 {
618}; 618};
619 619
620static const struct ov5642_datafmt ov5642_colour_fmts[] = { 620static const struct ov5642_datafmt ov5642_colour_fmts[] = {
621 {V4L2_MBUS_FMT_UYVY8_2X8, V4L2_COLORSPACE_JPEG}, 621 {MEDIA_BUS_FMT_UYVY8_2X8, V4L2_COLORSPACE_JPEG},
622}; 622};
623 623
624static struct ov5642 *to_ov5642(const struct i2c_client *client) 624static struct ov5642 *to_ov5642(const struct i2c_client *client)
@@ -628,7 +628,7 @@ static struct ov5642 *to_ov5642(const struct i2c_client *client)
628 628
629/* Find a data format by a pixel code in an array */ 629/* Find a data format by a pixel code in an array */
630static const struct ov5642_datafmt 630static const struct ov5642_datafmt
631 *ov5642_find_datafmt(enum v4l2_mbus_pixelcode code) 631 *ov5642_find_datafmt(u32 code)
632{ 632{
633 int i; 633 int i;
634 634
@@ -840,7 +840,7 @@ static int ov5642_g_fmt(struct v4l2_subdev *sd,
840} 840}
841 841
842static int ov5642_enum_fmt(struct v4l2_subdev *sd, unsigned int index, 842static int ov5642_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
843 enum v4l2_mbus_pixelcode *code) 843 u32 *code)
844{ 844{
845 if (index >= ARRAY_SIZE(ov5642_colour_fmts)) 845 if (index >= ARRAY_SIZE(ov5642_colour_fmts))
846 return -EINVAL; 846 return -EINVAL;
diff --git a/drivers/media/i2c/soc_camera/ov6650.c b/drivers/media/i2c/soc_camera/ov6650.c
index ab01598ec83f..f4eef2fa6f6f 100644
--- a/drivers/media/i2c/soc_camera/ov6650.c
+++ b/drivers/media/i2c/soc_camera/ov6650.c
@@ -202,18 +202,18 @@ struct ov6650 {
202 unsigned long pclk_limit; /* from host */ 202 unsigned long pclk_limit; /* from host */
203 unsigned long pclk_max; /* from resolution and format */ 203 unsigned long pclk_max; /* from resolution and format */
204 struct v4l2_fract tpf; /* as requested with s_parm */ 204 struct v4l2_fract tpf; /* as requested with s_parm */
205 enum v4l2_mbus_pixelcode code; 205 u32 code;
206 enum v4l2_colorspace colorspace; 206 enum v4l2_colorspace colorspace;
207}; 207};
208 208
209 209
210static enum v4l2_mbus_pixelcode ov6650_codes[] = { 210static u32 ov6650_codes[] = {
211 V4L2_MBUS_FMT_YUYV8_2X8, 211 MEDIA_BUS_FMT_YUYV8_2X8,
212 V4L2_MBUS_FMT_UYVY8_2X8, 212 MEDIA_BUS_FMT_UYVY8_2X8,
213 V4L2_MBUS_FMT_YVYU8_2X8, 213 MEDIA_BUS_FMT_YVYU8_2X8,
214 V4L2_MBUS_FMT_VYUY8_2X8, 214 MEDIA_BUS_FMT_VYUY8_2X8,
215 V4L2_MBUS_FMT_SBGGR8_1X8, 215 MEDIA_BUS_FMT_SBGGR8_1X8,
216 V4L2_MBUS_FMT_Y8_1X8, 216 MEDIA_BUS_FMT_Y8_1X8,
217}; 217};
218 218
219/* read a register */ 219/* read a register */
@@ -555,29 +555,29 @@ static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
555 .height = mf->height << half_scale, 555 .height = mf->height << half_scale,
556 }, 556 },
557 }; 557 };
558 enum v4l2_mbus_pixelcode code = mf->code; 558 u32 code = mf->code;
559 unsigned long mclk, pclk; 559 unsigned long mclk, pclk;
560 u8 coma_set = 0, coma_mask = 0, coml_set, coml_mask, clkrc; 560 u8 coma_set = 0, coma_mask = 0, coml_set, coml_mask, clkrc;
561 int ret; 561 int ret;
562 562
563 /* select color matrix configuration for given color encoding */ 563 /* select color matrix configuration for given color encoding */
564 switch (code) { 564 switch (code) {
565 case V4L2_MBUS_FMT_Y8_1X8: 565 case MEDIA_BUS_FMT_Y8_1X8:
566 dev_dbg(&client->dev, "pixel format GREY8_1X8\n"); 566 dev_dbg(&client->dev, "pixel format GREY8_1X8\n");
567 coma_mask |= COMA_RGB | COMA_WORD_SWAP | COMA_BYTE_SWAP; 567 coma_mask |= COMA_RGB | COMA_WORD_SWAP | COMA_BYTE_SWAP;
568 coma_set |= COMA_BW; 568 coma_set |= COMA_BW;
569 break; 569 break;
570 case V4L2_MBUS_FMT_YUYV8_2X8: 570 case MEDIA_BUS_FMT_YUYV8_2X8:
571 dev_dbg(&client->dev, "pixel format YUYV8_2X8_LE\n"); 571 dev_dbg(&client->dev, "pixel format YUYV8_2X8_LE\n");
572 coma_mask |= COMA_RGB | COMA_BW | COMA_BYTE_SWAP; 572 coma_mask |= COMA_RGB | COMA_BW | COMA_BYTE_SWAP;
573 coma_set |= COMA_WORD_SWAP; 573 coma_set |= COMA_WORD_SWAP;
574 break; 574 break;
575 case V4L2_MBUS_FMT_YVYU8_2X8: 575 case MEDIA_BUS_FMT_YVYU8_2X8:
576 dev_dbg(&client->dev, "pixel format YVYU8_2X8_LE (untested)\n"); 576 dev_dbg(&client->dev, "pixel format YVYU8_2X8_LE (untested)\n");
577 coma_mask |= COMA_RGB | COMA_BW | COMA_WORD_SWAP | 577 coma_mask |= COMA_RGB | COMA_BW | COMA_WORD_SWAP |
578 COMA_BYTE_SWAP; 578 COMA_BYTE_SWAP;
579 break; 579 break;
580 case V4L2_MBUS_FMT_UYVY8_2X8: 580 case MEDIA_BUS_FMT_UYVY8_2X8:
581 dev_dbg(&client->dev, "pixel format YUYV8_2X8_BE\n"); 581 dev_dbg(&client->dev, "pixel format YUYV8_2X8_BE\n");
582 if (half_scale) { 582 if (half_scale) {
583 coma_mask |= COMA_RGB | COMA_BW | COMA_WORD_SWAP; 583 coma_mask |= COMA_RGB | COMA_BW | COMA_WORD_SWAP;
@@ -587,7 +587,7 @@ static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
587 coma_set |= COMA_BYTE_SWAP | COMA_WORD_SWAP; 587 coma_set |= COMA_BYTE_SWAP | COMA_WORD_SWAP;
588 } 588 }
589 break; 589 break;
590 case V4L2_MBUS_FMT_VYUY8_2X8: 590 case MEDIA_BUS_FMT_VYUY8_2X8:
591 dev_dbg(&client->dev, "pixel format YVYU8_2X8_BE (untested)\n"); 591 dev_dbg(&client->dev, "pixel format YVYU8_2X8_BE (untested)\n");
592 if (half_scale) { 592 if (half_scale) {
593 coma_mask |= COMA_RGB | COMA_BW; 593 coma_mask |= COMA_RGB | COMA_BW;
@@ -597,7 +597,7 @@ static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
597 coma_set |= COMA_BYTE_SWAP; 597 coma_set |= COMA_BYTE_SWAP;
598 } 598 }
599 break; 599 break;
600 case V4L2_MBUS_FMT_SBGGR8_1X8: 600 case MEDIA_BUS_FMT_SBGGR8_1X8:
601 dev_dbg(&client->dev, "pixel format SBGGR8_1X8 (untested)\n"); 601 dev_dbg(&client->dev, "pixel format SBGGR8_1X8 (untested)\n");
602 coma_mask |= COMA_BW | COMA_BYTE_SWAP | COMA_WORD_SWAP; 602 coma_mask |= COMA_BW | COMA_BYTE_SWAP | COMA_WORD_SWAP;
603 coma_set |= COMA_RAW_RGB | COMA_RGB; 603 coma_set |= COMA_RAW_RGB | COMA_RGB;
@@ -608,8 +608,8 @@ static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
608 } 608 }
609 priv->code = code; 609 priv->code = code;
610 610
611 if (code == V4L2_MBUS_FMT_Y8_1X8 || 611 if (code == MEDIA_BUS_FMT_Y8_1X8 ||
612 code == V4L2_MBUS_FMT_SBGGR8_1X8) { 612 code == MEDIA_BUS_FMT_SBGGR8_1X8) {
613 coml_mask = COML_ONE_CHANNEL; 613 coml_mask = COML_ONE_CHANNEL;
614 coml_set = 0; 614 coml_set = 0;
615 priv->pclk_max = 4000000; 615 priv->pclk_max = 4000000;
@@ -619,7 +619,7 @@ static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
619 priv->pclk_max = 8000000; 619 priv->pclk_max = 8000000;
620 } 620 }
621 621
622 if (code == V4L2_MBUS_FMT_SBGGR8_1X8) 622 if (code == MEDIA_BUS_FMT_SBGGR8_1X8)
623 priv->colorspace = V4L2_COLORSPACE_SRGB; 623 priv->colorspace = V4L2_COLORSPACE_SRGB;
624 else if (code != 0) 624 else if (code != 0)
625 priv->colorspace = V4L2_COLORSPACE_JPEG; 625 priv->colorspace = V4L2_COLORSPACE_JPEG;
@@ -697,18 +697,18 @@ static int ov6650_try_fmt(struct v4l2_subdev *sd,
697 mf->field = V4L2_FIELD_NONE; 697 mf->field = V4L2_FIELD_NONE;
698 698
699 switch (mf->code) { 699 switch (mf->code) {
700 case V4L2_MBUS_FMT_Y10_1X10: 700 case MEDIA_BUS_FMT_Y10_1X10:
701 mf->code = V4L2_MBUS_FMT_Y8_1X8; 701 mf->code = MEDIA_BUS_FMT_Y8_1X8;
702 case V4L2_MBUS_FMT_Y8_1X8: 702 case MEDIA_BUS_FMT_Y8_1X8:
703 case V4L2_MBUS_FMT_YVYU8_2X8: 703 case MEDIA_BUS_FMT_YVYU8_2X8:
704 case V4L2_MBUS_FMT_YUYV8_2X8: 704 case MEDIA_BUS_FMT_YUYV8_2X8:
705 case V4L2_MBUS_FMT_VYUY8_2X8: 705 case MEDIA_BUS_FMT_VYUY8_2X8:
706 case V4L2_MBUS_FMT_UYVY8_2X8: 706 case MEDIA_BUS_FMT_UYVY8_2X8:
707 mf->colorspace = V4L2_COLORSPACE_JPEG; 707 mf->colorspace = V4L2_COLORSPACE_JPEG;
708 break; 708 break;
709 default: 709 default:
710 mf->code = V4L2_MBUS_FMT_SBGGR8_1X8; 710 mf->code = MEDIA_BUS_FMT_SBGGR8_1X8;
711 case V4L2_MBUS_FMT_SBGGR8_1X8: 711 case MEDIA_BUS_FMT_SBGGR8_1X8:
712 mf->colorspace = V4L2_COLORSPACE_SRGB; 712 mf->colorspace = V4L2_COLORSPACE_SRGB;
713 break; 713 break;
714 } 714 }
@@ -717,7 +717,7 @@ static int ov6650_try_fmt(struct v4l2_subdev *sd,
717} 717}
718 718
719static int ov6650_enum_fmt(struct v4l2_subdev *sd, unsigned int index, 719static int ov6650_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
720 enum v4l2_mbus_pixelcode *code) 720 u32 *code)
721{ 721{
722 if (index >= ARRAY_SIZE(ov6650_codes)) 722 if (index >= ARRAY_SIZE(ov6650_codes))
723 return -EINVAL; 723 return -EINVAL;
@@ -1013,7 +1013,7 @@ static int ov6650_probe(struct i2c_client *client,
1013 priv->rect.width = W_CIF; 1013 priv->rect.width = W_CIF;
1014 priv->rect.height = H_CIF; 1014 priv->rect.height = H_CIF;
1015 priv->half_scale = false; 1015 priv->half_scale = false;
1016 priv->code = V4L2_MBUS_FMT_YUYV8_2X8; 1016 priv->code = MEDIA_BUS_FMT_YUYV8_2X8;
1017 priv->colorspace = V4L2_COLORSPACE_JPEG; 1017 priv->colorspace = V4L2_COLORSPACE_JPEG;
1018 1018
1019 priv->clk = v4l2_clk_get(&client->dev, "mclk"); 1019 priv->clk = v4l2_clk_get(&client->dev, "mclk");
diff --git a/drivers/media/i2c/soc_camera/ov772x.c b/drivers/media/i2c/soc_camera/ov772x.c
index 970a04e1e56e..8daac88b33fe 100644
--- a/drivers/media/i2c/soc_camera/ov772x.c
+++ b/drivers/media/i2c/soc_camera/ov772x.c
@@ -376,7 +376,7 @@
376 */ 376 */
377 377
378struct ov772x_color_format { 378struct ov772x_color_format {
379 enum v4l2_mbus_pixelcode code; 379 u32 code;
380 enum v4l2_colorspace colorspace; 380 enum v4l2_colorspace colorspace;
381 u8 dsp3; 381 u8 dsp3;
382 u8 dsp4; 382 u8 dsp4;
@@ -408,7 +408,7 @@ struct ov772x_priv {
408 */ 408 */
409static const struct ov772x_color_format ov772x_cfmts[] = { 409static const struct ov772x_color_format ov772x_cfmts[] = {
410 { 410 {
411 .code = V4L2_MBUS_FMT_YUYV8_2X8, 411 .code = MEDIA_BUS_FMT_YUYV8_2X8,
412 .colorspace = V4L2_COLORSPACE_JPEG, 412 .colorspace = V4L2_COLORSPACE_JPEG,
413 .dsp3 = 0x0, 413 .dsp3 = 0x0,
414 .dsp4 = DSP_OFMT_YUV, 414 .dsp4 = DSP_OFMT_YUV,
@@ -416,7 +416,7 @@ static const struct ov772x_color_format ov772x_cfmts[] = {
416 .com7 = OFMT_YUV, 416 .com7 = OFMT_YUV,
417 }, 417 },
418 { 418 {
419 .code = V4L2_MBUS_FMT_YVYU8_2X8, 419 .code = MEDIA_BUS_FMT_YVYU8_2X8,
420 .colorspace = V4L2_COLORSPACE_JPEG, 420 .colorspace = V4L2_COLORSPACE_JPEG,
421 .dsp3 = UV_ON, 421 .dsp3 = UV_ON,
422 .dsp4 = DSP_OFMT_YUV, 422 .dsp4 = DSP_OFMT_YUV,
@@ -424,7 +424,7 @@ static const struct ov772x_color_format ov772x_cfmts[] = {
424 .com7 = OFMT_YUV, 424 .com7 = OFMT_YUV,
425 }, 425 },
426 { 426 {
427 .code = V4L2_MBUS_FMT_UYVY8_2X8, 427 .code = MEDIA_BUS_FMT_UYVY8_2X8,
428 .colorspace = V4L2_COLORSPACE_JPEG, 428 .colorspace = V4L2_COLORSPACE_JPEG,
429 .dsp3 = 0x0, 429 .dsp3 = 0x0,
430 .dsp4 = DSP_OFMT_YUV, 430 .dsp4 = DSP_OFMT_YUV,
@@ -432,7 +432,7 @@ static const struct ov772x_color_format ov772x_cfmts[] = {
432 .com7 = OFMT_YUV, 432 .com7 = OFMT_YUV,
433 }, 433 },
434 { 434 {
435 .code = V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE, 435 .code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE,
436 .colorspace = V4L2_COLORSPACE_SRGB, 436 .colorspace = V4L2_COLORSPACE_SRGB,
437 .dsp3 = 0x0, 437 .dsp3 = 0x0,
438 .dsp4 = DSP_OFMT_YUV, 438 .dsp4 = DSP_OFMT_YUV,
@@ -440,7 +440,7 @@ static const struct ov772x_color_format ov772x_cfmts[] = {
440 .com7 = FMT_RGB555 | OFMT_RGB, 440 .com7 = FMT_RGB555 | OFMT_RGB,
441 }, 441 },
442 { 442 {
443 .code = V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE, 443 .code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE,
444 .colorspace = V4L2_COLORSPACE_SRGB, 444 .colorspace = V4L2_COLORSPACE_SRGB,
445 .dsp3 = 0x0, 445 .dsp3 = 0x0,
446 .dsp4 = DSP_OFMT_YUV, 446 .dsp4 = DSP_OFMT_YUV,
@@ -448,7 +448,7 @@ static const struct ov772x_color_format ov772x_cfmts[] = {
448 .com7 = FMT_RGB555 | OFMT_RGB, 448 .com7 = FMT_RGB555 | OFMT_RGB,
449 }, 449 },
450 { 450 {
451 .code = V4L2_MBUS_FMT_RGB565_2X8_LE, 451 .code = MEDIA_BUS_FMT_RGB565_2X8_LE,
452 .colorspace = V4L2_COLORSPACE_SRGB, 452 .colorspace = V4L2_COLORSPACE_SRGB,
453 .dsp3 = 0x0, 453 .dsp3 = 0x0,
454 .dsp4 = DSP_OFMT_YUV, 454 .dsp4 = DSP_OFMT_YUV,
@@ -456,7 +456,7 @@ static const struct ov772x_color_format ov772x_cfmts[] = {
456 .com7 = FMT_RGB565 | OFMT_RGB, 456 .com7 = FMT_RGB565 | OFMT_RGB,
457 }, 457 },
458 { 458 {
459 .code = V4L2_MBUS_FMT_RGB565_2X8_BE, 459 .code = MEDIA_BUS_FMT_RGB565_2X8_BE,
460 .colorspace = V4L2_COLORSPACE_SRGB, 460 .colorspace = V4L2_COLORSPACE_SRGB,
461 .dsp3 = 0x0, 461 .dsp3 = 0x0,
462 .dsp4 = DSP_OFMT_YUV, 462 .dsp4 = DSP_OFMT_YUV,
@@ -468,7 +468,7 @@ static const struct ov772x_color_format ov772x_cfmts[] = {
468 * regardless of the COM7 value. We can thus only support 10-bit 468 * regardless of the COM7 value. We can thus only support 10-bit
469 * Bayer until someone figures it out. 469 * Bayer until someone figures it out.
470 */ 470 */
471 .code = V4L2_MBUS_FMT_SBGGR10_1X10, 471 .code = MEDIA_BUS_FMT_SBGGR10_1X10,
472 .colorspace = V4L2_COLORSPACE_SRGB, 472 .colorspace = V4L2_COLORSPACE_SRGB,
473 .dsp3 = 0x0, 473 .dsp3 = 0x0,
474 .dsp4 = DSP_OFMT_RAW10, 474 .dsp4 = DSP_OFMT_RAW10,
@@ -990,7 +990,7 @@ static struct v4l2_subdev_core_ops ov772x_subdev_core_ops = {
990}; 990};
991 991
992static int ov772x_enum_fmt(struct v4l2_subdev *sd, unsigned int index, 992static int ov772x_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
993 enum v4l2_mbus_pixelcode *code) 993 u32 *code)
994{ 994{
995 if (index >= ARRAY_SIZE(ov772x_cfmts)) 995 if (index >= ARRAY_SIZE(ov772x_cfmts))
996 return -EINVAL; 996 return -EINVAL;
diff --git a/drivers/media/i2c/soc_camera/ov9640.c b/drivers/media/i2c/soc_camera/ov9640.c
index bc74224503e7..aa93d2e88572 100644
--- a/drivers/media/i2c/soc_camera/ov9640.c
+++ b/drivers/media/i2c/soc_camera/ov9640.c
@@ -159,10 +159,10 @@ static const struct ov9640_reg ov9640_regs_rgb[] = {
159 { OV9640_MTXS, 0x65 }, 159 { OV9640_MTXS, 0x65 },
160}; 160};
161 161
162static enum v4l2_mbus_pixelcode ov9640_codes[] = { 162static u32 ov9640_codes[] = {
163 V4L2_MBUS_FMT_UYVY8_2X8, 163 MEDIA_BUS_FMT_UYVY8_2X8,
164 V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE, 164 MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE,
165 V4L2_MBUS_FMT_RGB565_2X8_LE, 165 MEDIA_BUS_FMT_RGB565_2X8_LE,
166}; 166};
167 167
168/* read a register */ 168/* read a register */
@@ -351,22 +351,22 @@ static void ov9640_res_roundup(u32 *width, u32 *height)
351} 351}
352 352
353/* Prepare necessary register changes depending on color encoding */ 353/* Prepare necessary register changes depending on color encoding */
354static void ov9640_alter_regs(enum v4l2_mbus_pixelcode code, 354static void ov9640_alter_regs(u32 code,
355 struct ov9640_reg_alt *alt) 355 struct ov9640_reg_alt *alt)
356{ 356{
357 switch (code) { 357 switch (code) {
358 default: 358 default:
359 case V4L2_MBUS_FMT_UYVY8_2X8: 359 case MEDIA_BUS_FMT_UYVY8_2X8:
360 alt->com12 = OV9640_COM12_YUV_AVG; 360 alt->com12 = OV9640_COM12_YUV_AVG;
361 alt->com13 = OV9640_COM13_Y_DELAY_EN | 361 alt->com13 = OV9640_COM13_Y_DELAY_EN |
362 OV9640_COM13_YUV_DLY(0x01); 362 OV9640_COM13_YUV_DLY(0x01);
363 break; 363 break;
364 case V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE: 364 case MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE:
365 alt->com7 = OV9640_COM7_RGB; 365 alt->com7 = OV9640_COM7_RGB;
366 alt->com13 = OV9640_COM13_RGB_AVG; 366 alt->com13 = OV9640_COM13_RGB_AVG;
367 alt->com15 = OV9640_COM15_RGB_555; 367 alt->com15 = OV9640_COM15_RGB_555;
368 break; 368 break;
369 case V4L2_MBUS_FMT_RGB565_2X8_LE: 369 case MEDIA_BUS_FMT_RGB565_2X8_LE:
370 alt->com7 = OV9640_COM7_RGB; 370 alt->com7 = OV9640_COM7_RGB;
371 alt->com13 = OV9640_COM13_RGB_AVG; 371 alt->com13 = OV9640_COM13_RGB_AVG;
372 alt->com15 = OV9640_COM15_RGB_565; 372 alt->com15 = OV9640_COM15_RGB_565;
@@ -376,7 +376,7 @@ static void ov9640_alter_regs(enum v4l2_mbus_pixelcode code,
376 376
377/* Setup registers according to resolution and color encoding */ 377/* Setup registers according to resolution and color encoding */
378static int ov9640_write_regs(struct i2c_client *client, u32 width, 378static int ov9640_write_regs(struct i2c_client *client, u32 width,
379 enum v4l2_mbus_pixelcode code, struct ov9640_reg_alt *alts) 379 u32 code, struct ov9640_reg_alt *alts)
380{ 380{
381 const struct ov9640_reg *ov9640_regs, *matrix_regs; 381 const struct ov9640_reg *ov9640_regs, *matrix_regs;
382 int ov9640_regs_len, matrix_regs_len; 382 int ov9640_regs_len, matrix_regs_len;
@@ -419,7 +419,7 @@ static int ov9640_write_regs(struct i2c_client *client, u32 width,
419 } 419 }
420 420
421 /* select color matrix configuration for given color encoding */ 421 /* select color matrix configuration for given color encoding */
422 if (code == V4L2_MBUS_FMT_UYVY8_2X8) { 422 if (code == MEDIA_BUS_FMT_UYVY8_2X8) {
423 matrix_regs = ov9640_regs_yuv; 423 matrix_regs = ov9640_regs_yuv;
424 matrix_regs_len = ARRAY_SIZE(ov9640_regs_yuv); 424 matrix_regs_len = ARRAY_SIZE(ov9640_regs_yuv);
425 } else { 425 } else {
@@ -487,7 +487,7 @@ static int ov9640_s_fmt(struct v4l2_subdev *sd,
487 struct i2c_client *client = v4l2_get_subdevdata(sd); 487 struct i2c_client *client = v4l2_get_subdevdata(sd);
488 struct ov9640_reg_alt alts = {0}; 488 struct ov9640_reg_alt alts = {0};
489 enum v4l2_colorspace cspace; 489 enum v4l2_colorspace cspace;
490 enum v4l2_mbus_pixelcode code = mf->code; 490 u32 code = mf->code;
491 int ret; 491 int ret;
492 492
493 ov9640_res_roundup(&mf->width, &mf->height); 493 ov9640_res_roundup(&mf->width, &mf->height);
@@ -500,13 +500,13 @@ static int ov9640_s_fmt(struct v4l2_subdev *sd,
500 return ret; 500 return ret;
501 501
502 switch (code) { 502 switch (code) {
503 case V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE: 503 case MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE:
504 case V4L2_MBUS_FMT_RGB565_2X8_LE: 504 case MEDIA_BUS_FMT_RGB565_2X8_LE:
505 cspace = V4L2_COLORSPACE_SRGB; 505 cspace = V4L2_COLORSPACE_SRGB;
506 break; 506 break;
507 default: 507 default:
508 code = V4L2_MBUS_FMT_UYVY8_2X8; 508 code = MEDIA_BUS_FMT_UYVY8_2X8;
509 case V4L2_MBUS_FMT_UYVY8_2X8: 509 case MEDIA_BUS_FMT_UYVY8_2X8:
510 cspace = V4L2_COLORSPACE_JPEG; 510 cspace = V4L2_COLORSPACE_JPEG;
511 } 511 }
512 512
@@ -527,13 +527,13 @@ static int ov9640_try_fmt(struct v4l2_subdev *sd,
527 mf->field = V4L2_FIELD_NONE; 527 mf->field = V4L2_FIELD_NONE;
528 528
529 switch (mf->code) { 529 switch (mf->code) {
530 case V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE: 530 case MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE:
531 case V4L2_MBUS_FMT_RGB565_2X8_LE: 531 case MEDIA_BUS_FMT_RGB565_2X8_LE:
532 mf->colorspace = V4L2_COLORSPACE_SRGB; 532 mf->colorspace = V4L2_COLORSPACE_SRGB;
533 break; 533 break;
534 default: 534 default:
535 mf->code = V4L2_MBUS_FMT_UYVY8_2X8; 535 mf->code = MEDIA_BUS_FMT_UYVY8_2X8;
536 case V4L2_MBUS_FMT_UYVY8_2X8: 536 case MEDIA_BUS_FMT_UYVY8_2X8:
537 mf->colorspace = V4L2_COLORSPACE_JPEG; 537 mf->colorspace = V4L2_COLORSPACE_JPEG;
538 } 538 }
539 539
@@ -541,7 +541,7 @@ static int ov9640_try_fmt(struct v4l2_subdev *sd,
541} 541}
542 542
543static int ov9640_enum_fmt(struct v4l2_subdev *sd, unsigned int index, 543static int ov9640_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
544 enum v4l2_mbus_pixelcode *code) 544 u32 *code)
545{ 545{
546 if (index >= ARRAY_SIZE(ov9640_codes)) 546 if (index >= ARRAY_SIZE(ov9640_codes))
547 return -EINVAL; 547 return -EINVAL;
diff --git a/drivers/media/i2c/soc_camera/ov9740.c b/drivers/media/i2c/soc_camera/ov9740.c
index ee9eb635d540..841dc55457cf 100644
--- a/drivers/media/i2c/soc_camera/ov9740.c
+++ b/drivers/media/i2c/soc_camera/ov9740.c
@@ -392,8 +392,8 @@ static const struct ov9740_reg ov9740_defaults[] = {
392 { OV9740_ISP_CTRL19, 0x02 }, 392 { OV9740_ISP_CTRL19, 0x02 },
393}; 393};
394 394
395static enum v4l2_mbus_pixelcode ov9740_codes[] = { 395static u32 ov9740_codes[] = {
396 V4L2_MBUS_FMT_YUYV8_2X8, 396 MEDIA_BUS_FMT_YUYV8_2X8,
397}; 397};
398 398
399/* read a register */ 399/* read a register */
@@ -674,13 +674,13 @@ static int ov9740_s_fmt(struct v4l2_subdev *sd,
674 struct i2c_client *client = v4l2_get_subdevdata(sd); 674 struct i2c_client *client = v4l2_get_subdevdata(sd);
675 struct ov9740_priv *priv = to_ov9740(sd); 675 struct ov9740_priv *priv = to_ov9740(sd);
676 enum v4l2_colorspace cspace; 676 enum v4l2_colorspace cspace;
677 enum v4l2_mbus_pixelcode code = mf->code; 677 u32 code = mf->code;
678 int ret; 678 int ret;
679 679
680 ov9740_res_roundup(&mf->width, &mf->height); 680 ov9740_res_roundup(&mf->width, &mf->height);
681 681
682 switch (code) { 682 switch (code) {
683 case V4L2_MBUS_FMT_YUYV8_2X8: 683 case MEDIA_BUS_FMT_YUYV8_2X8:
684 cspace = V4L2_COLORSPACE_SRGB; 684 cspace = V4L2_COLORSPACE_SRGB;
685 break; 685 break;
686 default: 686 default:
@@ -710,14 +710,14 @@ static int ov9740_try_fmt(struct v4l2_subdev *sd,
710 ov9740_res_roundup(&mf->width, &mf->height); 710 ov9740_res_roundup(&mf->width, &mf->height);
711 711
712 mf->field = V4L2_FIELD_NONE; 712 mf->field = V4L2_FIELD_NONE;
713 mf->code = V4L2_MBUS_FMT_YUYV8_2X8; 713 mf->code = MEDIA_BUS_FMT_YUYV8_2X8;
714 mf->colorspace = V4L2_COLORSPACE_SRGB; 714 mf->colorspace = V4L2_COLORSPACE_SRGB;
715 715
716 return 0; 716 return 0;
717} 717}
718 718
719static int ov9740_enum_fmt(struct v4l2_subdev *sd, unsigned int index, 719static int ov9740_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
720 enum v4l2_mbus_pixelcode *code) 720 u32 *code)
721{ 721{
722 if (index >= ARRAY_SIZE(ov9740_codes)) 722 if (index >= ARRAY_SIZE(ov9740_codes))
723 return -EINVAL; 723 return -EINVAL;
diff --git a/drivers/media/i2c/soc_camera/rj54n1cb0c.c b/drivers/media/i2c/soc_camera/rj54n1cb0c.c
index 7e6d97847874..1752428c43c5 100644
--- a/drivers/media/i2c/soc_camera/rj54n1cb0c.c
+++ b/drivers/media/i2c/soc_camera/rj54n1cb0c.c
@@ -111,13 +111,13 @@
111 111
112/* RJ54N1CB0C has only one fixed colorspace per pixelcode */ 112/* RJ54N1CB0C has only one fixed colorspace per pixelcode */
113struct rj54n1_datafmt { 113struct rj54n1_datafmt {
114 enum v4l2_mbus_pixelcode code; 114 u32 code;
115 enum v4l2_colorspace colorspace; 115 enum v4l2_colorspace colorspace;
116}; 116};
117 117
118/* Find a data format by a pixel code in an array */ 118/* Find a data format by a pixel code in an array */
119static const struct rj54n1_datafmt *rj54n1_find_datafmt( 119static const struct rj54n1_datafmt *rj54n1_find_datafmt(
120 enum v4l2_mbus_pixelcode code, const struct rj54n1_datafmt *fmt, 120 u32 code, const struct rj54n1_datafmt *fmt,
121 int n) 121 int n)
122{ 122{
123 int i; 123 int i;
@@ -129,15 +129,15 @@ static const struct rj54n1_datafmt *rj54n1_find_datafmt(
129} 129}
130 130
131static const struct rj54n1_datafmt rj54n1_colour_fmts[] = { 131static const struct rj54n1_datafmt rj54n1_colour_fmts[] = {
132 {V4L2_MBUS_FMT_YUYV8_2X8, V4L2_COLORSPACE_JPEG}, 132 {MEDIA_BUS_FMT_YUYV8_2X8, V4L2_COLORSPACE_JPEG},
133 {V4L2_MBUS_FMT_YVYU8_2X8, V4L2_COLORSPACE_JPEG}, 133 {MEDIA_BUS_FMT_YVYU8_2X8, V4L2_COLORSPACE_JPEG},
134 {V4L2_MBUS_FMT_RGB565_2X8_LE, V4L2_COLORSPACE_SRGB}, 134 {MEDIA_BUS_FMT_RGB565_2X8_LE, V4L2_COLORSPACE_SRGB},
135 {V4L2_MBUS_FMT_RGB565_2X8_BE, V4L2_COLORSPACE_SRGB}, 135 {MEDIA_BUS_FMT_RGB565_2X8_BE, V4L2_COLORSPACE_SRGB},
136 {V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE, V4L2_COLORSPACE_SRGB}, 136 {MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE, V4L2_COLORSPACE_SRGB},
137 {V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_LE, V4L2_COLORSPACE_SRGB}, 137 {MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_LE, V4L2_COLORSPACE_SRGB},
138 {V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_BE, V4L2_COLORSPACE_SRGB}, 138 {MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_BE, V4L2_COLORSPACE_SRGB},
139 {V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_BE, V4L2_COLORSPACE_SRGB}, 139 {MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_BE, V4L2_COLORSPACE_SRGB},
140 {V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_COLORSPACE_SRGB}, 140 {MEDIA_BUS_FMT_SBGGR10_1X10, V4L2_COLORSPACE_SRGB},
141}; 141};
142 142
143struct rj54n1_clock_div { 143struct rj54n1_clock_div {
@@ -486,7 +486,7 @@ static int reg_write_multiple(struct i2c_client *client,
486} 486}
487 487
488static int rj54n1_enum_fmt(struct v4l2_subdev *sd, unsigned int index, 488static int rj54n1_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
489 enum v4l2_mbus_pixelcode *code) 489 u32 *code)
490{ 490{
491 if (index >= ARRAY_SIZE(rj54n1_colour_fmts)) 491 if (index >= ARRAY_SIZE(rj54n1_colour_fmts))
492 return -EINVAL; 492 return -EINVAL;
@@ -965,11 +965,11 @@ static int rj54n1_try_fmt(struct v4l2_subdev *sd,
965 struct i2c_client *client = v4l2_get_subdevdata(sd); 965 struct i2c_client *client = v4l2_get_subdevdata(sd);
966 struct rj54n1 *rj54n1 = to_rj54n1(client); 966 struct rj54n1 *rj54n1 = to_rj54n1(client);
967 const struct rj54n1_datafmt *fmt; 967 const struct rj54n1_datafmt *fmt;
968 int align = mf->code == V4L2_MBUS_FMT_SBGGR10_1X10 || 968 int align = mf->code == MEDIA_BUS_FMT_SBGGR10_1X10 ||
969 mf->code == V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_BE || 969 mf->code == MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_BE ||
970 mf->code == V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_BE || 970 mf->code == MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_BE ||
971 mf->code == V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE || 971 mf->code == MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE ||
972 mf->code == V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_LE; 972 mf->code == MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_LE;
973 973
974 dev_dbg(&client->dev, "%s: code = %d, width = %u, height = %u\n", 974 dev_dbg(&client->dev, "%s: code = %d, width = %u, height = %u\n",
975 __func__, mf->code, mf->width, mf->height); 975 __func__, mf->code, mf->width, mf->height);
@@ -1025,55 +1025,55 @@ static int rj54n1_s_fmt(struct v4l2_subdev *sd,
1025 1025
1026 /* RA_SEL_UL is only relevant for raw modes, ignored otherwise. */ 1026 /* RA_SEL_UL is only relevant for raw modes, ignored otherwise. */
1027 switch (mf->code) { 1027 switch (mf->code) {
1028 case V4L2_MBUS_FMT_YUYV8_2X8: 1028 case MEDIA_BUS_FMT_YUYV8_2X8:
1029 ret = reg_write(client, RJ54N1_OUT_SEL, 0); 1029 ret = reg_write(client, RJ54N1_OUT_SEL, 0);
1030 if (!ret) 1030 if (!ret)
1031 ret = reg_set(client, RJ54N1_BYTE_SWAP, 8, 8); 1031 ret = reg_set(client, RJ54N1_BYTE_SWAP, 8, 8);
1032 break; 1032 break;
1033 case V4L2_MBUS_FMT_YVYU8_2X8: 1033 case MEDIA_BUS_FMT_YVYU8_2X8:
1034 ret = reg_write(client, RJ54N1_OUT_SEL, 0); 1034 ret = reg_write(client, RJ54N1_OUT_SEL, 0);
1035 if (!ret) 1035 if (!ret)
1036 ret = reg_set(client, RJ54N1_BYTE_SWAP, 0, 8); 1036 ret = reg_set(client, RJ54N1_BYTE_SWAP, 0, 8);
1037 break; 1037 break;
1038 case V4L2_MBUS_FMT_RGB565_2X8_LE: 1038 case MEDIA_BUS_FMT_RGB565_2X8_LE:
1039 ret = reg_write(client, RJ54N1_OUT_SEL, 0x11); 1039 ret = reg_write(client, RJ54N1_OUT_SEL, 0x11);
1040 if (!ret) 1040 if (!ret)
1041 ret = reg_set(client, RJ54N1_BYTE_SWAP, 8, 8); 1041 ret = reg_set(client, RJ54N1_BYTE_SWAP, 8, 8);
1042 break; 1042 break;
1043 case V4L2_MBUS_FMT_RGB565_2X8_BE: 1043 case MEDIA_BUS_FMT_RGB565_2X8_BE:
1044 ret = reg_write(client, RJ54N1_OUT_SEL, 0x11); 1044 ret = reg_write(client, RJ54N1_OUT_SEL, 0x11);
1045 if (!ret) 1045 if (!ret)
1046 ret = reg_set(client, RJ54N1_BYTE_SWAP, 0, 8); 1046 ret = reg_set(client, RJ54N1_BYTE_SWAP, 0, 8);
1047 break; 1047 break;
1048 case V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_LE: 1048 case MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_LE:
1049 ret = reg_write(client, RJ54N1_OUT_SEL, 4); 1049 ret = reg_write(client, RJ54N1_OUT_SEL, 4);
1050 if (!ret) 1050 if (!ret)
1051 ret = reg_set(client, RJ54N1_BYTE_SWAP, 8, 8); 1051 ret = reg_set(client, RJ54N1_BYTE_SWAP, 8, 8);
1052 if (!ret) 1052 if (!ret)
1053 ret = reg_write(client, RJ54N1_RA_SEL_UL, 0); 1053 ret = reg_write(client, RJ54N1_RA_SEL_UL, 0);
1054 break; 1054 break;
1055 case V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE: 1055 case MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE:
1056 ret = reg_write(client, RJ54N1_OUT_SEL, 4); 1056 ret = reg_write(client, RJ54N1_OUT_SEL, 4);
1057 if (!ret) 1057 if (!ret)
1058 ret = reg_set(client, RJ54N1_BYTE_SWAP, 8, 8); 1058 ret = reg_set(client, RJ54N1_BYTE_SWAP, 8, 8);
1059 if (!ret) 1059 if (!ret)
1060 ret = reg_write(client, RJ54N1_RA_SEL_UL, 8); 1060 ret = reg_write(client, RJ54N1_RA_SEL_UL, 8);
1061 break; 1061 break;
1062 case V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_BE: 1062 case MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_BE:
1063 ret = reg_write(client, RJ54N1_OUT_SEL, 4); 1063 ret = reg_write(client, RJ54N1_OUT_SEL, 4);
1064 if (!ret) 1064 if (!ret)
1065 ret = reg_set(client, RJ54N1_BYTE_SWAP, 0, 8); 1065 ret = reg_set(client, RJ54N1_BYTE_SWAP, 0, 8);
1066 if (!ret) 1066 if (!ret)
1067 ret = reg_write(client, RJ54N1_RA_SEL_UL, 0); 1067 ret = reg_write(client, RJ54N1_RA_SEL_UL, 0);
1068 break; 1068 break;
1069 case V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_BE: 1069 case MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_BE:
1070 ret = reg_write(client, RJ54N1_OUT_SEL, 4); 1070 ret = reg_write(client, RJ54N1_OUT_SEL, 4);
1071 if (!ret) 1071 if (!ret)
1072 ret = reg_set(client, RJ54N1_BYTE_SWAP, 0, 8); 1072 ret = reg_set(client, RJ54N1_BYTE_SWAP, 0, 8);
1073 if (!ret) 1073 if (!ret)
1074 ret = reg_write(client, RJ54N1_RA_SEL_UL, 8); 1074 ret = reg_write(client, RJ54N1_RA_SEL_UL, 8);
1075 break; 1075 break;
1076 case V4L2_MBUS_FMT_SBGGR10_1X10: 1076 case MEDIA_BUS_FMT_SBGGR10_1X10:
1077 ret = reg_write(client, RJ54N1_OUT_SEL, 5); 1077 ret = reg_write(client, RJ54N1_OUT_SEL, 5);
1078 break; 1078 break;
1079 default: 1079 default:
@@ -1083,7 +1083,7 @@ static int rj54n1_s_fmt(struct v4l2_subdev *sd,
1083 /* Special case: a raw mode with 10 bits of data per clock tick */ 1083 /* Special case: a raw mode with 10 bits of data per clock tick */
1084 if (!ret) 1084 if (!ret)
1085 ret = reg_set(client, RJ54N1_OCLK_SEL_EN, 1085 ret = reg_set(client, RJ54N1_OCLK_SEL_EN,
1086 (mf->code == V4L2_MBUS_FMT_SBGGR10_1X10) << 1, 2); 1086 (mf->code == MEDIA_BUS_FMT_SBGGR10_1X10) << 1, 2);
1087 1087
1088 if (ret < 0) 1088 if (ret < 0)
1089 return ret; 1089 return ret;
diff --git a/drivers/media/i2c/soc_camera/tw9910.c b/drivers/media/i2c/soc_camera/tw9910.c
index 416402eb4f82..9b853215d146 100644
--- a/drivers/media/i2c/soc_camera/tw9910.c
+++ b/drivers/media/i2c/soc_camera/tw9910.c
@@ -705,7 +705,7 @@ static int tw9910_g_fmt(struct v4l2_subdev *sd,
705 705
706 mf->width = priv->scale->width; 706 mf->width = priv->scale->width;
707 mf->height = priv->scale->height; 707 mf->height = priv->scale->height;
708 mf->code = V4L2_MBUS_FMT_UYVY8_2X8; 708 mf->code = MEDIA_BUS_FMT_UYVY8_2X8;
709 mf->colorspace = V4L2_COLORSPACE_JPEG; 709 mf->colorspace = V4L2_COLORSPACE_JPEG;
710 mf->field = V4L2_FIELD_INTERLACED_BT; 710 mf->field = V4L2_FIELD_INTERLACED_BT;
711 711
@@ -724,7 +724,7 @@ static int tw9910_s_fmt(struct v4l2_subdev *sd,
724 /* 724 /*
725 * check color format 725 * check color format
726 */ 726 */
727 if (mf->code != V4L2_MBUS_FMT_UYVY8_2X8) 727 if (mf->code != MEDIA_BUS_FMT_UYVY8_2X8)
728 return -EINVAL; 728 return -EINVAL;
729 729
730 mf->colorspace = V4L2_COLORSPACE_JPEG; 730 mf->colorspace = V4L2_COLORSPACE_JPEG;
@@ -751,7 +751,7 @@ static int tw9910_try_fmt(struct v4l2_subdev *sd,
751 return -EINVAL; 751 return -EINVAL;
752 } 752 }
753 753
754 mf->code = V4L2_MBUS_FMT_UYVY8_2X8; 754 mf->code = MEDIA_BUS_FMT_UYVY8_2X8;
755 mf->colorspace = V4L2_COLORSPACE_JPEG; 755 mf->colorspace = V4L2_COLORSPACE_JPEG;
756 756
757 /* 757 /*
@@ -822,12 +822,12 @@ static struct v4l2_subdev_core_ops tw9910_subdev_core_ops = {
822}; 822};
823 823
824static int tw9910_enum_fmt(struct v4l2_subdev *sd, unsigned int index, 824static int tw9910_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
825 enum v4l2_mbus_pixelcode *code) 825 u32 *code)
826{ 826{
827 if (index) 827 if (index)
828 return -EINVAL; 828 return -EINVAL;
829 829
830 *code = V4L2_MBUS_FMT_UYVY8_2X8; 830 *code = MEDIA_BUS_FMT_UYVY8_2X8;
831 return 0; 831 return 0;
832} 832}
833 833
diff --git a/drivers/media/i2c/sr030pc30.c b/drivers/media/i2c/sr030pc30.c
index 118f8ee88465..10c735c3a082 100644
--- a/drivers/media/i2c/sr030pc30.c
+++ b/drivers/media/i2c/sr030pc30.c
@@ -165,7 +165,7 @@ struct sr030pc30_info {
165}; 165};
166 166
167struct sr030pc30_format { 167struct sr030pc30_format {
168 enum v4l2_mbus_pixelcode code; 168 u32 code;
169 enum v4l2_colorspace colorspace; 169 enum v4l2_colorspace colorspace;
170 u16 ispctl1_reg; 170 u16 ispctl1_reg;
171}; 171};
@@ -201,23 +201,23 @@ static const struct sr030pc30_frmsize sr030pc30_sizes[] = {
201/* supported pixel formats */ 201/* supported pixel formats */
202static const struct sr030pc30_format sr030pc30_formats[] = { 202static const struct sr030pc30_format sr030pc30_formats[] = {
203 { 203 {
204 .code = V4L2_MBUS_FMT_YUYV8_2X8, 204 .code = MEDIA_BUS_FMT_YUYV8_2X8,
205 .colorspace = V4L2_COLORSPACE_JPEG, 205 .colorspace = V4L2_COLORSPACE_JPEG,
206 .ispctl1_reg = 0x03, 206 .ispctl1_reg = 0x03,
207 }, { 207 }, {
208 .code = V4L2_MBUS_FMT_YVYU8_2X8, 208 .code = MEDIA_BUS_FMT_YVYU8_2X8,
209 .colorspace = V4L2_COLORSPACE_JPEG, 209 .colorspace = V4L2_COLORSPACE_JPEG,
210 .ispctl1_reg = 0x02, 210 .ispctl1_reg = 0x02,
211 }, { 211 }, {
212 .code = V4L2_MBUS_FMT_VYUY8_2X8, 212 .code = MEDIA_BUS_FMT_VYUY8_2X8,
213 .colorspace = V4L2_COLORSPACE_JPEG, 213 .colorspace = V4L2_COLORSPACE_JPEG,
214 .ispctl1_reg = 0, 214 .ispctl1_reg = 0,
215 }, { 215 }, {
216 .code = V4L2_MBUS_FMT_UYVY8_2X8, 216 .code = MEDIA_BUS_FMT_UYVY8_2X8,
217 .colorspace = V4L2_COLORSPACE_JPEG, 217 .colorspace = V4L2_COLORSPACE_JPEG,
218 .ispctl1_reg = 0x01, 218 .ispctl1_reg = 0x01,
219 }, { 219 }, {
220 .code = V4L2_MBUS_FMT_RGB565_2X8_BE, 220 .code = MEDIA_BUS_FMT_RGB565_2X8_BE,
221 .colorspace = V4L2_COLORSPACE_JPEG, 221 .colorspace = V4L2_COLORSPACE_JPEG,
222 .ispctl1_reg = 0x40, 222 .ispctl1_reg = 0x40,
223 }, 223 },
@@ -472,7 +472,7 @@ static int sr030pc30_s_ctrl(struct v4l2_ctrl *ctrl)
472} 472}
473 473
474static int sr030pc30_enum_fmt(struct v4l2_subdev *sd, unsigned int index, 474static int sr030pc30_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
475 enum v4l2_mbus_pixelcode *code) 475 u32 *code)
476{ 476{
477 if (!code || index >= ARRAY_SIZE(sr030pc30_formats)) 477 if (!code || index >= ARRAY_SIZE(sr030pc30_formats))
478 return -EINVAL; 478 return -EINVAL;
diff --git a/drivers/media/i2c/tvp514x.c b/drivers/media/i2c/tvp514x.c
index b9dabc9f4050..204204259ac6 100644
--- a/drivers/media/i2c/tvp514x.c
+++ b/drivers/media/i2c/tvp514x.c
@@ -756,12 +756,12 @@ static int tvp514x_s_ctrl(struct v4l2_ctrl *ctrl)
756 */ 756 */
757static int 757static int
758tvp514x_enum_mbus_fmt(struct v4l2_subdev *sd, unsigned index, 758tvp514x_enum_mbus_fmt(struct v4l2_subdev *sd, unsigned index,
759 enum v4l2_mbus_pixelcode *code) 759 u32 *code)
760{ 760{
761 if (index) 761 if (index)
762 return -EINVAL; 762 return -EINVAL;
763 763
764 *code = V4L2_MBUS_FMT_YUYV10_2X10; 764 *code = MEDIA_BUS_FMT_YUYV10_2X10;
765 return 0; 765 return 0;
766} 766}
767 767
@@ -784,7 +784,7 @@ tvp514x_mbus_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *f)
784 /* Calculate height and width based on current standard */ 784 /* Calculate height and width based on current standard */
785 current_std = decoder->current_std; 785 current_std = decoder->current_std;
786 786
787 f->code = V4L2_MBUS_FMT_YUYV8_2X8; 787 f->code = MEDIA_BUS_FMT_YUYV8_2X8;
788 f->width = decoder->std_list[current_std].width; 788 f->width = decoder->std_list[current_std].width;
789 f->height = decoder->std_list[current_std].height; 789 f->height = decoder->std_list[current_std].height;
790 f->field = V4L2_FIELD_INTERLACED; 790 f->field = V4L2_FIELD_INTERLACED;
@@ -942,7 +942,7 @@ static int tvp514x_enum_mbus_code(struct v4l2_subdev *sd,
942 if (index != 0) 942 if (index != 0)
943 return -EINVAL; 943 return -EINVAL;
944 944
945 code->code = V4L2_MBUS_FMT_YUYV8_2X8; 945 code->code = MEDIA_BUS_FMT_YUYV8_2X8;
946 946
947 return 0; 947 return 0;
948} 948}
@@ -967,7 +967,7 @@ static int tvp514x_get_pad_format(struct v4l2_subdev *sd,
967 return 0; 967 return 0;
968 } 968 }
969 969
970 format->format.code = V4L2_MBUS_FMT_YUYV8_2X8; 970 format->format.code = MEDIA_BUS_FMT_YUYV8_2X8;
971 format->format.width = tvp514x_std_list[decoder->current_std].width; 971 format->format.width = tvp514x_std_list[decoder->current_std].width;
972 format->format.height = tvp514x_std_list[decoder->current_std].height; 972 format->format.height = tvp514x_std_list[decoder->current_std].height;
973 format->format.colorspace = V4L2_COLORSPACE_SMPTE170M; 973 format->format.colorspace = V4L2_COLORSPACE_SMPTE170M;
@@ -991,7 +991,7 @@ static int tvp514x_set_pad_format(struct v4l2_subdev *sd,
991 struct tvp514x_decoder *decoder = to_decoder(sd); 991 struct tvp514x_decoder *decoder = to_decoder(sd);
992 992
993 if (fmt->format.field != V4L2_FIELD_INTERLACED || 993 if (fmt->format.field != V4L2_FIELD_INTERLACED ||
994 fmt->format.code != V4L2_MBUS_FMT_YUYV8_2X8 || 994 fmt->format.code != MEDIA_BUS_FMT_YUYV8_2X8 ||
995 fmt->format.colorspace != V4L2_COLORSPACE_SMPTE170M || 995 fmt->format.colorspace != V4L2_COLORSPACE_SMPTE170M ||
996 fmt->format.width != tvp514x_std_list[decoder->current_std].width || 996 fmt->format.width != tvp514x_std_list[decoder->current_std].width ||
997 fmt->format.height != tvp514x_std_list[decoder->current_std].height) 997 fmt->format.height != tvp514x_std_list[decoder->current_std].height)
diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c
index 193e7d6c29c8..68cdab9c0903 100644
--- a/drivers/media/i2c/tvp5150.c
+++ b/drivers/media/i2c/tvp5150.c
@@ -818,12 +818,12 @@ static v4l2_std_id tvp5150_read_std(struct v4l2_subdev *sd)
818} 818}
819 819
820static int tvp5150_enum_mbus_fmt(struct v4l2_subdev *sd, unsigned index, 820static int tvp5150_enum_mbus_fmt(struct v4l2_subdev *sd, unsigned index,
821 enum v4l2_mbus_pixelcode *code) 821 u32 *code)
822{ 822{
823 if (index) 823 if (index)
824 return -EINVAL; 824 return -EINVAL;
825 825
826 *code = V4L2_MBUS_FMT_UYVY8_2X8; 826 *code = MEDIA_BUS_FMT_UYVY8_2X8;
827 return 0; 827 return 0;
828} 828}
829 829
@@ -840,7 +840,7 @@ static int tvp5150_mbus_fmt(struct v4l2_subdev *sd,
840 f->width = decoder->rect.width; 840 f->width = decoder->rect.width;
841 f->height = decoder->rect.height; 841 f->height = decoder->rect.height;
842 842
843 f->code = V4L2_MBUS_FMT_UYVY8_2X8; 843 f->code = MEDIA_BUS_FMT_UYVY8_2X8;
844 f->field = V4L2_FIELD_SEQ_TB; 844 f->field = V4L2_FIELD_SEQ_TB;
845 f->colorspace = V4L2_COLORSPACE_SMPTE170M; 845 f->colorspace = V4L2_COLORSPACE_SMPTE170M;
846 846
diff --git a/drivers/media/i2c/tvp7002.c b/drivers/media/i2c/tvp7002.c
index 51bac762638b..fe4870e22cfe 100644
--- a/drivers/media/i2c/tvp7002.c
+++ b/drivers/media/i2c/tvp7002.c
@@ -626,7 +626,7 @@ static int tvp7002_mbus_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *f
626 626
627 f->width = bt->width; 627 f->width = bt->width;
628 f->height = bt->height; 628 f->height = bt->height;
629 f->code = V4L2_MBUS_FMT_YUYV10_1X20; 629 f->code = MEDIA_BUS_FMT_YUYV10_1X20;
630 f->field = device->current_timings->scanmode; 630 f->field = device->current_timings->scanmode;
631 f->colorspace = device->current_timings->color_space; 631 f->colorspace = device->current_timings->color_space;
632 632
@@ -756,12 +756,12 @@ static int tvp7002_s_register(struct v4l2_subdev *sd,
756 */ 756 */
757 757
758static int tvp7002_enum_mbus_fmt(struct v4l2_subdev *sd, unsigned index, 758static int tvp7002_enum_mbus_fmt(struct v4l2_subdev *sd, unsigned index,
759 enum v4l2_mbus_pixelcode *code) 759 u32 *code)
760{ 760{
761 /* Check requested format index is within range */ 761 /* Check requested format index is within range */
762 if (index) 762 if (index)
763 return -EINVAL; 763 return -EINVAL;
764 *code = V4L2_MBUS_FMT_YUYV10_1X20; 764 *code = MEDIA_BUS_FMT_YUYV10_1X20;
765 return 0; 765 return 0;
766} 766}
767 767
@@ -859,7 +859,7 @@ tvp7002_enum_mbus_code(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
859 if (code->index != 0) 859 if (code->index != 0)
860 return -EINVAL; 860 return -EINVAL;
861 861
862 code->code = V4L2_MBUS_FMT_YUYV10_1X20; 862 code->code = MEDIA_BUS_FMT_YUYV10_1X20;
863 863
864 return 0; 864 return 0;
865} 865}
@@ -878,7 +878,7 @@ tvp7002_get_pad_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
878{ 878{
879 struct tvp7002 *tvp7002 = to_tvp7002(sd); 879 struct tvp7002 *tvp7002 = to_tvp7002(sd);
880 880
881 fmt->format.code = V4L2_MBUS_FMT_YUYV10_1X20; 881 fmt->format.code = MEDIA_BUS_FMT_YUYV10_1X20;
882 fmt->format.width = tvp7002->current_timings->timings.bt.width; 882 fmt->format.width = tvp7002->current_timings->timings.bt.width;
883 fmt->format.height = tvp7002->current_timings->timings.bt.height; 883 fmt->format.height = tvp7002->current_timings->timings.bt.height;
884 fmt->format.field = tvp7002->current_timings->scanmode; 884 fmt->format.field = tvp7002->current_timings->scanmode;
diff --git a/drivers/media/i2c/vs6624.c b/drivers/media/i2c/vs6624.c
index 373f2df52492..00e7f043977e 100644
--- a/drivers/media/i2c/vs6624.c
+++ b/drivers/media/i2c/vs6624.c
@@ -45,19 +45,19 @@ struct vs6624 {
45}; 45};
46 46
47static const struct vs6624_format { 47static const struct vs6624_format {
48 enum v4l2_mbus_pixelcode mbus_code; 48 u32 mbus_code;
49 enum v4l2_colorspace colorspace; 49 enum v4l2_colorspace colorspace;
50} vs6624_formats[] = { 50} vs6624_formats[] = {
51 { 51 {
52 .mbus_code = V4L2_MBUS_FMT_UYVY8_2X8, 52 .mbus_code = MEDIA_BUS_FMT_UYVY8_2X8,
53 .colorspace = V4L2_COLORSPACE_JPEG, 53 .colorspace = V4L2_COLORSPACE_JPEG,
54 }, 54 },
55 { 55 {
56 .mbus_code = V4L2_MBUS_FMT_YUYV8_2X8, 56 .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
57 .colorspace = V4L2_COLORSPACE_JPEG, 57 .colorspace = V4L2_COLORSPACE_JPEG,
58 }, 58 },
59 { 59 {
60 .mbus_code = V4L2_MBUS_FMT_RGB565_2X8_LE, 60 .mbus_code = MEDIA_BUS_FMT_RGB565_2X8_LE,
61 .colorspace = V4L2_COLORSPACE_SRGB, 61 .colorspace = V4L2_COLORSPACE_SRGB,
62 }, 62 },
63}; 63};
@@ -65,7 +65,7 @@ static const struct vs6624_format {
65static struct v4l2_mbus_framefmt vs6624_default_fmt = { 65static struct v4l2_mbus_framefmt vs6624_default_fmt = {
66 .width = VGA_WIDTH, 66 .width = VGA_WIDTH,
67 .height = VGA_HEIGHT, 67 .height = VGA_HEIGHT,
68 .code = V4L2_MBUS_FMT_UYVY8_2X8, 68 .code = MEDIA_BUS_FMT_UYVY8_2X8,
69 .field = V4L2_FIELD_NONE, 69 .field = V4L2_FIELD_NONE,
70 .colorspace = V4L2_COLORSPACE_JPEG, 70 .colorspace = V4L2_COLORSPACE_JPEG,
71}; 71};
@@ -558,7 +558,7 @@ static int vs6624_s_ctrl(struct v4l2_ctrl *ctrl)
558} 558}
559 559
560static int vs6624_enum_mbus_fmt(struct v4l2_subdev *sd, unsigned index, 560static int vs6624_enum_mbus_fmt(struct v4l2_subdev *sd, unsigned index,
561 enum v4l2_mbus_pixelcode *code) 561 u32 *code)
562{ 562{
563 if (index >= ARRAY_SIZE(vs6624_formats)) 563 if (index >= ARRAY_SIZE(vs6624_formats))
564 return -EINVAL; 564 return -EINVAL;
@@ -605,15 +605,15 @@ static int vs6624_s_mbus_fmt(struct v4l2_subdev *sd,
605 605
606 /* set image format */ 606 /* set image format */
607 switch (fmt->code) { 607 switch (fmt->code) {
608 case V4L2_MBUS_FMT_UYVY8_2X8: 608 case MEDIA_BUS_FMT_UYVY8_2X8:
609 vs6624_write(sd, VS6624_IMG_FMT0, 0x0); 609 vs6624_write(sd, VS6624_IMG_FMT0, 0x0);
610 vs6624_write(sd, VS6624_YUV_SETUP, 0x1); 610 vs6624_write(sd, VS6624_YUV_SETUP, 0x1);
611 break; 611 break;
612 case V4L2_MBUS_FMT_YUYV8_2X8: 612 case MEDIA_BUS_FMT_YUYV8_2X8:
613 vs6624_write(sd, VS6624_IMG_FMT0, 0x0); 613 vs6624_write(sd, VS6624_IMG_FMT0, 0x0);
614 vs6624_write(sd, VS6624_YUV_SETUP, 0x3); 614 vs6624_write(sd, VS6624_YUV_SETUP, 0x3);
615 break; 615 break;
616 case V4L2_MBUS_FMT_RGB565_2X8_LE: 616 case MEDIA_BUS_FMT_RGB565_2X8_LE:
617 vs6624_write(sd, VS6624_IMG_FMT0, 0x4); 617 vs6624_write(sd, VS6624_IMG_FMT0, 0x4);
618 vs6624_write(sd, VS6624_RGB_SETUP, 0x0); 618 vs6624_write(sd, VS6624_RGB_SETUP, 0x0);
619 break; 619 break;
diff --git a/drivers/media/media-entity.c b/drivers/media/media-entity.c
index 37c334edc7e8..4d8e01c7b1b2 100644
--- a/drivers/media/media-entity.c
+++ b/drivers/media/media-entity.c
@@ -279,8 +279,14 @@ __must_check int media_entity_pipeline_start(struct media_entity *entity,
279 continue; 279 continue;
280 280
281 ret = entity->ops->link_validate(link); 281 ret = entity->ops->link_validate(link);
282 if (ret < 0 && ret != -ENOIOCTLCMD) 282 if (ret < 0 && ret != -ENOIOCTLCMD) {
283 dev_dbg(entity->parent->dev,
284 "link validation failed for \"%s\":%u -> \"%s\":%u, error %d\n",
285 entity->name, link->source->index,
286 link->sink->entity->name,
287 link->sink->index, ret);
283 goto error; 288 goto error;
289 }
284 } 290 }
285 291
286 /* Either no links or validated links are fine. */ 292 /* Either no links or validated links are fine. */
@@ -288,6 +294,11 @@ __must_check int media_entity_pipeline_start(struct media_entity *entity,
288 294
289 if (!bitmap_full(active, entity->num_pads)) { 295 if (!bitmap_full(active, entity->num_pads)) {
290 ret = -EPIPE; 296 ret = -EPIPE;
297 dev_dbg(entity->parent->dev,
298 "\"%s\":%u must be connected by an enabled link\n",
299 entity->name,
300 (unsigned)find_first_zero_bit(
301 active, entity->num_pads));
291 goto error; 302 goto error;
292 } 303 }
293 } 304 }
diff --git a/drivers/media/pci/Kconfig b/drivers/media/pci/Kconfig
index f8cec8e8cf82..218144a99016 100644
--- a/drivers/media/pci/Kconfig
+++ b/drivers/media/pci/Kconfig
@@ -46,6 +46,7 @@ source "drivers/media/pci/pt3/Kconfig"
46source "drivers/media/pci/mantis/Kconfig" 46source "drivers/media/pci/mantis/Kconfig"
47source "drivers/media/pci/ngene/Kconfig" 47source "drivers/media/pci/ngene/Kconfig"
48source "drivers/media/pci/ddbridge/Kconfig" 48source "drivers/media/pci/ddbridge/Kconfig"
49source "drivers/media/pci/smipcie/Kconfig"
49endif 50endif
50 51
51endif #MEDIA_PCI_SUPPORT 52endif #MEDIA_PCI_SUPPORT
diff --git a/drivers/media/pci/Makefile b/drivers/media/pci/Makefile
index a12926e4b51f..0baf0d2967ee 100644
--- a/drivers/media/pci/Makefile
+++ b/drivers/media/pci/Makefile
@@ -11,7 +11,8 @@ obj-y += ttpci/ \
11 mantis/ \ 11 mantis/ \
12 ngene/ \ 12 ngene/ \
13 ddbridge/ \ 13 ddbridge/ \
14 saa7146/ 14 saa7146/ \
15 smipcie/
15 16
16obj-$(CONFIG_VIDEO_IVTV) += ivtv/ 17obj-$(CONFIG_VIDEO_IVTV) += ivtv/
17obj-$(CONFIG_VIDEO_ZORAN) += zoran/ 18obj-$(CONFIG_VIDEO_ZORAN) += zoran/
diff --git a/drivers/media/pci/bt8xx/bttv-cards.c b/drivers/media/pci/bt8xx/bttv-cards.c
index d8ec583c154c..41055606b969 100644
--- a/drivers/media/pci/bt8xx/bttv-cards.c
+++ b/drivers/media/pci/bt8xx/bttv-cards.c
@@ -3870,10 +3870,10 @@ static void osprey_eeprom(struct bttv *btv, const u8 ee[256])
3870 } else { 3870 } else {
3871 unsigned short type; 3871 unsigned short type;
3872 3872
3873 for (i = 4*16; i < 8*16; i += 16) { 3873 for (i = 4 * 16; i < 8 * 16; i += 16) {
3874 u16 checksum = ip_compute_csum(ee + i, 16); 3874 u16 checksum = (__force u16)ip_compute_csum(ee + i, 16);
3875 3875
3876 if ((checksum&0xff) + (checksum>>8) == 0xff) 3876 if ((checksum & 0xff) + (checksum >> 8) == 0xff)
3877 break; 3877 break;
3878 } 3878 }
3879 if (i >= 8*16) 3879 if (i >= 8*16)
diff --git a/drivers/media/pci/bt8xx/bttv-risc.c b/drivers/media/pci/bt8xx/bttv-risc.c
index 82cc47d2e3fa..4d3f05a19af3 100644
--- a/drivers/media/pci/bt8xx/bttv-risc.c
+++ b/drivers/media/pci/bt8xx/bttv-risc.c
@@ -84,7 +84,7 @@ bttv_risc_packed(struct bttv *btv, struct btcx_riscmem *risc,
84 continue; 84 continue;
85 while (offset && offset >= sg_dma_len(sg)) { 85 while (offset && offset >= sg_dma_len(sg)) {
86 offset -= sg_dma_len(sg); 86 offset -= sg_dma_len(sg);
87 sg++; 87 sg = sg_next(sg);
88 } 88 }
89 if (bpl <= sg_dma_len(sg)-offset) { 89 if (bpl <= sg_dma_len(sg)-offset) {
90 /* fits into current chunk */ 90 /* fits into current chunk */
@@ -100,13 +100,13 @@ bttv_risc_packed(struct bttv *btv, struct btcx_riscmem *risc,
100 *(rp++)=cpu_to_le32(sg_dma_address(sg)+offset); 100 *(rp++)=cpu_to_le32(sg_dma_address(sg)+offset);
101 todo -= (sg_dma_len(sg)-offset); 101 todo -= (sg_dma_len(sg)-offset);
102 offset = 0; 102 offset = 0;
103 sg++; 103 sg = sg_next(sg);
104 while (todo > sg_dma_len(sg)) { 104 while (todo > sg_dma_len(sg)) {
105 *(rp++)=cpu_to_le32(BT848_RISC_WRITE| 105 *(rp++)=cpu_to_le32(BT848_RISC_WRITE|
106 sg_dma_len(sg)); 106 sg_dma_len(sg));
107 *(rp++)=cpu_to_le32(sg_dma_address(sg)); 107 *(rp++)=cpu_to_le32(sg_dma_address(sg));
108 todo -= sg_dma_len(sg); 108 todo -= sg_dma_len(sg);
109 sg++; 109 sg = sg_next(sg);
110 } 110 }
111 *(rp++)=cpu_to_le32(BT848_RISC_WRITE|BT848_RISC_EOL| 111 *(rp++)=cpu_to_le32(BT848_RISC_WRITE|BT848_RISC_EOL|
112 todo); 112 todo);
@@ -187,15 +187,15 @@ bttv_risc_planar(struct bttv *btv, struct btcx_riscmem *risc,
187 /* go to next sg entry if needed */ 187 /* go to next sg entry if needed */
188 while (yoffset && yoffset >= sg_dma_len(ysg)) { 188 while (yoffset && yoffset >= sg_dma_len(ysg)) {
189 yoffset -= sg_dma_len(ysg); 189 yoffset -= sg_dma_len(ysg);
190 ysg++; 190 ysg = sg_next(ysg);
191 } 191 }
192 while (uoffset && uoffset >= sg_dma_len(usg)) { 192 while (uoffset && uoffset >= sg_dma_len(usg)) {
193 uoffset -= sg_dma_len(usg); 193 uoffset -= sg_dma_len(usg);
194 usg++; 194 usg = sg_next(usg);
195 } 195 }
196 while (voffset && voffset >= sg_dma_len(vsg)) { 196 while (voffset && voffset >= sg_dma_len(vsg)) {
197 voffset -= sg_dma_len(vsg); 197 voffset -= sg_dma_len(vsg);
198 vsg++; 198 vsg = sg_next(vsg);
199 } 199 }
200 200
201 /* calculate max number of bytes we can write */ 201 /* calculate max number of bytes we can write */
diff --git a/drivers/media/pci/cx18/cx18-av-core.c b/drivers/media/pci/cx18/cx18-av-core.c
index 2d3afe0431a9..5a55630d09db 100644
--- a/drivers/media/pci/cx18/cx18-av-core.c
+++ b/drivers/media/pci/cx18/cx18-av-core.c
@@ -490,8 +490,8 @@ void cx18_av_std_setup(struct cx18 *cx)
490 490
491 /* Sets horizontal blanking delay and active lines */ 491 /* Sets horizontal blanking delay and active lines */
492 cx18_av_write(cx, 0x470, hblank); 492 cx18_av_write(cx, 0x470, hblank);
493 cx18_av_write(cx, 0x471, 0xff & (((hblank >> 8) & 0x3) | 493 cx18_av_write(cx, 0x471,
494 (hactive << 4))); 494 (((hblank >> 8) & 0x3) | (hactive << 4)) & 0xff);
495 cx18_av_write(cx, 0x472, hactive >> 4); 495 cx18_av_write(cx, 0x472, hactive >> 4);
496 496
497 /* Sets burst gate delay */ 497 /* Sets burst gate delay */
@@ -499,14 +499,14 @@ void cx18_av_std_setup(struct cx18 *cx)
499 499
500 /* Sets vertical blanking delay and active duration */ 500 /* Sets vertical blanking delay and active duration */
501 cx18_av_write(cx, 0x474, vblank); 501 cx18_av_write(cx, 0x474, vblank);
502 cx18_av_write(cx, 0x475, 0xff & (((vblank >> 8) & 0x3) | 502 cx18_av_write(cx, 0x475,
503 (vactive << 4))); 503 (((vblank >> 8) & 0x3) | (vactive << 4)) & 0xff);
504 cx18_av_write(cx, 0x476, vactive >> 4); 504 cx18_av_write(cx, 0x476, vactive >> 4);
505 cx18_av_write(cx, 0x477, vblank656); 505 cx18_av_write(cx, 0x477, vblank656);
506 506
507 /* Sets src decimation rate */ 507 /* Sets src decimation rate */
508 cx18_av_write(cx, 0x478, 0xff & src_decimation); 508 cx18_av_write(cx, 0x478, src_decimation & 0xff);
509 cx18_av_write(cx, 0x479, 0xff & (src_decimation >> 8)); 509 cx18_av_write(cx, 0x479, (src_decimation >> 8) & 0xff);
510 510
511 /* Sets Luma and UV Low pass filters */ 511 /* Sets Luma and UV Low pass filters */
512 cx18_av_write(cx, 0x47a, luma_lpf << 6 | ((uv_lpf << 4) & 0x30)); 512 cx18_av_write(cx, 0x47a, luma_lpf << 6 | ((uv_lpf << 4) & 0x30));
@@ -516,8 +516,8 @@ void cx18_av_std_setup(struct cx18 *cx)
516 516
517 /* Sets SC Step*/ 517 /* Sets SC Step*/
518 cx18_av_write(cx, 0x47c, sc); 518 cx18_av_write(cx, 0x47c, sc);
519 cx18_av_write(cx, 0x47d, 0xff & sc >> 8); 519 cx18_av_write(cx, 0x47d, (sc >> 8) & 0xff);
520 cx18_av_write(cx, 0x47e, 0xff & sc >> 16); 520 cx18_av_write(cx, 0x47e, (sc >> 16) & 0xff);
521 521
522 if (std & V4L2_STD_625_50) { 522 if (std & V4L2_STD_625_50) {
523 state->slicer_line_delay = 1; 523 state->slicer_line_delay = 1;
@@ -952,7 +952,7 @@ static int cx18_av_s_mbus_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt
952 int HSC, VSC, Vsrc, Hsrc, filter, Vlines; 952 int HSC, VSC, Vsrc, Hsrc, filter, Vlines;
953 int is_50Hz = !(state->std & V4L2_STD_525_60); 953 int is_50Hz = !(state->std & V4L2_STD_525_60);
954 954
955 if (fmt->code != V4L2_MBUS_FMT_FIXED) 955 if (fmt->code != MEDIA_BUS_FMT_FIXED)
956 return -EINVAL; 956 return -EINVAL;
957 957
958 fmt->field = V4L2_FIELD_INTERLACED; 958 fmt->field = V4L2_FIELD_INTERLACED;
diff --git a/drivers/media/pci/cx18/cx18-cards.h b/drivers/media/pci/cx18/cx18-cards.h
index add7391ecaba..f6b921f3b0ac 100644
--- a/drivers/media/pci/cx18/cx18-cards.h
+++ b/drivers/media/pci/cx18/cx18-cards.h
@@ -57,7 +57,8 @@
57/* V4L2 capability aliases */ 57/* V4L2 capability aliases */
58#define CX18_CAP_ENCODER (V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_TUNER | \ 58#define CX18_CAP_ENCODER (V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_TUNER | \
59 V4L2_CAP_AUDIO | V4L2_CAP_READWRITE | \ 59 V4L2_CAP_AUDIO | V4L2_CAP_READWRITE | \
60 V4L2_CAP_VBI_CAPTURE | V4L2_CAP_SLICED_VBI_CAPTURE) 60 V4L2_CAP_STREAMING | V4L2_CAP_VBI_CAPTURE | \
61 V4L2_CAP_SLICED_VBI_CAPTURE)
61 62
62struct cx18_card_video_input { 63struct cx18_card_video_input {
63 u8 video_type; /* video input type */ 64 u8 video_type; /* video input type */
diff --git a/drivers/media/pci/cx18/cx18-controls.c b/drivers/media/pci/cx18/cx18-controls.c
index 282a3d29fdaa..4aeb7c6b8ce1 100644
--- a/drivers/media/pci/cx18/cx18-controls.c
+++ b/drivers/media/pci/cx18/cx18-controls.c
@@ -98,7 +98,7 @@ static int cx18_s_video_encoding(struct cx2341x_handler *cxhdl, u32 val)
98 /* fix videodecoder resolution */ 98 /* fix videodecoder resolution */
99 fmt.width = cxhdl->width / (is_mpeg1 ? 2 : 1); 99 fmt.width = cxhdl->width / (is_mpeg1 ? 2 : 1);
100 fmt.height = cxhdl->height; 100 fmt.height = cxhdl->height;
101 fmt.code = V4L2_MBUS_FMT_FIXED; 101 fmt.code = MEDIA_BUS_FMT_FIXED;
102 v4l2_subdev_call(cx->sd_av, video, s_mbus_fmt, &fmt); 102 v4l2_subdev_call(cx->sd_av, video, s_mbus_fmt, &fmt);
103 return 0; 103 return 0;
104} 104}
diff --git a/drivers/media/pci/cx18/cx18-driver.h b/drivers/media/pci/cx18/cx18-driver.h
index 57f4688ea55b..dcfd7a1d317e 100644
--- a/drivers/media/pci/cx18/cx18-driver.h
+++ b/drivers/media/pci/cx18/cx18-driver.h
@@ -379,6 +379,7 @@ struct cx18_stream {
379 const char *name; /* name of the stream */ 379 const char *name; /* name of the stream */
380 int type; /* stream type */ 380 int type; /* stream type */
381 u32 handle; /* task handle */ 381 u32 handle; /* task handle */
382 u32 v4l2_dev_caps; /* device capabilities */
382 unsigned int mdl_base_idx; 383 unsigned int mdl_base_idx;
383 384
384 u32 id; 385 u32 id;
diff --git a/drivers/media/pci/cx18/cx18-ioctl.c b/drivers/media/pci/cx18/cx18-ioctl.c
index 6f2b59042b73..b8e4b68a9196 100644
--- a/drivers/media/pci/cx18/cx18-ioctl.c
+++ b/drivers/media/pci/cx18/cx18-ioctl.c
@@ -294,7 +294,7 @@ static int cx18_s_fmt_vid_cap(struct file *file, void *fh,
294 294
295 mbus_fmt.width = cx->cxhdl.width = w; 295 mbus_fmt.width = cx->cxhdl.width = w;
296 mbus_fmt.height = cx->cxhdl.height = h; 296 mbus_fmt.height = cx->cxhdl.height = h;
297 mbus_fmt.code = V4L2_MBUS_FMT_FIXED; 297 mbus_fmt.code = MEDIA_BUS_FMT_FIXED;
298 v4l2_subdev_call(cx->sd_av, video, s_mbus_fmt, &mbus_fmt); 298 v4l2_subdev_call(cx->sd_av, video, s_mbus_fmt, &mbus_fmt);
299 return cx18_g_fmt_vid_cap(file, fh, fmt); 299 return cx18_g_fmt_vid_cap(file, fh, fmt);
300} 300}
@@ -393,15 +393,16 @@ static int cx18_querycap(struct file *file, void *fh,
393 struct v4l2_capability *vcap) 393 struct v4l2_capability *vcap)
394{ 394{
395 struct cx18_open_id *id = fh2id(fh); 395 struct cx18_open_id *id = fh2id(fh);
396 struct cx18_stream *s = video_drvdata(file);
396 struct cx18 *cx = id->cx; 397 struct cx18 *cx = id->cx;
397 398
398 strlcpy(vcap->driver, CX18_DRIVER_NAME, sizeof(vcap->driver)); 399 strlcpy(vcap->driver, CX18_DRIVER_NAME, sizeof(vcap->driver));
399 strlcpy(vcap->card, cx->card_name, sizeof(vcap->card)); 400 strlcpy(vcap->card, cx->card_name, sizeof(vcap->card));
400 snprintf(vcap->bus_info, sizeof(vcap->bus_info), 401 snprintf(vcap->bus_info, sizeof(vcap->bus_info),
401 "PCI:%s", pci_name(cx->pci_dev)); 402 "PCI:%s", pci_name(cx->pci_dev));
402 vcap->capabilities = cx->v4l2_cap; /* capabilities */ 403 vcap->capabilities = cx->v4l2_cap; /* capabilities */
403 if (id->type == CX18_ENC_STREAM_TYPE_YUV) 404 vcap->device_caps = s->v4l2_dev_caps; /* device capabilities */
404 vcap->capabilities |= V4L2_CAP_STREAMING; 405 vcap->capabilities |= V4L2_CAP_DEVICE_CAPS;
405 return 0; 406 return 0;
406} 407}
407 408
diff --git a/drivers/media/pci/cx18/cx18-streams.c b/drivers/media/pci/cx18/cx18-streams.c
index f3541b5156ce..369445fcf3e5 100644
--- a/drivers/media/pci/cx18/cx18-streams.c
+++ b/drivers/media/pci/cx18/cx18-streams.c
@@ -58,11 +58,14 @@ static struct {
58 int vfl_type; 58 int vfl_type;
59 int num_offset; 59 int num_offset;
60 int dma; 60 int dma;
61 u32 caps;
61} cx18_stream_info[] = { 62} cx18_stream_info[] = {
62 { /* CX18_ENC_STREAM_TYPE_MPG */ 63 { /* CX18_ENC_STREAM_TYPE_MPG */
63 "encoder MPEG", 64 "encoder MPEG",
64 VFL_TYPE_GRABBER, 0, 65 VFL_TYPE_GRABBER, 0,
65 PCI_DMA_FROMDEVICE, 66 PCI_DMA_FROMDEVICE,
67 V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
68 V4L2_CAP_AUDIO | V4L2_CAP_TUNER
66 }, 69 },
67 { /* CX18_ENC_STREAM_TYPE_TS */ 70 { /* CX18_ENC_STREAM_TYPE_TS */
68 "TS", 71 "TS",
@@ -73,11 +76,15 @@ static struct {
73 "encoder YUV", 76 "encoder YUV",
74 VFL_TYPE_GRABBER, CX18_V4L2_ENC_YUV_OFFSET, 77 VFL_TYPE_GRABBER, CX18_V4L2_ENC_YUV_OFFSET,
75 PCI_DMA_FROMDEVICE, 78 PCI_DMA_FROMDEVICE,
79 V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
80 V4L2_CAP_STREAMING | V4L2_CAP_AUDIO | V4L2_CAP_TUNER
76 }, 81 },
77 { /* CX18_ENC_STREAM_TYPE_VBI */ 82 { /* CX18_ENC_STREAM_TYPE_VBI */
78 "encoder VBI", 83 "encoder VBI",
79 VFL_TYPE_VBI, 0, 84 VFL_TYPE_VBI, 0,
80 PCI_DMA_FROMDEVICE, 85 PCI_DMA_FROMDEVICE,
86 V4L2_CAP_VBI_CAPTURE | V4L2_CAP_SLICED_VBI_CAPTURE |
87 V4L2_CAP_READWRITE | V4L2_CAP_TUNER
81 }, 88 },
82 { /* CX18_ENC_STREAM_TYPE_PCM */ 89 { /* CX18_ENC_STREAM_TYPE_PCM */
83 "encoder PCM audio", 90 "encoder PCM audio",
@@ -93,6 +100,7 @@ static struct {
93 "encoder radio", 100 "encoder radio",
94 VFL_TYPE_RADIO, 0, 101 VFL_TYPE_RADIO, 0,
95 PCI_DMA_NONE, 102 PCI_DMA_NONE,
103 V4L2_CAP_RADIO | V4L2_CAP_TUNER
96 }, 104 },
97}; 105};
98 106
@@ -260,6 +268,7 @@ static void cx18_stream_init(struct cx18 *cx, int type)
260 s->handle = CX18_INVALID_TASK_HANDLE; 268 s->handle = CX18_INVALID_TASK_HANDLE;
261 269
262 s->dma = cx18_stream_info[type].dma; 270 s->dma = cx18_stream_info[type].dma;
271 s->v4l2_dev_caps = cx18_stream_info[type].caps;
263 s->buffers = cx->stream_buffers[type]; 272 s->buffers = cx->stream_buffers[type];
264 s->buf_size = cx->stream_buf_size[type]; 273 s->buf_size = cx->stream_buf_size[type];
265 INIT_LIST_HEAD(&s->buf_pool); 274 INIT_LIST_HEAD(&s->buf_pool);
diff --git a/drivers/media/pci/cx23885/cx23885-417.c b/drivers/media/pci/cx23885/cx23885-417.c
index 3948db386fb5..e4901a503c73 100644
--- a/drivers/media/pci/cx23885/cx23885-417.c
+++ b/drivers/media/pci/cx23885/cx23885-417.c
@@ -1148,6 +1148,7 @@ static int queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
1148 dev->ts1.ts_packet_count = mpeglines; 1148 dev->ts1.ts_packet_count = mpeglines;
1149 *num_planes = 1; 1149 *num_planes = 1;
1150 sizes[0] = mpeglinesize * mpeglines; 1150 sizes[0] = mpeglinesize * mpeglines;
1151 alloc_ctxs[0] = dev->alloc_ctx;
1151 *num_buffers = mpegbufs; 1152 *num_buffers = mpegbufs;
1152 return 0; 1153 return 0;
1153} 1154}
@@ -1166,11 +1167,8 @@ static void buffer_finish(struct vb2_buffer *vb)
1166 struct cx23885_dev *dev = vb->vb2_queue->drv_priv; 1167 struct cx23885_dev *dev = vb->vb2_queue->drv_priv;
1167 struct cx23885_buffer *buf = container_of(vb, 1168 struct cx23885_buffer *buf = container_of(vb,
1168 struct cx23885_buffer, vb); 1169 struct cx23885_buffer, vb);
1169 struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
1170 1170
1171 cx23885_free_buffer(dev, buf); 1171 cx23885_free_buffer(dev, buf);
1172
1173 dma_unmap_sg(&dev->pci->dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
1174} 1172}
1175 1173
1176static void buffer_queue(struct vb2_buffer *vb) 1174static void buffer_queue(struct vb2_buffer *vb)
diff --git a/drivers/media/pci/cx23885/cx23885-cards.c b/drivers/media/pci/cx23885/cx23885-cards.c
index 88c257d1161b..db99ca2613ba 100644
--- a/drivers/media/pci/cx23885/cx23885-cards.c
+++ b/drivers/media/pci/cx23885/cx23885-cards.c
@@ -680,6 +680,32 @@ struct cx23885_board cx23885_boards[] = {
680 .portb = CX23885_MPEG_DVB, 680 .portb = CX23885_MPEG_DVB,
681 .portc = CX23885_MPEG_DVB, 681 .portc = CX23885_MPEG_DVB,
682 }, 682 },
683 [CX23885_BOARD_DVBSKY_T980C] = {
684 .name = "DVBSky T980C",
685 .portb = CX23885_MPEG_DVB,
686 },
687 [CX23885_BOARD_DVBSKY_S950C] = {
688 .name = "DVBSky S950C",
689 .portb = CX23885_MPEG_DVB,
690 },
691 [CX23885_BOARD_TT_CT2_4500_CI] = {
692 .name = "Technotrend TT-budget CT2-4500 CI",
693 .portb = CX23885_MPEG_DVB,
694 },
695 [CX23885_BOARD_DVBSKY_S950] = {
696 .name = "DVBSky S950",
697 .portb = CX23885_MPEG_DVB,
698 },
699 [CX23885_BOARD_DVBSKY_S952] = {
700 .name = "DVBSky S952",
701 .portb = CX23885_MPEG_DVB,
702 .portc = CX23885_MPEG_DVB,
703 },
704 [CX23885_BOARD_DVBSKY_T982] = {
705 .name = "DVBSky T982",
706 .portb = CX23885_MPEG_DVB,
707 .portc = CX23885_MPEG_DVB,
708 },
683}; 709};
684const unsigned int cx23885_bcount = ARRAY_SIZE(cx23885_boards); 710const unsigned int cx23885_bcount = ARRAY_SIZE(cx23885_boards);
685 711
@@ -939,6 +965,30 @@ struct cx23885_subid cx23885_subids[] = {
939 .subvendor = 0x4254, 965 .subvendor = 0x4254,
940 .subdevice = 0x9580, 966 .subdevice = 0x9580,
941 .card = CX23885_BOARD_DVBSKY_T9580, 967 .card = CX23885_BOARD_DVBSKY_T9580,
968 }, {
969 .subvendor = 0x4254,
970 .subdevice = 0x980c,
971 .card = CX23885_BOARD_DVBSKY_T980C,
972 }, {
973 .subvendor = 0x4254,
974 .subdevice = 0x950c,
975 .card = CX23885_BOARD_DVBSKY_S950C,
976 }, {
977 .subvendor = 0x13c2,
978 .subdevice = 0x3013,
979 .card = CX23885_BOARD_TT_CT2_4500_CI,
980 }, {
981 .subvendor = 0x4254,
982 .subdevice = 0x0950,
983 .card = CX23885_BOARD_DVBSKY_S950,
984 }, {
985 .subvendor = 0x4254,
986 .subdevice = 0x0952,
987 .card = CX23885_BOARD_DVBSKY_S952,
988 }, {
989 .subvendor = 0x4254,
990 .subdevice = 0x0982,
991 .card = CX23885_BOARD_DVBSKY_T982,
942 }, 992 },
943}; 993};
944const unsigned int cx23885_idcount = ARRAY_SIZE(cx23885_subids); 994const unsigned int cx23885_idcount = ARRAY_SIZE(cx23885_subids);
@@ -1534,6 +1584,8 @@ void cx23885_gpio_setup(struct cx23885_dev *dev)
1534 mdelay(60); 1584 mdelay(60);
1535 break; 1585 break;
1536 case CX23885_BOARD_DVBSKY_T9580: 1586 case CX23885_BOARD_DVBSKY_T9580:
1587 case CX23885_BOARD_DVBSKY_S952:
1588 case CX23885_BOARD_DVBSKY_T982:
1537 /* enable GPIO3-18 pins */ 1589 /* enable GPIO3-18 pins */
1538 cx_write(MC417_CTL, 0x00000037); 1590 cx_write(MC417_CTL, 0x00000037);
1539 cx23885_gpio_enable(dev, GPIO_2 | GPIO_11, 1); 1591 cx23885_gpio_enable(dev, GPIO_2 | GPIO_11, 1);
@@ -1541,6 +1593,45 @@ void cx23885_gpio_setup(struct cx23885_dev *dev)
1541 mdelay(100); 1593 mdelay(100);
1542 cx23885_gpio_set(dev, GPIO_2 | GPIO_11); 1594 cx23885_gpio_set(dev, GPIO_2 | GPIO_11);
1543 break; 1595 break;
1596 case CX23885_BOARD_DVBSKY_T980C:
1597 case CX23885_BOARD_DVBSKY_S950C:
1598 case CX23885_BOARD_TT_CT2_4500_CI:
1599 /*
1600 * GPIO-0 INTA from CiMax, input
1601 * GPIO-1 reset CiMax, output, high active
1602 * GPIO-2 reset demod, output, low active
1603 * GPIO-3 to GPIO-10 data/addr for CAM
1604 * GPIO-11 ~CS0 to CiMax1
1605 * GPIO-12 ~CS1 to CiMax2
1606 * GPIO-13 ADL0 load LSB addr
1607 * GPIO-14 ADL1 load MSB addr
1608 * GPIO-15 ~RDY from CiMax
1609 * GPIO-17 ~RD to CiMax
1610 * GPIO-18 ~WR to CiMax
1611 */
1612
1613 cx_set(GP0_IO, 0x00060002); /* GPIO 1/2 as output */
1614 cx_clear(GP0_IO, 0x00010004); /* GPIO 0 as input */
1615 mdelay(100); /* reset delay */
1616 cx_set(GP0_IO, 0x00060004); /* GPIO as out, reset high */
1617 cx_clear(GP0_IO, 0x00010002);
1618 cx_write(MC417_CTL, 0x00000037); /* enable GPIO3-18 pins */
1619
1620 /* GPIO-15 IN as ~ACK, rest as OUT */
1621 cx_write(MC417_OEN, 0x00001000);
1622
1623 /* ~RD, ~WR high; ADL0, ADL1 low; ~CS0, ~CS1 high */
1624 cx_write(MC417_RWD, 0x0000c300);
1625
1626 /* enable irq */
1627 cx_write(GPIO_ISM, 0x00000000); /* INTERRUPTS active low */
1628 break;
1629 case CX23885_BOARD_DVBSKY_S950:
1630 cx23885_gpio_enable(dev, GPIO_2, 1);
1631 cx23885_gpio_clear(dev, GPIO_2);
1632 msleep(100);
1633 cx23885_gpio_set(dev, GPIO_2);
1634 break;
1544 } 1635 }
1545} 1636}
1546 1637
@@ -1621,6 +1712,13 @@ int cx23885_ir_init(struct cx23885_dev *dev)
1621 case CX23885_BOARD_MYGICA_X8507: 1712 case CX23885_BOARD_MYGICA_X8507:
1622 case CX23885_BOARD_TBS_6980: 1713 case CX23885_BOARD_TBS_6980:
1623 case CX23885_BOARD_TBS_6981: 1714 case CX23885_BOARD_TBS_6981:
1715 case CX23885_BOARD_DVBSKY_T9580:
1716 case CX23885_BOARD_DVBSKY_T980C:
1717 case CX23885_BOARD_DVBSKY_S950C:
1718 case CX23885_BOARD_TT_CT2_4500_CI:
1719 case CX23885_BOARD_DVBSKY_S950:
1720 case CX23885_BOARD_DVBSKY_S952:
1721 case CX23885_BOARD_DVBSKY_T982:
1624 if (!enable_885_ir) 1722 if (!enable_885_ir)
1625 break; 1723 break;
1626 dev->sd_ir = cx23885_find_hw(dev, CX23885_HW_AV_CORE); 1724 dev->sd_ir = cx23885_find_hw(dev, CX23885_HW_AV_CORE);
@@ -1667,6 +1765,13 @@ void cx23885_ir_fini(struct cx23885_dev *dev)
1667 case CX23885_BOARD_MYGICA_X8507: 1765 case CX23885_BOARD_MYGICA_X8507:
1668 case CX23885_BOARD_TBS_6980: 1766 case CX23885_BOARD_TBS_6980:
1669 case CX23885_BOARD_TBS_6981: 1767 case CX23885_BOARD_TBS_6981:
1768 case CX23885_BOARD_DVBSKY_T9580:
1769 case CX23885_BOARD_DVBSKY_T980C:
1770 case CX23885_BOARD_DVBSKY_S950C:
1771 case CX23885_BOARD_TT_CT2_4500_CI:
1772 case CX23885_BOARD_DVBSKY_S950:
1773 case CX23885_BOARD_DVBSKY_S952:
1774 case CX23885_BOARD_DVBSKY_T982:
1670 cx23885_irq_remove(dev, PCI_MSK_AV_CORE); 1775 cx23885_irq_remove(dev, PCI_MSK_AV_CORE);
1671 /* sd_ir is a duplicate pointer to the AV Core, just clear it */ 1776 /* sd_ir is a duplicate pointer to the AV Core, just clear it */
1672 dev->sd_ir = NULL; 1777 dev->sd_ir = NULL;
@@ -1714,6 +1819,13 @@ void cx23885_ir_pci_int_enable(struct cx23885_dev *dev)
1714 case CX23885_BOARD_MYGICA_X8507: 1819 case CX23885_BOARD_MYGICA_X8507:
1715 case CX23885_BOARD_TBS_6980: 1820 case CX23885_BOARD_TBS_6980:
1716 case CX23885_BOARD_TBS_6981: 1821 case CX23885_BOARD_TBS_6981:
1822 case CX23885_BOARD_DVBSKY_T9580:
1823 case CX23885_BOARD_DVBSKY_T980C:
1824 case CX23885_BOARD_DVBSKY_S950C:
1825 case CX23885_BOARD_TT_CT2_4500_CI:
1826 case CX23885_BOARD_DVBSKY_S950:
1827 case CX23885_BOARD_DVBSKY_S952:
1828 case CX23885_BOARD_DVBSKY_T982:
1717 if (dev->sd_ir) 1829 if (dev->sd_ir)
1718 cx23885_irq_add_enable(dev, PCI_MSK_AV_CORE); 1830 cx23885_irq_add_enable(dev, PCI_MSK_AV_CORE);
1719 break; 1831 break;
@@ -1817,6 +1929,10 @@ void cx23885_card_setup(struct cx23885_dev *dev)
1817 case CX23885_BOARD_TEVII_S471: 1929 case CX23885_BOARD_TEVII_S471:
1818 case CX23885_BOARD_DVBWORLD_2005: 1930 case CX23885_BOARD_DVBWORLD_2005:
1819 case CX23885_BOARD_PROF_8000: 1931 case CX23885_BOARD_PROF_8000:
1932 case CX23885_BOARD_DVBSKY_T980C:
1933 case CX23885_BOARD_DVBSKY_S950C:
1934 case CX23885_BOARD_TT_CT2_4500_CI:
1935 case CX23885_BOARD_DVBSKY_S950:
1820 ts1->gen_ctrl_val = 0x5; /* Parallel */ 1936 ts1->gen_ctrl_val = 0x5; /* Parallel */
1821 ts1->ts_clk_en_val = 0x1; /* Enable TS_CLK */ 1937 ts1->ts_clk_en_val = 0x1; /* Enable TS_CLK */
1822 ts1->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO; 1938 ts1->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
@@ -1865,6 +1981,7 @@ void cx23885_card_setup(struct cx23885_dev *dev)
1865 ts2->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO; 1981 ts2->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
1866 break; 1982 break;
1867 case CX23885_BOARD_DVBSKY_T9580: 1983 case CX23885_BOARD_DVBSKY_T9580:
1984 case CX23885_BOARD_DVBSKY_T982:
1868 ts1->gen_ctrl_val = 0x5; /* Parallel */ 1985 ts1->gen_ctrl_val = 0x5; /* Parallel */
1869 ts1->ts_clk_en_val = 0x1; /* Enable TS_CLK */ 1986 ts1->ts_clk_en_val = 0x1; /* Enable TS_CLK */
1870 ts1->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO; 1987 ts1->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
@@ -1872,6 +1989,14 @@ void cx23885_card_setup(struct cx23885_dev *dev)
1872 ts2->ts_clk_en_val = 0x1; /* Enable TS_CLK */ 1989 ts2->ts_clk_en_val = 0x1; /* Enable TS_CLK */
1873 ts2->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO; 1990 ts2->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
1874 break; 1991 break;
1992 case CX23885_BOARD_DVBSKY_S952:
1993 ts1->gen_ctrl_val = 0x5; /* Parallel */
1994 ts1->ts_clk_en_val = 0x1; /* Enable TS_CLK */
1995 ts1->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
1996 ts2->gen_ctrl_val = 0xe; /* Serial bus */
1997 ts2->ts_clk_en_val = 0x1; /* Enable TS_CLK */
1998 ts2->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
1999 break;
1875 case CX23885_BOARD_HAUPPAUGE_HVR1250: 2000 case CX23885_BOARD_HAUPPAUGE_HVR1250:
1876 case CX23885_BOARD_HAUPPAUGE_HVR1500: 2001 case CX23885_BOARD_HAUPPAUGE_HVR1500:
1877 case CX23885_BOARD_HAUPPAUGE_HVR1500Q: 2002 case CX23885_BOARD_HAUPPAUGE_HVR1500Q:
@@ -1935,6 +2060,12 @@ void cx23885_card_setup(struct cx23885_dev *dev)
1935 case CX23885_BOARD_TBS_6980: 2060 case CX23885_BOARD_TBS_6980:
1936 case CX23885_BOARD_TBS_6981: 2061 case CX23885_BOARD_TBS_6981:
1937 case CX23885_BOARD_DVBSKY_T9580: 2062 case CX23885_BOARD_DVBSKY_T9580:
2063 case CX23885_BOARD_DVBSKY_T980C:
2064 case CX23885_BOARD_DVBSKY_S950C:
2065 case CX23885_BOARD_TT_CT2_4500_CI:
2066 case CX23885_BOARD_DVBSKY_S950:
2067 case CX23885_BOARD_DVBSKY_S952:
2068 case CX23885_BOARD_DVBSKY_T982:
1938 dev->sd_cx25840 = v4l2_i2c_new_subdev(&dev->v4l2_dev, 2069 dev->sd_cx25840 = v4l2_i2c_new_subdev(&dev->v4l2_dev,
1939 &dev->i2c_bus[2].i2c_adap, 2070 &dev->i2c_bus[2].i2c_adap,
1940 "cx25840", 0x88 >> 1, NULL); 2071 "cx25840", 0x88 >> 1, NULL);
diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c
index 331eddac7222..1d9d0f86ca8c 100644
--- a/drivers/media/pci/cx23885/cx23885-core.c
+++ b/drivers/media/pci/cx23885/cx23885-core.c
@@ -1078,7 +1078,7 @@ static __le32 *cx23885_risc_field(__le32 *rp, struct scatterlist *sglist,
1078 for (line = 0; line < lines; line++) { 1078 for (line = 0; line < lines; line++) {
1079 while (offset && offset >= sg_dma_len(sg)) { 1079 while (offset && offset >= sg_dma_len(sg)) {
1080 offset -= sg_dma_len(sg); 1080 offset -= sg_dma_len(sg);
1081 sg++; 1081 sg = sg_next(sg);
1082 } 1082 }
1083 1083
1084 if (lpi && line > 0 && !(line % lpi)) 1084 if (lpi && line > 0 && !(line % lpi))
@@ -1101,14 +1101,14 @@ static __le32 *cx23885_risc_field(__le32 *rp, struct scatterlist *sglist,
1101 *(rp++) = cpu_to_le32(0); /* bits 63-32 */ 1101 *(rp++) = cpu_to_le32(0); /* bits 63-32 */
1102 todo -= (sg_dma_len(sg)-offset); 1102 todo -= (sg_dma_len(sg)-offset);
1103 offset = 0; 1103 offset = 0;
1104 sg++; 1104 sg = sg_next(sg);
1105 while (todo > sg_dma_len(sg)) { 1105 while (todo > sg_dma_len(sg)) {
1106 *(rp++) = cpu_to_le32(RISC_WRITE| 1106 *(rp++) = cpu_to_le32(RISC_WRITE|
1107 sg_dma_len(sg)); 1107 sg_dma_len(sg));
1108 *(rp++) = cpu_to_le32(sg_dma_address(sg)); 1108 *(rp++) = cpu_to_le32(sg_dma_address(sg));
1109 *(rp++) = cpu_to_le32(0); /* bits 63-32 */ 1109 *(rp++) = cpu_to_le32(0); /* bits 63-32 */
1110 todo -= sg_dma_len(sg); 1110 todo -= sg_dma_len(sg);
1111 sg++; 1111 sg = sg_next(sg);
1112 } 1112 }
1113 *(rp++) = cpu_to_le32(RISC_WRITE|RISC_EOL|todo); 1113 *(rp++) = cpu_to_le32(RISC_WRITE|RISC_EOL|todo);
1114 *(rp++) = cpu_to_le32(sg_dma_address(sg)); 1114 *(rp++) = cpu_to_le32(sg_dma_address(sg));
@@ -1453,17 +1453,12 @@ int cx23885_buf_prepare(struct cx23885_buffer *buf, struct cx23885_tsport *port)
1453 struct cx23885_dev *dev = port->dev; 1453 struct cx23885_dev *dev = port->dev;
1454 int size = port->ts_packet_size * port->ts_packet_count; 1454 int size = port->ts_packet_size * port->ts_packet_count;
1455 struct sg_table *sgt = vb2_dma_sg_plane_desc(&buf->vb, 0); 1455 struct sg_table *sgt = vb2_dma_sg_plane_desc(&buf->vb, 0);
1456 int rc;
1457 1456
1458 dprintk(1, "%s: %p\n", __func__, buf); 1457 dprintk(1, "%s: %p\n", __func__, buf);
1459 if (vb2_plane_size(&buf->vb, 0) < size) 1458 if (vb2_plane_size(&buf->vb, 0) < size)
1460 return -EINVAL; 1459 return -EINVAL;
1461 vb2_set_plane_payload(&buf->vb, 0, size); 1460 vb2_set_plane_payload(&buf->vb, 0, size);
1462 1461
1463 rc = dma_map_sg(&dev->pci->dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
1464 if (!rc)
1465 return -EIO;
1466
1467 cx23885_risc_databuffer(dev->pci, &buf->risc, 1462 cx23885_risc_databuffer(dev->pci, &buf->risc,
1468 sgt->sgl, 1463 sgt->sgl,
1469 port->ts_packet_size, port->ts_packet_count, 0); 1464 port->ts_packet_size, port->ts_packet_count, 0);
@@ -1997,9 +1992,14 @@ static int cx23885_initdev(struct pci_dev *pci_dev,
1997 if (!pci_dma_supported(pci_dev, 0xffffffff)) { 1992 if (!pci_dma_supported(pci_dev, 0xffffffff)) {
1998 printk("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name); 1993 printk("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
1999 err = -EIO; 1994 err = -EIO;
2000 goto fail_irq; 1995 goto fail_context;
2001 } 1996 }
2002 1997
1998 dev->alloc_ctx = vb2_dma_sg_init_ctx(&pci_dev->dev);
1999 if (IS_ERR(dev->alloc_ctx)) {
2000 err = PTR_ERR(dev->alloc_ctx);
2001 goto fail_context;
2002 }
2003 err = request_irq(pci_dev->irq, cx23885_irq, 2003 err = request_irq(pci_dev->irq, cx23885_irq,
2004 IRQF_SHARED, dev->name, dev); 2004 IRQF_SHARED, dev->name, dev);
2005 if (err < 0) { 2005 if (err < 0) {
@@ -2028,6 +2028,8 @@ static int cx23885_initdev(struct pci_dev *pci_dev,
2028 return 0; 2028 return 0;
2029 2029
2030fail_irq: 2030fail_irq:
2031 vb2_dma_sg_cleanup_ctx(dev->alloc_ctx);
2032fail_context:
2031 cx23885_dev_unregister(dev); 2033 cx23885_dev_unregister(dev);
2032fail_ctrl: 2034fail_ctrl:
2033 v4l2_ctrl_handler_free(hdl); 2035 v4l2_ctrl_handler_free(hdl);
@@ -2053,6 +2055,7 @@ static void cx23885_finidev(struct pci_dev *pci_dev)
2053 free_irq(pci_dev->irq, dev); 2055 free_irq(pci_dev->irq, dev);
2054 2056
2055 cx23885_dev_unregister(dev); 2057 cx23885_dev_unregister(dev);
2058 vb2_dma_sg_cleanup_ctx(dev->alloc_ctx);
2056 v4l2_ctrl_handler_free(&dev->ctrl_handler); 2059 v4l2_ctrl_handler_free(&dev->ctrl_handler);
2057 v4l2_device_unregister(v4l2_dev); 2060 v4l2_device_unregister(v4l2_dev);
2058 kfree(dev); 2061 kfree(dev);
diff --git a/drivers/media/pci/cx23885/cx23885-dvb.c b/drivers/media/pci/cx23885/cx23885-dvb.c
index 4cb90317ff45..c47d18270cfc 100644
--- a/drivers/media/pci/cx23885/cx23885-dvb.c
+++ b/drivers/media/pci/cx23885/cx23885-dvb.c
@@ -71,6 +71,7 @@
71#include "si2165.h" 71#include "si2165.h"
72#include "si2168.h" 72#include "si2168.h"
73#include "si2157.h" 73#include "si2157.h"
74#include "sp2.h"
74#include "m88ds3103.h" 75#include "m88ds3103.h"
75#include "m88ts2022.h" 76#include "m88ts2022.h"
76 77
@@ -101,6 +102,7 @@ static int queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
101 port->ts_packet_count = 32; 102 port->ts_packet_count = 32;
102 *num_planes = 1; 103 *num_planes = 1;
103 sizes[0] = port->ts_packet_size * port->ts_packet_count; 104 sizes[0] = port->ts_packet_size * port->ts_packet_count;
105 alloc_ctxs[0] = port->dev->alloc_ctx;
104 *num_buffers = 32; 106 *num_buffers = 32;
105 return 0; 107 return 0;
106} 108}
@@ -121,11 +123,8 @@ static void buffer_finish(struct vb2_buffer *vb)
121 struct cx23885_dev *dev = port->dev; 123 struct cx23885_dev *dev = port->dev;
122 struct cx23885_buffer *buf = container_of(vb, 124 struct cx23885_buffer *buf = container_of(vb,
123 struct cx23885_buffer, vb); 125 struct cx23885_buffer, vb);
124 struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
125 126
126 cx23885_free_buffer(dev, buf); 127 cx23885_free_buffer(dev, buf);
127
128 dma_unmap_sg(&dev->pci->dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
129} 128}
130 129
131static void buffer_queue(struct vb2_buffer *vb) 130static void buffer_queue(struct vb2_buffer *vb)
@@ -616,6 +615,103 @@ static int dvbsky_t9580_set_voltage(struct dvb_frontend *fe,
616 return 0; 615 return 0;
617} 616}
618 617
618static int dvbsky_s952_portc_set_voltage(struct dvb_frontend *fe,
619 fe_sec_voltage_t voltage)
620{
621 struct cx23885_tsport *port = fe->dvb->priv;
622 struct cx23885_dev *dev = port->dev;
623
624 cx23885_gpio_enable(dev, GPIO_12 | GPIO_13, 1);
625
626 switch (voltage) {
627 case SEC_VOLTAGE_13:
628 cx23885_gpio_set(dev, GPIO_13);
629 cx23885_gpio_clear(dev, GPIO_12);
630 break;
631 case SEC_VOLTAGE_18:
632 cx23885_gpio_set(dev, GPIO_13);
633 cx23885_gpio_set(dev, GPIO_12);
634 break;
635 case SEC_VOLTAGE_OFF:
636 cx23885_gpio_clear(dev, GPIO_13);
637 cx23885_gpio_clear(dev, GPIO_12);
638 break;
639 }
640 /* call the frontend set_voltage function */
641 return port->fe_set_voltage(fe, voltage);
642}
643
644static int cx23885_sp2_ci_ctrl(void *priv, u8 read, int addr,
645 u8 data, int *mem)
646{
647 /* MC417 */
648 #define SP2_DATA 0x000000ff
649 #define SP2_WR 0x00008000
650 #define SP2_RD 0x00004000
651 #define SP2_ACK 0x00001000
652 #define SP2_ADHI 0x00000800
653 #define SP2_ADLO 0x00000400
654 #define SP2_CS1 0x00000200
655 #define SP2_CS0 0x00000100
656 #define SP2_EN_ALL 0x00001000
657 #define SP2_CTRL_OFF (SP2_CS1 | SP2_CS0 | SP2_WR | SP2_RD)
658
659 struct cx23885_tsport *port = priv;
660 struct cx23885_dev *dev = port->dev;
661 int ret;
662 int tmp = 0;
663 unsigned long timeout;
664
665 mutex_lock(&dev->gpio_lock);
666
667 /* write addr */
668 cx_write(MC417_OEN, SP2_EN_ALL);
669 cx_write(MC417_RWD, SP2_CTRL_OFF |
670 SP2_ADLO | (0xff & addr));
671 cx_clear(MC417_RWD, SP2_ADLO);
672 cx_write(MC417_RWD, SP2_CTRL_OFF |
673 SP2_ADHI | (0xff & (addr >> 8)));
674 cx_clear(MC417_RWD, SP2_ADHI);
675
676 if (read)
677 /* data in */
678 cx_write(MC417_OEN, SP2_EN_ALL | SP2_DATA);
679 else
680 /* data out */
681 cx_write(MC417_RWD, SP2_CTRL_OFF | data);
682
683 /* chip select 0 */
684 cx_clear(MC417_RWD, SP2_CS0);
685
686 /* read/write */
687 cx_clear(MC417_RWD, (read) ? SP2_RD : SP2_WR);
688
689 /* wait for a maximum of 1 msec */
690 timeout = jiffies + msecs_to_jiffies(1);
691 while (!time_after(jiffies, timeout)) {
692 tmp = cx_read(MC417_RWD);
693 if ((tmp & SP2_ACK) == 0)
694 break;
695 usleep_range(50, 100);
696 }
697
698 cx_set(MC417_RWD, SP2_CTRL_OFF);
699 *mem = tmp & 0xff;
700
701 mutex_unlock(&dev->gpio_lock);
702
703 if (!read) {
704 if (*mem < 0) {
705 ret = -EREMOTEIO;
706 goto err;
707 }
708 }
709
710 return 0;
711err:
712 return ret;
713}
714
619static int cx23885_dvb_set_frontend(struct dvb_frontend *fe) 715static int cx23885_dvb_set_frontend(struct dvb_frontend *fe)
620{ 716{
621 struct dtv_frontend_properties *p = &fe->dtv_property_cache; 717 struct dtv_frontend_properties *p = &fe->dtv_property_cache;
@@ -793,6 +889,32 @@ static const struct m88ds3103_config dvbsky_t9580_m88ds3103_config = {
793 .agc = 0x99, 889 .agc = 0x99,
794}; 890};
795 891
892static const struct m88ds3103_config dvbsky_s950c_m88ds3103_config = {
893 .i2c_addr = 0x68,
894 .clock = 27000000,
895 .i2c_wr_max = 33,
896 .clock_out = 0,
897 .ts_mode = M88DS3103_TS_CI,
898 .ts_clk = 10000,
899 .ts_clk_pol = 1,
900 .lnb_en_pol = 1,
901 .lnb_hv_pol = 0,
902 .agc = 0x99,
903};
904
905static const struct m88ds3103_config dvbsky_s952_portc_m88ds3103_config = {
906 .i2c_addr = 0x68,
907 .clock = 27000000,
908 .i2c_wr_max = 33,
909 .clock_out = 0,
910 .ts_mode = M88DS3103_TS_SERIAL,
911 .ts_clk = 96000,
912 .ts_clk_pol = 0,
913 .lnb_en_pol = 1,
914 .lnb_hv_pol = 0,
915 .agc = 0x99,
916};
917
796static int netup_altera_fpga_rw(void *device, int flag, int data, int read) 918static int netup_altera_fpga_rw(void *device, int flag, int data, int read)
797{ 919{
798 struct cx23885_dev *dev = (struct cx23885_dev *)device; 920 struct cx23885_dev *dev = (struct cx23885_dev *)device;
@@ -944,11 +1066,13 @@ static int dvb_register(struct cx23885_tsport *port)
944 struct vb2_dvb_frontend *fe0, *fe1 = NULL; 1066 struct vb2_dvb_frontend *fe0, *fe1 = NULL;
945 struct si2168_config si2168_config; 1067 struct si2168_config si2168_config;
946 struct si2157_config si2157_config; 1068 struct si2157_config si2157_config;
1069 struct sp2_config sp2_config;
947 struct m88ts2022_config m88ts2022_config; 1070 struct m88ts2022_config m88ts2022_config;
948 struct i2c_board_info info; 1071 struct i2c_board_info info;
949 struct i2c_adapter *adapter; 1072 struct i2c_adapter *adapter;
950 struct i2c_client *client_demod; 1073 struct i2c_client *client_demod = NULL, *client_tuner = NULL, *client_ci = NULL;
951 struct i2c_client *client_tuner; 1074 const struct m88ds3103_config *p_m88ds3103_config = NULL;
1075 int (*p_set_voltage)(struct dvb_frontend *fe, fe_sec_voltage_t voltage) = NULL;
952 int mfe_shared = 0; /* bus not shared by default */ 1076 int mfe_shared = 0; /* bus not shared by default */
953 int ret; 1077 int ret;
954 1078
@@ -973,11 +1097,11 @@ static int dvb_register(struct cx23885_tsport *port)
973 fe0->dvb.frontend = dvb_attach(s5h1409_attach, 1097 fe0->dvb.frontend = dvb_attach(s5h1409_attach,
974 &hauppauge_generic_config, 1098 &hauppauge_generic_config,
975 &i2c_bus->i2c_adap); 1099 &i2c_bus->i2c_adap);
976 if (fe0->dvb.frontend != NULL) { 1100 if (fe0->dvb.frontend == NULL)
977 dvb_attach(mt2131_attach, fe0->dvb.frontend, 1101 break;
978 &i2c_bus->i2c_adap, 1102 dvb_attach(mt2131_attach, fe0->dvb.frontend,
979 &hauppauge_generic_tunerconfig, 0); 1103 &i2c_bus->i2c_adap,
980 } 1104 &hauppauge_generic_tunerconfig, 0);
981 break; 1105 break;
982 case CX23885_BOARD_HAUPPAUGE_HVR1270: 1106 case CX23885_BOARD_HAUPPAUGE_HVR1270:
983 case CX23885_BOARD_HAUPPAUGE_HVR1275: 1107 case CX23885_BOARD_HAUPPAUGE_HVR1275:
@@ -985,11 +1109,11 @@ static int dvb_register(struct cx23885_tsport *port)
985 fe0->dvb.frontend = dvb_attach(lgdt3305_attach, 1109 fe0->dvb.frontend = dvb_attach(lgdt3305_attach,
986 &hauppauge_lgdt3305_config, 1110 &hauppauge_lgdt3305_config,
987 &i2c_bus->i2c_adap); 1111 &i2c_bus->i2c_adap);
988 if (fe0->dvb.frontend != NULL) { 1112 if (fe0->dvb.frontend == NULL)
989 dvb_attach(tda18271_attach, fe0->dvb.frontend, 1113 break;
990 0x60, &dev->i2c_bus[1].i2c_adap, 1114 dvb_attach(tda18271_attach, fe0->dvb.frontend,
991 &hauppauge_hvr127x_config); 1115 0x60, &dev->i2c_bus[1].i2c_adap,
992 } 1116 &hauppauge_hvr127x_config);
993 if (dev->board == CX23885_BOARD_HAUPPAUGE_HVR1275) 1117 if (dev->board == CX23885_BOARD_HAUPPAUGE_HVR1275)
994 cx23885_set_frontend_hook(port, fe0->dvb.frontend); 1118 cx23885_set_frontend_hook(port, fe0->dvb.frontend);
995 break; 1119 break;
@@ -999,11 +1123,12 @@ static int dvb_register(struct cx23885_tsport *port)
999 fe0->dvb.frontend = dvb_attach(s5h1411_attach, 1123 fe0->dvb.frontend = dvb_attach(s5h1411_attach,
1000 &hcw_s5h1411_config, 1124 &hcw_s5h1411_config,
1001 &i2c_bus->i2c_adap); 1125 &i2c_bus->i2c_adap);
1002 if (fe0->dvb.frontend != NULL) { 1126 if (fe0->dvb.frontend == NULL)
1003 dvb_attach(tda18271_attach, fe0->dvb.frontend, 1127 break;
1004 0x60, &dev->i2c_bus[1].i2c_adap, 1128
1005 &hauppauge_tda18271_config); 1129 dvb_attach(tda18271_attach, fe0->dvb.frontend,
1006 } 1130 0x60, &dev->i2c_bus[1].i2c_adap,
1131 &hauppauge_tda18271_config);
1007 1132
1008 tda18271_attach(&dev->ts1.analog_fe, 1133 tda18271_attach(&dev->ts1.analog_fe,
1009 0x60, &dev->i2c_bus[1].i2c_adap, 1134 0x60, &dev->i2c_bus[1].i2c_adap,
@@ -1018,14 +1143,15 @@ static int dvb_register(struct cx23885_tsport *port)
1018 dvb_attach(s5h1409_attach, 1143 dvb_attach(s5h1409_attach,
1019 &hauppauge_ezqam_config, 1144 &hauppauge_ezqam_config,
1020 &i2c_bus->i2c_adap); 1145 &i2c_bus->i2c_adap);
1021 if (fe0->dvb.frontend != NULL) { 1146 if (fe0->dvb.frontend == NULL)
1022 dvb_attach(tda829x_attach, fe0->dvb.frontend, 1147 break;
1023 &dev->i2c_bus[1].i2c_adap, 0x42, 1148
1024 &tda829x_no_probe); 1149 dvb_attach(tda829x_attach, fe0->dvb.frontend,
1025 dvb_attach(tda18271_attach, fe0->dvb.frontend, 1150 &dev->i2c_bus[1].i2c_adap, 0x42,
1026 0x60, &dev->i2c_bus[1].i2c_adap, 1151 &tda829x_no_probe);
1027 &hauppauge_tda18271_config); 1152 dvb_attach(tda18271_attach, fe0->dvb.frontend,
1028 } 1153 0x60, &dev->i2c_bus[1].i2c_adap,
1154 &hauppauge_tda18271_config);
1029 break; 1155 break;
1030 case 0: 1156 case 0:
1031 default: 1157 default:
@@ -1033,11 +1159,11 @@ static int dvb_register(struct cx23885_tsport *port)
1033 dvb_attach(s5h1409_attach, 1159 dvb_attach(s5h1409_attach,
1034 &hauppauge_generic_config, 1160 &hauppauge_generic_config,
1035 &i2c_bus->i2c_adap); 1161 &i2c_bus->i2c_adap);
1036 if (fe0->dvb.frontend != NULL) 1162 if (fe0->dvb.frontend == NULL)
1037 dvb_attach(mt2131_attach, fe0->dvb.frontend, 1163 break;
1038 &i2c_bus->i2c_adap, 1164 dvb_attach(mt2131_attach, fe0->dvb.frontend,
1039 &hauppauge_generic_tunerconfig, 0); 1165 &i2c_bus->i2c_adap,
1040 break; 1166 &hauppauge_generic_tunerconfig, 0);
1041 } 1167 }
1042 break; 1168 break;
1043 case CX23885_BOARD_HAUPPAUGE_HVR1800lp: 1169 case CX23885_BOARD_HAUPPAUGE_HVR1800lp:
@@ -1045,32 +1171,33 @@ static int dvb_register(struct cx23885_tsport *port)
1045 fe0->dvb.frontend = dvb_attach(s5h1409_attach, 1171 fe0->dvb.frontend = dvb_attach(s5h1409_attach,
1046 &hauppauge_hvr1800lp_config, 1172 &hauppauge_hvr1800lp_config,
1047 &i2c_bus->i2c_adap); 1173 &i2c_bus->i2c_adap);
1048 if (fe0->dvb.frontend != NULL) { 1174 if (fe0->dvb.frontend == NULL)
1049 dvb_attach(mt2131_attach, fe0->dvb.frontend, 1175 break;
1050 &i2c_bus->i2c_adap, 1176 dvb_attach(mt2131_attach, fe0->dvb.frontend,
1051 &hauppauge_generic_tunerconfig, 0); 1177 &i2c_bus->i2c_adap,
1052 } 1178 &hauppauge_generic_tunerconfig, 0);
1053 break; 1179 break;
1054 case CX23885_BOARD_DVICO_FUSIONHDTV_5_EXP: 1180 case CX23885_BOARD_DVICO_FUSIONHDTV_5_EXP:
1055 i2c_bus = &dev->i2c_bus[0]; 1181 i2c_bus = &dev->i2c_bus[0];
1056 fe0->dvb.frontend = dvb_attach(lgdt330x_attach, 1182 fe0->dvb.frontend = dvb_attach(lgdt330x_attach,
1057 &fusionhdtv_5_express, 1183 &fusionhdtv_5_express,
1058 &i2c_bus->i2c_adap); 1184 &i2c_bus->i2c_adap);
1059 if (fe0->dvb.frontend != NULL) { 1185 if (fe0->dvb.frontend == NULL)
1060 dvb_attach(simple_tuner_attach, fe0->dvb.frontend, 1186 break;
1061 &i2c_bus->i2c_adap, 0x61, 1187 dvb_attach(simple_tuner_attach, fe0->dvb.frontend,
1062 TUNER_LG_TDVS_H06XF); 1188 &i2c_bus->i2c_adap, 0x61,
1063 } 1189 TUNER_LG_TDVS_H06XF);
1064 break; 1190 break;
1065 case CX23885_BOARD_HAUPPAUGE_HVR1500Q: 1191 case CX23885_BOARD_HAUPPAUGE_HVR1500Q:
1066 i2c_bus = &dev->i2c_bus[1]; 1192 i2c_bus = &dev->i2c_bus[1];
1067 fe0->dvb.frontend = dvb_attach(s5h1409_attach, 1193 fe0->dvb.frontend = dvb_attach(s5h1409_attach,
1068 &hauppauge_hvr1500q_config, 1194 &hauppauge_hvr1500q_config,
1069 &dev->i2c_bus[0].i2c_adap); 1195 &dev->i2c_bus[0].i2c_adap);
1070 if (fe0->dvb.frontend != NULL) 1196 if (fe0->dvb.frontend == NULL)
1071 dvb_attach(xc5000_attach, fe0->dvb.frontend, 1197 break;
1072 &i2c_bus->i2c_adap, 1198 dvb_attach(xc5000_attach, fe0->dvb.frontend,
1073 &hauppauge_hvr1500q_tunerconfig); 1199 &i2c_bus->i2c_adap,
1200 &hauppauge_hvr1500q_tunerconfig);
1074 break; 1201 break;
1075 case CX23885_BOARD_HAUPPAUGE_HVR1500: 1202 case CX23885_BOARD_HAUPPAUGE_HVR1500:
1076 i2c_bus = &dev->i2c_bus[1]; 1203 i2c_bus = &dev->i2c_bus[1];
@@ -1101,14 +1228,14 @@ static int dvb_register(struct cx23885_tsport *port)
1101 fe0->dvb.frontend = dvb_attach(tda10048_attach, 1228 fe0->dvb.frontend = dvb_attach(tda10048_attach,
1102 &hauppauge_hvr1200_config, 1229 &hauppauge_hvr1200_config,
1103 &i2c_bus->i2c_adap); 1230 &i2c_bus->i2c_adap);
1104 if (fe0->dvb.frontend != NULL) { 1231 if (fe0->dvb.frontend == NULL)
1105 dvb_attach(tda829x_attach, fe0->dvb.frontend, 1232 break;
1106 &dev->i2c_bus[1].i2c_adap, 0x42, 1233 dvb_attach(tda829x_attach, fe0->dvb.frontend,
1107 &tda829x_no_probe); 1234 &dev->i2c_bus[1].i2c_adap, 0x42,
1108 dvb_attach(tda18271_attach, fe0->dvb.frontend, 1235 &tda829x_no_probe);
1109 0x60, &dev->i2c_bus[1].i2c_adap, 1236 dvb_attach(tda18271_attach, fe0->dvb.frontend,
1110 &hauppauge_hvr1200_tuner_config); 1237 0x60, &dev->i2c_bus[1].i2c_adap,
1111 } 1238 &hauppauge_hvr1200_tuner_config);
1112 break; 1239 break;
1113 case CX23885_BOARD_HAUPPAUGE_HVR1210: 1240 case CX23885_BOARD_HAUPPAUGE_HVR1210:
1114 i2c_bus = &dev->i2c_bus[0]; 1241 i2c_bus = &dev->i2c_bus[0];
@@ -1367,12 +1494,10 @@ static int dvb_register(struct cx23885_tsport *port)
1367 fe0->dvb.frontend = dvb_attach(lgs8gxx_attach, 1494 fe0->dvb.frontend = dvb_attach(lgs8gxx_attach,
1368 &mygica_x8506_lgs8gl5_config, 1495 &mygica_x8506_lgs8gl5_config,
1369 &i2c_bus->i2c_adap); 1496 &i2c_bus->i2c_adap);
1370 if (fe0->dvb.frontend != NULL) { 1497 if (fe0->dvb.frontend == NULL)
1371 dvb_attach(xc5000_attach, 1498 break;
1372 fe0->dvb.frontend, 1499 dvb_attach(xc5000_attach, fe0->dvb.frontend,
1373 &i2c_bus2->i2c_adap, 1500 &i2c_bus2->i2c_adap, &mygica_x8506_xc5000_config);
1374 &mygica_x8506_xc5000_config);
1375 }
1376 cx23885_set_frontend_hook(port, fe0->dvb.frontend); 1501 cx23885_set_frontend_hook(port, fe0->dvb.frontend);
1377 break; 1502 break;
1378 case CX23885_BOARD_MYGICA_X8507: 1503 case CX23885_BOARD_MYGICA_X8507:
@@ -1381,12 +1506,12 @@ static int dvb_register(struct cx23885_tsport *port)
1381 fe0->dvb.frontend = dvb_attach(mb86a20s_attach, 1506 fe0->dvb.frontend = dvb_attach(mb86a20s_attach,
1382 &mygica_x8507_mb86a20s_config, 1507 &mygica_x8507_mb86a20s_config,
1383 &i2c_bus->i2c_adap); 1508 &i2c_bus->i2c_adap);
1384 if (fe0->dvb.frontend != NULL) { 1509 if (fe0->dvb.frontend == NULL)
1385 dvb_attach(xc5000_attach, 1510 break;
1386 fe0->dvb.frontend, 1511
1387 &i2c_bus2->i2c_adap, 1512 dvb_attach(xc5000_attach, fe0->dvb.frontend,
1388 &mygica_x8507_xc5000_config); 1513 &i2c_bus2->i2c_adap,
1389 } 1514 &mygica_x8507_xc5000_config);
1390 cx23885_set_frontend_hook(port, fe0->dvb.frontend); 1515 cx23885_set_frontend_hook(port, fe0->dvb.frontend);
1391 break; 1516 break;
1392 case CX23885_BOARD_MAGICPRO_PROHDTVE2: 1517 case CX23885_BOARD_MAGICPRO_PROHDTVE2:
@@ -1395,12 +1520,11 @@ static int dvb_register(struct cx23885_tsport *port)
1395 fe0->dvb.frontend = dvb_attach(lgs8gxx_attach, 1520 fe0->dvb.frontend = dvb_attach(lgs8gxx_attach,
1396 &magicpro_prohdtve2_lgs8g75_config, 1521 &magicpro_prohdtve2_lgs8g75_config,
1397 &i2c_bus->i2c_adap); 1522 &i2c_bus->i2c_adap);
1398 if (fe0->dvb.frontend != NULL) { 1523 if (fe0->dvb.frontend == NULL)
1399 dvb_attach(xc5000_attach, 1524 break;
1400 fe0->dvb.frontend, 1525 dvb_attach(xc5000_attach, fe0->dvb.frontend,
1401 &i2c_bus2->i2c_adap, 1526 &i2c_bus2->i2c_adap,
1402 &magicpro_prohdtve2_xc5000_config); 1527 &magicpro_prohdtve2_xc5000_config);
1403 }
1404 cx23885_set_frontend_hook(port, fe0->dvb.frontend); 1528 cx23885_set_frontend_hook(port, fe0->dvb.frontend);
1405 break; 1529 break;
1406 case CX23885_BOARD_HAUPPAUGE_HVR1850: 1530 case CX23885_BOARD_HAUPPAUGE_HVR1850:
@@ -1408,10 +1532,11 @@ static int dvb_register(struct cx23885_tsport *port)
1408 fe0->dvb.frontend = dvb_attach(s5h1411_attach, 1532 fe0->dvb.frontend = dvb_attach(s5h1411_attach,
1409 &hcw_s5h1411_config, 1533 &hcw_s5h1411_config,
1410 &i2c_bus->i2c_adap); 1534 &i2c_bus->i2c_adap);
1411 if (fe0->dvb.frontend != NULL) 1535 if (fe0->dvb.frontend == NULL)
1412 dvb_attach(tda18271_attach, fe0->dvb.frontend, 1536 break;
1413 0x60, &dev->i2c_bus[0].i2c_adap, 1537 dvb_attach(tda18271_attach, fe0->dvb.frontend,
1414 &hauppauge_tda18271_config); 1538 0x60, &dev->i2c_bus[0].i2c_adap,
1539 &hauppauge_tda18271_config);
1415 1540
1416 tda18271_attach(&dev->ts1.analog_fe, 1541 tda18271_attach(&dev->ts1.analog_fe,
1417 0x60, &dev->i2c_bus[1].i2c_adap, 1542 0x60, &dev->i2c_bus[1].i2c_adap,
@@ -1423,10 +1548,11 @@ static int dvb_register(struct cx23885_tsport *port)
1423 fe0->dvb.frontend = dvb_attach(s5h1411_attach, 1548 fe0->dvb.frontend = dvb_attach(s5h1411_attach,
1424 &hcw_s5h1411_config, 1549 &hcw_s5h1411_config,
1425 &i2c_bus->i2c_adap); 1550 &i2c_bus->i2c_adap);
1426 if (fe0->dvb.frontend != NULL) 1551 if (fe0->dvb.frontend == NULL)
1427 dvb_attach(tda18271_attach, fe0->dvb.frontend, 1552 break;
1428 0x60, &dev->i2c_bus[0].i2c_adap, 1553 dvb_attach(tda18271_attach, fe0->dvb.frontend,
1429 &hauppauge_tda18271_config); 1554 0x60, &dev->i2c_bus[0].i2c_adap,
1555 &hauppauge_tda18271_config);
1430 break; 1556 break;
1431 case CX23885_BOARD_MYGICA_X8558PRO: 1557 case CX23885_BOARD_MYGICA_X8558PRO:
1432 switch (port->nr) { 1558 switch (port->nr) {
@@ -1436,12 +1562,11 @@ static int dvb_register(struct cx23885_tsport *port)
1436 fe0->dvb.frontend = dvb_attach(atbm8830_attach, 1562 fe0->dvb.frontend = dvb_attach(atbm8830_attach,
1437 &mygica_x8558pro_atbm8830_cfg1, 1563 &mygica_x8558pro_atbm8830_cfg1,
1438 &i2c_bus->i2c_adap); 1564 &i2c_bus->i2c_adap);
1439 if (fe0->dvb.frontend != NULL) { 1565 if (fe0->dvb.frontend == NULL)
1440 dvb_attach(max2165_attach, 1566 break;
1441 fe0->dvb.frontend, 1567 dvb_attach(max2165_attach, fe0->dvb.frontend,
1442 &i2c_bus->i2c_adap, 1568 &i2c_bus->i2c_adap,
1443 &mygic_x8558pro_max2165_cfg1); 1569 &mygic_x8558pro_max2165_cfg1);
1444 }
1445 break; 1570 break;
1446 /* port C */ 1571 /* port C */
1447 case 2: 1572 case 2:
@@ -1449,13 +1574,11 @@ static int dvb_register(struct cx23885_tsport *port)
1449 fe0->dvb.frontend = dvb_attach(atbm8830_attach, 1574 fe0->dvb.frontend = dvb_attach(atbm8830_attach,
1450 &mygica_x8558pro_atbm8830_cfg2, 1575 &mygica_x8558pro_atbm8830_cfg2,
1451 &i2c_bus->i2c_adap); 1576 &i2c_bus->i2c_adap);
1452 if (fe0->dvb.frontend != NULL) { 1577 if (fe0->dvb.frontend == NULL)
1453 dvb_attach(max2165_attach, 1578 break;
1454 fe0->dvb.frontend, 1579 dvb_attach(max2165_attach, fe0->dvb.frontend,
1455 &i2c_bus->i2c_adap, 1580 &i2c_bus->i2c_adap,
1456 &mygic_x8558pro_max2165_cfg2); 1581 &mygic_x8558pro_max2165_cfg2);
1457 }
1458 break;
1459 } 1582 }
1460 break; 1583 break;
1461 case CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF: 1584 case CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF:
@@ -1467,15 +1590,15 @@ static int dvb_register(struct cx23885_tsport *port)
1467 fe0->dvb.frontend = dvb_attach(stv0367ter_attach, 1590 fe0->dvb.frontend = dvb_attach(stv0367ter_attach,
1468 &netup_stv0367_config[port->nr - 1], 1591 &netup_stv0367_config[port->nr - 1],
1469 &i2c_bus->i2c_adap); 1592 &i2c_bus->i2c_adap);
1470 if (fe0->dvb.frontend != NULL) { 1593 if (fe0->dvb.frontend == NULL)
1471 if (NULL == dvb_attach(xc5000_attach, 1594 break;
1472 fe0->dvb.frontend, 1595 if (NULL == dvb_attach(xc5000_attach, fe0->dvb.frontend,
1473 &i2c_bus->i2c_adap, 1596 &i2c_bus->i2c_adap,
1474 &netup_xc5000_config[port->nr - 1])) 1597 &netup_xc5000_config[port->nr - 1]))
1475 goto frontend_detach; 1598 goto frontend_detach;
1476 /* load xc5000 firmware */ 1599 /* load xc5000 firmware */
1477 fe0->dvb.frontend->ops.tuner_ops.init(fe0->dvb.frontend); 1600 fe0->dvb.frontend->ops.tuner_ops.init(fe0->dvb.frontend);
1478 } 1601
1479 /* MFE frontend 2 */ 1602 /* MFE frontend 2 */
1480 fe1 = vb2_dvb_get_frontend(&port->frontends, 2); 1603 fe1 = vb2_dvb_get_frontend(&port->frontends, 2);
1481 if (fe1 == NULL) 1604 if (fe1 == NULL)
@@ -1484,14 +1607,15 @@ static int dvb_register(struct cx23885_tsport *port)
1484 fe1->dvb.frontend = dvb_attach(stv0367cab_attach, 1607 fe1->dvb.frontend = dvb_attach(stv0367cab_attach,
1485 &netup_stv0367_config[port->nr - 1], 1608 &netup_stv0367_config[port->nr - 1],
1486 &i2c_bus->i2c_adap); 1609 &i2c_bus->i2c_adap);
1487 if (fe1->dvb.frontend != NULL) { 1610 if (fe1->dvb.frontend == NULL)
1488 fe1->dvb.frontend->id = 1; 1611 break;
1489 if (NULL == dvb_attach(xc5000_attach, 1612
1490 fe1->dvb.frontend, 1613 fe1->dvb.frontend->id = 1;
1491 &i2c_bus->i2c_adap, 1614 if (NULL == dvb_attach(xc5000_attach,
1492 &netup_xc5000_config[port->nr - 1])) 1615 fe1->dvb.frontend,
1493 goto frontend_detach; 1616 &i2c_bus->i2c_adap,
1494 } 1617 &netup_xc5000_config[port->nr - 1]))
1618 goto frontend_detach;
1495 break; 1619 break;
1496 case CX23885_BOARD_TERRATEC_CINERGY_T_PCIE_DUAL: 1620 case CX23885_BOARD_TERRATEC_CINERGY_T_PCIE_DUAL:
1497 i2c_bus = &dev->i2c_bus[0]; 1621 i2c_bus = &dev->i2c_bus[0];
@@ -1503,26 +1627,26 @@ static int dvb_register(struct cx23885_tsport *port)
1503 fe0->dvb.frontend = dvb_attach(drxk_attach, 1627 fe0->dvb.frontend = dvb_attach(drxk_attach,
1504 &terratec_drxk_config[0], 1628 &terratec_drxk_config[0],
1505 &i2c_bus->i2c_adap); 1629 &i2c_bus->i2c_adap);
1506 if (fe0->dvb.frontend != NULL) { 1630 if (fe0->dvb.frontend == NULL)
1507 if (!dvb_attach(mt2063_attach, 1631 break;
1508 fe0->dvb.frontend, 1632 if (!dvb_attach(mt2063_attach,
1509 &terratec_mt2063_config[0], 1633 fe0->dvb.frontend,
1510 &i2c_bus2->i2c_adap)) 1634 &terratec_mt2063_config[0],
1511 goto frontend_detach; 1635 &i2c_bus2->i2c_adap))
1512 } 1636 goto frontend_detach;
1513 break; 1637 break;
1514 /* port c */ 1638 /* port c */
1515 case 2: 1639 case 2:
1516 fe0->dvb.frontend = dvb_attach(drxk_attach, 1640 fe0->dvb.frontend = dvb_attach(drxk_attach,
1517 &terratec_drxk_config[1], 1641 &terratec_drxk_config[1],
1518 &i2c_bus->i2c_adap); 1642 &i2c_bus->i2c_adap);
1519 if (fe0->dvb.frontend != NULL) { 1643 if (fe0->dvb.frontend == NULL)
1520 if (!dvb_attach(mt2063_attach, 1644 break;
1521 fe0->dvb.frontend, 1645 if (!dvb_attach(mt2063_attach,
1522 &terratec_mt2063_config[1], 1646 fe0->dvb.frontend,
1523 &i2c_bus2->i2c_adap)) 1647 &terratec_mt2063_config[1],
1524 goto frontend_detach; 1648 &i2c_bus2->i2c_adap))
1525 } 1649 goto frontend_detach;
1526 break; 1650 break;
1527 } 1651 }
1528 break; 1652 break;
@@ -1532,10 +1656,10 @@ static int dvb_register(struct cx23885_tsport *port)
1532 fe0->dvb.frontend = dvb_attach(ds3000_attach, 1656 fe0->dvb.frontend = dvb_attach(ds3000_attach,
1533 &tevii_ds3000_config, 1657 &tevii_ds3000_config,
1534 &i2c_bus->i2c_adap); 1658 &i2c_bus->i2c_adap);
1535 if (fe0->dvb.frontend != NULL) { 1659 if (fe0->dvb.frontend == NULL)
1536 dvb_attach(ts2020_attach, fe0->dvb.frontend, 1660 break;
1537 &tevii_ts2020_config, &i2c_bus->i2c_adap); 1661 dvb_attach(ts2020_attach, fe0->dvb.frontend,
1538 } 1662 &tevii_ts2020_config, &i2c_bus->i2c_adap);
1539 break; 1663 break;
1540 case CX23885_BOARD_PROF_8000: 1664 case CX23885_BOARD_PROF_8000:
1541 i2c_bus = &dev->i2c_bus[0]; 1665 i2c_bus = &dev->i2c_bus[0];
@@ -1544,15 +1668,15 @@ static int dvb_register(struct cx23885_tsport *port)
1544 &prof_8000_stv090x_config, 1668 &prof_8000_stv090x_config,
1545 &i2c_bus->i2c_adap, 1669 &i2c_bus->i2c_adap,
1546 STV090x_DEMODULATOR_0); 1670 STV090x_DEMODULATOR_0);
1547 if (fe0->dvb.frontend != NULL) { 1671 if (fe0->dvb.frontend == NULL)
1548 if (!dvb_attach(stb6100_attach, 1672 break;
1549 fe0->dvb.frontend, 1673 if (!dvb_attach(stb6100_attach,
1550 &prof_8000_stb6100_config, 1674 fe0->dvb.frontend,
1551 &i2c_bus->i2c_adap)) 1675 &prof_8000_stb6100_config,
1552 goto frontend_detach; 1676 &i2c_bus->i2c_adap))
1677 goto frontend_detach;
1553 1678
1554 fe0->dvb.frontend->ops.set_voltage = p8000_set_voltage; 1679 fe0->dvb.frontend->ops.set_voltage = p8000_set_voltage;
1555 }
1556 break; 1680 break;
1557 case CX23885_BOARD_HAUPPAUGE_HVR4400: 1681 case CX23885_BOARD_HAUPPAUGE_HVR4400:
1558 i2c_bus = &dev->i2c_bus[0]; 1682 i2c_bus = &dev->i2c_bus[0];
@@ -1563,30 +1687,31 @@ static int dvb_register(struct cx23885_tsport *port)
1563 fe0->dvb.frontend = dvb_attach(tda10071_attach, 1687 fe0->dvb.frontend = dvb_attach(tda10071_attach,
1564 &hauppauge_tda10071_config, 1688 &hauppauge_tda10071_config,
1565 &i2c_bus->i2c_adap); 1689 &i2c_bus->i2c_adap);
1566 if (fe0->dvb.frontend != NULL) { 1690 if (fe0->dvb.frontend == NULL)
1567 if (!dvb_attach(a8293_attach, fe0->dvb.frontend, 1691 break;
1568 &i2c_bus->i2c_adap, 1692 if (!dvb_attach(a8293_attach, fe0->dvb.frontend,
1569 &hauppauge_a8293_config)) 1693 &i2c_bus->i2c_adap,
1570 goto frontend_detach; 1694 &hauppauge_a8293_config))
1571 } 1695 goto frontend_detach;
1572 break; 1696 break;
1573 /* port c */ 1697 /* port c */
1574 case 2: 1698 case 2:
1575 fe0->dvb.frontend = dvb_attach(si2165_attach, 1699 fe0->dvb.frontend = dvb_attach(si2165_attach,
1576 &hauppauge_hvr4400_si2165_config, 1700 &hauppauge_hvr4400_si2165_config,
1577 &i2c_bus->i2c_adap); 1701 &i2c_bus->i2c_adap);
1578 if (fe0->dvb.frontend != NULL) { 1702 if (fe0->dvb.frontend == NULL)
1579 fe0->dvb.frontend->ops.i2c_gate_ctrl = NULL; 1703 break;
1580 if (!dvb_attach(tda18271_attach, 1704 fe0->dvb.frontend->ops.i2c_gate_ctrl = NULL;
1581 fe0->dvb.frontend, 1705 if (!dvb_attach(tda18271_attach,
1582 0x60, &i2c_bus2->i2c_adap, 1706 fe0->dvb.frontend,
1583 &hauppauge_hvr4400_tuner_config)) 1707 0x60, &i2c_bus2->i2c_adap,
1584 goto frontend_detach; 1708 &hauppauge_hvr4400_tuner_config))
1585 } 1709 goto frontend_detach;
1586 break; 1710 break;
1587 } 1711 }
1588 break; 1712 break;
1589 case CX23885_BOARD_DVBSKY_T9580: 1713 case CX23885_BOARD_DVBSKY_T9580:
1714 case CX23885_BOARD_DVBSKY_S950:
1590 i2c_bus = &dev->i2c_bus[0]; 1715 i2c_bus = &dev->i2c_bus[0];
1591 i2c_bus2 = &dev->i2c_bus[1]; 1716 i2c_bus2 = &dev->i2c_bus[1];
1592 switch (port->nr) { 1717 switch (port->nr) {
@@ -1680,6 +1805,201 @@ static int dvb_register(struct cx23885_tsport *port)
1680 break; 1805 break;
1681 } 1806 }
1682 break; 1807 break;
1808 case CX23885_BOARD_DVBSKY_T980C:
1809 case CX23885_BOARD_TT_CT2_4500_CI:
1810 i2c_bus = &dev->i2c_bus[1];
1811 i2c_bus2 = &dev->i2c_bus[0];
1812
1813 /* attach frontend */
1814 memset(&si2168_config, 0, sizeof(si2168_config));
1815 si2168_config.i2c_adapter = &adapter;
1816 si2168_config.fe = &fe0->dvb.frontend;
1817 si2168_config.ts_mode = SI2168_TS_PARALLEL;
1818 memset(&info, 0, sizeof(struct i2c_board_info));
1819 strlcpy(info.type, "si2168", I2C_NAME_SIZE);
1820 info.addr = 0x64;
1821 info.platform_data = &si2168_config;
1822 request_module(info.type);
1823 client_demod = i2c_new_device(&i2c_bus->i2c_adap, &info);
1824 if (client_demod == NULL ||
1825 client_demod->dev.driver == NULL)
1826 goto frontend_detach;
1827 if (!try_module_get(client_demod->dev.driver->owner)) {
1828 i2c_unregister_device(client_demod);
1829 goto frontend_detach;
1830 }
1831 port->i2c_client_demod = client_demod;
1832
1833 /* attach tuner */
1834 memset(&si2157_config, 0, sizeof(si2157_config));
1835 si2157_config.fe = fe0->dvb.frontend;
1836 memset(&info, 0, sizeof(struct i2c_board_info));
1837 strlcpy(info.type, "si2157", I2C_NAME_SIZE);
1838 info.addr = 0x60;
1839 info.platform_data = &si2157_config;
1840 request_module(info.type);
1841 client_tuner = i2c_new_device(adapter, &info);
1842 if (client_tuner == NULL ||
1843 client_tuner->dev.driver == NULL) {
1844 module_put(client_demod->dev.driver->owner);
1845 i2c_unregister_device(client_demod);
1846 goto frontend_detach;
1847 }
1848 if (!try_module_get(client_tuner->dev.driver->owner)) {
1849 i2c_unregister_device(client_tuner);
1850 module_put(client_demod->dev.driver->owner);
1851 i2c_unregister_device(client_demod);
1852 goto frontend_detach;
1853 }
1854 port->i2c_client_tuner = client_tuner;
1855 break;
1856 case CX23885_BOARD_DVBSKY_S950C:
1857 i2c_bus = &dev->i2c_bus[1];
1858 i2c_bus2 = &dev->i2c_bus[0];
1859
1860 /* attach frontend */
1861 fe0->dvb.frontend = dvb_attach(m88ds3103_attach,
1862 &dvbsky_s950c_m88ds3103_config,
1863 &i2c_bus->i2c_adap, &adapter);
1864 if (fe0->dvb.frontend == NULL)
1865 break;
1866
1867 /* attach tuner */
1868 memset(&m88ts2022_config, 0, sizeof(m88ts2022_config));
1869 m88ts2022_config.fe = fe0->dvb.frontend;
1870 m88ts2022_config.clock = 27000000;
1871 memset(&info, 0, sizeof(struct i2c_board_info));
1872 strlcpy(info.type, "m88ts2022", I2C_NAME_SIZE);
1873 info.addr = 0x60;
1874 info.platform_data = &m88ts2022_config;
1875 request_module(info.type);
1876 client_tuner = i2c_new_device(adapter, &info);
1877 if (client_tuner == NULL ||
1878 client_tuner->dev.driver == NULL)
1879 goto frontend_detach;
1880 if (!try_module_get(client_tuner->dev.driver->owner)) {
1881 i2c_unregister_device(client_tuner);
1882 goto frontend_detach;
1883 }
1884
1885 /* delegate signal strength measurement to tuner */
1886 fe0->dvb.frontend->ops.read_signal_strength =
1887 fe0->dvb.frontend->ops.tuner_ops.get_rf_strength;
1888
1889 port->i2c_client_tuner = client_tuner;
1890 break;
1891 case CX23885_BOARD_DVBSKY_S952:
1892 switch (port->nr) {
1893 /* port b */
1894 case 1:
1895 i2c_bus = &dev->i2c_bus[1];
1896 p_m88ds3103_config = &dvbsky_t9580_m88ds3103_config;
1897 p_set_voltage = dvbsky_t9580_set_voltage;
1898 break;
1899 /* port c */
1900 case 2:
1901 i2c_bus = &dev->i2c_bus[0];
1902 p_m88ds3103_config = &dvbsky_s952_portc_m88ds3103_config;
1903 p_set_voltage = dvbsky_s952_portc_set_voltage;
1904 break;
1905 }
1906
1907 /* attach frontend */
1908 fe0->dvb.frontend = dvb_attach(m88ds3103_attach,
1909 p_m88ds3103_config,
1910 &i2c_bus->i2c_adap, &adapter);
1911 if (fe0->dvb.frontend == NULL)
1912 break;
1913
1914 /* attach tuner */
1915 memset(&m88ts2022_config, 0, sizeof(m88ts2022_config));
1916 m88ts2022_config.fe = fe0->dvb.frontend;
1917 m88ts2022_config.clock = 27000000;
1918 memset(&info, 0, sizeof(struct i2c_board_info));
1919 strlcpy(info.type, "m88ts2022", I2C_NAME_SIZE);
1920 info.addr = 0x60;
1921 info.platform_data = &m88ts2022_config;
1922 request_module(info.type);
1923 client_tuner = i2c_new_device(adapter, &info);
1924 if (client_tuner == NULL ||
1925 client_tuner->dev.driver == NULL)
1926 goto frontend_detach;
1927 if (!try_module_get(client_tuner->dev.driver->owner)) {
1928 i2c_unregister_device(client_tuner);
1929 goto frontend_detach;
1930 }
1931
1932 /* delegate signal strength measurement to tuner */
1933 fe0->dvb.frontend->ops.read_signal_strength =
1934 fe0->dvb.frontend->ops.tuner_ops.get_rf_strength;
1935
1936 /*
1937 * for setting the voltage we need to set GPIOs on
1938 * the card.
1939 */
1940 port->fe_set_voltage =
1941 fe0->dvb.frontend->ops.set_voltage;
1942 fe0->dvb.frontend->ops.set_voltage = p_set_voltage;
1943
1944 port->i2c_client_tuner = client_tuner;
1945 break;
1946 case CX23885_BOARD_DVBSKY_T982:
1947 memset(&si2168_config, 0, sizeof(si2168_config));
1948 switch (port->nr) {
1949 /* port b */
1950 case 1:
1951 i2c_bus = &dev->i2c_bus[1];
1952 si2168_config.ts_mode = SI2168_TS_PARALLEL;
1953 break;
1954 /* port c */
1955 case 2:
1956 i2c_bus = &dev->i2c_bus[0];
1957 si2168_config.ts_mode = SI2168_TS_SERIAL;
1958 break;
1959 }
1960
1961 /* attach frontend */
1962 si2168_config.i2c_adapter = &adapter;
1963 si2168_config.fe = &fe0->dvb.frontend;
1964 memset(&info, 0, sizeof(struct i2c_board_info));
1965 strlcpy(info.type, "si2168", I2C_NAME_SIZE);
1966 info.addr = 0x64;
1967 info.platform_data = &si2168_config;
1968 request_module(info.type);
1969 client_demod = i2c_new_device(&i2c_bus->i2c_adap, &info);
1970 if (client_demod == NULL ||
1971 client_demod->dev.driver == NULL)
1972 goto frontend_detach;
1973 if (!try_module_get(client_demod->dev.driver->owner)) {
1974 i2c_unregister_device(client_demod);
1975 goto frontend_detach;
1976 }
1977 port->i2c_client_demod = client_demod;
1978
1979 /* attach tuner */
1980 memset(&si2157_config, 0, sizeof(si2157_config));
1981 si2157_config.fe = fe0->dvb.frontend;
1982 memset(&info, 0, sizeof(struct i2c_board_info));
1983 strlcpy(info.type, "si2157", I2C_NAME_SIZE);
1984 info.addr = 0x60;
1985 info.platform_data = &si2157_config;
1986 request_module(info.type);
1987 client_tuner = i2c_new_device(adapter, &info);
1988 if (client_tuner == NULL ||
1989 client_tuner->dev.driver == NULL) {
1990 module_put(client_demod->dev.driver->owner);
1991 i2c_unregister_device(client_demod);
1992 goto frontend_detach;
1993 }
1994 if (!try_module_get(client_tuner->dev.driver->owner)) {
1995 i2c_unregister_device(client_tuner);
1996 module_put(client_demod->dev.driver->owner);
1997 i2c_unregister_device(client_demod);
1998 port->i2c_client_demod = NULL;
1999 goto frontend_detach;
2000 }
2001 port->i2c_client_tuner = client_tuner;
2002 break;
1683 default: 2003 default:
1684 printk(KERN_INFO "%s: The frontend of your DVB/ATSC card " 2004 printk(KERN_INFO "%s: The frontend of your DVB/ATSC card "
1685 " isn't supported yet\n", 2005 " isn't supported yet\n",
@@ -1754,7 +2074,10 @@ static int dvb_register(struct cx23885_tsport *port)
1754 memcpy(port->frontends.adapter.proposed_mac, eeprom + 0xa0, 6); 2074 memcpy(port->frontends.adapter.proposed_mac, eeprom + 0xa0, 6);
1755 break; 2075 break;
1756 } 2076 }
1757 case CX23885_BOARD_DVBSKY_T9580: { 2077 case CX23885_BOARD_DVBSKY_T9580:
2078 case CX23885_BOARD_DVBSKY_S950:
2079 case CX23885_BOARD_DVBSKY_S952:
2080 case CX23885_BOARD_DVBSKY_T982: {
1758 u8 eeprom[256]; /* 24C02 i2c eeprom */ 2081 u8 eeprom[256]; /* 24C02 i2c eeprom */
1759 2082
1760 if (port->nr > 2) 2083 if (port->nr > 2)
@@ -1764,12 +2087,67 @@ static int dvb_register(struct cx23885_tsport *port)
1764 dev->i2c_bus[0].i2c_client.addr = 0xa0 >> 1; 2087 dev->i2c_bus[0].i2c_client.addr = 0xa0 >> 1;
1765 tveeprom_read(&dev->i2c_bus[0].i2c_client, eeprom, 2088 tveeprom_read(&dev->i2c_bus[0].i2c_client, eeprom,
1766 sizeof(eeprom)); 2089 sizeof(eeprom));
1767 printk(KERN_INFO "DVBSky T9580 port %d MAC address: %pM\n", 2090 printk(KERN_INFO "%s port %d MAC address: %pM\n",
1768 port->nr, eeprom + 0xc0 + (port->nr-1) * 8); 2091 cx23885_boards[dev->board].name, port->nr,
2092 eeprom + 0xc0 + (port->nr-1) * 8);
1769 memcpy(port->frontends.adapter.proposed_mac, eeprom + 0xc0 + 2093 memcpy(port->frontends.adapter.proposed_mac, eeprom + 0xc0 +
1770 (port->nr-1) * 8, 6); 2094 (port->nr-1) * 8, 6);
1771 break; 2095 break;
1772 } 2096 }
2097 case CX23885_BOARD_DVBSKY_S950C:
2098 case CX23885_BOARD_DVBSKY_T980C:
2099 case CX23885_BOARD_TT_CT2_4500_CI: {
2100 u8 eeprom[256]; /* 24C02 i2c eeprom */
2101
2102 /* attach CI */
2103 memset(&sp2_config, 0, sizeof(sp2_config));
2104 sp2_config.dvb_adap = &port->frontends.adapter;
2105 sp2_config.priv = port;
2106 sp2_config.ci_control = cx23885_sp2_ci_ctrl;
2107 memset(&info, 0, sizeof(struct i2c_board_info));
2108 strlcpy(info.type, "sp2", I2C_NAME_SIZE);
2109 info.addr = 0x40;
2110 info.platform_data = &sp2_config;
2111 request_module(info.type);
2112 client_ci = i2c_new_device(&i2c_bus2->i2c_adap, &info);
2113 if (client_ci == NULL ||
2114 client_ci->dev.driver == NULL) {
2115 if (client_tuner) {
2116 module_put(client_tuner->dev.driver->owner);
2117 i2c_unregister_device(client_tuner);
2118 }
2119 if (client_demod) {
2120 module_put(client_demod->dev.driver->owner);
2121 i2c_unregister_device(client_demod);
2122 }
2123 goto frontend_detach;
2124 }
2125 if (!try_module_get(client_ci->dev.driver->owner)) {
2126 i2c_unregister_device(client_ci);
2127 if (client_tuner) {
2128 module_put(client_tuner->dev.driver->owner);
2129 i2c_unregister_device(client_tuner);
2130 }
2131 if (client_demod) {
2132 module_put(client_demod->dev.driver->owner);
2133 i2c_unregister_device(client_demod);
2134 }
2135 goto frontend_detach;
2136 }
2137 port->i2c_client_ci = client_ci;
2138
2139 if (port->nr != 1)
2140 break;
2141
2142 /* Read entire EEPROM */
2143 dev->i2c_bus[0].i2c_client.addr = 0xa0 >> 1;
2144 tveeprom_read(&dev->i2c_bus[0].i2c_client, eeprom,
2145 sizeof(eeprom));
2146 printk(KERN_INFO "%s MAC address: %pM\n",
2147 cx23885_boards[dev->board].name, eeprom + 0xc0);
2148 memcpy(port->frontends.adapter.proposed_mac, eeprom + 0xc0, 6);
2149 break;
2150 }
1773 } 2151 }
1774 2152
1775 return ret; 2153 return ret;
@@ -1810,7 +2188,7 @@ int cx23885_dvb_register(struct cx23885_tsport *port)
1810 2188
1811 fe0 = vb2_dvb_get_frontend(&port->frontends, i); 2189 fe0 = vb2_dvb_get_frontend(&port->frontends, i);
1812 if (!fe0) 2190 if (!fe0)
1813 err = -EINVAL; 2191 return -EINVAL;
1814 2192
1815 dprintk(1, "%s\n", __func__); 2193 dprintk(1, "%s\n", __func__);
1816 dprintk(1, " ->probed by Card=%d Name=%s, PCI %02x:%02x\n", 2194 dprintk(1, " ->probed by Card=%d Name=%s, PCI %02x:%02x\n",
@@ -1853,6 +2231,13 @@ int cx23885_dvb_unregister(struct cx23885_tsport *port)
1853 struct vb2_dvb_frontend *fe0; 2231 struct vb2_dvb_frontend *fe0;
1854 struct i2c_client *client; 2232 struct i2c_client *client;
1855 2233
2234 /* remove I2C client for CI */
2235 client = port->i2c_client_ci;
2236 if (client) {
2237 module_put(client->dev.driver->owner);
2238 i2c_unregister_device(client);
2239 }
2240
1856 /* remove I2C client for tuner */ 2241 /* remove I2C client for tuner */
1857 client = port->i2c_client_tuner; 2242 client = port->i2c_client_tuner;
1858 if (client) { 2243 if (client) {
diff --git a/drivers/media/pci/cx23885/cx23885-input.c b/drivers/media/pci/cx23885/cx23885-input.c
index 9d37fe661691..088799c3b49b 100644
--- a/drivers/media/pci/cx23885/cx23885-input.c
+++ b/drivers/media/pci/cx23885/cx23885-input.c
@@ -87,6 +87,13 @@ void cx23885_input_rx_work_handler(struct cx23885_dev *dev, u32 events)
87 case CX23885_BOARD_MYGICA_X8507: 87 case CX23885_BOARD_MYGICA_X8507:
88 case CX23885_BOARD_TBS_6980: 88 case CX23885_BOARD_TBS_6980:
89 case CX23885_BOARD_TBS_6981: 89 case CX23885_BOARD_TBS_6981:
90 case CX23885_BOARD_DVBSKY_T9580:
91 case CX23885_BOARD_DVBSKY_T980C:
92 case CX23885_BOARD_DVBSKY_S950C:
93 case CX23885_BOARD_TT_CT2_4500_CI:
94 case CX23885_BOARD_DVBSKY_S950:
95 case CX23885_BOARD_DVBSKY_S952:
96 case CX23885_BOARD_DVBSKY_T982:
90 /* 97 /*
91 * The only boards we handle right now. However other boards 98 * The only boards we handle right now. However other boards
92 * using the CX2388x integrated IR controller should be similar 99 * using the CX2388x integrated IR controller should be similar
@@ -139,6 +146,13 @@ static int cx23885_input_ir_start(struct cx23885_dev *dev)
139 case CX23885_BOARD_HAUPPAUGE_HVR1290: 146 case CX23885_BOARD_HAUPPAUGE_HVR1290:
140 case CX23885_BOARD_HAUPPAUGE_HVR1250: 147 case CX23885_BOARD_HAUPPAUGE_HVR1250:
141 case CX23885_BOARD_MYGICA_X8507: 148 case CX23885_BOARD_MYGICA_X8507:
149 case CX23885_BOARD_DVBSKY_T9580:
150 case CX23885_BOARD_DVBSKY_T980C:
151 case CX23885_BOARD_DVBSKY_S950C:
152 case CX23885_BOARD_TT_CT2_4500_CI:
153 case CX23885_BOARD_DVBSKY_S950:
154 case CX23885_BOARD_DVBSKY_S952:
155 case CX23885_BOARD_DVBSKY_T982:
142 /* 156 /*
143 * The IR controller on this board only returns pulse widths. 157 * The IR controller on this board only returns pulse widths.
144 * Any other mode setting will fail to set up the device. 158 * Any other mode setting will fail to set up the device.
@@ -305,6 +319,23 @@ int cx23885_input_init(struct cx23885_dev *dev)
305 /* A guess at the remote */ 319 /* A guess at the remote */
306 rc_map = RC_MAP_TBS_NEC; 320 rc_map = RC_MAP_TBS_NEC;
307 break; 321 break;
322 case CX23885_BOARD_DVBSKY_T9580:
323 case CX23885_BOARD_DVBSKY_T980C:
324 case CX23885_BOARD_DVBSKY_S950C:
325 case CX23885_BOARD_DVBSKY_S950:
326 case CX23885_BOARD_DVBSKY_S952:
327 case CX23885_BOARD_DVBSKY_T982:
328 /* Integrated CX23885 IR controller */
329 driver_type = RC_DRIVER_IR_RAW;
330 allowed_protos = RC_BIT_ALL;
331 rc_map = RC_MAP_DVBSKY;
332 break;
333 case CX23885_BOARD_TT_CT2_4500_CI:
334 /* Integrated CX23885 IR controller */
335 driver_type = RC_DRIVER_IR_RAW;
336 allowed_protos = RC_BIT_ALL;
337 rc_map = RC_MAP_TT_1500;
338 break;
308 default: 339 default:
309 return -ENODEV; 340 return -ENODEV;
310 } 341 }
diff --git a/drivers/media/pci/cx23885/cx23885-vbi.c b/drivers/media/pci/cx23885/cx23885-vbi.c
index a7c6ef8f3ea3..d362d3838c84 100644
--- a/drivers/media/pci/cx23885/cx23885-vbi.c
+++ b/drivers/media/pci/cx23885/cx23885-vbi.c
@@ -132,6 +132,7 @@ static int queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
132 lines = VBI_NTSC_LINE_COUNT; 132 lines = VBI_NTSC_LINE_COUNT;
133 *num_planes = 1; 133 *num_planes = 1;
134 sizes[0] = lines * VBI_LINE_LENGTH * 2; 134 sizes[0] = lines * VBI_LINE_LENGTH * 2;
135 alloc_ctxs[0] = dev->alloc_ctx;
135 return 0; 136 return 0;
136} 137}
137 138
@@ -142,7 +143,6 @@ static int buffer_prepare(struct vb2_buffer *vb)
142 struct cx23885_buffer, vb); 143 struct cx23885_buffer, vb);
143 struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0); 144 struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
144 unsigned lines = VBI_PAL_LINE_COUNT; 145 unsigned lines = VBI_PAL_LINE_COUNT;
145 int ret;
146 146
147 if (dev->tvnorm & V4L2_STD_525_60) 147 if (dev->tvnorm & V4L2_STD_525_60)
148 lines = VBI_NTSC_LINE_COUNT; 148 lines = VBI_NTSC_LINE_COUNT;
@@ -151,10 +151,6 @@ static int buffer_prepare(struct vb2_buffer *vb)
151 return -EINVAL; 151 return -EINVAL;
152 vb2_set_plane_payload(vb, 0, lines * VBI_LINE_LENGTH * 2); 152 vb2_set_plane_payload(vb, 0, lines * VBI_LINE_LENGTH * 2);
153 153
154 ret = dma_map_sg(&dev->pci->dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
155 if (!ret)
156 return -EIO;
157
158 cx23885_risc_vbibuffer(dev->pci, &buf->risc, 154 cx23885_risc_vbibuffer(dev->pci, &buf->risc,
159 sgt->sgl, 155 sgt->sgl,
160 0, VBI_LINE_LENGTH * lines, 156 0, VBI_LINE_LENGTH * lines,
@@ -165,14 +161,10 @@ static int buffer_prepare(struct vb2_buffer *vb)
165 161
166static void buffer_finish(struct vb2_buffer *vb) 162static void buffer_finish(struct vb2_buffer *vb)
167{ 163{
168 struct cx23885_dev *dev = vb->vb2_queue->drv_priv;
169 struct cx23885_buffer *buf = container_of(vb, 164 struct cx23885_buffer *buf = container_of(vb,
170 struct cx23885_buffer, vb); 165 struct cx23885_buffer, vb);
171 struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
172 166
173 cx23885_free_buffer(vb->vb2_queue->drv_priv, buf); 167 cx23885_free_buffer(vb->vb2_queue->drv_priv, buf);
174
175 dma_unmap_sg(&dev->pci->dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
176} 168}
177 169
178/* 170/*
diff --git a/drivers/media/pci/cx23885/cx23885-video.c b/drivers/media/pci/cx23885/cx23885-video.c
index 682a4f95df6b..5e93c682a3f5 100644
--- a/drivers/media/pci/cx23885/cx23885-video.c
+++ b/drivers/media/pci/cx23885/cx23885-video.c
@@ -323,6 +323,7 @@ static int queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
323 323
324 *num_planes = 1; 324 *num_planes = 1;
325 sizes[0] = (dev->fmt->depth * dev->width * dev->height) >> 3; 325 sizes[0] = (dev->fmt->depth * dev->width * dev->height) >> 3;
326 alloc_ctxs[0] = dev->alloc_ctx;
326 return 0; 327 return 0;
327} 328}
328 329
@@ -334,7 +335,6 @@ static int buffer_prepare(struct vb2_buffer *vb)
334 u32 line0_offset, line1_offset; 335 u32 line0_offset, line1_offset;
335 struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0); 336 struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
336 int field_tff; 337 int field_tff;
337 int ret;
338 338
339 buf->bpl = (dev->width * dev->fmt->depth) >> 3; 339 buf->bpl = (dev->width * dev->fmt->depth) >> 3;
340 340
@@ -342,10 +342,6 @@ static int buffer_prepare(struct vb2_buffer *vb)
342 return -EINVAL; 342 return -EINVAL;
343 vb2_set_plane_payload(vb, 0, dev->height * buf->bpl); 343 vb2_set_plane_payload(vb, 0, dev->height * buf->bpl);
344 344
345 ret = dma_map_sg(&dev->pci->dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
346 if (!ret)
347 return -EIO;
348
349 switch (dev->field) { 345 switch (dev->field) {
350 case V4L2_FIELD_TOP: 346 case V4L2_FIELD_TOP:
351 cx23885_risc_buffer(dev->pci, &buf->risc, 347 cx23885_risc_buffer(dev->pci, &buf->risc,
@@ -413,14 +409,10 @@ static int buffer_prepare(struct vb2_buffer *vb)
413 409
414static void buffer_finish(struct vb2_buffer *vb) 410static void buffer_finish(struct vb2_buffer *vb)
415{ 411{
416 struct cx23885_dev *dev = vb->vb2_queue->drv_priv;
417 struct cx23885_buffer *buf = container_of(vb, 412 struct cx23885_buffer *buf = container_of(vb,
418 struct cx23885_buffer, vb); 413 struct cx23885_buffer, vb);
419 struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
420 414
421 cx23885_free_buffer(vb->vb2_queue->drv_priv, buf); 415 cx23885_free_buffer(vb->vb2_queue->drv_priv, buf);
422
423 dma_unmap_sg(&dev->pci->dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
424} 416}
425 417
426/* 418/*
@@ -608,7 +600,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
608 dev->field = f->fmt.pix.field; 600 dev->field = f->fmt.pix.field;
609 dprintk(2, "%s() width=%d height=%d field=%d\n", __func__, 601 dprintk(2, "%s() width=%d height=%d field=%d\n", __func__,
610 dev->width, dev->height, dev->field); 602 dev->width, dev->height, dev->field);
611 v4l2_fill_mbus_format(&mbus_fmt, &f->fmt.pix, V4L2_MBUS_FMT_FIXED); 603 v4l2_fill_mbus_format(&mbus_fmt, &f->fmt.pix, MEDIA_BUS_FMT_FIXED);
612 call_all(dev, video, s_mbus_fmt, &mbus_fmt); 604 call_all(dev, video, s_mbus_fmt, &mbus_fmt);
613 v4l2_fill_pix_format(&f->fmt.pix, &mbus_fmt); 605 v4l2_fill_pix_format(&f->fmt.pix, &mbus_fmt);
614 /* s_mbus_fmt overwrites f->fmt.pix.field, restore it */ 606 /* s_mbus_fmt overwrites f->fmt.pix.field, restore it */
diff --git a/drivers/media/pci/cx23885/cx23885.h b/drivers/media/pci/cx23885/cx23885.h
index 6c35e6115969..f55cd12da0fd 100644
--- a/drivers/media/pci/cx23885/cx23885.h
+++ b/drivers/media/pci/cx23885/cx23885.h
@@ -93,6 +93,12 @@
93#define CX23885_BOARD_HAUPPAUGE_IMPACTVCBE 43 93#define CX23885_BOARD_HAUPPAUGE_IMPACTVCBE 43
94#define CX23885_BOARD_DVICO_FUSIONHDTV_DVB_T_DUAL_EXP2 44 94#define CX23885_BOARD_DVICO_FUSIONHDTV_DVB_T_DUAL_EXP2 44
95#define CX23885_BOARD_DVBSKY_T9580 45 95#define CX23885_BOARD_DVBSKY_T9580 45
96#define CX23885_BOARD_DVBSKY_T980C 46
97#define CX23885_BOARD_DVBSKY_S950C 47
98#define CX23885_BOARD_TT_CT2_4500_CI 48
99#define CX23885_BOARD_DVBSKY_S950 49
100#define CX23885_BOARD_DVBSKY_S952 50
101#define CX23885_BOARD_DVBSKY_T982 51
96 102
97#define GPIO_0 0x00000001 103#define GPIO_0 0x00000001
98#define GPIO_1 0x00000002 104#define GPIO_1 0x00000002
@@ -296,6 +302,7 @@ struct cx23885_tsport {
296 302
297 struct i2c_client *i2c_client_demod; 303 struct i2c_client *i2c_client_demod;
298 struct i2c_client *i2c_client_tuner; 304 struct i2c_client *i2c_client_tuner;
305 struct i2c_client *i2c_client_ci;
299 306
300 int (*set_frontend)(struct dvb_frontend *fe); 307 int (*set_frontend)(struct dvb_frontend *fe);
301 int (*fe_set_voltage)(struct dvb_frontend *fe, 308 int (*fe_set_voltage)(struct dvb_frontend *fe,
@@ -418,6 +425,7 @@ struct cx23885_dev {
418 struct vb2_queue vb2_vidq; 425 struct vb2_queue vb2_vidq;
419 struct cx23885_dmaqueue vbiq; 426 struct cx23885_dmaqueue vbiq;
420 struct vb2_queue vb2_vbiq; 427 struct vb2_queue vb2_vbiq;
428 void *alloc_ctx;
421 429
422 spinlock_t slock; 430 spinlock_t slock;
423 431
diff --git a/drivers/media/pci/cx25821/cx25821-core.c b/drivers/media/pci/cx25821/cx25821-core.c
index e81173c41e5a..389fffd2f36f 100644
--- a/drivers/media/pci/cx25821/cx25821-core.c
+++ b/drivers/media/pci/cx25821/cx25821-core.c
@@ -996,7 +996,7 @@ static __le32 *cx25821_risc_field(__le32 * rp, struct scatterlist *sglist,
996 for (line = 0; line < lines; line++) { 996 for (line = 0; line < lines; line++) {
997 while (offset && offset >= sg_dma_len(sg)) { 997 while (offset && offset >= sg_dma_len(sg)) {
998 offset -= sg_dma_len(sg); 998 offset -= sg_dma_len(sg);
999 sg++; 999 sg = sg_next(sg);
1000 } 1000 }
1001 if (bpl <= sg_dma_len(sg) - offset) { 1001 if (bpl <= sg_dma_len(sg) - offset) {
1002 /* fits into current chunk */ 1002 /* fits into current chunk */
@@ -1014,14 +1014,14 @@ static __le32 *cx25821_risc_field(__le32 * rp, struct scatterlist *sglist,
1014 *(rp++) = cpu_to_le32(0); /* bits 63-32 */ 1014 *(rp++) = cpu_to_le32(0); /* bits 63-32 */
1015 todo -= (sg_dma_len(sg) - offset); 1015 todo -= (sg_dma_len(sg) - offset);
1016 offset = 0; 1016 offset = 0;
1017 sg++; 1017 sg = sg_next(sg);
1018 while (todo > sg_dma_len(sg)) { 1018 while (todo > sg_dma_len(sg)) {
1019 *(rp++) = cpu_to_le32(RISC_WRITE | 1019 *(rp++) = cpu_to_le32(RISC_WRITE |
1020 sg_dma_len(sg)); 1020 sg_dma_len(sg));
1021 *(rp++) = cpu_to_le32(sg_dma_address(sg)); 1021 *(rp++) = cpu_to_le32(sg_dma_address(sg));
1022 *(rp++) = cpu_to_le32(0); /* bits 63-32 */ 1022 *(rp++) = cpu_to_le32(0); /* bits 63-32 */
1023 todo -= sg_dma_len(sg); 1023 todo -= sg_dma_len(sg);
1024 sg++; 1024 sg = sg_next(sg);
1025 } 1025 }
1026 *(rp++) = cpu_to_le32(RISC_WRITE | RISC_EOL | todo); 1026 *(rp++) = cpu_to_le32(RISC_WRITE | RISC_EOL | todo);
1027 *(rp++) = cpu_to_le32(sg_dma_address(sg)); 1027 *(rp++) = cpu_to_le32(sg_dma_address(sg));
@@ -1101,7 +1101,7 @@ static __le32 *cx25821_risc_field_audio(__le32 * rp, struct scatterlist *sglist,
1101 for (line = 0; line < lines; line++) { 1101 for (line = 0; line < lines; line++) {
1102 while (offset && offset >= sg_dma_len(sg)) { 1102 while (offset && offset >= sg_dma_len(sg)) {
1103 offset -= sg_dma_len(sg); 1103 offset -= sg_dma_len(sg);
1104 sg++; 1104 sg = sg_next(sg);
1105 } 1105 }
1106 1106
1107 if (lpi && line > 0 && !(line % lpi)) 1107 if (lpi && line > 0 && !(line % lpi))
@@ -1125,14 +1125,14 @@ static __le32 *cx25821_risc_field_audio(__le32 * rp, struct scatterlist *sglist,
1125 *(rp++) = cpu_to_le32(0); /* bits 63-32 */ 1125 *(rp++) = cpu_to_le32(0); /* bits 63-32 */
1126 todo -= (sg_dma_len(sg) - offset); 1126 todo -= (sg_dma_len(sg) - offset);
1127 offset = 0; 1127 offset = 0;
1128 sg++; 1128 sg = sg_next(sg);
1129 while (todo > sg_dma_len(sg)) { 1129 while (todo > sg_dma_len(sg)) {
1130 *(rp++) = cpu_to_le32(RISC_WRITE | 1130 *(rp++) = cpu_to_le32(RISC_WRITE |
1131 sg_dma_len(sg)); 1131 sg_dma_len(sg));
1132 *(rp++) = cpu_to_le32(sg_dma_address(sg)); 1132 *(rp++) = cpu_to_le32(sg_dma_address(sg));
1133 *(rp++) = cpu_to_le32(0); /* bits 63-32 */ 1133 *(rp++) = cpu_to_le32(0); /* bits 63-32 */
1134 todo -= sg_dma_len(sg); 1134 todo -= sg_dma_len(sg);
1135 sg++; 1135 sg = sg_next(sg);
1136 } 1136 }
1137 *(rp++) = cpu_to_le32(RISC_WRITE | RISC_EOL | todo); 1137 *(rp++) = cpu_to_le32(RISC_WRITE | RISC_EOL | todo);
1138 *(rp++) = cpu_to_le32(sg_dma_address(sg)); 1138 *(rp++) = cpu_to_le32(sg_dma_address(sg));
diff --git a/drivers/media/pci/cx88/Kconfig b/drivers/media/pci/cx88/Kconfig
index a63a9ad163b2..14b813d634a8 100644
--- a/drivers/media/pci/cx88/Kconfig
+++ b/drivers/media/pci/cx88/Kconfig
@@ -2,8 +2,7 @@ config VIDEO_CX88
2 tristate "Conexant 2388x (bt878 successor) support" 2 tristate "Conexant 2388x (bt878 successor) support"
3 depends on VIDEO_DEV && PCI && I2C && RC_CORE 3 depends on VIDEO_DEV && PCI && I2C && RC_CORE
4 select I2C_ALGOBIT 4 select I2C_ALGOBIT
5 select VIDEO_BTCX 5 select VIDEOBUF2_DMA_SG
6 select VIDEOBUF_DMA_SG
7 select VIDEO_TUNER 6 select VIDEO_TUNER
8 select VIDEO_TVEEPROM 7 select VIDEO_TVEEPROM
9 select VIDEO_WM8775 if MEDIA_SUBDRV_AUTOSELECT 8 select VIDEO_WM8775 if MEDIA_SUBDRV_AUTOSELECT
@@ -45,7 +44,7 @@ config VIDEO_CX88_BLACKBIRD
45config VIDEO_CX88_DVB 44config VIDEO_CX88_DVB
46 tristate "DVB/ATSC Support for cx2388x based TV cards" 45 tristate "DVB/ATSC Support for cx2388x based TV cards"
47 depends on VIDEO_CX88 && DVB_CORE 46 depends on VIDEO_CX88 && DVB_CORE
48 select VIDEOBUF_DVB 47 select VIDEOBUF2_DVB
49 select DVB_PLL if MEDIA_SUBDRV_AUTOSELECT 48 select DVB_PLL if MEDIA_SUBDRV_AUTOSELECT
50 select DVB_MT352 if MEDIA_SUBDRV_AUTOSELECT 49 select DVB_MT352 if MEDIA_SUBDRV_AUTOSELECT
51 select DVB_ZL10353 if MEDIA_SUBDRV_AUTOSELECT 50 select DVB_ZL10353 if MEDIA_SUBDRV_AUTOSELECT
diff --git a/drivers/media/pci/cx88/Makefile b/drivers/media/pci/cx88/Makefile
index 8619c1becee2..d3679c3ee248 100644
--- a/drivers/media/pci/cx88/Makefile
+++ b/drivers/media/pci/cx88/Makefile
@@ -11,7 +11,6 @@ obj-$(CONFIG_VIDEO_CX88_DVB) += cx88-dvb.o
11obj-$(CONFIG_VIDEO_CX88_VP3054) += cx88-vp3054-i2c.o 11obj-$(CONFIG_VIDEO_CX88_VP3054) += cx88-vp3054-i2c.o
12 12
13ccflags-y += -Idrivers/media/i2c 13ccflags-y += -Idrivers/media/i2c
14ccflags-y += -Idrivers/media/common
15ccflags-y += -Idrivers/media/tuners 14ccflags-y += -Idrivers/media/tuners
16ccflags-y += -Idrivers/media/dvb-core 15ccflags-y += -Idrivers/media/dvb-core
17ccflags-y += -Idrivers/media/dvb-frontends 16ccflags-y += -Idrivers/media/dvb-frontends
diff --git a/drivers/media/pci/cx88/cx88-alsa.c b/drivers/media/pci/cx88/cx88-alsa.c
index a72579a9f67f..7f8dc60028d5 100644
--- a/drivers/media/pci/cx88/cx88-alsa.c
+++ b/drivers/media/pci/cx88/cx88-alsa.c
@@ -61,8 +61,11 @@
61 61
62struct cx88_audio_buffer { 62struct cx88_audio_buffer {
63 unsigned int bpl; 63 unsigned int bpl;
64 struct btcx_riscmem risc; 64 struct cx88_riscmem risc;
65 struct videobuf_dmabuf dma; 65 void *vaddr;
66 struct scatterlist *sglist;
67 int sglen;
68 int nr_pages;
66}; 69};
67 70
68struct cx88_audio_dev { 71struct cx88_audio_dev {
@@ -84,8 +87,6 @@ struct cx88_audio_dev {
84 unsigned int period_size; 87 unsigned int period_size;
85 unsigned int num_periods; 88 unsigned int num_periods;
86 89
87 struct videobuf_dmabuf *dma_risc;
88
89 struct cx88_audio_buffer *buf; 90 struct cx88_audio_buffer *buf;
90 91
91 struct snd_pcm_substream *substream; 92 struct snd_pcm_substream *substream;
@@ -290,19 +291,97 @@ static irqreturn_t cx8801_irq(int irq, void *dev_id)
290 return IRQ_RETVAL(handled); 291 return IRQ_RETVAL(handled);
291} 292}
292 293
294static int cx88_alsa_dma_init(struct cx88_audio_dev *chip, int nr_pages)
295{
296 struct cx88_audio_buffer *buf = chip->buf;
297 struct page *pg;
298 int i;
299
300 buf->vaddr = vmalloc_32(nr_pages << PAGE_SHIFT);
301 if (NULL == buf->vaddr) {
302 dprintk(1, "vmalloc_32(%d pages) failed\n", nr_pages);
303 return -ENOMEM;
304 }
305
306 dprintk(1, "vmalloc is at addr 0x%08lx, size=%d\n",
307 (unsigned long)buf->vaddr,
308 nr_pages << PAGE_SHIFT);
309
310 memset(buf->vaddr, 0, nr_pages << PAGE_SHIFT);
311 buf->nr_pages = nr_pages;
312
313 buf->sglist = vzalloc(buf->nr_pages * sizeof(*buf->sglist));
314 if (NULL == buf->sglist)
315 goto vzalloc_err;
316
317 sg_init_table(buf->sglist, buf->nr_pages);
318 for (i = 0; i < buf->nr_pages; i++) {
319 pg = vmalloc_to_page(buf->vaddr + i * PAGE_SIZE);
320 if (NULL == pg)
321 goto vmalloc_to_page_err;
322 sg_set_page(&buf->sglist[i], pg, PAGE_SIZE, 0);
323 }
324 return 0;
325
326vmalloc_to_page_err:
327 vfree(buf->sglist);
328 buf->sglist = NULL;
329vzalloc_err:
330 vfree(buf->vaddr);
331 buf->vaddr = NULL;
332 return -ENOMEM;
333}
334
335static int cx88_alsa_dma_map(struct cx88_audio_dev *dev)
336{
337 struct cx88_audio_buffer *buf = dev->buf;
338
339 buf->sglen = dma_map_sg(&dev->pci->dev, buf->sglist,
340 buf->nr_pages, PCI_DMA_FROMDEVICE);
341
342 if (0 == buf->sglen) {
343 pr_warn("%s: cx88_alsa_map_sg failed\n", __func__);
344 return -ENOMEM;
345 }
346 return 0;
347}
348
349static int cx88_alsa_dma_unmap(struct cx88_audio_dev *dev)
350{
351 struct cx88_audio_buffer *buf = dev->buf;
352
353 if (!buf->sglen)
354 return 0;
355
356 dma_unmap_sg(&dev->pci->dev, buf->sglist, buf->sglen, PCI_DMA_FROMDEVICE);
357 buf->sglen = 0;
358 return 0;
359}
360
361static int cx88_alsa_dma_free(struct cx88_audio_buffer *buf)
362{
363 vfree(buf->sglist);
364 buf->sglist = NULL;
365 vfree(buf->vaddr);
366 buf->vaddr = NULL;
367 return 0;
368}
369
293 370
294static int dsp_buffer_free(snd_cx88_card_t *chip) 371static int dsp_buffer_free(snd_cx88_card_t *chip)
295{ 372{
373 struct cx88_riscmem *risc = &chip->buf->risc;
374
296 BUG_ON(!chip->dma_size); 375 BUG_ON(!chip->dma_size);
297 376
298 dprintk(2,"Freeing buffer\n"); 377 dprintk(2,"Freeing buffer\n");
299 videobuf_dma_unmap(&chip->pci->dev, chip->dma_risc); 378 cx88_alsa_dma_unmap(chip);
300 videobuf_dma_free(chip->dma_risc); 379 cx88_alsa_dma_free(chip->buf);
301 btcx_riscmem_free(chip->pci,&chip->buf->risc); 380 if (risc->cpu)
381 pci_free_consistent(chip->pci, risc->size, risc->cpu, risc->dma);
302 kfree(chip->buf); 382 kfree(chip->buf);
303 383
304 chip->dma_risc = NULL; 384 chip->buf = NULL;
305 chip->dma_size = 0;
306 385
307 return 0; 386 return 0;
308} 387}
@@ -387,7 +466,6 @@ static int snd_cx88_hw_params(struct snd_pcm_substream * substream,
387 struct snd_pcm_hw_params * hw_params) 466 struct snd_pcm_hw_params * hw_params)
388{ 467{
389 snd_cx88_card_t *chip = snd_pcm_substream_chip(substream); 468 snd_cx88_card_t *chip = snd_pcm_substream_chip(substream);
390 struct videobuf_dmabuf *dma;
391 469
392 struct cx88_audio_buffer *buf; 470 struct cx88_audio_buffer *buf;
393 int ret; 471 int ret;
@@ -408,20 +486,19 @@ static int snd_cx88_hw_params(struct snd_pcm_substream * substream,
408 if (NULL == buf) 486 if (NULL == buf)
409 return -ENOMEM; 487 return -ENOMEM;
410 488
489 chip->buf = buf;
411 buf->bpl = chip->period_size; 490 buf->bpl = chip->period_size;
412 491
413 dma = &buf->dma; 492 ret = cx88_alsa_dma_init(chip,
414 videobuf_dma_init(dma);
415 ret = videobuf_dma_init_kernel(dma, PCI_DMA_FROMDEVICE,
416 (PAGE_ALIGN(chip->dma_size) >> PAGE_SHIFT)); 493 (PAGE_ALIGN(chip->dma_size) >> PAGE_SHIFT));
417 if (ret < 0) 494 if (ret < 0)
418 goto error; 495 goto error;
419 496
420 ret = videobuf_dma_map(&chip->pci->dev, dma); 497 ret = cx88_alsa_dma_map(chip);
421 if (ret < 0) 498 if (ret < 0)
422 goto error; 499 goto error;
423 500
424 ret = cx88_risc_databuffer(chip->pci, &buf->risc, dma->sglist, 501 ret = cx88_risc_databuffer(chip->pci, &buf->risc, buf->sglist,
425 chip->period_size, chip->num_periods, 1); 502 chip->period_size, chip->num_periods, 1);
426 if (ret < 0) 503 if (ret < 0)
427 goto error; 504 goto error;
@@ -430,10 +507,7 @@ static int snd_cx88_hw_params(struct snd_pcm_substream * substream,
430 buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP|RISC_IRQ1|RISC_CNT_INC); 507 buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP|RISC_IRQ1|RISC_CNT_INC);
431 buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma); 508 buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
432 509
433 chip->buf = buf; 510 substream->runtime->dma_area = chip->buf->vaddr;
434 chip->dma_risc = dma;
435
436 substream->runtime->dma_area = chip->dma_risc->vaddr;
437 substream->runtime->dma_bytes = chip->dma_size; 511 substream->runtime->dma_bytes = chip->dma_size;
438 substream->runtime->dma_addr = 0; 512 substream->runtime->dma_addr = 0;
439 return 0; 513 return 0;
diff --git a/drivers/media/pci/cx88/cx88-blackbird.c b/drivers/media/pci/cx88/cx88-blackbird.c
index 150bb76e7839..4160ca4e5413 100644
--- a/drivers/media/pci/cx88/cx88-blackbird.c
+++ b/drivers/media/pci/cx88/cx88-blackbird.c
@@ -45,10 +45,6 @@ MODULE_AUTHOR("Jelle Foks <jelle@foks.us>, Gerd Knorr <kraxel@bytesex.org> [SuSE
45MODULE_LICENSE("GPL"); 45MODULE_LICENSE("GPL");
46MODULE_VERSION(CX88_VERSION); 46MODULE_VERSION(CX88_VERSION);
47 47
48static unsigned int mpegbufs = 32;
49module_param(mpegbufs,int,0644);
50MODULE_PARM_DESC(mpegbufs,"number of mpeg buffers, range 2-32");
51
52static unsigned int debug; 48static unsigned int debug;
53module_param(debug,int,0644); 49module_param(debug,int,0644);
54MODULE_PARM_DESC(debug,"enable debug messages [blackbird]"); 50MODULE_PARM_DESC(debug,"enable debug messages [blackbird]");
@@ -326,13 +322,13 @@ static int blackbird_mbox_func(void *priv, u32 command, int in, int out, u32 dat
326 memory_read(dev->core, dev->mailbox - 4, &value); 322 memory_read(dev->core, dev->mailbox - 4, &value);
327 if (value != 0x12345678) { 323 if (value != 0x12345678) {
328 dprintk(0, "Firmware and/or mailbox pointer not initialized or corrupted\n"); 324 dprintk(0, "Firmware and/or mailbox pointer not initialized or corrupted\n");
329 return -1; 325 return -EIO;
330 } 326 }
331 327
332 memory_read(dev->core, dev->mailbox, &flag); 328 memory_read(dev->core, dev->mailbox, &flag);
333 if (flag) { 329 if (flag) {
334 dprintk(0, "ERROR: Mailbox appears to be in use (%x)\n", flag); 330 dprintk(0, "ERROR: Mailbox appears to be in use (%x)\n", flag);
335 return -1; 331 return -EIO;
336 } 332 }
337 333
338 flag |= 1; /* tell 'em we're working on it */ 334 flag |= 1; /* tell 'em we're working on it */
@@ -352,14 +348,14 @@ static int blackbird_mbox_func(void *priv, u32 command, int in, int out, u32 dat
352 memory_write(dev->core, dev->mailbox, flag); 348 memory_write(dev->core, dev->mailbox, flag);
353 349
354 /* wait for firmware to handle the API command */ 350 /* wait for firmware to handle the API command */
355 timeout = jiffies + msecs_to_jiffies(10); 351 timeout = jiffies + msecs_to_jiffies(1000);
356 for (;;) { 352 for (;;) {
357 memory_read(dev->core, dev->mailbox, &flag); 353 memory_read(dev->core, dev->mailbox, &flag);
358 if (0 != (flag & 4)) 354 if (0 != (flag & 4))
359 break; 355 break;
360 if (time_after(jiffies,timeout)) { 356 if (time_after(jiffies,timeout)) {
361 dprintk(0, "ERROR: API Mailbox timeout\n"); 357 dprintk(0, "ERROR: API Mailbox timeout %x\n", command);
362 return -1; 358 return -EIO;
363 } 359 }
364 udelay(10); 360 udelay(10);
365 } 361 }
@@ -420,7 +416,7 @@ static int blackbird_find_mailbox(struct cx8802_dev *dev)
420 } 416 }
421 } 417 }
422 dprintk(0, "Mailbox signature values not found!\n"); 418 dprintk(0, "Mailbox signature values not found!\n");
423 return -1; 419 return -EIO;
424} 420}
425 421
426static int blackbird_load_firmware(struct cx8802_dev *dev) 422static int blackbird_load_firmware(struct cx8802_dev *dev)
@@ -432,7 +428,7 @@ static int blackbird_load_firmware(struct cx8802_dev *dev)
432 int i, retval = 0; 428 int i, retval = 0;
433 u32 value = 0; 429 u32 value = 0;
434 u32 checksum = 0; 430 u32 checksum = 0;
435 u32 *dataptr; 431 __le32 *dataptr;
436 432
437 retval = register_write(dev->core, IVTV_REG_VPU, 0xFFFFFFED); 433 retval = register_write(dev->core, IVTV_REG_VPU, 0xFFFFFFED);
438 retval |= register_write(dev->core, IVTV_REG_HW_BLOCKS, IVTV_CMD_HW_BLOCKS_RST); 434 retval |= register_write(dev->core, IVTV_REG_HW_BLOCKS, IVTV_CMD_HW_BLOCKS_RST);
@@ -449,29 +445,28 @@ static int blackbird_load_firmware(struct cx8802_dev *dev)
449 445
450 446
451 if (retval != 0) { 447 if (retval != 0) {
452 dprintk(0, "ERROR: Hotplug firmware request failed (%s).\n", 448 pr_err("Hotplug firmware request failed (%s).\n",
453 CX2341X_FIRM_ENC_FILENAME); 449 CX2341X_FIRM_ENC_FILENAME);
454 dprintk(0, "Please fix your hotplug setup, the board will " 450 pr_err("Please fix your hotplug setup, the board will not work without firmware loaded!\n");
455 "not work without firmware loaded!\n"); 451 return -EIO;
456 return -1;
457 } 452 }
458 453
459 if (firmware->size != BLACKBIRD_FIRM_IMAGE_SIZE) { 454 if (firmware->size != BLACKBIRD_FIRM_IMAGE_SIZE) {
460 dprintk(0, "ERROR: Firmware size mismatch (have %zd, expected %d)\n", 455 pr_err("Firmware size mismatch (have %zd, expected %d)\n",
461 firmware->size, BLACKBIRD_FIRM_IMAGE_SIZE); 456 firmware->size, BLACKBIRD_FIRM_IMAGE_SIZE);
462 release_firmware(firmware); 457 release_firmware(firmware);
463 return -1; 458 return -EINVAL;
464 } 459 }
465 460
466 if (0 != memcmp(firmware->data, magic, 8)) { 461 if (0 != memcmp(firmware->data, magic, 8)) {
467 dprintk(0, "ERROR: Firmware magic mismatch, wrong file?\n"); 462 pr_err("Firmware magic mismatch, wrong file?\n");
468 release_firmware(firmware); 463 release_firmware(firmware);
469 return -1; 464 return -EINVAL;
470 } 465 }
471 466
472 /* transfer to the chip */ 467 /* transfer to the chip */
473 dprintk(1,"Loading firmware ...\n"); 468 dprintk(1,"Loading firmware ...\n");
474 dataptr = (u32*)firmware->data; 469 dataptr = (__le32 *)firmware->data;
475 for (i = 0; i < (firmware->size >> 2); i++) { 470 for (i = 0; i < (firmware->size >> 2); i++) {
476 value = le32_to_cpu(*dataptr); 471 value = le32_to_cpu(*dataptr);
477 checksum += ~value; 472 checksum += ~value;
@@ -484,12 +479,11 @@ static int blackbird_load_firmware(struct cx8802_dev *dev)
484 memory_read(dev->core, i, &value); 479 memory_read(dev->core, i, &value);
485 checksum -= ~value; 480 checksum -= ~value;
486 } 481 }
482 release_firmware(firmware);
487 if (checksum) { 483 if (checksum) {
488 dprintk(0, "ERROR: Firmware load failed (checksum mismatch).\n"); 484 pr_err("Firmware load might have failed (checksum mismatch).\n");
489 release_firmware(firmware); 485 return -EIO;
490 return -1;
491 } 486 }
492 release_firmware(firmware);
493 dprintk(0, "Firmware upload successful.\n"); 487 dprintk(0, "Firmware upload successful.\n");
494 488
495 retval |= register_write(dev->core, IVTV_REG_HW_BLOCKS, IVTV_CMD_HW_BLOCKS_RST); 489 retval |= register_write(dev->core, IVTV_REG_HW_BLOCKS, IVTV_CMD_HW_BLOCKS_RST);
@@ -521,12 +515,14 @@ DB* DVD | MPEG2 | 720x576PAL | CBR | 600 :Good | 6000 Kbps | 25fps | M
521 515
522static void blackbird_codec_settings(struct cx8802_dev *dev) 516static void blackbird_codec_settings(struct cx8802_dev *dev)
523{ 517{
518 struct cx88_core *core = dev->core;
519
524 /* assign frame size */ 520 /* assign frame size */
525 blackbird_api_cmd(dev, CX2341X_ENC_SET_FRAME_SIZE, 2, 0, 521 blackbird_api_cmd(dev, CX2341X_ENC_SET_FRAME_SIZE, 2, 0,
526 dev->height, dev->width); 522 core->height, core->width);
527 523
528 dev->cxhdl.width = dev->width; 524 dev->cxhdl.width = core->width;
529 dev->cxhdl.height = dev->height; 525 dev->cxhdl.height = core->height;
530 cx2341x_handler_set_50hz(&dev->cxhdl, dev->core->tvnorm & V4L2_STD_625_50); 526 cx2341x_handler_set_50hz(&dev->cxhdl, dev->core->tvnorm & V4L2_STD_625_50);
531 cx2341x_handler_setup(&dev->cxhdl); 527 cx2341x_handler_setup(&dev->cxhdl);
532} 528}
@@ -540,9 +536,6 @@ static int blackbird_initialize_codec(struct cx8802_dev *dev)
540 dprintk(1,"Initialize codec\n"); 536 dprintk(1,"Initialize codec\n");
541 retval = blackbird_api_cmd(dev, CX2341X_ENC_PING_FW, 0, 0); /* ping */ 537 retval = blackbird_api_cmd(dev, CX2341X_ENC_PING_FW, 0, 0); /* ping */
542 if (retval < 0) { 538 if (retval < 0) {
543
544 dev->mpeg_active = 0;
545
546 /* ping was not successful, reset and upload firmware */ 539 /* ping was not successful, reset and upload firmware */
547 cx_write(MO_SRST_IO, 0); /* SYS_RSTO=0 */ 540 cx_write(MO_SRST_IO, 0); /* SYS_RSTO=0 */
548 cx_write(MO_SRST_IO, 1); /* SYS_RSTO=1 */ 541 cx_write(MO_SRST_IO, 1); /* SYS_RSTO=1 */
@@ -589,9 +582,8 @@ static int blackbird_initialize_codec(struct cx8802_dev *dev)
589 return 0; 582 return 0;
590} 583}
591 584
592static int blackbird_start_codec(struct file *file, void *priv) 585static int blackbird_start_codec(struct cx8802_dev *dev)
593{ 586{
594 struct cx8802_dev *dev = ((struct cx8802_fh *)priv)->dev;
595 struct cx88_core *core = dev->core; 587 struct cx88_core *core = dev->core;
596 /* start capturing to the host interface */ 588 /* start capturing to the host interface */
597 u32 reg; 589 u32 reg;
@@ -627,7 +619,6 @@ static int blackbird_start_codec(struct file *file, void *priv)
627 BLACKBIRD_RAW_BITS_NONE 619 BLACKBIRD_RAW_BITS_NONE
628 ); 620 );
629 621
630 dev->mpeg_active = 1;
631 return 0; 622 return 0;
632} 623}
633 624
@@ -641,51 +632,139 @@ static int blackbird_stop_codec(struct cx8802_dev *dev)
641 632
642 cx2341x_handler_set_busy(&dev->cxhdl, 0); 633 cx2341x_handler_set_busy(&dev->cxhdl, 0);
643 634
644 dev->mpeg_active = 0;
645 return 0; 635 return 0;
646} 636}
647 637
648/* ------------------------------------------------------------------ */ 638/* ------------------------------------------------------------------ */
649 639
650static int bb_buf_setup(struct videobuf_queue *q, 640static int queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
651 unsigned int *count, unsigned int *size) 641 unsigned int *num_buffers, unsigned int *num_planes,
642 unsigned int sizes[], void *alloc_ctxs[])
652{ 643{
653 struct cx8802_fh *fh = q->priv_data; 644 struct cx8802_dev *dev = q->drv_priv;
654
655 fh->dev->ts_packet_size = 188 * 4; /* was: 512 */
656 fh->dev->ts_packet_count = mpegbufs; /* was: 100 */
657 645
658 *size = fh->dev->ts_packet_size * fh->dev->ts_packet_count; 646 *num_planes = 1;
659 *count = fh->dev->ts_packet_count; 647 dev->ts_packet_size = 188 * 4;
648 dev->ts_packet_count = 32;
649 sizes[0] = dev->ts_packet_size * dev->ts_packet_count;
660 return 0; 650 return 0;
661} 651}
662 652
663static int 653static int buffer_prepare(struct vb2_buffer *vb)
664bb_buf_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb,
665 enum v4l2_field field)
666{ 654{
667 struct cx8802_fh *fh = q->priv_data; 655 struct cx8802_dev *dev = vb->vb2_queue->drv_priv;
668 return cx8802_buf_prepare(q, fh->dev, (struct cx88_buffer*)vb, field); 656 struct cx88_buffer *buf = container_of(vb, struct cx88_buffer, vb);
657
658 return cx8802_buf_prepare(vb->vb2_queue, dev, buf);
669} 659}
670 660
671static void 661static void buffer_finish(struct vb2_buffer *vb)
672bb_buf_queue(struct videobuf_queue *q, struct videobuf_buffer *vb)
673{ 662{
674 struct cx8802_fh *fh = q->priv_data; 663 struct cx8802_dev *dev = vb->vb2_queue->drv_priv;
675 cx8802_buf_queue(fh->dev, (struct cx88_buffer*)vb); 664 struct cx88_buffer *buf = container_of(vb, struct cx88_buffer, vb);
665 struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
666 struct cx88_riscmem *risc = &buf->risc;
667
668 if (risc->cpu)
669 pci_free_consistent(dev->pci, risc->size, risc->cpu, risc->dma);
670 memset(risc, 0, sizeof(*risc));
671
672 dma_unmap_sg(&dev->pci->dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
676} 673}
677 674
678static void 675static void buffer_queue(struct vb2_buffer *vb)
679bb_buf_release(struct videobuf_queue *q, struct videobuf_buffer *vb)
680{ 676{
681 cx88_free_buffer(q, (struct cx88_buffer*)vb); 677 struct cx8802_dev *dev = vb->vb2_queue->drv_priv;
678 struct cx88_buffer *buf = container_of(vb, struct cx88_buffer, vb);
679
680 cx8802_buf_queue(dev, buf);
681}
682
683static int start_streaming(struct vb2_queue *q, unsigned int count)
684{
685 struct cx8802_dev *dev = q->drv_priv;
686 struct cx88_dmaqueue *dmaq = &dev->mpegq;
687 struct cx8802_driver *drv;
688 struct cx88_buffer *buf;
689 unsigned long flags;
690 int err;
691
692 /* Make sure we can acquire the hardware */
693 drv = cx8802_get_driver(dev, CX88_MPEG_BLACKBIRD);
694 if (!drv) {
695 dprintk(1, "%s: blackbird driver is not loaded\n", __func__);
696 err = -ENODEV;
697 goto fail;
698 }
699
700 err = drv->request_acquire(drv);
701 if (err != 0) {
702 dprintk(1, "%s: Unable to acquire hardware, %d\n", __func__, err);
703 goto fail;
704 }
705
706 if (blackbird_initialize_codec(dev) < 0) {
707 drv->request_release(drv);
708 err = -EINVAL;
709 goto fail;
710 }
711
712 err = blackbird_start_codec(dev);
713 if (err == 0) {
714 buf = list_entry(dmaq->active.next, struct cx88_buffer, list);
715 cx8802_start_dma(dev, dmaq, buf);
716 return 0;
717 }
718
719fail:
720 spin_lock_irqsave(&dev->slock, flags);
721 while (!list_empty(&dmaq->active)) {
722 struct cx88_buffer *buf = list_entry(dmaq->active.next,
723 struct cx88_buffer, list);
724
725 list_del(&buf->list);
726 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
727 }
728 spin_unlock_irqrestore(&dev->slock, flags);
729 return err;
682} 730}
683 731
684static struct videobuf_queue_ops blackbird_qops = { 732static void stop_streaming(struct vb2_queue *q)
685 .buf_setup = bb_buf_setup, 733{
686 .buf_prepare = bb_buf_prepare, 734 struct cx8802_dev *dev = q->drv_priv;
687 .buf_queue = bb_buf_queue, 735 struct cx88_dmaqueue *dmaq = &dev->mpegq;
688 .buf_release = bb_buf_release, 736 struct cx8802_driver *drv = NULL;
737 unsigned long flags;
738
739 cx8802_cancel_buffers(dev);
740 blackbird_stop_codec(dev);
741
742 /* Make sure we release the hardware */
743 drv = cx8802_get_driver(dev, CX88_MPEG_BLACKBIRD);
744 WARN_ON(!drv);
745 if (drv)
746 drv->request_release(drv);
747
748 spin_lock_irqsave(&dev->slock, flags);
749 while (!list_empty(&dmaq->active)) {
750 struct cx88_buffer *buf = list_entry(dmaq->active.next,
751 struct cx88_buffer, list);
752
753 list_del(&buf->list);
754 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
755 }
756 spin_unlock_irqrestore(&dev->slock, flags);
757}
758
759static struct vb2_ops blackbird_qops = {
760 .queue_setup = queue_setup,
761 .buf_prepare = buffer_prepare,
762 .buf_finish = buffer_finish,
763 .buf_queue = buffer_queue,
764 .wait_prepare = vb2_ops_wait_prepare,
765 .wait_finish = vb2_ops_wait_finish,
766 .start_streaming = start_streaming,
767 .stop_streaming = stop_streaming,
689}; 768};
690 769
691/* ------------------------------------------------------------------ */ 770/* ------------------------------------------------------------------ */
@@ -693,8 +772,8 @@ static struct videobuf_queue_ops blackbird_qops = {
693static int vidioc_querycap(struct file *file, void *priv, 772static int vidioc_querycap(struct file *file, void *priv,
694 struct v4l2_capability *cap) 773 struct v4l2_capability *cap)
695{ 774{
696 struct cx8802_dev *dev = ((struct cx8802_fh *)priv)->dev; 775 struct cx8802_dev *dev = video_drvdata(file);
697 struct cx88_core *core = dev->core; 776 struct cx88_core *core = dev->core;
698 777
699 strcpy(cap->driver, "cx88_blackbird"); 778 strcpy(cap->driver, "cx88_blackbird");
700 sprintf(cap->bus_info, "PCI:%s", pci_name(dev->pci)); 779 sprintf(cap->bus_info, "PCI:%s", pci_name(dev->pci));
@@ -714,131 +793,111 @@ static int vidioc_enum_fmt_vid_cap (struct file *file, void *priv,
714 return 0; 793 return 0;
715} 794}
716 795
717static int vidioc_g_fmt_vid_cap (struct file *file, void *priv, 796static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
718 struct v4l2_format *f) 797 struct v4l2_format *f)
719{ 798{
720 struct cx8802_fh *fh = priv; 799 struct cx8802_dev *dev = video_drvdata(file);
721 struct cx8802_dev *dev = fh->dev; 800 struct cx88_core *core = dev->core;
722 801
723 f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG; 802 f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;
724 f->fmt.pix.bytesperline = 0; 803 f->fmt.pix.bytesperline = 0;
725 f->fmt.pix.sizeimage = 188 * 4 * mpegbufs; /* 188 * 4 * 1024; */ 804 f->fmt.pix.sizeimage = dev->ts_packet_size * dev->ts_packet_count;
726 f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M; 805 f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
727 f->fmt.pix.width = dev->width; 806 f->fmt.pix.width = core->width;
728 f->fmt.pix.height = dev->height; 807 f->fmt.pix.height = core->height;
729 f->fmt.pix.field = fh->mpegq.field; 808 f->fmt.pix.field = core->field;
730 dprintk(1, "VIDIOC_G_FMT: w: %d, h: %d, f: %d\n",
731 dev->width, dev->height, fh->mpegq.field );
732 return 0; 809 return 0;
733} 810}
734 811
735static int vidioc_try_fmt_vid_cap (struct file *file, void *priv, 812static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
736 struct v4l2_format *f) 813 struct v4l2_format *f)
737{ 814{
738 struct cx8802_fh *fh = priv; 815 struct cx8802_dev *dev = video_drvdata(file);
739 struct cx8802_dev *dev = fh->dev; 816 struct cx88_core *core = dev->core;
817 unsigned maxw, maxh;
818 enum v4l2_field field;
740 819
741 f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG; 820 f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;
742 f->fmt.pix.bytesperline = 0; 821 f->fmt.pix.bytesperline = 0;
743 f->fmt.pix.sizeimage = 188 * 4 * mpegbufs; /* 188 * 4 * 1024; */ 822 f->fmt.pix.sizeimage = dev->ts_packet_size * dev->ts_packet_count;
744 f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M; 823 f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
745 dprintk(1, "VIDIOC_TRY_FMT: w: %d, h: %d, f: %d\n", 824
746 dev->width, dev->height, fh->mpegq.field ); 825 maxw = norm_maxw(core->tvnorm);
826 maxh = norm_maxh(core->tvnorm);
827
828 field = f->fmt.pix.field;
829
830 switch (field) {
831 case V4L2_FIELD_TOP:
832 case V4L2_FIELD_BOTTOM:
833 case V4L2_FIELD_INTERLACED:
834 case V4L2_FIELD_SEQ_BT:
835 case V4L2_FIELD_SEQ_TB:
836 break;
837 default:
838 field = (f->fmt.pix.height > maxh / 2)
839 ? V4L2_FIELD_INTERLACED
840 : V4L2_FIELD_BOTTOM;
841 break;
842 }
843 if (V4L2_FIELD_HAS_T_OR_B(field))
844 maxh /= 2;
845
846 v4l_bound_align_image(&f->fmt.pix.width, 48, maxw, 2,
847 &f->fmt.pix.height, 32, maxh, 0, 0);
848 f->fmt.pix.field = field;
747 return 0; 849 return 0;
748} 850}
749 851
750static int vidioc_s_fmt_vid_cap (struct file *file, void *priv, 852static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
751 struct v4l2_format *f) 853 struct v4l2_format *f)
752{ 854{
753 struct cx8802_fh *fh = priv; 855 struct cx8802_dev *dev = video_drvdata(file);
754 struct cx8802_dev *dev = fh->dev;
755 struct cx88_core *core = dev->core; 856 struct cx88_core *core = dev->core;
756 857
757 f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG; 858 if (vb2_is_busy(&dev->vb2_mpegq))
758 f->fmt.pix.bytesperline = 0; 859 return -EBUSY;
759 f->fmt.pix.sizeimage = 188 * 4 * mpegbufs; /* 188 * 4 * 1024; */ 860 if (core->v4ldev && (vb2_is_busy(&core->v4ldev->vb2_vidq) ||
760 f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M; 861 vb2_is_busy(&core->v4ldev->vb2_vbiq)))
761 dev->width = f->fmt.pix.width; 862 return -EBUSY;
762 dev->height = f->fmt.pix.height; 863 vidioc_try_fmt_vid_cap(file, priv, f);
763 fh->mpegq.field = f->fmt.pix.field; 864 core->width = f->fmt.pix.width;
865 core->height = f->fmt.pix.height;
866 core->field = f->fmt.pix.field;
764 cx88_set_scale(core, f->fmt.pix.width, f->fmt.pix.height, f->fmt.pix.field); 867 cx88_set_scale(core, f->fmt.pix.width, f->fmt.pix.height, f->fmt.pix.field);
765 blackbird_api_cmd(dev, CX2341X_ENC_SET_FRAME_SIZE, 2, 0, 868 blackbird_api_cmd(dev, CX2341X_ENC_SET_FRAME_SIZE, 2, 0,
766 f->fmt.pix.height, f->fmt.pix.width); 869 f->fmt.pix.height, f->fmt.pix.width);
767 dprintk(1, "VIDIOC_S_FMT: w: %d, h: %d, f: %d\n",
768 f->fmt.pix.width, f->fmt.pix.height, f->fmt.pix.field );
769 return 0; 870 return 0;
770} 871}
771 872
772static int vidioc_reqbufs (struct file *file, void *priv, struct v4l2_requestbuffers *p)
773{
774 struct cx8802_fh *fh = priv;
775 return (videobuf_reqbufs(&fh->mpegq, p));
776}
777
778static int vidioc_querybuf (struct file *file, void *priv, struct v4l2_buffer *p)
779{
780 struct cx8802_fh *fh = priv;
781 return (videobuf_querybuf(&fh->mpegq, p));
782}
783
784static int vidioc_qbuf (struct file *file, void *priv, struct v4l2_buffer *p)
785{
786 struct cx8802_fh *fh = priv;
787 return (videobuf_qbuf(&fh->mpegq, p));
788}
789
790static int vidioc_dqbuf (struct file *file, void *priv, struct v4l2_buffer *p)
791{
792 struct cx8802_fh *fh = priv;
793 return (videobuf_dqbuf(&fh->mpegq, p,
794 file->f_flags & O_NONBLOCK));
795}
796
797static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
798{
799 struct cx8802_fh *fh = priv;
800 struct cx8802_dev *dev = fh->dev;
801
802 if (!dev->mpeg_active)
803 blackbird_start_codec(file, fh);
804 return videobuf_streamon(&fh->mpegq);
805}
806
807static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
808{
809 struct cx8802_fh *fh = priv;
810 struct cx8802_dev *dev = fh->dev;
811
812 if (dev->mpeg_active)
813 blackbird_stop_codec(dev);
814 return videobuf_streamoff(&fh->mpegq);
815}
816
817static int vidioc_s_frequency (struct file *file, void *priv, 873static int vidioc_s_frequency (struct file *file, void *priv,
818 const struct v4l2_frequency *f) 874 const struct v4l2_frequency *f)
819{ 875{
820 struct cx8802_fh *fh = priv; 876 struct cx8802_dev *dev = video_drvdata(file);
821 struct cx8802_dev *dev = fh->dev; 877 struct cx88_core *core = dev->core;
822 struct cx88_core *core = dev->core; 878 bool streaming;
823 879
824 if (unlikely(UNSET == core->board.tuner_type)) 880 if (unlikely(UNSET == core->board.tuner_type))
825 return -EINVAL; 881 return -EINVAL;
826 if (unlikely(f->tuner != 0)) 882 if (unlikely(f->tuner != 0))
827 return -EINVAL; 883 return -EINVAL;
828 if (dev->mpeg_active) 884 streaming = vb2_start_streaming_called(&dev->vb2_mpegq);
885 if (streaming)
829 blackbird_stop_codec(dev); 886 blackbird_stop_codec(dev);
830 887
831 cx88_set_freq (core,f); 888 cx88_set_freq (core,f);
832 blackbird_initialize_codec(dev); 889 blackbird_initialize_codec(dev);
833 cx88_set_scale(dev->core, dev->width, dev->height, 890 cx88_set_scale(core, core->width, core->height,
834 fh->mpegq.field); 891 core->field);
892 if (streaming)
893 blackbird_start_codec(dev);
835 return 0; 894 return 0;
836} 895}
837 896
838static int vidioc_log_status (struct file *file, void *priv) 897static int vidioc_log_status (struct file *file, void *priv)
839{ 898{
840 struct cx8802_dev *dev = ((struct cx8802_fh *)priv)->dev; 899 struct cx8802_dev *dev = video_drvdata(file);
841 struct cx88_core *core = dev->core; 900 struct cx88_core *core = dev->core;
842 char name[32 + 2]; 901 char name[32 + 2];
843 902
844 snprintf(name, sizeof(name), "%s/2", core->name); 903 snprintf(name, sizeof(name), "%s/2", core->name);
@@ -850,15 +909,16 @@ static int vidioc_log_status (struct file *file, void *priv)
850static int vidioc_enum_input (struct file *file, void *priv, 909static int vidioc_enum_input (struct file *file, void *priv,
851 struct v4l2_input *i) 910 struct v4l2_input *i)
852{ 911{
853 struct cx88_core *core = ((struct cx8802_fh *)priv)->dev->core; 912 struct cx8802_dev *dev = video_drvdata(file);
913 struct cx88_core *core = dev->core;
854 return cx88_enum_input (core,i); 914 return cx88_enum_input (core,i);
855} 915}
856 916
857static int vidioc_g_frequency (struct file *file, void *priv, 917static int vidioc_g_frequency (struct file *file, void *priv,
858 struct v4l2_frequency *f) 918 struct v4l2_frequency *f)
859{ 919{
860 struct cx8802_fh *fh = priv; 920 struct cx8802_dev *dev = video_drvdata(file);
861 struct cx88_core *core = fh->dev->core; 921 struct cx88_core *core = dev->core;
862 922
863 if (unlikely(UNSET == core->board.tuner_type)) 923 if (unlikely(UNSET == core->board.tuner_type))
864 return -EINVAL; 924 return -EINVAL;
@@ -873,7 +933,8 @@ static int vidioc_g_frequency (struct file *file, void *priv,
873 933
874static int vidioc_g_input (struct file *file, void *priv, unsigned int *i) 934static int vidioc_g_input (struct file *file, void *priv, unsigned int *i)
875{ 935{
876 struct cx88_core *core = ((struct cx8802_fh *)priv)->dev->core; 936 struct cx8802_dev *dev = video_drvdata(file);
937 struct cx88_core *core = dev->core;
877 938
878 *i = core->input; 939 *i = core->input;
879 return 0; 940 return 0;
@@ -881,24 +942,24 @@ static int vidioc_g_input (struct file *file, void *priv, unsigned int *i)
881 942
882static int vidioc_s_input (struct file *file, void *priv, unsigned int i) 943static int vidioc_s_input (struct file *file, void *priv, unsigned int i)
883{ 944{
884 struct cx88_core *core = ((struct cx8802_fh *)priv)->dev->core; 945 struct cx8802_dev *dev = video_drvdata(file);
946 struct cx88_core *core = dev->core;
885 947
886 if (i >= 4) 948 if (i >= 4)
887 return -EINVAL; 949 return -EINVAL;
888 if (0 == INPUT(i).type) 950 if (0 == INPUT(i).type)
889 return -EINVAL; 951 return -EINVAL;
890 952
891 mutex_lock(&core->lock);
892 cx88_newstation(core); 953 cx88_newstation(core);
893 cx88_video_mux(core,i); 954 cx88_video_mux(core,i);
894 mutex_unlock(&core->lock);
895 return 0; 955 return 0;
896} 956}
897 957
898static int vidioc_g_tuner (struct file *file, void *priv, 958static int vidioc_g_tuner (struct file *file, void *priv,
899 struct v4l2_tuner *t) 959 struct v4l2_tuner *t)
900{ 960{
901 struct cx88_core *core = ((struct cx8802_fh *)priv)->dev->core; 961 struct cx8802_dev *dev = video_drvdata(file);
962 struct cx88_core *core = dev->core;
902 u32 reg; 963 u32 reg;
903 964
904 if (unlikely(UNSET == core->board.tuner_type)) 965 if (unlikely(UNSET == core->board.tuner_type))
@@ -920,7 +981,8 @@ static int vidioc_g_tuner (struct file *file, void *priv,
920static int vidioc_s_tuner (struct file *file, void *priv, 981static int vidioc_s_tuner (struct file *file, void *priv,
921 const struct v4l2_tuner *t) 982 const struct v4l2_tuner *t)
922{ 983{
923 struct cx88_core *core = ((struct cx8802_fh *)priv)->dev->core; 984 struct cx8802_dev *dev = video_drvdata(file);
985 struct cx88_core *core = dev->core;
924 986
925 if (UNSET == core->board.tuner_type) 987 if (UNSET == core->board.tuner_type)
926 return -EINVAL; 988 return -EINVAL;
@@ -933,7 +995,8 @@ static int vidioc_s_tuner (struct file *file, void *priv,
933 995
934static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *tvnorm) 996static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *tvnorm)
935{ 997{
936 struct cx88_core *core = ((struct cx8802_fh *)priv)->dev->core; 998 struct cx8802_dev *dev = video_drvdata(file);
999 struct cx88_core *core = dev->core;
937 1000
938 *tvnorm = core->tvnorm; 1001 *tvnorm = core->tvnorm;
939 return 0; 1002 return 0;
@@ -941,155 +1004,20 @@ static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *tvnorm)
941 1004
942static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id id) 1005static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id id)
943{ 1006{
944 struct cx88_core *core = ((struct cx8802_fh *)priv)->dev->core;
945
946 mutex_lock(&core->lock);
947 cx88_set_tvnorm(core, id);
948 mutex_unlock(&core->lock);
949 return 0;
950}
951
952/* FIXME: cx88_ioctl_hook not implemented */
953
954static int mpeg_open(struct file *file)
955{
956 struct video_device *vdev = video_devdata(file);
957 struct cx8802_dev *dev = video_drvdata(file); 1007 struct cx8802_dev *dev = video_drvdata(file);
958 struct cx8802_fh *fh; 1008 struct cx88_core *core = dev->core;
959 struct cx8802_driver *drv = NULL;
960 int err;
961
962 dprintk( 1, "%s\n", __func__);
963
964 mutex_lock(&dev->core->lock);
965
966 /* Make sure we can acquire the hardware */
967 drv = cx8802_get_driver(dev, CX88_MPEG_BLACKBIRD);
968 if (!drv) {
969 dprintk(1, "%s: blackbird driver is not loaded\n", __func__);
970 mutex_unlock(&dev->core->lock);
971 return -ENODEV;
972 }
973
974 err = drv->request_acquire(drv);
975 if (err != 0) {
976 dprintk(1,"%s: Unable to acquire hardware, %d\n", __func__, err);
977 mutex_unlock(&dev->core->lock);
978 return err;
979 }
980
981 if (!dev->core->mpeg_users && blackbird_initialize_codec(dev) < 0) {
982 drv->request_release(drv);
983 mutex_unlock(&dev->core->lock);
984 return -EINVAL;
985 }
986 dprintk(1, "open dev=%s\n", video_device_node_name(vdev));
987
988 /* allocate + initialize per filehandle data */
989 fh = kzalloc(sizeof(*fh),GFP_KERNEL);
990 if (NULL == fh) {
991 drv->request_release(drv);
992 mutex_unlock(&dev->core->lock);
993 return -ENOMEM;
994 }
995 v4l2_fh_init(&fh->fh, vdev);
996 file->private_data = fh;
997 fh->dev = dev;
998
999 videobuf_queue_sg_init(&fh->mpegq, &blackbird_qops,
1000 &dev->pci->dev, &dev->slock,
1001 V4L2_BUF_TYPE_VIDEO_CAPTURE,
1002 V4L2_FIELD_INTERLACED,
1003 sizeof(struct cx88_buffer),
1004 fh, NULL);
1005
1006 /* FIXME: locking against other video device */
1007 cx88_set_scale(dev->core, dev->width, dev->height,
1008 fh->mpegq.field);
1009
1010 dev->core->mpeg_users++;
1011 mutex_unlock(&dev->core->lock);
1012 v4l2_fh_add(&fh->fh);
1013 return 0;
1014}
1015
1016static int mpeg_release(struct file *file)
1017{
1018 struct cx8802_fh *fh = file->private_data;
1019 struct cx8802_dev *dev = fh->dev;
1020 struct cx8802_driver *drv = NULL;
1021
1022 mutex_lock(&dev->core->lock);
1023
1024 if (dev->mpeg_active && dev->core->mpeg_users == 1)
1025 blackbird_stop_codec(dev);
1026
1027 cx8802_cancel_buffers(fh->dev);
1028 /* stop mpeg capture */
1029 videobuf_stop(&fh->mpegq);
1030
1031 videobuf_mmap_free(&fh->mpegq);
1032
1033 v4l2_fh_del(&fh->fh);
1034 v4l2_fh_exit(&fh->fh);
1035 file->private_data = NULL;
1036 kfree(fh);
1037
1038 /* Make sure we release the hardware */
1039 drv = cx8802_get_driver(dev, CX88_MPEG_BLACKBIRD);
1040 WARN_ON(!drv);
1041 if (drv)
1042 drv->request_release(drv);
1043
1044 dev->core->mpeg_users--;
1045
1046 mutex_unlock(&dev->core->lock);
1047
1048 return 0;
1049}
1050
1051static ssize_t
1052mpeg_read(struct file *file, char __user *data, size_t count, loff_t *ppos)
1053{
1054 struct cx8802_fh *fh = file->private_data;
1055 struct cx8802_dev *dev = fh->dev;
1056
1057 if (!dev->mpeg_active)
1058 blackbird_start_codec(file, fh);
1059
1060 return videobuf_read_stream(&fh->mpegq, data, count, ppos, 0,
1061 file->f_flags & O_NONBLOCK);
1062}
1063
1064static unsigned int
1065mpeg_poll(struct file *file, struct poll_table_struct *wait)
1066{
1067 unsigned long req_events = poll_requested_events(wait);
1068 struct cx8802_fh *fh = file->private_data;
1069 struct cx8802_dev *dev = fh->dev;
1070
1071 if (!dev->mpeg_active && (req_events & (POLLIN | POLLRDNORM)))
1072 blackbird_start_codec(file, fh);
1073
1074 return v4l2_ctrl_poll(file, wait) | videobuf_poll_stream(file, &fh->mpegq, wait);
1075}
1076
1077static int
1078mpeg_mmap(struct file *file, struct vm_area_struct * vma)
1079{
1080 struct cx8802_fh *fh = file->private_data;
1081 1009
1082 return videobuf_mmap_mapper(&fh->mpegq, vma); 1010 return cx88_set_tvnorm(core, id);
1083} 1011}
1084 1012
1085static const struct v4l2_file_operations mpeg_fops = 1013static const struct v4l2_file_operations mpeg_fops =
1086{ 1014{
1087 .owner = THIS_MODULE, 1015 .owner = THIS_MODULE,
1088 .open = mpeg_open, 1016 .open = v4l2_fh_open,
1089 .release = mpeg_release, 1017 .release = vb2_fop_release,
1090 .read = mpeg_read, 1018 .read = vb2_fop_read,
1091 .poll = mpeg_poll, 1019 .poll = vb2_fop_poll,
1092 .mmap = mpeg_mmap, 1020 .mmap = vb2_fop_mmap,
1093 .unlocked_ioctl = video_ioctl2, 1021 .unlocked_ioctl = video_ioctl2,
1094}; 1022};
1095 1023
@@ -1099,12 +1027,12 @@ static const struct v4l2_ioctl_ops mpeg_ioctl_ops = {
1099 .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap, 1027 .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
1100 .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap, 1028 .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
1101 .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap, 1029 .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
1102 .vidioc_reqbufs = vidioc_reqbufs, 1030 .vidioc_reqbufs = vb2_ioctl_reqbufs,
1103 .vidioc_querybuf = vidioc_querybuf, 1031 .vidioc_querybuf = vb2_ioctl_querybuf,
1104 .vidioc_qbuf = vidioc_qbuf, 1032 .vidioc_qbuf = vb2_ioctl_qbuf,
1105 .vidioc_dqbuf = vidioc_dqbuf, 1033 .vidioc_dqbuf = vb2_ioctl_dqbuf,
1106 .vidioc_streamon = vidioc_streamon, 1034 .vidioc_streamon = vb2_ioctl_streamon,
1107 .vidioc_streamoff = vidioc_streamoff, 1035 .vidioc_streamoff = vb2_ioctl_streamoff,
1108 .vidioc_s_frequency = vidioc_s_frequency, 1036 .vidioc_s_frequency = vidioc_s_frequency,
1109 .vidioc_log_status = vidioc_log_status, 1037 .vidioc_log_status = vidioc_log_status,
1110 .vidioc_enum_input = vidioc_enum_input, 1038 .vidioc_enum_input = vidioc_enum_input,
@@ -1189,11 +1117,12 @@ static int blackbird_register_video(struct cx8802_dev *dev)
1189{ 1117{
1190 int err; 1118 int err;
1191 1119
1192 dev->mpeg_dev = cx88_vdev_init(dev->core,dev->pci, 1120 dev->mpeg_dev = cx88_vdev_init(dev->core, dev->pci,
1193 &cx8802_mpeg_template,"mpeg"); 1121 &cx8802_mpeg_template, "mpeg");
1194 dev->mpeg_dev->ctrl_handler = &dev->cxhdl.hdl; 1122 dev->mpeg_dev->ctrl_handler = &dev->cxhdl.hdl;
1195 video_set_drvdata(dev->mpeg_dev, dev); 1123 video_set_drvdata(dev->mpeg_dev, dev);
1196 err = video_register_device(dev->mpeg_dev,VFL_TYPE_GRABBER, -1); 1124 dev->mpeg_dev->queue = &dev->vb2_mpegq;
1125 err = video_register_device(dev->mpeg_dev, VFL_TYPE_GRABBER, -1);
1197 if (err < 0) { 1126 if (err < 0) {
1198 printk(KERN_INFO "%s/2: can't register mpeg device\n", 1127 printk(KERN_INFO "%s/2: can't register mpeg device\n",
1199 dev->core->name); 1128 dev->core->name);
@@ -1210,6 +1139,7 @@ static int cx8802_blackbird_probe(struct cx8802_driver *drv)
1210{ 1139{
1211 struct cx88_core *core = drv->core; 1140 struct cx88_core *core = drv->core;
1212 struct cx8802_dev *dev = core->dvbdev; 1141 struct cx8802_dev *dev = core->dvbdev;
1142 struct vb2_queue *q;
1213 int err; 1143 int err;
1214 1144
1215 dprintk( 1, "%s\n", __func__); 1145 dprintk( 1, "%s\n", __func__);
@@ -1223,15 +1153,9 @@ static int cx8802_blackbird_probe(struct cx8802_driver *drv)
1223 if (!(core->board.mpeg & CX88_MPEG_BLACKBIRD)) 1153 if (!(core->board.mpeg & CX88_MPEG_BLACKBIRD))
1224 goto fail_core; 1154 goto fail_core;
1225 1155
1226 dev->width = 720;
1227 if (core->tvnorm & V4L2_STD_525_60) {
1228 dev->height = 480;
1229 } else {
1230 dev->height = 576;
1231 }
1232 dev->cxhdl.port = CX2341X_PORT_STREAMING; 1156 dev->cxhdl.port = CX2341X_PORT_STREAMING;
1233 dev->cxhdl.width = dev->width; 1157 dev->cxhdl.width = core->width;
1234 dev->cxhdl.height = dev->height; 1158 dev->cxhdl.height = core->height;
1235 dev->cxhdl.func = blackbird_mbox_func; 1159 dev->cxhdl.func = blackbird_mbox_func;
1236 dev->cxhdl.priv = dev; 1160 dev->cxhdl.priv = dev;
1237 err = cx2341x_handler_init(&dev->cxhdl, 36); 1161 err = cx2341x_handler_init(&dev->cxhdl, 36);
@@ -1250,13 +1174,30 @@ static int cx8802_blackbird_probe(struct cx8802_driver *drv)
1250// init_controls(core); 1174// init_controls(core);
1251 cx88_set_tvnorm(core,core->tvnorm); 1175 cx88_set_tvnorm(core,core->tvnorm);
1252 cx88_video_mux(core,0); 1176 cx88_video_mux(core,0);
1253 cx2341x_handler_set_50hz(&dev->cxhdl, dev->height == 576); 1177 cx2341x_handler_set_50hz(&dev->cxhdl, core->height == 576);
1254 cx2341x_handler_setup(&dev->cxhdl); 1178 cx2341x_handler_setup(&dev->cxhdl);
1179
1180 q = &dev->vb2_mpegq;
1181 q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
1182 q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
1183 q->gfp_flags = GFP_DMA32;
1184 q->min_buffers_needed = 2;
1185 q->drv_priv = dev;
1186 q->buf_struct_size = sizeof(struct cx88_buffer);
1187 q->ops = &blackbird_qops;
1188 q->mem_ops = &vb2_dma_sg_memops;
1189 q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
1190 q->lock = &core->lock;
1191
1192 err = vb2_queue_init(q);
1193 if (err < 0)
1194 goto fail_core;
1195
1255 blackbird_register_video(dev); 1196 blackbird_register_video(dev);
1256 1197
1257 return 0; 1198 return 0;
1258 1199
1259 fail_core: 1200fail_core:
1260 return err; 1201 return err;
1261} 1202}
1262 1203
diff --git a/drivers/media/pci/cx88/cx88-cards.c b/drivers/media/pci/cx88/cx88-cards.c
index 851754bf1291..8f2556ec3971 100644
--- a/drivers/media/pci/cx88/cx88-cards.c
+++ b/drivers/media/pci/cx88/cx88-cards.c
@@ -347,7 +347,7 @@ static const struct cx88_board cx88_boards[] = {
347 }, 347 },
348 [CX88_BOARD_IODATA_GVVCP3PCI] = { 348 [CX88_BOARD_IODATA_GVVCP3PCI] = {
349 .name = "IODATA GV-VCP3/PCI", 349 .name = "IODATA GV-VCP3/PCI",
350 .tuner_type = TUNER_ABSENT, 350 .tuner_type = UNSET,
351 .radio_type = UNSET, 351 .radio_type = UNSET,
352 .tuner_addr = ADDR_UNSET, 352 .tuner_addr = ADDR_UNSET,
353 .radio_addr = ADDR_UNSET, 353 .radio_addr = ADDR_UNSET,
@@ -436,7 +436,7 @@ static const struct cx88_board cx88_boards[] = {
436 }, 436 },
437 [CX88_BOARD_KWORLD_DVB_T] = { 437 [CX88_BOARD_KWORLD_DVB_T] = {
438 .name = "KWorld/VStream XPert DVB-T", 438 .name = "KWorld/VStream XPert DVB-T",
439 .tuner_type = TUNER_ABSENT, 439 .tuner_type = UNSET,
440 .radio_type = UNSET, 440 .radio_type = UNSET,
441 .tuner_addr = ADDR_UNSET, 441 .tuner_addr = ADDR_UNSET,
442 .radio_addr = ADDR_UNSET, 442 .radio_addr = ADDR_UNSET,
@@ -455,7 +455,7 @@ static const struct cx88_board cx88_boards[] = {
455 }, 455 },
456 [CX88_BOARD_DVICO_FUSIONHDTV_DVB_T1] = { 456 [CX88_BOARD_DVICO_FUSIONHDTV_DVB_T1] = {
457 .name = "DViCO FusionHDTV DVB-T1", 457 .name = "DViCO FusionHDTV DVB-T1",
458 .tuner_type = TUNER_ABSENT, /* No analog tuner */ 458 .tuner_type = UNSET, /* No analog tuner */
459 .radio_type = UNSET, 459 .radio_type = UNSET,
460 .tuner_addr = ADDR_UNSET, 460 .tuner_addr = ADDR_UNSET,
461 .radio_addr = ADDR_UNSET, 461 .radio_addr = ADDR_UNSET,
@@ -542,7 +542,7 @@ static const struct cx88_board cx88_boards[] = {
542 }, 542 },
543 [CX88_BOARD_HAUPPAUGE_DVB_T1] = { 543 [CX88_BOARD_HAUPPAUGE_DVB_T1] = {
544 .name = "Hauppauge Nova-T DVB-T", 544 .name = "Hauppauge Nova-T DVB-T",
545 .tuner_type = TUNER_ABSENT, 545 .tuner_type = UNSET,
546 .radio_type = UNSET, 546 .radio_type = UNSET,
547 .tuner_addr = ADDR_UNSET, 547 .tuner_addr = ADDR_UNSET,
548 .radio_addr = ADDR_UNSET, 548 .radio_addr = ADDR_UNSET,
@@ -554,7 +554,7 @@ static const struct cx88_board cx88_boards[] = {
554 }, 554 },
555 [CX88_BOARD_CONEXANT_DVB_T1] = { 555 [CX88_BOARD_CONEXANT_DVB_T1] = {
556 .name = "Conexant DVB-T reference design", 556 .name = "Conexant DVB-T reference design",
557 .tuner_type = TUNER_ABSENT, 557 .tuner_type = UNSET,
558 .radio_type = UNSET, 558 .radio_type = UNSET,
559 .tuner_addr = ADDR_UNSET, 559 .tuner_addr = ADDR_UNSET,
560 .radio_addr = ADDR_UNSET, 560 .radio_addr = ADDR_UNSET,
@@ -579,7 +579,7 @@ static const struct cx88_board cx88_boards[] = {
579 }, 579 },
580 [CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_PLUS] = { 580 [CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_PLUS] = {
581 .name = "DViCO FusionHDTV DVB-T Plus", 581 .name = "DViCO FusionHDTV DVB-T Plus",
582 .tuner_type = TUNER_ABSENT, /* No analog tuner */ 582 .tuner_type = UNSET, /* No analog tuner */
583 .radio_type = UNSET, 583 .radio_type = UNSET,
584 .tuner_addr = ADDR_UNSET, 584 .tuner_addr = ADDR_UNSET,
585 .radio_addr = ADDR_UNSET, 585 .radio_addr = ADDR_UNSET,
@@ -596,7 +596,7 @@ static const struct cx88_board cx88_boards[] = {
596 }, 596 },
597 [CX88_BOARD_DNTV_LIVE_DVB_T] = { 597 [CX88_BOARD_DNTV_LIVE_DVB_T] = {
598 .name = "digitalnow DNTV Live! DVB-T", 598 .name = "digitalnow DNTV Live! DVB-T",
599 .tuner_type = TUNER_ABSENT, 599 .tuner_type = UNSET,
600 .radio_type = UNSET, 600 .radio_type = UNSET,
601 .tuner_addr = ADDR_UNSET, 601 .tuner_addr = ADDR_UNSET,
602 .radio_addr = ADDR_UNSET, 602 .radio_addr = ADDR_UNSET,
@@ -787,7 +787,7 @@ static const struct cx88_board cx88_boards[] = {
787 }, 787 },
788 [CX88_BOARD_ADSTECH_DVB_T_PCI] = { 788 [CX88_BOARD_ADSTECH_DVB_T_PCI] = {
789 .name = "ADS Tech Instant TV DVB-T PCI", 789 .name = "ADS Tech Instant TV DVB-T PCI",
790 .tuner_type = TUNER_ABSENT, 790 .tuner_type = UNSET,
791 .radio_type = UNSET, 791 .radio_type = UNSET,
792 .tuner_addr = ADDR_UNSET, 792 .tuner_addr = ADDR_UNSET,
793 .radio_addr = ADDR_UNSET, 793 .radio_addr = ADDR_UNSET,
@@ -806,7 +806,7 @@ static const struct cx88_board cx88_boards[] = {
806 }, 806 },
807 [CX88_BOARD_TERRATEC_CINERGY_1400_DVB_T1] = { 807 [CX88_BOARD_TERRATEC_CINERGY_1400_DVB_T1] = {
808 .name = "TerraTec Cinergy 1400 DVB-T", 808 .name = "TerraTec Cinergy 1400 DVB-T",
809 .tuner_type = TUNER_ABSENT, 809 .tuner_type = UNSET,
810 .input = { { 810 .input = { {
811 .type = CX88_VMUX_DVB, 811 .type = CX88_VMUX_DVB,
812 .vmux = 0, 812 .vmux = 0,
@@ -924,7 +924,7 @@ static const struct cx88_board cx88_boards[] = {
924 }, 924 },
925 [CX88_BOARD_WINFAST_DTV1000] = { 925 [CX88_BOARD_WINFAST_DTV1000] = {
926 .name = "WinFast DTV1000-T", 926 .name = "WinFast DTV1000-T",
927 .tuner_type = TUNER_ABSENT, 927 .tuner_type = UNSET,
928 .radio_type = UNSET, 928 .radio_type = UNSET,
929 .tuner_addr = ADDR_UNSET, 929 .tuner_addr = ADDR_UNSET,
930 .radio_addr = ADDR_UNSET, 930 .radio_addr = ADDR_UNSET,
@@ -972,7 +972,7 @@ static const struct cx88_board cx88_boards[] = {
972 }, 972 },
973 [CX88_BOARD_HAUPPAUGE_NOVASPLUS_S1] = { 973 [CX88_BOARD_HAUPPAUGE_NOVASPLUS_S1] = {
974 .name = "Hauppauge Nova-S-Plus DVB-S", 974 .name = "Hauppauge Nova-S-Plus DVB-S",
975 .tuner_type = TUNER_ABSENT, 975 .tuner_type = UNSET,
976 .radio_type = UNSET, 976 .radio_type = UNSET,
977 .tuner_addr = ADDR_UNSET, 977 .tuner_addr = ADDR_UNSET,
978 .radio_addr = ADDR_UNSET, 978 .radio_addr = ADDR_UNSET,
@@ -998,7 +998,7 @@ static const struct cx88_board cx88_boards[] = {
998 }, 998 },
999 [CX88_BOARD_HAUPPAUGE_NOVASE2_S1] = { 999 [CX88_BOARD_HAUPPAUGE_NOVASE2_S1] = {
1000 .name = "Hauppauge Nova-SE2 DVB-S", 1000 .name = "Hauppauge Nova-SE2 DVB-S",
1001 .tuner_type = TUNER_ABSENT, 1001 .tuner_type = UNSET,
1002 .radio_type = UNSET, 1002 .radio_type = UNSET,
1003 .tuner_addr = ADDR_UNSET, 1003 .tuner_addr = ADDR_UNSET,
1004 .radio_addr = ADDR_UNSET, 1004 .radio_addr = ADDR_UNSET,
@@ -1010,7 +1010,7 @@ static const struct cx88_board cx88_boards[] = {
1010 }, 1010 },
1011 [CX88_BOARD_KWORLD_DVBS_100] = { 1011 [CX88_BOARD_KWORLD_DVBS_100] = {
1012 .name = "KWorld DVB-S 100", 1012 .name = "KWorld DVB-S 100",
1013 .tuner_type = TUNER_ABSENT, 1013 .tuner_type = UNSET,
1014 .radio_type = UNSET, 1014 .radio_type = UNSET,
1015 .tuner_addr = ADDR_UNSET, 1015 .tuner_addr = ADDR_UNSET,
1016 .radio_addr = ADDR_UNSET, 1016 .radio_addr = ADDR_UNSET,
@@ -1102,7 +1102,7 @@ static const struct cx88_board cx88_boards[] = {
1102 /* DTT 7579 Conexant CX22702-19 Conexant CX2388x */ 1102 /* DTT 7579 Conexant CX22702-19 Conexant CX2388x */
1103 /* Manenti Marco <marco_manenti@colman.it> */ 1103 /* Manenti Marco <marco_manenti@colman.it> */
1104 .name = "KWorld/VStream XPert DVB-T with cx22702", 1104 .name = "KWorld/VStream XPert DVB-T with cx22702",
1105 .tuner_type = TUNER_ABSENT, 1105 .tuner_type = UNSET,
1106 .radio_type = UNSET, 1106 .radio_type = UNSET,
1107 .tuner_addr = ADDR_UNSET, 1107 .tuner_addr = ADDR_UNSET,
1108 .radio_addr = ADDR_UNSET, 1108 .radio_addr = ADDR_UNSET,
@@ -1121,7 +1121,7 @@ static const struct cx88_board cx88_boards[] = {
1121 }, 1121 },
1122 [CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_DUAL] = { 1122 [CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_DUAL] = {
1123 .name = "DViCO FusionHDTV DVB-T Dual Digital", 1123 .name = "DViCO FusionHDTV DVB-T Dual Digital",
1124 .tuner_type = TUNER_ABSENT, /* No analog tuner */ 1124 .tuner_type = UNSET, /* No analog tuner */
1125 .radio_type = UNSET, 1125 .radio_type = UNSET,
1126 .tuner_addr = ADDR_UNSET, 1126 .tuner_addr = ADDR_UNSET,
1127 .radio_addr = ADDR_UNSET, 1127 .radio_addr = ADDR_UNSET,
@@ -1356,7 +1356,7 @@ static const struct cx88_board cx88_boards[] = {
1356 }, 1356 },
1357 [CX88_BOARD_GENIATECH_DVBS] = { 1357 [CX88_BOARD_GENIATECH_DVBS] = {
1358 .name = "Geniatech DVB-S", 1358 .name = "Geniatech DVB-S",
1359 .tuner_type = TUNER_ABSENT, 1359 .tuner_type = UNSET,
1360 .radio_type = UNSET, 1360 .radio_type = UNSET,
1361 .tuner_addr = ADDR_UNSET, 1361 .tuner_addr = ADDR_UNSET,
1362 .radio_addr = ADDR_UNSET, 1362 .radio_addr = ADDR_UNSET,
@@ -1494,7 +1494,7 @@ static const struct cx88_board cx88_boards[] = {
1494 }, 1494 },
1495 [CX88_BOARD_SAMSUNG_SMT_7020] = { 1495 [CX88_BOARD_SAMSUNG_SMT_7020] = {
1496 .name = "Samsung SMT 7020 DVB-S", 1496 .name = "Samsung SMT 7020 DVB-S",
1497 .tuner_type = TUNER_ABSENT, 1497 .tuner_type = UNSET,
1498 .radio_type = UNSET, 1498 .radio_type = UNSET,
1499 .tuner_addr = ADDR_UNSET, 1499 .tuner_addr = ADDR_UNSET,
1500 .radio_addr = ADDR_UNSET, 1500 .radio_addr = ADDR_UNSET,
@@ -1506,7 +1506,7 @@ static const struct cx88_board cx88_boards[] = {
1506 }, 1506 },
1507 [CX88_BOARD_ADSTECH_PTV_390] = { 1507 [CX88_BOARD_ADSTECH_PTV_390] = {
1508 .name = "ADS Tech Instant Video PCI", 1508 .name = "ADS Tech Instant Video PCI",
1509 .tuner_type = TUNER_ABSENT, 1509 .tuner_type = UNSET,
1510 .radio_type = UNSET, 1510 .radio_type = UNSET,
1511 .tuner_addr = ADDR_UNSET, 1511 .tuner_addr = ADDR_UNSET,
1512 .radio_addr = ADDR_UNSET, 1512 .radio_addr = ADDR_UNSET,
@@ -1553,7 +1553,7 @@ static const struct cx88_board cx88_boards[] = {
1553 [CX88_BOARD_DVICO_FUSIONHDTV_5_PCI_NANO] = { 1553 [CX88_BOARD_DVICO_FUSIONHDTV_5_PCI_NANO] = {
1554 .name = "DViCO FusionHDTV 5 PCI nano", 1554 .name = "DViCO FusionHDTV 5 PCI nano",
1555 /* xc3008 tuner, digital only for now */ 1555 /* xc3008 tuner, digital only for now */
1556 .tuner_type = TUNER_ABSENT, 1556 .tuner_type = UNSET,
1557 .radio_type = UNSET, 1557 .radio_type = UNSET,
1558 .tuner_addr = ADDR_UNSET, 1558 .tuner_addr = ADDR_UNSET,
1559 .radio_addr = ADDR_UNSET, 1559 .radio_addr = ADDR_UNSET,
@@ -2069,7 +2069,7 @@ static const struct cx88_board cx88_boards[] = {
2069 }, 2069 },
2070 [CX88_BOARD_TBS_8920] = { 2070 [CX88_BOARD_TBS_8920] = {
2071 .name = "TBS 8920 DVB-S/S2", 2071 .name = "TBS 8920 DVB-S/S2",
2072 .tuner_type = TUNER_ABSENT, 2072 .tuner_type = UNSET,
2073 .radio_type = UNSET, 2073 .radio_type = UNSET,
2074 .tuner_addr = ADDR_UNSET, 2074 .tuner_addr = ADDR_UNSET,
2075 .radio_addr = ADDR_UNSET, 2075 .radio_addr = ADDR_UNSET,
@@ -2304,7 +2304,7 @@ static const struct cx88_board cx88_boards[] = {
2304 }, 2304 },
2305 [CX88_BOARD_TWINHAN_VP1027_DVBS] = { 2305 [CX88_BOARD_TWINHAN_VP1027_DVBS] = {
2306 .name = "Twinhan VP-1027 DVB-S", 2306 .name = "Twinhan VP-1027 DVB-S",
2307 .tuner_type = TUNER_ABSENT, 2307 .tuner_type = UNSET,
2308 .radio_type = UNSET, 2308 .radio_type = UNSET,
2309 .tuner_addr = ADDR_UNSET, 2309 .tuner_addr = ADDR_UNSET,
2310 .radio_addr = ADDR_UNSET, 2310 .radio_addr = ADDR_UNSET,
@@ -2921,33 +2921,33 @@ static const struct {
2921 int fm; 2921 int fm;
2922 const char *name; 2922 const char *name;
2923} gdi_tuner[] = { 2923} gdi_tuner[] = {
2924 [ 0x01 ] = { .id = TUNER_ABSENT, 2924 [ 0x01 ] = { .id = UNSET,
2925 .name = "NTSC_M" }, 2925 .name = "NTSC_M" },
2926 [ 0x02 ] = { .id = TUNER_ABSENT, 2926 [ 0x02 ] = { .id = UNSET,
2927 .name = "PAL_B" }, 2927 .name = "PAL_B" },
2928 [ 0x03 ] = { .id = TUNER_ABSENT, 2928 [ 0x03 ] = { .id = UNSET,
2929 .name = "PAL_I" }, 2929 .name = "PAL_I" },
2930 [ 0x04 ] = { .id = TUNER_ABSENT, 2930 [ 0x04 ] = { .id = UNSET,
2931 .name = "PAL_D" }, 2931 .name = "PAL_D" },
2932 [ 0x05 ] = { .id = TUNER_ABSENT, 2932 [ 0x05 ] = { .id = UNSET,
2933 .name = "SECAM" }, 2933 .name = "SECAM" },
2934 2934
2935 [ 0x10 ] = { .id = TUNER_ABSENT, 2935 [ 0x10 ] = { .id = UNSET,
2936 .fm = 1, 2936 .fm = 1,
2937 .name = "TEMIC_4049" }, 2937 .name = "TEMIC_4049" },
2938 [ 0x11 ] = { .id = TUNER_TEMIC_4136FY5, 2938 [ 0x11 ] = { .id = TUNER_TEMIC_4136FY5,
2939 .name = "TEMIC_4136" }, 2939 .name = "TEMIC_4136" },
2940 [ 0x12 ] = { .id = TUNER_ABSENT, 2940 [ 0x12 ] = { .id = UNSET,
2941 .name = "TEMIC_4146" }, 2941 .name = "TEMIC_4146" },
2942 2942
2943 [ 0x20 ] = { .id = TUNER_PHILIPS_FQ1216ME, 2943 [ 0x20 ] = { .id = TUNER_PHILIPS_FQ1216ME,
2944 .fm = 1, 2944 .fm = 1,
2945 .name = "PHILIPS_FQ1216_MK3" }, 2945 .name = "PHILIPS_FQ1216_MK3" },
2946 [ 0x21 ] = { .id = TUNER_ABSENT, .fm = 1, 2946 [ 0x21 ] = { .id = UNSET, .fm = 1,
2947 .name = "PHILIPS_FQ1236_MK3" }, 2947 .name = "PHILIPS_FQ1236_MK3" },
2948 [ 0x22 ] = { .id = TUNER_ABSENT, 2948 [ 0x22 ] = { .id = UNSET,
2949 .name = "PHILIPS_FI1236_MK3" }, 2949 .name = "PHILIPS_FI1236_MK3" },
2950 [ 0x23 ] = { .id = TUNER_ABSENT, 2950 [ 0x23 ] = { .id = UNSET,
2951 .name = "PHILIPS_FI1216_MK3" }, 2951 .name = "PHILIPS_FI1216_MK3" },
2952}; 2952};
2953 2953
@@ -3564,7 +3564,7 @@ static void cx88_card_setup(struct cx88_core *core)
3564 mode_mask &= ~T_RADIO; 3564 mode_mask &= ~T_RADIO;
3565 } 3565 }
3566 3566
3567 if (core->board.tuner_type != TUNER_ABSENT) { 3567 if (core->board.tuner_type != UNSET) {
3568 tun_setup.mode_mask = mode_mask; 3568 tun_setup.mode_mask = mode_mask;
3569 tun_setup.type = core->board.tuner_type; 3569 tun_setup.type = core->board.tuner_type;
3570 tun_setup.addr = core->board.tuner_addr; 3570 tun_setup.addr = core->board.tuner_addr;
@@ -3691,6 +3691,11 @@ struct cx88_core *cx88_core_create(struct pci_dev *pci, int nr)
3691 core->nr = nr; 3691 core->nr = nr;
3692 sprintf(core->name, "cx88[%d]", core->nr); 3692 sprintf(core->name, "cx88[%d]", core->nr);
3693 3693
3694 core->tvnorm = V4L2_STD_NTSC_M;
3695 core->width = 320;
3696 core->height = 240;
3697 core->field = V4L2_FIELD_INTERLACED;
3698
3694 strcpy(core->v4l2_dev.name, core->name); 3699 strcpy(core->v4l2_dev.name, core->name);
3695 if (v4l2_device_register(NULL, &core->v4l2_dev)) { 3700 if (v4l2_device_register(NULL, &core->v4l2_dev)) {
3696 kfree(core); 3701 kfree(core);
@@ -3772,7 +3777,7 @@ struct cx88_core *cx88_core_create(struct pci_dev *pci, int nr)
3772 cx88_i2c_init(core, pci); 3777 cx88_i2c_init(core, pci);
3773 3778
3774 /* load tuner module, if needed */ 3779 /* load tuner module, if needed */
3775 if (TUNER_ABSENT != core->board.tuner_type) { 3780 if (UNSET != core->board.tuner_type) {
3776 /* Ignore 0x6b and 0x6f on cx88 boards. 3781 /* Ignore 0x6b and 0x6f on cx88 boards.
3777 * FusionHDTV5 RT Gold has an ir receiver at 0x6b 3782 * FusionHDTV5 RT Gold has an ir receiver at 0x6b
3778 * and an RTC at 0x6f which can get corrupted if probed. */ 3783 * and an RTC at 0x6f which can get corrupted if probed. */
diff --git a/drivers/media/pci/cx88/cx88-core.c b/drivers/media/pci/cx88/cx88-core.c
index 71630238027b..dee177ed5fe9 100644
--- a/drivers/media/pci/cx88/cx88-core.c
+++ b/drivers/media/pci/cx88/cx88-core.c
@@ -76,11 +76,16 @@ static DEFINE_MUTEX(devlist);
76static __le32* cx88_risc_field(__le32 *rp, struct scatterlist *sglist, 76static __le32* cx88_risc_field(__le32 *rp, struct scatterlist *sglist,
77 unsigned int offset, u32 sync_line, 77 unsigned int offset, u32 sync_line,
78 unsigned int bpl, unsigned int padding, 78 unsigned int bpl, unsigned int padding,
79 unsigned int lines, unsigned int lpi) 79 unsigned int lines, unsigned int lpi, bool jump)
80{ 80{
81 struct scatterlist *sg; 81 struct scatterlist *sg;
82 unsigned int line,todo,sol; 82 unsigned int line,todo,sol;
83 83
84 if (jump) {
85 (*rp++) = cpu_to_le32(RISC_JUMP);
86 (*rp++) = 0;
87 }
88
84 /* sync instruction */ 89 /* sync instruction */
85 if (sync_line != NO_SYNC_LINE) 90 if (sync_line != NO_SYNC_LINE)
86 *(rp++) = cpu_to_le32(RISC_RESYNC | sync_line); 91 *(rp++) = cpu_to_le32(RISC_RESYNC | sync_line);
@@ -90,7 +95,7 @@ static __le32* cx88_risc_field(__le32 *rp, struct scatterlist *sglist,
90 for (line = 0; line < lines; line++) { 95 for (line = 0; line < lines; line++) {
91 while (offset && offset >= sg_dma_len(sg)) { 96 while (offset && offset >= sg_dma_len(sg)) {
92 offset -= sg_dma_len(sg); 97 offset -= sg_dma_len(sg);
93 sg++; 98 sg = sg_next(sg);
94 } 99 }
95 if (lpi && line>0 && !(line % lpi)) 100 if (lpi && line>0 && !(line % lpi))
96 sol = RISC_SOL | RISC_IRQ1 | RISC_CNT_INC; 101 sol = RISC_SOL | RISC_IRQ1 | RISC_CNT_INC;
@@ -109,13 +114,13 @@ static __le32* cx88_risc_field(__le32 *rp, struct scatterlist *sglist,
109 *(rp++)=cpu_to_le32(sg_dma_address(sg)+offset); 114 *(rp++)=cpu_to_le32(sg_dma_address(sg)+offset);
110 todo -= (sg_dma_len(sg)-offset); 115 todo -= (sg_dma_len(sg)-offset);
111 offset = 0; 116 offset = 0;
112 sg++; 117 sg = sg_next(sg);
113 while (todo > sg_dma_len(sg)) { 118 while (todo > sg_dma_len(sg)) {
114 *(rp++)=cpu_to_le32(RISC_WRITE| 119 *(rp++)=cpu_to_le32(RISC_WRITE|
115 sg_dma_len(sg)); 120 sg_dma_len(sg));
116 *(rp++)=cpu_to_le32(sg_dma_address(sg)); 121 *(rp++)=cpu_to_le32(sg_dma_address(sg));
117 todo -= sg_dma_len(sg); 122 todo -= sg_dma_len(sg);
118 sg++; 123 sg = sg_next(sg);
119 } 124 }
120 *(rp++)=cpu_to_le32(RISC_WRITE|RISC_EOL|todo); 125 *(rp++)=cpu_to_le32(RISC_WRITE|RISC_EOL|todo);
121 *(rp++)=cpu_to_le32(sg_dma_address(sg)); 126 *(rp++)=cpu_to_le32(sg_dma_address(sg));
@@ -127,14 +132,13 @@ static __le32* cx88_risc_field(__le32 *rp, struct scatterlist *sglist,
127 return rp; 132 return rp;
128} 133}
129 134
130int cx88_risc_buffer(struct pci_dev *pci, struct btcx_riscmem *risc, 135int cx88_risc_buffer(struct pci_dev *pci, struct cx88_riscmem *risc,
131 struct scatterlist *sglist, 136 struct scatterlist *sglist,
132 unsigned int top_offset, unsigned int bottom_offset, 137 unsigned int top_offset, unsigned int bottom_offset,
133 unsigned int bpl, unsigned int padding, unsigned int lines) 138 unsigned int bpl, unsigned int padding, unsigned int lines)
134{ 139{
135 u32 instructions,fields; 140 u32 instructions,fields;
136 __le32 *rp; 141 __le32 *rp;
137 int rc;
138 142
139 fields = 0; 143 fields = 0;
140 if (UNSET != top_offset) 144 if (UNSET != top_offset)
@@ -147,18 +151,21 @@ int cx88_risc_buffer(struct pci_dev *pci, struct btcx_riscmem *risc,
147 can cause next bpl to start close to a page border. First DMA 151 can cause next bpl to start close to a page border. First DMA
148 region may be smaller than PAGE_SIZE */ 152 region may be smaller than PAGE_SIZE */
149 instructions = fields * (1 + ((bpl + padding) * lines) / PAGE_SIZE + lines); 153 instructions = fields * (1 + ((bpl + padding) * lines) / PAGE_SIZE + lines);
150 instructions += 2; 154 instructions += 4;
151 if ((rc = btcx_riscmem_alloc(pci,risc,instructions*8)) < 0) 155 risc->size = instructions * 8;
152 return rc; 156 risc->dma = 0;
157 risc->cpu = pci_zalloc_consistent(pci, risc->size, &risc->dma);
158 if (NULL == risc->cpu)
159 return -ENOMEM;
153 160
154 /* write risc instructions */ 161 /* write risc instructions */
155 rp = risc->cpu; 162 rp = risc->cpu;
156 if (UNSET != top_offset) 163 if (UNSET != top_offset)
157 rp = cx88_risc_field(rp, sglist, top_offset, 0, 164 rp = cx88_risc_field(rp, sglist, top_offset, 0,
158 bpl, padding, lines, 0); 165 bpl, padding, lines, 0, true);
159 if (UNSET != bottom_offset) 166 if (UNSET != bottom_offset)
160 rp = cx88_risc_field(rp, sglist, bottom_offset, 0x200, 167 rp = cx88_risc_field(rp, sglist, bottom_offset, 0x200,
161 bpl, padding, lines, 0); 168 bpl, padding, lines, 0, top_offset == UNSET);
162 169
163 /* save pointer to jmp instruction address */ 170 /* save pointer to jmp instruction address */
164 risc->jmp = rp; 171 risc->jmp = rp;
@@ -166,26 +173,28 @@ int cx88_risc_buffer(struct pci_dev *pci, struct btcx_riscmem *risc,
166 return 0; 173 return 0;
167} 174}
168 175
169int cx88_risc_databuffer(struct pci_dev *pci, struct btcx_riscmem *risc, 176int cx88_risc_databuffer(struct pci_dev *pci, struct cx88_riscmem *risc,
170 struct scatterlist *sglist, unsigned int bpl, 177 struct scatterlist *sglist, unsigned int bpl,
171 unsigned int lines, unsigned int lpi) 178 unsigned int lines, unsigned int lpi)
172{ 179{
173 u32 instructions; 180 u32 instructions;
174 __le32 *rp; 181 __le32 *rp;
175 int rc;
176 182
177 /* estimate risc mem: worst case is one write per page border + 183 /* estimate risc mem: worst case is one write per page border +
178 one write per scan line + syncs + jump (all 2 dwords). Here 184 one write per scan line + syncs + jump (all 2 dwords). Here
179 there is no padding and no sync. First DMA region may be smaller 185 there is no padding and no sync. First DMA region may be smaller
180 than PAGE_SIZE */ 186 than PAGE_SIZE */
181 instructions = 1 + (bpl * lines) / PAGE_SIZE + lines; 187 instructions = 1 + (bpl * lines) / PAGE_SIZE + lines;
182 instructions += 1; 188 instructions += 3;
183 if ((rc = btcx_riscmem_alloc(pci,risc,instructions*8)) < 0) 189 risc->size = instructions * 8;
184 return rc; 190 risc->dma = 0;
191 risc->cpu = pci_zalloc_consistent(pci, risc->size, &risc->dma);
192 if (NULL == risc->cpu)
193 return -ENOMEM;
185 194
186 /* write risc instructions */ 195 /* write risc instructions */
187 rp = risc->cpu; 196 rp = risc->cpu;
188 rp = cx88_risc_field(rp, sglist, 0, NO_SYNC_LINE, bpl, 0, lines, lpi); 197 rp = cx88_risc_field(rp, sglist, 0, NO_SYNC_LINE, bpl, 0, lines, lpi, !lpi);
189 198
190 /* save pointer to jmp instruction address */ 199 /* save pointer to jmp instruction address */
191 risc->jmp = rp; 200 risc->jmp = rp;
@@ -193,39 +202,6 @@ int cx88_risc_databuffer(struct pci_dev *pci, struct btcx_riscmem *risc,
193 return 0; 202 return 0;
194} 203}
195 204
196int cx88_risc_stopper(struct pci_dev *pci, struct btcx_riscmem *risc,
197 u32 reg, u32 mask, u32 value)
198{
199 __le32 *rp;
200 int rc;
201
202 if ((rc = btcx_riscmem_alloc(pci, risc, 4*16)) < 0)
203 return rc;
204
205 /* write risc instructions */
206 rp = risc->cpu;
207 *(rp++) = cpu_to_le32(RISC_WRITECR | RISC_IRQ2 | RISC_IMM);
208 *(rp++) = cpu_to_le32(reg);
209 *(rp++) = cpu_to_le32(value);
210 *(rp++) = cpu_to_le32(mask);
211 *(rp++) = cpu_to_le32(RISC_JUMP);
212 *(rp++) = cpu_to_le32(risc->dma);
213 return 0;
214}
215
216void
217cx88_free_buffer(struct videobuf_queue *q, struct cx88_buffer *buf)
218{
219 struct videobuf_dmabuf *dma=videobuf_to_dma(&buf->vb);
220
221 BUG_ON(in_interrupt());
222 videobuf_waiton(q, &buf->vb, 0, 0);
223 videobuf_dma_unmap(q->dev, dma);
224 videobuf_dma_free(dma);
225 btcx_riscmem_free(to_pci_dev(q->dev), &buf->risc);
226 buf->vb.state = VIDEOBUF_NEEDS_INIT;
227}
228
229/* ------------------------------------------------------------------ */ 205/* ------------------------------------------------------------------ */
230/* our SRAM memory layout */ 206/* our SRAM memory layout */
231 207
@@ -539,33 +515,12 @@ void cx88_wakeup(struct cx88_core *core,
539 struct cx88_dmaqueue *q, u32 count) 515 struct cx88_dmaqueue *q, u32 count)
540{ 516{
541 struct cx88_buffer *buf; 517 struct cx88_buffer *buf;
542 int bc; 518
543 519 buf = list_entry(q->active.next,
544 for (bc = 0;; bc++) { 520 struct cx88_buffer, list);
545 if (list_empty(&q->active)) 521 v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
546 break; 522 list_del(&buf->list);
547 buf = list_entry(q->active.next, 523 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE);
548 struct cx88_buffer, vb.queue);
549 /* count comes from the hw and is is 16bit wide --
550 * this trick handles wrap-arounds correctly for
551 * up to 32767 buffers in flight... */
552 if ((s16) (count - buf->count) < 0)
553 break;
554 v4l2_get_timestamp(&buf->vb.ts);
555 dprintk(2,"[%p/%d] wakeup reg=%d buf=%d\n",buf,buf->vb.i,
556 count, buf->count);
557 buf->vb.state = VIDEOBUF_DONE;
558 list_del(&buf->vb.queue);
559 wake_up(&buf->vb.done);
560 }
561 if (list_empty(&q->active)) {
562 del_timer(&q->timeout);
563 } else {
564 mod_timer(&q->timeout, jiffies+BUFFER_TIMEOUT);
565 }
566 if (bc != 1)
567 dprintk(2, "%s: %d buffers handled (should be 1)\n",
568 __func__, bc);
569} 524}
570 525
571void cx88_shutdown(struct cx88_core *core) 526void cx88_shutdown(struct cx88_core *core)
@@ -909,6 +864,13 @@ int cx88_set_tvnorm(struct cx88_core *core, v4l2_std_id norm)
909 u32 bdelay,agcdelay,htotal; 864 u32 bdelay,agcdelay,htotal;
910 u32 cxiformat, cxoformat; 865 u32 cxiformat, cxoformat;
911 866
867 if (norm == core->tvnorm)
868 return 0;
869 if (core->v4ldev && (vb2_is_busy(&core->v4ldev->vb2_vidq) ||
870 vb2_is_busy(&core->v4ldev->vb2_vbiq)))
871 return -EBUSY;
872 if (core->dvbdev && vb2_is_busy(&core->dvbdev->vb2_mpegq))
873 return -EBUSY;
912 core->tvnorm = norm; 874 core->tvnorm = norm;
913 fsc8 = norm_fsc8(norm); 875 fsc8 = norm_fsc8(norm);
914 adc_clock = xtal; 876 adc_clock = xtal;
@@ -1043,6 +1005,7 @@ struct video_device *cx88_vdev_init(struct cx88_core *core,
1043 vfd->v4l2_dev = &core->v4l2_dev; 1005 vfd->v4l2_dev = &core->v4l2_dev;
1044 vfd->dev_parent = &pci->dev; 1006 vfd->dev_parent = &pci->dev;
1045 vfd->release = video_device_release; 1007 vfd->release = video_device_release;
1008 vfd->lock = &core->lock;
1046 snprintf(vfd->name, sizeof(vfd->name), "%s %s (%s)", 1009 snprintf(vfd->name, sizeof(vfd->name), "%s %s (%s)",
1047 core->name, type, core->board.name); 1010 core->name, type, core->board.name);
1048 return vfd; 1011 return vfd;
@@ -1114,8 +1077,6 @@ EXPORT_SYMBOL(cx88_shutdown);
1114 1077
1115EXPORT_SYMBOL(cx88_risc_buffer); 1078EXPORT_SYMBOL(cx88_risc_buffer);
1116EXPORT_SYMBOL(cx88_risc_databuffer); 1079EXPORT_SYMBOL(cx88_risc_databuffer);
1117EXPORT_SYMBOL(cx88_risc_stopper);
1118EXPORT_SYMBOL(cx88_free_buffer);
1119 1080
1120EXPORT_SYMBOL(cx88_sram_channels); 1081EXPORT_SYMBOL(cx88_sram_channels);
1121EXPORT_SYMBOL(cx88_sram_channel_setup); 1082EXPORT_SYMBOL(cx88_sram_channel_setup);
diff --git a/drivers/media/pci/cx88/cx88-dvb.c b/drivers/media/pci/cx88/cx88-dvb.c
index 053ed1ba1d85..c344bfd0b896 100644
--- a/drivers/media/pci/cx88/cx88-dvb.c
+++ b/drivers/media/pci/cx88/cx88-dvb.c
@@ -82,43 +82,89 @@ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
82 82
83/* ------------------------------------------------------------------ */ 83/* ------------------------------------------------------------------ */
84 84
85static int dvb_buf_setup(struct videobuf_queue *q, 85static int queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
86 unsigned int *count, unsigned int *size) 86 unsigned int *num_buffers, unsigned int *num_planes,
87 unsigned int sizes[], void *alloc_ctxs[])
87{ 88{
88 struct cx8802_dev *dev = q->priv_data; 89 struct cx8802_dev *dev = q->drv_priv;
89 90
91 *num_planes = 1;
90 dev->ts_packet_size = 188 * 4; 92 dev->ts_packet_size = 188 * 4;
91 dev->ts_packet_count = dvb_buf_tscnt; 93 dev->ts_packet_count = dvb_buf_tscnt;
92 94 sizes[0] = dev->ts_packet_size * dev->ts_packet_count;
93 *size = dev->ts_packet_size * dev->ts_packet_count; 95 *num_buffers = dvb_buf_tscnt;
94 *count = dvb_buf_tscnt;
95 return 0; 96 return 0;
96} 97}
97 98
98static int dvb_buf_prepare(struct videobuf_queue *q, 99static int buffer_prepare(struct vb2_buffer *vb)
99 struct videobuf_buffer *vb, enum v4l2_field field) 100{
101 struct cx8802_dev *dev = vb->vb2_queue->drv_priv;
102 struct cx88_buffer *buf = container_of(vb, struct cx88_buffer, vb);
103
104 return cx8802_buf_prepare(vb->vb2_queue, dev, buf);
105}
106
107static void buffer_finish(struct vb2_buffer *vb)
108{
109 struct cx8802_dev *dev = vb->vb2_queue->drv_priv;
110 struct cx88_buffer *buf = container_of(vb, struct cx88_buffer, vb);
111 struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
112 struct cx88_riscmem *risc = &buf->risc;
113
114 if (risc->cpu)
115 pci_free_consistent(dev->pci, risc->size, risc->cpu, risc->dma);
116 memset(risc, 0, sizeof(*risc));
117
118 dma_unmap_sg(&dev->pci->dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
119}
120
121static void buffer_queue(struct vb2_buffer *vb)
100{ 122{
101 struct cx8802_dev *dev = q->priv_data; 123 struct cx8802_dev *dev = vb->vb2_queue->drv_priv;
102 return cx8802_buf_prepare(q, dev, (struct cx88_buffer*)vb,field); 124 struct cx88_buffer *buf = container_of(vb, struct cx88_buffer, vb);
125
126 cx8802_buf_queue(dev, buf);
103} 127}
104 128
105static void dvb_buf_queue(struct videobuf_queue *q, struct videobuf_buffer *vb) 129static int start_streaming(struct vb2_queue *q, unsigned int count)
106{ 130{
107 struct cx8802_dev *dev = q->priv_data; 131 struct cx8802_dev *dev = q->drv_priv;
108 cx8802_buf_queue(dev, (struct cx88_buffer*)vb); 132 struct cx88_dmaqueue *dmaq = &dev->mpegq;
133 struct cx88_buffer *buf;
134
135 buf = list_entry(dmaq->active.next, struct cx88_buffer, list);
136 cx8802_start_dma(dev, dmaq, buf);
137 return 0;
109} 138}
110 139
111static void dvb_buf_release(struct videobuf_queue *q, 140static void stop_streaming(struct vb2_queue *q)
112 struct videobuf_buffer *vb)
113{ 141{
114 cx88_free_buffer(q, (struct cx88_buffer*)vb); 142 struct cx8802_dev *dev = q->drv_priv;
143 struct cx88_dmaqueue *dmaq = &dev->mpegq;
144 unsigned long flags;
145
146 cx8802_cancel_buffers(dev);
147
148 spin_lock_irqsave(&dev->slock, flags);
149 while (!list_empty(&dmaq->active)) {
150 struct cx88_buffer *buf = list_entry(dmaq->active.next,
151 struct cx88_buffer, list);
152
153 list_del(&buf->list);
154 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
155 }
156 spin_unlock_irqrestore(&dev->slock, flags);
115} 157}
116 158
117static const struct videobuf_queue_ops dvb_qops = { 159static struct vb2_ops dvb_qops = {
118 .buf_setup = dvb_buf_setup, 160 .queue_setup = queue_setup,
119 .buf_prepare = dvb_buf_prepare, 161 .buf_prepare = buffer_prepare,
120 .buf_queue = dvb_buf_queue, 162 .buf_finish = buffer_finish,
121 .buf_release = dvb_buf_release, 163 .buf_queue = buffer_queue,
164 .wait_prepare = vb2_ops_wait_prepare,
165 .wait_finish = vb2_ops_wait_finish,
166 .start_streaming = start_streaming,
167 .stop_streaming = stop_streaming,
122}; 168};
123 169
124/* ------------------------------------------------------------------ */ 170/* ------------------------------------------------------------------ */
@@ -130,7 +176,7 @@ static int cx88_dvb_bus_ctrl(struct dvb_frontend* fe, int acquire)
130 int ret = 0; 176 int ret = 0;
131 int fe_id; 177 int fe_id;
132 178
133 fe_id = videobuf_dvb_find_frontend(&dev->frontends, fe); 179 fe_id = vb2_dvb_find_frontend(&dev->frontends, fe);
134 if (!fe_id) { 180 if (!fe_id) {
135 printk(KERN_ERR "%s() No frontend found\n", __func__); 181 printk(KERN_ERR "%s() No frontend found\n", __func__);
136 return -EINVAL; 182 return -EINVAL;
@@ -154,8 +200,8 @@ static int cx88_dvb_bus_ctrl(struct dvb_frontend* fe, int acquire)
154 200
155static void cx88_dvb_gate_ctrl(struct cx88_core *core, int open) 201static void cx88_dvb_gate_ctrl(struct cx88_core *core, int open)
156{ 202{
157 struct videobuf_dvb_frontends *f; 203 struct vb2_dvb_frontends *f;
158 struct videobuf_dvb_frontend *fe; 204 struct vb2_dvb_frontend *fe;
159 205
160 if (!core->dvbdev) 206 if (!core->dvbdev)
161 return; 207 return;
@@ -166,9 +212,9 @@ static void cx88_dvb_gate_ctrl(struct cx88_core *core, int open)
166 return; 212 return;
167 213
168 if (f->gate <= 1) /* undefined or fe0 */ 214 if (f->gate <= 1) /* undefined or fe0 */
169 fe = videobuf_dvb_get_frontend(f, 1); 215 fe = vb2_dvb_get_frontend(f, 1);
170 else 216 else
171 fe = videobuf_dvb_get_frontend(f, f->gate); 217 fe = vb2_dvb_get_frontend(f, f->gate);
172 218
173 if (fe && fe->dvb.frontend && fe->dvb.frontend->ops.i2c_gate_ctrl) 219 if (fe && fe->dvb.frontend && fe->dvb.frontend->ops.i2c_gate_ctrl)
174 fe->dvb.frontend->ops.i2c_gate_ctrl(fe->dvb.frontend, open); 220 fe->dvb.frontend->ops.i2c_gate_ctrl(fe->dvb.frontend, open);
@@ -565,7 +611,7 @@ static const struct xc5000_config dvico_fusionhdtv7_tuner_config = {
565static int attach_xc3028(u8 addr, struct cx8802_dev *dev) 611static int attach_xc3028(u8 addr, struct cx8802_dev *dev)
566{ 612{
567 struct dvb_frontend *fe; 613 struct dvb_frontend *fe;
568 struct videobuf_dvb_frontend *fe0 = NULL; 614 struct vb2_dvb_frontend *fe0 = NULL;
569 struct xc2028_ctrl ctl; 615 struct xc2028_ctrl ctl;
570 struct xc2028_config cfg = { 616 struct xc2028_config cfg = {
571 .i2c_adap = &dev->core->i2c_adap, 617 .i2c_adap = &dev->core->i2c_adap,
@@ -574,7 +620,7 @@ static int attach_xc3028(u8 addr, struct cx8802_dev *dev)
574 }; 620 };
575 621
576 /* Get the first frontend */ 622 /* Get the first frontend */
577 fe0 = videobuf_dvb_get_frontend(&dev->frontends, 1); 623 fe0 = vb2_dvb_get_frontend(&dev->frontends, 1);
578 if (!fe0) 624 if (!fe0)
579 return -EINVAL; 625 return -EINVAL;
580 626
@@ -611,10 +657,10 @@ static int attach_xc3028(u8 addr, struct cx8802_dev *dev)
611static int attach_xc4000(struct cx8802_dev *dev, struct xc4000_config *cfg) 657static int attach_xc4000(struct cx8802_dev *dev, struct xc4000_config *cfg)
612{ 658{
613 struct dvb_frontend *fe; 659 struct dvb_frontend *fe;
614 struct videobuf_dvb_frontend *fe0 = NULL; 660 struct vb2_dvb_frontend *fe0 = NULL;
615 661
616 /* Get the first frontend */ 662 /* Get the first frontend */
617 fe0 = videobuf_dvb_get_frontend(&dev->frontends, 1); 663 fe0 = vb2_dvb_get_frontend(&dev->frontends, 1);
618 if (!fe0) 664 if (!fe0)
619 return -EINVAL; 665 return -EINVAL;
620 666
@@ -745,7 +791,7 @@ static const struct stv0288_config tevii_tuner_earda_config = {
745static int cx8802_alloc_frontends(struct cx8802_dev *dev) 791static int cx8802_alloc_frontends(struct cx8802_dev *dev)
746{ 792{
747 struct cx88_core *core = dev->core; 793 struct cx88_core *core = dev->core;
748 struct videobuf_dvb_frontend *fe = NULL; 794 struct vb2_dvb_frontend *fe = NULL;
749 int i; 795 int i;
750 796
751 mutex_init(&dev->frontends.lock); 797 mutex_init(&dev->frontends.lock);
@@ -757,10 +803,10 @@ static int cx8802_alloc_frontends(struct cx8802_dev *dev)
757 printk(KERN_INFO "%s() allocating %d frontend(s)\n", __func__, 803 printk(KERN_INFO "%s() allocating %d frontend(s)\n", __func__,
758 core->board.num_frontends); 804 core->board.num_frontends);
759 for (i = 1; i <= core->board.num_frontends; i++) { 805 for (i = 1; i <= core->board.num_frontends; i++) {
760 fe = videobuf_dvb_alloc_frontend(&dev->frontends, i); 806 fe = vb2_dvb_alloc_frontend(&dev->frontends, i);
761 if (!fe) { 807 if (!fe) {
762 printk(KERN_ERR "%s() failed to alloc\n", __func__); 808 printk(KERN_ERR "%s() failed to alloc\n", __func__);
763 videobuf_dvb_dealloc_frontends(&dev->frontends); 809 vb2_dvb_dealloc_frontends(&dev->frontends);
764 return -ENOMEM; 810 return -ENOMEM;
765 } 811 }
766 } 812 }
@@ -958,7 +1004,7 @@ static const struct stv0299_config samsung_stv0299_config = {
958static int dvb_register(struct cx8802_dev *dev) 1004static int dvb_register(struct cx8802_dev *dev)
959{ 1005{
960 struct cx88_core *core = dev->core; 1006 struct cx88_core *core = dev->core;
961 struct videobuf_dvb_frontend *fe0, *fe1 = NULL; 1007 struct vb2_dvb_frontend *fe0, *fe1 = NULL;
962 int mfe_shared = 0; /* bus not shared by default */ 1008 int mfe_shared = 0; /* bus not shared by default */
963 int res = -EINVAL; 1009 int res = -EINVAL;
964 1010
@@ -968,7 +1014,7 @@ static int dvb_register(struct cx8802_dev *dev)
968 } 1014 }
969 1015
970 /* Get the first frontend */ 1016 /* Get the first frontend */
971 fe0 = videobuf_dvb_get_frontend(&dev->frontends, 1); 1017 fe0 = vb2_dvb_get_frontend(&dev->frontends, 1);
972 if (!fe0) 1018 if (!fe0)
973 goto frontend_detach; 1019 goto frontend_detach;
974 1020
@@ -1046,7 +1092,7 @@ static int dvb_register(struct cx8802_dev *dev)
1046 goto frontend_detach; 1092 goto frontend_detach;
1047 } 1093 }
1048 /* MFE frontend 2 */ 1094 /* MFE frontend 2 */
1049 fe1 = videobuf_dvb_get_frontend(&dev->frontends, 2); 1095 fe1 = vb2_dvb_get_frontend(&dev->frontends, 2);
1050 if (!fe1) 1096 if (!fe1)
1051 goto frontend_detach; 1097 goto frontend_detach;
1052 /* DVB-T init */ 1098 /* DVB-T init */
@@ -1415,7 +1461,7 @@ static int dvb_register(struct cx8802_dev *dev)
1415 goto frontend_detach; 1461 goto frontend_detach;
1416 } 1462 }
1417 /* MFE frontend 2 */ 1463 /* MFE frontend 2 */
1418 fe1 = videobuf_dvb_get_frontend(&dev->frontends, 2); 1464 fe1 = vb2_dvb_get_frontend(&dev->frontends, 2);
1419 if (!fe1) 1465 if (!fe1)
1420 goto frontend_detach; 1466 goto frontend_detach;
1421 /* DVB-T Init */ 1467 /* DVB-T Init */
@@ -1594,7 +1640,7 @@ static int dvb_register(struct cx8802_dev *dev)
1594 call_all(core, core, s_power, 0); 1640 call_all(core, core, s_power, 0);
1595 1641
1596 /* register everything */ 1642 /* register everything */
1597 res = videobuf_dvb_register_bus(&dev->frontends, THIS_MODULE, dev, 1643 res = vb2_dvb_register_bus(&dev->frontends, THIS_MODULE, dev,
1598 &dev->pci->dev, adapter_nr, mfe_shared); 1644 &dev->pci->dev, adapter_nr, mfe_shared);
1599 if (res) 1645 if (res)
1600 goto frontend_detach; 1646 goto frontend_detach;
@@ -1602,7 +1648,7 @@ static int dvb_register(struct cx8802_dev *dev)
1602 1648
1603frontend_detach: 1649frontend_detach:
1604 core->gate_ctrl = NULL; 1650 core->gate_ctrl = NULL;
1605 videobuf_dvb_dealloc_frontends(&dev->frontends); 1651 vb2_dvb_dealloc_frontends(&dev->frontends);
1606 return res; 1652 return res;
1607} 1653}
1608 1654
@@ -1697,7 +1743,7 @@ static int cx8802_dvb_probe(struct cx8802_driver *drv)
1697 struct cx88_core *core = drv->core; 1743 struct cx88_core *core = drv->core;
1698 struct cx8802_dev *dev = drv->core->dvbdev; 1744 struct cx8802_dev *dev = drv->core->dvbdev;
1699 int err; 1745 int err;
1700 struct videobuf_dvb_frontend *fe; 1746 struct vb2_dvb_frontend *fe;
1701 int i; 1747 int i;
1702 1748
1703 dprintk( 1, "%s\n", __func__); 1749 dprintk( 1, "%s\n", __func__);
@@ -1726,19 +1772,31 @@ static int cx8802_dvb_probe(struct cx8802_driver *drv)
1726 1772
1727 err = -ENODEV; 1773 err = -ENODEV;
1728 for (i = 1; i <= core->board.num_frontends; i++) { 1774 for (i = 1; i <= core->board.num_frontends; i++) {
1729 fe = videobuf_dvb_get_frontend(&core->dvbdev->frontends, i); 1775 struct vb2_queue *q;
1776
1777 fe = vb2_dvb_get_frontend(&core->dvbdev->frontends, i);
1730 if (fe == NULL) { 1778 if (fe == NULL) {
1731 printk(KERN_ERR "%s() failed to get frontend(%d)\n", 1779 printk(KERN_ERR "%s() failed to get frontend(%d)\n",
1732 __func__, i); 1780 __func__, i);
1733 goto fail_probe; 1781 goto fail_probe;
1734 } 1782 }
1735 videobuf_queue_sg_init(&fe->dvb.dvbq, &dvb_qops, 1783 q = &fe->dvb.dvbq;
1736 &dev->pci->dev, &dev->slock, 1784 q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
1737 V4L2_BUF_TYPE_VIDEO_CAPTURE, 1785 q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
1738 V4L2_FIELD_TOP, 1786 q->gfp_flags = GFP_DMA32;
1739 sizeof(struct cx88_buffer), 1787 q->min_buffers_needed = 2;
1740 dev, NULL); 1788 q->drv_priv = dev;
1741 /* init struct videobuf_dvb */ 1789 q->buf_struct_size = sizeof(struct cx88_buffer);
1790 q->ops = &dvb_qops;
1791 q->mem_ops = &vb2_dma_sg_memops;
1792 q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
1793 q->lock = &core->lock;
1794
1795 err = vb2_queue_init(q);
1796 if (err < 0)
1797 goto fail_probe;
1798
1799 /* init struct vb2_dvb */
1742 fe->dvb.name = dev->core->name; 1800 fe->dvb.name = dev->core->name;
1743 } 1801 }
1744 1802
@@ -1749,7 +1807,7 @@ static int cx8802_dvb_probe(struct cx8802_driver *drv)
1749 core->name, err); 1807 core->name, err);
1750 return err; 1808 return err;
1751fail_probe: 1809fail_probe:
1752 videobuf_dvb_dealloc_frontends(&core->dvbdev->frontends); 1810 vb2_dvb_dealloc_frontends(&core->dvbdev->frontends);
1753fail_core: 1811fail_core:
1754 return err; 1812 return err;
1755} 1813}
@@ -1761,7 +1819,7 @@ static int cx8802_dvb_remove(struct cx8802_driver *drv)
1761 1819
1762 dprintk( 1, "%s\n", __func__); 1820 dprintk( 1, "%s\n", __func__);
1763 1821
1764 videobuf_dvb_unregister_bus(&dev->frontends); 1822 vb2_dvb_unregister_bus(&dev->frontends);
1765 1823
1766 vp3054_i2c_remove(dev); 1824 vp3054_i2c_remove(dev);
1767 1825
diff --git a/drivers/media/pci/cx88/cx88-mpeg.c b/drivers/media/pci/cx88/cx88-mpeg.c
index 74b7b8614c23..f181a3a10389 100644
--- a/drivers/media/pci/cx88/cx88-mpeg.c
+++ b/drivers/media/pci/cx88/cx88-mpeg.c
@@ -86,21 +86,21 @@ static LIST_HEAD(cx8802_devlist);
86static DEFINE_MUTEX(cx8802_mutex); 86static DEFINE_MUTEX(cx8802_mutex);
87/* ------------------------------------------------------------------ */ 87/* ------------------------------------------------------------------ */
88 88
89static int cx8802_start_dma(struct cx8802_dev *dev, 89int cx8802_start_dma(struct cx8802_dev *dev,
90 struct cx88_dmaqueue *q, 90 struct cx88_dmaqueue *q,
91 struct cx88_buffer *buf) 91 struct cx88_buffer *buf)
92{ 92{
93 struct cx88_core *core = dev->core; 93 struct cx88_core *core = dev->core;
94 94
95 dprintk(1, "cx8802_start_dma w: %d, h: %d, f: %d\n", 95 dprintk(1, "cx8802_start_dma w: %d, h: %d, f: %d\n",
96 buf->vb.width, buf->vb.height, buf->vb.field); 96 core->width, core->height, core->field);
97 97
98 /* setup fifo + format */ 98 /* setup fifo + format */
99 cx88_sram_channel_setup(core, &cx88_sram_channels[SRAM_CH28], 99 cx88_sram_channel_setup(core, &cx88_sram_channels[SRAM_CH28],
100 dev->ts_packet_size, buf->risc.dma); 100 dev->ts_packet_size, buf->risc.dma);
101 101
102 /* write TS length to chip */ 102 /* write TS length to chip */
103 cx_write(MO_TS_LNGTH, buf->vb.width); 103 cx_write(MO_TS_LNGTH, dev->ts_packet_size);
104 104
105 /* FIXME: this needs a review. 105 /* FIXME: this needs a review.
106 * also: move to cx88-blackbird + cx88-dvb source files? */ 106 * also: move to cx88-blackbird + cx88-dvb source files? */
@@ -210,83 +210,44 @@ static int cx8802_restart_queue(struct cx8802_dev *dev,
210 210
211 dprintk( 1, "cx8802_restart_queue\n" ); 211 dprintk( 1, "cx8802_restart_queue\n" );
212 if (list_empty(&q->active)) 212 if (list_empty(&q->active))
213 {
214 struct cx88_buffer *prev;
215 prev = NULL;
216
217 dprintk(1, "cx8802_restart_queue: queue is empty\n" );
218
219 for (;;) {
220 if (list_empty(&q->queued))
221 return 0;
222 buf = list_entry(q->queued.next, struct cx88_buffer, vb.queue);
223 if (NULL == prev) {
224 list_move_tail(&buf->vb.queue, &q->active);
225 cx8802_start_dma(dev, q, buf);
226 buf->vb.state = VIDEOBUF_ACTIVE;
227 buf->count = q->count++;
228 mod_timer(&q->timeout, jiffies+BUFFER_TIMEOUT);
229 dprintk(1,"[%p/%d] restart_queue - first active\n",
230 buf,buf->vb.i);
231
232 } else if (prev->vb.width == buf->vb.width &&
233 prev->vb.height == buf->vb.height &&
234 prev->fmt == buf->fmt) {
235 list_move_tail(&buf->vb.queue, &q->active);
236 buf->vb.state = VIDEOBUF_ACTIVE;
237 buf->count = q->count++;
238 prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
239 dprintk(1,"[%p/%d] restart_queue - move to active\n",
240 buf,buf->vb.i);
241 } else {
242 return 0;
243 }
244 prev = buf;
245 }
246 return 0; 213 return 0;
247 }
248 214
249 buf = list_entry(q->active.next, struct cx88_buffer, vb.queue); 215 buf = list_entry(q->active.next, struct cx88_buffer, list);
250 dprintk(2,"restart_queue [%p/%d]: restart dma\n", 216 dprintk(2,"restart_queue [%p/%d]: restart dma\n",
251 buf, buf->vb.i); 217 buf, buf->vb.v4l2_buf.index);
252 cx8802_start_dma(dev, q, buf); 218 cx8802_start_dma(dev, q, buf);
253 list_for_each_entry(buf, &q->active, vb.queue) 219 list_for_each_entry(buf, &q->active, list)
254 buf->count = q->count++; 220 buf->count = q->count++;
255 mod_timer(&q->timeout, jiffies+BUFFER_TIMEOUT);
256 return 0; 221 return 0;
257} 222}
258 223
259/* ------------------------------------------------------------------ */ 224/* ------------------------------------------------------------------ */
260 225
261int cx8802_buf_prepare(struct videobuf_queue *q, struct cx8802_dev *dev, 226int cx8802_buf_prepare(struct vb2_queue *q, struct cx8802_dev *dev,
262 struct cx88_buffer *buf, enum v4l2_field field) 227 struct cx88_buffer *buf)
263{ 228{
264 int size = dev->ts_packet_size * dev->ts_packet_count; 229 int size = dev->ts_packet_size * dev->ts_packet_count;
265 struct videobuf_dmabuf *dma=videobuf_to_dma(&buf->vb); 230 struct sg_table *sgt = vb2_dma_sg_plane_desc(&buf->vb, 0);
231 struct cx88_riscmem *risc = &buf->risc;
266 int rc; 232 int rc;
267 233
268 dprintk(1, "%s: %p\n", __func__, buf); 234 if (vb2_plane_size(&buf->vb, 0) < size)
269 if (0 != buf->vb.baddr && buf->vb.bsize < size)
270 return -EINVAL; 235 return -EINVAL;
236 vb2_set_plane_payload(&buf->vb, 0, size);
271 237
272 if (VIDEOBUF_NEEDS_INIT == buf->vb.state) { 238 rc = dma_map_sg(&dev->pci->dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
273 buf->vb.width = dev->ts_packet_size; 239 if (!rc)
274 buf->vb.height = dev->ts_packet_count; 240 return -EIO;
275 buf->vb.size = size; 241
276 buf->vb.field = field /*V4L2_FIELD_TOP*/; 242 rc = cx88_risc_databuffer(dev->pci, risc, sgt->sgl,
277 243 dev->ts_packet_size, dev->ts_packet_count, 0);
278 if (0 != (rc = videobuf_iolock(q,&buf->vb,NULL))) 244 if (rc) {
279 goto fail; 245 if (risc->cpu)
280 cx88_risc_databuffer(dev->pci, &buf->risc, 246 pci_free_consistent(dev->pci, risc->size, risc->cpu, risc->dma);
281 dma->sglist, 247 memset(risc, 0, sizeof(*risc));
282 buf->vb.width, buf->vb.height, 0); 248 return rc;
283 } 249 }
284 buf->vb.state = VIDEOBUF_PREPARED;
285 return 0; 250 return 0;
286
287 fail:
288 cx88_free_buffer(q,buf);
289 return rc;
290} 251}
291 252
292void cx8802_buf_queue(struct cx8802_dev *dev, struct cx88_buffer *buf) 253void cx8802_buf_queue(struct cx8802_dev *dev, struct cx88_buffer *buf)
@@ -295,35 +256,33 @@ void cx8802_buf_queue(struct cx8802_dev *dev, struct cx88_buffer *buf)
295 struct cx88_dmaqueue *cx88q = &dev->mpegq; 256 struct cx88_dmaqueue *cx88q = &dev->mpegq;
296 257
297 dprintk( 1, "cx8802_buf_queue\n" ); 258 dprintk( 1, "cx8802_buf_queue\n" );
298 /* add jump to stopper */ 259 /* add jump to start */
299 buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_IRQ1 | RISC_CNT_INC); 260 buf->risc.cpu[1] = cpu_to_le32(buf->risc.dma + 8);
300 buf->risc.jmp[1] = cpu_to_le32(cx88q->stopper.dma); 261 buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_CNT_INC);
262 buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma + 8);
301 263
302 if (list_empty(&cx88q->active)) { 264 if (list_empty(&cx88q->active)) {
303 dprintk( 1, "queue is empty - first active\n" ); 265 dprintk( 1, "queue is empty - first active\n" );
304 list_add_tail(&buf->vb.queue,&cx88q->active); 266 list_add_tail(&buf->list, &cx88q->active);
305 cx8802_start_dma(dev, cx88q, buf);
306 buf->vb.state = VIDEOBUF_ACTIVE;
307 buf->count = cx88q->count++; 267 buf->count = cx88q->count++;
308 mod_timer(&cx88q->timeout, jiffies+BUFFER_TIMEOUT);
309 dprintk(1,"[%p/%d] %s - first active\n", 268 dprintk(1,"[%p/%d] %s - first active\n",
310 buf, buf->vb.i, __func__); 269 buf, buf->vb.v4l2_buf.index, __func__);
311 270
312 } else { 271 } else {
272 buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
313 dprintk( 1, "queue is not empty - append to active\n" ); 273 dprintk( 1, "queue is not empty - append to active\n" );
314 prev = list_entry(cx88q->active.prev, struct cx88_buffer, vb.queue); 274 prev = list_entry(cx88q->active.prev, struct cx88_buffer, list);
315 list_add_tail(&buf->vb.queue,&cx88q->active); 275 list_add_tail(&buf->list, &cx88q->active);
316 buf->vb.state = VIDEOBUF_ACTIVE;
317 buf->count = cx88q->count++; 276 buf->count = cx88q->count++;
318 prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma); 277 prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
319 dprintk( 1, "[%p/%d] %s - append to active\n", 278 dprintk( 1, "[%p/%d] %s - append to active\n",
320 buf, buf->vb.i, __func__); 279 buf, buf->vb.v4l2_buf.index, __func__);
321 } 280 }
322} 281}
323 282
324/* ----------------------------------------------------------- */ 283/* ----------------------------------------------------------- */
325 284
326static void do_cancel_buffers(struct cx8802_dev *dev, const char *reason, int restart) 285static void do_cancel_buffers(struct cx8802_dev *dev)
327{ 286{
328 struct cx88_dmaqueue *q = &dev->mpegq; 287 struct cx88_dmaqueue *q = &dev->mpegq;
329 struct cx88_buffer *buf; 288 struct cx88_buffer *buf;
@@ -331,41 +290,18 @@ static void do_cancel_buffers(struct cx8802_dev *dev, const char *reason, int re
331 290
332 spin_lock_irqsave(&dev->slock,flags); 291 spin_lock_irqsave(&dev->slock,flags);
333 while (!list_empty(&q->active)) { 292 while (!list_empty(&q->active)) {
334 buf = list_entry(q->active.next, struct cx88_buffer, vb.queue); 293 buf = list_entry(q->active.next, struct cx88_buffer, list);
335 list_del(&buf->vb.queue); 294 list_del(&buf->list);
336 buf->vb.state = VIDEOBUF_ERROR; 295 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
337 wake_up(&buf->vb.done);
338 dprintk(1,"[%p/%d] %s - dma=0x%08lx\n",
339 buf, buf->vb.i, reason, (unsigned long)buf->risc.dma);
340 }
341 if (restart)
342 {
343 dprintk(1, "restarting queue\n" );
344 cx8802_restart_queue(dev,q);
345 } 296 }
346 spin_unlock_irqrestore(&dev->slock,flags); 297 spin_unlock_irqrestore(&dev->slock,flags);
347} 298}
348 299
349void cx8802_cancel_buffers(struct cx8802_dev *dev) 300void cx8802_cancel_buffers(struct cx8802_dev *dev)
350{ 301{
351 struct cx88_dmaqueue *q = &dev->mpegq;
352
353 dprintk( 1, "cx8802_cancel_buffers" ); 302 dprintk( 1, "cx8802_cancel_buffers" );
354 del_timer_sync(&q->timeout);
355 cx8802_stop_dma(dev); 303 cx8802_stop_dma(dev);
356 do_cancel_buffers(dev,"cancel",0); 304 do_cancel_buffers(dev);
357}
358
359static void cx8802_timeout(unsigned long data)
360{
361 struct cx8802_dev *dev = (struct cx8802_dev*)data;
362
363 dprintk(1, "%s\n",__func__);
364
365 if (debug)
366 cx88_sram_channel_dump(dev->core, &cx88_sram_channels[SRAM_CH28]);
367 cx8802_stop_dma(dev);
368 do_cancel_buffers(dev,"timeout",1);
369} 305}
370 306
371static const char * cx88_mpeg_irqs[32] = { 307static const char * cx88_mpeg_irqs[32] = {
@@ -411,19 +347,11 @@ static void cx8802_mpeg_irq(struct cx8802_dev *dev)
411 spin_unlock(&dev->slock); 347 spin_unlock(&dev->slock);
412 } 348 }
413 349
414 /* risc2 y */
415 if (status & 0x10) {
416 spin_lock(&dev->slock);
417 cx8802_restart_queue(dev,&dev->mpegq);
418 spin_unlock(&dev->slock);
419 }
420
421 /* other general errors */ 350 /* other general errors */
422 if (status & 0x1f0100) { 351 if (status & 0x1f0100) {
423 dprintk( 0, "general errors: 0x%08x\n", status & 0x1f0100 ); 352 dprintk( 0, "general errors: 0x%08x\n", status & 0x1f0100 );
424 spin_lock(&dev->slock); 353 spin_lock(&dev->slock);
425 cx8802_stop_dma(dev); 354 cx8802_stop_dma(dev);
426 cx8802_restart_queue(dev,&dev->mpegq);
427 spin_unlock(&dev->slock); 355 spin_unlock(&dev->slock);
428 } 356 }
429} 357}
@@ -490,12 +418,6 @@ static int cx8802_init_common(struct cx8802_dev *dev)
490 418
491 /* init dma queue */ 419 /* init dma queue */
492 INIT_LIST_HEAD(&dev->mpegq.active); 420 INIT_LIST_HEAD(&dev->mpegq.active);
493 INIT_LIST_HEAD(&dev->mpegq.queued);
494 dev->mpegq.timeout.function = cx8802_timeout;
495 dev->mpegq.timeout.data = (unsigned long)dev;
496 init_timer(&dev->mpegq.timeout);
497 cx88_risc_stopper(dev->pci,&dev->mpegq.stopper,
498 MO_TS_DMACNTRL,0x11,0x00);
499 421
500 /* get irq */ 422 /* get irq */
501 err = request_irq(dev->pci->irq, cx8802_irq, 423 err = request_irq(dev->pci->irq, cx8802_irq,
@@ -520,9 +442,6 @@ static void cx8802_fini_common(struct cx8802_dev *dev)
520 442
521 /* unregister stuff */ 443 /* unregister stuff */
522 free_irq(dev->pci->irq, dev); 444 free_irq(dev->pci->irq, dev);
523
524 /* free memory */
525 btcx_riscmem_free(dev->pci,&dev->mpegq.stopper);
526} 445}
527 446
528/* ----------------------------------------------------------- */ 447/* ----------------------------------------------------------- */
@@ -539,7 +458,6 @@ static int cx8802_suspend_common(struct pci_dev *pci_dev, pm_message_t state)
539 dprintk( 2, "suspend\n" ); 458 dprintk( 2, "suspend\n" );
540 printk("%s: suspend mpeg\n", core->name); 459 printk("%s: suspend mpeg\n", core->name);
541 cx8802_stop_dma(dev); 460 cx8802_stop_dma(dev);
542 del_timer(&dev->mpegq.timeout);
543 } 461 }
544 spin_unlock_irqrestore(&dev->slock, flags); 462 spin_unlock_irqrestore(&dev->slock, flags);
545 463
@@ -907,6 +825,7 @@ module_pci_driver(cx8802_pci_driver);
907EXPORT_SYMBOL(cx8802_buf_prepare); 825EXPORT_SYMBOL(cx8802_buf_prepare);
908EXPORT_SYMBOL(cx8802_buf_queue); 826EXPORT_SYMBOL(cx8802_buf_queue);
909EXPORT_SYMBOL(cx8802_cancel_buffers); 827EXPORT_SYMBOL(cx8802_cancel_buffers);
828EXPORT_SYMBOL(cx8802_start_dma);
910 829
911EXPORT_SYMBOL(cx8802_register_driver); 830EXPORT_SYMBOL(cx8802_register_driver);
912EXPORT_SYMBOL(cx8802_unregister_driver); 831EXPORT_SYMBOL(cx8802_unregister_driver);
diff --git a/drivers/media/pci/cx88/cx88-vbi.c b/drivers/media/pci/cx88/cx88-vbi.c
index f8f8389c0362..6ab6e27648f6 100644
--- a/drivers/media/pci/cx88/cx88-vbi.c
+++ b/drivers/media/pci/cx88/cx88-vbi.c
@@ -6,10 +6,6 @@
6 6
7#include "cx88.h" 7#include "cx88.h"
8 8
9static unsigned int vbibufs = 4;
10module_param(vbibufs,int,0644);
11MODULE_PARM_DESC(vbibufs,"number of vbi buffers, range 2-32");
12
13static unsigned int vbi_debug; 9static unsigned int vbi_debug;
14module_param(vbi_debug,int,0644); 10module_param(vbi_debug,int,0644);
15MODULE_PARM_DESC(vbi_debug,"enable debug messages [vbi]"); 11MODULE_PARM_DESC(vbi_debug,"enable debug messages [vbi]");
@@ -22,26 +18,27 @@ MODULE_PARM_DESC(vbi_debug,"enable debug messages [vbi]");
22int cx8800_vbi_fmt (struct file *file, void *priv, 18int cx8800_vbi_fmt (struct file *file, void *priv,
23 struct v4l2_format *f) 19 struct v4l2_format *f)
24{ 20{
25 struct cx8800_fh *fh = priv; 21 struct cx8800_dev *dev = video_drvdata(file);
26 struct cx8800_dev *dev = fh->dev;
27 22
28 f->fmt.vbi.samples_per_line = VBI_LINE_LENGTH; 23 f->fmt.vbi.samples_per_line = VBI_LINE_LENGTH;
29 f->fmt.vbi.sample_format = V4L2_PIX_FMT_GREY; 24 f->fmt.vbi.sample_format = V4L2_PIX_FMT_GREY;
30 f->fmt.vbi.offset = 244; 25 f->fmt.vbi.offset = 244;
31 f->fmt.vbi.count[0] = VBI_LINE_COUNT;
32 f->fmt.vbi.count[1] = VBI_LINE_COUNT;
33 26
34 if (dev->core->tvnorm & V4L2_STD_525_60) { 27 if (dev->core->tvnorm & V4L2_STD_525_60) {
35 /* ntsc */ 28 /* ntsc */
36 f->fmt.vbi.sampling_rate = 28636363; 29 f->fmt.vbi.sampling_rate = 28636363;
37 f->fmt.vbi.start[0] = 10; 30 f->fmt.vbi.start[0] = 10;
38 f->fmt.vbi.start[1] = 273; 31 f->fmt.vbi.start[1] = 273;
32 f->fmt.vbi.count[0] = VBI_LINE_NTSC_COUNT;
33 f->fmt.vbi.count[1] = VBI_LINE_NTSC_COUNT;
39 34
40 } else if (dev->core->tvnorm & V4L2_STD_625_50) { 35 } else if (dev->core->tvnorm & V4L2_STD_625_50) {
41 /* pal */ 36 /* pal */
42 f->fmt.vbi.sampling_rate = 35468950; 37 f->fmt.vbi.sampling_rate = 35468950;
43 f->fmt.vbi.start[0] = 7 -1; 38 f->fmt.vbi.start[0] = V4L2_VBI_ITU_625_F1_START + 5;
44 f->fmt.vbi.start[1] = 319 -1; 39 f->fmt.vbi.start[1] = V4L2_VBI_ITU_625_F2_START + 5;
40 f->fmt.vbi.count[0] = VBI_LINE_PAL_COUNT;
41 f->fmt.vbi.count[1] = VBI_LINE_PAL_COUNT;
45 } 42 }
46 return 0; 43 return 0;
47} 44}
@@ -54,7 +51,7 @@ static int cx8800_start_vbi_dma(struct cx8800_dev *dev,
54 51
55 /* setup fifo + format */ 52 /* setup fifo + format */
56 cx88_sram_channel_setup(dev->core, &cx88_sram_channels[SRAM_CH24], 53 cx88_sram_channel_setup(dev->core, &cx88_sram_channels[SRAM_CH24],
57 buf->vb.width, buf->risc.dma); 54 VBI_LINE_LENGTH, buf->risc.dma);
58 55
59 cx_write(MO_VBOS_CONTROL, ( (1 << 18) | // comb filter delay fixup 56 cx_write(MO_VBOS_CONTROL, ( (1 << 18) | // comb filter delay fixup
60 (1 << 15) | // enable vbi capture 57 (1 << 15) | // enable vbi capture
@@ -78,7 +75,7 @@ static int cx8800_start_vbi_dma(struct cx8800_dev *dev,
78 return 0; 75 return 0;
79} 76}
80 77
81int cx8800_stop_vbi_dma(struct cx8800_dev *dev) 78void cx8800_stop_vbi_dma(struct cx8800_dev *dev)
82{ 79{
83 struct cx88_core *core = dev->core; 80 struct cx88_core *core = dev->core;
84 81
@@ -91,7 +88,6 @@ int cx8800_stop_vbi_dma(struct cx8800_dev *dev)
91 /* disable irqs */ 88 /* disable irqs */
92 cx_clear(MO_PCI_INTMSK, PCI_INT_VIDINT); 89 cx_clear(MO_PCI_INTMSK, PCI_INT_VIDINT);
93 cx_clear(MO_VID_INTMSK, 0x0f0088); 90 cx_clear(MO_VID_INTMSK, 0x0f0088);
94 return 0;
95} 91}
96 92
97int cx8800_restart_vbi_queue(struct cx8800_dev *dev, 93int cx8800_restart_vbi_queue(struct cx8800_dev *dev,
@@ -102,144 +98,144 @@ int cx8800_restart_vbi_queue(struct cx8800_dev *dev,
102 if (list_empty(&q->active)) 98 if (list_empty(&q->active))
103 return 0; 99 return 0;
104 100
105 buf = list_entry(q->active.next, struct cx88_buffer, vb.queue); 101 buf = list_entry(q->active.next, struct cx88_buffer, list);
106 dprintk(2,"restart_queue [%p/%d]: restart dma\n", 102 dprintk(2,"restart_queue [%p/%d]: restart dma\n",
107 buf, buf->vb.i); 103 buf, buf->vb.v4l2_buf.index);
108 cx8800_start_vbi_dma(dev, q, buf); 104 cx8800_start_vbi_dma(dev, q, buf);
109 list_for_each_entry(buf, &q->active, vb.queue) 105 list_for_each_entry(buf, &q->active, list)
110 buf->count = q->count++; 106 buf->count = q->count++;
111 mod_timer(&q->timeout, jiffies+BUFFER_TIMEOUT);
112 return 0; 107 return 0;
113} 108}
114 109
115void cx8800_vbi_timeout(unsigned long data)
116{
117 struct cx8800_dev *dev = (struct cx8800_dev*)data;
118 struct cx88_core *core = dev->core;
119 struct cx88_dmaqueue *q = &dev->vbiq;
120 struct cx88_buffer *buf;
121 unsigned long flags;
122
123 cx88_sram_channel_dump(dev->core, &cx88_sram_channels[SRAM_CH24]);
124
125 cx_clear(MO_VID_DMACNTRL, 0x88);
126 cx_clear(VID_CAPTURE_CONTROL, 0x18);
127
128 spin_lock_irqsave(&dev->slock,flags);
129 while (!list_empty(&q->active)) {
130 buf = list_entry(q->active.next, struct cx88_buffer, vb.queue);
131 list_del(&buf->vb.queue);
132 buf->vb.state = VIDEOBUF_ERROR;
133 wake_up(&buf->vb.done);
134 printk("%s/0: [%p/%d] timeout - dma=0x%08lx\n", dev->core->name,
135 buf, buf->vb.i, (unsigned long)buf->risc.dma);
136 }
137 cx8800_restart_vbi_queue(dev,q);
138 spin_unlock_irqrestore(&dev->slock,flags);
139}
140
141/* ------------------------------------------------------------------ */ 110/* ------------------------------------------------------------------ */
142 111
143static int 112static int queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
144vbi_setup(struct videobuf_queue *q, unsigned int *count, unsigned int *size) 113 unsigned int *num_buffers, unsigned int *num_planes,
114 unsigned int sizes[], void *alloc_ctxs[])
145{ 115{
146 *size = VBI_LINE_COUNT * VBI_LINE_LENGTH * 2; 116 struct cx8800_dev *dev = q->drv_priv;
147 if (0 == *count) 117
148 *count = vbibufs; 118 *num_planes = 1;
149 if (*count < 2) 119 if (dev->core->tvnorm & V4L2_STD_525_60)
150 *count = 2; 120 sizes[0] = VBI_LINE_NTSC_COUNT * VBI_LINE_LENGTH * 2;
151 if (*count > 32) 121 else
152 *count = 32; 122 sizes[0] = VBI_LINE_PAL_COUNT * VBI_LINE_LENGTH * 2;
153 return 0; 123 return 0;
154} 124}
155 125
156static int 126
157vbi_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb, 127static int buffer_prepare(struct vb2_buffer *vb)
158 enum v4l2_field field)
159{ 128{
160 struct cx8800_fh *fh = q->priv_data; 129 struct cx8800_dev *dev = vb->vb2_queue->drv_priv;
161 struct cx8800_dev *dev = fh->dev; 130 struct cx88_buffer *buf = container_of(vb, struct cx88_buffer, vb);
162 struct cx88_buffer *buf = container_of(vb,struct cx88_buffer,vb); 131 struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
132 unsigned int lines;
163 unsigned int size; 133 unsigned int size;
164 int rc; 134 int rc;
165 135
166 size = VBI_LINE_COUNT * VBI_LINE_LENGTH * 2; 136 if (dev->core->tvnorm & V4L2_STD_525_60)
167 if (0 != buf->vb.baddr && buf->vb.bsize < size) 137 lines = VBI_LINE_NTSC_COUNT;
138 else
139 lines = VBI_LINE_PAL_COUNT;
140 size = lines * VBI_LINE_LENGTH * 2;
141 if (vb2_plane_size(vb, 0) < size)
168 return -EINVAL; 142 return -EINVAL;
143 vb2_set_plane_payload(vb, 0, size);
169 144
170 if (VIDEOBUF_NEEDS_INIT == buf->vb.state) { 145 rc = dma_map_sg(&dev->pci->dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
171 struct videobuf_dmabuf *dma=videobuf_to_dma(&buf->vb); 146 if (!rc)
172 buf->vb.width = VBI_LINE_LENGTH; 147 return -EIO;
173 buf->vb.height = VBI_LINE_COUNT; 148
174 buf->vb.size = size; 149 cx88_risc_buffer(dev->pci, &buf->risc, sgt->sgl,
175 buf->vb.field = V4L2_FIELD_SEQ_TB; 150 0, VBI_LINE_LENGTH * lines,
176 151 VBI_LINE_LENGTH, 0,
177 if (0 != (rc = videobuf_iolock(q,&buf->vb,NULL))) 152 lines);
178 goto fail;
179 cx88_risc_buffer(dev->pci, &buf->risc,
180 dma->sglist,
181 0, buf->vb.width * buf->vb.height,
182 buf->vb.width, 0,
183 buf->vb.height);
184 }
185 buf->vb.state = VIDEOBUF_PREPARED;
186 return 0; 153 return 0;
154}
187 155
188 fail: 156static void buffer_finish(struct vb2_buffer *vb)
189 cx88_free_buffer(q,buf); 157{
190 return rc; 158 struct cx8800_dev *dev = vb->vb2_queue->drv_priv;
159 struct cx88_buffer *buf = container_of(vb, struct cx88_buffer, vb);
160 struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
161 struct cx88_riscmem *risc = &buf->risc;
162
163 if (risc->cpu)
164 pci_free_consistent(dev->pci, risc->size, risc->cpu, risc->dma);
165 memset(risc, 0, sizeof(*risc));
166
167 dma_unmap_sg(&dev->pci->dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
191} 168}
192 169
193static void 170static void buffer_queue(struct vb2_buffer *vb)
194vbi_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
195{ 171{
196 struct cx88_buffer *buf = container_of(vb,struct cx88_buffer,vb); 172 struct cx8800_dev *dev = vb->vb2_queue->drv_priv;
173 struct cx88_buffer *buf = container_of(vb, struct cx88_buffer, vb);
197 struct cx88_buffer *prev; 174 struct cx88_buffer *prev;
198 struct cx8800_fh *fh = vq->priv_data;
199 struct cx8800_dev *dev = fh->dev;
200 struct cx88_dmaqueue *q = &dev->vbiq; 175 struct cx88_dmaqueue *q = &dev->vbiq;
201 176
202 /* add jump to stopper */ 177 /* add jump to start */
203 buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_IRQ1 | RISC_CNT_INC); 178 buf->risc.cpu[1] = cpu_to_le32(buf->risc.dma + 8);
204 buf->risc.jmp[1] = cpu_to_le32(q->stopper.dma); 179 buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_CNT_INC);
180 buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma + 8);
205 181
206 if (list_empty(&q->active)) { 182 if (list_empty(&q->active)) {
207 list_add_tail(&buf->vb.queue,&q->active); 183 list_add_tail(&buf->list, &q->active);
208 cx8800_start_vbi_dma(dev, q, buf); 184 cx8800_start_vbi_dma(dev, q, buf);
209 buf->vb.state = VIDEOBUF_ACTIVE;
210 buf->count = q->count++; 185 buf->count = q->count++;
211 mod_timer(&q->timeout, jiffies+BUFFER_TIMEOUT);
212 dprintk(2,"[%p/%d] vbi_queue - first active\n", 186 dprintk(2,"[%p/%d] vbi_queue - first active\n",
213 buf, buf->vb.i); 187 buf, buf->vb.v4l2_buf.index);
214 188
215 } else { 189 } else {
216 prev = list_entry(q->active.prev, struct cx88_buffer, vb.queue); 190 buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
217 list_add_tail(&buf->vb.queue,&q->active); 191 prev = list_entry(q->active.prev, struct cx88_buffer, list);
218 buf->vb.state = VIDEOBUF_ACTIVE; 192 list_add_tail(&buf->list, &q->active);
219 buf->count = q->count++; 193 buf->count = q->count++;
220 prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma); 194 prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
221 dprintk(2,"[%p/%d] buffer_queue - append to active\n", 195 dprintk(2,"[%p/%d] buffer_queue - append to active\n",
222 buf, buf->vb.i); 196 buf, buf->vb.v4l2_buf.index);
223 } 197 }
224} 198}
225 199
226static void vbi_release(struct videobuf_queue *q, struct videobuf_buffer *vb) 200static int start_streaming(struct vb2_queue *q, unsigned int count)
227{ 201{
228 struct cx88_buffer *buf = container_of(vb,struct cx88_buffer,vb); 202 struct cx8800_dev *dev = q->drv_priv;
203 struct cx88_dmaqueue *dmaq = &dev->vbiq;
204 struct cx88_buffer *buf = list_entry(dmaq->active.next,
205 struct cx88_buffer, list);
229 206
230 cx88_free_buffer(q,buf); 207 cx8800_start_vbi_dma(dev, dmaq, buf);
208 return 0;
231} 209}
232 210
233const struct videobuf_queue_ops cx8800_vbi_qops = { 211static void stop_streaming(struct vb2_queue *q)
234 .buf_setup = vbi_setup, 212{
235 .buf_prepare = vbi_prepare, 213 struct cx8800_dev *dev = q->drv_priv;
236 .buf_queue = vbi_queue, 214 struct cx88_core *core = dev->core;
237 .buf_release = vbi_release, 215 struct cx88_dmaqueue *dmaq = &dev->vbiq;
238}; 216 unsigned long flags;
239 217
240/* ------------------------------------------------------------------ */ 218 cx_clear(MO_VID_DMACNTRL, 0x11);
241/* 219 cx_clear(VID_CAPTURE_CONTROL, 0x06);
242 * Local variables: 220 cx8800_stop_vbi_dma(dev);
243 * c-basic-offset: 8 221 spin_lock_irqsave(&dev->slock, flags);
244 * End: 222 while (!list_empty(&dmaq->active)) {
245 */ 223 struct cx88_buffer *buf = list_entry(dmaq->active.next,
224 struct cx88_buffer, list);
225
226 list_del(&buf->list);
227 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
228 }
229 spin_unlock_irqrestore(&dev->slock, flags);
230}
231
232const struct vb2_ops cx8800_vbi_qops = {
233 .queue_setup = queue_setup,
234 .buf_prepare = buffer_prepare,
235 .buf_finish = buffer_finish,
236 .buf_queue = buffer_queue,
237 .wait_prepare = vb2_ops_wait_prepare,
238 .wait_finish = vb2_ops_wait_finish,
239 .start_streaming = start_streaming,
240 .stop_streaming = stop_streaming,
241};
diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
index ce27e6d4f16e..a64ae31ae142 100644
--- a/drivers/media/pci/cx88/cx88-video.c
+++ b/drivers/media/pci/cx88/cx88-video.c
@@ -70,10 +70,6 @@ static unsigned int irq_debug;
70module_param(irq_debug,int,0644); 70module_param(irq_debug,int,0644);
71MODULE_PARM_DESC(irq_debug,"enable debug messages [IRQ handler]"); 71MODULE_PARM_DESC(irq_debug,"enable debug messages [IRQ handler]");
72 72
73static unsigned int vid_limit = 16;
74module_param(vid_limit,int,0644);
75MODULE_PARM_DESC(vid_limit,"capture memory limit in megabytes");
76
77#define dprintk(level,fmt, arg...) if (video_debug >= level) \ 73#define dprintk(level,fmt, arg...) if (video_debug >= level) \
78 printk(KERN_DEBUG "%s/0: " fmt, core->name , ## arg) 74 printk(KERN_DEBUG "%s/0: " fmt, core->name , ## arg)
79 75
@@ -297,56 +293,6 @@ enum {
297 CX8800_AUD_CTLS = ARRAY_SIZE(cx8800_aud_ctls), 293 CX8800_AUD_CTLS = ARRAY_SIZE(cx8800_aud_ctls),
298}; 294};
299 295
300/* ------------------------------------------------------------------- */
301/* resource management */
302
303static int res_get(struct cx8800_dev *dev, struct cx8800_fh *fh, unsigned int bit)
304{
305 struct cx88_core *core = dev->core;
306 if (fh->resources & bit)
307 /* have it already allocated */
308 return 1;
309
310 /* is it free? */
311 mutex_lock(&core->lock);
312 if (dev->resources & bit) {
313 /* no, someone else uses it */
314 mutex_unlock(&core->lock);
315 return 0;
316 }
317 /* it's free, grab it */
318 fh->resources |= bit;
319 dev->resources |= bit;
320 dprintk(1,"res: get %d\n",bit);
321 mutex_unlock(&core->lock);
322 return 1;
323}
324
325static
326int res_check(struct cx8800_fh *fh, unsigned int bit)
327{
328 return (fh->resources & bit);
329}
330
331static
332int res_locked(struct cx8800_dev *dev, unsigned int bit)
333{
334 return (dev->resources & bit);
335}
336
337static
338void res_free(struct cx8800_dev *dev, struct cx8800_fh *fh, unsigned int bits)
339{
340 struct cx88_core *core = dev->core;
341 BUG_ON((fh->resources & bits) != bits);
342
343 mutex_lock(&core->lock);
344 fh->resources &= ~bits;
345 dev->resources &= ~bits;
346 dprintk(1,"res: put %d\n",bits);
347 mutex_unlock(&core->lock);
348}
349
350/* ------------------------------------------------------------------ */ 296/* ------------------------------------------------------------------ */
351 297
352int cx88_video_mux(struct cx88_core *core, unsigned int input) 298int cx88_video_mux(struct cx88_core *core, unsigned int input)
@@ -419,8 +365,8 @@ static int start_video_dma(struct cx8800_dev *dev,
419 /* setup fifo + format */ 365 /* setup fifo + format */
420 cx88_sram_channel_setup(core, &cx88_sram_channels[SRAM_CH21], 366 cx88_sram_channel_setup(core, &cx88_sram_channels[SRAM_CH21],
421 buf->bpl, buf->risc.dma); 367 buf->bpl, buf->risc.dma);
422 cx88_set_scale(core, buf->vb.width, buf->vb.height, buf->vb.field); 368 cx88_set_scale(core, core->width, core->height, core->field);
423 cx_write(MO_COLOR_CTRL, buf->fmt->cxformat | ColorFormatGamma); 369 cx_write(MO_COLOR_CTRL, dev->fmt->cxformat | ColorFormatGamma);
424 370
425 /* reset counter */ 371 /* reset counter */
426 cx_write(MO_VIDY_GPCNTRL,GP_COUNT_CONTROL_RESET); 372 cx_write(MO_VIDY_GPCNTRL,GP_COUNT_CONTROL_RESET);
@@ -470,433 +416,211 @@ static int restart_video_queue(struct cx8800_dev *dev,
470 struct cx88_dmaqueue *q) 416 struct cx88_dmaqueue *q)
471{ 417{
472 struct cx88_core *core = dev->core; 418 struct cx88_core *core = dev->core;
473 struct cx88_buffer *buf, *prev; 419 struct cx88_buffer *buf;
474 420
475 if (!list_empty(&q->active)) { 421 if (!list_empty(&q->active)) {
476 buf = list_entry(q->active.next, struct cx88_buffer, vb.queue); 422 buf = list_entry(q->active.next, struct cx88_buffer, list);
477 dprintk(2,"restart_queue [%p/%d]: restart dma\n", 423 dprintk(2,"restart_queue [%p/%d]: restart dma\n",
478 buf, buf->vb.i); 424 buf, buf->vb.v4l2_buf.index);
479 start_video_dma(dev, q, buf); 425 start_video_dma(dev, q, buf);
480 list_for_each_entry(buf, &q->active, vb.queue) 426 list_for_each_entry(buf, &q->active, list)
481 buf->count = q->count++; 427 buf->count = q->count++;
482 mod_timer(&q->timeout, jiffies+BUFFER_TIMEOUT);
483 return 0;
484 }
485
486 prev = NULL;
487 for (;;) {
488 if (list_empty(&q->queued))
489 return 0;
490 buf = list_entry(q->queued.next, struct cx88_buffer, vb.queue);
491 if (NULL == prev) {
492 list_move_tail(&buf->vb.queue, &q->active);
493 start_video_dma(dev, q, buf);
494 buf->vb.state = VIDEOBUF_ACTIVE;
495 buf->count = q->count++;
496 mod_timer(&q->timeout, jiffies+BUFFER_TIMEOUT);
497 dprintk(2,"[%p/%d] restart_queue - first active\n",
498 buf,buf->vb.i);
499
500 } else if (prev->vb.width == buf->vb.width &&
501 prev->vb.height == buf->vb.height &&
502 prev->fmt == buf->fmt) {
503 list_move_tail(&buf->vb.queue, &q->active);
504 buf->vb.state = VIDEOBUF_ACTIVE;
505 buf->count = q->count++;
506 prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
507 dprintk(2,"[%p/%d] restart_queue - move to active\n",
508 buf,buf->vb.i);
509 } else {
510 return 0;
511 }
512 prev = buf;
513 } 428 }
429 return 0;
514} 430}
515 431
516/* ------------------------------------------------------------------ */ 432/* ------------------------------------------------------------------ */
517 433
518static int 434static int queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
519buffer_setup(struct videobuf_queue *q, unsigned int *count, unsigned int *size) 435 unsigned int *num_buffers, unsigned int *num_planes,
436 unsigned int sizes[], void *alloc_ctxs[])
520{ 437{
521 struct cx8800_fh *fh = q->priv_data; 438 struct cx8800_dev *dev = q->drv_priv;
522 struct cx8800_dev *dev = fh->dev; 439 struct cx88_core *core = dev->core;
523 440
524 *size = dev->fmt->depth * dev->width * dev->height >> 3; 441 *num_planes = 1;
525 if (0 == *count) 442 sizes[0] = (dev->fmt->depth * core->width * core->height) >> 3;
526 *count = 32;
527 if (*size * *count > vid_limit * 1024 * 1024)
528 *count = (vid_limit * 1024 * 1024) / *size;
529 return 0; 443 return 0;
530} 444}
531 445
532static int 446static int buffer_prepare(struct vb2_buffer *vb)
533buffer_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb,
534 enum v4l2_field field)
535{ 447{
536 struct cx8800_fh *fh = q->priv_data; 448 struct cx8800_dev *dev = vb->vb2_queue->drv_priv;
537 struct cx8800_dev *dev = fh->dev;
538 struct cx88_core *core = dev->core; 449 struct cx88_core *core = dev->core;
539 struct cx88_buffer *buf = container_of(vb,struct cx88_buffer,vb); 450 struct cx88_buffer *buf = container_of(vb, struct cx88_buffer, vb);
540 struct videobuf_dmabuf *dma=videobuf_to_dma(&buf->vb); 451 struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
541 int rc, init_buffer = 0; 452 int rc;
542 453
543 BUG_ON(NULL == dev->fmt); 454 buf->bpl = core->width * dev->fmt->depth >> 3;
544 if (dev->width < 48 || dev->width > norm_maxw(core->tvnorm) ||
545 dev->height < 32 || dev->height > norm_maxh(core->tvnorm))
546 return -EINVAL;
547 buf->vb.size = (dev->width * dev->height * dev->fmt->depth) >> 3;
548 if (0 != buf->vb.baddr && buf->vb.bsize < buf->vb.size)
549 return -EINVAL;
550 455
551 if (buf->fmt != dev->fmt || 456 if (vb2_plane_size(vb, 0) < core->height * buf->bpl)
552 buf->vb.width != dev->width || 457 return -EINVAL;
553 buf->vb.height != dev->height || 458 vb2_set_plane_payload(vb, 0, core->height * buf->bpl);
554 buf->vb.field != field) {
555 buf->fmt = dev->fmt;
556 buf->vb.width = dev->width;
557 buf->vb.height = dev->height;
558 buf->vb.field = field;
559 init_buffer = 1;
560 }
561 459
562 if (VIDEOBUF_NEEDS_INIT == buf->vb.state) { 460 rc = dma_map_sg(&dev->pci->dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
563 init_buffer = 1; 461 if (!rc)
564 if (0 != (rc = videobuf_iolock(q,&buf->vb,NULL))) 462 return -EIO;
565 goto fail;
566 }
567 463
568 if (init_buffer) { 464 switch (core->field) {
569 buf->bpl = buf->vb.width * buf->fmt->depth >> 3; 465 case V4L2_FIELD_TOP:
570 switch (buf->vb.field) { 466 cx88_risc_buffer(dev->pci, &buf->risc,
571 case V4L2_FIELD_TOP: 467 sgt->sgl, 0, UNSET,
572 cx88_risc_buffer(dev->pci, &buf->risc, 468 buf->bpl, 0, core->height);
573 dma->sglist, 0, UNSET, 469 break;
574 buf->bpl, 0, buf->vb.height); 470 case V4L2_FIELD_BOTTOM:
575 break; 471 cx88_risc_buffer(dev->pci, &buf->risc,
576 case V4L2_FIELD_BOTTOM: 472 sgt->sgl, UNSET, 0,
577 cx88_risc_buffer(dev->pci, &buf->risc, 473 buf->bpl, 0, core->height);
578 dma->sglist, UNSET, 0, 474 break;
579 buf->bpl, 0, buf->vb.height); 475 case V4L2_FIELD_SEQ_TB:
580 break; 476 cx88_risc_buffer(dev->pci, &buf->risc,
581 case V4L2_FIELD_INTERLACED: 477 sgt->sgl,
582 cx88_risc_buffer(dev->pci, &buf->risc, 478 0, buf->bpl * (core->height >> 1),
583 dma->sglist, 0, buf->bpl, 479 buf->bpl, 0,
584 buf->bpl, buf->bpl, 480 core->height >> 1);
585 buf->vb.height >> 1); 481 break;
586 break; 482 case V4L2_FIELD_SEQ_BT:
587 case V4L2_FIELD_SEQ_TB: 483 cx88_risc_buffer(dev->pci, &buf->risc,
588 cx88_risc_buffer(dev->pci, &buf->risc, 484 sgt->sgl,
589 dma->sglist, 485 buf->bpl * (core->height >> 1), 0,
590 0, buf->bpl * (buf->vb.height >> 1), 486 buf->bpl, 0,
591 buf->bpl, 0, 487 core->height >> 1);
592 buf->vb.height >> 1); 488 break;
593 break; 489 case V4L2_FIELD_INTERLACED:
594 case V4L2_FIELD_SEQ_BT: 490 default:
595 cx88_risc_buffer(dev->pci, &buf->risc, 491 cx88_risc_buffer(dev->pci, &buf->risc,
596 dma->sglist, 492 sgt->sgl, 0, buf->bpl,
597 buf->bpl * (buf->vb.height >> 1), 0, 493 buf->bpl, buf->bpl,
598 buf->bpl, 0, 494 core->height >> 1);
599 buf->vb.height >> 1); 495 break;
600 break;
601 default:
602 BUG();
603 }
604 } 496 }
605 dprintk(2,"[%p/%d] buffer_prepare - %dx%d %dbpp \"%s\" - dma=0x%08lx\n", 497 dprintk(2,"[%p/%d] buffer_prepare - %dx%d %dbpp \"%s\" - dma=0x%08lx\n",
606 buf, buf->vb.i, 498 buf, buf->vb.v4l2_buf.index,
607 dev->width, dev->height, dev->fmt->depth, dev->fmt->name, 499 core->width, core->height, dev->fmt->depth, dev->fmt->name,
608 (unsigned long)buf->risc.dma); 500 (unsigned long)buf->risc.dma);
609
610 buf->vb.state = VIDEOBUF_PREPARED;
611 return 0; 501 return 0;
502}
503
504static void buffer_finish(struct vb2_buffer *vb)
505{
506 struct cx8800_dev *dev = vb->vb2_queue->drv_priv;
507 struct cx88_buffer *buf = container_of(vb, struct cx88_buffer, vb);
508 struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
509 struct cx88_riscmem *risc = &buf->risc;
510
511 if (risc->cpu)
512 pci_free_consistent(dev->pci, risc->size, risc->cpu, risc->dma);
513 memset(risc, 0, sizeof(*risc));
612 514
613 fail: 515 dma_unmap_sg(&dev->pci->dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
614 cx88_free_buffer(q,buf);
615 return rc;
616} 516}
617 517
618static void 518static void buffer_queue(struct vb2_buffer *vb)
619buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
620{ 519{
621 struct cx88_buffer *buf = container_of(vb,struct cx88_buffer,vb); 520 struct cx8800_dev *dev = vb->vb2_queue->drv_priv;
521 struct cx88_buffer *buf = container_of(vb, struct cx88_buffer, vb);
622 struct cx88_buffer *prev; 522 struct cx88_buffer *prev;
623 struct cx8800_fh *fh = vq->priv_data;
624 struct cx8800_dev *dev = fh->dev;
625 struct cx88_core *core = dev->core; 523 struct cx88_core *core = dev->core;
626 struct cx88_dmaqueue *q = &dev->vidq; 524 struct cx88_dmaqueue *q = &dev->vidq;
627 525
628 /* add jump to stopper */ 526 /* add jump to start */
629 buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_IRQ1 | RISC_CNT_INC); 527 buf->risc.cpu[1] = cpu_to_le32(buf->risc.dma + 8);
630 buf->risc.jmp[1] = cpu_to_le32(q->stopper.dma); 528 buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_CNT_INC);
529 buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma + 8);
631 530
632 if (!list_empty(&q->queued)) { 531 if (list_empty(&q->active)) {
633 list_add_tail(&buf->vb.queue,&q->queued); 532 list_add_tail(&buf->list, &q->active);
634 buf->vb.state = VIDEOBUF_QUEUED;
635 dprintk(2,"[%p/%d] buffer_queue - append to queued\n",
636 buf, buf->vb.i);
637
638 } else if (list_empty(&q->active)) {
639 list_add_tail(&buf->vb.queue,&q->active);
640 start_video_dma(dev, q, buf); 533 start_video_dma(dev, q, buf);
641 buf->vb.state = VIDEOBUF_ACTIVE;
642 buf->count = q->count++; 534 buf->count = q->count++;
643 mod_timer(&q->timeout, jiffies+BUFFER_TIMEOUT);
644 dprintk(2,"[%p/%d] buffer_queue - first active\n", 535 dprintk(2,"[%p/%d] buffer_queue - first active\n",
645 buf, buf->vb.i); 536 buf, buf->vb.v4l2_buf.index);
646 537
647 } else { 538 } else {
648 prev = list_entry(q->active.prev, struct cx88_buffer, vb.queue); 539 buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
649 if (prev->vb.width == buf->vb.width && 540 prev = list_entry(q->active.prev, struct cx88_buffer, list);
650 prev->vb.height == buf->vb.height && 541 list_add_tail(&buf->list, &q->active);
651 prev->fmt == buf->fmt) { 542 buf->count = q->count++;
652 list_add_tail(&buf->vb.queue,&q->active); 543 prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
653 buf->vb.state = VIDEOBUF_ACTIVE; 544 dprintk(2, "[%p/%d] buffer_queue - append to active\n",
654 buf->count = q->count++; 545 buf, buf->vb.v4l2_buf.index);
655 prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
656 dprintk(2,"[%p/%d] buffer_queue - append to active\n",
657 buf, buf->vb.i);
658
659 } else {
660 list_add_tail(&buf->vb.queue,&q->queued);
661 buf->vb.state = VIDEOBUF_QUEUED;
662 dprintk(2,"[%p/%d] buffer_queue - first queued\n",
663 buf, buf->vb.i);
664 }
665 } 546 }
666} 547}
667 548
668static void buffer_release(struct videobuf_queue *q, struct videobuf_buffer *vb) 549static int start_streaming(struct vb2_queue *q, unsigned int count)
669{ 550{
670 struct cx88_buffer *buf = container_of(vb,struct cx88_buffer,vb); 551 struct cx8800_dev *dev = q->drv_priv;
552 struct cx88_dmaqueue *dmaq = &dev->vidq;
553 struct cx88_buffer *buf = list_entry(dmaq->active.next,
554 struct cx88_buffer, list);
671 555
672 cx88_free_buffer(q,buf); 556 start_video_dma(dev, dmaq, buf);
557 return 0;
673} 558}
674 559
675static const struct videobuf_queue_ops cx8800_video_qops = { 560static void stop_streaming(struct vb2_queue *q)
676 .buf_setup = buffer_setup,
677 .buf_prepare = buffer_prepare,
678 .buf_queue = buffer_queue,
679 .buf_release = buffer_release,
680};
681
682/* ------------------------------------------------------------------ */
683
684
685/* ------------------------------------------------------------------ */
686
687static struct videobuf_queue *get_queue(struct file *file)
688{ 561{
689 struct video_device *vdev = video_devdata(file); 562 struct cx8800_dev *dev = q->drv_priv;
690 struct cx8800_fh *fh = file->private_data; 563 struct cx88_core *core = dev->core;
564 struct cx88_dmaqueue *dmaq = &dev->vidq;
565 unsigned long flags;
691 566
692 switch (vdev->vfl_type) { 567 cx_clear(MO_VID_DMACNTRL, 0x11);
693 case VFL_TYPE_GRABBER: 568 cx_clear(VID_CAPTURE_CONTROL, 0x06);
694 return &fh->vidq; 569 spin_lock_irqsave(&dev->slock, flags);
695 case VFL_TYPE_VBI: 570 while (!list_empty(&dmaq->active)) {
696 return &fh->vbiq; 571 struct cx88_buffer *buf = list_entry(dmaq->active.next,
697 default: 572 struct cx88_buffer, list);
698 BUG(); 573
574 list_del(&buf->list);
575 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
699 } 576 }
577 spin_unlock_irqrestore(&dev->slock, flags);
700} 578}
701 579
702static int get_resource(struct file *file) 580static struct vb2_ops cx8800_video_qops = {
703{ 581 .queue_setup = queue_setup,
704 struct video_device *vdev = video_devdata(file); 582 .buf_prepare = buffer_prepare,
583 .buf_finish = buffer_finish,
584 .buf_queue = buffer_queue,
585 .wait_prepare = vb2_ops_wait_prepare,
586 .wait_finish = vb2_ops_wait_finish,
587 .start_streaming = start_streaming,
588 .stop_streaming = stop_streaming,
589};
705 590
706 switch (vdev->vfl_type) { 591/* ------------------------------------------------------------------ */
707 case VFL_TYPE_GRABBER:
708 return RESOURCE_VIDEO;
709 case VFL_TYPE_VBI:
710 return RESOURCE_VBI;
711 default:
712 BUG();
713 }
714}
715 592
716static int video_open(struct file *file) 593static int radio_open(struct file *file)
717{ 594{
718 struct video_device *vdev = video_devdata(file);
719 struct cx8800_dev *dev = video_drvdata(file); 595 struct cx8800_dev *dev = video_drvdata(file);
720 struct cx88_core *core = dev->core; 596 struct cx88_core *core = dev->core;
721 struct cx8800_fh *fh; 597 int ret = v4l2_fh_open(file);
722 enum v4l2_buf_type type = 0;
723 int radio = 0;
724 598
725 switch (vdev->vfl_type) { 599 if (ret)
726 case VFL_TYPE_GRABBER: 600 return ret;
727 type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
728 break;
729 case VFL_TYPE_VBI:
730 type = V4L2_BUF_TYPE_VBI_CAPTURE;
731 break;
732 case VFL_TYPE_RADIO:
733 radio = 1;
734 break;
735 }
736
737 dprintk(1, "open dev=%s radio=%d type=%s\n",
738 video_device_node_name(vdev), radio, v4l2_type_names[type]);
739
740 /* allocate + initialize per filehandle data */
741 fh = kzalloc(sizeof(*fh),GFP_KERNEL);
742 if (unlikely(!fh))
743 return -ENOMEM;
744
745 v4l2_fh_init(&fh->fh, vdev);
746 file->private_data = fh;
747 fh->dev = dev;
748 601
749 mutex_lock(&core->lock); 602 cx_write(MO_GP3_IO, core->board.radio.gpio3);
750 603 cx_write(MO_GP0_IO, core->board.radio.gpio0);
751 videobuf_queue_sg_init(&fh->vidq, &cx8800_video_qops, 604 cx_write(MO_GP1_IO, core->board.radio.gpio1);
752 &dev->pci->dev, &dev->slock, 605 cx_write(MO_GP2_IO, core->board.radio.gpio2);
753 V4L2_BUF_TYPE_VIDEO_CAPTURE, 606 if (core->board.radio.audioroute) {
754 V4L2_FIELD_INTERLACED, 607 if (core->sd_wm8775) {
755 sizeof(struct cx88_buffer), 608 call_all(core, audio, s_routing,
756 fh, NULL);
757 videobuf_queue_sg_init(&fh->vbiq, &cx8800_vbi_qops,
758 &dev->pci->dev, &dev->slock,
759 V4L2_BUF_TYPE_VBI_CAPTURE,
760 V4L2_FIELD_SEQ_TB,
761 sizeof(struct cx88_buffer),
762 fh, NULL);
763
764 if (vdev->vfl_type == VFL_TYPE_RADIO) {
765 dprintk(1,"video_open: setting radio device\n");
766 cx_write(MO_GP3_IO, core->board.radio.gpio3);
767 cx_write(MO_GP0_IO, core->board.radio.gpio0);
768 cx_write(MO_GP1_IO, core->board.radio.gpio1);
769 cx_write(MO_GP2_IO, core->board.radio.gpio2);
770 if (core->board.radio.audioroute) {
771 if (core->sd_wm8775) {
772 call_all(core, audio, s_routing,
773 core->board.radio.audioroute, 0, 0); 609 core->board.radio.audioroute, 0, 0);
774 }
775 /* "I2S ADC mode" */
776 core->tvaudio = WW_I2SADC;
777 cx88_set_tvaudio(core);
778 } else {
779 /* FM Mode */
780 core->tvaudio = WW_FM;
781 cx88_set_tvaudio(core);
782 cx88_set_stereo(core,V4L2_TUNER_MODE_STEREO,1);
783 } 610 }
784 call_all(core, tuner, s_radio); 611 /* "I2S ADC mode" */
785 } 612 core->tvaudio = WW_I2SADC;
786 613 cx88_set_tvaudio(core);
787 core->users++;
788 mutex_unlock(&core->lock);
789 v4l2_fh_add(&fh->fh);
790
791 return 0;
792}
793
794static ssize_t
795video_read(struct file *file, char __user *data, size_t count, loff_t *ppos)
796{
797 struct video_device *vdev = video_devdata(file);
798 struct cx8800_fh *fh = file->private_data;
799
800 switch (vdev->vfl_type) {
801 case VFL_TYPE_GRABBER:
802 if (res_locked(fh->dev,RESOURCE_VIDEO))
803 return -EBUSY;
804 return videobuf_read_one(&fh->vidq, data, count, ppos,
805 file->f_flags & O_NONBLOCK);
806 case VFL_TYPE_VBI:
807 if (!res_get(fh->dev,fh,RESOURCE_VBI))
808 return -EBUSY;
809 return videobuf_read_stream(&fh->vbiq, data, count, ppos, 1,
810 file->f_flags & O_NONBLOCK);
811 default:
812 BUG();
813 }
814}
815
816static unsigned int
817video_poll(struct file *file, struct poll_table_struct *wait)
818{
819 struct video_device *vdev = video_devdata(file);
820 struct cx8800_fh *fh = file->private_data;
821 struct cx88_buffer *buf;
822 unsigned int rc = v4l2_ctrl_poll(file, wait);
823
824 if (vdev->vfl_type == VFL_TYPE_VBI) {
825 if (!res_get(fh->dev,fh,RESOURCE_VBI))
826 return rc | POLLERR;
827 return rc | videobuf_poll_stream(file, &fh->vbiq, wait);
828 }
829 mutex_lock(&fh->vidq.vb_lock);
830 if (res_check(fh,RESOURCE_VIDEO)) {
831 /* streaming capture */
832 if (list_empty(&fh->vidq.stream))
833 goto done;
834 buf = list_entry(fh->vidq.stream.next,struct cx88_buffer,vb.stream);
835 } else { 614 } else {
836 /* read() capture */ 615 /* FM Mode */
837 buf = (struct cx88_buffer*)fh->vidq.read_buf; 616 core->tvaudio = WW_FM;
838 if (NULL == buf) 617 cx88_set_tvaudio(core);
839 goto done; 618 cx88_set_stereo(core, V4L2_TUNER_MODE_STEREO, 1);
840 }
841 poll_wait(file, &buf->vb.done, wait);
842 if (buf->vb.state == VIDEOBUF_DONE ||
843 buf->vb.state == VIDEOBUF_ERROR)
844 rc |= POLLIN|POLLRDNORM;
845done:
846 mutex_unlock(&fh->vidq.vb_lock);
847 return rc;
848}
849
850static int video_release(struct file *file)
851{
852 struct cx8800_fh *fh = file->private_data;
853 struct cx8800_dev *dev = fh->dev;
854
855 /* turn off overlay */
856 if (res_check(fh, RESOURCE_OVERLAY)) {
857 /* FIXME */
858 res_free(dev,fh,RESOURCE_OVERLAY);
859 }
860
861 /* stop video capture */
862 if (res_check(fh, RESOURCE_VIDEO)) {
863 videobuf_queue_cancel(&fh->vidq);
864 res_free(dev,fh,RESOURCE_VIDEO);
865 } 619 }
866 if (fh->vidq.read_buf) { 620 call_all(core, tuner, s_radio);
867 buffer_release(&fh->vidq,fh->vidq.read_buf);
868 kfree(fh->vidq.read_buf);
869 }
870
871 /* stop vbi capture */
872 if (res_check(fh, RESOURCE_VBI)) {
873 videobuf_stop(&fh->vbiq);
874 res_free(dev,fh,RESOURCE_VBI);
875 }
876
877 videobuf_mmap_free(&fh->vidq);
878 videobuf_mmap_free(&fh->vbiq);
879
880 mutex_lock(&dev->core->lock);
881 v4l2_fh_del(&fh->fh);
882 v4l2_fh_exit(&fh->fh);
883 file->private_data = NULL;
884 kfree(fh);
885
886 dev->core->users--;
887 if (!dev->core->users)
888 call_all(dev->core, core, s_power, 0);
889 mutex_unlock(&dev->core->lock);
890
891 return 0; 621 return 0;
892} 622}
893 623
894static int
895video_mmap(struct file *file, struct vm_area_struct * vma)
896{
897 return videobuf_mmap_mapper(get_queue(file), vma);
898}
899
900/* ------------------------------------------------------------------ */ 624/* ------------------------------------------------------------------ */
901/* VIDEO CTRL IOCTLS */ 625/* VIDEO CTRL IOCTLS */
902 626
@@ -999,12 +723,12 @@ static int cx8800_s_aud_ctrl(struct v4l2_ctrl *ctrl)
999static int vidioc_g_fmt_vid_cap(struct file *file, void *priv, 723static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
1000 struct v4l2_format *f) 724 struct v4l2_format *f)
1001{ 725{
1002 struct cx8800_fh *fh = priv; 726 struct cx8800_dev *dev = video_drvdata(file);
1003 struct cx8800_dev *dev = fh->dev; 727 struct cx88_core *core = dev->core;
1004 728
1005 f->fmt.pix.width = dev->width; 729 f->fmt.pix.width = core->width;
1006 f->fmt.pix.height = dev->height; 730 f->fmt.pix.height = core->height;
1007 f->fmt.pix.field = fh->vidq.field; 731 f->fmt.pix.field = core->field;
1008 f->fmt.pix.pixelformat = dev->fmt->fourcc; 732 f->fmt.pix.pixelformat = dev->fmt->fourcc;
1009 f->fmt.pix.bytesperline = 733 f->fmt.pix.bytesperline =
1010 (f->fmt.pix.width * dev->fmt->depth) >> 3; 734 (f->fmt.pix.width * dev->fmt->depth) >> 3;
@@ -1017,7 +741,8 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
1017static int vidioc_try_fmt_vid_cap(struct file *file, void *priv, 741static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
1018 struct v4l2_format *f) 742 struct v4l2_format *f)
1019{ 743{
1020 struct cx88_core *core = ((struct cx8800_fh *)priv)->dev->core; 744 struct cx8800_dev *dev = video_drvdata(file);
745 struct cx88_core *core = dev->core;
1021 const struct cx8800_fmt *fmt; 746 const struct cx8800_fmt *fmt;
1022 enum v4l2_field field; 747 enum v4l2_field field;
1023 unsigned int maxw, maxh; 748 unsigned int maxw, maxh;
@@ -1026,30 +751,30 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
1026 if (NULL == fmt) 751 if (NULL == fmt)
1027 return -EINVAL; 752 return -EINVAL;
1028 753
1029 field = f->fmt.pix.field; 754 maxw = norm_maxw(core->tvnorm);
1030 maxw = norm_maxw(core->tvnorm); 755 maxh = norm_maxh(core->tvnorm);
1031 maxh = norm_maxh(core->tvnorm);
1032 756
1033 if (V4L2_FIELD_ANY == field) { 757 field = f->fmt.pix.field;
1034 field = (f->fmt.pix.height > maxh/2)
1035 ? V4L2_FIELD_INTERLACED
1036 : V4L2_FIELD_BOTTOM;
1037 }
1038 758
1039 switch (field) { 759 switch (field) {
1040 case V4L2_FIELD_TOP: 760 case V4L2_FIELD_TOP:
1041 case V4L2_FIELD_BOTTOM: 761 case V4L2_FIELD_BOTTOM:
1042 maxh = maxh / 2;
1043 break;
1044 case V4L2_FIELD_INTERLACED: 762 case V4L2_FIELD_INTERLACED:
763 case V4L2_FIELD_SEQ_BT:
764 case V4L2_FIELD_SEQ_TB:
1045 break; 765 break;
1046 default: 766 default:
1047 return -EINVAL; 767 field = (f->fmt.pix.height > maxh / 2)
768 ? V4L2_FIELD_INTERLACED
769 : V4L2_FIELD_BOTTOM;
770 break;
1048 } 771 }
772 if (V4L2_FIELD_HAS_T_OR_B(field))
773 maxh /= 2;
1049 774
1050 f->fmt.pix.field = field;
1051 v4l_bound_align_image(&f->fmt.pix.width, 48, maxw, 2, 775 v4l_bound_align_image(&f->fmt.pix.width, 48, maxw, 2,
1052 &f->fmt.pix.height, 32, maxh, 0, 0); 776 &f->fmt.pix.height, 32, maxh, 0, 0);
777 f->fmt.pix.field = field;
1053 f->fmt.pix.bytesperline = 778 f->fmt.pix.bytesperline =
1054 (f->fmt.pix.width * fmt->depth) >> 3; 779 (f->fmt.pix.width * fmt->depth) >> 3;
1055 f->fmt.pix.sizeimage = 780 f->fmt.pix.sizeimage =
@@ -1061,16 +786,20 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
1061static int vidioc_s_fmt_vid_cap(struct file *file, void *priv, 786static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
1062 struct v4l2_format *f) 787 struct v4l2_format *f)
1063{ 788{
1064 struct cx8800_fh *fh = priv; 789 struct cx8800_dev *dev = video_drvdata(file);
1065 struct cx8800_dev *dev = fh->dev; 790 struct cx88_core *core = dev->core;
1066 int err = vidioc_try_fmt_vid_cap (file,priv,f); 791 int err = vidioc_try_fmt_vid_cap (file,priv,f);
1067 792
1068 if (0 != err) 793 if (0 != err)
1069 return err; 794 return err;
1070 dev->fmt = format_by_fourcc(f->fmt.pix.pixelformat); 795 if (vb2_is_busy(&dev->vb2_vidq) || vb2_is_busy(&dev->vb2_vbiq))
1071 dev->width = f->fmt.pix.width; 796 return -EBUSY;
1072 dev->height = f->fmt.pix.height; 797 if (core->dvbdev && vb2_is_busy(&core->dvbdev->vb2_mpegq))
1073 fh->vidq.field = f->fmt.pix.field; 798 return -EBUSY;
799 dev->fmt = format_by_fourcc(f->fmt.pix.pixelformat);
800 core->width = f->fmt.pix.width;
801 core->height = f->fmt.pix.height;
802 core->field = f->fmt.pix.field;
1074 return 0; 803 return 0;
1075} 804}
1076 805
@@ -1104,8 +833,8 @@ EXPORT_SYMBOL(cx88_querycap);
1104static int vidioc_querycap(struct file *file, void *priv, 833static int vidioc_querycap(struct file *file, void *priv,
1105 struct v4l2_capability *cap) 834 struct v4l2_capability *cap)
1106{ 835{
1107 struct cx8800_dev *dev = ((struct cx8800_fh *)priv)->dev; 836 struct cx8800_dev *dev = video_drvdata(file);
1108 struct cx88_core *core = dev->core; 837 struct cx88_core *core = dev->core;
1109 838
1110 strcpy(cap->driver, "cx8800"); 839 strcpy(cap->driver, "cx8800");
1111 sprintf(cap->bus_info, "PCI:%s", pci_name(dev->pci)); 840 sprintf(cap->bus_info, "PCI:%s", pci_name(dev->pci));
@@ -1125,64 +854,10 @@ static int vidioc_enum_fmt_vid_cap (struct file *file, void *priv,
1125 return 0; 854 return 0;
1126} 855}
1127 856
1128static int vidioc_reqbufs (struct file *file, void *priv, struct v4l2_requestbuffers *p)
1129{
1130 return videobuf_reqbufs(get_queue(file), p);
1131}
1132
1133static int vidioc_querybuf (struct file *file, void *priv, struct v4l2_buffer *p)
1134{
1135 return videobuf_querybuf(get_queue(file), p);
1136}
1137
1138static int vidioc_qbuf (struct file *file, void *priv, struct v4l2_buffer *p)
1139{
1140 return videobuf_qbuf(get_queue(file), p);
1141}
1142
1143static int vidioc_dqbuf (struct file *file, void *priv, struct v4l2_buffer *p)
1144{
1145 return videobuf_dqbuf(get_queue(file), p,
1146 file->f_flags & O_NONBLOCK);
1147}
1148
1149static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
1150{
1151 struct video_device *vdev = video_devdata(file);
1152 struct cx8800_fh *fh = priv;
1153 struct cx8800_dev *dev = fh->dev;
1154
1155 if ((vdev->vfl_type == VFL_TYPE_GRABBER && i != V4L2_BUF_TYPE_VIDEO_CAPTURE) ||
1156 (vdev->vfl_type == VFL_TYPE_VBI && i != V4L2_BUF_TYPE_VBI_CAPTURE))
1157 return -EINVAL;
1158
1159 if (unlikely(!res_get(dev, fh, get_resource(file))))
1160 return -EBUSY;
1161 return videobuf_streamon(get_queue(file));
1162}
1163
1164static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
1165{
1166 struct video_device *vdev = video_devdata(file);
1167 struct cx8800_fh *fh = priv;
1168 struct cx8800_dev *dev = fh->dev;
1169 int err, res;
1170
1171 if ((vdev->vfl_type == VFL_TYPE_GRABBER && i != V4L2_BUF_TYPE_VIDEO_CAPTURE) ||
1172 (vdev->vfl_type == VFL_TYPE_VBI && i != V4L2_BUF_TYPE_VBI_CAPTURE))
1173 return -EINVAL;
1174
1175 res = get_resource(file);
1176 err = videobuf_streamoff(get_queue(file));
1177 if (err < 0)
1178 return err;
1179 res_free(dev,fh,res);
1180 return 0;
1181}
1182
1183static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *tvnorm) 857static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *tvnorm)
1184{ 858{
1185 struct cx88_core *core = ((struct cx8800_fh *)priv)->dev->core; 859 struct cx8800_dev *dev = video_drvdata(file);
860 struct cx88_core *core = dev->core;
1186 861
1187 *tvnorm = core->tvnorm; 862 *tvnorm = core->tvnorm;
1188 return 0; 863 return 0;
@@ -1190,13 +865,10 @@ static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *tvnorm)
1190 865
1191static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id tvnorms) 866static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id tvnorms)
1192{ 867{
1193 struct cx88_core *core = ((struct cx8800_fh *)priv)->dev->core; 868 struct cx8800_dev *dev = video_drvdata(file);
1194 869 struct cx88_core *core = dev->core;
1195 mutex_lock(&core->lock);
1196 cx88_set_tvnorm(core, tvnorms);
1197 mutex_unlock(&core->lock);
1198 870
1199 return 0; 871 return cx88_set_tvnorm(core, tvnorms);
1200} 872}
1201 873
1202/* only one input in this sample driver */ 874/* only one input in this sample driver */
@@ -1233,13 +905,15 @@ EXPORT_SYMBOL(cx88_enum_input);
1233static int vidioc_enum_input (struct file *file, void *priv, 905static int vidioc_enum_input (struct file *file, void *priv,
1234 struct v4l2_input *i) 906 struct v4l2_input *i)
1235{ 907{
1236 struct cx88_core *core = ((struct cx8800_fh *)priv)->dev->core; 908 struct cx8800_dev *dev = video_drvdata(file);
909 struct cx88_core *core = dev->core;
1237 return cx88_enum_input (core,i); 910 return cx88_enum_input (core,i);
1238} 911}
1239 912
1240static int vidioc_g_input (struct file *file, void *priv, unsigned int *i) 913static int vidioc_g_input (struct file *file, void *priv, unsigned int *i)
1241{ 914{
1242 struct cx88_core *core = ((struct cx8800_fh *)priv)->dev->core; 915 struct cx8800_dev *dev = video_drvdata(file);
916 struct cx88_core *core = dev->core;
1243 917
1244 *i = core->input; 918 *i = core->input;
1245 return 0; 919 return 0;
@@ -1247,24 +921,24 @@ static int vidioc_g_input (struct file *file, void *priv, unsigned int *i)
1247 921
1248static int vidioc_s_input (struct file *file, void *priv, unsigned int i) 922static int vidioc_s_input (struct file *file, void *priv, unsigned int i)
1249{ 923{
1250 struct cx88_core *core = ((struct cx8800_fh *)priv)->dev->core; 924 struct cx8800_dev *dev = video_drvdata(file);
925 struct cx88_core *core = dev->core;
1251 926
1252 if (i >= 4) 927 if (i >= 4)
1253 return -EINVAL; 928 return -EINVAL;
1254 if (0 == INPUT(i).type) 929 if (0 == INPUT(i).type)
1255 return -EINVAL; 930 return -EINVAL;
1256 931
1257 mutex_lock(&core->lock);
1258 cx88_newstation(core); 932 cx88_newstation(core);
1259 cx88_video_mux(core,i); 933 cx88_video_mux(core,i);
1260 mutex_unlock(&core->lock);
1261 return 0; 934 return 0;
1262} 935}
1263 936
1264static int vidioc_g_tuner (struct file *file, void *priv, 937static int vidioc_g_tuner (struct file *file, void *priv,
1265 struct v4l2_tuner *t) 938 struct v4l2_tuner *t)
1266{ 939{
1267 struct cx88_core *core = ((struct cx8800_fh *)priv)->dev->core; 940 struct cx8800_dev *dev = video_drvdata(file);
941 struct cx88_core *core = dev->core;
1268 u32 reg; 942 u32 reg;
1269 943
1270 if (unlikely(UNSET == core->board.tuner_type)) 944 if (unlikely(UNSET == core->board.tuner_type))
@@ -1286,7 +960,8 @@ static int vidioc_g_tuner (struct file *file, void *priv,
1286static int vidioc_s_tuner (struct file *file, void *priv, 960static int vidioc_s_tuner (struct file *file, void *priv,
1287 const struct v4l2_tuner *t) 961 const struct v4l2_tuner *t)
1288{ 962{
1289 struct cx88_core *core = ((struct cx8800_fh *)priv)->dev->core; 963 struct cx8800_dev *dev = video_drvdata(file);
964 struct cx88_core *core = dev->core;
1290 965
1291 if (UNSET == core->board.tuner_type) 966 if (UNSET == core->board.tuner_type)
1292 return -EINVAL; 967 return -EINVAL;
@@ -1300,8 +975,8 @@ static int vidioc_s_tuner (struct file *file, void *priv,
1300static int vidioc_g_frequency (struct file *file, void *priv, 975static int vidioc_g_frequency (struct file *file, void *priv,
1301 struct v4l2_frequency *f) 976 struct v4l2_frequency *f)
1302{ 977{
1303 struct cx8800_fh *fh = priv; 978 struct cx8800_dev *dev = video_drvdata(file);
1304 struct cx88_core *core = fh->dev->core; 979 struct cx88_core *core = dev->core;
1305 980
1306 if (unlikely(UNSET == core->board.tuner_type)) 981 if (unlikely(UNSET == core->board.tuner_type))
1307 return -EINVAL; 982 return -EINVAL;
@@ -1325,7 +1000,6 @@ int cx88_set_freq (struct cx88_core *core,
1325 if (unlikely(f->tuner != 0)) 1000 if (unlikely(f->tuner != 0))
1326 return -EINVAL; 1001 return -EINVAL;
1327 1002
1328 mutex_lock(&core->lock);
1329 cx88_newstation(core); 1003 cx88_newstation(core);
1330 call_all(core, tuner, s_frequency, f); 1004 call_all(core, tuner, s_frequency, f);
1331 call_all(core, tuner, g_frequency, &new_freq); 1005 call_all(core, tuner, g_frequency, &new_freq);
@@ -1335,8 +1009,6 @@ int cx88_set_freq (struct cx88_core *core,
1335 msleep (10); 1009 msleep (10);
1336 cx88_set_tvaudio(core); 1010 cx88_set_tvaudio(core);
1337 1011
1338 mutex_unlock(&core->lock);
1339
1340 return 0; 1012 return 0;
1341} 1013}
1342EXPORT_SYMBOL(cx88_set_freq); 1014EXPORT_SYMBOL(cx88_set_freq);
@@ -1344,8 +1016,8 @@ EXPORT_SYMBOL(cx88_set_freq);
1344static int vidioc_s_frequency (struct file *file, void *priv, 1016static int vidioc_s_frequency (struct file *file, void *priv,
1345 const struct v4l2_frequency *f) 1017 const struct v4l2_frequency *f)
1346{ 1018{
1347 struct cx8800_fh *fh = priv; 1019 struct cx8800_dev *dev = video_drvdata(file);
1348 struct cx88_core *core = fh->dev->core; 1020 struct cx88_core *core = dev->core;
1349 1021
1350 return cx88_set_freq(core, f); 1022 return cx88_set_freq(core, f);
1351} 1023}
@@ -1354,7 +1026,8 @@ static int vidioc_s_frequency (struct file *file, void *priv,
1354static int vidioc_g_register (struct file *file, void *fh, 1026static int vidioc_g_register (struct file *file, void *fh,
1355 struct v4l2_dbg_register *reg) 1027 struct v4l2_dbg_register *reg)
1356{ 1028{
1357 struct cx88_core *core = ((struct cx8800_fh*)fh)->dev->core; 1029 struct cx8800_dev *dev = video_drvdata(file);
1030 struct cx88_core *core = dev->core;
1358 1031
1359 /* cx2388x has a 24-bit register space */ 1032 /* cx2388x has a 24-bit register space */
1360 reg->val = cx_read(reg->reg & 0xfffffc); 1033 reg->val = cx_read(reg->reg & 0xfffffc);
@@ -1365,7 +1038,8 @@ static int vidioc_g_register (struct file *file, void *fh,
1365static int vidioc_s_register (struct file *file, void *fh, 1038static int vidioc_s_register (struct file *file, void *fh,
1366 const struct v4l2_dbg_register *reg) 1039 const struct v4l2_dbg_register *reg)
1367{ 1040{
1368 struct cx88_core *core = ((struct cx8800_fh*)fh)->dev->core; 1041 struct cx8800_dev *dev = video_drvdata(file);
1042 struct cx88_core *core = dev->core;
1369 1043
1370 cx_write(reg->reg & 0xfffffc, reg->val); 1044 cx_write(reg->reg & 0xfffffc, reg->val);
1371 return 0; 1045 return 0;
@@ -1379,7 +1053,8 @@ static int vidioc_s_register (struct file *file, void *fh,
1379static int radio_g_tuner (struct file *file, void *priv, 1053static int radio_g_tuner (struct file *file, void *priv,
1380 struct v4l2_tuner *t) 1054 struct v4l2_tuner *t)
1381{ 1055{
1382 struct cx88_core *core = ((struct cx8800_fh *)priv)->dev->core; 1056 struct cx8800_dev *dev = video_drvdata(file);
1057 struct cx88_core *core = dev->core;
1383 1058
1384 if (unlikely(t->index > 0)) 1059 if (unlikely(t->index > 0))
1385 return -EINVAL; 1060 return -EINVAL;
@@ -1393,7 +1068,8 @@ static int radio_g_tuner (struct file *file, void *priv,
1393static int radio_s_tuner (struct file *file, void *priv, 1068static int radio_s_tuner (struct file *file, void *priv,
1394 const struct v4l2_tuner *t) 1069 const struct v4l2_tuner *t)
1395{ 1070{
1396 struct cx88_core *core = ((struct cx8800_fh *)priv)->dev->core; 1071 struct cx8800_dev *dev = video_drvdata(file);
1072 struct cx88_core *core = dev->core;
1397 1073
1398 if (0 != t->index) 1074 if (0 != t->index)
1399 return -EINVAL; 1075 return -EINVAL;
@@ -1404,32 +1080,6 @@ static int radio_s_tuner (struct file *file, void *priv,
1404 1080
1405/* ----------------------------------------------------------- */ 1081/* ----------------------------------------------------------- */
1406 1082
1407static void cx8800_vid_timeout(unsigned long data)
1408{
1409 struct cx8800_dev *dev = (struct cx8800_dev*)data;
1410 struct cx88_core *core = dev->core;
1411 struct cx88_dmaqueue *q = &dev->vidq;
1412 struct cx88_buffer *buf;
1413 unsigned long flags;
1414
1415 cx88_sram_channel_dump(core, &cx88_sram_channels[SRAM_CH21]);
1416
1417 cx_clear(MO_VID_DMACNTRL, 0x11);
1418 cx_clear(VID_CAPTURE_CONTROL, 0x06);
1419
1420 spin_lock_irqsave(&dev->slock,flags);
1421 while (!list_empty(&q->active)) {
1422 buf = list_entry(q->active.next, struct cx88_buffer, vb.queue);
1423 list_del(&buf->vb.queue);
1424 buf->vb.state = VIDEOBUF_ERROR;
1425 wake_up(&buf->vb.done);
1426 printk("%s/0: [%p/%d] timeout - dma=0x%08lx\n", core->name,
1427 buf, buf->vb.i, (unsigned long)buf->risc.dma);
1428 }
1429 restart_video_queue(dev,q);
1430 spin_unlock_irqrestore(&dev->slock,flags);
1431}
1432
1433static const char *cx88_vid_irqs[32] = { 1083static const char *cx88_vid_irqs[32] = {
1434 "y_risci1", "u_risci1", "v_risci1", "vbi_risc1", 1084 "y_risci1", "u_risci1", "v_risci1", "vbi_risc1",
1435 "y_risci2", "u_risci2", "v_risci2", "vbi_risc2", 1085 "y_risci2", "u_risci2", "v_risci2", "vbi_risc2",
@@ -1476,22 +1126,6 @@ static void cx8800_vid_irq(struct cx8800_dev *dev)
1476 cx88_wakeup(core, &dev->vbiq, count); 1126 cx88_wakeup(core, &dev->vbiq, count);
1477 spin_unlock(&dev->slock); 1127 spin_unlock(&dev->slock);
1478 } 1128 }
1479
1480 /* risc2 y */
1481 if (status & 0x10) {
1482 dprintk(2,"stopper video\n");
1483 spin_lock(&dev->slock);
1484 restart_video_queue(dev,&dev->vidq);
1485 spin_unlock(&dev->slock);
1486 }
1487
1488 /* risc2 vbi */
1489 if (status & 0x80) {
1490 dprintk(2,"stopper vbi\n");
1491 spin_lock(&dev->slock);
1492 cx8800_restart_vbi_queue(dev,&dev->vbiq);
1493 spin_unlock(&dev->slock);
1494 }
1495} 1129}
1496 1130
1497static irqreturn_t cx8800_irq(int irq, void *dev_id) 1131static irqreturn_t cx8800_irq(int irq, void *dev_id)
@@ -1530,11 +1164,11 @@ static irqreturn_t cx8800_irq(int irq, void *dev_id)
1530static const struct v4l2_file_operations video_fops = 1164static const struct v4l2_file_operations video_fops =
1531{ 1165{
1532 .owner = THIS_MODULE, 1166 .owner = THIS_MODULE,
1533 .open = video_open, 1167 .open = v4l2_fh_open,
1534 .release = video_release, 1168 .release = vb2_fop_release,
1535 .read = video_read, 1169 .read = vb2_fop_read,
1536 .poll = video_poll, 1170 .poll = vb2_fop_poll,
1537 .mmap = video_mmap, 1171 .mmap = vb2_fop_mmap,
1538 .unlocked_ioctl = video_ioctl2, 1172 .unlocked_ioctl = video_ioctl2,
1539}; 1173};
1540 1174
@@ -1544,17 +1178,17 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
1544 .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap, 1178 .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
1545 .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap, 1179 .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
1546 .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap, 1180 .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
1547 .vidioc_reqbufs = vidioc_reqbufs, 1181 .vidioc_reqbufs = vb2_ioctl_reqbufs,
1548 .vidioc_querybuf = vidioc_querybuf, 1182 .vidioc_querybuf = vb2_ioctl_querybuf,
1549 .vidioc_qbuf = vidioc_qbuf, 1183 .vidioc_qbuf = vb2_ioctl_qbuf,
1550 .vidioc_dqbuf = vidioc_dqbuf, 1184 .vidioc_dqbuf = vb2_ioctl_dqbuf,
1551 .vidioc_g_std = vidioc_g_std, 1185 .vidioc_g_std = vidioc_g_std,
1552 .vidioc_s_std = vidioc_s_std, 1186 .vidioc_s_std = vidioc_s_std,
1553 .vidioc_enum_input = vidioc_enum_input, 1187 .vidioc_enum_input = vidioc_enum_input,
1554 .vidioc_g_input = vidioc_g_input, 1188 .vidioc_g_input = vidioc_g_input,
1555 .vidioc_s_input = vidioc_s_input, 1189 .vidioc_s_input = vidioc_s_input,
1556 .vidioc_streamon = vidioc_streamon, 1190 .vidioc_streamon = vb2_ioctl_streamon,
1557 .vidioc_streamoff = vidioc_streamoff, 1191 .vidioc_streamoff = vb2_ioctl_streamoff,
1558 .vidioc_g_tuner = vidioc_g_tuner, 1192 .vidioc_g_tuner = vidioc_g_tuner,
1559 .vidioc_s_tuner = vidioc_s_tuner, 1193 .vidioc_s_tuner = vidioc_s_tuner,
1560 .vidioc_g_frequency = vidioc_g_frequency, 1194 .vidioc_g_frequency = vidioc_g_frequency,
@@ -1579,17 +1213,17 @@ static const struct v4l2_ioctl_ops vbi_ioctl_ops = {
1579 .vidioc_g_fmt_vbi_cap = cx8800_vbi_fmt, 1213 .vidioc_g_fmt_vbi_cap = cx8800_vbi_fmt,
1580 .vidioc_try_fmt_vbi_cap = cx8800_vbi_fmt, 1214 .vidioc_try_fmt_vbi_cap = cx8800_vbi_fmt,
1581 .vidioc_s_fmt_vbi_cap = cx8800_vbi_fmt, 1215 .vidioc_s_fmt_vbi_cap = cx8800_vbi_fmt,
1582 .vidioc_reqbufs = vidioc_reqbufs, 1216 .vidioc_reqbufs = vb2_ioctl_reqbufs,
1583 .vidioc_querybuf = vidioc_querybuf, 1217 .vidioc_querybuf = vb2_ioctl_querybuf,
1584 .vidioc_qbuf = vidioc_qbuf, 1218 .vidioc_qbuf = vb2_ioctl_qbuf,
1585 .vidioc_dqbuf = vidioc_dqbuf, 1219 .vidioc_dqbuf = vb2_ioctl_dqbuf,
1586 .vidioc_g_std = vidioc_g_std, 1220 .vidioc_g_std = vidioc_g_std,
1587 .vidioc_s_std = vidioc_s_std, 1221 .vidioc_s_std = vidioc_s_std,
1588 .vidioc_enum_input = vidioc_enum_input, 1222 .vidioc_enum_input = vidioc_enum_input,
1589 .vidioc_g_input = vidioc_g_input, 1223 .vidioc_g_input = vidioc_g_input,
1590 .vidioc_s_input = vidioc_s_input, 1224 .vidioc_s_input = vidioc_s_input,
1591 .vidioc_streamon = vidioc_streamon, 1225 .vidioc_streamon = vb2_ioctl_streamon,
1592 .vidioc_streamoff = vidioc_streamoff, 1226 .vidioc_streamoff = vb2_ioctl_streamoff,
1593 .vidioc_g_tuner = vidioc_g_tuner, 1227 .vidioc_g_tuner = vidioc_g_tuner,
1594 .vidioc_s_tuner = vidioc_s_tuner, 1228 .vidioc_s_tuner = vidioc_s_tuner,
1595 .vidioc_g_frequency = vidioc_g_frequency, 1229 .vidioc_g_frequency = vidioc_g_frequency,
@@ -1610,9 +1244,9 @@ static const struct video_device cx8800_vbi_template = {
1610static const struct v4l2_file_operations radio_fops = 1244static const struct v4l2_file_operations radio_fops =
1611{ 1245{
1612 .owner = THIS_MODULE, 1246 .owner = THIS_MODULE,
1613 .open = video_open, 1247 .open = radio_open,
1614 .poll = v4l2_ctrl_poll, 1248 .poll = v4l2_ctrl_poll,
1615 .release = video_release, 1249 .release = v4l2_fh_release,
1616 .unlocked_ioctl = video_ioctl2, 1250 .unlocked_ioctl = video_ioctl2,
1617}; 1251};
1618 1252
@@ -1676,6 +1310,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
1676{ 1310{
1677 struct cx8800_dev *dev; 1311 struct cx8800_dev *dev;
1678 struct cx88_core *core; 1312 struct cx88_core *core;
1313 struct vb2_queue *q;
1679 int err; 1314 int err;
1680 int i; 1315 int i;
1681 1316
@@ -1713,25 +1348,12 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
1713 1348
1714 /* initialize driver struct */ 1349 /* initialize driver struct */
1715 spin_lock_init(&dev->slock); 1350 spin_lock_init(&dev->slock);
1716 core->tvnorm = V4L2_STD_NTSC_M;
1717 1351
1718 /* init video dma queues */ 1352 /* init video dma queues */
1719 INIT_LIST_HEAD(&dev->vidq.active); 1353 INIT_LIST_HEAD(&dev->vidq.active);
1720 INIT_LIST_HEAD(&dev->vidq.queued);
1721 dev->vidq.timeout.function = cx8800_vid_timeout;
1722 dev->vidq.timeout.data = (unsigned long)dev;
1723 init_timer(&dev->vidq.timeout);
1724 cx88_risc_stopper(dev->pci,&dev->vidq.stopper,
1725 MO_VID_DMACNTRL,0x11,0x00);
1726 1354
1727 /* init vbi dma queues */ 1355 /* init vbi dma queues */
1728 INIT_LIST_HEAD(&dev->vbiq.active); 1356 INIT_LIST_HEAD(&dev->vbiq.active);
1729 INIT_LIST_HEAD(&dev->vbiq.queued);
1730 dev->vbiq.timeout.function = cx8800_vbi_timeout;
1731 dev->vbiq.timeout.data = (unsigned long)dev;
1732 init_timer(&dev->vbiq.timeout);
1733 cx88_risc_stopper(dev->pci,&dev->vbiq.stopper,
1734 MO_VID_DMACNTRL,0x88,0x00);
1735 1357
1736 /* get irq */ 1358 /* get irq */
1737 err = request_irq(pci_dev->irq, cx8800_irq, 1359 err = request_irq(pci_dev->irq, cx8800_irq,
@@ -1820,9 +1442,10 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
1820 /* Sets device info at pci_dev */ 1442 /* Sets device info at pci_dev */
1821 pci_set_drvdata(pci_dev, dev); 1443 pci_set_drvdata(pci_dev, dev);
1822 1444
1823 dev->width = 320; 1445 dev->fmt = format_by_fourcc(V4L2_PIX_FMT_BGR24);
1824 dev->height = 240; 1446
1825 dev->fmt = format_by_fourcc(V4L2_PIX_FMT_BGR24); 1447 /* Maintain a reference so cx88-blackbird can query the 8800 device. */
1448 core->v4ldev = dev;
1826 1449
1827 /* initial device configuration */ 1450 /* initial device configuration */
1828 mutex_lock(&core->lock); 1451 mutex_lock(&core->lock);
@@ -1831,11 +1454,44 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
1831 v4l2_ctrl_handler_setup(&core->audio_hdl); 1454 v4l2_ctrl_handler_setup(&core->audio_hdl);
1832 cx88_video_mux(core, 0); 1455 cx88_video_mux(core, 0);
1833 1456
1457 q = &dev->vb2_vidq;
1458 q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
1459 q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
1460 q->gfp_flags = GFP_DMA32;
1461 q->min_buffers_needed = 2;
1462 q->drv_priv = dev;
1463 q->buf_struct_size = sizeof(struct cx88_buffer);
1464 q->ops = &cx8800_video_qops;
1465 q->mem_ops = &vb2_dma_sg_memops;
1466 q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
1467 q->lock = &core->lock;
1468
1469 err = vb2_queue_init(q);
1470 if (err < 0)
1471 goto fail_unreg;
1472
1473 q = &dev->vb2_vbiq;
1474 q->type = V4L2_BUF_TYPE_VBI_CAPTURE;
1475 q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
1476 q->gfp_flags = GFP_DMA32;
1477 q->min_buffers_needed = 2;
1478 q->drv_priv = dev;
1479 q->buf_struct_size = sizeof(struct cx88_buffer);
1480 q->ops = &cx8800_vbi_qops;
1481 q->mem_ops = &vb2_dma_sg_memops;
1482 q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
1483 q->lock = &core->lock;
1484
1485 err = vb2_queue_init(q);
1486 if (err < 0)
1487 goto fail_unreg;
1488
1834 /* register v4l devices */ 1489 /* register v4l devices */
1835 dev->video_dev = cx88_vdev_init(core,dev->pci, 1490 dev->video_dev = cx88_vdev_init(core,dev->pci,
1836 &cx8800_video_template,"video"); 1491 &cx8800_video_template,"video");
1837 video_set_drvdata(dev->video_dev, dev); 1492 video_set_drvdata(dev->video_dev, dev);
1838 dev->video_dev->ctrl_handler = &core->video_hdl; 1493 dev->video_dev->ctrl_handler = &core->video_hdl;
1494 dev->video_dev->queue = &dev->vb2_vidq;
1839 err = video_register_device(dev->video_dev,VFL_TYPE_GRABBER, 1495 err = video_register_device(dev->video_dev,VFL_TYPE_GRABBER,
1840 video_nr[core->nr]); 1496 video_nr[core->nr]);
1841 if (err < 0) { 1497 if (err < 0) {
@@ -1848,6 +1504,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
1848 1504
1849 dev->vbi_dev = cx88_vdev_init(core,dev->pci,&cx8800_vbi_template,"vbi"); 1505 dev->vbi_dev = cx88_vdev_init(core,dev->pci,&cx8800_vbi_template,"vbi");
1850 video_set_drvdata(dev->vbi_dev, dev); 1506 video_set_drvdata(dev->vbi_dev, dev);
1507 dev->vbi_dev->queue = &dev->vb2_vbiq;
1851 err = video_register_device(dev->vbi_dev,VFL_TYPE_VBI, 1508 err = video_register_device(dev->vbi_dev,VFL_TYPE_VBI,
1852 vbi_nr[core->nr]); 1509 vbi_nr[core->nr]);
1853 if (err < 0) { 1510 if (err < 0) {
@@ -1875,7 +1532,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
1875 } 1532 }
1876 1533
1877 /* start tvaudio thread */ 1534 /* start tvaudio thread */
1878 if (core->board.tuner_type != TUNER_ABSENT) { 1535 if (core->board.tuner_type != UNSET) {
1879 core->kthread = kthread_run(cx88_audio_thread, core, "cx88 tvaudio"); 1536 core->kthread = kthread_run(cx88_audio_thread, core, "cx88 tvaudio");
1880 if (IS_ERR(core->kthread)) { 1537 if (IS_ERR(core->kthread)) {
1881 err = PTR_ERR(core->kthread); 1538 err = PTR_ERR(core->kthread);
@@ -1892,6 +1549,7 @@ fail_unreg:
1892 free_irq(pci_dev->irq, dev); 1549 free_irq(pci_dev->irq, dev);
1893 mutex_unlock(&core->lock); 1550 mutex_unlock(&core->lock);
1894fail_core: 1551fail_core:
1552 core->v4ldev = NULL;
1895 cx88_core_put(core,dev->pci); 1553 cx88_core_put(core,dev->pci);
1896fail_free: 1554fail_free:
1897 kfree(dev); 1555 kfree(dev);
@@ -1913,15 +1571,16 @@ static void cx8800_finidev(struct pci_dev *pci_dev)
1913 cx88_ir_stop(core); 1571 cx88_ir_stop(core);
1914 1572
1915 cx88_shutdown(core); /* FIXME */ 1573 cx88_shutdown(core); /* FIXME */
1916 pci_disable_device(pci_dev);
1917 1574
1918 /* unregister stuff */ 1575 /* unregister stuff */
1919 1576
1920 free_irq(pci_dev->irq, dev); 1577 free_irq(pci_dev->irq, dev);
1921 cx8800_unregister_video(dev); 1578 cx8800_unregister_video(dev);
1579 pci_disable_device(pci_dev);
1580
1581 core->v4ldev = NULL;
1922 1582
1923 /* free memory */ 1583 /* free memory */
1924 btcx_riscmem_free(dev->pci,&dev->vidq.stopper);
1925 cx88_core_put(core,dev->pci); 1584 cx88_core_put(core,dev->pci);
1926 kfree(dev); 1585 kfree(dev);
1927} 1586}
@@ -1938,12 +1597,10 @@ static int cx8800_suspend(struct pci_dev *pci_dev, pm_message_t state)
1938 if (!list_empty(&dev->vidq.active)) { 1597 if (!list_empty(&dev->vidq.active)) {
1939 printk("%s/0: suspend video\n", core->name); 1598 printk("%s/0: suspend video\n", core->name);
1940 stop_video_dma(dev); 1599 stop_video_dma(dev);
1941 del_timer(&dev->vidq.timeout);
1942 } 1600 }
1943 if (!list_empty(&dev->vbiq.active)) { 1601 if (!list_empty(&dev->vbiq.active)) {
1944 printk("%s/0: suspend vbi\n", core->name); 1602 printk("%s/0: suspend vbi\n", core->name);
1945 cx8800_stop_vbi_dma(dev); 1603 cx8800_stop_vbi_dma(dev);
1946 del_timer(&dev->vbiq.timeout);
1947 } 1604 }
1948 spin_unlock_irqrestore(&dev->slock, flags); 1605 spin_unlock_irqrestore(&dev->slock, flags);
1949 1606
diff --git a/drivers/media/pci/cx88/cx88.h b/drivers/media/pci/cx88/cx88.h
index 28893a6b249e..3b0ae754f165 100644
--- a/drivers/media/pci/cx88/cx88.h
+++ b/drivers/media/pci/cx88/cx88.h
@@ -29,19 +29,18 @@
29#include <media/v4l2-fh.h> 29#include <media/v4l2-fh.h>
30#include <media/tuner.h> 30#include <media/tuner.h>
31#include <media/tveeprom.h> 31#include <media/tveeprom.h>
32#include <media/videobuf-dma-sg.h> 32#include <media/videobuf2-dma-sg.h>
33#include <media/cx2341x.h> 33#include <media/cx2341x.h>
34#include <media/videobuf-dvb.h> 34#include <media/videobuf2-dvb.h>
35#include <media/ir-kbd-i2c.h> 35#include <media/ir-kbd-i2c.h>
36#include <media/wm8775.h> 36#include <media/wm8775.h>
37 37
38#include "btcx-risc.h"
39#include "cx88-reg.h" 38#include "cx88-reg.h"
40#include "tuner-xc2028.h" 39#include "tuner-xc2028.h"
41 40
42#include <linux/mutex.h> 41#include <linux/mutex.h>
43 42
44#define CX88_VERSION "0.0.9" 43#define CX88_VERSION "1.0.0"
45 44
46#define UNSET (-1U) 45#define UNSET (-1U)
47 46
@@ -62,7 +61,8 @@
62#define FORMAT_FLAGS_PACKED 0x01 61#define FORMAT_FLAGS_PACKED 0x01
63#define FORMAT_FLAGS_PLANAR 0x02 62#define FORMAT_FLAGS_PLANAR 0x02
64 63
65#define VBI_LINE_COUNT 17 64#define VBI_LINE_PAL_COUNT 18
65#define VBI_LINE_NTSC_COUNT 12
66#define VBI_LINE_LENGTH 2048 66#define VBI_LINE_LENGTH 2048
67 67
68#define AUD_RDS_LINES 4 68#define AUD_RDS_LINES 4
@@ -95,13 +95,13 @@ enum cx8802_board_access {
95 95
96static inline unsigned int norm_maxw(v4l2_std_id norm) 96static inline unsigned int norm_maxw(v4l2_std_id norm)
97{ 97{
98 return (norm & (V4L2_STD_MN & ~V4L2_STD_PAL_Nc)) ? 720 : 768; 98 return 720;
99} 99}
100 100
101 101
102static inline unsigned int norm_maxh(v4l2_std_id norm) 102static inline unsigned int norm_maxh(v4l2_std_id norm)
103{ 103{
104 return (norm & V4L2_STD_625_50) ? 576 : 480; 104 return (norm & V4L2_STD_525_60) ? 480 : 576;
105} 105}
106 106
107/* ----------------------------------------------------------- */ 107/* ----------------------------------------------------------- */
@@ -311,26 +311,33 @@ enum cx88_tvaudio {
311 311
312#define BUFFER_TIMEOUT msecs_to_jiffies(2000) 312#define BUFFER_TIMEOUT msecs_to_jiffies(2000)
313 313
314struct cx88_riscmem {
315 unsigned int size;
316 __le32 *cpu;
317 __le32 *jmp;
318 dma_addr_t dma;
319};
320
314/* buffer for one video frame */ 321/* buffer for one video frame */
315struct cx88_buffer { 322struct cx88_buffer {
316 /* common v4l buffer stuff -- must be first */ 323 /* common v4l buffer stuff -- must be first */
317 struct videobuf_buffer vb; 324 struct vb2_buffer vb;
325 struct list_head list;
318 326
319 /* cx88 specific */ 327 /* cx88 specific */
320 unsigned int bpl; 328 unsigned int bpl;
321 struct btcx_riscmem risc; 329 struct cx88_riscmem risc;
322 const struct cx8800_fmt *fmt;
323 u32 count; 330 u32 count;
324}; 331};
325 332
326struct cx88_dmaqueue { 333struct cx88_dmaqueue {
327 struct list_head active; 334 struct list_head active;
328 struct list_head queued;
329 struct timer_list timeout;
330 struct btcx_riscmem stopper;
331 u32 count; 335 u32 count;
332}; 336};
333 337
338struct cx8800_dev;
339struct cx8802_dev;
340
334struct cx88_core { 341struct cx88_core {
335 struct list_head devlist; 342 struct list_head devlist;
336 atomic_t refcount; 343 atomic_t refcount;
@@ -376,6 +383,8 @@ struct cx88_core {
376 /* state info */ 383 /* state info */
377 struct task_struct *kthread; 384 struct task_struct *kthread;
378 v4l2_std_id tvnorm; 385 v4l2_std_id tvnorm;
386 unsigned width, height;
387 unsigned field;
379 enum cx88_tvaudio tvaudio; 388 enum cx88_tvaudio tvaudio;
380 u32 audiomode_manual; 389 u32 audiomode_manual;
381 u32 audiomode_current; 390 u32 audiomode_current;
@@ -395,11 +404,14 @@ struct cx88_core {
395 struct mutex lock; 404 struct mutex lock;
396 /* various v4l controls */ 405 /* various v4l controls */
397 u32 freq; 406 u32 freq;
398 int users;
399 int mpeg_users;
400 407
401 /* cx88-video needs to access cx8802 for hybrid tuner pll access. */ 408 /*
409 * cx88-video needs to access cx8802 for hybrid tuner pll access and
410 * for vb2_is_busy() checks.
411 */
402 struct cx8802_dev *dvbdev; 412 struct cx8802_dev *dvbdev;
413 /* cx88-blackbird needs to access cx8800 for vb2_is_busy() checks */
414 struct cx8800_dev *v4ldev;
403 enum cx88_board_type active_type_id; 415 enum cx88_board_type active_type_id;
404 int active_ref; 416 int active_ref;
405 int active_fe_id; 417 int active_fe_id;
@@ -453,24 +465,9 @@ static inline struct cx88_core *to_core(struct v4l2_device *v4l2_dev)
453 val; \ 465 val; \
454 }) 466 })
455 467
456struct cx8800_dev;
457struct cx8802_dev;
458
459/* ----------------------------------------------------------- */ 468/* ----------------------------------------------------------- */
460/* function 0: video stuff */ 469/* function 0: video stuff */
461 470
462struct cx8800_fh {
463 struct v4l2_fh fh;
464 struct cx8800_dev *dev;
465 unsigned int resources;
466
467 /* video capture */
468 struct videobuf_queue vidq;
469
470 /* vbi capture */
471 struct videobuf_queue vbiq;
472};
473
474struct cx8800_suspend_state { 471struct cx8800_suspend_state {
475 int disabled; 472 int disabled;
476}; 473};
@@ -490,11 +487,12 @@ struct cx8800_dev {
490 unsigned char pci_rev,pci_lat; 487 unsigned char pci_rev,pci_lat;
491 488
492 const struct cx8800_fmt *fmt; 489 const struct cx8800_fmt *fmt;
493 unsigned int width, height;
494 490
495 /* capture queues */ 491 /* capture queues */
496 struct cx88_dmaqueue vidq; 492 struct cx88_dmaqueue vidq;
493 struct vb2_queue vb2_vidq;
497 struct cx88_dmaqueue vbiq; 494 struct cx88_dmaqueue vbiq;
495 struct vb2_queue vb2_vbiq;
498 496
499 /* various v4l controls */ 497 /* various v4l controls */
500 498
@@ -510,12 +508,6 @@ struct cx8800_dev {
510/* ----------------------------------------------------------- */ 508/* ----------------------------------------------------------- */
511/* function 2: mpeg stuff */ 509/* function 2: mpeg stuff */
512 510
513struct cx8802_fh {
514 struct v4l2_fh fh;
515 struct cx8802_dev *dev;
516 struct videobuf_queue mpegq;
517};
518
519struct cx8802_suspend_state { 511struct cx8802_suspend_state {
520 int disabled; 512 int disabled;
521}; 513};
@@ -559,6 +551,7 @@ struct cx8802_dev {
559 551
560 /* dma queues */ 552 /* dma queues */
561 struct cx88_dmaqueue mpegq; 553 struct cx88_dmaqueue mpegq;
554 struct vb2_queue vb2_mpegq;
562 u32 ts_packet_size; 555 u32 ts_packet_size;
563 u32 ts_packet_count; 556 u32 ts_packet_count;
564 557
@@ -570,9 +563,6 @@ struct cx8802_dev {
570#if IS_ENABLED(CONFIG_VIDEO_CX88_BLACKBIRD) 563#if IS_ENABLED(CONFIG_VIDEO_CX88_BLACKBIRD)
571 struct video_device *mpeg_dev; 564 struct video_device *mpeg_dev;
572 u32 mailbox; 565 u32 mailbox;
573 int width;
574 int height;
575 unsigned char mpeg_active; /* nonzero if mpeg encoder is active */
576 566
577 /* mpeg params */ 567 /* mpeg params */
578 struct cx2341x_handler cxhdl; 568 struct cx2341x_handler cxhdl;
@@ -580,7 +570,7 @@ struct cx8802_dev {
580 570
581#if IS_ENABLED(CONFIG_VIDEO_CX88_DVB) 571#if IS_ENABLED(CONFIG_VIDEO_CX88_DVB)
582 /* for dvb only */ 572 /* for dvb only */
583 struct videobuf_dvb_frontends frontends; 573 struct vb2_dvb_frontends frontends;
584#endif 574#endif
585 575
586#if IS_ENABLED(CONFIG_VIDEO_CX88_VP3054) 576#if IS_ENABLED(CONFIG_VIDEO_CX88_VP3054)
@@ -634,22 +624,17 @@ extern void cx88_shutdown(struct cx88_core *core);
634extern int cx88_reset(struct cx88_core *core); 624extern int cx88_reset(struct cx88_core *core);
635 625
636extern int 626extern int
637cx88_risc_buffer(struct pci_dev *pci, struct btcx_riscmem *risc, 627cx88_risc_buffer(struct pci_dev *pci, struct cx88_riscmem *risc,
638 struct scatterlist *sglist, 628 struct scatterlist *sglist,
639 unsigned int top_offset, unsigned int bottom_offset, 629 unsigned int top_offset, unsigned int bottom_offset,
640 unsigned int bpl, unsigned int padding, unsigned int lines); 630 unsigned int bpl, unsigned int padding, unsigned int lines);
641extern int 631extern int
642cx88_risc_databuffer(struct pci_dev *pci, struct btcx_riscmem *risc, 632cx88_risc_databuffer(struct pci_dev *pci, struct cx88_riscmem *risc,
643 struct scatterlist *sglist, unsigned int bpl, 633 struct scatterlist *sglist, unsigned int bpl,
644 unsigned int lines, unsigned int lpi); 634 unsigned int lines, unsigned int lpi);
645extern int
646cx88_risc_stopper(struct pci_dev *pci, struct btcx_riscmem *risc,
647 u32 reg, u32 mask, u32 value);
648extern void
649cx88_free_buffer(struct videobuf_queue *q, struct cx88_buffer *buf);
650 635
651extern void cx88_risc_disasm(struct cx88_core *core, 636extern void cx88_risc_disasm(struct cx88_core *core,
652 struct btcx_riscmem *risc); 637 struct cx88_riscmem *risc);
653extern int cx88_sram_channel_setup(struct cx88_core *core, 638extern int cx88_sram_channel_setup(struct cx88_core *core,
654 const struct sram_channel *ch, 639 const struct sram_channel *ch,
655 unsigned int bpl, u32 risc); 640 unsigned int bpl, u32 risc);
@@ -664,7 +649,7 @@ extern struct video_device *cx88_vdev_init(struct cx88_core *core,
664 struct pci_dev *pci, 649 struct pci_dev *pci,
665 const struct video_device *template_, 650 const struct video_device *template_,
666 const char *type); 651 const char *type);
667extern struct cx88_core* cx88_core_get(struct pci_dev *pci); 652extern struct cx88_core *cx88_core_get(struct pci_dev *pci);
668extern void cx88_core_put(struct cx88_core *core, 653extern void cx88_core_put(struct cx88_core *core,
669 struct pci_dev *pci); 654 struct pci_dev *pci);
670 655
@@ -684,12 +669,10 @@ int cx8800_start_vbi_dma(struct cx8800_dev *dev,
684 struct cx88_dmaqueue *q, 669 struct cx88_dmaqueue *q,
685 struct cx88_buffer *buf); 670 struct cx88_buffer *buf);
686*/ 671*/
687int cx8800_stop_vbi_dma(struct cx8800_dev *dev); 672void cx8800_stop_vbi_dma(struct cx8800_dev *dev);
688int cx8800_restart_vbi_queue(struct cx8800_dev *dev, 673int cx8800_restart_vbi_queue(struct cx8800_dev *dev, struct cx88_dmaqueue *q);
689 struct cx88_dmaqueue *q);
690void cx8800_vbi_timeout(unsigned long data);
691 674
692extern const struct videobuf_queue_ops cx8800_vbi_qops; 675extern const struct vb2_ops cx8800_vbi_qops;
693 676
694/* ----------------------------------------------------------- */ 677/* ----------------------------------------------------------- */
695/* cx88-i2c.c */ 678/* cx88-i2c.c */
@@ -739,14 +722,17 @@ extern void cx88_i2c_init_ir(struct cx88_core *core);
739/* ----------------------------------------------------------- */ 722/* ----------------------------------------------------------- */
740/* cx88-mpeg.c */ 723/* cx88-mpeg.c */
741 724
742int cx8802_buf_prepare(struct videobuf_queue *q,struct cx8802_dev *dev, 725int cx8802_buf_prepare(struct vb2_queue *q, struct cx8802_dev *dev,
743 struct cx88_buffer *buf, enum v4l2_field field); 726 struct cx88_buffer *buf);
744void cx8802_buf_queue(struct cx8802_dev *dev, struct cx88_buffer *buf); 727void cx8802_buf_queue(struct cx8802_dev *dev, struct cx88_buffer *buf);
745void cx8802_cancel_buffers(struct cx8802_dev *dev); 728void cx8802_cancel_buffers(struct cx8802_dev *dev);
729int cx8802_start_dma(struct cx8802_dev *dev,
730 struct cx88_dmaqueue *q,
731 struct cx88_buffer *buf);
746 732
747/* ----------------------------------------------------------- */ 733/* ----------------------------------------------------------- */
748/* cx88-video.c*/ 734/* cx88-video.c*/
749int cx88_enum_input (struct cx88_core *core,struct v4l2_input *i); 735int cx88_enum_input(struct cx88_core *core, struct v4l2_input *i);
750int cx88_set_freq(struct cx88_core *core, const struct v4l2_frequency *f); 736int cx88_set_freq(struct cx88_core *core, const struct v4l2_frequency *f);
751int cx88_video_mux(struct cx88_core *core, unsigned int input); 737int cx88_video_mux(struct cx88_core *core, unsigned int input);
752void cx88_querycap(struct file *file, struct cx88_core *core, 738void cx88_querycap(struct file *file, struct cx88_core *core,
diff --git a/drivers/media/pci/ddbridge/ddbridge-core.c b/drivers/media/pci/ddbridge/ddbridge-core.c
index c82e855a0814..9e3492e20766 100644
--- a/drivers/media/pci/ddbridge/ddbridge-core.c
+++ b/drivers/media/pci/ddbridge/ddbridge-core.c
@@ -1118,8 +1118,7 @@ static void ddb_ports_detach(struct ddb *dev)
1118 dvb_input_detach(port->input[1]); 1118 dvb_input_detach(port->input[1]);
1119 break; 1119 break;
1120 case DDB_PORT_CI: 1120 case DDB_PORT_CI:
1121 if (port->output->dev) 1121 dvb_unregister_device(port->output->dev);
1122 dvb_unregister_device(port->output->dev);
1123 if (port->en) { 1122 if (port->en) {
1124 ddb_input_stop(port->input[0]); 1123 ddb_input_stop(port->input[0]);
1125 ddb_output_stop(port->output); 1124 ddb_output_stop(port->output);
diff --git a/drivers/media/pci/ivtv/ivtv-controls.c b/drivers/media/pci/ivtv/ivtv-controls.c
index 2b0ab26e11e8..ccf548c255f1 100644
--- a/drivers/media/pci/ivtv/ivtv-controls.c
+++ b/drivers/media/pci/ivtv/ivtv-controls.c
@@ -69,7 +69,7 @@ static int ivtv_s_video_encoding(struct cx2341x_handler *cxhdl, u32 val)
69 /* fix videodecoder resolution */ 69 /* fix videodecoder resolution */
70 fmt.width = cxhdl->width / (is_mpeg1 ? 2 : 1); 70 fmt.width = cxhdl->width / (is_mpeg1 ? 2 : 1);
71 fmt.height = cxhdl->height; 71 fmt.height = cxhdl->height;
72 fmt.code = V4L2_MBUS_FMT_FIXED; 72 fmt.code = MEDIA_BUS_FMT_FIXED;
73 v4l2_subdev_call(itv->sd_video, video, s_mbus_fmt, &fmt); 73 v4l2_subdev_call(itv->sd_video, video, s_mbus_fmt, &fmt);
74 return 0; 74 return 0;
75} 75}
diff --git a/drivers/media/pci/ivtv/ivtv-ioctl.c b/drivers/media/pci/ivtv/ivtv-ioctl.c
index 3e0cb77d5930..4d8ee18c3feb 100644
--- a/drivers/media/pci/ivtv/ivtv-ioctl.c
+++ b/drivers/media/pci/ivtv/ivtv-ioctl.c
@@ -595,7 +595,7 @@ static int ivtv_s_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f
595 fmt->fmt.pix.width /= 2; 595 fmt->fmt.pix.width /= 2;
596 mbus_fmt.width = fmt->fmt.pix.width; 596 mbus_fmt.width = fmt->fmt.pix.width;
597 mbus_fmt.height = h; 597 mbus_fmt.height = h;
598 mbus_fmt.code = V4L2_MBUS_FMT_FIXED; 598 mbus_fmt.code = MEDIA_BUS_FMT_FIXED;
599 v4l2_subdev_call(itv->sd_video, video, s_mbus_fmt, &mbus_fmt); 599 v4l2_subdev_call(itv->sd_video, video, s_mbus_fmt, &mbus_fmt);
600 return ivtv_g_fmt_vid_cap(file, fh, fmt); 600 return ivtv_g_fmt_vid_cap(file, fh, fmt);
601} 601}
diff --git a/drivers/media/pci/ivtv/ivtv-udma.c b/drivers/media/pci/ivtv/ivtv-udma.c
index 7338cb2d0a38..bee2329e0b2e 100644
--- a/drivers/media/pci/ivtv/ivtv-udma.c
+++ b/drivers/media/pci/ivtv/ivtv-udma.c
@@ -76,7 +76,7 @@ void ivtv_udma_fill_sg_array (struct ivtv_user_dma *dma, u32 buffer_offset, u32
76 int i; 76 int i;
77 struct scatterlist *sg; 77 struct scatterlist *sg;
78 78
79 for (i = 0, sg = dma->SGlist; i < dma->SG_length; i++, sg++) { 79 for (i = 0, sg = dma->SGlist; i < dma->SG_length; i++, sg = sg_next(sg)) {
80 dma->SGarray[i].size = cpu_to_le32(sg_dma_len(sg)); 80 dma->SGarray[i].size = cpu_to_le32(sg_dma_len(sg));
81 dma->SGarray[i].src = cpu_to_le32(sg_dma_address(sg)); 81 dma->SGarray[i].src = cpu_to_le32(sg_dma_address(sg));
82 dma->SGarray[i].dst = cpu_to_le32(buffer_offset); 82 dma->SGarray[i].dst = cpu_to_le32(buffer_offset);
diff --git a/drivers/media/pci/meye/meye.c b/drivers/media/pci/meye/meye.c
index aeae54708811..9d9f90cb7740 100644
--- a/drivers/media/pci/meye/meye.c
+++ b/drivers/media/pci/meye/meye.c
@@ -1031,9 +1031,6 @@ static int vidioc_querycap(struct file *file, void *fh,
1031 strcpy(cap->card, "meye"); 1031 strcpy(cap->card, "meye");
1032 sprintf(cap->bus_info, "PCI:%s", pci_name(meye.mchip_dev)); 1032 sprintf(cap->bus_info, "PCI:%s", pci_name(meye.mchip_dev));
1033 1033
1034 cap->version = (MEYE_DRIVER_MAJORVERSION << 8) +
1035 MEYE_DRIVER_MINORVERSION;
1036
1037 cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | 1034 cap->device_caps = V4L2_CAP_VIDEO_CAPTURE |
1038 V4L2_CAP_STREAMING; 1035 V4L2_CAP_STREAMING;
1039 cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS; 1036 cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
diff --git a/drivers/media/pci/pt1/pt1.c b/drivers/media/pci/pt1/pt1.c
index db887b0c37b1..acc35b42e53c 100644
--- a/drivers/media/pci/pt1/pt1.c
+++ b/drivers/media/pci/pt1/pt1.c
@@ -109,9 +109,6 @@ struct pt1_adapter {
109 int sleep; 109 int sleep;
110}; 110};
111 111
112#define pt1_printk(level, pt1, format, arg...) \
113 dev_printk(level, &(pt1)->pdev->dev, format, ##arg)
114
115static void pt1_write_reg(struct pt1 *pt1, int reg, u32 data) 112static void pt1_write_reg(struct pt1 *pt1, int reg, u32 data)
116{ 113{
117 writel(data, pt1->regs + reg * 4); 114 writel(data, pt1->regs + reg * 4);
@@ -154,7 +151,7 @@ static int pt1_sync(struct pt1 *pt1)
154 return 0; 151 return 0;
155 pt1_write_reg(pt1, 0, 0x00000008); 152 pt1_write_reg(pt1, 0, 0x00000008);
156 } 153 }
157 pt1_printk(KERN_ERR, pt1, "could not sync\n"); 154 dev_err(&pt1->pdev->dev, "could not sync\n");
158 return -EIO; 155 return -EIO;
159} 156}
160 157
@@ -179,7 +176,7 @@ static int pt1_unlock(struct pt1 *pt1)
179 return 0; 176 return 0;
180 schedule_timeout_uninterruptible((HZ + 999) / 1000); 177 schedule_timeout_uninterruptible((HZ + 999) / 1000);
181 } 178 }
182 pt1_printk(KERN_ERR, pt1, "could not unlock\n"); 179 dev_err(&pt1->pdev->dev, "could not unlock\n");
183 return -EIO; 180 return -EIO;
184} 181}
185 182
@@ -193,7 +190,7 @@ static int pt1_reset_pci(struct pt1 *pt1)
193 return 0; 190 return 0;
194 schedule_timeout_uninterruptible((HZ + 999) / 1000); 191 schedule_timeout_uninterruptible((HZ + 999) / 1000);
195 } 192 }
196 pt1_printk(KERN_ERR, pt1, "could not reset PCI\n"); 193 dev_err(&pt1->pdev->dev, "could not reset PCI\n");
197 return -EIO; 194 return -EIO;
198} 195}
199 196
@@ -207,7 +204,7 @@ static int pt1_reset_ram(struct pt1 *pt1)
207 return 0; 204 return 0;
208 schedule_timeout_uninterruptible((HZ + 999) / 1000); 205 schedule_timeout_uninterruptible((HZ + 999) / 1000);
209 } 206 }
210 pt1_printk(KERN_ERR, pt1, "could not reset RAM\n"); 207 dev_err(&pt1->pdev->dev, "could not reset RAM\n");
211 return -EIO; 208 return -EIO;
212} 209}
213 210
@@ -224,7 +221,7 @@ static int pt1_do_enable_ram(struct pt1 *pt1)
224 } 221 }
225 schedule_timeout_uninterruptible((HZ + 999) / 1000); 222 schedule_timeout_uninterruptible((HZ + 999) / 1000);
226 } 223 }
227 pt1_printk(KERN_ERR, pt1, "could not enable RAM\n"); 224 dev_err(&pt1->pdev->dev, "could not enable RAM\n");
228 return -EIO; 225 return -EIO;
229} 226}
230 227
diff --git a/drivers/media/pci/pt3/pt3.c b/drivers/media/pci/pt3/pt3.c
index 1fdeac11501a..7a37e8fe2ee2 100644
--- a/drivers/media/pci/pt3/pt3.c
+++ b/drivers/media/pci/pt3/pt3.c
@@ -255,7 +255,7 @@ static int pt3_fe_init(struct pt3_board *pt3)
255 pt3_i2c_reset(pt3); 255 pt3_i2c_reset(pt3);
256 ret = pt3_init_all_demods(pt3); 256 ret = pt3_init_all_demods(pt3);
257 if (ret < 0) { 257 if (ret < 0) {
258 dev_warn(&pt3->pdev->dev, "Failed to init demod chips."); 258 dev_warn(&pt3->pdev->dev, "Failed to init demod chips\n");
259 return ret; 259 return ret;
260 } 260 }
261 261
@@ -271,7 +271,7 @@ static int pt3_fe_init(struct pt3_board *pt3)
271 init0_ter, ARRAY_SIZE(init0_ter)); 271 init0_ter, ARRAY_SIZE(init0_ter));
272 if (ret < 0) { 272 if (ret < 0) {
273 dev_warn(&pt3->pdev->dev, 273 dev_warn(&pt3->pdev->dev,
274 "demod[%d] faild in init sequence0.", i); 274 "demod[%d] failed in init sequence0\n", i);
275 return ret; 275 return ret;
276 } 276 }
277 ret = fe->ops.init(fe); 277 ret = fe->ops.init(fe);
@@ -282,7 +282,7 @@ static int pt3_fe_init(struct pt3_board *pt3)
282 usleep_range(2000, 4000); 282 usleep_range(2000, 4000);
283 ret = pt3_set_tuner_power(pt3, true, false); 283 ret = pt3_set_tuner_power(pt3, true, false);
284 if (ret < 0) { 284 if (ret < 0) {
285 dev_warn(&pt3->pdev->dev, "Failed to control tuner module."); 285 dev_warn(&pt3->pdev->dev, "Failed to control tuner module\n");
286 return ret; 286 return ret;
287 } 287 }
288 288
@@ -297,7 +297,7 @@ static int pt3_fe_init(struct pt3_board *pt3)
297 cfg_ter, ARRAY_SIZE(cfg_ter)); 297 cfg_ter, ARRAY_SIZE(cfg_ter));
298 if (ret < 0) { 298 if (ret < 0) {
299 dev_warn(&pt3->pdev->dev, 299 dev_warn(&pt3->pdev->dev,
300 "demod[%d] faild in init sequence1.", i); 300 "demod[%d] failed in init sequence1\n", i);
301 return ret; 301 return ret;
302 } 302 }
303 } 303 }
@@ -311,19 +311,19 @@ static int pt3_fe_init(struct pt3_board *pt3)
311 ret = fe->ops.tuner_ops.init(fe); 311 ret = fe->ops.tuner_ops.init(fe);
312 if (ret < 0) { 312 if (ret < 0) {
313 dev_warn(&pt3->pdev->dev, 313 dev_warn(&pt3->pdev->dev,
314 "Failed to init SAT-tuner[%d].", i); 314 "Failed to init SAT-tuner[%d]\n", i);
315 return ret; 315 return ret;
316 } 316 }
317 } 317 }
318 ret = pt3_init_all_mxl301rf(pt3); 318 ret = pt3_init_all_mxl301rf(pt3);
319 if (ret < 0) { 319 if (ret < 0) {
320 dev_warn(&pt3->pdev->dev, "Failed to init TERR-tuners."); 320 dev_warn(&pt3->pdev->dev, "Failed to init TERR-tuners\n");
321 return ret; 321 return ret;
322 } 322 }
323 323
324 ret = pt3_set_tuner_power(pt3, true, true); 324 ret = pt3_set_tuner_power(pt3, true, true);
325 if (ret < 0) { 325 if (ret < 0) {
326 dev_warn(&pt3->pdev->dev, "Failed to control tuner module."); 326 dev_warn(&pt3->pdev->dev, "Failed to control tuner module\n");
327 return ret; 327 return ret;
328 } 328 }
329 329
@@ -344,7 +344,7 @@ static int pt3_fe_init(struct pt3_board *pt3)
344 } 344 }
345 if (ret < 0) { 345 if (ret < 0) {
346 dev_warn(&pt3->pdev->dev, 346 dev_warn(&pt3->pdev->dev,
347 "Failed in initial tuning of tuner[%d].", i); 347 "Failed in initial tuning of tuner[%d]\n", i);
348 return ret; 348 return ret;
349 } 349 }
350 } 350 }
@@ -366,7 +366,7 @@ static int pt3_fe_init(struct pt3_board *pt3)
366 fe->ops.set_lna = &pt3_set_lna; 366 fe->ops.set_lna = &pt3_set_lna;
367 } 367 }
368 if (i < PT3_NUM_FE) { 368 if (i < PT3_NUM_FE) {
369 dev_warn(&pt3->pdev->dev, "FE[%d] failed to standby.", i); 369 dev_warn(&pt3->pdev->dev, "FE[%d] failed to standby\n", i);
370 return ret; 370 return ret;
371 } 371 }
372 return 0; 372 return 0;
@@ -453,8 +453,8 @@ static int pt3_fetch_thread(void *data)
453 pt3_init_dmabuf(adap); 453 pt3_init_dmabuf(adap);
454 adap->num_discard = PT3_INITIAL_BUF_DROPS; 454 adap->num_discard = PT3_INITIAL_BUF_DROPS;
455 455
456 dev_dbg(adap->dvb_adap.device, 456 dev_dbg(adap->dvb_adap.device, "PT3: [%s] started\n",
457 "PT3: [%s] started.\n", adap->thread->comm); 457 adap->thread->comm);
458 set_freezable(); 458 set_freezable();
459 while (!kthread_freezable_should_stop(&was_frozen)) { 459 while (!kthread_freezable_should_stop(&was_frozen)) {
460 if (was_frozen) 460 if (was_frozen)
@@ -468,8 +468,8 @@ static int pt3_fetch_thread(void *data)
468 PT3_FETCH_DELAY_DELTA * NSEC_PER_MSEC, 468 PT3_FETCH_DELAY_DELTA * NSEC_PER_MSEC,
469 HRTIMER_MODE_REL); 469 HRTIMER_MODE_REL);
470 } 470 }
471 dev_dbg(adap->dvb_adap.device, 471 dev_dbg(adap->dvb_adap.device, "PT3: [%s] exited\n",
472 "PT3: [%s] exited.\n", adap->thread->comm); 472 adap->thread->comm);
473 adap->thread = NULL; 473 adap->thread = NULL;
474 return 0; 474 return 0;
475} 475}
@@ -485,8 +485,8 @@ static int pt3_start_streaming(struct pt3_adapter *adap)
485 int ret = PTR_ERR(thread); 485 int ret = PTR_ERR(thread);
486 486
487 dev_warn(adap->dvb_adap.device, 487 dev_warn(adap->dvb_adap.device,
488 "PT3 (adap:%d, dmx:%d): failed to start kthread.\n", 488 "PT3 (adap:%d, dmx:%d): failed to start kthread\n",
489 adap->dvb_adap.num, adap->dmxdev.dvbdev->id); 489 adap->dvb_adap.num, adap->dmxdev.dvbdev->id);
490 return ret; 490 return ret;
491 } 491 }
492 adap->thread = thread; 492 adap->thread = thread;
@@ -501,8 +501,8 @@ static int pt3_stop_streaming(struct pt3_adapter *adap)
501 ret = pt3_stop_dma(adap); 501 ret = pt3_stop_dma(adap);
502 if (ret) 502 if (ret)
503 dev_warn(adap->dvb_adap.device, 503 dev_warn(adap->dvb_adap.device,
504 "PT3: failed to stop streaming of adap:%d/FE:%d\n", 504 "PT3: failed to stop streaming of adap:%d/FE:%d\n",
505 adap->dvb_adap.num, adap->fe->id); 505 adap->dvb_adap.num, adap->fe->id);
506 506
507 /* kill the fetching thread */ 507 /* kill the fetching thread */
508 ret = kthread_stop(adap->thread); 508 ret = kthread_stop(adap->thread);
@@ -522,8 +522,8 @@ static int pt3_start_feed(struct dvb_demux_feed *feed)
522 return 0; 522 return 0;
523 if (adap->num_feeds != 1) { 523 if (adap->num_feeds != 1) {
524 dev_warn(adap->dvb_adap.device, 524 dev_warn(adap->dvb_adap.device,
525 "%s: unmatched start/stop_feed in adap:%i/dmx:%i.\n", 525 "%s: unmatched start/stop_feed in adap:%i/dmx:%i\n",
526 __func__, adap->dvb_adap.num, adap->dmxdev.dvbdev->id); 526 __func__, adap->dvb_adap.num, adap->dmxdev.dvbdev->id);
527 adap->num_feeds = 1; 527 adap->num_feeds = 1;
528 } 528 }
529 529
@@ -553,10 +553,9 @@ static int pt3_alloc_adapter(struct pt3_board *pt3, int index)
553 struct dvb_adapter *da; 553 struct dvb_adapter *da;
554 554
555 adap = kzalloc(sizeof(*adap), GFP_KERNEL); 555 adap = kzalloc(sizeof(*adap), GFP_KERNEL);
556 if (!adap) { 556 if (!adap)
557 dev_err(&pt3->pdev->dev, "failed to alloc mem for adapter.\n");
558 return -ENOMEM; 557 return -ENOMEM;
559 } 558
560 pt3->adaps[index] = adap; 559 pt3->adaps[index] = adap;
561 adap->adap_idx = index; 560 adap->adap_idx = index;
562 561
@@ -565,7 +564,7 @@ static int pt3_alloc_adapter(struct pt3_board *pt3, int index)
565 THIS_MODULE, &pt3->pdev->dev, adapter_nr); 564 THIS_MODULE, &pt3->pdev->dev, adapter_nr);
566 if (ret < 0) { 565 if (ret < 0) {
567 dev_err(&pt3->pdev->dev, 566 dev_err(&pt3->pdev->dev,
568 "failed to register adapter dev.\n"); 567 "failed to register adapter dev\n");
569 goto err_mem; 568 goto err_mem;
570 } 569 }
571 da = &adap->dvb_adap; 570 da = &adap->dvb_adap;
@@ -581,7 +580,7 @@ static int pt3_alloc_adapter(struct pt3_board *pt3, int index)
581 adap->demux.stop_feed = pt3_stop_feed; 580 adap->demux.stop_feed = pt3_stop_feed;
582 ret = dvb_dmx_init(&adap->demux); 581 ret = dvb_dmx_init(&adap->demux);
583 if (ret < 0) { 582 if (ret < 0) {
584 dev_err(&pt3->pdev->dev, "failed to init dmx dev.\n"); 583 dev_err(&pt3->pdev->dev, "failed to init dmx dev\n");
585 goto err_adap; 584 goto err_adap;
586 } 585 }
587 586
@@ -589,13 +588,13 @@ static int pt3_alloc_adapter(struct pt3_board *pt3, int index)
589 adap->dmxdev.demux = &adap->demux.dmx; 588 adap->dmxdev.demux = &adap->demux.dmx;
590 ret = dvb_dmxdev_init(&adap->dmxdev, da); 589 ret = dvb_dmxdev_init(&adap->dmxdev, da);
591 if (ret < 0) { 590 if (ret < 0) {
592 dev_err(&pt3->pdev->dev, "failed to init dmxdev.\n"); 591 dev_err(&pt3->pdev->dev, "failed to init dmxdev\n");
593 goto err_demux; 592 goto err_demux;
594 } 593 }
595 594
596 ret = pt3_alloc_dmabuf(adap); 595 ret = pt3_alloc_dmabuf(adap);
597 if (ret) { 596 if (ret) {
598 dev_err(&pt3->pdev->dev, "failed to alloc DMA buffers.\n"); 597 dev_err(&pt3->pdev->dev, "failed to alloc DMA buffers\n");
599 goto err_dmabuf; 598 goto err_dmabuf;
600 } 599 }
601 600
@@ -695,7 +694,7 @@ static int pt3_resume(struct device *dev)
695 dvb_frontend_resume(adap->fe); 694 dvb_frontend_resume(adap->fe);
696 ret = pt3_alloc_dmabuf(adap); 695 ret = pt3_alloc_dmabuf(adap);
697 if (ret) { 696 if (ret) {
698 dev_err(&pt3->pdev->dev, "failed to alloc DMA bufs.\n"); 697 dev_err(&pt3->pdev->dev, "failed to alloc DMA bufs\n");
699 continue; 698 continue;
700 } 699 }
701 if (adap->num_feeds > 0) 700 if (adap->num_feeds > 0)
@@ -753,15 +752,14 @@ static int pt3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
753 if (ret == 0) 752 if (ret == 0)
754 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); 753 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
755 else { 754 else {
756 dev_err(&pdev->dev, "Failed to set DMA mask.\n"); 755 dev_err(&pdev->dev, "Failed to set DMA mask\n");
757 goto err_release_regions; 756 goto err_release_regions;
758 } 757 }
759 dev_info(&pdev->dev, "Use 32bit DMA.\n"); 758 dev_info(&pdev->dev, "Use 32bit DMA\n");
760 } 759 }
761 760
762 pt3 = kzalloc(sizeof(*pt3), GFP_KERNEL); 761 pt3 = kzalloc(sizeof(*pt3), GFP_KERNEL);
763 if (!pt3) { 762 if (!pt3) {
764 dev_err(&pdev->dev, "Failed to alloc mem for this dev.\n");
765 ret = -ENOMEM; 763 ret = -ENOMEM;
766 goto err_release_regions; 764 goto err_release_regions;
767 } 765 }
@@ -771,15 +769,15 @@ static int pt3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
771 pt3->regs[0] = pci_ioremap_bar(pdev, 0); 769 pt3->regs[0] = pci_ioremap_bar(pdev, 0);
772 pt3->regs[1] = pci_ioremap_bar(pdev, 2); 770 pt3->regs[1] = pci_ioremap_bar(pdev, 2);
773 if (pt3->regs[0] == NULL || pt3->regs[1] == NULL) { 771 if (pt3->regs[0] == NULL || pt3->regs[1] == NULL) {
774 dev_err(&pdev->dev, "Failed to ioremap.\n"); 772 dev_err(&pdev->dev, "Failed to ioremap\n");
775 ret = -ENOMEM; 773 ret = -ENOMEM;
776 goto err_kfree; 774 goto err_kfree;
777 } 775 }
778 776
779 ver = ioread32(pt3->regs[0] + REG_VERSION); 777 ver = ioread32(pt3->regs[0] + REG_VERSION);
780 if ((ver >> 16) != 0x0301) { 778 if ((ver >> 16) != 0x0301) {
781 dev_warn(&pdev->dev, "PT%d, I/F-ver.:%d not supported", 779 dev_warn(&pdev->dev, "PT%d, I/F-ver.:%d not supported\n",
782 ver >> 24, (ver & 0x00ff0000) >> 16); 780 ver >> 24, (ver & 0x00ff0000) >> 16);
783 ret = -ENODEV; 781 ret = -ENODEV;
784 goto err_iounmap; 782 goto err_iounmap;
785 } 783 }
@@ -788,7 +786,6 @@ static int pt3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
788 786
789 pt3->i2c_buf = kmalloc(sizeof(*pt3->i2c_buf), GFP_KERNEL); 787 pt3->i2c_buf = kmalloc(sizeof(*pt3->i2c_buf), GFP_KERNEL);
790 if (pt3->i2c_buf == NULL) { 788 if (pt3->i2c_buf == NULL) {
791 dev_err(&pdev->dev, "Failed to alloc mem for i2c.\n");
792 ret = -ENOMEM; 789 ret = -ENOMEM;
793 goto err_iounmap; 790 goto err_iounmap;
794 } 791 }
@@ -801,7 +798,7 @@ static int pt3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
801 i2c_set_adapdata(i2c, pt3); 798 i2c_set_adapdata(i2c, pt3);
802 ret = i2c_add_adapter(i2c); 799 ret = i2c_add_adapter(i2c);
803 if (ret < 0) { 800 if (ret < 0) {
804 dev_err(&pdev->dev, "Failed to add i2c adapter.\n"); 801 dev_err(&pdev->dev, "Failed to add i2c adapter\n");
805 goto err_i2cbuf; 802 goto err_i2cbuf;
806 } 803 }
807 804
@@ -815,20 +812,20 @@ static int pt3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
815 break; 812 break;
816 } 813 }
817 if (i < PT3_NUM_FE) { 814 if (i < PT3_NUM_FE) {
818 dev_err(&pdev->dev, "Failed to create FE%d.\n", i); 815 dev_err(&pdev->dev, "Failed to create FE%d\n", i);
819 goto err_cleanup_adapters; 816 goto err_cleanup_adapters;
820 } 817 }
821 818
822 ret = pt3_fe_init(pt3); 819 ret = pt3_fe_init(pt3);
823 if (ret < 0) { 820 if (ret < 0) {
824 dev_err(&pdev->dev, "Failed to init frontends.\n"); 821 dev_err(&pdev->dev, "Failed to init frontends\n");
825 i = PT3_NUM_FE - 1; 822 i = PT3_NUM_FE - 1;
826 goto err_cleanup_adapters; 823 goto err_cleanup_adapters;
827 } 824 }
828 825
829 dev_info(&pdev->dev, 826 dev_info(&pdev->dev,
830 "successfully init'ed PT%d (fw:0x%02x, I/F:0x%02x).\n", 827 "successfully init'ed PT%d (fw:0x%02x, I/F:0x%02x)\n",
831 ver >> 24, (ver >> 8) & 0xff, (ver >> 16) & 0xff); 828 ver >> 24, (ver >> 8) & 0xff, (ver >> 16) & 0xff);
832 return 0; 829 return 0;
833 830
834err_cleanup_adapters: 831err_cleanup_adapters:
diff --git a/drivers/media/pci/saa7134/saa7134-core.c b/drivers/media/pci/saa7134/saa7134-core.c
index 236ed725f933..a349e964e0bc 100644
--- a/drivers/media/pci/saa7134/saa7134-core.c
+++ b/drivers/media/pci/saa7134/saa7134-core.c
@@ -1001,13 +1001,18 @@ static int saa7134_initdev(struct pci_dev *pci_dev,
1001 saa7134_board_init1(dev); 1001 saa7134_board_init1(dev);
1002 saa7134_hwinit1(dev); 1002 saa7134_hwinit1(dev);
1003 1003
1004 dev->alloc_ctx = vb2_dma_sg_init_ctx(&pci_dev->dev);
1005 if (IS_ERR(dev->alloc_ctx)) {
1006 err = PTR_ERR(dev->alloc_ctx);
1007 goto fail3;
1008 }
1004 /* get irq */ 1009 /* get irq */
1005 err = request_irq(pci_dev->irq, saa7134_irq, 1010 err = request_irq(pci_dev->irq, saa7134_irq,
1006 IRQF_SHARED, dev->name, dev); 1011 IRQF_SHARED, dev->name, dev);
1007 if (err < 0) { 1012 if (err < 0) {
1008 printk(KERN_ERR "%s: can't get IRQ %d\n", 1013 printk(KERN_ERR "%s: can't get IRQ %d\n",
1009 dev->name,pci_dev->irq); 1014 dev->name,pci_dev->irq);
1010 goto fail3; 1015 goto fail4;
1011 } 1016 }
1012 1017
1013 /* wait a bit, register i2c bus */ 1018 /* wait a bit, register i2c bus */
@@ -1065,7 +1070,7 @@ static int saa7134_initdev(struct pci_dev *pci_dev,
1065 if (err < 0) { 1070 if (err < 0) {
1066 printk(KERN_INFO "%s: can't register video device\n", 1071 printk(KERN_INFO "%s: can't register video device\n",
1067 dev->name); 1072 dev->name);
1068 goto fail4; 1073 goto fail5;
1069 } 1074 }
1070 printk(KERN_INFO "%s: registered device %s [v4l2]\n", 1075 printk(KERN_INFO "%s: registered device %s [v4l2]\n",
1071 dev->name, video_device_node_name(dev->video_dev)); 1076 dev->name, video_device_node_name(dev->video_dev));
@@ -1078,7 +1083,7 @@ static int saa7134_initdev(struct pci_dev *pci_dev,
1078 err = video_register_device(dev->vbi_dev,VFL_TYPE_VBI, 1083 err = video_register_device(dev->vbi_dev,VFL_TYPE_VBI,
1079 vbi_nr[dev->nr]); 1084 vbi_nr[dev->nr]);
1080 if (err < 0) 1085 if (err < 0)
1081 goto fail4; 1086 goto fail5;
1082 printk(KERN_INFO "%s: registered device %s\n", 1087 printk(KERN_INFO "%s: registered device %s\n",
1083 dev->name, video_device_node_name(dev->vbi_dev)); 1088 dev->name, video_device_node_name(dev->vbi_dev));
1084 1089
@@ -1089,7 +1094,7 @@ static int saa7134_initdev(struct pci_dev *pci_dev,
1089 err = video_register_device(dev->radio_dev,VFL_TYPE_RADIO, 1094 err = video_register_device(dev->radio_dev,VFL_TYPE_RADIO,
1090 radio_nr[dev->nr]); 1095 radio_nr[dev->nr]);
1091 if (err < 0) 1096 if (err < 0)
1092 goto fail4; 1097 goto fail5;
1093 printk(KERN_INFO "%s: registered device %s\n", 1098 printk(KERN_INFO "%s: registered device %s\n",
1094 dev->name, video_device_node_name(dev->radio_dev)); 1099 dev->name, video_device_node_name(dev->radio_dev));
1095 } 1100 }
@@ -1103,10 +1108,12 @@ static int saa7134_initdev(struct pci_dev *pci_dev,
1103 request_submodules(dev); 1108 request_submodules(dev);
1104 return 0; 1109 return 0;
1105 1110
1106 fail4: 1111 fail5:
1107 saa7134_unregister_video(dev); 1112 saa7134_unregister_video(dev);
1108 saa7134_i2c_unregister(dev); 1113 saa7134_i2c_unregister(dev);
1109 free_irq(pci_dev->irq, dev); 1114 free_irq(pci_dev->irq, dev);
1115 fail4:
1116 vb2_dma_sg_cleanup_ctx(dev->alloc_ctx);
1110 fail3: 1117 fail3:
1111 saa7134_hwfini(dev); 1118 saa7134_hwfini(dev);
1112 iounmap(dev->lmmio); 1119 iounmap(dev->lmmio);
@@ -1173,6 +1180,7 @@ static void saa7134_finidev(struct pci_dev *pci_dev)
1173 1180
1174 /* release resources */ 1181 /* release resources */
1175 free_irq(pci_dev->irq, dev); 1182 free_irq(pci_dev->irq, dev);
1183 vb2_dma_sg_cleanup_ctx(dev->alloc_ctx);
1176 iounmap(dev->lmmio); 1184 iounmap(dev->lmmio);
1177 release_mem_region(pci_resource_start(pci_dev,0), 1185 release_mem_region(pci_resource_start(pci_dev,0),
1178 pci_resource_len(pci_dev,0)); 1186 pci_resource_len(pci_dev,0));
diff --git a/drivers/media/pci/saa7134/saa7134-empress.c b/drivers/media/pci/saa7134/saa7134-empress.c
index e4ea85fd1b23..594dc3ad4750 100644
--- a/drivers/media/pci/saa7134/saa7134-empress.c
+++ b/drivers/media/pci/saa7134/saa7134-empress.c
@@ -96,7 +96,6 @@ static struct vb2_ops saa7134_empress_qops = {
96 .queue_setup = saa7134_ts_queue_setup, 96 .queue_setup = saa7134_ts_queue_setup,
97 .buf_init = saa7134_ts_buffer_init, 97 .buf_init = saa7134_ts_buffer_init,
98 .buf_prepare = saa7134_ts_buffer_prepare, 98 .buf_prepare = saa7134_ts_buffer_prepare,
99 .buf_finish = saa7134_ts_buffer_finish,
100 .buf_queue = saa7134_vb2_buffer_queue, 99 .buf_queue = saa7134_vb2_buffer_queue,
101 .wait_prepare = vb2_ops_wait_prepare, 100 .wait_prepare = vb2_ops_wait_prepare,
102 .wait_finish = vb2_ops_wait_finish, 101 .wait_finish = vb2_ops_wait_finish,
@@ -140,7 +139,7 @@ static int empress_s_fmt_vid_cap(struct file *file, void *priv,
140 struct saa7134_dev *dev = video_drvdata(file); 139 struct saa7134_dev *dev = video_drvdata(file);
141 struct v4l2_mbus_framefmt mbus_fmt; 140 struct v4l2_mbus_framefmt mbus_fmt;
142 141
143 v4l2_fill_mbus_format(&mbus_fmt, &f->fmt.pix, V4L2_MBUS_FMT_FIXED); 142 v4l2_fill_mbus_format(&mbus_fmt, &f->fmt.pix, MEDIA_BUS_FMT_FIXED);
144 saa_call_all(dev, video, s_mbus_fmt, &mbus_fmt); 143 saa_call_all(dev, video, s_mbus_fmt, &mbus_fmt);
145 v4l2_fill_pix_format(&f->fmt.pix, &mbus_fmt); 144 v4l2_fill_pix_format(&f->fmt.pix, &mbus_fmt);
146 145
@@ -157,7 +156,7 @@ static int empress_try_fmt_vid_cap(struct file *file, void *priv,
157 struct saa7134_dev *dev = video_drvdata(file); 156 struct saa7134_dev *dev = video_drvdata(file);
158 struct v4l2_mbus_framefmt mbus_fmt; 157 struct v4l2_mbus_framefmt mbus_fmt;
159 158
160 v4l2_fill_mbus_format(&mbus_fmt, &f->fmt.pix, V4L2_MBUS_FMT_FIXED); 159 v4l2_fill_mbus_format(&mbus_fmt, &f->fmt.pix, MEDIA_BUS_FMT_FIXED);
161 saa_call_all(dev, video, try_mbus_fmt, &mbus_fmt); 160 saa_call_all(dev, video, try_mbus_fmt, &mbus_fmt);
162 v4l2_fill_pix_format(&f->fmt.pix, &mbus_fmt); 161 v4l2_fill_pix_format(&f->fmt.pix, &mbus_fmt);
163 162
diff --git a/drivers/media/pci/saa7134/saa7134-ts.c b/drivers/media/pci/saa7134/saa7134-ts.c
index bd25323bd947..2709b83d57b1 100644
--- a/drivers/media/pci/saa7134/saa7134-ts.c
+++ b/drivers/media/pci/saa7134/saa7134-ts.c
@@ -94,7 +94,6 @@ int saa7134_ts_buffer_prepare(struct vb2_buffer *vb2)
94 struct saa7134_buf *buf = container_of(vb2, struct saa7134_buf, vb2); 94 struct saa7134_buf *buf = container_of(vb2, struct saa7134_buf, vb2);
95 struct sg_table *dma = vb2_dma_sg_plane_desc(vb2, 0); 95 struct sg_table *dma = vb2_dma_sg_plane_desc(vb2, 0);
96 unsigned int lines, llength, size; 96 unsigned int lines, llength, size;
97 int ret;
98 97
99 dprintk("buffer_prepare [%p]\n", buf); 98 dprintk("buffer_prepare [%p]\n", buf);
100 99
@@ -108,25 +107,11 @@ int saa7134_ts_buffer_prepare(struct vb2_buffer *vb2)
108 vb2_set_plane_payload(vb2, 0, size); 107 vb2_set_plane_payload(vb2, 0, size);
109 vb2->v4l2_buf.field = dev->field; 108 vb2->v4l2_buf.field = dev->field;
110 109
111 ret = dma_map_sg(&dev->pci->dev, dma->sgl, dma->nents, DMA_FROM_DEVICE);
112 if (!ret)
113 return -EIO;
114 return saa7134_pgtable_build(dev->pci, &dmaq->pt, dma->sgl, dma->nents, 110 return saa7134_pgtable_build(dev->pci, &dmaq->pt, dma->sgl, dma->nents,
115 saa7134_buffer_startpage(buf)); 111 saa7134_buffer_startpage(buf));
116} 112}
117EXPORT_SYMBOL_GPL(saa7134_ts_buffer_prepare); 113EXPORT_SYMBOL_GPL(saa7134_ts_buffer_prepare);
118 114
119void saa7134_ts_buffer_finish(struct vb2_buffer *vb2)
120{
121 struct saa7134_dmaqueue *dmaq = vb2->vb2_queue->drv_priv;
122 struct saa7134_dev *dev = dmaq->dev;
123 struct saa7134_buf *buf = container_of(vb2, struct saa7134_buf, vb2);
124 struct sg_table *dma = vb2_dma_sg_plane_desc(&buf->vb2, 0);
125
126 dma_unmap_sg(&dev->pci->dev, dma->sgl, dma->nents, DMA_FROM_DEVICE);
127}
128EXPORT_SYMBOL_GPL(saa7134_ts_buffer_finish);
129
130int saa7134_ts_queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt, 115int saa7134_ts_queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
131 unsigned int *nbuffers, unsigned int *nplanes, 116 unsigned int *nbuffers, unsigned int *nplanes,
132 unsigned int sizes[], void *alloc_ctxs[]) 117 unsigned int sizes[], void *alloc_ctxs[])
@@ -142,6 +127,7 @@ int saa7134_ts_queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
142 *nbuffers = 3; 127 *nbuffers = 3;
143 *nplanes = 1; 128 *nplanes = 1;
144 sizes[0] = size; 129 sizes[0] = size;
130 alloc_ctxs[0] = dev->alloc_ctx;
145 return 0; 131 return 0;
146} 132}
147EXPORT_SYMBOL_GPL(saa7134_ts_queue_setup); 133EXPORT_SYMBOL_GPL(saa7134_ts_queue_setup);
@@ -187,7 +173,6 @@ struct vb2_ops saa7134_ts_qops = {
187 .queue_setup = saa7134_ts_queue_setup, 173 .queue_setup = saa7134_ts_queue_setup,
188 .buf_init = saa7134_ts_buffer_init, 174 .buf_init = saa7134_ts_buffer_init,
189 .buf_prepare = saa7134_ts_buffer_prepare, 175 .buf_prepare = saa7134_ts_buffer_prepare,
190 .buf_finish = saa7134_ts_buffer_finish,
191 .buf_queue = saa7134_vb2_buffer_queue, 176 .buf_queue = saa7134_vb2_buffer_queue,
192 .wait_prepare = vb2_ops_wait_prepare, 177 .wait_prepare = vb2_ops_wait_prepare,
193 .wait_finish = vb2_ops_wait_finish, 178 .wait_finish = vb2_ops_wait_finish,
diff --git a/drivers/media/pci/saa7134/saa7134-vbi.c b/drivers/media/pci/saa7134/saa7134-vbi.c
index 4f0b1012e4f3..5306e549e526 100644
--- a/drivers/media/pci/saa7134/saa7134-vbi.c
+++ b/drivers/media/pci/saa7134/saa7134-vbi.c
@@ -120,7 +120,6 @@ static int buffer_prepare(struct vb2_buffer *vb2)
120 struct saa7134_buf *buf = container_of(vb2, struct saa7134_buf, vb2); 120 struct saa7134_buf *buf = container_of(vb2, struct saa7134_buf, vb2);
121 struct sg_table *dma = vb2_dma_sg_plane_desc(&buf->vb2, 0); 121 struct sg_table *dma = vb2_dma_sg_plane_desc(&buf->vb2, 0);
122 unsigned int size; 122 unsigned int size;
123 int ret;
124 123
125 if (dma->sgl->offset) { 124 if (dma->sgl->offset) {
126 pr_err("The buffer is not page-aligned\n"); 125 pr_err("The buffer is not page-aligned\n");
@@ -132,9 +131,6 @@ static int buffer_prepare(struct vb2_buffer *vb2)
132 131
133 vb2_set_plane_payload(vb2, 0, size); 132 vb2_set_plane_payload(vb2, 0, size);
134 133
135 ret = dma_map_sg(&dev->pci->dev, dma->sgl, dma->nents, DMA_FROM_DEVICE);
136 if (!ret)
137 return -EIO;
138 return saa7134_pgtable_build(dev->pci, &dmaq->pt, dma->sgl, dma->nents, 134 return saa7134_pgtable_build(dev->pci, &dmaq->pt, dma->sgl, dma->nents,
139 saa7134_buffer_startpage(buf)); 135 saa7134_buffer_startpage(buf));
140} 136}
@@ -156,6 +152,7 @@ static int queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
156 *nbuffers = saa7134_buffer_count(size, *nbuffers); 152 *nbuffers = saa7134_buffer_count(size, *nbuffers);
157 *nplanes = 1; 153 *nplanes = 1;
158 sizes[0] = size; 154 sizes[0] = size;
155 alloc_ctxs[0] = dev->alloc_ctx;
159 return 0; 156 return 0;
160} 157}
161 158
@@ -169,21 +166,10 @@ static int buffer_init(struct vb2_buffer *vb2)
169 return 0; 166 return 0;
170} 167}
171 168
172static void buffer_finish(struct vb2_buffer *vb2)
173{
174 struct saa7134_dmaqueue *dmaq = vb2->vb2_queue->drv_priv;
175 struct saa7134_dev *dev = dmaq->dev;
176 struct saa7134_buf *buf = container_of(vb2, struct saa7134_buf, vb2);
177 struct sg_table *dma = vb2_dma_sg_plane_desc(&buf->vb2, 0);
178
179 dma_unmap_sg(&dev->pci->dev, dma->sgl, dma->nents, DMA_FROM_DEVICE);
180}
181
182struct vb2_ops saa7134_vbi_qops = { 169struct vb2_ops saa7134_vbi_qops = {
183 .queue_setup = queue_setup, 170 .queue_setup = queue_setup,
184 .buf_init = buffer_init, 171 .buf_init = buffer_init,
185 .buf_prepare = buffer_prepare, 172 .buf_prepare = buffer_prepare,
186 .buf_finish = buffer_finish,
187 .buf_queue = saa7134_vb2_buffer_queue, 173 .buf_queue = saa7134_vb2_buffer_queue,
188 .wait_prepare = vb2_ops_wait_prepare, 174 .wait_prepare = vb2_ops_wait_prepare,
189 .wait_finish = vb2_ops_wait_finish, 175 .wait_finish = vb2_ops_wait_finish,
diff --git a/drivers/media/pci/saa7134/saa7134-video.c b/drivers/media/pci/saa7134/saa7134-video.c
index fc4a427cb51f..701b52f34689 100644
--- a/drivers/media/pci/saa7134/saa7134-video.c
+++ b/drivers/media/pci/saa7134/saa7134-video.c
@@ -883,7 +883,6 @@ static int buffer_prepare(struct vb2_buffer *vb2)
883 struct saa7134_buf *buf = container_of(vb2, struct saa7134_buf, vb2); 883 struct saa7134_buf *buf = container_of(vb2, struct saa7134_buf, vb2);
884 struct sg_table *dma = vb2_dma_sg_plane_desc(&buf->vb2, 0); 884 struct sg_table *dma = vb2_dma_sg_plane_desc(&buf->vb2, 0);
885 unsigned int size; 885 unsigned int size;
886 int ret;
887 886
888 if (dma->sgl->offset) { 887 if (dma->sgl->offset) {
889 pr_err("The buffer is not page-aligned\n"); 888 pr_err("The buffer is not page-aligned\n");
@@ -896,23 +895,10 @@ static int buffer_prepare(struct vb2_buffer *vb2)
896 vb2_set_plane_payload(vb2, 0, size); 895 vb2_set_plane_payload(vb2, 0, size);
897 vb2->v4l2_buf.field = dev->field; 896 vb2->v4l2_buf.field = dev->field;
898 897
899 ret = dma_map_sg(&dev->pci->dev, dma->sgl, dma->nents, DMA_FROM_DEVICE);
900 if (!ret)
901 return -EIO;
902 return saa7134_pgtable_build(dev->pci, &dmaq->pt, dma->sgl, dma->nents, 898 return saa7134_pgtable_build(dev->pci, &dmaq->pt, dma->sgl, dma->nents,
903 saa7134_buffer_startpage(buf)); 899 saa7134_buffer_startpage(buf));
904} 900}
905 901
906static void buffer_finish(struct vb2_buffer *vb2)
907{
908 struct saa7134_dmaqueue *dmaq = vb2->vb2_queue->drv_priv;
909 struct saa7134_dev *dev = dmaq->dev;
910 struct saa7134_buf *buf = container_of(vb2, struct saa7134_buf, vb2);
911 struct sg_table *dma = vb2_dma_sg_plane_desc(&buf->vb2, 0);
912
913 dma_unmap_sg(&dev->pci->dev, dma->sgl, dma->nents, DMA_FROM_DEVICE);
914}
915
916static int queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt, 902static int queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
917 unsigned int *nbuffers, unsigned int *nplanes, 903 unsigned int *nbuffers, unsigned int *nplanes,
918 unsigned int sizes[], void *alloc_ctxs[]) 904 unsigned int sizes[], void *alloc_ctxs[])
@@ -932,6 +918,7 @@ static int queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
932 *nbuffers = saa7134_buffer_count(size, *nbuffers); 918 *nbuffers = saa7134_buffer_count(size, *nbuffers);
933 *nplanes = 1; 919 *nplanes = 1;
934 sizes[0] = size; 920 sizes[0] = size;
921 alloc_ctxs[0] = dev->alloc_ctx;
935 return 0; 922 return 0;
936} 923}
937 924
@@ -1004,7 +991,6 @@ static struct vb2_ops vb2_qops = {
1004 .queue_setup = queue_setup, 991 .queue_setup = queue_setup,
1005 .buf_init = buffer_init, 992 .buf_init = buffer_init,
1006 .buf_prepare = buffer_prepare, 993 .buf_prepare = buffer_prepare,
1007 .buf_finish = buffer_finish,
1008 .buf_queue = saa7134_vb2_buffer_queue, 994 .buf_queue = saa7134_vb2_buffer_queue,
1009 .wait_prepare = vb2_ops_wait_prepare, 995 .wait_prepare = vb2_ops_wait_prepare,
1010 .wait_finish = vb2_ops_wait_finish, 996 .wait_finish = vb2_ops_wait_finish,
diff --git a/drivers/media/pci/saa7134/saa7134.h b/drivers/media/pci/saa7134/saa7134.h
index 1a82dd07205b..8bf0553b8d2f 100644
--- a/drivers/media/pci/saa7134/saa7134.h
+++ b/drivers/media/pci/saa7134/saa7134.h
@@ -588,6 +588,7 @@ struct saa7134_dev {
588 588
589 589
590 /* video+ts+vbi capture */ 590 /* video+ts+vbi capture */
591 void *alloc_ctx;
591 struct saa7134_dmaqueue video_q; 592 struct saa7134_dmaqueue video_q;
592 struct vb2_queue video_vbq; 593 struct vb2_queue video_vbq;
593 struct saa7134_dmaqueue vbi_q; 594 struct saa7134_dmaqueue vbi_q;
@@ -814,7 +815,6 @@ void saa7134_video_fini(struct saa7134_dev *dev);
814 815
815int saa7134_ts_buffer_init(struct vb2_buffer *vb2); 816int saa7134_ts_buffer_init(struct vb2_buffer *vb2);
816int saa7134_ts_buffer_prepare(struct vb2_buffer *vb2); 817int saa7134_ts_buffer_prepare(struct vb2_buffer *vb2);
817void saa7134_ts_buffer_finish(struct vb2_buffer *vb2);
818int saa7134_ts_queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt, 818int saa7134_ts_queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
819 unsigned int *nbuffers, unsigned int *nplanes, 819 unsigned int *nbuffers, unsigned int *nplanes,
820 unsigned int sizes[], void *alloc_ctxs[]); 820 unsigned int sizes[], void *alloc_ctxs[]);
diff --git a/drivers/media/pci/saa7164/saa7164-buffer.c b/drivers/media/pci/saa7164/saa7164-buffer.c
index 66696fa8341d..9bd1f73f82da 100644
--- a/drivers/media/pci/saa7164/saa7164-buffer.c
+++ b/drivers/media/pci/saa7164/saa7164-buffer.c
@@ -130,9 +130,9 @@ struct saa7164_buffer *saa7164_buffer_alloc(struct saa7164_port *port,
130 goto fail2; 130 goto fail2;
131 131
132 /* init the buffers to a known pattern, easier during debugging */ 132 /* init the buffers to a known pattern, easier during debugging */
133 memset_io(buf->cpu, 0xff, buf->pci_size); 133 memset(buf->cpu, 0xff, buf->pci_size);
134 buf->crc = crc32(0, buf->cpu, buf->actual_size); 134 buf->crc = crc32(0, buf->cpu, buf->actual_size);
135 memset_io(buf->pt_cpu, 0xff, buf->pt_size); 135 memset(buf->pt_cpu, 0xff, buf->pt_size);
136 136
137 dprintk(DBGLVL_BUF, "%s() allocated buffer @ 0x%p (%d pageptrs)\n", 137 dprintk(DBGLVL_BUF, "%s() allocated buffer @ 0x%p (%d pageptrs)\n",
138 __func__, buf, params->numpagetables); 138 __func__, buf, params->numpagetables);
diff --git a/drivers/media/pci/saa7164/saa7164-bus.c b/drivers/media/pci/saa7164/saa7164-bus.c
index 5f6f3094c44e..6c73f5b155f6 100644
--- a/drivers/media/pci/saa7164/saa7164-bus.c
+++ b/drivers/media/pci/saa7164/saa7164-bus.c
@@ -33,12 +33,12 @@ int saa7164_bus_setup(struct saa7164_dev *dev)
33 b->Type = TYPE_BUS_PCIe; 33 b->Type = TYPE_BUS_PCIe;
34 b->m_wMaxReqSize = SAA_DEVICE_MAXREQUESTSIZE; 34 b->m_wMaxReqSize = SAA_DEVICE_MAXREQUESTSIZE;
35 35
36 b->m_pdwSetRing = (u8 *)(dev->bmmio + 36 b->m_pdwSetRing = (u8 __iomem *)(dev->bmmio +
37 ((u32)dev->busdesc.CommandRing)); 37 ((u32)dev->busdesc.CommandRing));
38 38
39 b->m_dwSizeSetRing = SAA_DEVICE_BUFFERBLOCKSIZE; 39 b->m_dwSizeSetRing = SAA_DEVICE_BUFFERBLOCKSIZE;
40 40
41 b->m_pdwGetRing = (u8 *)(dev->bmmio + 41 b->m_pdwGetRing = (u8 __iomem *)(dev->bmmio +
42 ((u32)dev->busdesc.ResponseRing)); 42 ((u32)dev->busdesc.ResponseRing));
43 43
44 b->m_dwSizeGetRing = SAA_DEVICE_BUFFERBLOCKSIZE; 44 b->m_dwSizeGetRing = SAA_DEVICE_BUFFERBLOCKSIZE;
@@ -138,6 +138,7 @@ int saa7164_bus_set(struct saa7164_dev *dev, struct tmComResInfo* msg,
138 u32 bytes_to_write, free_write_space, timeout, curr_srp, curr_swp; 138 u32 bytes_to_write, free_write_space, timeout, curr_srp, curr_swp;
139 u32 new_swp, space_rem; 139 u32 new_swp, space_rem;
140 int ret = SAA_ERR_BAD_PARAMETER; 140 int ret = SAA_ERR_BAD_PARAMETER;
141 u16 size;
141 142
142 if (!msg) { 143 if (!msg) {
143 printk(KERN_ERR "%s() !msg\n", __func__); 144 printk(KERN_ERR "%s() !msg\n", __func__);
@@ -148,10 +149,6 @@ int saa7164_bus_set(struct saa7164_dev *dev, struct tmComResInfo* msg,
148 149
149 saa7164_bus_verify(dev); 150 saa7164_bus_verify(dev);
150 151
151 msg->size = cpu_to_le16(msg->size);
152 msg->command = cpu_to_le32(msg->command);
153 msg->controlselector = cpu_to_le16(msg->controlselector);
154
155 if (msg->size > dev->bus.m_wMaxReqSize) { 152 if (msg->size > dev->bus.m_wMaxReqSize) {
156 printk(KERN_ERR "%s() Exceeded dev->bus.m_wMaxReqSize\n", 153 printk(KERN_ERR "%s() Exceeded dev->bus.m_wMaxReqSize\n",
157 __func__); 154 __func__);
@@ -169,8 +166,8 @@ int saa7164_bus_set(struct saa7164_dev *dev, struct tmComResInfo* msg,
169 bytes_to_write = sizeof(*msg) + msg->size; 166 bytes_to_write = sizeof(*msg) + msg->size;
170 free_write_space = 0; 167 free_write_space = 0;
171 timeout = SAA_BUS_TIMEOUT; 168 timeout = SAA_BUS_TIMEOUT;
172 curr_srp = le32_to_cpu(saa7164_readl(bus->m_dwSetReadPos)); 169 curr_srp = saa7164_readl(bus->m_dwSetReadPos);
173 curr_swp = le32_to_cpu(saa7164_readl(bus->m_dwSetWritePos)); 170 curr_swp = saa7164_readl(bus->m_dwSetWritePos);
174 171
175 /* Deal with ring wrapping issues */ 172 /* Deal with ring wrapping issues */
176 if (curr_srp > curr_swp) 173 if (curr_srp > curr_swp)
@@ -203,7 +200,7 @@ int saa7164_bus_set(struct saa7164_dev *dev, struct tmComResInfo* msg,
203 mdelay(1); 200 mdelay(1);
204 201
205 /* Check the space usage again */ 202 /* Check the space usage again */
206 curr_srp = le32_to_cpu(saa7164_readl(bus->m_dwSetReadPos)); 203 curr_srp = saa7164_readl(bus->m_dwSetReadPos);
207 204
208 /* Deal with ring wrapping issues */ 205 /* Deal with ring wrapping issues */
209 if (curr_srp > curr_swp) 206 if (curr_srp > curr_swp)
@@ -223,6 +220,16 @@ int saa7164_bus_set(struct saa7164_dev *dev, struct tmComResInfo* msg,
223 dprintk(DBGLVL_BUS, "%s() bus->m_dwSizeSetRing = %x\n", __func__, 220 dprintk(DBGLVL_BUS, "%s() bus->m_dwSizeSetRing = %x\n", __func__,
224 bus->m_dwSizeSetRing); 221 bus->m_dwSizeSetRing);
225 222
223 /*
224 * Make a copy of msg->size before it is converted to le16 since it is
225 * used in the code below.
226 */
227 size = msg->size;
228 /* Convert to le16/le32 */
229 msg->size = (__force u16)cpu_to_le16(msg->size);
230 msg->command = (__force u32)cpu_to_le32(msg->command);
231 msg->controlselector = (__force u16)cpu_to_le16(msg->controlselector);
232
226 /* Mental Note: line 462 tmmhComResBusPCIe.cpp */ 233 /* Mental Note: line 462 tmmhComResBusPCIe.cpp */
227 234
228 /* Check if we're going to wrap again */ 235 /* Check if we're going to wrap again */
@@ -243,28 +250,28 @@ int saa7164_bus_set(struct saa7164_dev *dev, struct tmComResInfo* msg,
243 dprintk(DBGLVL_BUS, "%s() tr4\n", __func__); 250 dprintk(DBGLVL_BUS, "%s() tr4\n", __func__);
244 251
245 /* Split the msg into pieces as the ring wraps */ 252 /* Split the msg into pieces as the ring wraps */
246 memcpy(bus->m_pdwSetRing + curr_swp, msg, space_rem); 253 memcpy_toio(bus->m_pdwSetRing + curr_swp, msg, space_rem);
247 memcpy(bus->m_pdwSetRing, (u8 *)msg + space_rem, 254 memcpy_toio(bus->m_pdwSetRing, (u8 *)msg + space_rem,
248 sizeof(*msg) - space_rem); 255 sizeof(*msg) - space_rem);
249 256
250 memcpy(bus->m_pdwSetRing + sizeof(*msg) - space_rem, 257 memcpy_toio(bus->m_pdwSetRing + sizeof(*msg) - space_rem,
251 buf, msg->size); 258 buf, size);
252 259
253 } else if (space_rem == sizeof(*msg)) { 260 } else if (space_rem == sizeof(*msg)) {
254 dprintk(DBGLVL_BUS, "%s() tr5\n", __func__); 261 dprintk(DBGLVL_BUS, "%s() tr5\n", __func__);
255 262
256 /* Additional data at the beginning of the ring */ 263 /* Additional data at the beginning of the ring */
257 memcpy(bus->m_pdwSetRing + curr_swp, msg, sizeof(*msg)); 264 memcpy_toio(bus->m_pdwSetRing + curr_swp, msg, sizeof(*msg));
258 memcpy(bus->m_pdwSetRing, buf, msg->size); 265 memcpy_toio(bus->m_pdwSetRing, buf, size);
259 266
260 } else { 267 } else {
261 /* Additional data wraps around the ring */ 268 /* Additional data wraps around the ring */
262 memcpy(bus->m_pdwSetRing + curr_swp, msg, sizeof(*msg)); 269 memcpy_toio(bus->m_pdwSetRing + curr_swp, msg, sizeof(*msg));
263 if (msg->size > 0) { 270 if (size > 0) {
264 memcpy(bus->m_pdwSetRing + curr_swp + 271 memcpy_toio(bus->m_pdwSetRing + curr_swp +
265 sizeof(*msg), buf, space_rem - 272 sizeof(*msg), buf, space_rem -
266 sizeof(*msg)); 273 sizeof(*msg));
267 memcpy(bus->m_pdwSetRing, (u8 *)buf + 274 memcpy_toio(bus->m_pdwSetRing, (u8 *)buf +
268 space_rem - sizeof(*msg), 275 space_rem - sizeof(*msg),
269 bytes_to_write - space_rem); 276 bytes_to_write - space_rem);
270 } 277 }
@@ -276,15 +283,20 @@ int saa7164_bus_set(struct saa7164_dev *dev, struct tmComResInfo* msg,
276 dprintk(DBGLVL_BUS, "%s() tr6\n", __func__); 283 dprintk(DBGLVL_BUS, "%s() tr6\n", __func__);
277 284
278 /* The ring buffer doesn't wrap, two simple copies */ 285 /* The ring buffer doesn't wrap, two simple copies */
279 memcpy(bus->m_pdwSetRing + curr_swp, msg, sizeof(*msg)); 286 memcpy_toio(bus->m_pdwSetRing + curr_swp, msg, sizeof(*msg));
280 memcpy(bus->m_pdwSetRing + curr_swp + sizeof(*msg), buf, 287 memcpy_toio(bus->m_pdwSetRing + curr_swp + sizeof(*msg), buf,
281 msg->size); 288 size);
282 } 289 }
283 290
284 dprintk(DBGLVL_BUS, "%s() new_swp = %x\n", __func__, new_swp); 291 dprintk(DBGLVL_BUS, "%s() new_swp = %x\n", __func__, new_swp);
285 292
286 /* Update the bus write position */ 293 /* Update the bus write position */
287 saa7164_writel(bus->m_dwSetWritePos, cpu_to_le32(new_swp)); 294 saa7164_writel(bus->m_dwSetWritePos, new_swp);
295
296 /* Convert back to cpu after writing the msg to the ringbuffer. */
297 msg->size = le16_to_cpu((__force __le16)msg->size);
298 msg->command = le32_to_cpu((__force __le32)msg->command);
299 msg->controlselector = le16_to_cpu((__force __le16)msg->controlselector);
288 ret = SAA_OK; 300 ret = SAA_OK;
289 301
290out: 302out:
@@ -336,8 +348,8 @@ int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg,
336 /* Peek the bus to see if a msg exists, if it's not what we're expecting 348 /* Peek the bus to see if a msg exists, if it's not what we're expecting
337 * then return cleanly else read the message from the bus. 349 * then return cleanly else read the message from the bus.
338 */ 350 */
339 curr_gwp = le32_to_cpu(saa7164_readl(bus->m_dwGetWritePos)); 351 curr_gwp = saa7164_readl(bus->m_dwGetWritePos);
340 curr_grp = le32_to_cpu(saa7164_readl(bus->m_dwGetReadPos)); 352 curr_grp = saa7164_readl(bus->m_dwGetReadPos);
341 353
342 if (curr_gwp == curr_grp) { 354 if (curr_gwp == curr_grp) {
343 ret = SAA_ERR_EMPTY; 355 ret = SAA_ERR_EMPTY;
@@ -369,14 +381,18 @@ int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg,
369 new_grp -= bus->m_dwSizeGetRing; 381 new_grp -= bus->m_dwSizeGetRing;
370 space_rem = bus->m_dwSizeGetRing - curr_grp; 382 space_rem = bus->m_dwSizeGetRing - curr_grp;
371 383
372 memcpy(&msg_tmp, bus->m_pdwGetRing + curr_grp, space_rem); 384 memcpy_fromio(&msg_tmp, bus->m_pdwGetRing + curr_grp, space_rem);
373 memcpy((u8 *)&msg_tmp + space_rem, bus->m_pdwGetRing, 385 memcpy_fromio((u8 *)&msg_tmp + space_rem, bus->m_pdwGetRing,
374 bytes_to_read - space_rem); 386 bytes_to_read - space_rem);
375 387
376 } else { 388 } else {
377 /* No wrapping */ 389 /* No wrapping */
378 memcpy(&msg_tmp, bus->m_pdwGetRing + curr_grp, bytes_to_read); 390 memcpy_fromio(&msg_tmp, bus->m_pdwGetRing + curr_grp, bytes_to_read);
379 } 391 }
392 /* Convert from little endian to CPU */
393 msg_tmp.size = le16_to_cpu((__force __le16)msg_tmp.size);
394 msg_tmp.command = le32_to_cpu((__force __le32)msg_tmp.command);
395 msg_tmp.controlselector = le16_to_cpu((__force __le16)msg_tmp.controlselector);
380 396
381 /* No need to update the read positions, because this was a peek */ 397 /* No need to update the read positions, because this was a peek */
382 /* If the caller specifically want to peek, return */ 398 /* If the caller specifically want to peek, return */
@@ -427,24 +443,24 @@ int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg,
427 443
428 if (space_rem < sizeof(*msg)) { 444 if (space_rem < sizeof(*msg)) {
429 /* msg wraps around the ring */ 445 /* msg wraps around the ring */
430 memcpy(msg, bus->m_pdwGetRing + curr_grp, space_rem); 446 memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, space_rem);
431 memcpy((u8 *)msg + space_rem, bus->m_pdwGetRing, 447 memcpy_fromio((u8 *)msg + space_rem, bus->m_pdwGetRing,
432 sizeof(*msg) - space_rem); 448 sizeof(*msg) - space_rem);
433 if (buf) 449 if (buf)
434 memcpy(buf, bus->m_pdwGetRing + sizeof(*msg) - 450 memcpy_fromio(buf, bus->m_pdwGetRing + sizeof(*msg) -
435 space_rem, buf_size); 451 space_rem, buf_size);
436 452
437 } else if (space_rem == sizeof(*msg)) { 453 } else if (space_rem == sizeof(*msg)) {
438 memcpy(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg)); 454 memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg));
439 if (buf) 455 if (buf)
440 memcpy(buf, bus->m_pdwGetRing, buf_size); 456 memcpy_fromio(buf, bus->m_pdwGetRing, buf_size);
441 } else { 457 } else {
442 /* Additional data wraps around the ring */ 458 /* Additional data wraps around the ring */
443 memcpy(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg)); 459 memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg));
444 if (buf) { 460 if (buf) {
445 memcpy(buf, bus->m_pdwGetRing + curr_grp + 461 memcpy_fromio(buf, bus->m_pdwGetRing + curr_grp +
446 sizeof(*msg), space_rem - sizeof(*msg)); 462 sizeof(*msg), space_rem - sizeof(*msg));
447 memcpy(buf + space_rem - sizeof(*msg), 463 memcpy_fromio(buf + space_rem - sizeof(*msg),
448 bus->m_pdwGetRing, bytes_to_read - 464 bus->m_pdwGetRing, bytes_to_read -
449 space_rem); 465 space_rem);
450 } 466 }
@@ -453,19 +469,20 @@ int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg,
453 469
454 } else { 470 } else {
455 /* No wrapping */ 471 /* No wrapping */
456 memcpy(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg)); 472 memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg));
457 if (buf) 473 if (buf)
458 memcpy(buf, bus->m_pdwGetRing + curr_grp + sizeof(*msg), 474 memcpy_fromio(buf, bus->m_pdwGetRing + curr_grp + sizeof(*msg),
459 buf_size); 475 buf_size);
460 } 476 }
477 /* Convert from little endian to CPU */
478 msg->size = le16_to_cpu((__force __le16)msg->size);
479 msg->command = le32_to_cpu((__force __le32)msg->command);
480 msg->controlselector = le16_to_cpu((__force __le16)msg->controlselector);
461 481
462 /* Update the read positions, adjusting the ring */ 482 /* Update the read positions, adjusting the ring */
463 saa7164_writel(bus->m_dwGetReadPos, cpu_to_le32(new_grp)); 483 saa7164_writel(bus->m_dwGetReadPos, new_grp);
464 484
465peekout: 485peekout:
466 msg->size = le16_to_cpu(msg->size);
467 msg->command = le32_to_cpu(msg->command);
468 msg->controlselector = le16_to_cpu(msg->controlselector);
469 ret = SAA_OK; 486 ret = SAA_OK;
470out: 487out:
471 mutex_unlock(&bus->lock); 488 mutex_unlock(&bus->lock);
diff --git a/drivers/media/pci/saa7164/saa7164-core.c b/drivers/media/pci/saa7164/saa7164-core.c
index cc1be8a7a451..4b0bec3766ed 100644
--- a/drivers/media/pci/saa7164/saa7164-core.c
+++ b/drivers/media/pci/saa7164/saa7164-core.c
@@ -119,7 +119,7 @@ static void saa7164_ts_verifier(struct saa7164_buffer *buf)
119 u32 i; 119 u32 i;
120 u8 cc, a; 120 u8 cc, a;
121 u16 pid; 121 u16 pid;
122 u8 __iomem *bufcpu = (u8 *)buf->cpu; 122 u8 *bufcpu = (u8 *)buf->cpu;
123 123
124 port->sync_errors = 0; 124 port->sync_errors = 0;
125 port->v_cc_errors = 0; 125 port->v_cc_errors = 0;
@@ -260,7 +260,7 @@ static void saa7164_work_enchandler_helper(struct saa7164_port *port, int bufnr)
260 struct saa7164_user_buffer *ubuf = NULL; 260 struct saa7164_user_buffer *ubuf = NULL;
261 struct list_head *c, *n; 261 struct list_head *c, *n;
262 int i = 0; 262 int i = 0;
263 u8 __iomem *p; 263 u8 *p;
264 264
265 mutex_lock(&port->dmaqueue_lock); 265 mutex_lock(&port->dmaqueue_lock);
266 list_for_each_safe(c, n, &port->dmaqueue.list) { 266 list_for_each_safe(c, n, &port->dmaqueue.list) {
@@ -318,8 +318,7 @@ static void saa7164_work_enchandler_helper(struct saa7164_port *port, int bufnr)
318 318
319 if (buf->actual_size <= ubuf->actual_size) { 319 if (buf->actual_size <= ubuf->actual_size) {
320 320
321 memcpy_fromio(ubuf->data, buf->cpu, 321 memcpy(ubuf->data, buf->cpu, ubuf->actual_size);
322 ubuf->actual_size);
323 322
324 if (crc_checking) { 323 if (crc_checking) {
325 /* Throw a new checksum on the read buffer */ 324 /* Throw a new checksum on the read buffer */
@@ -346,7 +345,7 @@ static void saa7164_work_enchandler_helper(struct saa7164_port *port, int bufnr)
346 * with known bad data. We check for this data at a later point 345 * with known bad data. We check for this data at a later point
347 * in time. */ 346 * in time. */
348 saa7164_buffer_zero_offsets(port, bufnr); 347 saa7164_buffer_zero_offsets(port, bufnr);
349 memset_io(buf->cpu, 0xff, buf->pci_size); 348 memset(buf->cpu, 0xff, buf->pci_size);
350 if (crc_checking) { 349 if (crc_checking) {
351 /* Throw yet aanother new checksum on the dma buffer */ 350 /* Throw yet aanother new checksum on the dma buffer */
352 buf->crc = crc32(0, buf->cpu, buf->actual_size); 351 buf->crc = crc32(0, buf->cpu, buf->actual_size);
@@ -1096,7 +1095,7 @@ static int saa7164_proc_show(struct seq_file *m, void *v)
1096 if (c == 0) 1095 if (c == 0)
1097 seq_printf(m, " %04x:", i); 1096 seq_printf(m, " %04x:", i);
1098 1097
1099 seq_printf(m, " %02x", *(b->m_pdwSetRing + i)); 1098 seq_printf(m, " %02x", readb(b->m_pdwSetRing + i));
1100 1099
1101 if (++c == 16) { 1100 if (++c == 16) {
1102 seq_printf(m, "\n"); 1101 seq_printf(m, "\n");
@@ -1111,7 +1110,7 @@ static int saa7164_proc_show(struct seq_file *m, void *v)
1111 if (c == 0) 1110 if (c == 0)
1112 seq_printf(m, " %04x:", i); 1111 seq_printf(m, " %04x:", i);
1113 1112
1114 seq_printf(m, " %02x", *(b->m_pdwGetRing + i)); 1113 seq_printf(m, " %02x", readb(b->m_pdwGetRing + i));
1115 1114
1116 if (++c == 16) { 1115 if (++c == 16) {
1117 seq_printf(m, "\n"); 1116 seq_printf(m, "\n");
diff --git a/drivers/media/pci/saa7164/saa7164-fw.c b/drivers/media/pci/saa7164/saa7164-fw.c
index 86763203d61d..add06ab5124d 100644
--- a/drivers/media/pci/saa7164/saa7164-fw.c
+++ b/drivers/media/pci/saa7164/saa7164-fw.c
@@ -72,7 +72,7 @@ static int saa7164_dl_wait_clr(struct saa7164_dev *dev, u32 reg)
72/* TODO: move dlflags into dev-> and change to write/readl/b */ 72/* TODO: move dlflags into dev-> and change to write/readl/b */
73/* TODO: Excessive levels of debug */ 73/* TODO: Excessive levels of debug */
74static int saa7164_downloadimage(struct saa7164_dev *dev, u8 *src, u32 srcsize, 74static int saa7164_downloadimage(struct saa7164_dev *dev, u8 *src, u32 srcsize,
75 u32 dlflags, u8 *dst, u32 dstsize) 75 u32 dlflags, u8 __iomem *dst, u32 dstsize)
76{ 76{
77 u32 reg, timeout, offset; 77 u32 reg, timeout, offset;
78 u8 *srcbuf = NULL; 78 u8 *srcbuf = NULL;
@@ -136,7 +136,7 @@ static int saa7164_downloadimage(struct saa7164_dev *dev, u8 *src, u32 srcsize,
136 srcsize -= dstsize, offset += dstsize) { 136 srcsize -= dstsize, offset += dstsize) {
137 137
138 dprintk(DBGLVL_FW, "%s() memcpy %d\n", __func__, dstsize); 138 dprintk(DBGLVL_FW, "%s() memcpy %d\n", __func__, dstsize);
139 memcpy(dst, srcbuf + offset, dstsize); 139 memcpy_toio(dst, srcbuf + offset, dstsize);
140 140
141 /* Flag the data as ready */ 141 /* Flag the data as ready */
142 saa7164_writel(drflag, 1); 142 saa7164_writel(drflag, 1);
@@ -154,7 +154,7 @@ static int saa7164_downloadimage(struct saa7164_dev *dev, u8 *src, u32 srcsize,
154 154
155 dprintk(DBGLVL_FW, "%s() memcpy(l) %d\n", __func__, dstsize); 155 dprintk(DBGLVL_FW, "%s() memcpy(l) %d\n", __func__, dstsize);
156 /* Write last block to the device */ 156 /* Write last block to the device */
157 memcpy(dst, srcbuf+offset, srcsize); 157 memcpy_toio(dst, srcbuf+offset, srcsize);
158 158
159 /* Flag the data as ready */ 159 /* Flag the data as ready */
160 saa7164_writel(drflag, 1); 160 saa7164_writel(drflag, 1);
diff --git a/drivers/media/pci/saa7164/saa7164-types.h b/drivers/media/pci/saa7164/saa7164-types.h
index 1d2140a3eb38..f48ba978f835 100644
--- a/drivers/media/pci/saa7164/saa7164-types.h
+++ b/drivers/media/pci/saa7164/saa7164-types.h
@@ -78,9 +78,9 @@ enum tmBusType {
78struct tmComResBusInfo { 78struct tmComResBusInfo {
79 enum tmBusType Type; 79 enum tmBusType Type;
80 u16 m_wMaxReqSize; 80 u16 m_wMaxReqSize;
81 u8 *m_pdwSetRing; 81 u8 __iomem *m_pdwSetRing;
82 u32 m_dwSizeSetRing; 82 u32 m_dwSizeSetRing;
83 u8 *m_pdwGetRing; 83 u8 __iomem *m_pdwGetRing;
84 u32 m_dwSizeGetRing; 84 u32 m_dwSizeGetRing;
85 u32 m_dwSetWritePos; 85 u32 m_dwSetWritePos;
86 u32 m_dwSetReadPos; 86 u32 m_dwSetReadPos;
diff --git a/drivers/media/pci/saa7164/saa7164.h b/drivers/media/pci/saa7164/saa7164.h
index 8b29e8990301..cd1a07ce27cb 100644
--- a/drivers/media/pci/saa7164/saa7164.h
+++ b/drivers/media/pci/saa7164/saa7164.h
@@ -313,13 +313,13 @@ struct saa7164_buffer {
313 313
314 /* A block of page align PCI memory */ 314 /* A block of page align PCI memory */
315 u32 pci_size; /* PCI allocation size in bytes */ 315 u32 pci_size; /* PCI allocation size in bytes */
316 u64 __iomem *cpu; /* Virtual address */ 316 u64 *cpu; /* Virtual address */
317 dma_addr_t dma; /* Physical address */ 317 dma_addr_t dma; /* Physical address */
318 u32 crc; /* Checksum for the entire buffer data */ 318 u32 crc; /* Checksum for the entire buffer data */
319 319
320 /* A page table that splits the block into a number of entries */ 320 /* A page table that splits the block into a number of entries */
321 u32 pt_size; /* PCI allocation size in bytes */ 321 u32 pt_size; /* PCI allocation size in bytes */
322 u64 __iomem *pt_cpu; /* Virtual address */ 322 u64 *pt_cpu; /* Virtual address */
323 dma_addr_t pt_dma; /* Physical address */ 323 dma_addr_t pt_dma; /* Physical address */
324 324
325 /* Encoder fops */ 325 /* Encoder fops */
diff --git a/drivers/media/pci/smipcie/Kconfig b/drivers/media/pci/smipcie/Kconfig
new file mode 100644
index 000000000000..c8de53f5ea28
--- /dev/null
+++ b/drivers/media/pci/smipcie/Kconfig
@@ -0,0 +1,17 @@
1config DVB_SMIPCIE
2 tristate "SMI PCIe DVBSky cards"
3 depends on DVB_CORE && PCI && I2C
4 select I2C_ALGOBIT
5 select DVB_M88DS3103 if MEDIA_SUBDRV_AUTOSELECT
6 select DVB_SI2168 if MEDIA_SUBDRV_AUTOSELECT
7 select MEDIA_TUNER_M88TS2022 if MEDIA_SUBDRV_AUTOSELECT
8 select MEDIA_TUNER_M88RS6000T if MEDIA_SUBDRV_AUTOSELECT
9 select MEDIA_TUNER_SI2157 if MEDIA_SUBDRV_AUTOSELECT
10 help
11 Support for cards with SMI PCIe bridge:
12 - DVBSky S950 V3
13 - DVBSky S952 V3
14 - DVBSky T9580 V3
15
16 Say Y or M if you own such a device and want to use it.
17 If unsure say N.
diff --git a/drivers/media/pci/smipcie/Makefile b/drivers/media/pci/smipcie/Makefile
new file mode 100644
index 000000000000..be55481a6e95
--- /dev/null
+++ b/drivers/media/pci/smipcie/Makefile
@@ -0,0 +1,6 @@
1obj-$(CONFIG_DVB_SMIPCIE) += smipcie.o
2
3ccflags-y += -Idrivers/media/tuners
4ccflags-y += -Idrivers/media/dvb-core
5ccflags-y += -Idrivers/media/dvb-frontends
6
diff --git a/drivers/media/pci/smipcie/smipcie.c b/drivers/media/pci/smipcie/smipcie.c
new file mode 100644
index 000000000000..f773350e67b9
--- /dev/null
+++ b/drivers/media/pci/smipcie/smipcie.c
@@ -0,0 +1,1099 @@
1/*
2 * SMI PCIe driver for DVBSky cards.
3 *
4 * Copyright (C) 2014 Max nibble <nibble.max@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16
17#include "smipcie.h"
18#include "m88ds3103.h"
19#include "m88ts2022.h"
20#include "m88rs6000t.h"
21#include "si2168.h"
22#include "si2157.h"
23
24DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
25
26static int smi_hw_init(struct smi_dev *dev)
27{
28 u32 port_mux, port_ctrl, int_stat;
29
30 /* set port mux.*/
31 port_mux = smi_read(MUX_MODE_CTRL);
32 port_mux &= ~(rbPaMSMask);
33 port_mux |= rbPaMSDtvNoGpio;
34 port_mux &= ~(rbPbMSMask);
35 port_mux |= rbPbMSDtvNoGpio;
36 port_mux &= ~(0x0f0000);
37 port_mux |= 0x50000;
38 smi_write(MUX_MODE_CTRL, port_mux);
39
40 /* set DTV register.*/
41 /* Port A */
42 port_ctrl = smi_read(VIDEO_CTRL_STATUS_A);
43 port_ctrl &= ~0x01;
44 smi_write(VIDEO_CTRL_STATUS_A, port_ctrl);
45 port_ctrl = smi_read(MPEG2_CTRL_A);
46 port_ctrl &= ~0x40;
47 port_ctrl |= 0x80;
48 smi_write(MPEG2_CTRL_A, port_ctrl);
49 /* Port B */
50 port_ctrl = smi_read(VIDEO_CTRL_STATUS_B);
51 port_ctrl &= ~0x01;
52 smi_write(VIDEO_CTRL_STATUS_B, port_ctrl);
53 port_ctrl = smi_read(MPEG2_CTRL_B);
54 port_ctrl &= ~0x40;
55 port_ctrl |= 0x80;
56 smi_write(MPEG2_CTRL_B, port_ctrl);
57
58 /* disable and clear interrupt.*/
59 smi_write(MSI_INT_ENA_CLR, ALL_INT);
60 int_stat = smi_read(MSI_INT_STATUS);
61 smi_write(MSI_INT_STATUS_CLR, int_stat);
62
63 /* reset demod.*/
64 smi_clear(PERIPHERAL_CTRL, 0x0303);
65 msleep(50);
66 smi_set(PERIPHERAL_CTRL, 0x0101);
67 return 0;
68}
69
70/* i2c bit bus.*/
71static void smi_i2c_cfg(struct smi_dev *dev, u32 sw_ctl)
72{
73 u32 dwCtrl;
74
75 dwCtrl = smi_read(sw_ctl);
76 dwCtrl &= ~0x18; /* disable output.*/
77 dwCtrl |= 0x21; /* reset and software mode.*/
78 dwCtrl &= ~0xff00;
79 dwCtrl |= 0x6400;
80 smi_write(sw_ctl, dwCtrl);
81 msleep(20);
82 dwCtrl = smi_read(sw_ctl);
83 dwCtrl &= ~0x20;
84 smi_write(sw_ctl, dwCtrl);
85}
86
87static void smi_i2c_setsda(struct smi_dev *dev, int state, u32 sw_ctl)
88{
89 if (state) {
90 /* set as input.*/
91 smi_clear(sw_ctl, SW_I2C_MSK_DAT_EN);
92 } else {
93 smi_clear(sw_ctl, SW_I2C_MSK_DAT_OUT);
94 /* set as output.*/
95 smi_set(sw_ctl, SW_I2C_MSK_DAT_EN);
96 }
97}
98
99static void smi_i2c_setscl(void *data, int state, u32 sw_ctl)
100{
101 struct smi_dev *dev = data;
102
103 if (state) {
104 /* set as input.*/
105 smi_clear(sw_ctl, SW_I2C_MSK_CLK_EN);
106 } else {
107 smi_clear(sw_ctl, SW_I2C_MSK_CLK_OUT);
108 /* set as output.*/
109 smi_set(sw_ctl, SW_I2C_MSK_CLK_EN);
110 }
111}
112
113static int smi_i2c_getsda(void *data, u32 sw_ctl)
114{
115 struct smi_dev *dev = data;
116 /* set as input.*/
117 smi_clear(sw_ctl, SW_I2C_MSK_DAT_EN);
118 udelay(1);
119 return (smi_read(sw_ctl) & SW_I2C_MSK_DAT_IN) ? 1 : 0;
120}
121
122static int smi_i2c_getscl(void *data, u32 sw_ctl)
123{
124 struct smi_dev *dev = data;
125 /* set as input.*/
126 smi_clear(sw_ctl, SW_I2C_MSK_CLK_EN);
127 udelay(1);
128 return (smi_read(sw_ctl) & SW_I2C_MSK_CLK_IN) ? 1 : 0;
129}
130/* i2c 0.*/
131static void smi_i2c0_setsda(void *data, int state)
132{
133 struct smi_dev *dev = data;
134
135 smi_i2c_setsda(dev, state, I2C_A_SW_CTL);
136}
137
138static void smi_i2c0_setscl(void *data, int state)
139{
140 struct smi_dev *dev = data;
141
142 smi_i2c_setscl(dev, state, I2C_A_SW_CTL);
143}
144
145static int smi_i2c0_getsda(void *data)
146{
147 struct smi_dev *dev = data;
148
149 return smi_i2c_getsda(dev, I2C_A_SW_CTL);
150}
151
152static int smi_i2c0_getscl(void *data)
153{
154 struct smi_dev *dev = data;
155
156 return smi_i2c_getscl(dev, I2C_A_SW_CTL);
157}
158/* i2c 1.*/
159static void smi_i2c1_setsda(void *data, int state)
160{
161 struct smi_dev *dev = data;
162
163 smi_i2c_setsda(dev, state, I2C_B_SW_CTL);
164}
165
166static void smi_i2c1_setscl(void *data, int state)
167{
168 struct smi_dev *dev = data;
169
170 smi_i2c_setscl(dev, state, I2C_B_SW_CTL);
171}
172
173static int smi_i2c1_getsda(void *data)
174{
175 struct smi_dev *dev = data;
176
177 return smi_i2c_getsda(dev, I2C_B_SW_CTL);
178}
179
180static int smi_i2c1_getscl(void *data)
181{
182 struct smi_dev *dev = data;
183
184 return smi_i2c_getscl(dev, I2C_B_SW_CTL);
185}
186
187static int smi_i2c_init(struct smi_dev *dev)
188{
189 int ret;
190
191 /* i2c bus 0 */
192 smi_i2c_cfg(dev, I2C_A_SW_CTL);
193 i2c_set_adapdata(&dev->i2c_bus[0], dev);
194 strcpy(dev->i2c_bus[0].name, "SMI-I2C0");
195 dev->i2c_bus[0].owner = THIS_MODULE;
196 dev->i2c_bus[0].dev.parent = &dev->pci_dev->dev;
197 dev->i2c_bus[0].algo_data = &dev->i2c_bit[0];
198 dev->i2c_bit[0].data = dev;
199 dev->i2c_bit[0].setsda = smi_i2c0_setsda;
200 dev->i2c_bit[0].setscl = smi_i2c0_setscl;
201 dev->i2c_bit[0].getsda = smi_i2c0_getsda;
202 dev->i2c_bit[0].getscl = smi_i2c0_getscl;
203 dev->i2c_bit[0].udelay = 12;
204 dev->i2c_bit[0].timeout = 10;
205 /* Raise SCL and SDA */
206 smi_i2c0_setsda(dev, 1);
207 smi_i2c0_setscl(dev, 1);
208
209 ret = i2c_bit_add_bus(&dev->i2c_bus[0]);
210 if (ret < 0)
211 return ret;
212
213 /* i2c bus 1 */
214 smi_i2c_cfg(dev, I2C_B_SW_CTL);
215 i2c_set_adapdata(&dev->i2c_bus[1], dev);
216 strcpy(dev->i2c_bus[1].name, "SMI-I2C1");
217 dev->i2c_bus[1].owner = THIS_MODULE;
218 dev->i2c_bus[1].dev.parent = &dev->pci_dev->dev;
219 dev->i2c_bus[1].algo_data = &dev->i2c_bit[1];
220 dev->i2c_bit[1].data = dev;
221 dev->i2c_bit[1].setsda = smi_i2c1_setsda;
222 dev->i2c_bit[1].setscl = smi_i2c1_setscl;
223 dev->i2c_bit[1].getsda = smi_i2c1_getsda;
224 dev->i2c_bit[1].getscl = smi_i2c1_getscl;
225 dev->i2c_bit[1].udelay = 12;
226 dev->i2c_bit[1].timeout = 10;
227 /* Raise SCL and SDA */
228 smi_i2c1_setsda(dev, 1);
229 smi_i2c1_setscl(dev, 1);
230
231 ret = i2c_bit_add_bus(&dev->i2c_bus[1]);
232 if (ret < 0)
233 i2c_del_adapter(&dev->i2c_bus[0]);
234
235 return ret;
236}
237
238static void smi_i2c_exit(struct smi_dev *dev)
239{
240 i2c_del_adapter(&dev->i2c_bus[0]);
241 i2c_del_adapter(&dev->i2c_bus[1]);
242}
243
244static int smi_read_eeprom(struct i2c_adapter *i2c, u16 reg, u8 *data, u16 size)
245{
246 int ret;
247 u8 b0[2] = { (reg >> 8) & 0xff, reg & 0xff };
248
249 struct i2c_msg msg[] = {
250 { .addr = 0x50, .flags = 0,
251 .buf = b0, .len = 2 },
252 { .addr = 0x50, .flags = I2C_M_RD,
253 .buf = data, .len = size }
254 };
255
256 ret = i2c_transfer(i2c, msg, 2);
257
258 if (ret != 2) {
259 dev_err(&i2c->dev, "%s: reg=0x%x (error=%d)\n",
260 __func__, reg, ret);
261 return ret;
262 }
263 return ret;
264}
265
266/* ts port interrupt operations */
267static void smi_port_disableInterrupt(struct smi_port *port)
268{
269 struct smi_dev *dev = port->dev;
270
271 smi_write(MSI_INT_ENA_CLR,
272 (port->_dmaInterruptCH0 | port->_dmaInterruptCH1));
273}
274
275static void smi_port_enableInterrupt(struct smi_port *port)
276{
277 struct smi_dev *dev = port->dev;
278
279 smi_write(MSI_INT_ENA_SET,
280 (port->_dmaInterruptCH0 | port->_dmaInterruptCH1));
281}
282
283static void smi_port_clearInterrupt(struct smi_port *port)
284{
285 struct smi_dev *dev = port->dev;
286
287 smi_write(MSI_INT_STATUS_CLR,
288 (port->_dmaInterruptCH0 | port->_dmaInterruptCH1));
289}
290
291/* tasklet handler: DMA data to dmx.*/
292static void smi_dma_xfer(unsigned long data)
293{
294 struct smi_port *port = (struct smi_port *) data;
295 struct smi_dev *dev = port->dev;
296 u32 intr_status, finishedData, dmaManagement;
297 u8 dmaChan0State, dmaChan1State;
298
299 intr_status = port->_int_status;
300 dmaManagement = smi_read(port->DMA_MANAGEMENT);
301 dmaChan0State = (u8)((dmaManagement & 0x00000030) >> 4);
302 dmaChan1State = (u8)((dmaManagement & 0x00300000) >> 20);
303
304 /* CH-0 DMA interrupt.*/
305 if ((intr_status & port->_dmaInterruptCH0) && (dmaChan0State == 0x01)) {
306 dev_dbg(&dev->pci_dev->dev,
307 "Port[%d]-DMA CH0 engine complete successful !\n",
308 port->idx);
309 finishedData = smi_read(port->DMA_CHAN0_TRANS_STATE);
310 finishedData &= 0x003FFFFF;
311 /* value of DMA_PORT0_CHAN0_TRANS_STATE register [21:0]
312 * indicate dma total transfer length and
313 * zero of [21:0] indicate dma total transfer length
314 * equal to 0x400000 (4MB)*/
315 if (finishedData == 0)
316 finishedData = 0x00400000;
317 if (finishedData != SMI_TS_DMA_BUF_SIZE) {
318 dev_dbg(&dev->pci_dev->dev,
319 "DMA CH0 engine complete length mismatched, finish data=%d !\n",
320 finishedData);
321 }
322 dvb_dmx_swfilter_packets(&port->demux,
323 port->cpu_addr[0], (finishedData / 188));
324 /*dvb_dmx_swfilter(&port->demux,
325 port->cpu_addr[0], finishedData);*/
326 }
327 /* CH-1 DMA interrupt.*/
328 if ((intr_status & port->_dmaInterruptCH1) && (dmaChan1State == 0x01)) {
329 dev_dbg(&dev->pci_dev->dev,
330 "Port[%d]-DMA CH1 engine complete successful !\n",
331 port->idx);
332 finishedData = smi_read(port->DMA_CHAN1_TRANS_STATE);
333 finishedData &= 0x003FFFFF;
334 /* value of DMA_PORT0_CHAN0_TRANS_STATE register [21:0]
335 * indicate dma total transfer length and
336 * zero of [21:0] indicate dma total transfer length
337 * equal to 0x400000 (4MB)*/
338 if (finishedData == 0)
339 finishedData = 0x00400000;
340 if (finishedData != SMI_TS_DMA_BUF_SIZE) {
341 dev_dbg(&dev->pci_dev->dev,
342 "DMA CH1 engine complete length mismatched, finish data=%d !\n",
343 finishedData);
344 }
345 dvb_dmx_swfilter_packets(&port->demux,
346 port->cpu_addr[1], (finishedData / 188));
347 /*dvb_dmx_swfilter(&port->demux,
348 port->cpu_addr[1], finishedData);*/
349 }
350 /* restart DMA.*/
351 if (intr_status & port->_dmaInterruptCH0)
352 dmaManagement |= 0x00000002;
353 if (intr_status & port->_dmaInterruptCH1)
354 dmaManagement |= 0x00020000;
355 smi_write(port->DMA_MANAGEMENT, dmaManagement);
356 /* Re-enable interrupts */
357 smi_port_enableInterrupt(port);
358}
359
360static void smi_port_dma_free(struct smi_port *port)
361{
362 if (port->cpu_addr[0]) {
363 pci_free_consistent(port->dev->pci_dev, SMI_TS_DMA_BUF_SIZE,
364 port->cpu_addr[0], port->dma_addr[0]);
365 port->cpu_addr[0] = NULL;
366 }
367 if (port->cpu_addr[1]) {
368 pci_free_consistent(port->dev->pci_dev, SMI_TS_DMA_BUF_SIZE,
369 port->cpu_addr[1], port->dma_addr[1]);
370 port->cpu_addr[1] = NULL;
371 }
372}
373
374static int smi_port_init(struct smi_port *port, int dmaChanUsed)
375{
376 dev_dbg(&port->dev->pci_dev->dev,
377 "%s, port %d, dmaused %d\n", __func__, port->idx, dmaChanUsed);
378 port->enable = 0;
379 if (port->idx == 0) {
380 /* Port A */
381 port->_dmaInterruptCH0 = dmaChanUsed & 0x01;
382 port->_dmaInterruptCH1 = dmaChanUsed & 0x02;
383
384 port->DMA_CHAN0_ADDR_LOW = DMA_PORTA_CHAN0_ADDR_LOW;
385 port->DMA_CHAN0_ADDR_HI = DMA_PORTA_CHAN0_ADDR_HI;
386 port->DMA_CHAN0_TRANS_STATE = DMA_PORTA_CHAN0_TRANS_STATE;
387 port->DMA_CHAN0_CONTROL = DMA_PORTA_CHAN0_CONTROL;
388 port->DMA_CHAN1_ADDR_LOW = DMA_PORTA_CHAN1_ADDR_LOW;
389 port->DMA_CHAN1_ADDR_HI = DMA_PORTA_CHAN1_ADDR_HI;
390 port->DMA_CHAN1_TRANS_STATE = DMA_PORTA_CHAN1_TRANS_STATE;
391 port->DMA_CHAN1_CONTROL = DMA_PORTA_CHAN1_CONTROL;
392 port->DMA_MANAGEMENT = DMA_PORTA_MANAGEMENT;
393 } else {
394 /* Port B */
395 port->_dmaInterruptCH0 = (dmaChanUsed << 2) & 0x04;
396 port->_dmaInterruptCH1 = (dmaChanUsed << 2) & 0x08;
397
398 port->DMA_CHAN0_ADDR_LOW = DMA_PORTB_CHAN0_ADDR_LOW;
399 port->DMA_CHAN0_ADDR_HI = DMA_PORTB_CHAN0_ADDR_HI;
400 port->DMA_CHAN0_TRANS_STATE = DMA_PORTB_CHAN0_TRANS_STATE;
401 port->DMA_CHAN0_CONTROL = DMA_PORTB_CHAN0_CONTROL;
402 port->DMA_CHAN1_ADDR_LOW = DMA_PORTB_CHAN1_ADDR_LOW;
403 port->DMA_CHAN1_ADDR_HI = DMA_PORTB_CHAN1_ADDR_HI;
404 port->DMA_CHAN1_TRANS_STATE = DMA_PORTB_CHAN1_TRANS_STATE;
405 port->DMA_CHAN1_CONTROL = DMA_PORTB_CHAN1_CONTROL;
406 port->DMA_MANAGEMENT = DMA_PORTB_MANAGEMENT;
407 }
408
409 if (port->_dmaInterruptCH0) {
410 port->cpu_addr[0] = pci_alloc_consistent(port->dev->pci_dev,
411 SMI_TS_DMA_BUF_SIZE,
412 &port->dma_addr[0]);
413 if (!port->cpu_addr[0]) {
414 dev_err(&port->dev->pci_dev->dev,
415 "Port[%d] DMA CH0 memory allocation failed!\n",
416 port->idx);
417 goto err;
418 }
419 }
420
421 if (port->_dmaInterruptCH1) {
422 port->cpu_addr[1] = pci_alloc_consistent(port->dev->pci_dev,
423 SMI_TS_DMA_BUF_SIZE,
424 &port->dma_addr[1]);
425 if (!port->cpu_addr[1]) {
426 dev_err(&port->dev->pci_dev->dev,
427 "Port[%d] DMA CH1 memory allocation failed!\n",
428 port->idx);
429 goto err;
430 }
431 }
432
433 smi_port_disableInterrupt(port);
434 tasklet_init(&port->tasklet, smi_dma_xfer, (unsigned long)port);
435 tasklet_disable(&port->tasklet);
436 port->enable = 1;
437 return 0;
438err:
439 smi_port_dma_free(port);
440 return -ENOMEM;
441}
442
443static void smi_port_exit(struct smi_port *port)
444{
445 smi_port_disableInterrupt(port);
446 tasklet_kill(&port->tasklet);
447 smi_port_dma_free(port);
448 port->enable = 0;
449}
450
451static void smi_port_irq(struct smi_port *port, u32 int_status)
452{
453 u32 port_req_irq = port->_dmaInterruptCH0 | port->_dmaInterruptCH1;
454
455 if (int_status & port_req_irq) {
456 smi_port_disableInterrupt(port);
457 port->_int_status = int_status;
458 smi_port_clearInterrupt(port);
459 tasklet_schedule(&port->tasklet);
460 }
461}
462
463static irqreturn_t smi_irq_handler(int irq, void *dev_id)
464{
465 struct smi_dev *dev = dev_id;
466 struct smi_port *port0 = &dev->ts_port[0];
467 struct smi_port *port1 = &dev->ts_port[1];
468
469 u32 intr_status = smi_read(MSI_INT_STATUS);
470
471 /* ts0 interrupt.*/
472 if (dev->info->ts_0)
473 smi_port_irq(port0, intr_status);
474
475 /* ts1 interrupt.*/
476 if (dev->info->ts_1)
477 smi_port_irq(port1, intr_status);
478
479 return IRQ_HANDLED;
480}
481
482static struct i2c_client *smi_add_i2c_client(struct i2c_adapter *adapter,
483 struct i2c_board_info *info)
484{
485 struct i2c_client *client;
486
487 request_module(info->type);
488 client = i2c_new_device(adapter, info);
489 if (client == NULL || client->dev.driver == NULL)
490 goto err_add_i2c_client;
491
492 if (!try_module_get(client->dev.driver->owner)) {
493 i2c_unregister_device(client);
494 goto err_add_i2c_client;
495 }
496 return client;
497
498err_add_i2c_client:
499 client = NULL;
500 return client;
501}
502
503static void smi_del_i2c_client(struct i2c_client *client)
504{
505 module_put(client->dev.driver->owner);
506 i2c_unregister_device(client);
507}
508
509static const struct m88ds3103_config smi_dvbsky_m88ds3103_cfg = {
510 .i2c_addr = 0x68,
511 .clock = 27000000,
512 .i2c_wr_max = 33,
513 .clock_out = 0,
514 .ts_mode = M88DS3103_TS_PARALLEL,
515 .ts_clk = 16000,
516 .ts_clk_pol = 1,
517 .agc = 0x99,
518 .lnb_hv_pol = 0,
519 .lnb_en_pol = 1,
520};
521
522static int smi_dvbsky_m88ds3103_fe_attach(struct smi_port *port)
523{
524 int ret = 0;
525 struct smi_dev *dev = port->dev;
526 struct i2c_adapter *i2c;
527 /* tuner I2C module */
528 struct i2c_adapter *tuner_i2c_adapter;
529 struct i2c_client *tuner_client;
530 struct i2c_board_info tuner_info;
531 struct m88ts2022_config m88ts2022_config = {
532 .clock = 27000000,
533 };
534 memset(&tuner_info, 0, sizeof(struct i2c_board_info));
535 i2c = (port->idx == 0) ? &dev->i2c_bus[0] : &dev->i2c_bus[1];
536
537 /* attach demod */
538 port->fe = dvb_attach(m88ds3103_attach,
539 &smi_dvbsky_m88ds3103_cfg, i2c, &tuner_i2c_adapter);
540 if (!port->fe) {
541 ret = -ENODEV;
542 return ret;
543 }
544 /* attach tuner */
545 m88ts2022_config.fe = port->fe;
546 strlcpy(tuner_info.type, "m88ts2022", I2C_NAME_SIZE);
547 tuner_info.addr = 0x60;
548 tuner_info.platform_data = &m88ts2022_config;
549 tuner_client = smi_add_i2c_client(tuner_i2c_adapter, &tuner_info);
550 if (!tuner_client) {
551 ret = -ENODEV;
552 goto err_tuner_i2c_device;
553 }
554
555 /* delegate signal strength measurement to tuner */
556 port->fe->ops.read_signal_strength =
557 port->fe->ops.tuner_ops.get_rf_strength;
558
559 port->i2c_client_tuner = tuner_client;
560 return ret;
561
562err_tuner_i2c_device:
563 dvb_frontend_detach(port->fe);
564 return ret;
565}
566
567static const struct m88ds3103_config smi_dvbsky_m88rs6000_cfg = {
568 .i2c_addr = 0x69,
569 .clock = 27000000,
570 .i2c_wr_max = 33,
571 .ts_mode = M88DS3103_TS_PARALLEL,
572 .ts_clk = 16000,
573 .ts_clk_pol = 1,
574 .agc = 0x99,
575 .lnb_hv_pol = 0,
576 .lnb_en_pol = 1,
577};
578
579static int smi_dvbsky_m88rs6000_fe_attach(struct smi_port *port)
580{
581 int ret = 0;
582 struct smi_dev *dev = port->dev;
583 struct i2c_adapter *i2c;
584 /* tuner I2C module */
585 struct i2c_adapter *tuner_i2c_adapter;
586 struct i2c_client *tuner_client;
587 struct i2c_board_info tuner_info;
588 struct m88rs6000t_config m88rs6000t_config;
589
590 memset(&tuner_info, 0, sizeof(struct i2c_board_info));
591 i2c = (port->idx == 0) ? &dev->i2c_bus[0] : &dev->i2c_bus[1];
592
593 /* attach demod */
594 port->fe = dvb_attach(m88ds3103_attach,
595 &smi_dvbsky_m88rs6000_cfg, i2c, &tuner_i2c_adapter);
596 if (!port->fe) {
597 ret = -ENODEV;
598 return ret;
599 }
600 /* attach tuner */
601 m88rs6000t_config.fe = port->fe;
602 strlcpy(tuner_info.type, "m88rs6000t", I2C_NAME_SIZE);
603 tuner_info.addr = 0x21;
604 tuner_info.platform_data = &m88rs6000t_config;
605 tuner_client = smi_add_i2c_client(tuner_i2c_adapter, &tuner_info);
606 if (!tuner_client) {
607 ret = -ENODEV;
608 goto err_tuner_i2c_device;
609 }
610
611 /* delegate signal strength measurement to tuner */
612 port->fe->ops.read_signal_strength =
613 port->fe->ops.tuner_ops.get_rf_strength;
614
615 port->i2c_client_tuner = tuner_client;
616 return ret;
617
618err_tuner_i2c_device:
619 dvb_frontend_detach(port->fe);
620 return ret;
621}
622
623static int smi_dvbsky_sit2_fe_attach(struct smi_port *port)
624{
625 int ret = 0;
626 struct smi_dev *dev = port->dev;
627 struct i2c_adapter *i2c;
628 struct i2c_adapter *tuner_i2c_adapter;
629 struct i2c_client *client_tuner, *client_demod;
630 struct i2c_board_info client_info;
631 struct si2168_config si2168_config;
632 struct si2157_config si2157_config;
633
634 /* select i2c bus */
635 i2c = (port->idx == 0) ? &dev->i2c_bus[0] : &dev->i2c_bus[1];
636
637 /* attach demod */
638 memset(&si2168_config, 0, sizeof(si2168_config));
639 si2168_config.i2c_adapter = &tuner_i2c_adapter;
640 si2168_config.fe = &port->fe;
641 si2168_config.ts_mode = SI2168_TS_PARALLEL;
642
643 memset(&client_info, 0, sizeof(struct i2c_board_info));
644 strlcpy(client_info.type, "si2168", I2C_NAME_SIZE);
645 client_info.addr = 0x64;
646 client_info.platform_data = &si2168_config;
647
648 client_demod = smi_add_i2c_client(i2c, &client_info);
649 if (!client_demod) {
650 ret = -ENODEV;
651 return ret;
652 }
653 port->i2c_client_demod = client_demod;
654
655 /* attach tuner */
656 memset(&si2157_config, 0, sizeof(si2157_config));
657 si2157_config.fe = port->fe;
658
659 memset(&client_info, 0, sizeof(struct i2c_board_info));
660 strlcpy(client_info.type, "si2157", I2C_NAME_SIZE);
661 client_info.addr = 0x60;
662 client_info.platform_data = &si2157_config;
663
664 client_tuner = smi_add_i2c_client(tuner_i2c_adapter, &client_info);
665 if (!client_tuner) {
666 smi_del_i2c_client(port->i2c_client_demod);
667 port->i2c_client_demod = NULL;
668 ret = -ENODEV;
669 return ret;
670 }
671 port->i2c_client_tuner = client_tuner;
672 return ret;
673}
674
675static int smi_fe_init(struct smi_port *port)
676{
677 int ret = 0;
678 struct smi_dev *dev = port->dev;
679 struct dvb_adapter *adap = &port->dvb_adapter;
680 u8 mac_ee[16];
681
682 dev_dbg(&port->dev->pci_dev->dev,
683 "%s: port %d, fe_type = %d\n",
684 __func__, port->idx, port->fe_type);
685 switch (port->fe_type) {
686 case DVBSKY_FE_M88DS3103:
687 ret = smi_dvbsky_m88ds3103_fe_attach(port);
688 break;
689 case DVBSKY_FE_M88RS6000:
690 ret = smi_dvbsky_m88rs6000_fe_attach(port);
691 break;
692 case DVBSKY_FE_SIT2:
693 ret = smi_dvbsky_sit2_fe_attach(port);
694 break;
695 }
696 if (ret < 0)
697 return ret;
698
699 /* register dvb frontend */
700 ret = dvb_register_frontend(adap, port->fe);
701 if (ret < 0) {
702 if (port->i2c_client_tuner)
703 smi_del_i2c_client(port->i2c_client_tuner);
704 if (port->i2c_client_demod)
705 smi_del_i2c_client(port->i2c_client_demod);
706 dvb_frontend_detach(port->fe);
707 return ret;
708 }
709 /* init MAC.*/
710 ret = smi_read_eeprom(&dev->i2c_bus[0], 0xc0, mac_ee, 16);
711 dev_info(&port->dev->pci_dev->dev,
712 "DVBSky SMI PCIe MAC= %pM\n", mac_ee + (port->idx)*8);
713 memcpy(adap->proposed_mac, mac_ee + (port->idx)*8, 6);
714 return ret;
715}
716
717static void smi_fe_exit(struct smi_port *port)
718{
719 dvb_unregister_frontend(port->fe);
720 /* remove I2C demod and tuner */
721 if (port->i2c_client_tuner)
722 smi_del_i2c_client(port->i2c_client_tuner);
723 if (port->i2c_client_demod)
724 smi_del_i2c_client(port->i2c_client_demod);
725 dvb_frontend_detach(port->fe);
726}
727
/*
 * my_dvb_dmx_ts_card_init() - boilerplate setup of a software demux.
 *
 * Installs the caller's start/stop feed hooks and @priv, sizes the
 * demux at 256 filters and 256 feeds, enables TS, section and
 * memory-based filtering, and registers it via dvb_dmx_init().
 * @id is currently unused.
 *
 * Return: 0 on success, negative errno from dvb_dmx_init().
 */
static int my_dvb_dmx_ts_card_init(struct dvb_demux *dvbdemux, char *id,
			   int (*start_feed)(struct dvb_demux_feed *),
			   int (*stop_feed)(struct dvb_demux_feed *),
			   void *priv)
{
	dvbdemux->priv = priv;

	dvbdemux->filternum = 256;
	dvbdemux->feednum = 256;
	dvbdemux->start_feed = start_feed;
	dvbdemux->stop_feed = stop_feed;
	dvbdemux->write_to_decoder = NULL;
	dvbdemux->dmx.capabilities = (DMX_TS_FILTERING |
				      DMX_SECTION_FILTERING |
				      DMX_MEMORY_BASED_FILTERING);
	return dvb_dmx_init(dvbdemux);
}
745
/*
 * my_dvb_dmxdev_ts_card_init() - create the demux device node and wire
 * up its hardware and memory frontends.
 *
 * Registers @dmxdev on @dvb_adapter, adds @hw_frontend (sourced from
 * DMX_FRONTEND_0) and @mem_frontend (DMX_MEMORY_FE), then connects the
 * hardware frontend as the active input.
 *
 * Return: 0 on success, negative errno from dvb_dmxdev_init() or
 * connect_frontend().
 */
static int my_dvb_dmxdev_ts_card_init(struct dmxdev *dmxdev,
			      struct dvb_demux *dvbdemux,
			      struct dmx_frontend *hw_frontend,
			      struct dmx_frontend *mem_frontend,
			      struct dvb_adapter *dvb_adapter)
{
	int ret;

	dmxdev->filternum = 256;
	dmxdev->demux = &dvbdemux->dmx;
	dmxdev->capabilities = 0;
	ret = dvb_dmxdev_init(dmxdev, dvb_adapter);
	if (ret < 0)
		return ret;

	hw_frontend->source = DMX_FRONTEND_0;
	dvbdemux->dmx.add_frontend(&dvbdemux->dmx, hw_frontend);
	mem_frontend->source = DMX_MEMORY_FE;
	dvbdemux->dmx.add_frontend(&dvbdemux->dmx, mem_frontend);
	return dvbdemux->dmx.connect_frontend(&dvbdemux->dmx, hw_frontend);
}
767
/*
 * smi_config_DMA() - program the DMA buffer address/control registers
 * for each channel in use on @port.
 *
 * Returns the dmaManagement word the caller must write to
 * DMA_MANAGEMENT to actually start the engine(s).
 *
 * Control-word layout (from the shifts below): bits [21:0] transfer
 * length, [24:22] TLP transfer unit, [27:25] TLP traffic class,
 * [28] TD, [29] EP, [30] attributes.  Management word: channel 0 uses
 * bit 0 (enable), bit 1 (start), bits [15:8] (latency timer); channel 1
 * uses the same fields shifted up by 16.
 */
static u32 smi_config_DMA(struct smi_port *port)
{
	struct smi_dev *dev = port->dev;	/* required by smi_read/smi_write macros */
	u32 totalLength = 0, dmaMemPtrLow, dmaMemPtrHi, dmaCtlReg;
	u8 chanLatencyTimer = 0, dmaChanEnable = 1, dmaTransStart = 1;
	u32 dmaManagement = 0, tlpTransUnit = DMA_TRANS_UNIT_188;
	u8 tlpTc = 0, tlpTd = 1, tlpEp = 0, tlpAttr = 0;
	u64 mem;

	dmaManagement = smi_read(port->DMA_MANAGEMENT);
	/* Setup Channel-0 */
	if (port->_dmaInterruptCH0) {
		totalLength = SMI_TS_DMA_BUF_SIZE;
		mem = port->dma_addr[0];	/* bus address, split into 32-bit halves */
		dmaMemPtrLow = mem & 0xffffffff;
		dmaMemPtrHi = mem >> 32;
		dmaCtlReg = (totalLength) | (tlpTransUnit << 22) | (tlpTc << 25)
			| (tlpTd << 28) | (tlpEp << 29) | (tlpAttr << 30);
		dmaManagement |= dmaChanEnable | (dmaTransStart << 1)
			| (chanLatencyTimer << 8);
		/* write DMA register, start DMA engine */
		smi_write(port->DMA_CHAN0_ADDR_LOW, dmaMemPtrLow);
		smi_write(port->DMA_CHAN0_ADDR_HI, dmaMemPtrHi);
		smi_write(port->DMA_CHAN0_CONTROL, dmaCtlReg);
	}
	/* Setup Channel-1 */
	if (port->_dmaInterruptCH1) {
		totalLength = SMI_TS_DMA_BUF_SIZE;
		mem = port->dma_addr[1];
		dmaMemPtrLow = mem & 0xffffffff;
		dmaMemPtrHi = mem >> 32;
		dmaCtlReg = (totalLength) | (tlpTransUnit << 22) | (tlpTc << 25)
			| (tlpTd << 28) | (tlpEp << 29) | (tlpAttr << 30);
		dmaManagement |= (dmaChanEnable << 16) | (dmaTransStart << 17)
			| (chanLatencyTimer << 24);
		/* write DMA register, start DMA engine */
		smi_write(port->DMA_CHAN1_ADDR_LOW, dmaMemPtrLow);
		smi_write(port->DMA_CHAN1_ADDR_HI, dmaMemPtrHi);
		smi_write(port->DMA_CHAN1_CONTROL, dmaCtlReg);
	}
	return dmaManagement;
}
810
/*
 * smi_start_feed() - dvb_demux start_feed callback.
 *
 * The first feed on a port (users going 0 -> 1) brings the hardware
 * up: program the DMA engine, clear and enable the port interrupt,
 * write the management word to start the transfer, and enable the
 * completion tasklet.  Subsequent feeds just share the running stream.
 *
 * NOTE(review): port->users is read-modify-written without a lock;
 * presumably serialised by the demux core mutex — confirm.
 *
 * Returns the new (positive) user count; the demux core only treats
 * negative values as errors.
 */
static int smi_start_feed(struct dvb_demux_feed *dvbdmxfeed)
{
	struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
	struct smi_port *port = dvbdmx->priv;
	struct smi_dev *dev = port->dev;	/* required by smi_write() macro */
	u32 dmaManagement;

	if (port->users++ == 0) {
		dmaManagement = smi_config_DMA(port);
		smi_port_clearInterrupt(port);
		smi_port_enableInterrupt(port);
		smi_write(port->DMA_MANAGEMENT, dmaManagement);
		tasklet_enable(&port->tasklet);
	}
	return port->users;
}
827
/*
 * smi_stop_feed() - dvb_demux stop_feed callback.
 *
 * Only the last user (refcount reaching 0) actually stops the port:
 * disable the tasklet and port interrupt, then clear the enable/start
 * bits of both DMA channels (0x30003 = bits 0,1 for CH0 and 16,17 for
 * CH1, matching the layout programmed in smi_config_DMA()).
 *
 * NOTE(review): --port->users is unlocked, like start_feed; presumably
 * serialised by the demux core — confirm.
 */
static int smi_stop_feed(struct dvb_demux_feed *dvbdmxfeed)
{
	struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
	struct smi_port *port = dvbdmx->priv;
	struct smi_dev *dev = port->dev;	/* required by smi_clear() macro */

	if (--port->users)
		return port->users;

	tasklet_disable(&port->tasklet);
	smi_port_disableInterrupt(port);
	smi_clear(port->DMA_MANAGEMENT, 0x30003);
	return 0;
}
842
/*
 * smi_dvb_init() - register the port's DVB adapter and build the demux
 * stack on top of it: adapter -> demux -> dmxdev/frontends -> net.
 *
 * Return: 0 on success; on failure the goto ladder tears down the
 * already-created pieces in reverse order of creation.
 */
static int smi_dvb_init(struct smi_port *port)
{
	int ret;
	struct dvb_adapter *adap = &port->dvb_adapter;
	struct dvb_demux *dvbdemux = &port->demux;

	dev_dbg(&port->dev->pci_dev->dev,
		"%s, port %d\n", __func__, port->idx);

	ret = dvb_register_adapter(adap, "SMI_DVB", THIS_MODULE,
				   &port->dev->pci_dev->dev,
				   adapter_nr);
	if (ret < 0) {
		dev_err(&port->dev->pci_dev->dev, "Fail to register DVB adapter.\n");
		return ret;
	}
	ret = my_dvb_dmx_ts_card_init(dvbdemux, "SW demux",
				      smi_start_feed,
				      smi_stop_feed, port);
	if (ret < 0)
		goto err_del_dvb_register_adapter;

	ret = my_dvb_dmxdev_ts_card_init(&port->dmxdev, &port->demux,
					 &port->hw_frontend,
					 &port->mem_frontend, adap);
	if (ret < 0)
		goto err_del_dvb_dmx;

	ret = dvb_net_init(adap, &port->dvbnet, port->dmxdev.demux);
	if (ret < 0)
		goto err_del_dvb_dmxdev;
	return 0;
err_del_dvb_dmxdev:
	/* undo my_dvb_dmxdev_ts_card_init() */
	dvbdemux->dmx.close(&dvbdemux->dmx);
	dvbdemux->dmx.remove_frontend(&dvbdemux->dmx, &port->hw_frontend);
	dvbdemux->dmx.remove_frontend(&dvbdemux->dmx, &port->mem_frontend);
	dvb_dmxdev_release(&port->dmxdev);
err_del_dvb_dmx:
	dvb_dmx_release(&port->demux);
err_del_dvb_register_adapter:
	dvb_unregister_adapter(&port->dvb_adapter);
	return ret;
}
886
887static void smi_dvb_exit(struct smi_port *port)
888{
889 struct dvb_demux *dvbdemux = &port->demux;
890
891 dvb_net_release(&port->dvbnet);
892
893 dvbdemux->dmx.close(&dvbdemux->dmx);
894 dvbdemux->dmx.remove_frontend(&dvbdemux->dmx, &port->hw_frontend);
895 dvbdemux->dmx.remove_frontend(&dvbdemux->dmx, &port->mem_frontend);
896 dvb_dmxdev_release(&port->dmxdev);
897 dvb_dmx_release(&port->demux);
898
899 dvb_unregister_adapter(&port->dvb_adapter);
900}
901
/*
 * smi_port_attach() - bring one TS port fully up.
 * @index: 0 or 1; selects which ts_/fe_ fields of the card config
 *         apply to this port.
 *
 * Order: hardware port init -> DVB core registration -> frontend
 * attach; each failure unwinds the earlier stages in reverse.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int smi_port_attach(struct smi_dev *dev,
		struct smi_port *port, int index)
{
	int ret, dmachs;

	port->dev = dev;
	port->idx = index;
	port->fe_type = (index == 0) ? dev->info->fe_0 : dev->info->fe_1;
	dmachs = (index == 0) ? dev->info->ts_0 : dev->info->ts_1;
	/* port init.*/
	ret = smi_port_init(port, dmachs);
	if (ret < 0)
		return ret;
	/* dvb init.*/
	ret = smi_dvb_init(port);
	if (ret < 0)
		goto err_del_port_init;
	/* fe init.*/
	ret = smi_fe_init(port);
	if (ret < 0)
		goto err_del_dvb_init;
	return 0;
err_del_dvb_init:
	smi_dvb_exit(port);
err_del_port_init:
	smi_port_exit(port);
	return ret;
}
930
/* Reverse of smi_port_attach(): frontend, DVB stack, then port hw. */
static void smi_port_detach(struct smi_port *port)
{
	smi_fe_exit(port);
	smi_dvb_exit(port);
	smi_port_exit(port);
}
937
938static int smi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
939{
940 struct smi_dev *dev;
941 int ret = -ENOMEM;
942
943 if (pci_enable_device(pdev) < 0)
944 return -ENODEV;
945
946 dev = kzalloc(sizeof(struct smi_dev), GFP_KERNEL);
947 if (!dev) {
948 ret = -ENOMEM;
949 goto err_pci_disable_device;
950 }
951
952 dev->pci_dev = pdev;
953 pci_set_drvdata(pdev, dev);
954 dev->info = (struct smi_cfg_info *) id->driver_data;
955 dev_info(&dev->pci_dev->dev,
956 "card detected: %s\n", dev->info->name);
957
958 dev->nr = dev->info->type;
959 dev->lmmio = ioremap(pci_resource_start(dev->pci_dev, 0),
960 pci_resource_len(dev->pci_dev, 0));
961 if (!dev->lmmio) {
962 ret = -ENOMEM;
963 goto err_kfree;
964 }
965
966 /* should we set to 32bit DMA? */
967 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
968 if (ret < 0)
969 goto err_pci_iounmap;
970
971 pci_set_master(pdev);
972
973 ret = smi_hw_init(dev);
974 if (ret < 0)
975 goto err_pci_iounmap;
976
977 ret = smi_i2c_init(dev);
978 if (ret < 0)
979 goto err_pci_iounmap;
980
981 if (dev->info->ts_0) {
982 ret = smi_port_attach(dev, &dev->ts_port[0], 0);
983 if (ret < 0)
984 goto err_del_i2c_adaptor;
985 }
986
987 if (dev->info->ts_1) {
988 ret = smi_port_attach(dev, &dev->ts_port[1], 1);
989 if (ret < 0)
990 goto err_del_port0_attach;
991 }
992
993#ifdef CONFIG_PCI_MSI /* to do msi interrupt.???*/
994 if (pci_msi_enabled())
995 ret = pci_enable_msi(dev->pci_dev);
996 if (ret)
997 dev_info(&dev->pci_dev->dev, "MSI not available.\n");
998#endif
999
1000 ret = request_irq(dev->pci_dev->irq, smi_irq_handler,
1001 IRQF_SHARED, "SMI_PCIE", dev);
1002 if (ret < 0)
1003 goto err_del_port1_attach;
1004
1005 return 0;
1006
1007err_del_port1_attach:
1008 if (dev->info->ts_1)
1009 smi_port_detach(&dev->ts_port[1]);
1010err_del_port0_attach:
1011 if (dev->info->ts_0)
1012 smi_port_detach(&dev->ts_port[0]);
1013err_del_i2c_adaptor:
1014 smi_i2c_exit(dev);
1015err_pci_iounmap:
1016 iounmap(dev->lmmio);
1017err_kfree:
1018 pci_set_drvdata(pdev, NULL);
1019 kfree(dev);
1020err_pci_disable_device:
1021 pci_disable_device(pdev);
1022 return ret;
1023}
1024
/*
 * smi_remove() - PCI remove: mask all interrupt sources, release the
 * irq (and MSI vector), detach the ports, then unmap and free in
 * reverse probe order.
 */
static void smi_remove(struct pci_dev *pdev)
{
	struct smi_dev *dev = pci_get_drvdata(pdev);

	/* mask every interrupt source before freeing the handler */
	smi_write(MSI_INT_ENA_CLR, ALL_INT);
	free_irq(dev->pci_dev->irq, dev);
#ifdef CONFIG_PCI_MSI
	pci_disable_msi(dev->pci_dev);
#endif
	if (dev->info->ts_1)
		smi_port_detach(&dev->ts_port[1]);
	if (dev->info->ts_0)
		smi_port_detach(&dev->ts_port[0]);

	smi_i2c_exit(dev);
	iounmap(dev->lmmio);
	pci_set_drvdata(pdev, NULL);
	pci_disable_device(pdev);
	kfree(dev);
}
1045
/* DVBSky cards */

/* S950 V3: single M88DS3103 frontend on TS port 1; port 0 unused. */
static struct smi_cfg_info dvbsky_s950_cfg = {
	.type = SMI_DVBSKY_S950,
	.name = "DVBSky S950 V3",
	.ts_0 = SMI_TS_NULL,
	.ts_1 = SMI_TS_DMA_BOTH,
	.fe_0 = DVBSKY_FE_NULL,
	.fe_1 = DVBSKY_FE_M88DS3103,
};
1055
/* S952 V3: dual M88RS6000 frontends, one per TS port. */
static struct smi_cfg_info dvbsky_s952_cfg = {
	.type = SMI_DVBSKY_S952,
	.name = "DVBSky S952 V3",
	.ts_0 = SMI_TS_DMA_BOTH,
	.ts_1 = SMI_TS_DMA_BOTH,
	.fe_0 = DVBSKY_FE_M88RS6000,
	.fe_1 = DVBSKY_FE_M88RS6000,
};
1064
/* T9580 V3: mixed board — SIT2 on port 0, M88DS3103 on port 1. */
static struct smi_cfg_info dvbsky_t9580_cfg = {
	.type = SMI_DVBSKY_T9580,
	.name = "DVBSky T9580 V3",
	.ts_0 = SMI_TS_DMA_BOTH,
	.ts_1 = SMI_TS_DMA_BOTH,
	.fe_0 = DVBSKY_FE_SIT2,
	.fe_1 = DVBSKY_FE_M88DS3103,
};
1073
/* PCI IDs: all boards share SMI_VID/SMI_PID and are told apart by the
 * subsystem IDs; driver_data points at the matching board config. */
#define SMI_ID(_subvend, _subdev, _driverdata) { \
	.vendor = SMI_VID, .device = SMI_PID, \
	.subvendor = _subvend, .subdevice = _subdev, \
	.driver_data = (unsigned long)&_driverdata }

static const struct pci_device_id smi_id_table[] = {
	SMI_ID(0x4254, 0x0550, dvbsky_s950_cfg),
	SMI_ID(0x4254, 0x0552, dvbsky_s952_cfg),
	SMI_ID(0x4254, 0x5580, dvbsky_t9580_cfg),
	{0}	/* terminator */
};
1086MODULE_DEVICE_TABLE(pci, smi_id_table);
1087
/* PCI driver registration for all DVBSky SMI boards in smi_id_table. */
static struct pci_driver smipcie_driver = {
	.name = "SMI PCIe driver",
	.id_table = smi_id_table,
	.probe = smi_probe,
	.remove = smi_remove,
};
1094
1095module_pci_driver(smipcie_driver);
1096
1097MODULE_AUTHOR("Max nibble <nibble.max@gmail.com>");
1098MODULE_DESCRIPTION("SMI PCIe driver");
1099MODULE_LICENSE("GPL");
diff --git a/drivers/media/pci/smipcie/smipcie.h b/drivers/media/pci/smipcie/smipcie.h
new file mode 100644
index 000000000000..10cdf20f4839
--- /dev/null
+++ b/drivers/media/pci/smipcie/smipcie.h
@@ -0,0 +1,299 @@
1/*
2 * SMI PCIe driver for DVBSky cards.
3 *
4 * Copyright (C) 2014 Max nibble <nibble.max@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16
17#ifndef _SMI_PCIE_H_
18#define _SMI_PCIE_H_
19
20#include <linux/i2c.h>
21#include <linux/i2c-algo-bit.h>
22#include <linux/init.h>
23#include <linux/interrupt.h>
24#include <linux/kernel.h>
25#include <linux/module.h>
26#include <linux/proc_fs.h>
27#include <linux/pci.h>
28#include <linux/dma-mapping.h>
29#include <linux/slab.h>
30#include <media/rc-core.h>
31
32#include "demux.h"
33#include "dmxdev.h"
34#include "dvb_demux.h"
35#include "dvb_frontend.h"
36#include "dvb_net.h"
37#include "dvbdev.h"
38
39/* -------- Register Base -------- */
40#define MSI_CONTROL_REG_BASE 0x0800
41#define SYSTEM_CONTROL_REG_BASE 0x0880
42#define PCIE_EP_DEBUG_REG_BASE 0x08C0
43#define IR_CONTROL_REG_BASE 0x0900
44#define I2C_A_CONTROL_REG_BASE 0x0940
45#define I2C_B_CONTROL_REG_BASE 0x0980
46#define ATV_PORTA_CONTROL_REG_BASE 0x09C0
47#define DTV_PORTA_CONTROL_REG_BASE 0x0A00
48#define AES_PORTA_CONTROL_REG_BASE 0x0A80
49#define DMA_PORTA_CONTROL_REG_BASE 0x0AC0
50#define ATV_PORTB_CONTROL_REG_BASE 0x0B00
51#define DTV_PORTB_CONTROL_REG_BASE 0x0B40
52#define AES_PORTB_CONTROL_REG_BASE 0x0BC0
53#define DMA_PORTB_CONTROL_REG_BASE 0x0C00
54#define UART_A_REGISTER_BASE 0x0C40
55#define UART_B_REGISTER_BASE 0x0C80
#define GPS_CONTROL_REG_BASE 0x0CC0
#define DMA_PORTC_CONTROL_REG_BASE 0x0D00
/* NOTE(review): identical to the PORTC base above — looks like a
 * copy/paste slip (0x0D40 would match the 0x40 stride of the other
 * blocks); confirm against the chip datasheet before using port D. */
#define DMA_PORTD_CONTROL_REG_BASE 0x0D00
59#define AES_RANDOM_DATA_BASE 0x0D80
60#define AES_KEY_IN_BASE 0x0D90
61#define RANDOM_DATA_LIB_BASE 0x0E00
62#define IR_DATA_BUFFER_BASE 0x0F00
63#define PORTA_TS_BUFFER_BASE 0x1000
64#define PORTA_I2S_BUFFER_BASE 0x1400
65#define PORTB_TS_BUFFER_BASE 0x1800
66#define PORTB_I2S_BUFFER_BASE 0x1C00
67
68/* -------- MSI control and state register -------- */
69#define MSI_DELAY_TIMER (MSI_CONTROL_REG_BASE + 0x00)
70#define MSI_INT_STATUS (MSI_CONTROL_REG_BASE + 0x08)
71#define MSI_INT_STATUS_CLR (MSI_CONTROL_REG_BASE + 0x0C)
72#define MSI_INT_STATUS_SET (MSI_CONTROL_REG_BASE + 0x10)
73#define MSI_INT_ENA (MSI_CONTROL_REG_BASE + 0x14)
74#define MSI_INT_ENA_CLR (MSI_CONTROL_REG_BASE + 0x18)
75#define MSI_INT_ENA_SET (MSI_CONTROL_REG_BASE + 0x1C)
76#define MSI_SOFT_RESET (MSI_CONTROL_REG_BASE + 0x20)
77#define MSI_CFG_SRC0 (MSI_CONTROL_REG_BASE + 0x24)
78
/* -------- Hybrid Controller System Control register -------- */
80#define MUX_MODE_CTRL (SYSTEM_CONTROL_REG_BASE + 0x00)
81 #define rbPaMSMask 0x07
82 #define rbPaMSDtvNoGpio 0x00 /*[2:0], DTV Simple mode */
83 #define rbPaMSDtv4bitGpio 0x01 /*[2:0], DTV TS2 Serial mode)*/
84 #define rbPaMSDtv7bitGpio 0x02 /*[2:0], DTV TS0 Serial mode*/
85 #define rbPaMS8bitGpio 0x03 /*[2:0], GPIO mode selected;(8bit GPIO)*/
86 #define rbPaMSAtv 0x04 /*[2:0], 3'b1xx: ATV mode select*/
87 #define rbPbMSMask 0x38
88 #define rbPbMSDtvNoGpio 0x00 /*[5:3], DTV Simple mode */
89 #define rbPbMSDtv4bitGpio 0x08 /*[5:3], DTV TS2 Serial mode*/
90 #define rbPbMSDtv7bitGpio 0x10 /*[5:3], DTV TS0 Serial mode*/
91 #define rbPbMS8bitGpio 0x18 /*[5:3], GPIO mode selected;(8bit GPIO)*/
92 #define rbPbMSAtv 0x20 /*[5:3], 3'b1xx: ATV mode select*/
93 #define rbPaAESEN 0x40 /*[6], port A AES enable bit*/
94 #define rbPbAESEN 0x80 /*[7], port B AES enable bit*/
95
96#define INTERNAL_RST (SYSTEM_CONTROL_REG_BASE + 0x04)
97#define PERIPHERAL_CTRL (SYSTEM_CONTROL_REG_BASE + 0x08)
98#define GPIO_0to7_CTRL (SYSTEM_CONTROL_REG_BASE + 0x0C)
99#define GPIO_8to15_CTRL (SYSTEM_CONTROL_REG_BASE + 0x10)
100#define GPIO_16to24_CTRL (SYSTEM_CONTROL_REG_BASE + 0x14)
101#define GPIO_INT_SRC_CFG (SYSTEM_CONTROL_REG_BASE + 0x18)
102#define SYS_BUF_STATUS (SYSTEM_CONTROL_REG_BASE + 0x1C)
103#define PCIE_IP_REG_ACS (SYSTEM_CONTROL_REG_BASE + 0x20)
104#define PCIE_IP_REG_ACS_ADDR (SYSTEM_CONTROL_REG_BASE + 0x24)
105#define PCIE_IP_REG_ACS_DATA (SYSTEM_CONTROL_REG_BASE + 0x28)
106
107/* -------- IR Control register -------- */
108#define IR_Init_Reg (IR_CONTROL_REG_BASE + 0x00)
109#define IR_Idle_Cnt_Low (IR_CONTROL_REG_BASE + 0x04)
110#define IR_Idle_Cnt_High (IR_CONTROL_REG_BASE + 0x05)
111#define IR_Unit_Cnt_Low (IR_CONTROL_REG_BASE + 0x06)
112#define IR_Unit_Cnt_High (IR_CONTROL_REG_BASE + 0x07)
113#define IR_Data_Cnt (IR_CONTROL_REG_BASE + 0x08)
114#define rbIRen 0x80
115#define rbIRhighidle 0x10
116#define rbIRlowidle 0x00
117#define rbIRVld 0x04
118
119/* -------- I2C A control and state register -------- */
120#define I2C_A_CTL_STATUS (I2C_A_CONTROL_REG_BASE + 0x00)
121#define I2C_A_ADDR (I2C_A_CONTROL_REG_BASE + 0x04)
122#define I2C_A_SW_CTL (I2C_A_CONTROL_REG_BASE + 0x08)
123#define I2C_A_TIME_OUT_CNT (I2C_A_CONTROL_REG_BASE + 0x0C)
124#define I2C_A_FIFO_STATUS (I2C_A_CONTROL_REG_BASE + 0x10)
125#define I2C_A_FS_EN (I2C_A_CONTROL_REG_BASE + 0x14)
126#define I2C_A_FIFO_DATA (I2C_A_CONTROL_REG_BASE + 0x20)
127
128/* -------- I2C B control and state register -------- */
129#define I2C_B_CTL_STATUS (I2C_B_CONTROL_REG_BASE + 0x00)
130#define I2C_B_ADDR (I2C_B_CONTROL_REG_BASE + 0x04)
131#define I2C_B_SW_CTL (I2C_B_CONTROL_REG_BASE + 0x08)
132#define I2C_B_TIME_OUT_CNT (I2C_B_CONTROL_REG_BASE + 0x0C)
133#define I2C_B_FIFO_STATUS (I2C_B_CONTROL_REG_BASE + 0x10)
134#define I2C_B_FS_EN (I2C_B_CONTROL_REG_BASE + 0x14)
135#define I2C_B_FIFO_DATA (I2C_B_CONTROL_REG_BASE + 0x20)
136
137#define VIDEO_CTRL_STATUS_A (ATV_PORTA_CONTROL_REG_BASE + 0x04)
138
139/* -------- Digital TV control register, Port A -------- */
140#define MPEG2_CTRL_A (DTV_PORTA_CONTROL_REG_BASE + 0x00)
141#define SERIAL_IN_ADDR_A (DTV_PORTA_CONTROL_REG_BASE + 0x4C)
142#define VLD_CNT_ADDR_A (DTV_PORTA_CONTROL_REG_BASE + 0x60)
143#define ERR_CNT_ADDR_A (DTV_PORTA_CONTROL_REG_BASE + 0x64)
144#define BRD_CNT_ADDR_A (DTV_PORTA_CONTROL_REG_BASE + 0x68)
145
146/* -------- DMA Control Register, Port A -------- */
147#define DMA_PORTA_CHAN0_ADDR_LOW (DMA_PORTA_CONTROL_REG_BASE + 0x00)
148#define DMA_PORTA_CHAN0_ADDR_HI (DMA_PORTA_CONTROL_REG_BASE + 0x04)
149#define DMA_PORTA_CHAN0_TRANS_STATE (DMA_PORTA_CONTROL_REG_BASE + 0x08)
150#define DMA_PORTA_CHAN0_CONTROL (DMA_PORTA_CONTROL_REG_BASE + 0x0C)
151#define DMA_PORTA_CHAN1_ADDR_LOW (DMA_PORTA_CONTROL_REG_BASE + 0x10)
152#define DMA_PORTA_CHAN1_ADDR_HI (DMA_PORTA_CONTROL_REG_BASE + 0x14)
153#define DMA_PORTA_CHAN1_TRANS_STATE (DMA_PORTA_CONTROL_REG_BASE + 0x18)
154#define DMA_PORTA_CHAN1_CONTROL (DMA_PORTA_CONTROL_REG_BASE + 0x1C)
155#define DMA_PORTA_MANAGEMENT (DMA_PORTA_CONTROL_REG_BASE + 0x20)
156#define VIDEO_CTRL_STATUS_B (ATV_PORTB_CONTROL_REG_BASE + 0x04)
157
158/* -------- Digital TV control register, Port B -------- */
159#define MPEG2_CTRL_B (DTV_PORTB_CONTROL_REG_BASE + 0x00)
160#define SERIAL_IN_ADDR_B (DTV_PORTB_CONTROL_REG_BASE + 0x4C)
161#define VLD_CNT_ADDR_B (DTV_PORTB_CONTROL_REG_BASE + 0x60)
162#define ERR_CNT_ADDR_B (DTV_PORTB_CONTROL_REG_BASE + 0x64)
163#define BRD_CNT_ADDR_B (DTV_PORTB_CONTROL_REG_BASE + 0x68)
164
165/* -------- AES control register, Port B -------- */
166#define AES_CTRL_B (AES_PORTB_CONTROL_REG_BASE + 0x00)
167#define AES_KEY_BASE_B (AES_PORTB_CONTROL_REG_BASE + 0x04)
168
169/* -------- DMA Control Register, Port B -------- */
170#define DMA_PORTB_CHAN0_ADDR_LOW (DMA_PORTB_CONTROL_REG_BASE + 0x00)
171#define DMA_PORTB_CHAN0_ADDR_HI (DMA_PORTB_CONTROL_REG_BASE + 0x04)
172#define DMA_PORTB_CHAN0_TRANS_STATE (DMA_PORTB_CONTROL_REG_BASE + 0x08)
173#define DMA_PORTB_CHAN0_CONTROL (DMA_PORTB_CONTROL_REG_BASE + 0x0C)
174#define DMA_PORTB_CHAN1_ADDR_LOW (DMA_PORTB_CONTROL_REG_BASE + 0x10)
175#define DMA_PORTB_CHAN1_ADDR_HI (DMA_PORTB_CONTROL_REG_BASE + 0x14)
176#define DMA_PORTB_CHAN1_TRANS_STATE (DMA_PORTB_CONTROL_REG_BASE + 0x18)
177#define DMA_PORTB_CHAN1_CONTROL (DMA_PORTB_CONTROL_REG_BASE + 0x1C)
178#define DMA_PORTB_MANAGEMENT (DMA_PORTB_CONTROL_REG_BASE + 0x20)
179
180#define DMA_TRANS_UNIT_188 (0x00000007)
181
182/* -------- Macro define of 24 interrupt resource --------*/
183#define DMA_A_CHAN0_DONE_INT (0x00000001)
184#define DMA_A_CHAN1_DONE_INT (0x00000002)
185#define DMA_B_CHAN0_DONE_INT (0x00000004)
186#define DMA_B_CHAN1_DONE_INT (0x00000008)
187#define DMA_C_CHAN0_DONE_INT (0x00000010)
188#define DMA_C_CHAN1_DONE_INT (0x00000020)
189#define DMA_D_CHAN0_DONE_INT (0x00000040)
190#define DMA_D_CHAN1_DONE_INT (0x00000080)
191#define DATA_BUF_OVERFLOW_INT (0x00000100)
192#define UART_0_X_INT (0x00000200)
193#define UART_1_X_INT (0x00000400)
194#define IR_X_INT (0x00000800)
195#define GPIO_0_INT (0x00001000)
196#define GPIO_1_INT (0x00002000)
197#define GPIO_2_INT (0x00004000)
198#define GPIO_3_INT (0x00008000)
199#define ALL_INT (0x0000FFFF)
200
201/* software I2C bit mask */
202#define SW_I2C_MSK_MODE 0x01
203#define SW_I2C_MSK_CLK_OUT 0x02
204#define SW_I2C_MSK_DAT_OUT 0x04
205#define SW_I2C_MSK_CLK_EN 0x08
206#define SW_I2C_MSK_DAT_EN 0x10
207#define SW_I2C_MSK_DAT_IN 0x40
208#define SW_I2C_MSK_CLK_IN 0x80
209
210#define SMI_VID 0x1ADE
211#define SMI_PID 0x3038
212#define SMI_TS_DMA_BUF_SIZE (1024 * 188)
213
/*
 * Per-board static configuration.  Instances live in smipcie.c and are
 * referenced through pci_device_id.driver_data.
 */
struct smi_cfg_info {
#define SMI_DVBSKY_S952 0
#define SMI_DVBSKY_S950 1
#define SMI_DVBSKY_T9580 2
#define SMI_DVBSKY_T982 3
	int type;	/* board id, one of SMI_DVBSKY_* above */
	char *name;	/* human-readable card name for the probe log */
#define SMI_TS_NULL 0
#define SMI_TS_DMA_SINGLE 1
#define SMI_TS_DMA_BOTH 3
/* SMI_TS_NULL: not use;
 * SMI_TS_DMA_SINGLE: use DMA 0 only;
 * SMI_TS_DMA_BOTH:use DMA 0 and 1.*/
	int ts_0;	/* DMA channel usage for TS port 0 (SMI_TS_*) */
	int ts_1;	/* DMA channel usage for TS port 1 (SMI_TS_*) */
#define DVBSKY_FE_NULL 0
#define DVBSKY_FE_M88RS6000 1
#define DVBSKY_FE_M88DS3103 2
#define DVBSKY_FE_SIT2 3
	int fe_0;	/* frontend type on port 0 (DVBSKY_FE_*) */
	int fe_1;	/* frontend type on port 1 (DVBSKY_FE_*) */
};
236
/*
 * Per-TS-port state.  Each card exposes up to two ports; the DMA_*
 * offset fields hold this port's copies of the DMA_PORTA/B register
 * offsets so the rest of the driver can stay port-agnostic.
 */
struct smi_port {
	struct smi_dev *dev;	/* owning card */
	int idx;		/* port number, 0 or 1 */
	int enable;
	int fe_type;		/* DVBSKY_FE_* from the card config */
	/* regs: per-port copies of the DMA_PORTx_* register offsets */
	u32 DMA_CHAN0_ADDR_LOW;
	u32 DMA_CHAN0_ADDR_HI;
	u32 DMA_CHAN0_TRANS_STATE;
	u32 DMA_CHAN0_CONTROL;
	u32 DMA_CHAN1_ADDR_LOW;
	u32 DMA_CHAN1_ADDR_HI;
	u32 DMA_CHAN1_TRANS_STATE;
	u32 DMA_CHAN1_CONTROL;
	u32 DMA_MANAGEMENT;
	/* dma */
	dma_addr_t dma_addr[2];	/* bus addresses of the two TS buffers */
	u8 *cpu_addr[2];	/* CPU mappings of the same buffers */
	u32 _dmaInterruptCH0;	/* nonzero when DMA channel 0 is in use */
	u32 _dmaInterruptCH1;	/* nonzero when DMA channel 1 is in use */
	u32 _int_status;	/* presumably irq status latched for the tasklet — confirm in irq handler */
	struct tasklet_struct tasklet;	/* enabled/disabled by start/stop_feed */
	/* dvb */
	struct dmx_frontend hw_frontend;
	struct dmx_frontend mem_frontend;
	struct dmxdev dmxdev;
	struct dvb_adapter dvb_adapter;
	struct dvb_demux demux;
	struct dvb_net dvbnet;
	int users;		/* active feed refcount (start/stop_feed) */
	struct dvb_frontend *fe;
	/* frontend i2c module */
	struct i2c_client *i2c_client_demod;
	struct i2c_client *i2c_client_tuner;
};
272
/* Per-card state, allocated in smi_probe() and stored as PCI drvdata. */
struct smi_dev {
	int nr;				/* board type (SMI_DVBSKY_*) */
	struct smi_cfg_info *info;	/* static config matched via PCI id */

	/* pcie */
	struct pci_dev *pci_dev;
	u32 __iomem *lmmio;		/* BAR0 mapping; see smi_read/smi_write */

	/* ts port */
	struct smi_port ts_port[2];

	/* i2c */
	struct i2c_adapter i2c_bus[2];		/* buses A and B */
	struct i2c_algo_bit_data i2c_bit[2];	/* bit-banging algo data */
};
288
/*
 * MMIO accessors.  NOTE: these macros expand a variable named 'dev'
 * (struct smi_dev *) that must exist in the calling scope; 'reg' is a
 * byte offset into BAR0, hence the >>2 since lmmio is a u32 pointer.
 */
#define smi_read(reg) readl(dev->lmmio + ((reg)>>2))
#define smi_write(reg, value) writel((value), dev->lmmio + ((reg)>>2))

/* read-modify-write: replace exactly the bits in 'mask' with 'value' */
#define smi_andor(reg, mask, value) \
	writel((readl(dev->lmmio+((reg)>>2)) & ~(mask)) |\
	((value) & (mask)), dev->lmmio+((reg)>>2))

#define smi_set(reg, bit) smi_andor((reg), (bit), (bit))
#define smi_clear(reg, bit) smi_andor((reg), (bit), 0)
298
299#endif /* #ifndef _SMI_PCIE_H_ */
diff --git a/drivers/media/pci/solo6x10/solo6x10-core.c b/drivers/media/pci/solo6x10/solo6x10-core.c
index 172583d736fe..8cbe6b49f4c2 100644
--- a/drivers/media/pci/solo6x10/solo6x10-core.c
+++ b/drivers/media/pci/solo6x10/solo6x10-core.c
@@ -105,11 +105,8 @@ static irqreturn_t solo_isr(int irq, void *data)
105 if (!status) 105 if (!status)
106 return IRQ_NONE; 106 return IRQ_NONE;
107 107
108 if (status & ~solo_dev->irq_mask) { 108 /* Acknowledge all interrupts immediately */
109 solo_reg_write(solo_dev, SOLO_IRQ_STAT, 109 solo_reg_write(solo_dev, SOLO_IRQ_STAT, status);
110 status & ~solo_dev->irq_mask);
111 status &= solo_dev->irq_mask;
112 }
113 110
114 if (status & SOLO_IRQ_PCI_ERR) 111 if (status & SOLO_IRQ_PCI_ERR)
115 solo_p2m_error_isr(solo_dev); 112 solo_p2m_error_isr(solo_dev);
@@ -132,9 +129,6 @@ static irqreturn_t solo_isr(int irq, void *data)
132 if (status & SOLO_IRQ_G723) 129 if (status & SOLO_IRQ_G723)
133 solo_g723_isr(solo_dev); 130 solo_g723_isr(solo_dev);
134 131
135 /* Clear all interrupts handled */
136 solo_reg_write(solo_dev, SOLO_IRQ_STAT, status);
137
138 return IRQ_HANDLED; 132 return IRQ_HANDLED;
139} 133}
140 134
diff --git a/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c b/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c
index 28023f9f1dc7..6e933d383fa2 100644
--- a/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c
+++ b/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c
@@ -463,7 +463,6 @@ static int solo_fill_jpeg(struct solo_enc_dev *solo_enc,
463 struct solo_dev *solo_dev = solo_enc->solo_dev; 463 struct solo_dev *solo_dev = solo_enc->solo_dev;
464 struct sg_table *vbuf = vb2_dma_sg_plane_desc(vb, 0); 464 struct sg_table *vbuf = vb2_dma_sg_plane_desc(vb, 0);
465 int frame_size; 465 int frame_size;
466 int ret;
467 466
468 vb->v4l2_buf.flags |= V4L2_BUF_FLAG_KEYFRAME; 467 vb->v4l2_buf.flags |= V4L2_BUF_FLAG_KEYFRAME;
469 468
@@ -473,22 +472,10 @@ static int solo_fill_jpeg(struct solo_enc_dev *solo_enc,
473 frame_size = ALIGN(vop_jpeg_size(vh) + solo_enc->jpeg_len, DMA_ALIGN); 472 frame_size = ALIGN(vop_jpeg_size(vh) + solo_enc->jpeg_len, DMA_ALIGN);
474 vb2_set_plane_payload(vb, 0, vop_jpeg_size(vh) + solo_enc->jpeg_len); 473 vb2_set_plane_payload(vb, 0, vop_jpeg_size(vh) + solo_enc->jpeg_len);
475 474
476 /* may discard all previous data in vbuf->sgl */ 475 return solo_send_desc(solo_enc, solo_enc->jpeg_len, vbuf,
477 if (!dma_map_sg(&solo_dev->pdev->dev, vbuf->sgl, vbuf->nents,
478 DMA_FROM_DEVICE))
479 return -ENOMEM;
480 ret = solo_send_desc(solo_enc, solo_enc->jpeg_len, vbuf,
481 vop_jpeg_offset(vh) - SOLO_JPEG_EXT_ADDR(solo_dev), 476 vop_jpeg_offset(vh) - SOLO_JPEG_EXT_ADDR(solo_dev),
482 frame_size, SOLO_JPEG_EXT_ADDR(solo_dev), 477 frame_size, SOLO_JPEG_EXT_ADDR(solo_dev),
483 SOLO_JPEG_EXT_SIZE(solo_dev)); 478 SOLO_JPEG_EXT_SIZE(solo_dev));
484 dma_unmap_sg(&solo_dev->pdev->dev, vbuf->sgl, vbuf->nents,
485 DMA_FROM_DEVICE);
486
487 /* add the header only after dma_unmap_sg() */
488 sg_copy_from_buffer(vbuf->sgl, vbuf->nents,
489 solo_enc->jpeg_header, solo_enc->jpeg_len);
490
491 return ret;
492} 479}
493 480
494static int solo_fill_mpeg(struct solo_enc_dev *solo_enc, 481static int solo_fill_mpeg(struct solo_enc_dev *solo_enc,
@@ -498,7 +485,6 @@ static int solo_fill_mpeg(struct solo_enc_dev *solo_enc,
498 struct sg_table *vbuf = vb2_dma_sg_plane_desc(vb, 0); 485 struct sg_table *vbuf = vb2_dma_sg_plane_desc(vb, 0);
499 int frame_off, frame_size; 486 int frame_off, frame_size;
500 int skip = 0; 487 int skip = 0;
501 int ret;
502 488
503 if (vb2_plane_size(vb, 0) < vop_mpeg_size(vh)) 489 if (vb2_plane_size(vb, 0) < vop_mpeg_size(vh))
504 return -EIO; 490 return -EIO;
@@ -521,21 +507,9 @@ static int solo_fill_mpeg(struct solo_enc_dev *solo_enc,
521 sizeof(*vh)) % SOLO_MP4E_EXT_SIZE(solo_dev); 507 sizeof(*vh)) % SOLO_MP4E_EXT_SIZE(solo_dev);
522 frame_size = ALIGN(vop_mpeg_size(vh) + skip, DMA_ALIGN); 508 frame_size = ALIGN(vop_mpeg_size(vh) + skip, DMA_ALIGN);
523 509
524 /* may discard all previous data in vbuf->sgl */ 510 return solo_send_desc(solo_enc, skip, vbuf, frame_off, frame_size,
525 if (!dma_map_sg(&solo_dev->pdev->dev, vbuf->sgl, vbuf->nents,
526 DMA_FROM_DEVICE))
527 return -ENOMEM;
528 ret = solo_send_desc(solo_enc, skip, vbuf, frame_off, frame_size,
529 SOLO_MP4E_EXT_ADDR(solo_dev), 511 SOLO_MP4E_EXT_ADDR(solo_dev),
530 SOLO_MP4E_EXT_SIZE(solo_dev)); 512 SOLO_MP4E_EXT_SIZE(solo_dev));
531 dma_unmap_sg(&solo_dev->pdev->dev, vbuf->sgl, vbuf->nents,
532 DMA_FROM_DEVICE);
533
534 /* add the header only after dma_unmap_sg() */
535 if (!vop_type(vh))
536 sg_copy_from_buffer(vbuf->sgl, vbuf->nents,
537 solo_enc->vop, solo_enc->vop_len);
538 return ret;
539} 513}
540 514
541static int solo_enc_fillbuf(struct solo_enc_dev *solo_enc, 515static int solo_enc_fillbuf(struct solo_enc_dev *solo_enc,
@@ -703,9 +677,7 @@ static int solo_ring_thread(void *data)
703 677
704 if (timeout == -ERESTARTSYS || kthread_should_stop()) 678 if (timeout == -ERESTARTSYS || kthread_should_stop())
705 break; 679 break;
706 solo_irq_off(solo_dev, SOLO_IRQ_ENCODER);
707 solo_handle_ring(solo_dev); 680 solo_handle_ring(solo_dev);
708 solo_irq_on(solo_dev, SOLO_IRQ_ENCODER);
709 try_to_freeze(); 681 try_to_freeze();
710 } 682 }
711 683
@@ -720,7 +692,10 @@ static int solo_enc_queue_setup(struct vb2_queue *q,
720 unsigned int *num_planes, unsigned int sizes[], 692 unsigned int *num_planes, unsigned int sizes[],
721 void *alloc_ctxs[]) 693 void *alloc_ctxs[])
722{ 694{
695 struct solo_enc_dev *solo_enc = vb2_get_drv_priv(q);
696
723 sizes[0] = FRAME_BUF_SIZE; 697 sizes[0] = FRAME_BUF_SIZE;
698 alloc_ctxs[0] = solo_enc->alloc_ctx;
724 *num_planes = 1; 699 *num_planes = 1;
725 700
726 if (*num_buffers < MIN_VID_BUFFERS) 701 if (*num_buffers < MIN_VID_BUFFERS)
@@ -770,26 +745,51 @@ static void solo_ring_stop(struct solo_dev *solo_dev)
770static int solo_enc_start_streaming(struct vb2_queue *q, unsigned int count) 745static int solo_enc_start_streaming(struct vb2_queue *q, unsigned int count)
771{ 746{
772 struct solo_enc_dev *solo_enc = vb2_get_drv_priv(q); 747 struct solo_enc_dev *solo_enc = vb2_get_drv_priv(q);
773 int ret;
774 748
775 ret = solo_enc_on(solo_enc); 749 return solo_enc_on(solo_enc);
776 if (ret)
777 return ret;
778 return solo_ring_start(solo_enc->solo_dev);
779} 750}
780 751
781static void solo_enc_stop_streaming(struct vb2_queue *q) 752static void solo_enc_stop_streaming(struct vb2_queue *q)
782{ 753{
783 struct solo_enc_dev *solo_enc = vb2_get_drv_priv(q); 754 struct solo_enc_dev *solo_enc = vb2_get_drv_priv(q);
755 unsigned long flags;
784 756
757 spin_lock_irqsave(&solo_enc->av_lock, flags);
785 solo_enc_off(solo_enc); 758 solo_enc_off(solo_enc);
786 INIT_LIST_HEAD(&solo_enc->vidq_active); 759 while (!list_empty(&solo_enc->vidq_active)) {
787 solo_ring_stop(solo_enc->solo_dev); 760 struct solo_vb2_buf *buf = list_entry(
761 solo_enc->vidq_active.next,
762 struct solo_vb2_buf, list);
763
764 list_del(&buf->list);
765 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
766 }
767 spin_unlock_irqrestore(&solo_enc->av_lock, flags);
768}
769
770static void solo_enc_buf_finish(struct vb2_buffer *vb)
771{
772 struct solo_enc_dev *solo_enc = vb2_get_drv_priv(vb->vb2_queue);
773 struct sg_table *vbuf = vb2_dma_sg_plane_desc(vb, 0);
774
775 switch (solo_enc->fmt) {
776 case V4L2_PIX_FMT_MPEG4:
777 case V4L2_PIX_FMT_H264:
778 if (vb->v4l2_buf.flags & V4L2_BUF_FLAG_KEYFRAME)
779 sg_copy_from_buffer(vbuf->sgl, vbuf->nents,
780 solo_enc->vop, solo_enc->vop_len);
781 break;
782 default: /* V4L2_PIX_FMT_MJPEG */
783 sg_copy_from_buffer(vbuf->sgl, vbuf->nents,
784 solo_enc->jpeg_header, solo_enc->jpeg_len);
785 break;
786 }
788} 787}
789 788
790static struct vb2_ops solo_enc_video_qops = { 789static struct vb2_ops solo_enc_video_qops = {
791 .queue_setup = solo_enc_queue_setup, 790 .queue_setup = solo_enc_queue_setup,
792 .buf_queue = solo_enc_buf_queue, 791 .buf_queue = solo_enc_buf_queue,
792 .buf_finish = solo_enc_buf_finish,
793 .start_streaming = solo_enc_start_streaming, 793 .start_streaming = solo_enc_start_streaming,
794 .stop_streaming = solo_enc_stop_streaming, 794 .stop_streaming = solo_enc_stop_streaming,
795 .wait_prepare = vb2_ops_wait_prepare, 795 .wait_prepare = vb2_ops_wait_prepare,
@@ -1263,6 +1263,11 @@ static struct solo_enc_dev *solo_enc_alloc(struct solo_dev *solo_dev,
1263 return ERR_PTR(-ENOMEM); 1263 return ERR_PTR(-ENOMEM);
1264 1264
1265 hdl = &solo_enc->hdl; 1265 hdl = &solo_enc->hdl;
1266 solo_enc->alloc_ctx = vb2_dma_sg_init_ctx(&solo_dev->pdev->dev);
1267 if (IS_ERR(solo_enc->alloc_ctx)) {
1268 ret = PTR_ERR(solo_enc->alloc_ctx);
1269 goto hdl_free;
1270 }
1266 v4l2_ctrl_handler_init(hdl, 10); 1271 v4l2_ctrl_handler_init(hdl, 10);
1267 v4l2_ctrl_new_std(hdl, &solo_ctrl_ops, 1272 v4l2_ctrl_new_std(hdl, &solo_ctrl_ops,
1268 V4L2_CID_BRIGHTNESS, 0, 255, 1, 128); 1273 V4L2_CID_BRIGHTNESS, 0, 255, 1, 128);
@@ -1366,6 +1371,7 @@ pci_free:
1366 solo_enc->desc_items, solo_enc->desc_dma); 1371 solo_enc->desc_items, solo_enc->desc_dma);
1367hdl_free: 1372hdl_free:
1368 v4l2_ctrl_handler_free(hdl); 1373 v4l2_ctrl_handler_free(hdl);
1374 vb2_dma_sg_cleanup_ctx(solo_enc->alloc_ctx);
1369 kfree(solo_enc); 1375 kfree(solo_enc);
1370 return ERR_PTR(ret); 1376 return ERR_PTR(ret);
1371} 1377}
@@ -1375,8 +1381,12 @@ static void solo_enc_free(struct solo_enc_dev *solo_enc)
1375 if (solo_enc == NULL) 1381 if (solo_enc == NULL)
1376 return; 1382 return;
1377 1383
1384 pci_free_consistent(solo_enc->solo_dev->pdev,
1385 sizeof(struct solo_p2m_desc) * solo_enc->desc_nelts,
1386 solo_enc->desc_items, solo_enc->desc_dma);
1378 video_unregister_device(solo_enc->vfd); 1387 video_unregister_device(solo_enc->vfd);
1379 v4l2_ctrl_handler_free(&solo_enc->hdl); 1388 v4l2_ctrl_handler_free(&solo_enc->hdl);
1389 vb2_dma_sg_cleanup_ctx(solo_enc->alloc_ctx);
1380 kfree(solo_enc); 1390 kfree(solo_enc);
1381} 1391}
1382 1392
@@ -1419,13 +1429,15 @@ int solo_enc_v4l2_init(struct solo_dev *solo_dev, unsigned nr)
1419 solo_dev->v4l2_enc[0]->vfd->num, 1429 solo_dev->v4l2_enc[0]->vfd->num,
1420 solo_dev->v4l2_enc[solo_dev->nr_chans - 1]->vfd->num); 1430 solo_dev->v4l2_enc[solo_dev->nr_chans - 1]->vfd->num);
1421 1431
1422 return 0; 1432 return solo_ring_start(solo_dev);
1423} 1433}
1424 1434
1425void solo_enc_v4l2_exit(struct solo_dev *solo_dev) 1435void solo_enc_v4l2_exit(struct solo_dev *solo_dev)
1426{ 1436{
1427 int i; 1437 int i;
1428 1438
1439 solo_ring_stop(solo_dev);
1440
1429 for (i = 0; i < solo_dev->nr_chans; i++) 1441 for (i = 0; i < solo_dev->nr_chans; i++)
1430 solo_enc_free(solo_dev->v4l2_enc[i]); 1442 solo_enc_free(solo_dev->v4l2_enc[i]);
1431 1443
diff --git a/drivers/media/pci/solo6x10/solo6x10.h b/drivers/media/pci/solo6x10/solo6x10.h
index 72017b7f0a75..bd8edfa319b8 100644
--- a/drivers/media/pci/solo6x10/solo6x10.h
+++ b/drivers/media/pci/solo6x10/solo6x10.h
@@ -180,6 +180,7 @@ struct solo_enc_dev {
180 u32 sequence; 180 u32 sequence;
181 struct vb2_queue vidq; 181 struct vb2_queue vidq;
182 struct list_head vidq_active; 182 struct list_head vidq_active;
183 void *alloc_ctx;
183 int desc_count; 184 int desc_count;
184 int desc_nelts; 185 int desc_nelts;
185 struct solo_p2m_desc *desc_items; 186 struct solo_p2m_desc *desc_items;
diff --git a/drivers/media/pci/tw68/tw68-core.c b/drivers/media/pci/tw68/tw68-core.c
index 63f0b64057cb..c135165a8b26 100644
--- a/drivers/media/pci/tw68/tw68-core.c
+++ b/drivers/media/pci/tw68/tw68-core.c
@@ -304,13 +304,19 @@ static int tw68_initdev(struct pci_dev *pci_dev,
304 /* Then do any initialisation wanted before interrupts are on */ 304 /* Then do any initialisation wanted before interrupts are on */
305 tw68_hw_init1(dev); 305 tw68_hw_init1(dev);
306 306
307 dev->alloc_ctx = vb2_dma_sg_init_ctx(&pci_dev->dev);
308 if (IS_ERR(dev->alloc_ctx)) {
309 err = PTR_ERR(dev->alloc_ctx);
310 goto fail3;
311 }
312
307 /* get irq */ 313 /* get irq */
308 err = devm_request_irq(&pci_dev->dev, pci_dev->irq, tw68_irq, 314 err = devm_request_irq(&pci_dev->dev, pci_dev->irq, tw68_irq,
309 IRQF_SHARED, dev->name, dev); 315 IRQF_SHARED, dev->name, dev);
310 if (err < 0) { 316 if (err < 0) {
311 pr_err("%s: can't get IRQ %d\n", 317 pr_err("%s: can't get IRQ %d\n",
312 dev->name, pci_dev->irq); 318 dev->name, pci_dev->irq);
313 goto fail3; 319 goto fail4;
314 } 320 }
315 321
316 /* 322 /*
@@ -324,7 +330,7 @@ static int tw68_initdev(struct pci_dev *pci_dev,
324 if (err < 0) { 330 if (err < 0) {
325 pr_err("%s: can't register video device\n", 331 pr_err("%s: can't register video device\n",
326 dev->name); 332 dev->name);
327 goto fail4; 333 goto fail5;
328 } 334 }
329 tw_setl(TW68_INTMASK, dev->pci_irqmask); 335 tw_setl(TW68_INTMASK, dev->pci_irqmask);
330 336
@@ -333,8 +339,10 @@ static int tw68_initdev(struct pci_dev *pci_dev,
333 339
334 return 0; 340 return 0;
335 341
336fail4: 342fail5:
337 video_unregister_device(&dev->vdev); 343 video_unregister_device(&dev->vdev);
344fail4:
345 vb2_dma_sg_cleanup_ctx(dev->alloc_ctx);
338fail3: 346fail3:
339 iounmap(dev->lmmio); 347 iounmap(dev->lmmio);
340fail2: 348fail2:
@@ -358,6 +366,7 @@ static void tw68_finidev(struct pci_dev *pci_dev)
358 /* unregister */ 366 /* unregister */
359 video_unregister_device(&dev->vdev); 367 video_unregister_device(&dev->vdev);
360 v4l2_ctrl_handler_free(&dev->hdl); 368 v4l2_ctrl_handler_free(&dev->hdl);
369 vb2_dma_sg_cleanup_ctx(dev->alloc_ctx);
361 370
362 /* release resources */ 371 /* release resources */
363 iounmap(dev->lmmio); 372 iounmap(dev->lmmio);
diff --git a/drivers/media/pci/tw68/tw68-video.c b/drivers/media/pci/tw68/tw68-video.c
index 5c94ac7c88d9..8355e55b4e8e 100644
--- a/drivers/media/pci/tw68/tw68-video.c
+++ b/drivers/media/pci/tw68/tw68-video.c
@@ -384,6 +384,7 @@ static int tw68_queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
384 unsigned tot_bufs = q->num_buffers + *num_buffers; 384 unsigned tot_bufs = q->num_buffers + *num_buffers;
385 385
386 sizes[0] = (dev->fmt->depth * dev->width * dev->height) >> 3; 386 sizes[0] = (dev->fmt->depth * dev->width * dev->height) >> 3;
387 alloc_ctxs[0] = dev->alloc_ctx;
387 /* 388 /*
388 * We allow create_bufs, but only if the sizeimage is the same as the 389 * We allow create_bufs, but only if the sizeimage is the same as the
389 * current sizeimage. The tw68_buffer_count calculation becomes quite 390 * current sizeimage. The tw68_buffer_count calculation becomes quite
@@ -461,17 +462,12 @@ static int tw68_buf_prepare(struct vb2_buffer *vb)
461 struct tw68_buf *buf = container_of(vb, struct tw68_buf, vb); 462 struct tw68_buf *buf = container_of(vb, struct tw68_buf, vb);
462 struct sg_table *dma = vb2_dma_sg_plane_desc(vb, 0); 463 struct sg_table *dma = vb2_dma_sg_plane_desc(vb, 0);
463 unsigned size, bpl; 464 unsigned size, bpl;
464 int rc;
465 465
466 size = (dev->width * dev->height * dev->fmt->depth) >> 3; 466 size = (dev->width * dev->height * dev->fmt->depth) >> 3;
467 if (vb2_plane_size(vb, 0) < size) 467 if (vb2_plane_size(vb, 0) < size)
468 return -EINVAL; 468 return -EINVAL;
469 vb2_set_plane_payload(vb, 0, size); 469 vb2_set_plane_payload(vb, 0, size);
470 470
471 rc = dma_map_sg(&dev->pci->dev, dma->sgl, dma->nents, DMA_FROM_DEVICE);
472 if (!rc)
473 return -EIO;
474
475 bpl = (dev->width * dev->fmt->depth) >> 3; 471 bpl = (dev->width * dev->fmt->depth) >> 3;
476 switch (dev->field) { 472 switch (dev->field) {
477 case V4L2_FIELD_TOP: 473 case V4L2_FIELD_TOP:
@@ -505,11 +501,8 @@ static void tw68_buf_finish(struct vb2_buffer *vb)
505{ 501{
506 struct vb2_queue *vq = vb->vb2_queue; 502 struct vb2_queue *vq = vb->vb2_queue;
507 struct tw68_dev *dev = vb2_get_drv_priv(vq); 503 struct tw68_dev *dev = vb2_get_drv_priv(vq);
508 struct sg_table *dma = vb2_dma_sg_plane_desc(vb, 0);
509 struct tw68_buf *buf = container_of(vb, struct tw68_buf, vb); 504 struct tw68_buf *buf = container_of(vb, struct tw68_buf, vb);
510 505
511 dma_unmap_sg(&dev->pci->dev, dma->sgl, dma->nents, DMA_FROM_DEVICE);
512
513 pci_free_consistent(dev->pci, buf->size, buf->cpu, buf->dma); 506 pci_free_consistent(dev->pci, buf->size, buf->cpu, buf->dma);
514} 507}
515 508
diff --git a/drivers/media/pci/tw68/tw68.h b/drivers/media/pci/tw68/tw68.h
index 2c8abe26b13b..7a7501bd165f 100644
--- a/drivers/media/pci/tw68/tw68.h
+++ b/drivers/media/pci/tw68/tw68.h
@@ -181,6 +181,7 @@ struct tw68_dev {
181 unsigned field; 181 unsigned field;
182 struct vb2_queue vidq; 182 struct vb2_queue vidq;
183 struct list_head active; 183 struct list_head active;
184 void *alloc_ctx;
184 185
185 /* various v4l controls */ 186 /* various v4l controls */
186 const struct tw68_tvnorm *tvnorm; /* video */ 187 const struct tw68_tvnorm *tvnorm; /* video */
diff --git a/drivers/media/pci/zoran/zoran_driver.c b/drivers/media/pci/zoran/zoran_driver.c
index 099d5fbebb7c..2b25d31c46f6 100644
--- a/drivers/media/pci/zoran/zoran_driver.c
+++ b/drivers/media/pci/zoran/zoran_driver.c
@@ -1528,8 +1528,9 @@ static int zoran_querycap(struct file *file, void *__fh, struct v4l2_capability
1528 strncpy(cap->driver, "zoran", sizeof(cap->driver)-1); 1528 strncpy(cap->driver, "zoran", sizeof(cap->driver)-1);
1529 snprintf(cap->bus_info, sizeof(cap->bus_info), "PCI:%s", 1529 snprintf(cap->bus_info, sizeof(cap->bus_info), "PCI:%s",
1530 pci_name(zr->pci_dev)); 1530 pci_name(zr->pci_dev));
1531 cap->capabilities = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE | 1531 cap->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE |
1532 V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_VIDEO_OVERLAY; 1532 V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_VIDEO_OVERLAY;
1533 cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
1533 return 0; 1534 return 0;
1534} 1535}
1535 1536
diff --git a/drivers/media/pci/zoran/zoran_procfs.c b/drivers/media/pci/zoran/zoran_procfs.c
index f7ceee0cdefd..437652761093 100644
--- a/drivers/media/pci/zoran/zoran_procfs.c
+++ b/drivers/media/pci/zoran/zoran_procfs.c
@@ -157,8 +157,8 @@ static ssize_t zoran_write(struct file *file, const char __user *buffer,
157 return -EFAULT; 157 return -EFAULT;
158 } 158 }
159 string[count] = 0; 159 string[count] = 0;
160 dprintk(4, KERN_INFO "%s: write_proc: name=%s count=%zu zr=%p\n", 160 dprintk(4, KERN_INFO "%s: write_proc: name=%pD count=%zu zr=%p\n",
161 ZR_DEVNAME(zr), file->f_path.dentry->d_name.name, count, zr); 161 ZR_DEVNAME(zr), file, count, zr);
162 ldelim = " \t\n"; 162 ldelim = " \t\n";
163 tdelim = "="; 163 tdelim = "=";
164 line = strpbrk(sp, ldelim); 164 line = strpbrk(sp, ldelim);
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index 3aac88f1d54a..0c61155699f7 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -267,8 +267,8 @@ if V4L_TEST_DRIVERS
267 267
268source "drivers/media/platform/vivid/Kconfig" 268source "drivers/media/platform/vivid/Kconfig"
269 269
270config VIDEO_MEM2MEM_TESTDEV 270config VIDEO_VIM2M
271 tristate "Virtual test device for mem2mem framework" 271 tristate "Virtual Memory-to-Memory Driver"
272 depends on VIDEO_DEV && VIDEO_V4L2 272 depends on VIDEO_DEV && VIDEO_V4L2
273 select VIDEOBUF2_VMALLOC 273 select VIDEOBUF2_VMALLOC
274 select V4L2_MEM2MEM_DEV 274 select V4L2_MEM2MEM_DEV
diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile
index 579046bc276f..b818afb4d336 100644
--- a/drivers/media/platform/Makefile
+++ b/drivers/media/platform/Makefile
@@ -17,7 +17,7 @@ obj-$(CONFIG_VIDEO_OMAP3) += omap3isp/
17obj-$(CONFIG_VIDEO_VIU) += fsl-viu.o 17obj-$(CONFIG_VIDEO_VIU) += fsl-viu.o
18 18
19obj-$(CONFIG_VIDEO_VIVID) += vivid/ 19obj-$(CONFIG_VIDEO_VIVID) += vivid/
20obj-$(CONFIG_VIDEO_MEM2MEM_TESTDEV) += mem2mem_testdev.o 20obj-$(CONFIG_VIDEO_VIM2M) += vim2m.o
21 21
22obj-$(CONFIG_VIDEO_TI_VPE) += ti-vpe/ 22obj-$(CONFIG_VIDEO_TI_VPE) += ti-vpe/
23 23
diff --git a/drivers/media/platform/blackfin/bfin_capture.c b/drivers/media/platform/blackfin/bfin_capture.c
index 9b5daa65841c..3112844e4fc1 100644
--- a/drivers/media/platform/blackfin/bfin_capture.c
+++ b/drivers/media/platform/blackfin/bfin_capture.c
@@ -49,7 +49,7 @@
49struct bcap_format { 49struct bcap_format {
50 char *desc; 50 char *desc;
51 u32 pixelformat; 51 u32 pixelformat;
52 enum v4l2_mbus_pixelcode mbus_code; 52 u32 mbus_code;
53 int bpp; /* bits per pixel */ 53 int bpp; /* bits per pixel */
54 int dlen; /* data length for ppi in bits */ 54 int dlen; /* data length for ppi in bits */
55}; 55};
@@ -116,35 +116,35 @@ static const struct bcap_format bcap_formats[] = {
116 { 116 {
117 .desc = "YCbCr 4:2:2 Interleaved UYVY", 117 .desc = "YCbCr 4:2:2 Interleaved UYVY",
118 .pixelformat = V4L2_PIX_FMT_UYVY, 118 .pixelformat = V4L2_PIX_FMT_UYVY,
119 .mbus_code = V4L2_MBUS_FMT_UYVY8_2X8, 119 .mbus_code = MEDIA_BUS_FMT_UYVY8_2X8,
120 .bpp = 16, 120 .bpp = 16,
121 .dlen = 8, 121 .dlen = 8,
122 }, 122 },
123 { 123 {
124 .desc = "YCbCr 4:2:2 Interleaved YUYV", 124 .desc = "YCbCr 4:2:2 Interleaved YUYV",
125 .pixelformat = V4L2_PIX_FMT_YUYV, 125 .pixelformat = V4L2_PIX_FMT_YUYV,
126 .mbus_code = V4L2_MBUS_FMT_YUYV8_2X8, 126 .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
127 .bpp = 16, 127 .bpp = 16,
128 .dlen = 8, 128 .dlen = 8,
129 }, 129 },
130 { 130 {
131 .desc = "YCbCr 4:2:2 Interleaved UYVY", 131 .desc = "YCbCr 4:2:2 Interleaved UYVY",
132 .pixelformat = V4L2_PIX_FMT_UYVY, 132 .pixelformat = V4L2_PIX_FMT_UYVY,
133 .mbus_code = V4L2_MBUS_FMT_UYVY8_1X16, 133 .mbus_code = MEDIA_BUS_FMT_UYVY8_1X16,
134 .bpp = 16, 134 .bpp = 16,
135 .dlen = 16, 135 .dlen = 16,
136 }, 136 },
137 { 137 {
138 .desc = "RGB 565", 138 .desc = "RGB 565",
139 .pixelformat = V4L2_PIX_FMT_RGB565, 139 .pixelformat = V4L2_PIX_FMT_RGB565,
140 .mbus_code = V4L2_MBUS_FMT_RGB565_2X8_LE, 140 .mbus_code = MEDIA_BUS_FMT_RGB565_2X8_LE,
141 .bpp = 16, 141 .bpp = 16,
142 .dlen = 8, 142 .dlen = 8,
143 }, 143 },
144 { 144 {
145 .desc = "RGB 444", 145 .desc = "RGB 444",
146 .pixelformat = V4L2_PIX_FMT_RGB444, 146 .pixelformat = V4L2_PIX_FMT_RGB444,
147 .mbus_code = V4L2_MBUS_FMT_RGB444_2X8_PADHI_LE, 147 .mbus_code = MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE,
148 .bpp = 16, 148 .bpp = 16,
149 .dlen = 8, 149 .dlen = 8,
150 }, 150 },
@@ -161,7 +161,7 @@ static struct bcap_buffer *to_bcap_vb(struct vb2_buffer *vb)
161 161
162static int bcap_init_sensor_formats(struct bcap_device *bcap_dev) 162static int bcap_init_sensor_formats(struct bcap_device *bcap_dev)
163{ 163{
164 enum v4l2_mbus_pixelcode code; 164 u32 code;
165 struct bcap_format *sf; 165 struct bcap_format *sf;
166 unsigned int num_formats = 0; 166 unsigned int num_formats = 0;
167 int i, j; 167 int i, j;
@@ -349,18 +349,6 @@ static void bcap_buffer_cleanup(struct vb2_buffer *vb)
349 spin_unlock_irqrestore(&bcap_dev->lock, flags); 349 spin_unlock_irqrestore(&bcap_dev->lock, flags);
350} 350}
351 351
352static void bcap_lock(struct vb2_queue *vq)
353{
354 struct bcap_device *bcap_dev = vb2_get_drv_priv(vq);
355 mutex_lock(&bcap_dev->mutex);
356}
357
358static void bcap_unlock(struct vb2_queue *vq)
359{
360 struct bcap_device *bcap_dev = vb2_get_drv_priv(vq);
361 mutex_unlock(&bcap_dev->mutex);
362}
363
364static int bcap_start_streaming(struct vb2_queue *vq, unsigned int count) 352static int bcap_start_streaming(struct vb2_queue *vq, unsigned int count)
365{ 353{
366 struct bcap_device *bcap_dev = vb2_get_drv_priv(vq); 354 struct bcap_device *bcap_dev = vb2_get_drv_priv(vq);
@@ -457,8 +445,8 @@ static struct vb2_ops bcap_video_qops = {
457 .buf_prepare = bcap_buffer_prepare, 445 .buf_prepare = bcap_buffer_prepare,
458 .buf_cleanup = bcap_buffer_cleanup, 446 .buf_cleanup = bcap_buffer_cleanup,
459 .buf_queue = bcap_buffer_queue, 447 .buf_queue = bcap_buffer_queue,
460 .wait_prepare = bcap_unlock, 448 .wait_prepare = vb2_ops_wait_prepare,
461 .wait_finish = bcap_lock, 449 .wait_finish = vb2_ops_wait_finish,
462 .start_streaming = bcap_start_streaming, 450 .start_streaming = bcap_start_streaming,
463 .stop_streaming = bcap_stop_streaming, 451 .stop_streaming = bcap_stop_streaming,
464}; 452};
@@ -841,7 +829,8 @@ static int bcap_querycap(struct file *file, void *priv,
841{ 829{
842 struct bcap_device *bcap_dev = video_drvdata(file); 830 struct bcap_device *bcap_dev = video_drvdata(file);
843 831
844 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; 832 cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
833 cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
845 strlcpy(cap->driver, CAPTURE_DRV_NAME, sizeof(cap->driver)); 834 strlcpy(cap->driver, CAPTURE_DRV_NAME, sizeof(cap->driver));
846 strlcpy(cap->bus_info, "Blackfin Platform", sizeof(cap->bus_info)); 835 strlcpy(cap->bus_info, "Blackfin Platform", sizeof(cap->bus_info));
847 strlcpy(cap->card, bcap_dev->cfg->card_name, sizeof(cap->card)); 836 strlcpy(cap->card, bcap_dev->cfg->card_name, sizeof(cap->card));
@@ -995,6 +984,7 @@ static int bcap_probe(struct platform_device *pdev)
995 q->ops = &bcap_video_qops; 984 q->ops = &bcap_video_qops;
996 q->mem_ops = &vb2_dma_contig_memops; 985 q->mem_ops = &vb2_dma_contig_memops;
997 q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; 986 q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
987 q->lock = &bcap_dev->mutex;
998 988
999 ret = vb2_queue_init(q); 989 ret = vb2_queue_init(q);
1000 if (ret) 990 if (ret)
diff --git a/drivers/media/platform/coda/Makefile b/drivers/media/platform/coda/Makefile
index 3543291e6273..25ce15561695 100644
--- a/drivers/media/platform/coda/Makefile
+++ b/drivers/media/platform/coda/Makefile
@@ -1,3 +1,3 @@
1coda-objs := coda-common.o coda-bit.o coda-h264.o 1coda-objs := coda-common.o coda-bit.o coda-h264.o coda-jpeg.o
2 2
3obj-$(CONFIG_VIDEO_CODA) += coda.o 3obj-$(CONFIG_VIDEO_CODA) += coda.o
diff --git a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/coda/coda-bit.c
index 9b8ea8bbeb4e..b4029ae293d3 100644
--- a/drivers/media/platform/coda/coda-bit.c
+++ b/drivers/media/platform/coda/coda-bit.c
@@ -217,11 +217,34 @@ static bool coda_bitstream_try_queue(struct coda_ctx *ctx,
217void coda_fill_bitstream(struct coda_ctx *ctx) 217void coda_fill_bitstream(struct coda_ctx *ctx)
218{ 218{
219 struct vb2_buffer *src_buf; 219 struct vb2_buffer *src_buf;
220 struct coda_timestamp *ts; 220 struct coda_buffer_meta *meta;
221 u32 start;
221 222
222 while (v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) > 0) { 223 while (v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) > 0) {
224 /*
225 * Only queue a single JPEG into the bitstream buffer, except
226 * to increase payload over 512 bytes or if in hold state.
227 */
228 if (ctx->codec->src_fourcc == V4L2_PIX_FMT_JPEG &&
229 (coda_get_bitstream_payload(ctx) >= 512) && !ctx->hold)
230 break;
231
223 src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); 232 src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
224 233
234 /* Drop frames that do not start/end with a SOI/EOI markers */
235 if (ctx->codec->src_fourcc == V4L2_PIX_FMT_JPEG &&
236 !coda_jpeg_check_buffer(ctx, src_buf)) {
237 v4l2_err(&ctx->dev->v4l2_dev,
238 "dropping invalid JPEG frame\n");
239 src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
240 v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
241 continue;
242 }
243
244 /* Buffer start position */
245 start = ctx->bitstream_fifo.kfifo.in &
246 ctx->bitstream_fifo.kfifo.mask;
247
225 if (coda_bitstream_try_queue(ctx, src_buf)) { 248 if (coda_bitstream_try_queue(ctx, src_buf)) {
226 /* 249 /*
227 * Source buffer is queued in the bitstream ringbuffer; 250 * Source buffer is queued in the bitstream ringbuffer;
@@ -229,12 +252,16 @@ void coda_fill_bitstream(struct coda_ctx *ctx)
229 */ 252 */
230 src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx); 253 src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
231 254
232 ts = kmalloc(sizeof(*ts), GFP_KERNEL); 255 meta = kmalloc(sizeof(*meta), GFP_KERNEL);
233 if (ts) { 256 if (meta) {
234 ts->sequence = src_buf->v4l2_buf.sequence; 257 meta->sequence = src_buf->v4l2_buf.sequence;
235 ts->timecode = src_buf->v4l2_buf.timecode; 258 meta->timecode = src_buf->v4l2_buf.timecode;
236 ts->timestamp = src_buf->v4l2_buf.timestamp; 259 meta->timestamp = src_buf->v4l2_buf.timestamp;
237 list_add_tail(&ts->list, &ctx->timestamp_list); 260 meta->start = start;
261 meta->end = ctx->bitstream_fifo.kfifo.in &
262 ctx->bitstream_fifo.kfifo.mask;
263 list_add_tail(&meta->list,
264 &ctx->buffer_meta_list);
238 } 265 }
239 266
240 v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE); 267 v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
@@ -691,6 +718,7 @@ static int coda_start_encoding(struct coda_ctx *ctx)
691 struct vb2_buffer *buf; 718 struct vb2_buffer *buf;
692 int gamma, ret, value; 719 int gamma, ret, value;
693 u32 dst_fourcc; 720 u32 dst_fourcc;
721 u32 stride;
694 722
695 q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT); 723 q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
696 q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE); 724 q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
@@ -710,6 +738,14 @@ static int coda_start_encoding(struct coda_ctx *ctx)
710 return -EFAULT; 738 return -EFAULT;
711 } 739 }
712 740
741 if (dst_fourcc == V4L2_PIX_FMT_JPEG) {
742 if (!ctx->params.jpeg_qmat_tab[0])
743 ctx->params.jpeg_qmat_tab[0] = kmalloc(64, GFP_KERNEL);
744 if (!ctx->params.jpeg_qmat_tab[1])
745 ctx->params.jpeg_qmat_tab[1] = kmalloc(64, GFP_KERNEL);
746 coda_set_jpeg_compression_quality(ctx, ctx->params.jpeg_quality);
747 }
748
713 mutex_lock(&dev->coda_mutex); 749 mutex_lock(&dev->coda_mutex);
714 750
715 coda_write(dev, ctx->parabuf.paddr, CODA_REG_BIT_PARA_BUF_ADDR); 751 coda_write(dev, ctx->parabuf.paddr, CODA_REG_BIT_PARA_BUF_ADDR);
@@ -729,10 +765,10 @@ static int coda_start_encoding(struct coda_ctx *ctx)
729 break; 765 break;
730 } 766 }
731 767
732 value = coda_read(dev, CODA_REG_BIT_FRAME_MEM_CTRL); 768 ctx->frame_mem_ctrl &= ~CODA_FRAME_CHROMA_INTERLEAVE;
733 value &= ~(1 << 2 | 0x7 << 9); 769 if (q_data_src->fourcc == V4L2_PIX_FMT_NV12)
734 ctx->frame_mem_ctrl = value; 770 ctx->frame_mem_ctrl |= CODA_FRAME_CHROMA_INTERLEAVE;
735 coda_write(dev, value, CODA_REG_BIT_FRAME_MEM_CTRL); 771 coda_write(dev, ctx->frame_mem_ctrl, CODA_REG_BIT_FRAME_MEM_CTRL);
736 772
737 if (dev->devtype->product == CODA_DX6) { 773 if (dev->devtype->product == CODA_DX6) {
738 /* Configure the coda */ 774 /* Configure the coda */
@@ -741,6 +777,7 @@ static int coda_start_encoding(struct coda_ctx *ctx)
741 } 777 }
742 778
743 /* Could set rotation here if needed */ 779 /* Could set rotation here if needed */
780 value = 0;
744 switch (dev->devtype->product) { 781 switch (dev->devtype->product) {
745 case CODA_DX6: 782 case CODA_DX6:
746 value = (q_data_src->width & CODADX6_PICWIDTH_MASK) 783 value = (q_data_src->width & CODADX6_PICWIDTH_MASK)
@@ -764,6 +801,8 @@ static int coda_start_encoding(struct coda_ctx *ctx)
764 << CODA_PICHEIGHT_OFFSET; 801 << CODA_PICHEIGHT_OFFSET;
765 } 802 }
766 coda_write(dev, value, CODA_CMD_ENC_SEQ_SRC_SIZE); 803 coda_write(dev, value, CODA_CMD_ENC_SEQ_SRC_SIZE);
804 if (dst_fourcc == V4L2_PIX_FMT_JPEG)
805 ctx->params.framerate = 0;
767 coda_write(dev, ctx->params.framerate, 806 coda_write(dev, ctx->params.framerate,
768 CODA_CMD_ENC_SEQ_SRC_F_RATE); 807 CODA_CMD_ENC_SEQ_SRC_F_RATE);
769 808
@@ -797,6 +836,16 @@ static int coda_start_encoding(struct coda_ctx *ctx)
797 } 836 }
798 coda_write(dev, value, CODA_CMD_ENC_SEQ_264_PARA); 837 coda_write(dev, value, CODA_CMD_ENC_SEQ_264_PARA);
799 break; 838 break;
839 case V4L2_PIX_FMT_JPEG:
840 coda_write(dev, 0, CODA_CMD_ENC_SEQ_JPG_PARA);
841 coda_write(dev, ctx->params.jpeg_restart_interval,
842 CODA_CMD_ENC_SEQ_JPG_RST_INTERVAL);
843 coda_write(dev, 0, CODA_CMD_ENC_SEQ_JPG_THUMB_EN);
844 coda_write(dev, 0, CODA_CMD_ENC_SEQ_JPG_THUMB_SIZE);
845 coda_write(dev, 0, CODA_CMD_ENC_SEQ_JPG_THUMB_OFFSET);
846
847 coda_jpeg_write_tables(ctx);
848 break;
800 default: 849 default:
801 v4l2_err(v4l2_dev, 850 v4l2_err(v4l2_dev,
802 "dst format (0x%08x) invalid.\n", dst_fourcc); 851 "dst format (0x%08x) invalid.\n", dst_fourcc);
@@ -804,28 +853,36 @@ static int coda_start_encoding(struct coda_ctx *ctx)
804 goto out; 853 goto out;
805 } 854 }
806 855
807 switch (ctx->params.slice_mode) { 856 /*
808 case V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE: 857 * slice mode and GOP size registers are used for thumb size/offset
809 value = 0; 858 * in JPEG mode
810 break; 859 */
811 case V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_MB: 860 if (dst_fourcc != V4L2_PIX_FMT_JPEG) {
812 value = (ctx->params.slice_max_mb & CODA_SLICING_SIZE_MASK) 861 switch (ctx->params.slice_mode) {
813 << CODA_SLICING_SIZE_OFFSET; 862 case V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE:
814 value |= (1 & CODA_SLICING_UNIT_MASK) 863 value = 0;
815 << CODA_SLICING_UNIT_OFFSET; 864 break;
816 value |= 1 & CODA_SLICING_MODE_MASK; 865 case V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_MB:
817 break; 866 value = (ctx->params.slice_max_mb &
818 case V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES: 867 CODA_SLICING_SIZE_MASK)
819 value = (ctx->params.slice_max_bits & CODA_SLICING_SIZE_MASK) 868 << CODA_SLICING_SIZE_OFFSET;
820 << CODA_SLICING_SIZE_OFFSET; 869 value |= (1 & CODA_SLICING_UNIT_MASK)
821 value |= (0 & CODA_SLICING_UNIT_MASK) 870 << CODA_SLICING_UNIT_OFFSET;
822 << CODA_SLICING_UNIT_OFFSET; 871 value |= 1 & CODA_SLICING_MODE_MASK;
823 value |= 1 & CODA_SLICING_MODE_MASK; 872 break;
824 break; 873 case V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES:
874 value = (ctx->params.slice_max_bits &
875 CODA_SLICING_SIZE_MASK)
876 << CODA_SLICING_SIZE_OFFSET;
877 value |= (0 & CODA_SLICING_UNIT_MASK)
878 << CODA_SLICING_UNIT_OFFSET;
879 value |= 1 & CODA_SLICING_MODE_MASK;
880 break;
881 }
882 coda_write(dev, value, CODA_CMD_ENC_SEQ_SLICE_MODE);
883 value = ctx->params.gop_size & CODA_GOP_SIZE_MASK;
884 coda_write(dev, value, CODA_CMD_ENC_SEQ_GOP_SIZE);
825 } 885 }
826 coda_write(dev, value, CODA_CMD_ENC_SEQ_SLICE_MODE);
827 value = ctx->params.gop_size & CODA_GOP_SIZE_MASK;
828 coda_write(dev, value, CODA_CMD_ENC_SEQ_GOP_SIZE);
829 886
830 if (ctx->params.bitrate) { 887 if (ctx->params.bitrate) {
831 /* Rate control enabled */ 888 /* Rate control enabled */
@@ -916,19 +973,24 @@ static int coda_start_encoding(struct coda_ctx *ctx)
916 goto out; 973 goto out;
917 } 974 }
918 975
919 if (dev->devtype->product == CODA_960) 976 if (dst_fourcc != V4L2_PIX_FMT_JPEG) {
920 ctx->num_internal_frames = 4; 977 if (dev->devtype->product == CODA_960)
921 else 978 ctx->num_internal_frames = 4;
922 ctx->num_internal_frames = 2; 979 else
923 ret = coda_alloc_framebuffers(ctx, q_data_src, dst_fourcc); 980 ctx->num_internal_frames = 2;
924 if (ret < 0) { 981 ret = coda_alloc_framebuffers(ctx, q_data_src, dst_fourcc);
925 v4l2_err(v4l2_dev, "failed to allocate framebuffers\n"); 982 if (ret < 0) {
926 goto out; 983 v4l2_err(v4l2_dev, "failed to allocate framebuffers\n");
984 goto out;
985 }
986 stride = q_data_src->bytesperline;
987 } else {
988 ctx->num_internal_frames = 0;
989 stride = 0;
927 } 990 }
928
929 coda_write(dev, ctx->num_internal_frames, CODA_CMD_SET_FRAME_BUF_NUM); 991 coda_write(dev, ctx->num_internal_frames, CODA_CMD_SET_FRAME_BUF_NUM);
930 coda_write(dev, q_data_src->bytesperline, 992 coda_write(dev, stride, CODA_CMD_SET_FRAME_BUF_STRIDE);
931 CODA_CMD_SET_FRAME_BUF_STRIDE); 993
932 if (dev->devtype->product == CODA_7541) { 994 if (dev->devtype->product == CODA_7541) {
933 coda_write(dev, q_data_src->bytesperline, 995 coda_write(dev, q_data_src->bytesperline,
934 CODA7_CMD_SET_FRAME_SOURCE_BUF_STRIDE); 996 CODA7_CMD_SET_FRAME_SOURCE_BUF_STRIDE);
@@ -1036,9 +1098,10 @@ static int coda_prepare_encode(struct coda_ctx *ctx)
1036 struct coda_dev *dev = ctx->dev; 1098 struct coda_dev *dev = ctx->dev;
1037 int force_ipicture; 1099 int force_ipicture;
1038 int quant_param = 0; 1100 int quant_param = 0;
1039 u32 picture_y, picture_cb, picture_cr;
1040 u32 pic_stream_buffer_addr, pic_stream_buffer_size; 1101 u32 pic_stream_buffer_addr, pic_stream_buffer_size;
1102 u32 rot_mode = 0;
1041 u32 dst_fourcc; 1103 u32 dst_fourcc;
1104 u32 reg;
1042 1105
1043 src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); 1106 src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
1044 dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx); 1107 dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
@@ -1076,7 +1139,7 @@ static int coda_prepare_encode(struct coda_ctx *ctx)
1076 ctx->vpu_header_size[0] + 1139 ctx->vpu_header_size[0] +
1077 ctx->vpu_header_size[1] + 1140 ctx->vpu_header_size[1] +
1078 ctx->vpu_header_size[2]; 1141 ctx->vpu_header_size[2];
1079 pic_stream_buffer_size = CODA_MAX_FRAME_SIZE - 1142 pic_stream_buffer_size = q_data_dst->sizeimage -
1080 ctx->vpu_header_size[0] - 1143 ctx->vpu_header_size[0] -
1081 ctx->vpu_header_size[1] - 1144 ctx->vpu_header_size[1] -
1082 ctx->vpu_header_size[2]; 1145 ctx->vpu_header_size[2];
@@ -1090,7 +1153,7 @@ static int coda_prepare_encode(struct coda_ctx *ctx)
1090 } else { 1153 } else {
1091 pic_stream_buffer_addr = 1154 pic_stream_buffer_addr =
1092 vb2_dma_contig_plane_dma_addr(dst_buf, 0); 1155 vb2_dma_contig_plane_dma_addr(dst_buf, 0);
1093 pic_stream_buffer_size = CODA_MAX_FRAME_SIZE; 1156 pic_stream_buffer_size = q_data_dst->sizeimage;
1094 } 1157 }
1095 1158
1096 if (src_buf->v4l2_buf.flags & V4L2_BUF_FLAG_KEYFRAME) { 1159 if (src_buf->v4l2_buf.flags & V4L2_BUF_FLAG_KEYFRAME) {
@@ -1102,6 +1165,9 @@ static int coda_prepare_encode(struct coda_ctx *ctx)
1102 case V4L2_PIX_FMT_MPEG4: 1165 case V4L2_PIX_FMT_MPEG4:
1103 quant_param = ctx->params.mpeg4_intra_qp; 1166 quant_param = ctx->params.mpeg4_intra_qp;
1104 break; 1167 break;
1168 case V4L2_PIX_FMT_JPEG:
1169 quant_param = 30;
1170 break;
1105 default: 1171 default:
1106 v4l2_warn(&ctx->dev->v4l2_dev, 1172 v4l2_warn(&ctx->dev->v4l2_dev,
1107 "cannot set intra qp, fmt not supported\n"); 1173 "cannot set intra qp, fmt not supported\n");
@@ -1124,42 +1190,22 @@ static int coda_prepare_encode(struct coda_ctx *ctx)
1124 } 1190 }
1125 1191
1126 /* submit */ 1192 /* submit */
1127 coda_write(dev, CODA_ROT_MIR_ENABLE | ctx->params.rot_mode, 1193 if (ctx->params.rot_mode)
1128 CODA_CMD_ENC_PIC_ROT_MODE); 1194 rot_mode = CODA_ROT_MIR_ENABLE | ctx->params.rot_mode;
1195 coda_write(dev, rot_mode, CODA_CMD_ENC_PIC_ROT_MODE);
1129 coda_write(dev, quant_param, CODA_CMD_ENC_PIC_QS); 1196 coda_write(dev, quant_param, CODA_CMD_ENC_PIC_QS);
1130 1197
1131
1132 picture_y = vb2_dma_contig_plane_dma_addr(src_buf, 0);
1133 switch (q_data_src->fourcc) {
1134 case V4L2_PIX_FMT_YVU420:
1135 /* Switch Cb and Cr for YVU420 format */
1136 picture_cr = picture_y + q_data_src->bytesperline *
1137 q_data_src->height;
1138 picture_cb = picture_cr + q_data_src->bytesperline / 2 *
1139 q_data_src->height / 2;
1140 break;
1141 case V4L2_PIX_FMT_YUV420:
1142 default:
1143 picture_cb = picture_y + q_data_src->bytesperline *
1144 q_data_src->height;
1145 picture_cr = picture_cb + q_data_src->bytesperline / 2 *
1146 q_data_src->height / 2;
1147 break;
1148 }
1149
1150 if (dev->devtype->product == CODA_960) { 1198 if (dev->devtype->product == CODA_960) {
1151 coda_write(dev, 4/*FIXME: 0*/, CODA9_CMD_ENC_PIC_SRC_INDEX); 1199 coda_write(dev, 4/*FIXME: 0*/, CODA9_CMD_ENC_PIC_SRC_INDEX);
1152 coda_write(dev, q_data_src->width, CODA9_CMD_ENC_PIC_SRC_STRIDE); 1200 coda_write(dev, q_data_src->width, CODA9_CMD_ENC_PIC_SRC_STRIDE);
1153 coda_write(dev, 0, CODA9_CMD_ENC_PIC_SUB_FRAME_SYNC); 1201 coda_write(dev, 0, CODA9_CMD_ENC_PIC_SUB_FRAME_SYNC);
1154 1202
1155 coda_write(dev, picture_y, CODA9_CMD_ENC_PIC_SRC_ADDR_Y); 1203 reg = CODA9_CMD_ENC_PIC_SRC_ADDR_Y;
1156 coda_write(dev, picture_cb, CODA9_CMD_ENC_PIC_SRC_ADDR_CB);
1157 coda_write(dev, picture_cr, CODA9_CMD_ENC_PIC_SRC_ADDR_CR);
1158 } else { 1204 } else {
1159 coda_write(dev, picture_y, CODA_CMD_ENC_PIC_SRC_ADDR_Y); 1205 reg = CODA_CMD_ENC_PIC_SRC_ADDR_Y;
1160 coda_write(dev, picture_cb, CODA_CMD_ENC_PIC_SRC_ADDR_CB);
1161 coda_write(dev, picture_cr, CODA_CMD_ENC_PIC_SRC_ADDR_CR);
1162 } 1206 }
1207 coda_write_base(ctx, q_data_src, src_buf, reg);
1208
1163 coda_write(dev, force_ipicture << 1 & 0x2, 1209 coda_write(dev, force_ipicture << 1 & 0x2,
1164 CODA_CMD_ENC_PIC_OPTION); 1210 CODA_CMD_ENC_PIC_OPTION);
1165 1211
@@ -1293,7 +1339,7 @@ static int __coda_start_decoding(struct coda_ctx *ctx)
1293 u32 bitstream_buf, bitstream_size; 1339 u32 bitstream_buf, bitstream_size;
1294 struct coda_dev *dev = ctx->dev; 1340 struct coda_dev *dev = ctx->dev;
1295 int width, height; 1341 int width, height;
1296 u32 src_fourcc; 1342 u32 src_fourcc, dst_fourcc;
1297 u32 val; 1343 u32 val;
1298 int ret; 1344 int ret;
1299 1345
@@ -1303,6 +1349,7 @@ static int __coda_start_decoding(struct coda_ctx *ctx)
1303 bitstream_buf = ctx->bitstream.paddr; 1349 bitstream_buf = ctx->bitstream.paddr;
1304 bitstream_size = ctx->bitstream.size; 1350 bitstream_size = ctx->bitstream.size;
1305 src_fourcc = q_data_src->fourcc; 1351 src_fourcc = q_data_src->fourcc;
1352 dst_fourcc = q_data_dst->fourcc;
1306 1353
1307 /* Allocate per-instance buffers */ 1354 /* Allocate per-instance buffers */
1308 ret = coda_alloc_context_buffers(ctx, q_data_src); 1355 ret = coda_alloc_context_buffers(ctx, q_data_src);
@@ -1314,6 +1361,11 @@ static int __coda_start_decoding(struct coda_ctx *ctx)
1314 /* Update coda bitstream read and write pointers from kfifo */ 1361 /* Update coda bitstream read and write pointers from kfifo */
1315 coda_kfifo_sync_to_device_full(ctx); 1362 coda_kfifo_sync_to_device_full(ctx);
1316 1363
1364 ctx->frame_mem_ctrl &= ~CODA_FRAME_CHROMA_INTERLEAVE;
1365 if (dst_fourcc == V4L2_PIX_FMT_NV12)
1366 ctx->frame_mem_ctrl |= CODA_FRAME_CHROMA_INTERLEAVE;
1367 coda_write(dev, ctx->frame_mem_ctrl, CODA_REG_BIT_FRAME_MEM_CTRL);
1368
1317 ctx->display_idx = -1; 1369 ctx->display_idx = -1;
1318 ctx->frm_dis_flg = 0; 1370 ctx->frm_dis_flg = 0;
1319 coda_write(dev, 0, CODA_REG_BIT_FRM_DIS_FLG(ctx->reg_idx)); 1371 coda_write(dev, 0, CODA_REG_BIT_FRM_DIS_FLG(ctx->reg_idx));
@@ -1327,6 +1379,8 @@ static int __coda_start_decoding(struct coda_ctx *ctx)
1327 if ((dev->devtype->product == CODA_7541) || 1379 if ((dev->devtype->product == CODA_7541) ||
1328 (dev->devtype->product == CODA_960)) 1380 (dev->devtype->product == CODA_960))
1329 val |= CODA_REORDER_ENABLE; 1381 val |= CODA_REORDER_ENABLE;
1382 if (ctx->codec->src_fourcc == V4L2_PIX_FMT_JPEG)
1383 val |= CODA_NO_INT_ENABLE;
1330 coda_write(dev, val, CODA_CMD_DEC_SEQ_OPTION); 1384 coda_write(dev, val, CODA_CMD_DEC_SEQ_OPTION);
1331 1385
1332 ctx->params.codec_mode = ctx->codec->mode; 1386 ctx->params.codec_mode = ctx->codec->mode;
@@ -1442,13 +1496,23 @@ static int __coda_start_decoding(struct coda_ctx *ctx)
1442 } 1496 }
1443 1497
1444 if (dev->devtype->product == CODA_960) { 1498 if (dev->devtype->product == CODA_960) {
1445 coda_write(dev, -1, CODA9_CMD_SET_FRAME_DELAY); 1499 int cbb_size, crb_size;
1446 1500
1501 coda_write(dev, -1, CODA9_CMD_SET_FRAME_DELAY);
1502 /* Luma 2x0 page, 2x6 cache, chroma 2x0 page, 2x4 cache size */
1447 coda_write(dev, 0x20262024, CODA9_CMD_SET_FRAME_CACHE_SIZE); 1503 coda_write(dev, 0x20262024, CODA9_CMD_SET_FRAME_CACHE_SIZE);
1504
1505 if (dst_fourcc == V4L2_PIX_FMT_NV12) {
1506 cbb_size = 0;
1507 crb_size = 16;
1508 } else {
1509 cbb_size = 8;
1510 crb_size = 8;
1511 }
1448 coda_write(dev, 2 << CODA9_CACHE_PAGEMERGE_OFFSET | 1512 coda_write(dev, 2 << CODA9_CACHE_PAGEMERGE_OFFSET |
1449 32 << CODA9_CACHE_LUMA_BUFFER_SIZE_OFFSET | 1513 32 << CODA9_CACHE_LUMA_BUFFER_SIZE_OFFSET |
1450 8 << CODA9_CACHE_CB_BUFFER_SIZE_OFFSET | 1514 cbb_size << CODA9_CACHE_CB_BUFFER_SIZE_OFFSET |
1451 8 << CODA9_CACHE_CR_BUFFER_SIZE_OFFSET, 1515 crb_size << CODA9_CACHE_CR_BUFFER_SIZE_OFFSET,
1452 CODA9_CMD_SET_FRAME_CACHE_CONFIG); 1516 CODA9_CMD_SET_FRAME_CACHE_CONFIG);
1453 } 1517 }
1454 1518
@@ -1501,20 +1565,11 @@ static int coda_prepare_decode(struct coda_ctx *ctx)
1501 struct vb2_buffer *dst_buf; 1565 struct vb2_buffer *dst_buf;
1502 struct coda_dev *dev = ctx->dev; 1566 struct coda_dev *dev = ctx->dev;
1503 struct coda_q_data *q_data_dst; 1567 struct coda_q_data *q_data_dst;
1504 u32 stridey, height; 1568 u32 reg_addr, reg_stride;
1505 u32 picture_y, picture_cb, picture_cr;
1506 1569
1507 dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx); 1570 dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
1508 q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE); 1571 q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
1509 1572
1510 if (ctx->params.rot_mode & CODA_ROT_90) {
1511 stridey = q_data_dst->height;
1512 height = q_data_dst->width;
1513 } else {
1514 stridey = q_data_dst->width;
1515 height = q_data_dst->height;
1516 }
1517
1518 /* Try to copy source buffer contents into the bitstream ringbuffer */ 1573 /* Try to copy source buffer contents into the bitstream ringbuffer */
1519 mutex_lock(&ctx->bitstream_mutex); 1574 mutex_lock(&ctx->bitstream_mutex);
1520 coda_fill_bitstream(ctx); 1575 coda_fill_bitstream(ctx);
@@ -1545,17 +1600,6 @@ static int coda_prepare_decode(struct coda_ctx *ctx)
1545 if (dev->devtype->product == CODA_960) 1600 if (dev->devtype->product == CODA_960)
1546 coda_set_gdi_regs(ctx); 1601 coda_set_gdi_regs(ctx);
1547 1602
1548 /* Set rotator output */
1549 picture_y = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
1550 if (q_data_dst->fourcc == V4L2_PIX_FMT_YVU420) {
1551 /* Switch Cr and Cb for YVU420 format */
1552 picture_cr = picture_y + stridey * height;
1553 picture_cb = picture_cr + stridey / 2 * height / 2;
1554 } else {
1555 picture_cb = picture_y + stridey * height;
1556 picture_cr = picture_cb + stridey / 2 * height / 2;
1557 }
1558
1559 if (dev->devtype->product == CODA_960) { 1603 if (dev->devtype->product == CODA_960) {
1560 /* 1604 /*
1561 * The CODA960 seems to have an internal list of buffers with 1605 * The CODA960 seems to have an internal list of buffers with
@@ -1565,16 +1609,16 @@ static int coda_prepare_decode(struct coda_ctx *ctx)
1565 */ 1609 */
1566 coda_write(dev, CODA_MAX_FRAMEBUFFERS + dst_buf->v4l2_buf.index, 1610 coda_write(dev, CODA_MAX_FRAMEBUFFERS + dst_buf->v4l2_buf.index,
1567 CODA9_CMD_DEC_PIC_ROT_INDEX); 1611 CODA9_CMD_DEC_PIC_ROT_INDEX);
1568 coda_write(dev, picture_y, CODA9_CMD_DEC_PIC_ROT_ADDR_Y); 1612
1569 coda_write(dev, picture_cb, CODA9_CMD_DEC_PIC_ROT_ADDR_CB); 1613 reg_addr = CODA9_CMD_DEC_PIC_ROT_ADDR_Y;
1570 coda_write(dev, picture_cr, CODA9_CMD_DEC_PIC_ROT_ADDR_CR); 1614 reg_stride = CODA9_CMD_DEC_PIC_ROT_STRIDE;
1571 coda_write(dev, stridey, CODA9_CMD_DEC_PIC_ROT_STRIDE);
1572 } else { 1615 } else {
1573 coda_write(dev, picture_y, CODA_CMD_DEC_PIC_ROT_ADDR_Y); 1616 reg_addr = CODA_CMD_DEC_PIC_ROT_ADDR_Y;
1574 coda_write(dev, picture_cb, CODA_CMD_DEC_PIC_ROT_ADDR_CB); 1617 reg_stride = CODA_CMD_DEC_PIC_ROT_STRIDE;
1575 coda_write(dev, picture_cr, CODA_CMD_DEC_PIC_ROT_ADDR_CR);
1576 coda_write(dev, stridey, CODA_CMD_DEC_PIC_ROT_STRIDE);
1577 } 1618 }
1619 coda_write_base(ctx, q_data_dst, dst_buf, reg_addr);
1620 coda_write(dev, q_data_dst->bytesperline, reg_stride);
1621
1578 coda_write(dev, CODA_ROT_MIR_ENABLE | ctx->params.rot_mode, 1622 coda_write(dev, CODA_ROT_MIR_ENABLE | ctx->params.rot_mode,
1579 CODA_CMD_DEC_PIC_ROT_MODE); 1623 CODA_CMD_DEC_PIC_ROT_MODE);
1580 1624
@@ -1599,6 +1643,26 @@ static int coda_prepare_decode(struct coda_ctx *ctx)
1599 coda_write(dev, ctx->iram_info.axi_sram_use, 1643 coda_write(dev, ctx->iram_info.axi_sram_use,
1600 CODA7_REG_BIT_AXI_SRAM_USE); 1644 CODA7_REG_BIT_AXI_SRAM_USE);
1601 1645
1646 if (ctx->codec->src_fourcc == V4L2_PIX_FMT_JPEG) {
1647 struct coda_buffer_meta *meta;
1648
1649 /* If this is the last buffer in the bitstream, add padding */
1650 meta = list_first_entry(&ctx->buffer_meta_list,
1651 struct coda_buffer_meta, list);
1652 if (meta->end == (ctx->bitstream_fifo.kfifo.in &
1653 ctx->bitstream_fifo.kfifo.mask)) {
1654 static unsigned char buf[512];
1655 unsigned int pad;
1656
1657 /* Pad to multiple of 256 and then add 256 more */
1658 pad = ((0 - meta->end) & 0xff) + 256;
1659
1660 memset(buf, 0xff, sizeof(buf));
1661
1662 kfifo_in(&ctx->bitstream_fifo, buf, pad);
1663 }
1664 }
1665
1602 coda_kfifo_sync_to_device_full(ctx); 1666 coda_kfifo_sync_to_device_full(ctx);
1603 1667
1604 coda_command_async(ctx, CODA_COMMAND_PIC_RUN); 1668 coda_command_async(ctx, CODA_COMMAND_PIC_RUN);
@@ -1612,7 +1676,8 @@ static void coda_finish_decode(struct coda_ctx *ctx)
1612 struct coda_q_data *q_data_src; 1676 struct coda_q_data *q_data_src;
1613 struct coda_q_data *q_data_dst; 1677 struct coda_q_data *q_data_dst;
1614 struct vb2_buffer *dst_buf; 1678 struct vb2_buffer *dst_buf;
1615 struct coda_timestamp *ts; 1679 struct coda_buffer_meta *meta;
1680 unsigned long payload;
1616 int width, height; 1681 int width, height;
1617 int decoded_idx; 1682 int decoded_idx;
1618 int display_idx; 1683 int display_idx;
@@ -1739,23 +1804,23 @@ static void coda_finish_decode(struct coda_ctx *ctx)
1739 val = coda_read(dev, CODA_RET_DEC_PIC_FRAME_NUM) - 1; 1804 val = coda_read(dev, CODA_RET_DEC_PIC_FRAME_NUM) - 1;
1740 val -= ctx->sequence_offset; 1805 val -= ctx->sequence_offset;
1741 mutex_lock(&ctx->bitstream_mutex); 1806 mutex_lock(&ctx->bitstream_mutex);
1742 if (!list_empty(&ctx->timestamp_list)) { 1807 if (!list_empty(&ctx->buffer_meta_list)) {
1743 ts = list_first_entry(&ctx->timestamp_list, 1808 meta = list_first_entry(&ctx->buffer_meta_list,
1744 struct coda_timestamp, list); 1809 struct coda_buffer_meta, list);
1745 list_del(&ts->list); 1810 list_del(&meta->list);
1746 if (val != (ts->sequence & 0xffff)) { 1811 if (val != (meta->sequence & 0xffff)) {
1747 v4l2_err(&dev->v4l2_dev, 1812 v4l2_err(&dev->v4l2_dev,
1748 "sequence number mismatch (%d(%d) != %d)\n", 1813 "sequence number mismatch (%d(%d) != %d)\n",
1749 val, ctx->sequence_offset, 1814 val, ctx->sequence_offset,
1750 ts->sequence); 1815 meta->sequence);
1751 } 1816 }
1752 ctx->frame_timestamps[decoded_idx] = *ts; 1817 ctx->frame_metas[decoded_idx] = *meta;
1753 kfree(ts); 1818 kfree(meta);
1754 } else { 1819 } else {
1755 v4l2_err(&dev->v4l2_dev, "empty timestamp list!\n"); 1820 v4l2_err(&dev->v4l2_dev, "empty timestamp list!\n");
1756 memset(&ctx->frame_timestamps[decoded_idx], 0, 1821 memset(&ctx->frame_metas[decoded_idx], 0,
1757 sizeof(struct coda_timestamp)); 1822 sizeof(struct coda_buffer_meta));
1758 ctx->frame_timestamps[decoded_idx].sequence = val; 1823 ctx->frame_metas[decoded_idx].sequence = val;
1759 } 1824 }
1760 mutex_unlock(&ctx->bitstream_mutex); 1825 mutex_unlock(&ctx->bitstream_mutex);
1761 1826
@@ -1794,11 +1859,22 @@ static void coda_finish_decode(struct coda_ctx *ctx)
1794 V4L2_BUF_FLAG_PFRAME | 1859 V4L2_BUF_FLAG_PFRAME |
1795 V4L2_BUF_FLAG_BFRAME); 1860 V4L2_BUF_FLAG_BFRAME);
1796 dst_buf->v4l2_buf.flags |= ctx->frame_types[ctx->display_idx]; 1861 dst_buf->v4l2_buf.flags |= ctx->frame_types[ctx->display_idx];
1797 ts = &ctx->frame_timestamps[ctx->display_idx]; 1862 meta = &ctx->frame_metas[ctx->display_idx];
1798 dst_buf->v4l2_buf.timecode = ts->timecode; 1863 dst_buf->v4l2_buf.timecode = meta->timecode;
1799 dst_buf->v4l2_buf.timestamp = ts->timestamp; 1864 dst_buf->v4l2_buf.timestamp = meta->timestamp;
1800 1865
1801 vb2_set_plane_payload(dst_buf, 0, width * height * 3 / 2); 1866 switch (q_data_dst->fourcc) {
1867 case V4L2_PIX_FMT_YUV420:
1868 case V4L2_PIX_FMT_YVU420:
1869 case V4L2_PIX_FMT_NV12:
1870 default:
1871 payload = width * height * 3 / 2;
1872 break;
1873 case V4L2_PIX_FMT_YUV422P:
1874 payload = width * height * 2;
1875 break;
1876 }
1877 vb2_set_plane_payload(dst_buf, 0, payload);
1802 1878
1803 v4l2_m2m_buf_done(dst_buf, ctx->frame_errors[display_idx] ? 1879 v4l2_m2m_buf_done(dst_buf, ctx->frame_errors[display_idx] ?
1804 VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE); 1880 VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
index ced47609f5ef..90b7791d36e1 100644
--- a/drivers/media/platform/coda/coda-common.c
+++ b/drivers/media/platform/coda/coda-common.c
@@ -43,6 +43,7 @@
43#define CODA_NAME "coda" 43#define CODA_NAME "coda"
44 44
45#define CODADX6_MAX_INSTANCES 4 45#define CODADX6_MAX_INSTANCES 4
46#define CODA_MAX_FORMATS 4
46 47
47#define CODA_PARA_BUF_SIZE (10 * 1024) 48#define CODA_PARA_BUF_SIZE (10 * 1024)
48#define CODA_ISRAM_SIZE (2048 * 2) 49#define CODA_ISRAM_SIZE (2048 * 2)
@@ -82,6 +83,34 @@ unsigned int coda_read(struct coda_dev *dev, u32 reg)
82 return data; 83 return data;
83} 84}
84 85
86void coda_write_base(struct coda_ctx *ctx, struct coda_q_data *q_data,
87 struct vb2_buffer *buf, unsigned int reg_y)
88{
89 u32 base_y = vb2_dma_contig_plane_dma_addr(buf, 0);
90 u32 base_cb, base_cr;
91
92 switch (q_data->fourcc) {
93 case V4L2_PIX_FMT_YVU420:
94 /* Switch Cb and Cr for YVU420 format */
95 base_cr = base_y + q_data->bytesperline * q_data->height;
96 base_cb = base_cr + q_data->bytesperline * q_data->height / 4;
97 break;
98 case V4L2_PIX_FMT_YUV420:
99 case V4L2_PIX_FMT_NV12:
100 default:
101 base_cb = base_y + q_data->bytesperline * q_data->height;
102 base_cr = base_cb + q_data->bytesperline * q_data->height / 4;
103 break;
104 case V4L2_PIX_FMT_YUV422P:
105 base_cb = base_y + q_data->bytesperline * q_data->height;
106 base_cr = base_cb + q_data->bytesperline * q_data->height / 2;
107 }
108
109 coda_write(ctx->dev, base_y, reg_y);
110 coda_write(ctx->dev, base_cb, reg_y + 4);
111 coda_write(ctx->dev, base_cr, reg_y + 8);
112}
113
85/* 114/*
86 * Array of all formats supported by any version of Coda: 115 * Array of all formats supported by any version of Coda:
87 */ 116 */
@@ -95,6 +124,14 @@ static const struct coda_fmt coda_formats[] = {
95 .fourcc = V4L2_PIX_FMT_YVU420, 124 .fourcc = V4L2_PIX_FMT_YVU420,
96 }, 125 },
97 { 126 {
127 .name = "YUV 4:2:0 Partial interleaved Y/CbCr",
128 .fourcc = V4L2_PIX_FMT_NV12,
129 },
130 {
131 .name = "YUV 4:2:2 Planar, YCbCr",
132 .fourcc = V4L2_PIX_FMT_YUV422P,
133 },
134 {
98 .name = "H264 Encoded Stream", 135 .name = "H264 Encoded Stream",
99 .fourcc = V4L2_PIX_FMT_H264, 136 .fourcc = V4L2_PIX_FMT_H264,
100 }, 137 },
@@ -102,6 +139,10 @@ static const struct coda_fmt coda_formats[] = {
102 .name = "MPEG4 Encoded Stream", 139 .name = "MPEG4 Encoded Stream",
103 .fourcc = V4L2_PIX_FMT_MPEG4, 140 .fourcc = V4L2_PIX_FMT_MPEG4,
104 }, 141 },
142 {
143 .name = "JPEG Encoded Images",
144 .fourcc = V4L2_PIX_FMT_JPEG,
145 },
105}; 146};
106 147
107#define CODA_CODEC(mode, src_fourcc, dst_fourcc, max_w, max_h) \ 148#define CODA_CODEC(mode, src_fourcc, dst_fourcc, max_w, max_h) \
@@ -122,8 +163,10 @@ static const struct coda_codec codadx6_codecs[] = {
122static const struct coda_codec coda7_codecs[] = { 163static const struct coda_codec coda7_codecs[] = {
123 CODA_CODEC(CODA7_MODE_ENCODE_H264, V4L2_PIX_FMT_YUV420, V4L2_PIX_FMT_H264, 1280, 720), 164 CODA_CODEC(CODA7_MODE_ENCODE_H264, V4L2_PIX_FMT_YUV420, V4L2_PIX_FMT_H264, 1280, 720),
124 CODA_CODEC(CODA7_MODE_ENCODE_MP4, V4L2_PIX_FMT_YUV420, V4L2_PIX_FMT_MPEG4, 1280, 720), 165 CODA_CODEC(CODA7_MODE_ENCODE_MP4, V4L2_PIX_FMT_YUV420, V4L2_PIX_FMT_MPEG4, 1280, 720),
166 CODA_CODEC(CODA7_MODE_ENCODE_MJPG, V4L2_PIX_FMT_YUV420, V4L2_PIX_FMT_JPEG, 8192, 8192),
125 CODA_CODEC(CODA7_MODE_DECODE_H264, V4L2_PIX_FMT_H264, V4L2_PIX_FMT_YUV420, 1920, 1088), 167 CODA_CODEC(CODA7_MODE_DECODE_H264, V4L2_PIX_FMT_H264, V4L2_PIX_FMT_YUV420, 1920, 1088),
126 CODA_CODEC(CODA7_MODE_DECODE_MP4, V4L2_PIX_FMT_MPEG4, V4L2_PIX_FMT_YUV420, 1920, 1088), 168 CODA_CODEC(CODA7_MODE_DECODE_MP4, V4L2_PIX_FMT_MPEG4, V4L2_PIX_FMT_YUV420, 1920, 1088),
169 CODA_CODEC(CODA7_MODE_DECODE_MJPG, V4L2_PIX_FMT_JPEG, V4L2_PIX_FMT_YUV420, 8192, 8192),
127}; 170};
128 171
129static const struct coda_codec coda9_codecs[] = { 172static const struct coda_codec coda9_codecs[] = {
@@ -133,17 +176,115 @@ static const struct coda_codec coda9_codecs[] = {
133 CODA_CODEC(CODA9_MODE_DECODE_MP4, V4L2_PIX_FMT_MPEG4, V4L2_PIX_FMT_YUV420, 1920, 1088), 176 CODA_CODEC(CODA9_MODE_DECODE_MP4, V4L2_PIX_FMT_MPEG4, V4L2_PIX_FMT_YUV420, 1920, 1088),
134}; 177};
135 178
179struct coda_video_device {
180 const char *name;
181 enum coda_inst_type type;
182 const struct coda_context_ops *ops;
183 u32 src_formats[CODA_MAX_FORMATS];
184 u32 dst_formats[CODA_MAX_FORMATS];
185};
186
187static const struct coda_video_device coda_bit_encoder = {
188 .name = "coda-encoder",
189 .type = CODA_INST_ENCODER,
190 .ops = &coda_bit_encode_ops,
191 .src_formats = {
192 V4L2_PIX_FMT_YUV420,
193 V4L2_PIX_FMT_YVU420,
194 V4L2_PIX_FMT_NV12,
195 },
196 .dst_formats = {
197 V4L2_PIX_FMT_H264,
198 V4L2_PIX_FMT_MPEG4,
199 },
200};
201
202static const struct coda_video_device coda_bit_jpeg_encoder = {
203 .name = "coda-jpeg-encoder",
204 .type = CODA_INST_ENCODER,
205 .ops = &coda_bit_encode_ops,
206 .src_formats = {
207 V4L2_PIX_FMT_YUV420,
208 V4L2_PIX_FMT_YVU420,
209 V4L2_PIX_FMT_NV12,
210 V4L2_PIX_FMT_YUV422P,
211 },
212 .dst_formats = {
213 V4L2_PIX_FMT_JPEG,
214 },
215};
216
217static const struct coda_video_device coda_bit_decoder = {
218 .name = "coda-decoder",
219 .type = CODA_INST_DECODER,
220 .ops = &coda_bit_decode_ops,
221 .src_formats = {
222 V4L2_PIX_FMT_H264,
223 V4L2_PIX_FMT_MPEG4,
224 },
225 .dst_formats = {
226 V4L2_PIX_FMT_YUV420,
227 V4L2_PIX_FMT_YVU420,
228 V4L2_PIX_FMT_NV12,
229 },
230};
231
232static const struct coda_video_device coda_bit_jpeg_decoder = {
233 .name = "coda-jpeg-decoder",
234 .type = CODA_INST_DECODER,
235 .ops = &coda_bit_decode_ops,
236 .src_formats = {
237 V4L2_PIX_FMT_JPEG,
238 },
239 .dst_formats = {
240 V4L2_PIX_FMT_YUV420,
241 V4L2_PIX_FMT_YVU420,
242 V4L2_PIX_FMT_NV12,
243 V4L2_PIX_FMT_YUV422P,
244 },
245};
246
247static const struct coda_video_device *codadx6_video_devices[] = {
248 &coda_bit_encoder,
249};
250
251static const struct coda_video_device *coda7_video_devices[] = {
252 &coda_bit_jpeg_encoder,
253 &coda_bit_jpeg_decoder,
254 &coda_bit_encoder,
255 &coda_bit_decoder,
256};
257
258static const struct coda_video_device *coda9_video_devices[] = {
259 &coda_bit_encoder,
260 &coda_bit_decoder,
261};
262
136static bool coda_format_is_yuv(u32 fourcc) 263static bool coda_format_is_yuv(u32 fourcc)
137{ 264{
138 switch (fourcc) { 265 switch (fourcc) {
139 case V4L2_PIX_FMT_YUV420: 266 case V4L2_PIX_FMT_YUV420:
140 case V4L2_PIX_FMT_YVU420: 267 case V4L2_PIX_FMT_YVU420:
268 case V4L2_PIX_FMT_NV12:
269 case V4L2_PIX_FMT_YUV422P:
141 return true; 270 return true;
142 default: 271 default:
143 return false; 272 return false;
144 } 273 }
145} 274}
146 275
276static const char *coda_format_name(u32 fourcc)
277{
278 int i;
279
280 for (i = 0; i < ARRAY_SIZE(coda_formats); i++) {
281 if (coda_formats[i].fourcc == fourcc)
282 return coda_formats[i].name;
283 }
284
285 return NULL;
286}
287
147/* 288/*
148 * Normalize all supported YUV 4:2:0 formats to the value used in the codec 289 * Normalize all supported YUV 4:2:0 formats to the value used in the codec
149 * tables. 290 * tables.
@@ -202,6 +343,17 @@ static void coda_get_max_dimensions(struct coda_dev *dev,
202 *max_h = h; 343 *max_h = h;
203} 344}
204 345
346const struct coda_video_device *to_coda_video_device(struct video_device *vdev)
347{
348 struct coda_dev *dev = video_get_drvdata(vdev);
349 unsigned int i = vdev - dev->vfd;
350
351 if (i >= dev->devtype->num_vdevs)
352 return NULL;
353
354 return dev->devtype->vdevs[i];
355}
356
205const char *coda_product_name(int product) 357const char *coda_product_name(int product)
206{ 358{
207 static char buf[9]; 359 static char buf[9];
@@ -240,58 +392,28 @@ static int coda_querycap(struct file *file, void *priv,
240static int coda_enum_fmt(struct file *file, void *priv, 392static int coda_enum_fmt(struct file *file, void *priv,
241 struct v4l2_fmtdesc *f) 393 struct v4l2_fmtdesc *f)
242{ 394{
243 struct coda_ctx *ctx = fh_to_ctx(priv); 395 struct video_device *vdev = video_devdata(file);
244 const struct coda_codec *codecs = ctx->dev->devtype->codecs; 396 const struct coda_video_device *cvd = to_coda_video_device(vdev);
245 const struct coda_fmt *formats = coda_formats; 397 const u32 *formats;
246 const struct coda_fmt *fmt; 398 const char *name;
247 int num_codecs = ctx->dev->devtype->num_codecs; 399
248 int num_formats = ARRAY_SIZE(coda_formats); 400 if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
249 int i, k, num = 0; 401 formats = cvd->src_formats;
250 bool yuv; 402 else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
251 403 formats = cvd->dst_formats;
252 if (ctx->inst_type == CODA_INST_ENCODER)
253 yuv = (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT);
254 else 404 else
255 yuv = (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE); 405 return -EINVAL;
256
257 for (i = 0; i < num_formats; i++) {
258 /* Skip either raw or compressed formats */
259 if (yuv != coda_format_is_yuv(formats[i].fourcc))
260 continue;
261 /* All uncompressed formats are always supported */
262 if (yuv) {
263 if (num == f->index)
264 break;
265 ++num;
266 continue;
267 }
268 /* Compressed formats may be supported, check the codec list */
269 for (k = 0; k < num_codecs; k++) {
270 if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE &&
271 formats[i].fourcc == codecs[k].dst_fourcc)
272 break;
273 if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
274 formats[i].fourcc == codecs[k].src_fourcc)
275 break;
276 }
277 if (k < num_codecs) {
278 if (num == f->index)
279 break;
280 ++num;
281 }
282 }
283 406
284 if (i < num_formats) { 407 if (f->index >= CODA_MAX_FORMATS || formats[f->index] == 0)
285 fmt = &formats[i]; 408 return -EINVAL;
286 strlcpy(f->description, fmt->name, sizeof(f->description)); 409
287 f->pixelformat = fmt->fourcc; 410 name = coda_format_name(formats[f->index]);
288 if (!yuv) 411 strlcpy(f->description, name, sizeof(f->description));
289 f->flags |= V4L2_FMT_FLAG_COMPRESSED; 412 f->pixelformat = formats[f->index];
290 return 0; 413 if (!coda_format_is_yuv(formats[f->index]))
291 } 414 f->flags |= V4L2_FMT_FLAG_COMPRESSED;
292 415
293 /* Format not found */ 416 return 0;
294 return -EINVAL;
295} 417}
296 418
297static int coda_g_fmt(struct file *file, void *priv, 419static int coda_g_fmt(struct file *file, void *priv,
@@ -311,7 +433,37 @@ static int coda_g_fmt(struct file *file, void *priv,
311 f->fmt.pix.bytesperline = q_data->bytesperline; 433 f->fmt.pix.bytesperline = q_data->bytesperline;
312 434
313 f->fmt.pix.sizeimage = q_data->sizeimage; 435 f->fmt.pix.sizeimage = q_data->sizeimage;
314 f->fmt.pix.colorspace = ctx->colorspace; 436 if (f->fmt.pix.pixelformat == V4L2_PIX_FMT_JPEG)
437 f->fmt.pix.colorspace = V4L2_COLORSPACE_JPEG;
438 else
439 f->fmt.pix.colorspace = ctx->colorspace;
440
441 return 0;
442}
443
444static int coda_try_pixelformat(struct coda_ctx *ctx, struct v4l2_format *f)
445{
446 struct coda_q_data *q_data;
447 const u32 *formats;
448 int i;
449
450 if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
451 formats = ctx->cvd->src_formats;
452 else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
453 formats = ctx->cvd->dst_formats;
454 else
455 return -EINVAL;
456
457 for (i = 0; i < CODA_MAX_FORMATS; i++) {
458 if (formats[i] == f->fmt.pix.pixelformat) {
459 f->fmt.pix.pixelformat = formats[i];
460 return 0;
461 }
462 }
463
464 /* Fall back to currently set pixelformat */
465 q_data = get_q_data(ctx, f->type);
466 f->fmt.pix.pixelformat = q_data->fourcc;
315 467
316 return 0; 468 return 0;
317} 469}
@@ -320,7 +472,6 @@ static int coda_try_fmt(struct coda_ctx *ctx, const struct coda_codec *codec,
320 struct v4l2_format *f) 472 struct v4l2_format *f)
321{ 473{
322 struct coda_dev *dev = ctx->dev; 474 struct coda_dev *dev = ctx->dev;
323 struct coda_q_data *q_data;
324 unsigned int max_w, max_h; 475 unsigned int max_w, max_h;
325 enum v4l2_field field; 476 enum v4l2_field field;
326 477
@@ -342,30 +493,35 @@ static int coda_try_fmt(struct coda_ctx *ctx, const struct coda_codec *codec,
342 switch (f->fmt.pix.pixelformat) { 493 switch (f->fmt.pix.pixelformat) {
343 case V4L2_PIX_FMT_YUV420: 494 case V4L2_PIX_FMT_YUV420:
344 case V4L2_PIX_FMT_YVU420: 495 case V4L2_PIX_FMT_YVU420:
345 case V4L2_PIX_FMT_H264: 496 case V4L2_PIX_FMT_NV12:
346 case V4L2_PIX_FMT_MPEG4: 497 /*
347 case V4L2_PIX_FMT_JPEG: 498 * Frame stride must be at least multiple of 8,
348 break; 499 * but multiple of 16 for h.264 or JPEG 4:2:x
349 default: 500 */
350 q_data = get_q_data(ctx, f->type);
351 if (!q_data)
352 return -EINVAL;
353 f->fmt.pix.pixelformat = q_data->fourcc;
354 }
355
356 switch (f->fmt.pix.pixelformat) {
357 case V4L2_PIX_FMT_YUV420:
358 case V4L2_PIX_FMT_YVU420:
359 /* Frame stride must be multiple of 8, but 16 for h.264 */
360 f->fmt.pix.bytesperline = round_up(f->fmt.pix.width, 16); 501 f->fmt.pix.bytesperline = round_up(f->fmt.pix.width, 16);
361 f->fmt.pix.sizeimage = f->fmt.pix.bytesperline * 502 f->fmt.pix.sizeimage = f->fmt.pix.bytesperline *
362 f->fmt.pix.height * 3 / 2; 503 f->fmt.pix.height * 3 / 2;
363 break; 504 break;
505 case V4L2_PIX_FMT_YUV422P:
506 f->fmt.pix.bytesperline = round_up(f->fmt.pix.width, 16);
507 f->fmt.pix.sizeimage = f->fmt.pix.bytesperline *
508 f->fmt.pix.height * 2;
509 break;
510 case V4L2_PIX_FMT_JPEG:
511 f->fmt.pix.colorspace = V4L2_COLORSPACE_JPEG;
512 /* fallthrough */
364 case V4L2_PIX_FMT_H264: 513 case V4L2_PIX_FMT_H264:
365 case V4L2_PIX_FMT_MPEG4: 514 case V4L2_PIX_FMT_MPEG4:
366 case V4L2_PIX_FMT_JPEG:
367 f->fmt.pix.bytesperline = 0; 515 f->fmt.pix.bytesperline = 0;
368 f->fmt.pix.sizeimage = CODA_MAX_FRAME_SIZE; 516 /*
517 * This is a rough estimate for sensible compressed buffer
518 * sizes (between 1 and 16 bits per pixel). This could be
519 * improved by better format specific worst case estimates.
520 */
521 f->fmt.pix.sizeimage = round_up(clamp(f->fmt.pix.sizeimage,
522 f->fmt.pix.width * f->fmt.pix.height / 8,
523 f->fmt.pix.width * f->fmt.pix.height * 2),
524 PAGE_SIZE);
369 break; 525 break;
370 default: 526 default:
371 BUG(); 527 BUG();
@@ -378,34 +534,35 @@ static int coda_try_fmt_vid_cap(struct file *file, void *priv,
378 struct v4l2_format *f) 534 struct v4l2_format *f)
379{ 535{
380 struct coda_ctx *ctx = fh_to_ctx(priv); 536 struct coda_ctx *ctx = fh_to_ctx(priv);
381 const struct coda_codec *codec = NULL; 537 const struct coda_q_data *q_data_src;
538 const struct coda_codec *codec;
382 struct vb2_queue *src_vq; 539 struct vb2_queue *src_vq;
383 int ret; 540 int ret;
384 541
542 ret = coda_try_pixelformat(ctx, f);
543 if (ret < 0)
544 return ret;
545
546 q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
547
385 /* 548 /*
386 * If the source format is already fixed, try to find a codec that 549 * If the source format is already fixed, only allow the same output
387 * converts to the given destination format 550 * resolution
388 */ 551 */
389 src_vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT); 552 src_vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
390 if (vb2_is_streaming(src_vq)) { 553 if (vb2_is_streaming(src_vq)) {
391 struct coda_q_data *q_data_src;
392
393 q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
394 codec = coda_find_codec(ctx->dev, q_data_src->fourcc,
395 f->fmt.pix.pixelformat);
396 if (!codec)
397 return -EINVAL;
398
399 f->fmt.pix.width = q_data_src->width; 554 f->fmt.pix.width = q_data_src->width;
400 f->fmt.pix.height = q_data_src->height; 555 f->fmt.pix.height = q_data_src->height;
401 } else {
402 /* Otherwise determine codec by encoded format, if possible */
403 codec = coda_find_codec(ctx->dev, V4L2_PIX_FMT_YUV420,
404 f->fmt.pix.pixelformat);
405 } 556 }
406 557
407 f->fmt.pix.colorspace = ctx->colorspace; 558 f->fmt.pix.colorspace = ctx->colorspace;
408 559
560 q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
561 codec = coda_find_codec(ctx->dev, q_data_src->fourcc,
562 f->fmt.pix.pixelformat);
563 if (!codec)
564 return -EINVAL;
565
409 ret = coda_try_fmt(ctx, codec, f); 566 ret = coda_try_fmt(ctx, codec, f);
410 if (ret < 0) 567 if (ret < 0)
411 return ret; 568 return ret;
@@ -426,21 +583,24 @@ static int coda_try_fmt_vid_out(struct file *file, void *priv,
426 struct v4l2_format *f) 583 struct v4l2_format *f)
427{ 584{
428 struct coda_ctx *ctx = fh_to_ctx(priv); 585 struct coda_ctx *ctx = fh_to_ctx(priv);
429 const struct coda_codec *codec = NULL; 586 struct coda_dev *dev = ctx->dev;
587 const struct coda_q_data *q_data_dst;
588 const struct coda_codec *codec;
589 int ret;
430 590
431 /* Determine codec by encoded format, returns NULL if raw or invalid */ 591 ret = coda_try_pixelformat(ctx, f);
432 if (ctx->inst_type == CODA_INST_DECODER) { 592 if (ret < 0)
433 codec = coda_find_codec(ctx->dev, f->fmt.pix.pixelformat, 593 return ret;
434 V4L2_PIX_FMT_YUV420); 594
435 if (!codec) 595 if (!f->fmt.pix.colorspace) {
436 codec = coda_find_codec(ctx->dev, V4L2_PIX_FMT_H264, 596 if (f->fmt.pix.pixelformat == V4L2_PIX_FMT_JPEG)
437 V4L2_PIX_FMT_YUV420); 597 f->fmt.pix.colorspace = V4L2_COLORSPACE_JPEG;
438 if (!codec) 598 else
439 return -EINVAL; 599 f->fmt.pix.colorspace = V4L2_COLORSPACE_REC709;
440 } 600 }
441 601
442 if (!f->fmt.pix.colorspace) 602 q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
443 f->fmt.pix.colorspace = V4L2_COLORSPACE_REC709; 603 codec = coda_find_codec(dev, f->fmt.pix.pixelformat, q_data_dst->fourcc);
444 604
445 return coda_try_fmt(ctx, codec, f); 605 return coda_try_fmt(ctx, codec, f);
446} 606}
@@ -781,6 +941,7 @@ static int coda_job_ready(void *m2m_priv)
781 941
782 if (ctx->hold || 942 if (ctx->hold ||
783 ((ctx->inst_type == CODA_INST_DECODER) && 943 ((ctx->inst_type == CODA_INST_DECODER) &&
944 !v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) &&
784 (coda_get_bitstream_payload(ctx) < 512) && 945 (coda_get_bitstream_payload(ctx) < 512) &&
785 !(ctx->bit_stream_param & CODA_BIT_STREAM_END_FLAG))) { 946 !(ctx->bit_stream_param & CODA_BIT_STREAM_END_FLAG))) {
786 v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev, 947 v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
@@ -862,25 +1023,17 @@ static void coda_set_tiled_map_type(struct coda_ctx *ctx, int tiled_map_type)
862 1023
863static void set_default_params(struct coda_ctx *ctx) 1024static void set_default_params(struct coda_ctx *ctx)
864{ 1025{
865 u32 src_fourcc, dst_fourcc; 1026 unsigned int max_w, max_h, size;
866 int max_w;
867 int max_h;
868 1027
869 if (ctx->inst_type == CODA_INST_ENCODER) { 1028 ctx->codec = coda_find_codec(ctx->dev, ctx->cvd->src_formats[0],
870 src_fourcc = V4L2_PIX_FMT_YUV420; 1029 ctx->cvd->dst_formats[0]);
871 dst_fourcc = V4L2_PIX_FMT_H264; 1030 max_w = min(ctx->codec->max_w, 1920U);
872 } else { 1031 max_h = min(ctx->codec->max_h, 1088U);
873 src_fourcc = V4L2_PIX_FMT_H264; 1032 size = max_w * max_h * 3 / 2;
874 dst_fourcc = V4L2_PIX_FMT_YUV420;
875 }
876 ctx->codec = coda_find_codec(ctx->dev, src_fourcc, dst_fourcc);
877 max_w = ctx->codec->max_w;
878 max_h = ctx->codec->max_h;
879 1033
880 ctx->params.codec_mode = ctx->codec->mode; 1034 ctx->params.codec_mode = ctx->codec->mode;
881 ctx->colorspace = V4L2_COLORSPACE_REC709; 1035 ctx->colorspace = V4L2_COLORSPACE_REC709;
882 ctx->params.framerate = 30; 1036 ctx->params.framerate = 30;
883 ctx->aborting = 0;
884 1037
885 /* Default formats for output and input queues */ 1038 /* Default formats for output and input queues */
886 ctx->q_data[V4L2_M2M_SRC].fourcc = ctx->codec->src_fourcc; 1039 ctx->q_data[V4L2_M2M_SRC].fourcc = ctx->codec->src_fourcc;
@@ -891,14 +1044,14 @@ static void set_default_params(struct coda_ctx *ctx)
891 ctx->q_data[V4L2_M2M_DST].height = max_h; 1044 ctx->q_data[V4L2_M2M_DST].height = max_h;
892 if (ctx->codec->src_fourcc == V4L2_PIX_FMT_YUV420) { 1045 if (ctx->codec->src_fourcc == V4L2_PIX_FMT_YUV420) {
893 ctx->q_data[V4L2_M2M_SRC].bytesperline = max_w; 1046 ctx->q_data[V4L2_M2M_SRC].bytesperline = max_w;
894 ctx->q_data[V4L2_M2M_SRC].sizeimage = (max_w * max_h * 3) / 2; 1047 ctx->q_data[V4L2_M2M_SRC].sizeimage = size;
895 ctx->q_data[V4L2_M2M_DST].bytesperline = 0; 1048 ctx->q_data[V4L2_M2M_DST].bytesperline = 0;
896 ctx->q_data[V4L2_M2M_DST].sizeimage = CODA_MAX_FRAME_SIZE; 1049 ctx->q_data[V4L2_M2M_DST].sizeimage = round_up(size, PAGE_SIZE);
897 } else { 1050 } else {
898 ctx->q_data[V4L2_M2M_SRC].bytesperline = 0; 1051 ctx->q_data[V4L2_M2M_SRC].bytesperline = 0;
899 ctx->q_data[V4L2_M2M_SRC].sizeimage = CODA_MAX_FRAME_SIZE; 1052 ctx->q_data[V4L2_M2M_SRC].sizeimage = round_up(size, PAGE_SIZE);
900 ctx->q_data[V4L2_M2M_DST].bytesperline = max_w; 1053 ctx->q_data[V4L2_M2M_DST].bytesperline = max_w;
901 ctx->q_data[V4L2_M2M_DST].sizeimage = (max_w * max_h * 3) / 2; 1054 ctx->q_data[V4L2_M2M_DST].sizeimage = size;
902 } 1055 }
903 ctx->q_data[V4L2_M2M_SRC].rect.width = max_w; 1056 ctx->q_data[V4L2_M2M_SRC].rect.width = max_w;
904 ctx->q_data[V4L2_M2M_SRC].rect.height = max_h; 1057 ctx->q_data[V4L2_M2M_SRC].rect.height = max_h;
@@ -964,7 +1117,7 @@ static void coda_buf_queue(struct vb2_buffer *vb)
964 * In the decoder case, immediately try to copy the buffer into the 1117 * In the decoder case, immediately try to copy the buffer into the
965 * bitstream ringbuffer and mark it as ready to be dequeued. 1118 * bitstream ringbuffer and mark it as ready to be dequeued.
966 */ 1119 */
967 if (q_data->fourcc == V4L2_PIX_FMT_H264 && 1120 if (ctx->inst_type == CODA_INST_DECODER &&
968 vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) { 1121 vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
969 /* 1122 /*
970 * For backwards compatibility, queuing an empty buffer marks 1123 * For backwards compatibility, queuing an empty buffer marks
@@ -1027,12 +1180,13 @@ static int coda_start_streaming(struct vb2_queue *q, unsigned int count)
1027 struct v4l2_device *v4l2_dev = &ctx->dev->v4l2_dev; 1180 struct v4l2_device *v4l2_dev = &ctx->dev->v4l2_dev;
1028 struct coda_q_data *q_data_src, *q_data_dst; 1181 struct coda_q_data *q_data_src, *q_data_dst;
1029 struct vb2_buffer *buf; 1182 struct vb2_buffer *buf;
1030 u32 dst_fourcc;
1031 int ret = 0; 1183 int ret = 0;
1032 1184
1033 q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT); 1185 q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
1034 if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) { 1186 if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
1035 if (q_data_src->fourcc == V4L2_PIX_FMT_H264) { 1187 if (q_data_src->fourcc == V4L2_PIX_FMT_H264 ||
1188 (q_data_src->fourcc == V4L2_PIX_FMT_JPEG &&
1189 ctx->dev->devtype->product == CODA_7541)) {
1036 /* copy the buffers that where queued before streamon */ 1190 /* copy the buffers that where queued before streamon */
1037 mutex_lock(&ctx->bitstream_mutex); 1191 mutex_lock(&ctx->bitstream_mutex);
1038 coda_fill_bitstream(ctx); 1192 coda_fill_bitstream(ctx);
@@ -1063,13 +1217,12 @@ static int coda_start_streaming(struct vb2_queue *q, unsigned int count)
1063 if (!(ctx->streamon_out & ctx->streamon_cap)) 1217 if (!(ctx->streamon_out & ctx->streamon_cap))
1064 return 0; 1218 return 0;
1065 1219
1066 /* Allow decoder device_run with no new buffers queued */ 1220 /* Allow BIT decoder device_run with no new buffers queued */
1067 if (ctx->inst_type == CODA_INST_DECODER) 1221 if (ctx->inst_type == CODA_INST_DECODER)
1068 v4l2_m2m_set_src_buffered(ctx->fh.m2m_ctx, true); 1222 v4l2_m2m_set_src_buffered(ctx->fh.m2m_ctx, true);
1069 1223
1070 ctx->gopcounter = ctx->params.gop_size - 1; 1224 ctx->gopcounter = ctx->params.gop_size - 1;
1071 q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE); 1225 q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
1072 dst_fourcc = q_data_dst->fourcc;
1073 1226
1074 ctx->codec = coda_find_codec(ctx->dev, q_data_src->fourcc, 1227 ctx->codec = coda_find_codec(ctx->dev, q_data_src->fourcc,
1075 q_data_dst->fourcc); 1228 q_data_dst->fourcc);
@@ -1079,6 +1232,10 @@ static int coda_start_streaming(struct vb2_queue *q, unsigned int count)
1079 goto err; 1232 goto err;
1080 } 1233 }
1081 1234
1235 if (q_data_dst->fourcc == V4L2_PIX_FMT_JPEG)
1236 ctx->params.gop_size = 1;
1237 ctx->gopcounter = ctx->params.gop_size - 1;
1238
1082 ret = ctx->ops->start_streaming(ctx); 1239 ret = ctx->ops->start_streaming(ctx);
1083 if (ctx->inst_type == CODA_INST_DECODER) { 1240 if (ctx->inst_type == CODA_INST_DECODER) {
1084 if (ret == -EAGAIN) 1241 if (ret == -EAGAIN)
@@ -1093,10 +1250,10 @@ static int coda_start_streaming(struct vb2_queue *q, unsigned int count)
1093err: 1250err:
1094 if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) { 1251 if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
1095 while ((buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx))) 1252 while ((buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx)))
1096 v4l2_m2m_buf_done(buf, VB2_BUF_STATE_DEQUEUED); 1253 v4l2_m2m_buf_done(buf, VB2_BUF_STATE_QUEUED);
1097 } else { 1254 } else {
1098 while ((buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx))) 1255 while ((buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx)))
1099 v4l2_m2m_buf_done(buf, VB2_BUF_STATE_DEQUEUED); 1256 v4l2_m2m_buf_done(buf, VB2_BUF_STATE_QUEUED);
1100 } 1257 }
1101 return ret; 1258 return ret;
1102} 1259}
@@ -1131,19 +1288,20 @@ static void coda_stop_streaming(struct vb2_queue *q)
1131 } 1288 }
1132 1289
1133 if (!ctx->streamon_out && !ctx->streamon_cap) { 1290 if (!ctx->streamon_out && !ctx->streamon_cap) {
1134 struct coda_timestamp *ts; 1291 struct coda_buffer_meta *meta;
1135 1292
1136 mutex_lock(&ctx->bitstream_mutex); 1293 mutex_lock(&ctx->bitstream_mutex);
1137 while (!list_empty(&ctx->timestamp_list)) { 1294 while (!list_empty(&ctx->buffer_meta_list)) {
1138 ts = list_first_entry(&ctx->timestamp_list, 1295 meta = list_first_entry(&ctx->buffer_meta_list,
1139 struct coda_timestamp, list); 1296 struct coda_buffer_meta, list);
1140 list_del(&ts->list); 1297 list_del(&meta->list);
1141 kfree(ts); 1298 kfree(meta);
1142 } 1299 }
1143 mutex_unlock(&ctx->bitstream_mutex); 1300 mutex_unlock(&ctx->bitstream_mutex);
1144 kfifo_init(&ctx->bitstream_fifo, 1301 kfifo_init(&ctx->bitstream_fifo,
1145 ctx->bitstream.vaddr, ctx->bitstream.size); 1302 ctx->bitstream.vaddr, ctx->bitstream.size);
1146 ctx->runcounter = 0; 1303 ctx->runcounter = 0;
1304 ctx->aborting = 0;
1147 } 1305 }
1148} 1306}
1149 1307
@@ -1226,6 +1384,12 @@ static int coda_s_ctrl(struct v4l2_ctrl *ctrl)
1226 case V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB: 1384 case V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB:
1227 ctx->params.intra_refresh = ctrl->val; 1385 ctx->params.intra_refresh = ctrl->val;
1228 break; 1386 break;
1387 case V4L2_CID_JPEG_COMPRESSION_QUALITY:
1388 coda_set_jpeg_compression_quality(ctx, ctrl->val);
1389 break;
1390 case V4L2_CID_JPEG_RESTART_INTERVAL:
1391 ctx->params.jpeg_restart_interval = ctrl->val;
1392 break;
1229 default: 1393 default:
1230 v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev, 1394 v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
1231 "Invalid control, id=%d, val=%d\n", 1395 "Invalid control, id=%d, val=%d\n",
@@ -1240,14 +1404,8 @@ static const struct v4l2_ctrl_ops coda_ctrl_ops = {
1240 .s_ctrl = coda_s_ctrl, 1404 .s_ctrl = coda_s_ctrl,
1241}; 1405};
1242 1406
1243static int coda_ctrls_setup(struct coda_ctx *ctx) 1407static void coda_encode_ctrls(struct coda_ctx *ctx)
1244{ 1408{
1245 v4l2_ctrl_handler_init(&ctx->ctrls, 9);
1246
1247 v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
1248 V4L2_CID_HFLIP, 0, 1, 1, 0);
1249 v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
1250 V4L2_CID_VFLIP, 0, 1, 1, 0);
1251 v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops, 1409 v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
1252 V4L2_CID_MPEG_VIDEO_BITRATE, 0, 32767000, 1, 0); 1410 V4L2_CID_MPEG_VIDEO_BITRATE, 0, 32767000, 1, 0);
1253 v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops, 1411 v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
@@ -1291,6 +1449,30 @@ static int coda_ctrls_setup(struct coda_ctx *ctx)
1291 v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops, 1449 v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
1292 V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB, 0, 1450 V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB, 0,
1293 1920 * 1088 / 256, 1, 0); 1451 1920 * 1088 / 256, 1, 0);
1452}
1453
1454static void coda_jpeg_encode_ctrls(struct coda_ctx *ctx)
1455{
1456 v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
1457 V4L2_CID_JPEG_COMPRESSION_QUALITY, 5, 100, 1, 50);
1458 v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
1459 V4L2_CID_JPEG_RESTART_INTERVAL, 0, 100, 1, 0);
1460}
1461
1462static int coda_ctrls_setup(struct coda_ctx *ctx)
1463{
1464 v4l2_ctrl_handler_init(&ctx->ctrls, 2);
1465
1466 v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
1467 V4L2_CID_HFLIP, 0, 1, 1, 0);
1468 v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
1469 V4L2_CID_VFLIP, 0, 1, 1, 0);
1470 if (ctx->inst_type == CODA_INST_ENCODER) {
1471 if (ctx->cvd->dst_formats[0] == V4L2_PIX_FMT_JPEG)
1472 coda_jpeg_encode_ctrls(ctx);
1473 else
1474 coda_encode_ctrls(ctx);
1475 }
1294 1476
1295 if (ctx->ctrls.error) { 1477 if (ctx->ctrls.error) {
1296 v4l2_err(&ctx->dev->v4l2_dev, 1478 v4l2_err(&ctx->dev->v4l2_dev,
@@ -1364,10 +1546,14 @@ static int coda_next_free_instance(struct coda_dev *dev)
1364 return idx; 1546 return idx;
1365} 1547}
1366 1548
1367static int coda_open(struct file *file, enum coda_inst_type inst_type, 1549/*
1368 const struct coda_context_ops *ctx_ops) 1550 * File operations
1551 */
1552
1553static int coda_open(struct file *file)
1369{ 1554{
1370 struct coda_dev *dev = video_drvdata(file); 1555 struct video_device *vdev = video_devdata(file);
1556 struct coda_dev *dev = video_get_drvdata(vdev);
1371 struct coda_ctx *ctx = NULL; 1557 struct coda_ctx *ctx = NULL;
1372 char *name; 1558 char *name;
1373 int ret; 1559 int ret;
@@ -1388,8 +1574,9 @@ static int coda_open(struct file *file, enum coda_inst_type inst_type,
1388 ctx->debugfs_entry = debugfs_create_dir(name, dev->debugfs_root); 1574 ctx->debugfs_entry = debugfs_create_dir(name, dev->debugfs_root);
1389 kfree(name); 1575 kfree(name);
1390 1576
1391 ctx->inst_type = inst_type; 1577 ctx->cvd = to_coda_video_device(vdev);
1392 ctx->ops = ctx_ops; 1578 ctx->inst_type = ctx->cvd->type;
1579 ctx->ops = ctx->cvd->ops;
1393 init_completion(&ctx->completion); 1580 init_completion(&ctx->completion);
1394 INIT_WORK(&ctx->pic_run_work, coda_pic_run_work); 1581 INIT_WORK(&ctx->pic_run_work, coda_pic_run_work);
1395 INIT_WORK(&ctx->seq_end_work, ctx->ops->seq_end_work); 1582 INIT_WORK(&ctx->seq_end_work, ctx->ops->seq_end_work);
@@ -1399,8 +1586,10 @@ static int coda_open(struct file *file, enum coda_inst_type inst_type,
1399 ctx->dev = dev; 1586 ctx->dev = dev;
1400 ctx->idx = idx; 1587 ctx->idx = idx;
1401 switch (dev->devtype->product) { 1588 switch (dev->devtype->product) {
1402 case CODA_7541:
1403 case CODA_960: 1589 case CODA_960:
1590 ctx->frame_mem_ctrl = 1 << 12;
1591 /* fallthrough */
1592 case CODA_7541:
1404 ctx->reg_idx = 0; 1593 ctx->reg_idx = 0;
1405 break; 1594 break;
1406 default: 1595 default:
@@ -1441,16 +1630,17 @@ static int coda_open(struct file *file, enum coda_inst_type inst_type,
1441 1630
1442 ctx->fh.ctrl_handler = &ctx->ctrls; 1631 ctx->fh.ctrl_handler = &ctx->ctrls;
1443 1632
1444 ret = coda_alloc_context_buf(ctx, &ctx->parabuf, CODA_PARA_BUF_SIZE, 1633 ret = coda_alloc_context_buf(ctx, &ctx->parabuf,
1445 "parabuf"); 1634 CODA_PARA_BUF_SIZE, "parabuf");
1446 if (ret < 0) { 1635 if (ret < 0) {
1447 v4l2_err(&dev->v4l2_dev, "failed to allocate parabuf"); 1636 v4l2_err(&dev->v4l2_dev, "failed to allocate parabuf");
1448 goto err_dma_alloc; 1637 goto err_dma_alloc;
1449 } 1638 }
1450 1639
1451 ctx->bitstream.size = CODA_MAX_FRAME_SIZE; 1640 ctx->bitstream.size = CODA_MAX_FRAME_SIZE;
1452 ctx->bitstream.vaddr = dma_alloc_writecombine(&dev->plat_dev->dev, 1641 ctx->bitstream.vaddr = dma_alloc_writecombine(
1453 ctx->bitstream.size, &ctx->bitstream.paddr, GFP_KERNEL); 1642 &dev->plat_dev->dev, ctx->bitstream.size,
1643 &ctx->bitstream.paddr, GFP_KERNEL);
1454 if (!ctx->bitstream.vaddr) { 1644 if (!ctx->bitstream.vaddr) {
1455 v4l2_err(&dev->v4l2_dev, 1645 v4l2_err(&dev->v4l2_dev,
1456 "failed to allocate bitstream ringbuffer"); 1646 "failed to allocate bitstream ringbuffer");
@@ -1461,7 +1651,7 @@ static int coda_open(struct file *file, enum coda_inst_type inst_type,
1461 ctx->bitstream.vaddr, ctx->bitstream.size); 1651 ctx->bitstream.vaddr, ctx->bitstream.size);
1462 mutex_init(&ctx->bitstream_mutex); 1652 mutex_init(&ctx->bitstream_mutex);
1463 mutex_init(&ctx->buffer_mutex); 1653 mutex_init(&ctx->buffer_mutex);
1464 INIT_LIST_HEAD(&ctx->timestamp_list); 1654 INIT_LIST_HEAD(&ctx->buffer_meta_list);
1465 1655
1466 coda_lock(ctx); 1656 coda_lock(ctx);
1467 list_add(&ctx->list, &dev->instances); 1657 list_add(&ctx->list, &dev->instances);
@@ -1495,16 +1685,6 @@ err_coda_max:
1495 return ret; 1685 return ret;
1496} 1686}
1497 1687
1498static int coda_encoder_open(struct file *file)
1499{
1500 return coda_open(file, CODA_INST_ENCODER, &coda_bit_encode_ops);
1501}
1502
1503static int coda_decoder_open(struct file *file)
1504{
1505 return coda_open(file, CODA_INST_DECODER, &coda_bit_decode_ops);
1506}
1507
1508static int coda_release(struct file *file) 1688static int coda_release(struct file *file)
1509{ 1689{
1510 struct coda_dev *dev = video_drvdata(file); 1690 struct coda_dev *dev = video_drvdata(file);
@@ -1515,6 +1695,9 @@ static int coda_release(struct file *file)
1515 1695
1516 debugfs_remove_recursive(ctx->debugfs_entry); 1696 debugfs_remove_recursive(ctx->debugfs_entry);
1517 1697
1698 if (ctx->inst_type == CODA_INST_DECODER)
1699 coda_bit_stream_end_flag(ctx);
1700
1518 /* If this instance is running, call .job_abort and wait for it to end */ 1701 /* If this instance is running, call .job_abort and wait for it to end */
1519 v4l2_m2m_ctx_release(ctx->fh.m2m_ctx); 1702 v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
1520 1703
@@ -1528,8 +1711,10 @@ static int coda_release(struct file *file)
1528 list_del(&ctx->list); 1711 list_del(&ctx->list);
1529 coda_unlock(ctx); 1712 coda_unlock(ctx);
1530 1713
1531 dma_free_writecombine(&dev->plat_dev->dev, ctx->bitstream.size, 1714 if (ctx->bitstream.vaddr) {
1532 ctx->bitstream.vaddr, ctx->bitstream.paddr); 1715 dma_free_writecombine(&dev->plat_dev->dev, ctx->bitstream.size,
1716 ctx->bitstream.vaddr, ctx->bitstream.paddr);
1717 }
1533 if (ctx->dev->devtype->product == CODA_DX6) 1718 if (ctx->dev->devtype->product == CODA_DX6)
1534 coda_free_aux_buf(dev, &ctx->workbuf); 1719 coda_free_aux_buf(dev, &ctx->workbuf);
1535 1720
@@ -1548,18 +1733,9 @@ static int coda_release(struct file *file)
1548 return 0; 1733 return 0;
1549} 1734}
1550 1735
1551static const struct v4l2_file_operations coda_encoder_fops = { 1736static const struct v4l2_file_operations coda_fops = {
1552 .owner = THIS_MODULE,
1553 .open = coda_encoder_open,
1554 .release = coda_release,
1555 .poll = v4l2_m2m_fop_poll,
1556 .unlocked_ioctl = video_ioctl2,
1557 .mmap = v4l2_m2m_fop_mmap,
1558};
1559
1560static const struct v4l2_file_operations coda_decoder_fops = {
1561 .owner = THIS_MODULE, 1737 .owner = THIS_MODULE,
1562 .open = coda_decoder_open, 1738 .open = coda_open,
1563 .release = coda_release, 1739 .release = coda_release,
1564 .poll = v4l2_m2m_fop_poll, 1740 .poll = v4l2_m2m_fop_poll,
1565 .unlocked_ioctl = video_ioctl2, 1741 .unlocked_ioctl = video_ioctl2,
@@ -1664,8 +1840,16 @@ err_clk_per:
1664 return ret; 1840 return ret;
1665} 1841}
1666 1842
1667static int coda_register_device(struct coda_dev *dev, struct video_device *vfd) 1843static int coda_register_device(struct coda_dev *dev, int i)
1668{ 1844{
1845 struct video_device *vfd = &dev->vfd[i];
1846
1847 if (i > ARRAY_SIZE(dev->vfd))
1848 return -EINVAL;
1849
1850 snprintf(vfd->name, sizeof(vfd->name), dev->devtype->vdevs[i]->name);
1851 vfd->fops = &coda_fops;
1852 vfd->ioctl_ops = &coda_ioctl_ops;
1669 vfd->release = video_device_release_empty, 1853 vfd->release = video_device_release_empty,
1670 vfd->lock = &dev->dev_mutex; 1854 vfd->lock = &dev->dev_mutex;
1671 vfd->v4l2_dev = &dev->v4l2_dev; 1855 vfd->v4l2_dev = &dev->v4l2_dev;
@@ -1684,7 +1868,7 @@ static void coda_fw_callback(const struct firmware *fw, void *context)
1684{ 1868{
1685 struct coda_dev *dev = context; 1869 struct coda_dev *dev = context;
1686 struct platform_device *pdev = dev->plat_dev; 1870 struct platform_device *pdev = dev->plat_dev;
1687 int ret; 1871 int i, ret;
1688 1872
1689 if (!fw) { 1873 if (!fw) {
1690 v4l2_err(&dev->v4l2_dev, "firmware request failed\n"); 1874 v4l2_err(&dev->v4l2_dev, "firmware request failed\n");
@@ -1725,33 +1909,25 @@ static void coda_fw_callback(const struct firmware *fw, void *context)
1725 goto rel_ctx; 1909 goto rel_ctx;
1726 } 1910 }
1727 1911
1728 dev->vfd[0].fops = &coda_encoder_fops, 1912 for (i = 0; i < dev->devtype->num_vdevs; i++) {
1729 dev->vfd[0].ioctl_ops = &coda_ioctl_ops; 1913 ret = coda_register_device(dev, i);
1730 snprintf(dev->vfd[0].name, sizeof(dev->vfd[0].name), "coda-encoder"); 1914 if (ret) {
1731 ret = coda_register_device(dev, &dev->vfd[0]); 1915 v4l2_err(&dev->v4l2_dev,
1732 if (ret) { 1916 "Failed to register %s video device: %d\n",
1733 v4l2_err(&dev->v4l2_dev, 1917 dev->devtype->vdevs[i]->name, ret);
1734 "Failed to register encoder video device\n"); 1918 goto rel_vfd;
1735 goto rel_m2m; 1919 }
1736 }
1737
1738 dev->vfd[1].fops = &coda_decoder_fops,
1739 dev->vfd[1].ioctl_ops = &coda_ioctl_ops;
1740 snprintf(dev->vfd[1].name, sizeof(dev->vfd[1].name), "coda-decoder");
1741 ret = coda_register_device(dev, &dev->vfd[1]);
1742 if (ret) {
1743 v4l2_err(&dev->v4l2_dev,
1744 "Failed to register decoder video device\n");
1745 goto rel_m2m;
1746 } 1920 }
1747 1921
1748 v4l2_info(&dev->v4l2_dev, "codec registered as /dev/video[%d-%d]\n", 1922 v4l2_info(&dev->v4l2_dev, "codec registered as /dev/video[%d-%d]\n",
1749 dev->vfd[0].num, dev->vfd[1].num); 1923 dev->vfd[0].num, dev->vfd[i - 1].num);
1750 1924
1751 pm_runtime_put_sync(&pdev->dev); 1925 pm_runtime_put_sync(&pdev->dev);
1752 return; 1926 return;
1753 1927
1754rel_m2m: 1928rel_vfd:
1929 while (--i >= 0)
1930 video_unregister_device(&dev->vfd[i]);
1755 v4l2_m2m_release(dev->m2m_dev); 1931 v4l2_m2m_release(dev->m2m_dev);
1756rel_ctx: 1932rel_ctx:
1757 vb2_dma_contig_cleanup_ctx(dev->alloc_ctx); 1933 vb2_dma_contig_cleanup_ctx(dev->alloc_ctx);
@@ -1783,6 +1959,8 @@ static const struct coda_devtype coda_devdata[] = {
1783 .product = CODA_DX6, 1959 .product = CODA_DX6,
1784 .codecs = codadx6_codecs, 1960 .codecs = codadx6_codecs,
1785 .num_codecs = ARRAY_SIZE(codadx6_codecs), 1961 .num_codecs = ARRAY_SIZE(codadx6_codecs),
1962 .vdevs = codadx6_video_devices,
1963 .num_vdevs = ARRAY_SIZE(codadx6_video_devices),
1786 .workbuf_size = 288 * 1024 + FMO_SLICE_SAVE_BUF_SIZE * 8 * 1024, 1964 .workbuf_size = 288 * 1024 + FMO_SLICE_SAVE_BUF_SIZE * 8 * 1024,
1787 .iram_size = 0xb000, 1965 .iram_size = 0xb000,
1788 }, 1966 },
@@ -1791,6 +1969,8 @@ static const struct coda_devtype coda_devdata[] = {
1791 .product = CODA_7541, 1969 .product = CODA_7541,
1792 .codecs = coda7_codecs, 1970 .codecs = coda7_codecs,
1793 .num_codecs = ARRAY_SIZE(coda7_codecs), 1971 .num_codecs = ARRAY_SIZE(coda7_codecs),
1972 .vdevs = coda7_video_devices,
1973 .num_vdevs = ARRAY_SIZE(coda7_video_devices),
1794 .workbuf_size = 128 * 1024, 1974 .workbuf_size = 128 * 1024,
1795 .tempbuf_size = 304 * 1024, 1975 .tempbuf_size = 304 * 1024,
1796 .iram_size = 0x14000, 1976 .iram_size = 0x14000,
@@ -1800,6 +1980,8 @@ static const struct coda_devtype coda_devdata[] = {
1800 .product = CODA_960, 1980 .product = CODA_960,
1801 .codecs = coda9_codecs, 1981 .codecs = coda9_codecs,
1802 .num_codecs = ARRAY_SIZE(coda9_codecs), 1982 .num_codecs = ARRAY_SIZE(coda9_codecs),
1983 .vdevs = coda9_video_devices,
1984 .num_vdevs = ARRAY_SIZE(coda9_video_devices),
1803 .workbuf_size = 80 * 1024, 1985 .workbuf_size = 80 * 1024,
1804 .tempbuf_size = 204 * 1024, 1986 .tempbuf_size = 204 * 1024,
1805 .iram_size = 0x21000, 1987 .iram_size = 0x21000,
@@ -1809,6 +1991,8 @@ static const struct coda_devtype coda_devdata[] = {
1809 .product = CODA_960, 1991 .product = CODA_960,
1810 .codecs = coda9_codecs, 1992 .codecs = coda9_codecs,
1811 .num_codecs = ARRAY_SIZE(coda9_codecs), 1993 .num_codecs = ARRAY_SIZE(coda9_codecs),
1994 .vdevs = coda9_video_devices,
1995 .num_vdevs = ARRAY_SIZE(coda9_video_devices),
1812 .workbuf_size = 80 * 1024, 1996 .workbuf_size = 80 * 1024,
1813 .tempbuf_size = 204 * 1024, 1997 .tempbuf_size = 204 * 1024,
1814 .iram_size = 0x20000, 1998 .iram_size = 0x20000,
@@ -1846,10 +2030,18 @@ static int coda_probe(struct platform_device *pdev)
1846 int ret, irq; 2030 int ret, irq;
1847 2031
1848 dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL); 2032 dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
1849 if (!dev) { 2033 if (!dev)
1850 dev_err(&pdev->dev, "Not enough memory for %s\n",
1851 CODA_NAME);
1852 return -ENOMEM; 2034 return -ENOMEM;
2035
2036 pdev_id = of_id ? of_id->data : platform_get_device_id(pdev);
2037
2038 if (of_id) {
2039 dev->devtype = of_id->data;
2040 } else if (pdev_id) {
2041 dev->devtype = &coda_devdata[pdev_id->driver_data];
2042 } else {
2043 ret = -EINVAL;
2044 goto err_v4l2_register;
1853 } 2045 }
1854 2046
1855 spin_lock_init(&dev->irqlock); 2047 spin_lock_init(&dev->irqlock);
@@ -1919,17 +2111,6 @@ static int coda_probe(struct platform_device *pdev)
1919 mutex_init(&dev->dev_mutex); 2111 mutex_init(&dev->dev_mutex);
1920 mutex_init(&dev->coda_mutex); 2112 mutex_init(&dev->coda_mutex);
1921 2113
1922 pdev_id = of_id ? of_id->data : platform_get_device_id(pdev);
1923
1924 if (of_id) {
1925 dev->devtype = of_id->data;
1926 } else if (pdev_id) {
1927 dev->devtype = &coda_devdata[pdev_id->driver_data];
1928 } else {
1929 v4l2_device_unregister(&dev->v4l2_dev);
1930 return -EINVAL;
1931 }
1932
1933 dev->debugfs_root = debugfs_create_dir("coda", NULL); 2114 dev->debugfs_root = debugfs_create_dir("coda", NULL);
1934 if (!dev->debugfs_root) 2115 if (!dev->debugfs_root)
1935 dev_warn(&pdev->dev, "failed to create debugfs root\n"); 2116 dev_warn(&pdev->dev, "failed to create debugfs root\n");
@@ -1941,8 +2122,7 @@ static int coda_probe(struct platform_device *pdev)
1941 dev->debugfs_root); 2122 dev->debugfs_root);
1942 if (ret < 0) { 2123 if (ret < 0) {
1943 dev_err(&pdev->dev, "failed to allocate work buffer\n"); 2124 dev_err(&pdev->dev, "failed to allocate work buffer\n");
1944 v4l2_device_unregister(&dev->v4l2_dev); 2125 goto err_v4l2_register;
1945 return ret;
1946 } 2126 }
1947 } 2127 }
1948 2128
@@ -1952,8 +2132,7 @@ static int coda_probe(struct platform_device *pdev)
1952 dev->debugfs_root); 2132 dev->debugfs_root);
1953 if (ret < 0) { 2133 if (ret < 0) {
1954 dev_err(&pdev->dev, "failed to allocate temp buffer\n"); 2134 dev_err(&pdev->dev, "failed to allocate temp buffer\n");
1955 v4l2_device_unregister(&dev->v4l2_dev); 2135 goto err_v4l2_register;
1956 return ret;
1957 } 2136 }
1958 } 2137 }
1959 2138
@@ -1973,14 +2152,15 @@ static int coda_probe(struct platform_device *pdev)
1973 dev->workqueue = alloc_workqueue("coda", WQ_UNBOUND | WQ_MEM_RECLAIM, 1); 2152 dev->workqueue = alloc_workqueue("coda", WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
1974 if (!dev->workqueue) { 2153 if (!dev->workqueue) {
1975 dev_err(&pdev->dev, "unable to alloc workqueue\n"); 2154 dev_err(&pdev->dev, "unable to alloc workqueue\n");
1976 return -ENOMEM; 2155 ret = -ENOMEM;
2156 goto err_v4l2_register;
1977 } 2157 }
1978 2158
1979 platform_set_drvdata(pdev, dev); 2159 platform_set_drvdata(pdev, dev);
1980 2160
1981 /* 2161 /*
1982 * Start activated so we can directly call coda_hw_init in 2162 * Start activated so we can directly call coda_hw_init in
1983 * coda_fw_callback regardless of whether CONFIG_PM_RUNTIME is 2163 * coda_fw_callback regardless of whether CONFIG_PM is
1984 * enabled or whether the device is associated with a PM domain. 2164 * enabled or whether the device is associated with a PM domain.
1985 */ 2165 */
1986 pm_runtime_get_noresume(&pdev->dev); 2166 pm_runtime_get_noresume(&pdev->dev);
@@ -1988,14 +2168,21 @@ static int coda_probe(struct platform_device *pdev)
1988 pm_runtime_enable(&pdev->dev); 2168 pm_runtime_enable(&pdev->dev);
1989 2169
1990 return coda_firmware_request(dev); 2170 return coda_firmware_request(dev);
2171
2172err_v4l2_register:
2173 v4l2_device_unregister(&dev->v4l2_dev);
2174 return ret;
1991} 2175}
1992 2176
1993static int coda_remove(struct platform_device *pdev) 2177static int coda_remove(struct platform_device *pdev)
1994{ 2178{
1995 struct coda_dev *dev = platform_get_drvdata(pdev); 2179 struct coda_dev *dev = platform_get_drvdata(pdev);
2180 int i;
1996 2181
1997 video_unregister_device(&dev->vfd[0]); 2182 for (i = 0; i < ARRAY_SIZE(dev->vfd); i++) {
1998 video_unregister_device(&dev->vfd[1]); 2183 if (video_get_drvdata(&dev->vfd[i]))
2184 video_unregister_device(&dev->vfd[i]);
2185 }
1999 if (dev->m2m_dev) 2186 if (dev->m2m_dev)
2000 v4l2_m2m_release(dev->m2m_dev); 2187 v4l2_m2m_release(dev->m2m_dev);
2001 pm_runtime_disable(&pdev->dev); 2188 pm_runtime_disable(&pdev->dev);
@@ -2013,7 +2200,7 @@ static int coda_remove(struct platform_device *pdev)
2013 return 0; 2200 return 0;
2014} 2201}
2015 2202
2016#ifdef CONFIG_PM_RUNTIME 2203#ifdef CONFIG_PM
2017static int coda_runtime_resume(struct device *dev) 2204static int coda_runtime_resume(struct device *dev)
2018{ 2205{
2019 struct coda_dev *cdev = dev_get_drvdata(dev); 2206 struct coda_dev *cdev = dev_get_drvdata(dev);
diff --git a/drivers/media/platform/coda/coda-jpeg.c b/drivers/media/platform/coda/coda-jpeg.c
new file mode 100644
index 000000000000..8fa3e353f9e2
--- /dev/null
+++ b/drivers/media/platform/coda/coda-jpeg.c
@@ -0,0 +1,238 @@
1/*
2 * Coda multi-standard codec IP - JPEG support functions
3 *
4 * Copyright (C) 2014 Philipp Zabel, Pengutronix
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#include <linux/kernel.h>
13#include <linux/swab.h>
14
15#include "coda.h"
16
17#define SOI_MARKER 0xffd8
18#define EOI_MARKER 0xffd9
19
20/*
21 * Typical Huffman tables for 8-bit precision luminance and
22 * chrominance from JPEG ITU-T.81 (ISO/IEC 10918-1) Annex K.3
23 */
24
25static const unsigned char luma_dc_bits[16] = {
26 0x00, 0x01, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01,
27 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
28};
29
30static const unsigned char luma_dc_value[12] = {
31 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
32 0x08, 0x09, 0x0a, 0x0b,
33};
34
35static const unsigned char chroma_dc_bits[16] = {
36 0x00, 0x03, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
37 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
38};
39
40static const unsigned char chroma_dc_value[12] = {
41 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
42 0x08, 0x09, 0x0a, 0x0b,
43};
44
45static const unsigned char luma_ac_bits[16] = {
46 0x00, 0x02, 0x01, 0x03, 0x03, 0x02, 0x04, 0x03,
47 0x05, 0x05, 0x04, 0x04, 0x00, 0x00, 0x01, 0x7d,
48};
49
50static const unsigned char luma_ac_value[162 + 2] = {
51 0x01, 0x02, 0x03, 0x00, 0x04, 0x11, 0x05, 0x12,
52 0x21, 0x31, 0x41, 0x06, 0x13, 0x51, 0x61, 0x07,
53 0x22, 0x71, 0x14, 0x32, 0x81, 0x91, 0xa1, 0x08,
54 0x23, 0x42, 0xb1, 0xc1, 0x15, 0x52, 0xd1, 0xf0,
55 0x24, 0x33, 0x62, 0x72, 0x82, 0x09, 0x0a, 0x16,
56 0x17, 0x18, 0x19, 0x1a, 0x25, 0x26, 0x27, 0x28,
57 0x29, 0x2a, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39,
58 0x3a, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49,
59 0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59,
60 0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69,
61 0x6a, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79,
62 0x7a, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89,
63 0x8a, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98,
64 0x99, 0x9a, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
65 0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6,
66 0xb7, 0xb8, 0xb9, 0xba, 0xc2, 0xc3, 0xc4, 0xc5,
67 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xd2, 0xd3, 0xd4,
68 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xe1, 0xe2,
69 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea,
70 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8,
71 0xf9, 0xfa, /* padded to 32-bit */
72};
73
74static const unsigned char chroma_ac_bits[16] = {
75 0x00, 0x02, 0x01, 0x02, 0x04, 0x04, 0x03, 0x04,
76 0x07, 0x05, 0x04, 0x04, 0x00, 0x01, 0x02, 0x77,
77};
78
79static const unsigned char chroma_ac_value[162 + 2] = {
80 0x00, 0x01, 0x02, 0x03, 0x11, 0x04, 0x05, 0x21,
81 0x31, 0x06, 0x12, 0x41, 0x51, 0x07, 0x61, 0x71,
82 0x13, 0x22, 0x32, 0x81, 0x08, 0x14, 0x42, 0x91,
83 0xa1, 0xb1, 0xc1, 0x09, 0x23, 0x33, 0x52, 0xf0,
84 0x15, 0x62, 0x72, 0xd1, 0x0a, 0x16, 0x24, 0x34,
85 0xe1, 0x25, 0xf1, 0x17, 0x18, 0x19, 0x1a, 0x26,
86 0x27, 0x28, 0x29, 0x2a, 0x35, 0x36, 0x37, 0x38,
87 0x39, 0x3a, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48,
88 0x49, 0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58,
89 0x59, 0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68,
90 0x69, 0x6a, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78,
91 0x79, 0x7a, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
92 0x88, 0x89, 0x8a, 0x92, 0x93, 0x94, 0x95, 0x96,
93 0x97, 0x98, 0x99, 0x9a, 0xa2, 0xa3, 0xa4, 0xa5,
94 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4,
95 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xc2, 0xc3,
96 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xd2,
97 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda,
98 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9,
99 0xea, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8,
100 0xf9, 0xfa, /* padded to 32-bit */
101};
102
103/*
104 * Quantization tables for luminance and chrominance components in
105 * zig-zag scan order from the Freescale i.MX VPU libaries
106 */
107
108static unsigned char luma_q[64] = {
109 0x06, 0x04, 0x04, 0x04, 0x05, 0x04, 0x06, 0x05,
110 0x05, 0x06, 0x09, 0x06, 0x05, 0x06, 0x09, 0x0b,
111 0x08, 0x06, 0x06, 0x08, 0x0b, 0x0c, 0x0a, 0x0a,
112 0x0b, 0x0a, 0x0a, 0x0c, 0x10, 0x0c, 0x0c, 0x0c,
113 0x0c, 0x0c, 0x0c, 0x10, 0x0c, 0x0c, 0x0c, 0x0c,
114 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
115 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
116 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
117};
118
119static unsigned char chroma_q[64] = {
120 0x07, 0x07, 0x07, 0x0d, 0x0c, 0x0d, 0x18, 0x10,
121 0x10, 0x18, 0x14, 0x0e, 0x0e, 0x0e, 0x14, 0x14,
122 0x0e, 0x0e, 0x0e, 0x0e, 0x14, 0x11, 0x0c, 0x0c,
123 0x0c, 0x0c, 0x0c, 0x11, 0x11, 0x0c, 0x0c, 0x0c,
124 0x0c, 0x0c, 0x0c, 0x11, 0x0c, 0x0c, 0x0c, 0x0c,
125 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
126 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
127 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
128};
129
130struct coda_memcpy_desc {
131 int offset;
132 const void *src;
133 size_t len;
134};
135
136static void coda_memcpy_parabuf(void *parabuf,
137 const struct coda_memcpy_desc *desc)
138{
139 u32 *dst = parabuf + desc->offset;
140 const u32 *src = desc->src;
141 int len = desc->len / 4;
142 int i;
143
144 for (i = 0; i < len; i += 2) {
145 dst[i + 1] = swab32(src[i]);
146 dst[i] = swab32(src[i + 1]);
147 }
148}
149
150int coda_jpeg_write_tables(struct coda_ctx *ctx)
151{
152 int i;
153 static const struct coda_memcpy_desc huff[8] = {
154 { 0, luma_dc_bits, sizeof(luma_dc_bits) },
155 { 16, luma_dc_value, sizeof(luma_dc_value) },
156 { 32, luma_ac_bits, sizeof(luma_ac_bits) },
157 { 48, luma_ac_value, sizeof(luma_ac_value) },
158 { 216, chroma_dc_bits, sizeof(chroma_dc_bits) },
159 { 232, chroma_dc_value, sizeof(chroma_dc_value) },
160 { 248, chroma_ac_bits, sizeof(chroma_ac_bits) },
161 { 264, chroma_ac_value, sizeof(chroma_ac_value) },
162 };
163 struct coda_memcpy_desc qmat[3] = {
164 { 512, ctx->params.jpeg_qmat_tab[0], 64 },
165 { 576, ctx->params.jpeg_qmat_tab[1], 64 },
166 { 640, ctx->params.jpeg_qmat_tab[1], 64 },
167 };
168
169 /* Write huffman tables to parameter memory */
170 for (i = 0; i < ARRAY_SIZE(huff); i++)
171 coda_memcpy_parabuf(ctx->parabuf.vaddr, huff + i);
172
173 /* Write Q-matrix to parameter memory */
174 for (i = 0; i < ARRAY_SIZE(qmat); i++)
175 coda_memcpy_parabuf(ctx->parabuf.vaddr, qmat + i);
176
177 return 0;
178}
179
180bool coda_jpeg_check_buffer(struct coda_ctx *ctx, struct vb2_buffer *vb)
181{
182 void *vaddr = vb2_plane_vaddr(vb, 0);
183 u16 soi = be16_to_cpup((__be16 *)vaddr);
184 u16 eoi = be16_to_cpup((__be16 *)(vaddr +
185 vb2_get_plane_payload(vb, 0) - 2));
186
187 return soi == SOI_MARKER && eoi == EOI_MARKER;
188}
189
190/*
191 * Scale quantization table using nonlinear scaling factor
192 * u8 qtab[64], scale [50,190]
193 */
194static void coda_scale_quant_table(u8 *q_tab, int scale)
195{
196 unsigned int temp;
197 int i;
198
199 for (i = 0; i < 64; i++) {
200 temp = DIV_ROUND_CLOSEST((unsigned int)q_tab[i] * scale, 100);
201 if (temp <= 0)
202 temp = 1;
203 if (temp > 255)
204 temp = 255;
205 q_tab[i] = (unsigned char)temp;
206 }
207}
208
209void coda_set_jpeg_compression_quality(struct coda_ctx *ctx, int quality)
210{
211 unsigned int scale;
212
213 ctx->params.jpeg_quality = quality;
214
215 /* Clip quality setting to [5,100] interval */
216 if (quality > 100)
217 quality = 100;
218 if (quality < 5)
219 quality = 5;
220
221 /*
222 * Non-linear scaling factor:
223 * [5,50] -> [1000..100], [51,100] -> [98..0]
224 */
225 if (quality < 50)
226 scale = 5000 / quality;
227 else
228 scale = 200 - 2 * quality;
229
230 if (ctx->params.jpeg_qmat_tab[0]) {
231 memcpy(ctx->params.jpeg_qmat_tab[0], luma_q, 64);
232 coda_scale_quant_table(ctx->params.jpeg_qmat_tab[0], scale);
233 }
234 if (ctx->params.jpeg_qmat_tab[1]) {
235 memcpy(ctx->params.jpeg_qmat_tab[1], chroma_q, 64);
236 coda_scale_quant_table(ctx->params.jpeg_qmat_tab[1], scale);
237 }
238}
diff --git a/drivers/media/platform/coda/coda.h b/drivers/media/platform/coda/coda.h
index bbc18c0dacd9..5dd47e5f97c1 100644
--- a/drivers/media/platform/coda/coda.h
+++ b/drivers/media/platform/coda/coda.h
@@ -45,11 +45,15 @@ enum coda_product {
45 CODA_960 = 0xf020, 45 CODA_960 = 0xf020,
46}; 46};
47 47
48struct coda_video_device;
49
48struct coda_devtype { 50struct coda_devtype {
49 char *firmware; 51 char *firmware;
50 enum coda_product product; 52 enum coda_product product;
51 const struct coda_codec *codecs; 53 const struct coda_codec *codecs;
52 unsigned int num_codecs; 54 unsigned int num_codecs;
55 const struct coda_video_device **vdevs;
56 unsigned int num_vdevs;
53 size_t workbuf_size; 57 size_t workbuf_size;
54 size_t tempbuf_size; 58 size_t tempbuf_size;
55 size_t iram_size; 59 size_t iram_size;
@@ -65,7 +69,7 @@ struct coda_aux_buf {
65 69
66struct coda_dev { 70struct coda_dev {
67 struct v4l2_device v4l2_dev; 71 struct v4l2_device v4l2_dev;
68 struct video_device vfd[2]; 72 struct video_device vfd[5];
69 struct platform_device *plat_dev; 73 struct platform_device *plat_dev;
70 const struct coda_devtype *devtype; 74 const struct coda_devtype *devtype;
71 75
@@ -114,6 +118,9 @@ struct coda_params {
114 u8 mpeg4_inter_qp; 118 u8 mpeg4_inter_qp;
115 u8 gop_size; 119 u8 gop_size;
116 int intra_refresh; 120 int intra_refresh;
121 u8 jpeg_quality;
122 u8 jpeg_restart_interval;
123 u8 *jpeg_qmat_tab[3];
117 int codec_mode; 124 int codec_mode;
118 int codec_mode_aux; 125 int codec_mode_aux;
119 enum v4l2_mpeg_video_multi_slice_mode slice_mode; 126 enum v4l2_mpeg_video_multi_slice_mode slice_mode;
@@ -123,11 +130,13 @@ struct coda_params {
123 u32 slice_max_mb; 130 u32 slice_max_mb;
124}; 131};
125 132
126struct coda_timestamp { 133struct coda_buffer_meta {
127 struct list_head list; 134 struct list_head list;
128 u32 sequence; 135 u32 sequence;
129 struct v4l2_timecode timecode; 136 struct v4l2_timecode timecode;
130 struct timeval timestamp; 137 struct timeval timestamp;
138 u32 start;
139 u32 end;
131}; 140};
132 141
133/* Per-queue, driver-specific private data */ 142/* Per-queue, driver-specific private data */
@@ -183,6 +192,7 @@ struct coda_ctx {
183 struct work_struct pic_run_work; 192 struct work_struct pic_run_work;
184 struct work_struct seq_end_work; 193 struct work_struct seq_end_work;
185 struct completion completion; 194 struct completion completion;
195 const struct coda_video_device *cvd;
186 const struct coda_context_ops *ops; 196 const struct coda_context_ops *ops;
187 int aborting; 197 int aborting;
188 int initialized; 198 int initialized;
@@ -212,9 +222,9 @@ struct coda_ctx {
212 struct coda_aux_buf slicebuf; 222 struct coda_aux_buf slicebuf;
213 struct coda_aux_buf internal_frames[CODA_MAX_FRAMEBUFFERS]; 223 struct coda_aux_buf internal_frames[CODA_MAX_FRAMEBUFFERS];
214 u32 frame_types[CODA_MAX_FRAMEBUFFERS]; 224 u32 frame_types[CODA_MAX_FRAMEBUFFERS];
215 struct coda_timestamp frame_timestamps[CODA_MAX_FRAMEBUFFERS]; 225 struct coda_buffer_meta frame_metas[CODA_MAX_FRAMEBUFFERS];
216 u32 frame_errors[CODA_MAX_FRAMEBUFFERS]; 226 u32 frame_errors[CODA_MAX_FRAMEBUFFERS];
217 struct list_head timestamp_list; 227 struct list_head buffer_meta_list;
218 struct coda_aux_buf workbuf; 228 struct coda_aux_buf workbuf;
219 int num_internal_frames; 229 int num_internal_frames;
220 int idx; 230 int idx;
@@ -232,6 +242,8 @@ extern int coda_debug;
232 242
233void coda_write(struct coda_dev *dev, u32 data, u32 reg); 243void coda_write(struct coda_dev *dev, u32 data, u32 reg);
234unsigned int coda_read(struct coda_dev *dev, u32 reg); 244unsigned int coda_read(struct coda_dev *dev, u32 reg);
245void coda_write_base(struct coda_ctx *ctx, struct coda_q_data *q_data,
246 struct vb2_buffer *buf, unsigned int reg_y);
235 247
236int coda_alloc_aux_buf(struct coda_dev *dev, struct coda_aux_buf *buf, 248int coda_alloc_aux_buf(struct coda_dev *dev, struct coda_aux_buf *buf,
237 size_t size, const char *name, struct dentry *parent); 249 size_t size, const char *name, struct dentry *parent);
@@ -281,6 +293,10 @@ void coda_bit_stream_end_flag(struct coda_ctx *ctx);
281 293
282int coda_h264_padding(int size, char *p); 294int coda_h264_padding(int size, char *p);
283 295
296bool coda_jpeg_check_buffer(struct coda_ctx *ctx, struct vb2_buffer *vb);
297int coda_jpeg_write_tables(struct coda_ctx *ctx);
298void coda_set_jpeg_compression_quality(struct coda_ctx *ctx, int quality);
299
284extern const struct coda_context_ops coda_bit_encode_ops; 300extern const struct coda_context_ops coda_bit_encode_ops;
285extern const struct coda_context_ops coda_bit_decode_ops; 301extern const struct coda_context_ops coda_bit_decode_ops;
286 302
diff --git a/drivers/media/platform/coda/coda_regs.h b/drivers/media/platform/coda/coda_regs.h
index c791275e307b..8e015b8aa8fa 100644
--- a/drivers/media/platform/coda/coda_regs.h
+++ b/drivers/media/platform/coda/coda_regs.h
@@ -147,6 +147,7 @@
147#define CODA_CMD_DEC_SEQ_BB_START 0x180 147#define CODA_CMD_DEC_SEQ_BB_START 0x180
148#define CODA_CMD_DEC_SEQ_BB_SIZE 0x184 148#define CODA_CMD_DEC_SEQ_BB_SIZE 0x184
149#define CODA_CMD_DEC_SEQ_OPTION 0x188 149#define CODA_CMD_DEC_SEQ_OPTION 0x188
150#define CODA_NO_INT_ENABLE (1 << 10)
150#define CODA_REORDER_ENABLE (1 << 1) 151#define CODA_REORDER_ENABLE (1 << 1)
151#define CODADX6_QP_REPORT (1 << 0) 152#define CODADX6_QP_REPORT (1 << 0)
152#define CODA7_MP4_DEBLK_ENABLE (1 << 0) 153#define CODA7_MP4_DEBLK_ENABLE (1 << 0)
@@ -332,6 +333,12 @@
332#define CODA9_CMD_ENC_SEQ_ME_OPTION 0x1d8 333#define CODA9_CMD_ENC_SEQ_ME_OPTION 0x1d8
333#define CODA_RET_ENC_SEQ_SUCCESS 0x1c0 334#define CODA_RET_ENC_SEQ_SUCCESS 0x1c0
334 335
336#define CODA_CMD_ENC_SEQ_JPG_PARA 0x198
337#define CODA_CMD_ENC_SEQ_JPG_RST_INTERVAL 0x19C
338#define CODA_CMD_ENC_SEQ_JPG_THUMB_EN 0x1a0
339#define CODA_CMD_ENC_SEQ_JPG_THUMB_SIZE 0x1a4
340#define CODA_CMD_ENC_SEQ_JPG_THUMB_OFFSET 0x1a8
341
335/* Encoder Picture Run */ 342/* Encoder Picture Run */
336#define CODA9_CMD_ENC_PIC_SRC_INDEX 0x180 343#define CODA9_CMD_ENC_PIC_SRC_INDEX 0x180
337#define CODA9_CMD_ENC_PIC_SRC_STRIDE 0x184 344#define CODA9_CMD_ENC_PIC_SRC_STRIDE 0x184
diff --git a/drivers/media/platform/davinci/vpbe.c b/drivers/media/platform/davinci/vpbe.c
index 33b9660b7f77..244d3d6c244c 100644
--- a/drivers/media/platform/davinci/vpbe.c
+++ b/drivers/media/platform/davinci/vpbe.c
@@ -227,7 +227,7 @@ static int vpbe_set_output(struct vpbe_device *vpbe_dev, int index)
227 vpbe_current_encoder_info(vpbe_dev); 227 vpbe_current_encoder_info(vpbe_dev);
228 struct vpbe_config *cfg = vpbe_dev->cfg; 228 struct vpbe_config *cfg = vpbe_dev->cfg;
229 struct venc_platform_data *venc_device = vpbe_dev->venc_device; 229 struct venc_platform_data *venc_device = vpbe_dev->venc_device;
230 enum v4l2_mbus_pixelcode if_params; 230 u32 if_params;
231 int enc_out_index; 231 int enc_out_index;
232 int sd_index; 232 int sd_index;
233 int ret = 0; 233 int ret = 0;
@@ -341,7 +341,7 @@ static int vpbe_s_dv_timings(struct vpbe_device *vpbe_dev,
341 341
342 if (!(cfg->outputs[out_index].output.capabilities & 342 if (!(cfg->outputs[out_index].output.capabilities &
343 V4L2_OUT_CAP_DV_TIMINGS)) 343 V4L2_OUT_CAP_DV_TIMINGS))
344 return -EINVAL; 344 return -ENODATA;
345 345
346 for (i = 0; i < output->num_modes; i++) { 346 for (i = 0; i < output->num_modes; i++) {
347 if (output->modes[i].timings_type == VPBE_ENC_DV_TIMINGS && 347 if (output->modes[i].timings_type == VPBE_ENC_DV_TIMINGS &&
@@ -384,6 +384,13 @@ static int vpbe_s_dv_timings(struct vpbe_device *vpbe_dev,
384static int vpbe_g_dv_timings(struct vpbe_device *vpbe_dev, 384static int vpbe_g_dv_timings(struct vpbe_device *vpbe_dev,
385 struct v4l2_dv_timings *dv_timings) 385 struct v4l2_dv_timings *dv_timings)
386{ 386{
387 struct vpbe_config *cfg = vpbe_dev->cfg;
388 int out_index = vpbe_dev->current_out_index;
389
390 if (!(cfg->outputs[out_index].output.capabilities &
391 V4L2_OUT_CAP_DV_TIMINGS))
392 return -ENODATA;
393
387 if (vpbe_dev->current_timings.timings_type & 394 if (vpbe_dev->current_timings.timings_type &
388 VPBE_ENC_DV_TIMINGS) { 395 VPBE_ENC_DV_TIMINGS) {
389 *dv_timings = vpbe_dev->current_timings.dv_timings; 396 *dv_timings = vpbe_dev->current_timings.dv_timings;
@@ -409,7 +416,7 @@ static int vpbe_enum_dv_timings(struct vpbe_device *vpbe_dev,
409 int i; 416 int i;
410 417
411 if (!(output->output.capabilities & V4L2_OUT_CAP_DV_TIMINGS)) 418 if (!(output->output.capabilities & V4L2_OUT_CAP_DV_TIMINGS))
412 return -EINVAL; 419 return -ENODATA;
413 420
414 for (i = 0; i < output->num_modes; i++) { 421 for (i = 0; i < output->num_modes; i++) {
415 if (output->modes[i].timings_type == VPBE_ENC_DV_TIMINGS) { 422 if (output->modes[i].timings_type == VPBE_ENC_DV_TIMINGS) {
@@ -440,7 +447,7 @@ static int vpbe_s_std(struct vpbe_device *vpbe_dev, v4l2_std_id std_id)
440 447
441 if (!(cfg->outputs[out_index].output.capabilities & 448 if (!(cfg->outputs[out_index].output.capabilities &
442 V4L2_OUT_CAP_STD)) 449 V4L2_OUT_CAP_STD))
443 return -EINVAL; 450 return -ENODATA;
444 451
445 ret = vpbe_get_std_info(vpbe_dev, std_id); 452 ret = vpbe_get_std_info(vpbe_dev, std_id);
446 if (ret) 453 if (ret)
@@ -473,6 +480,11 @@ static int vpbe_s_std(struct vpbe_device *vpbe_dev, v4l2_std_id std_id)
473static int vpbe_g_std(struct vpbe_device *vpbe_dev, v4l2_std_id *std_id) 480static int vpbe_g_std(struct vpbe_device *vpbe_dev, v4l2_std_id *std_id)
474{ 481{
475 struct vpbe_enc_mode_info *cur_timings = &vpbe_dev->current_timings; 482 struct vpbe_enc_mode_info *cur_timings = &vpbe_dev->current_timings;
483 struct vpbe_config *cfg = vpbe_dev->cfg;
484 int out_index = vpbe_dev->current_out_index;
485
486 if (!(cfg->outputs[out_index].output.capabilities & V4L2_OUT_CAP_STD))
487 return -ENODATA;
476 488
477 if (cur_timings->timings_type & VPBE_ENC_STD) { 489 if (cur_timings->timings_type & VPBE_ENC_STD) {
478 *std_id = cur_timings->std_id; 490 *std_id = cur_timings->std_id;
@@ -613,6 +625,7 @@ static int vpbe_initialize(struct device *dev, struct vpbe_device *vpbe_dev)
613 } 625 }
614 if (clk_prepare_enable(vpbe_dev->dac_clk)) { 626 if (clk_prepare_enable(vpbe_dev->dac_clk)) {
615 ret = -ENODEV; 627 ret = -ENODEV;
628 clk_put(vpbe_dev->dac_clk);
616 goto fail_mutex_unlock; 629 goto fail_mutex_unlock;
617 } 630 }
618 } 631 }
diff --git a/drivers/media/platform/davinci/vpbe_display.c b/drivers/media/platform/davinci/vpbe_display.c
index 73496d953ba0..21a5a56eb5ea 100644
--- a/drivers/media/platform/davinci/vpbe_display.c
+++ b/drivers/media/platform/davinci/vpbe_display.c
@@ -152,8 +152,8 @@ static irqreturn_t venc_isr(int irq, void *arg)
152 152
153 for (i = 0; i < VPBE_DISPLAY_MAX_DEVICES; i++) { 153 for (i = 0; i < VPBE_DISPLAY_MAX_DEVICES; i++) {
154 layer = disp_dev->dev[i]; 154 layer = disp_dev->dev[i];
155 /* If streaming is started in this layer */ 155
156 if (!layer->started) 156 if (!vb2_start_streaming_called(&layer->buffer_queue))
157 continue; 157 continue;
158 158
159 if (layer->layer_first_int) { 159 if (layer->layer_first_int) {
@@ -207,31 +207,23 @@ static irqreturn_t venc_isr(int irq, void *arg)
207 */ 207 */
208static int vpbe_buffer_prepare(struct vb2_buffer *vb) 208static int vpbe_buffer_prepare(struct vb2_buffer *vb)
209{ 209{
210 struct vpbe_fh *fh = vb2_get_drv_priv(vb->vb2_queue);
211 struct vb2_queue *q = vb->vb2_queue; 210 struct vb2_queue *q = vb->vb2_queue;
212 struct vpbe_layer *layer = fh->layer; 211 struct vpbe_layer *layer = vb2_get_drv_priv(q);
213 struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev; 212 struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
214 unsigned long addr; 213 unsigned long addr;
215 214
216 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, 215 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
217 "vpbe_buffer_prepare\n"); 216 "vpbe_buffer_prepare\n");
218 217
219 if (vb->state != VB2_BUF_STATE_ACTIVE && 218 vb2_set_plane_payload(vb, 0, layer->pix_fmt.sizeimage);
220 vb->state != VB2_BUF_STATE_PREPARED) { 219 if (vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0))
221 vb2_set_plane_payload(vb, 0, layer->pix_fmt.sizeimage); 220 return -EINVAL;
222 if (vb2_plane_vaddr(vb, 0) &&
223 vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0))
224 return -EINVAL;
225 221
226 addr = vb2_dma_contig_plane_dma_addr(vb, 0); 222 addr = vb2_dma_contig_plane_dma_addr(vb, 0);
227 if (q->streaming) { 223 if (!IS_ALIGNED(addr, 8)) {
228 if (!IS_ALIGNED(addr, 8)) { 224 v4l2_err(&vpbe_dev->v4l2_dev,
229 v4l2_err(&vpbe_dev->v4l2_dev, 225 "buffer_prepare:offset is not aligned to 32 bytes\n");
230 "buffer_prepare:offset is \ 226 return -EINVAL;
231 not aligned to 32 bytes\n");
232 return -EINVAL;
233 }
234 }
235 } 227 }
236 return 0; 228 return 0;
237} 229}
@@ -247,18 +239,20 @@ vpbe_buffer_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
247 239
248{ 240{
249 /* Get the file handle object and layer object */ 241 /* Get the file handle object and layer object */
250 struct vpbe_fh *fh = vb2_get_drv_priv(vq); 242 struct vpbe_layer *layer = vb2_get_drv_priv(vq);
251 struct vpbe_layer *layer = fh->layer; 243 struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
252 struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
253 244
254 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "vpbe_buffer_setup\n"); 245 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "vpbe_buffer_setup\n");
255 246
247 if (fmt && fmt->fmt.pix.sizeimage < layer->pix_fmt.sizeimage)
248 return -EINVAL;
249
256 /* Store number of buffers allocated in numbuffer member */ 250 /* Store number of buffers allocated in numbuffer member */
257 if (*nbuffers < VPBE_DEFAULT_NUM_BUFS) 251 if (vq->num_buffers + *nbuffers < VPBE_DEFAULT_NUM_BUFS)
258 *nbuffers = layer->numbuffers = VPBE_DEFAULT_NUM_BUFS; 252 *nbuffers = VPBE_DEFAULT_NUM_BUFS - vq->num_buffers;
259 253
260 *nplanes = 1; 254 *nplanes = 1;
261 sizes[0] = layer->pix_fmt.sizeimage; 255 sizes[0] = fmt ? fmt->fmt.pix.sizeimage : layer->pix_fmt.sizeimage;
262 alloc_ctxs[0] = layer->alloc_ctx; 256 alloc_ctxs[0] = layer->alloc_ctx;
263 257
264 return 0; 258 return 0;
@@ -271,12 +265,11 @@ vpbe_buffer_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
271static void vpbe_buffer_queue(struct vb2_buffer *vb) 265static void vpbe_buffer_queue(struct vb2_buffer *vb)
272{ 266{
273 /* Get the file handle object and layer object */ 267 /* Get the file handle object and layer object */
274 struct vpbe_fh *fh = vb2_get_drv_priv(vb->vb2_queue);
275 struct vpbe_disp_buffer *buf = container_of(vb, 268 struct vpbe_disp_buffer *buf = container_of(vb,
276 struct vpbe_disp_buffer, vb); 269 struct vpbe_disp_buffer, vb);
277 struct vpbe_layer *layer = fh->layer; 270 struct vpbe_layer *layer = vb2_get_drv_priv(vb->vb2_queue);
278 struct vpbe_display *disp = fh->disp_dev; 271 struct vpbe_display *disp = layer->disp_dev;
279 struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev; 272 struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
280 unsigned long flags; 273 unsigned long flags;
281 274
282 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, 275 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
@@ -288,61 +281,14 @@ static void vpbe_buffer_queue(struct vb2_buffer *vb)
288 spin_unlock_irqrestore(&disp->dma_queue_lock, flags); 281 spin_unlock_irqrestore(&disp->dma_queue_lock, flags);
289} 282}
290 283
291/*
292 * vpbe_buf_cleanup()
293 * This function is called from the vb2 layer to free memory allocated to
294 * the buffers
295 */
296static void vpbe_buf_cleanup(struct vb2_buffer *vb)
297{
298 /* Get the file handle object and layer object */
299 struct vpbe_fh *fh = vb2_get_drv_priv(vb->vb2_queue);
300 struct vpbe_layer *layer = fh->layer;
301 struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
302 struct vpbe_disp_buffer *buf = container_of(vb,
303 struct vpbe_disp_buffer, vb);
304 unsigned long flags;
305
306 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
307 "vpbe_buf_cleanup\n");
308
309 spin_lock_irqsave(&layer->irqlock, flags);
310 if (vb->state == VB2_BUF_STATE_ACTIVE)
311 list_del_init(&buf->list);
312 spin_unlock_irqrestore(&layer->irqlock, flags);
313}
314
315static void vpbe_wait_prepare(struct vb2_queue *vq)
316{
317 struct vpbe_fh *fh = vb2_get_drv_priv(vq);
318 struct vpbe_layer *layer = fh->layer;
319
320 mutex_unlock(&layer->opslock);
321}
322
323static void vpbe_wait_finish(struct vb2_queue *vq)
324{
325 struct vpbe_fh *fh = vb2_get_drv_priv(vq);
326 struct vpbe_layer *layer = fh->layer;
327
328 mutex_lock(&layer->opslock);
329}
330
331static int vpbe_buffer_init(struct vb2_buffer *vb)
332{
333 struct vpbe_disp_buffer *buf = container_of(vb,
334 struct vpbe_disp_buffer, vb);
335
336 INIT_LIST_HEAD(&buf->list);
337 return 0;
338}
339
340static int vpbe_start_streaming(struct vb2_queue *vq, unsigned int count) 284static int vpbe_start_streaming(struct vb2_queue *vq, unsigned int count)
341{ 285{
342 struct vpbe_fh *fh = vb2_get_drv_priv(vq); 286 struct vpbe_layer *layer = vb2_get_drv_priv(vq);
343 struct vpbe_layer *layer = fh->layer; 287 struct osd_state *osd_device = layer->disp_dev->osd_device;
344 int ret; 288 int ret;
345 289
290 osd_device->ops.disable_layer(osd_device, layer->layer_info.id);
291
346 /* Get the next frame from the buffer queue */ 292 /* Get the next frame from the buffer queue */
347 layer->next_frm = layer->cur_frm = list_entry(layer->dma_queue.next, 293 layer->next_frm = layer->cur_frm = list_entry(layer->dma_queue.next,
348 struct vpbe_disp_buffer, list); 294 struct vpbe_disp_buffer, list);
@@ -354,7 +300,7 @@ static int vpbe_start_streaming(struct vb2_queue *vq, unsigned int count)
354 layer->field_id = 0; 300 layer->field_id = 0;
355 301
356 /* Set parameters in OSD and VENC */ 302 /* Set parameters in OSD and VENC */
357 ret = vpbe_set_osd_display_params(fh->disp_dev, layer); 303 ret = vpbe_set_osd_display_params(layer->disp_dev, layer);
358 if (ret < 0) { 304 if (ret < 0) {
359 struct vpbe_disp_buffer *buf, *tmp; 305 struct vpbe_disp_buffer *buf, *tmp;
360 306
@@ -371,7 +317,6 @@ static int vpbe_start_streaming(struct vb2_queue *vq, unsigned int count)
371 * if request format is yuv420 semiplanar, need to 317 * if request format is yuv420 semiplanar, need to
372 * enable both video windows 318 * enable both video windows
373 */ 319 */
374 layer->started = 1;
375 layer->layer_first_int = 1; 320 layer->layer_first_int = 1;
376 321
377 return ret; 322 return ret;
@@ -379,14 +324,16 @@ static int vpbe_start_streaming(struct vb2_queue *vq, unsigned int count)
379 324
380static void vpbe_stop_streaming(struct vb2_queue *vq) 325static void vpbe_stop_streaming(struct vb2_queue *vq)
381{ 326{
382 struct vpbe_fh *fh = vb2_get_drv_priv(vq); 327 struct vpbe_layer *layer = vb2_get_drv_priv(vq);
383 struct vpbe_layer *layer = fh->layer; 328 struct osd_state *osd_device = layer->disp_dev->osd_device;
384 struct vpbe_display *disp = fh->disp_dev; 329 struct vpbe_display *disp = layer->disp_dev;
385 unsigned long flags; 330 unsigned long flags;
386 331
387 if (!vb2_is_streaming(vq)) 332 if (!vb2_is_streaming(vq))
388 return; 333 return;
389 334
335 osd_device->ops.disable_layer(osd_device, layer->layer_info.id);
336
390 /* release all active buffers */ 337 /* release all active buffers */
391 spin_lock_irqsave(&disp->dma_queue_lock, flags); 338 spin_lock_irqsave(&disp->dma_queue_lock, flags);
392 if (layer->cur_frm == layer->next_frm) { 339 if (layer->cur_frm == layer->next_frm) {
@@ -411,13 +358,11 @@ static void vpbe_stop_streaming(struct vb2_queue *vq)
411 358
412static struct vb2_ops video_qops = { 359static struct vb2_ops video_qops = {
413 .queue_setup = vpbe_buffer_queue_setup, 360 .queue_setup = vpbe_buffer_queue_setup,
414 .wait_prepare = vpbe_wait_prepare, 361 .wait_prepare = vb2_ops_wait_prepare,
415 .wait_finish = vpbe_wait_finish, 362 .wait_finish = vb2_ops_wait_finish,
416 .buf_init = vpbe_buffer_init,
417 .buf_prepare = vpbe_buffer_prepare, 363 .buf_prepare = vpbe_buffer_prepare,
418 .start_streaming = vpbe_start_streaming, 364 .start_streaming = vpbe_start_streaming,
419 .stop_streaming = vpbe_stop_streaming, 365 .stop_streaming = vpbe_stop_streaming,
420 .buf_cleanup = vpbe_buf_cleanup,
421 .buf_queue = vpbe_buffer_queue, 366 .buf_queue = vpbe_buffer_queue,
422}; 367};
423 368
@@ -691,10 +636,9 @@ static int vpbe_try_format(struct vpbe_display *disp_dev,
691static int vpbe_display_querycap(struct file *file, void *priv, 636static int vpbe_display_querycap(struct file *file, void *priv,
692 struct v4l2_capability *cap) 637 struct v4l2_capability *cap)
693{ 638{
694 struct vpbe_fh *fh = file->private_data; 639 struct vpbe_layer *layer = video_drvdata(file);
695 struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev; 640 struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
696 641
697 cap->version = VPBE_DISPLAY_VERSION_CODE;
698 cap->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING; 642 cap->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
699 cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS; 643 cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
700 snprintf(cap->driver, sizeof(cap->driver), "%s", 644 snprintf(cap->driver, sizeof(cap->driver), "%s",
@@ -709,9 +653,8 @@ static int vpbe_display_querycap(struct file *file, void *priv,
709static int vpbe_display_s_crop(struct file *file, void *priv, 653static int vpbe_display_s_crop(struct file *file, void *priv,
710 const struct v4l2_crop *crop) 654 const struct v4l2_crop *crop)
711{ 655{
712 struct vpbe_fh *fh = file->private_data; 656 struct vpbe_layer *layer = video_drvdata(file);
713 struct vpbe_layer *layer = fh->layer; 657 struct vpbe_display *disp_dev = layer->disp_dev;
714 struct vpbe_display *disp_dev = fh->disp_dev;
715 struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev; 658 struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
716 struct osd_layer_config *cfg = &layer->layer_info.config; 659 struct osd_layer_config *cfg = &layer->layer_info.config;
717 struct osd_state *osd_device = disp_dev->osd_device; 660 struct osd_state *osd_device = disp_dev->osd_device;
@@ -778,11 +721,10 @@ static int vpbe_display_s_crop(struct file *file, void *priv,
778static int vpbe_display_g_crop(struct file *file, void *priv, 721static int vpbe_display_g_crop(struct file *file, void *priv,
779 struct v4l2_crop *crop) 722 struct v4l2_crop *crop)
780{ 723{
781 struct vpbe_fh *fh = file->private_data; 724 struct vpbe_layer *layer = video_drvdata(file);
782 struct vpbe_layer *layer = fh->layer;
783 struct osd_layer_config *cfg = &layer->layer_info.config; 725 struct osd_layer_config *cfg = &layer->layer_info.config;
784 struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev; 726 struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
785 struct osd_state *osd_device = fh->disp_dev->osd_device; 727 struct osd_state *osd_device = layer->disp_dev->osd_device;
786 struct v4l2_rect *rect = &crop->c; 728 struct v4l2_rect *rect = &crop->c;
787 729
788 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, 730 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
@@ -806,8 +748,8 @@ static int vpbe_display_g_crop(struct file *file, void *priv,
806static int vpbe_display_cropcap(struct file *file, void *priv, 748static int vpbe_display_cropcap(struct file *file, void *priv,
807 struct v4l2_cropcap *cropcap) 749 struct v4l2_cropcap *cropcap)
808{ 750{
809 struct vpbe_fh *fh = file->private_data; 751 struct vpbe_layer *layer = video_drvdata(file);
810 struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev; 752 struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
811 753
812 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_CROPCAP ioctl\n"); 754 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_CROPCAP ioctl\n");
813 755
@@ -824,9 +766,8 @@ static int vpbe_display_cropcap(struct file *file, void *priv,
824static int vpbe_display_g_fmt(struct file *file, void *priv, 766static int vpbe_display_g_fmt(struct file *file, void *priv,
825 struct v4l2_format *fmt) 767 struct v4l2_format *fmt)
826{ 768{
827 struct vpbe_fh *fh = file->private_data; 769 struct vpbe_layer *layer = video_drvdata(file);
828 struct vpbe_layer *layer = fh->layer; 770 struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
829 struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
830 771
831 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, 772 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
832 "VIDIOC_G_FMT, layer id = %d\n", 773 "VIDIOC_G_FMT, layer id = %d\n",
@@ -846,9 +787,8 @@ static int vpbe_display_g_fmt(struct file *file, void *priv,
846static int vpbe_display_enum_fmt(struct file *file, void *priv, 787static int vpbe_display_enum_fmt(struct file *file, void *priv,
847 struct v4l2_fmtdesc *fmt) 788 struct v4l2_fmtdesc *fmt)
848{ 789{
849 struct vpbe_fh *fh = file->private_data; 790 struct vpbe_layer *layer = video_drvdata(file);
850 struct vpbe_layer *layer = fh->layer; 791 struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
851 struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
852 unsigned int index = 0; 792 unsigned int index = 0;
853 793
854 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, 794 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
@@ -878,9 +818,8 @@ static int vpbe_display_enum_fmt(struct file *file, void *priv,
878static int vpbe_display_s_fmt(struct file *file, void *priv, 818static int vpbe_display_s_fmt(struct file *file, void *priv,
879 struct v4l2_format *fmt) 819 struct v4l2_format *fmt)
880{ 820{
881 struct vpbe_fh *fh = file->private_data; 821 struct vpbe_layer *layer = video_drvdata(file);
882 struct vpbe_layer *layer = fh->layer; 822 struct vpbe_display *disp_dev = layer->disp_dev;
883 struct vpbe_display *disp_dev = fh->disp_dev;
884 struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev; 823 struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
885 struct osd_layer_config *cfg = &layer->layer_info.config; 824 struct osd_layer_config *cfg = &layer->layer_info.config;
886 struct v4l2_pix_format *pixfmt = &fmt->fmt.pix; 825 struct v4l2_pix_format *pixfmt = &fmt->fmt.pix;
@@ -891,11 +830,9 @@ static int vpbe_display_s_fmt(struct file *file, void *priv,
891 "VIDIOC_S_FMT, layer id = %d\n", 830 "VIDIOC_S_FMT, layer id = %d\n",
892 layer->device_id); 831 layer->device_id);
893 832
894 /* If streaming is started, return error */ 833 if (vb2_is_busy(&layer->buffer_queue))
895 if (layer->started) {
896 v4l2_err(&vpbe_dev->v4l2_dev, "Streaming is started\n");
897 return -EBUSY; 834 return -EBUSY;
898 } 835
899 if (V4L2_BUF_TYPE_VIDEO_OUTPUT != fmt->type) { 836 if (V4L2_BUF_TYPE_VIDEO_OUTPUT != fmt->type) {
900 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "invalid type\n"); 837 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "invalid type\n");
901 return -EINVAL; 838 return -EINVAL;
@@ -967,9 +904,9 @@ static int vpbe_display_s_fmt(struct file *file, void *priv,
967static int vpbe_display_try_fmt(struct file *file, void *priv, 904static int vpbe_display_try_fmt(struct file *file, void *priv,
968 struct v4l2_format *fmt) 905 struct v4l2_format *fmt)
969{ 906{
970 struct vpbe_fh *fh = file->private_data; 907 struct vpbe_layer *layer = video_drvdata(file);
971 struct vpbe_display *disp_dev = fh->disp_dev; 908 struct vpbe_display *disp_dev = layer->disp_dev;
972 struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev; 909 struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
973 struct v4l2_pix_format *pixfmt = &fmt->fmt.pix; 910 struct v4l2_pix_format *pixfmt = &fmt->fmt.pix;
974 911
975 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_TRY_FMT\n"); 912 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_TRY_FMT\n");
@@ -993,18 +930,15 @@ static int vpbe_display_try_fmt(struct file *file, void *priv,
993static int vpbe_display_s_std(struct file *file, void *priv, 930static int vpbe_display_s_std(struct file *file, void *priv,
994 v4l2_std_id std_id) 931 v4l2_std_id std_id)
995{ 932{
996 struct vpbe_fh *fh = priv; 933 struct vpbe_layer *layer = video_drvdata(file);
997 struct vpbe_layer *layer = fh->layer; 934 struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
998 struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
999 int ret; 935 int ret;
1000 936
1001 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_S_STD\n"); 937 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_S_STD\n");
1002 938
1003 /* If streaming is started, return error */ 939 if (vb2_is_busy(&layer->buffer_queue))
1004 if (layer->started) {
1005 v4l2_err(&vpbe_dev->v4l2_dev, "Streaming is started\n");
1006 return -EBUSY; 940 return -EBUSY;
1007 } 941
1008 if (NULL != vpbe_dev->ops.s_std) { 942 if (NULL != vpbe_dev->ops.s_std) {
1009 ret = vpbe_dev->ops.s_std(vpbe_dev, std_id); 943 ret = vpbe_dev->ops.s_std(vpbe_dev, std_id);
1010 if (ret) { 944 if (ret) {
@@ -1028,8 +962,8 @@ static int vpbe_display_s_std(struct file *file, void *priv,
1028static int vpbe_display_g_std(struct file *file, void *priv, 962static int vpbe_display_g_std(struct file *file, void *priv,
1029 v4l2_std_id *std_id) 963 v4l2_std_id *std_id)
1030{ 964{
1031 struct vpbe_fh *fh = priv; 965 struct vpbe_layer *layer = video_drvdata(file);
1032 struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev; 966 struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
1033 967
1034 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_G_STD\n"); 968 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_G_STD\n");
1035 969
@@ -1051,8 +985,8 @@ static int vpbe_display_g_std(struct file *file, void *priv,
1051static int vpbe_display_enum_output(struct file *file, void *priv, 985static int vpbe_display_enum_output(struct file *file, void *priv,
1052 struct v4l2_output *output) 986 struct v4l2_output *output)
1053{ 987{
1054 struct vpbe_fh *fh = priv; 988 struct vpbe_layer *layer = video_drvdata(file);
1055 struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev; 989 struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
1056 int ret; 990 int ret;
1057 991
1058 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_ENUM_OUTPUT\n"); 992 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_ENUM_OUTPUT\n");
@@ -1079,17 +1013,15 @@ static int vpbe_display_enum_output(struct file *file, void *priv,
1079static int vpbe_display_s_output(struct file *file, void *priv, 1013static int vpbe_display_s_output(struct file *file, void *priv,
1080 unsigned int i) 1014 unsigned int i)
1081{ 1015{
1082 struct vpbe_fh *fh = priv; 1016 struct vpbe_layer *layer = video_drvdata(file);
1083 struct vpbe_layer *layer = fh->layer; 1017 struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
1084 struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
1085 int ret; 1018 int ret;
1086 1019
1087 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_S_OUTPUT\n"); 1020 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_S_OUTPUT\n");
1088 /* If streaming is started, return error */ 1021
1089 if (layer->started) { 1022 if (vb2_is_busy(&layer->buffer_queue))
1090 v4l2_err(&vpbe_dev->v4l2_dev, "Streaming is started\n");
1091 return -EBUSY; 1023 return -EBUSY;
1092 } 1024
1093 if (NULL == vpbe_dev->ops.set_output) 1025 if (NULL == vpbe_dev->ops.set_output)
1094 return -EINVAL; 1026 return -EINVAL;
1095 1027
@@ -1110,8 +1042,8 @@ static int vpbe_display_s_output(struct file *file, void *priv,
1110static int vpbe_display_g_output(struct file *file, void *priv, 1042static int vpbe_display_g_output(struct file *file, void *priv,
1111 unsigned int *i) 1043 unsigned int *i)
1112{ 1044{
1113 struct vpbe_fh *fh = priv; 1045 struct vpbe_layer *layer = video_drvdata(file);
1114 struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev; 1046 struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
1115 1047
1116 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_G_OUTPUT\n"); 1048 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_G_OUTPUT\n");
1117 /* Get the standard from the current encoder */ 1049 /* Get the standard from the current encoder */
@@ -1130,8 +1062,8 @@ static int
1130vpbe_display_enum_dv_timings(struct file *file, void *priv, 1062vpbe_display_enum_dv_timings(struct file *file, void *priv,
1131 struct v4l2_enum_dv_timings *timings) 1063 struct v4l2_enum_dv_timings *timings)
1132{ 1064{
1133 struct vpbe_fh *fh = priv; 1065 struct vpbe_layer *layer = video_drvdata(file);
1134 struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev; 1066 struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
1135 int ret; 1067 int ret;
1136 1068
1137 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_ENUM_DV_TIMINGS\n"); 1069 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_ENUM_DV_TIMINGS\n");
@@ -1160,19 +1092,14 @@ static int
1160vpbe_display_s_dv_timings(struct file *file, void *priv, 1092vpbe_display_s_dv_timings(struct file *file, void *priv,
1161 struct v4l2_dv_timings *timings) 1093 struct v4l2_dv_timings *timings)
1162{ 1094{
1163 struct vpbe_fh *fh = priv; 1095 struct vpbe_layer *layer = video_drvdata(file);
1164 struct vpbe_layer *layer = fh->layer; 1096 struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
1165 struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
1166 int ret; 1097 int ret;
1167 1098
1168 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_S_DV_TIMINGS\n"); 1099 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_S_DV_TIMINGS\n");
1169 1100
1170 1101 if (vb2_is_busy(&layer->buffer_queue))
1171 /* If streaming is started, return error */
1172 if (layer->started) {
1173 v4l2_err(&vpbe_dev->v4l2_dev, "Streaming is started\n");
1174 return -EBUSY; 1102 return -EBUSY;
1175 }
1176 1103
1177 /* Set the given standard in the encoder */ 1104 /* Set the given standard in the encoder */
1178 if (!vpbe_dev->ops.s_dv_timings) 1105 if (!vpbe_dev->ops.s_dv_timings)
@@ -1198,8 +1125,8 @@ static int
1198vpbe_display_g_dv_timings(struct file *file, void *priv, 1125vpbe_display_g_dv_timings(struct file *file, void *priv,
1199 struct v4l2_dv_timings *dv_timings) 1126 struct v4l2_dv_timings *dv_timings)
1200{ 1127{
1201 struct vpbe_fh *fh = priv; 1128 struct vpbe_layer *layer = video_drvdata(file);
1202 struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev; 1129 struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
1203 1130
1204 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_G_DV_TIMINGS\n"); 1131 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_G_DV_TIMINGS\n");
1205 1132
@@ -1215,259 +1142,6 @@ vpbe_display_g_dv_timings(struct file *file, void *priv,
1215 return 0; 1142 return 0;
1216} 1143}
1217 1144
1218static int vpbe_display_streamoff(struct file *file, void *priv,
1219 enum v4l2_buf_type buf_type)
1220{
1221 struct vpbe_fh *fh = file->private_data;
1222 struct vpbe_layer *layer = fh->layer;
1223 struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
1224 struct osd_state *osd_device = fh->disp_dev->osd_device;
1225 int ret;
1226
1227 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
1228 "VIDIOC_STREAMOFF,layer id = %d\n",
1229 layer->device_id);
1230
1231 if (V4L2_BUF_TYPE_VIDEO_OUTPUT != buf_type) {
1232 v4l2_err(&vpbe_dev->v4l2_dev, "Invalid buffer type\n");
1233 return -EINVAL;
1234 }
1235
1236 /* If io is allowed for this file handle, return error */
1237 if (!fh->io_allowed) {
1238 v4l2_err(&vpbe_dev->v4l2_dev, "No io_allowed\n");
1239 return -EACCES;
1240 }
1241
1242 /* If streaming is not started, return error */
1243 if (!layer->started) {
1244 v4l2_err(&vpbe_dev->v4l2_dev, "streaming not started in layer"
1245 " id = %d\n", layer->device_id);
1246 return -EINVAL;
1247 }
1248
1249 osd_device->ops.disable_layer(osd_device,
1250 layer->layer_info.id);
1251 layer->started = 0;
1252 ret = vb2_streamoff(&layer->buffer_queue, buf_type);
1253
1254 return ret;
1255}
1256
1257static int vpbe_display_streamon(struct file *file, void *priv,
1258 enum v4l2_buf_type buf_type)
1259{
1260 struct vpbe_fh *fh = file->private_data;
1261 struct vpbe_layer *layer = fh->layer;
1262 struct vpbe_display *disp_dev = fh->disp_dev;
1263 struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
1264 struct osd_state *osd_device = disp_dev->osd_device;
1265 int ret;
1266
1267 osd_device->ops.disable_layer(osd_device,
1268 layer->layer_info.id);
1269
1270 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_STREAMON, layerid=%d\n",
1271 layer->device_id);
1272
1273 if (V4L2_BUF_TYPE_VIDEO_OUTPUT != buf_type) {
1274 v4l2_err(&vpbe_dev->v4l2_dev, "Invalid buffer type\n");
1275 return -EINVAL;
1276 }
1277
1278 /* If file handle is not allowed IO, return error */
1279 if (!fh->io_allowed) {
1280 v4l2_err(&vpbe_dev->v4l2_dev, "No io_allowed\n");
1281 return -EACCES;
1282 }
1283 /* If Streaming is already started, return error */
1284 if (layer->started) {
1285 v4l2_err(&vpbe_dev->v4l2_dev, "layer is already streaming\n");
1286 return -EBUSY;
1287 }
1288
1289 /*
1290 * Call vb2_streamon to start streaming
1291 * in videobuf
1292 */
1293 ret = vb2_streamon(&layer->buffer_queue, buf_type);
1294 if (ret) {
1295 v4l2_err(&vpbe_dev->v4l2_dev,
1296 "error in vb2_streamon\n");
1297 return ret;
1298 }
1299 return ret;
1300}
1301
1302static int vpbe_display_dqbuf(struct file *file, void *priv,
1303 struct v4l2_buffer *buf)
1304{
1305 struct vpbe_fh *fh = file->private_data;
1306 struct vpbe_layer *layer = fh->layer;
1307 struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
1308 int ret;
1309
1310 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
1311 "VIDIOC_DQBUF, layer id = %d\n",
1312 layer->device_id);
1313
1314 if (V4L2_BUF_TYPE_VIDEO_OUTPUT != buf->type) {
1315 v4l2_err(&vpbe_dev->v4l2_dev, "Invalid buffer type\n");
1316 return -EINVAL;
1317 }
1318 /* If this file handle is not allowed to do IO, return error */
1319 if (!fh->io_allowed) {
1320 v4l2_err(&vpbe_dev->v4l2_dev, "No io_allowed\n");
1321 return -EACCES;
1322 }
1323 if (file->f_flags & O_NONBLOCK)
1324 /* Call videobuf_dqbuf for non blocking mode */
1325 ret = vb2_dqbuf(&layer->buffer_queue, buf, 1);
1326 else
1327 /* Call videobuf_dqbuf for blocking mode */
1328 ret = vb2_dqbuf(&layer->buffer_queue, buf, 0);
1329
1330 return ret;
1331}
1332
1333static int vpbe_display_qbuf(struct file *file, void *priv,
1334 struct v4l2_buffer *p)
1335{
1336 struct vpbe_fh *fh = file->private_data;
1337 struct vpbe_layer *layer = fh->layer;
1338 struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
1339
1340 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
1341 "VIDIOC_QBUF, layer id = %d\n",
1342 layer->device_id);
1343
1344 if (V4L2_BUF_TYPE_VIDEO_OUTPUT != p->type) {
1345 v4l2_err(&vpbe_dev->v4l2_dev, "Invalid buffer type\n");
1346 return -EINVAL;
1347 }
1348
1349 /* If this file handle is not allowed to do IO, return error */
1350 if (!fh->io_allowed) {
1351 v4l2_err(&vpbe_dev->v4l2_dev, "No io_allowed\n");
1352 return -EACCES;
1353 }
1354
1355 return vb2_qbuf(&layer->buffer_queue, p);
1356}
1357
1358static int vpbe_display_querybuf(struct file *file, void *priv,
1359 struct v4l2_buffer *buf)
1360{
1361 struct vpbe_fh *fh = file->private_data;
1362 struct vpbe_layer *layer = fh->layer;
1363 struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
1364
1365 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
1366 "VIDIOC_QUERYBUF, layer id = %d\n",
1367 layer->device_id);
1368
1369 if (V4L2_BUF_TYPE_VIDEO_OUTPUT != buf->type) {
1370 v4l2_err(&vpbe_dev->v4l2_dev, "Invalid buffer type\n");
1371 return -EINVAL;
1372 }
1373 /* Call vb2_querybuf to get information */
1374 return vb2_querybuf(&layer->buffer_queue, buf);
1375}
1376
1377static int vpbe_display_reqbufs(struct file *file, void *priv,
1378 struct v4l2_requestbuffers *req_buf)
1379{
1380 struct vpbe_fh *fh = file->private_data;
1381 struct vpbe_layer *layer = fh->layer;
1382 struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
1383 struct vb2_queue *q;
1384 int ret;
1385 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "vpbe_display_reqbufs\n");
1386
1387 if (V4L2_BUF_TYPE_VIDEO_OUTPUT != req_buf->type) {
1388 v4l2_err(&vpbe_dev->v4l2_dev, "Invalid buffer type\n");
1389 return -EINVAL;
1390 }
1391
1392 /* If io users of the layer is not zero, return error */
1393 if (0 != layer->io_usrs) {
1394 v4l2_err(&vpbe_dev->v4l2_dev, "not IO user\n");
1395 return -EBUSY;
1396 }
1397 /* Initialize videobuf queue as per the buffer type */
1398 layer->alloc_ctx = vb2_dma_contig_init_ctx(vpbe_dev->pdev);
1399 if (IS_ERR(layer->alloc_ctx)) {
1400 v4l2_err(&vpbe_dev->v4l2_dev, "Failed to get the context\n");
1401 return PTR_ERR(layer->alloc_ctx);
1402 }
1403 q = &layer->buffer_queue;
1404 memset(q, 0, sizeof(*q));
1405 q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
1406 q->io_modes = VB2_MMAP | VB2_USERPTR;
1407 q->drv_priv = fh;
1408 q->ops = &video_qops;
1409 q->mem_ops = &vb2_dma_contig_memops;
1410 q->buf_struct_size = sizeof(struct vpbe_disp_buffer);
1411 q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
1412 q->min_buffers_needed = 1;
1413
1414 ret = vb2_queue_init(q);
1415 if (ret) {
1416 v4l2_err(&vpbe_dev->v4l2_dev, "vb2_queue_init() failed\n");
1417 vb2_dma_contig_cleanup_ctx(layer->alloc_ctx);
1418 return ret;
1419 }
1420 /* Set io allowed member of file handle to TRUE */
1421 fh->io_allowed = 1;
1422 /* Increment io usrs member of layer object to 1 */
1423 layer->io_usrs = 1;
1424 /* Store type of memory requested in layer object */
1425 layer->memory = req_buf->memory;
1426 /* Initialize buffer queue */
1427 INIT_LIST_HEAD(&layer->dma_queue);
1428 /* Allocate buffers */
1429 return vb2_reqbufs(q, req_buf);
1430}
1431
1432/*
1433 * vpbe_display_mmap()
1434 * It is used to map kernel space buffers into user spaces
1435 */
1436static int vpbe_display_mmap(struct file *filep, struct vm_area_struct *vma)
1437{
1438 /* Get the layer object and file handle object */
1439 struct vpbe_fh *fh = filep->private_data;
1440 struct vpbe_layer *layer = fh->layer;
1441 struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
1442 int ret;
1443
1444 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "vpbe_display_mmap\n");
1445
1446 if (mutex_lock_interruptible(&layer->opslock))
1447 return -ERESTARTSYS;
1448 ret = vb2_mmap(&layer->buffer_queue, vma);
1449 mutex_unlock(&layer->opslock);
1450 return ret;
1451}
1452
1453/* vpbe_display_poll(): It is used for select/poll system call
1454 */
1455static unsigned int vpbe_display_poll(struct file *filep, poll_table *wait)
1456{
1457 struct vpbe_fh *fh = filep->private_data;
1458 struct vpbe_layer *layer = fh->layer;
1459 struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
1460 unsigned int err = 0;
1461
1462 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "vpbe_display_poll\n");
1463 if (layer->started) {
1464 mutex_lock(&layer->opslock);
1465 err = vb2_poll(&layer->buffer_queue, filep, wait);
1466 mutex_unlock(&layer->opslock);
1467 }
1468 return err;
1469}
1470
1471/* 1145/*
1472 * vpbe_display_open() 1146 * vpbe_display_open()
1473 * It creates object of file handle structure and stores it in private_data 1147 * It creates object of file handle structure and stores it in private_data
@@ -1475,30 +1149,22 @@ static unsigned int vpbe_display_poll(struct file *filep, poll_table *wait)
1475 */ 1149 */
1476static int vpbe_display_open(struct file *file) 1150static int vpbe_display_open(struct file *file)
1477{ 1151{
1478 struct vpbe_fh *fh = NULL;
1479 struct vpbe_layer *layer = video_drvdata(file); 1152 struct vpbe_layer *layer = video_drvdata(file);
1480 struct video_device *vdev = video_devdata(file);
1481 struct vpbe_display *disp_dev = layer->disp_dev; 1153 struct vpbe_display *disp_dev = layer->disp_dev;
1482 struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev; 1154 struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
1483 struct osd_state *osd_device = disp_dev->osd_device; 1155 struct osd_state *osd_device = disp_dev->osd_device;
1484 int err; 1156 int err;
1485 1157
1486 /* Allocate memory for the file handle object */ 1158 /* creating context for file descriptor */
1487 fh = kmalloc(sizeof(struct vpbe_fh), GFP_KERNEL); 1159 err = v4l2_fh_open(file);
1488 if (fh == NULL) { 1160 if (err) {
1489 v4l2_err(&vpbe_dev->v4l2_dev, 1161 v4l2_err(&vpbe_dev->v4l2_dev, "v4l2_fh_open failed\n");
1490 "unable to allocate memory for file handle object\n"); 1162 return err;
1491 return -ENOMEM;
1492 } 1163 }
1493 v4l2_fh_init(&fh->fh, vdev);
1494 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
1495 "vpbe display open plane = %d\n",
1496 layer->device_id);
1497 1164
1498 /* store pointer to fh in private_data member of filep */ 1165 /* leaving if layer is already initialized */
1499 file->private_data = fh; 1166 if (!v4l2_fh_is_singular_file(file))
1500 fh->layer = layer; 1167 return err;
1501 fh->disp_dev = disp_dev;
1502 1168
1503 if (!layer->usrs) { 1169 if (!layer->usrs) {
1504 if (mutex_lock_interruptible(&layer->opslock)) 1170 if (mutex_lock_interruptible(&layer->opslock))
@@ -1511,15 +1177,12 @@ static int vpbe_display_open(struct file *file)
1511 /* Couldn't get layer */ 1177 /* Couldn't get layer */
1512 v4l2_err(&vpbe_dev->v4l2_dev, 1178 v4l2_err(&vpbe_dev->v4l2_dev,
1513 "Display Manager failed to allocate layer\n"); 1179 "Display Manager failed to allocate layer\n");
1514 kfree(fh); 1180 v4l2_fh_release(file);
1515 return -EINVAL; 1181 return -EINVAL;
1516 } 1182 }
1517 } 1183 }
1518 /* Increment layer usrs counter */ 1184 /* Increment layer usrs counter */
1519 layer->usrs++; 1185 layer->usrs++;
1520 /* Set io_allowed member to false */
1521 fh->io_allowed = 0;
1522 v4l2_fh_add(&fh->fh);
1523 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, 1186 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
1524 "vpbe display device opened successfully\n"); 1187 "vpbe display device opened successfully\n");
1525 return 0; 1188 return 0;
@@ -1532,30 +1195,18 @@ static int vpbe_display_open(struct file *file)
1532 */ 1195 */
1533static int vpbe_display_release(struct file *file) 1196static int vpbe_display_release(struct file *file)
1534{ 1197{
1535 /* Get the layer object and file handle object */ 1198 struct vpbe_layer *layer = video_drvdata(file);
1536 struct vpbe_fh *fh = file->private_data;
1537 struct vpbe_layer *layer = fh->layer;
1538 struct osd_layer_config *cfg = &layer->layer_info.config; 1199 struct osd_layer_config *cfg = &layer->layer_info.config;
1539 struct vpbe_display *disp_dev = fh->disp_dev; 1200 struct vpbe_display *disp_dev = layer->disp_dev;
1540 struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev; 1201 struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
1541 struct osd_state *osd_device = disp_dev->osd_device; 1202 struct osd_state *osd_device = disp_dev->osd_device;
1542 1203
1543 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "vpbe_display_release\n"); 1204 v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "vpbe_display_release\n");
1544 1205
1545 mutex_lock(&layer->opslock); 1206 mutex_lock(&layer->opslock);
1546 /* if this instance is doing IO */
1547 if (fh->io_allowed) {
1548 /* Reset io_usrs member of layer object */
1549 layer->io_usrs = 0;
1550
1551 osd_device->ops.disable_layer(osd_device,
1552 layer->layer_info.id);
1553 layer->started = 0;
1554 /* Free buffers allocated */
1555 vb2_queue_release(&layer->buffer_queue);
1556 vb2_dma_contig_cleanup_ctx(&layer->buffer_queue);
1557 }
1558 1207
1208 osd_device->ops.disable_layer(osd_device,
1209 layer->layer_info.id);
1559 /* Decrement layer usrs counter */ 1210 /* Decrement layer usrs counter */
1560 layer->usrs--; 1211 layer->usrs--;
1561 /* If this file handle has initialize encoder device, reset it */ 1212 /* If this file handle has initialize encoder device, reset it */
@@ -1575,14 +1226,9 @@ static int vpbe_display_release(struct file *file)
1575 layer->layer_info.id); 1226 layer->layer_info.id);
1576 } 1227 }
1577 1228
1578 v4l2_fh_del(&fh->fh); 1229 _vb2_fop_release(file, NULL);
1579 v4l2_fh_exit(&fh->fh);
1580 file->private_data = NULL;
1581 mutex_unlock(&layer->opslock); 1230 mutex_unlock(&layer->opslock);
1582 1231
1583 /* Free memory allocated to file handle object */
1584 kfree(fh);
1585
1586 disp_dev->cbcr_ofst = 0; 1232 disp_dev->cbcr_ofst = 0;
1587 1233
1588 return 0; 1234 return 0;
@@ -1595,20 +1241,27 @@ static const struct v4l2_ioctl_ops vpbe_ioctl_ops = {
1595 .vidioc_enum_fmt_vid_out = vpbe_display_enum_fmt, 1241 .vidioc_enum_fmt_vid_out = vpbe_display_enum_fmt,
1596 .vidioc_s_fmt_vid_out = vpbe_display_s_fmt, 1242 .vidioc_s_fmt_vid_out = vpbe_display_s_fmt,
1597 .vidioc_try_fmt_vid_out = vpbe_display_try_fmt, 1243 .vidioc_try_fmt_vid_out = vpbe_display_try_fmt,
1598 .vidioc_reqbufs = vpbe_display_reqbufs, 1244
1599 .vidioc_querybuf = vpbe_display_querybuf, 1245 .vidioc_reqbufs = vb2_ioctl_reqbufs,
1600 .vidioc_qbuf = vpbe_display_qbuf, 1246 .vidioc_create_bufs = vb2_ioctl_create_bufs,
1601 .vidioc_dqbuf = vpbe_display_dqbuf, 1247 .vidioc_querybuf = vb2_ioctl_querybuf,
1602 .vidioc_streamon = vpbe_display_streamon, 1248 .vidioc_qbuf = vb2_ioctl_qbuf,
1603 .vidioc_streamoff = vpbe_display_streamoff, 1249 .vidioc_dqbuf = vb2_ioctl_dqbuf,
1250 .vidioc_streamon = vb2_ioctl_streamon,
1251 .vidioc_streamoff = vb2_ioctl_streamoff,
1252 .vidioc_expbuf = vb2_ioctl_expbuf,
1253
1604 .vidioc_cropcap = vpbe_display_cropcap, 1254 .vidioc_cropcap = vpbe_display_cropcap,
1605 .vidioc_g_crop = vpbe_display_g_crop, 1255 .vidioc_g_crop = vpbe_display_g_crop,
1606 .vidioc_s_crop = vpbe_display_s_crop, 1256 .vidioc_s_crop = vpbe_display_s_crop,
1257
1607 .vidioc_s_std = vpbe_display_s_std, 1258 .vidioc_s_std = vpbe_display_s_std,
1608 .vidioc_g_std = vpbe_display_g_std, 1259 .vidioc_g_std = vpbe_display_g_std,
1260
1609 .vidioc_enum_output = vpbe_display_enum_output, 1261 .vidioc_enum_output = vpbe_display_enum_output,
1610 .vidioc_s_output = vpbe_display_s_output, 1262 .vidioc_s_output = vpbe_display_s_output,
1611 .vidioc_g_output = vpbe_display_g_output, 1263 .vidioc_g_output = vpbe_display_g_output,
1264
1612 .vidioc_s_dv_timings = vpbe_display_s_dv_timings, 1265 .vidioc_s_dv_timings = vpbe_display_s_dv_timings,
1613 .vidioc_g_dv_timings = vpbe_display_g_dv_timings, 1266 .vidioc_g_dv_timings = vpbe_display_g_dv_timings,
1614 .vidioc_enum_dv_timings = vpbe_display_enum_dv_timings, 1267 .vidioc_enum_dv_timings = vpbe_display_enum_dv_timings,
@@ -1619,8 +1272,8 @@ static struct v4l2_file_operations vpbe_fops = {
1619 .open = vpbe_display_open, 1272 .open = vpbe_display_open,
1620 .release = vpbe_display_release, 1273 .release = vpbe_display_release,
1621 .unlocked_ioctl = video_ioctl2, 1274 .unlocked_ioctl = video_ioctl2,
1622 .mmap = vpbe_display_mmap, 1275 .mmap = vb2_fop_mmap,
1623 .poll = vpbe_display_poll 1276 .poll = vb2_fop_poll,
1624}; 1277};
1625 1278
1626static int vpbe_device_get(struct device *dev, void *data) 1279static int vpbe_device_get(struct device *dev, void *data)
@@ -1700,6 +1353,7 @@ static int register_device(struct vpbe_layer *vpbe_display_layer,
1700 (int)vpbe_display_layer, 1353 (int)vpbe_display_layer,
1701 (int)&vpbe_display_layer->video_dev); 1354 (int)&vpbe_display_layer->video_dev);
1702 1355
1356 vpbe_display_layer->video_dev.queue = &vpbe_display_layer->buffer_queue;
1703 err = video_register_device(&vpbe_display_layer->video_dev, 1357 err = video_register_device(&vpbe_display_layer->video_dev,
1704 VFL_TYPE_GRABBER, 1358 VFL_TYPE_GRABBER,
1705 -1); 1359 -1);
@@ -1724,9 +1378,10 @@ static int register_device(struct vpbe_layer *vpbe_display_layer,
1724 */ 1378 */
1725static int vpbe_display_probe(struct platform_device *pdev) 1379static int vpbe_display_probe(struct platform_device *pdev)
1726{ 1380{
1727 struct vpbe_layer *vpbe_display_layer;
1728 struct vpbe_display *disp_dev; 1381 struct vpbe_display *disp_dev;
1382 struct v4l2_device *v4l2_dev;
1729 struct resource *res = NULL; 1383 struct resource *res = NULL;
1384 struct vb2_queue *q;
1730 int k; 1385 int k;
1731 int i; 1386 int i;
1732 int err; 1387 int err;
@@ -1748,13 +1403,14 @@ static int vpbe_display_probe(struct platform_device *pdev)
1748 vpbe_device_get); 1403 vpbe_device_get);
1749 if (err < 0) 1404 if (err < 0)
1750 return err; 1405 return err;
1406
1407 v4l2_dev = &disp_dev->vpbe_dev->v4l2_dev;
1751 /* Initialize the vpbe display controller */ 1408 /* Initialize the vpbe display controller */
1752 if (NULL != disp_dev->vpbe_dev->ops.initialize) { 1409 if (NULL != disp_dev->vpbe_dev->ops.initialize) {
1753 err = disp_dev->vpbe_dev->ops.initialize(&pdev->dev, 1410 err = disp_dev->vpbe_dev->ops.initialize(&pdev->dev,
1754 disp_dev->vpbe_dev); 1411 disp_dev->vpbe_dev);
1755 if (err) { 1412 if (err) {
1756 v4l2_err(&disp_dev->vpbe_dev->v4l2_dev, 1413 v4l2_err(v4l2_dev, "Error initing vpbe\n");
1757 "Error initing vpbe\n");
1758 err = -ENOMEM; 1414 err = -ENOMEM;
1759 goto probe_out; 1415 goto probe_out;
1760 } 1416 }
@@ -1769,8 +1425,7 @@ static int vpbe_display_probe(struct platform_device *pdev)
1769 1425
1770 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 1426 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1771 if (!res) { 1427 if (!res) {
1772 v4l2_err(&disp_dev->vpbe_dev->v4l2_dev, 1428 v4l2_err(v4l2_dev, "Unable to get VENC interrupt resource\n");
1773 "Unable to get VENC interrupt resource\n");
1774 err = -ENODEV; 1429 err = -ENODEV;
1775 goto probe_out; 1430 goto probe_out;
1776 } 1431 }
@@ -1779,30 +1434,57 @@ static int vpbe_display_probe(struct platform_device *pdev)
1779 err = devm_request_irq(&pdev->dev, irq, venc_isr, 0, 1434 err = devm_request_irq(&pdev->dev, irq, venc_isr, 0,
1780 VPBE_DISPLAY_DRIVER, disp_dev); 1435 VPBE_DISPLAY_DRIVER, disp_dev);
1781 if (err) { 1436 if (err) {
1782 v4l2_err(&disp_dev->vpbe_dev->v4l2_dev, 1437 v4l2_err(v4l2_dev, "VPBE IRQ request failed\n");
1783 "Unable to request interrupt\n");
1784 goto probe_out; 1438 goto probe_out;
1785 } 1439 }
1786 1440
1787 for (i = 0; i < VPBE_DISPLAY_MAX_DEVICES; i++) { 1441 for (i = 0; i < VPBE_DISPLAY_MAX_DEVICES; i++) {
1442 /* initialize vb2 queue */
1443 q = &disp_dev->dev[i]->buffer_queue;
1444 memset(q, 0, sizeof(*q));
1445 q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
1446 q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
1447 q->drv_priv = disp_dev->dev[i];
1448 q->ops = &video_qops;
1449 q->mem_ops = &vb2_dma_contig_memops;
1450 q->buf_struct_size = sizeof(struct vpbe_disp_buffer);
1451 q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
1452 q->min_buffers_needed = 1;
1453 q->lock = &disp_dev->dev[i]->opslock;
1454 err = vb2_queue_init(q);
1455 if (err) {
1456 v4l2_err(v4l2_dev, "vb2_queue_init() failed\n");
1457 goto probe_out;
1458 }
1459
1460 disp_dev->dev[i]->alloc_ctx =
1461 vb2_dma_contig_init_ctx(disp_dev->vpbe_dev->pdev);
1462 if (IS_ERR(disp_dev->dev[i]->alloc_ctx)) {
1463 v4l2_err(v4l2_dev, "Failed to get the context\n");
1464 err = PTR_ERR(disp_dev->dev[i]->alloc_ctx);
1465 goto probe_out;
1466 }
1467
1468 INIT_LIST_HEAD(&disp_dev->dev[i]->dma_queue);
1469
1788 if (register_device(disp_dev->dev[i], disp_dev, pdev)) { 1470 if (register_device(disp_dev->dev[i], disp_dev, pdev)) {
1789 err = -ENODEV; 1471 err = -ENODEV;
1790 goto probe_out; 1472 goto probe_out;
1791 } 1473 }
1792 } 1474 }
1793 1475
1794 printk(KERN_DEBUG "Successfully completed the probing of vpbe v4l2 device\n"); 1476 v4l2_dbg(1, debug, v4l2_dev,
1477 "Successfully completed the probing of vpbe v4l2 device\n");
1478
1795 return 0; 1479 return 0;
1796 1480
1797probe_out: 1481probe_out:
1798 for (k = 0; k < VPBE_DISPLAY_MAX_DEVICES; k++) { 1482 for (k = 0; k < VPBE_DISPLAY_MAX_DEVICES; k++) {
1799 /* Get the pointer to the layer object */
1800 vpbe_display_layer = disp_dev->dev[k];
1801 /* Unregister video device */ 1483 /* Unregister video device */
1802 if (vpbe_display_layer) { 1484 if (disp_dev->dev[k] != NULL) {
1803 video_unregister_device( 1485 vb2_dma_contig_cleanup_ctx(disp_dev->dev[k]->alloc_ctx);
1804 &vpbe_display_layer->video_dev); 1486 video_unregister_device(&disp_dev->dev[k]->video_dev);
1805 kfree(disp_dev->dev[k]); 1487 kfree(disp_dev->dev[k]);
1806 } 1488 }
1807 } 1489 }
1808 return err; 1490 return err;
@@ -1828,6 +1510,7 @@ static int vpbe_display_remove(struct platform_device *pdev)
1828 for (i = 0; i < VPBE_DISPLAY_MAX_DEVICES; i++) { 1510 for (i = 0; i < VPBE_DISPLAY_MAX_DEVICES; i++) {
1829 /* Get the pointer to the layer object */ 1511 /* Get the pointer to the layer object */
1830 vpbe_display_layer = disp_dev->dev[i]; 1512 vpbe_display_layer = disp_dev->dev[i];
1513 vb2_dma_contig_cleanup_ctx(vpbe_display_layer->alloc_ctx);
1831 /* Unregister video device */ 1514 /* Unregister video device */
1832 video_unregister_device(&vpbe_display_layer->video_dev); 1515 video_unregister_device(&vpbe_display_layer->video_dev);
1833 1516
diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c
index de55f47a77db..271c4600432d 100644
--- a/drivers/media/platform/davinci/vpfe_capture.c
+++ b/drivers/media/platform/davinci/vpfe_capture.c
@@ -414,13 +414,13 @@ static int vpfe_config_image_format(struct vpfe_device *vpfe_dev,
414 /* assume V4L2_PIX_FMT_UYVY as default */ 414 /* assume V4L2_PIX_FMT_UYVY as default */
415 pix->pixelformat = V4L2_PIX_FMT_UYVY; 415 pix->pixelformat = V4L2_PIX_FMT_UYVY;
416 v4l2_fill_mbus_format(&mbus_fmt, pix, 416 v4l2_fill_mbus_format(&mbus_fmt, pix,
417 V4L2_MBUS_FMT_YUYV10_2X10); 417 MEDIA_BUS_FMT_YUYV10_2X10);
418 } else { 418 } else {
419 pix->field = V4L2_FIELD_NONE; 419 pix->field = V4L2_FIELD_NONE;
420 /* assume V4L2_PIX_FMT_SBGGR8 */ 420 /* assume V4L2_PIX_FMT_SBGGR8 */
421 pix->pixelformat = V4L2_PIX_FMT_SBGGR8; 421 pix->pixelformat = V4L2_PIX_FMT_SBGGR8;
422 v4l2_fill_mbus_format(&mbus_fmt, pix, 422 v4l2_fill_mbus_format(&mbus_fmt, pix,
423 V4L2_MBUS_FMT_SBGGR8_1X8); 423 MEDIA_BUS_FMT_SBGGR8_1X8);
424 } 424 }
425 425
426 /* if sub device supports g_mbus_fmt, override the defaults */ 426 /* if sub device supports g_mbus_fmt, override the defaults */
@@ -930,8 +930,8 @@ static int vpfe_querycap(struct file *file, void *priv,
930 930
931 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_querycap\n"); 931 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_querycap\n");
932 932
933 cap->version = VPFE_CAPTURE_VERSION_CODE; 933 cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
934 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; 934 cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
935 strlcpy(cap->driver, CAPTURE_DRV_NAME, sizeof(cap->driver)); 935 strlcpy(cap->driver, CAPTURE_DRV_NAME, sizeof(cap->driver));
936 strlcpy(cap->bus_info, "VPFE", sizeof(cap->bus_info)); 936 strlcpy(cap->bus_info, "VPFE", sizeof(cap->bus_info));
937 strlcpy(cap->card, vpfe_dev->cfg->card_name, sizeof(cap->card)); 937 strlcpy(cap->card, vpfe_dev->cfg->card_name, sizeof(cap->card));
diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c
index 3ccb26ff43c8..d8e1b98b7784 100644
--- a/drivers/media/platform/davinci/vpif_capture.c
+++ b/drivers/media/platform/davinci/vpif_capture.c
@@ -311,6 +311,8 @@ static struct vb2_ops video_qops = {
311 .start_streaming = vpif_start_streaming, 311 .start_streaming = vpif_start_streaming,
312 .stop_streaming = vpif_stop_streaming, 312 .stop_streaming = vpif_stop_streaming,
313 .buf_queue = vpif_buffer_queue, 313 .buf_queue = vpif_buffer_queue,
314 .wait_prepare = vb2_ops_wait_prepare,
315 .wait_finish = vb2_ops_wait_finish,
314}; 316};
315 317
316/** 318/**
diff --git a/drivers/media/platform/exynos-gsc/gsc-core.c b/drivers/media/platform/exynos-gsc/gsc-core.c
index b4c9f1d08968..3062e9fac6da 100644
--- a/drivers/media/platform/exynos-gsc/gsc-core.c
+++ b/drivers/media/platform/exynos-gsc/gsc-core.c
@@ -54,7 +54,7 @@ static const struct gsc_fmt gsc_formats[] = {
54 .corder = GSC_CBCR, 54 .corder = GSC_CBCR,
55 .num_planes = 1, 55 .num_planes = 1,
56 .num_comp = 1, 56 .num_comp = 1,
57 .mbus_code = V4L2_MBUS_FMT_YUYV8_2X8, 57 .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
58 }, { 58 }, {
59 .name = "YUV 4:2:2 packed, CbYCrY", 59 .name = "YUV 4:2:2 packed, CbYCrY",
60 .pixelformat = V4L2_PIX_FMT_UYVY, 60 .pixelformat = V4L2_PIX_FMT_UYVY,
@@ -64,7 +64,7 @@ static const struct gsc_fmt gsc_formats[] = {
64 .corder = GSC_CBCR, 64 .corder = GSC_CBCR,
65 .num_planes = 1, 65 .num_planes = 1,
66 .num_comp = 1, 66 .num_comp = 1,
67 .mbus_code = V4L2_MBUS_FMT_UYVY8_2X8, 67 .mbus_code = MEDIA_BUS_FMT_UYVY8_2X8,
68 }, { 68 }, {
69 .name = "YUV 4:2:2 packed, CrYCbY", 69 .name = "YUV 4:2:2 packed, CrYCbY",
70 .pixelformat = V4L2_PIX_FMT_VYUY, 70 .pixelformat = V4L2_PIX_FMT_VYUY,
@@ -74,7 +74,7 @@ static const struct gsc_fmt gsc_formats[] = {
74 .corder = GSC_CRCB, 74 .corder = GSC_CRCB,
75 .num_planes = 1, 75 .num_planes = 1,
76 .num_comp = 1, 76 .num_comp = 1,
77 .mbus_code = V4L2_MBUS_FMT_VYUY8_2X8, 77 .mbus_code = MEDIA_BUS_FMT_VYUY8_2X8,
78 }, { 78 }, {
79 .name = "YUV 4:2:2 packed, YCrYCb", 79 .name = "YUV 4:2:2 packed, YCrYCb",
80 .pixelformat = V4L2_PIX_FMT_YVYU, 80 .pixelformat = V4L2_PIX_FMT_YVYU,
@@ -84,7 +84,7 @@ static const struct gsc_fmt gsc_formats[] = {
84 .corder = GSC_CRCB, 84 .corder = GSC_CRCB,
85 .num_planes = 1, 85 .num_planes = 1,
86 .num_comp = 1, 86 .num_comp = 1,
87 .mbus_code = V4L2_MBUS_FMT_YVYU8_2X8, 87 .mbus_code = MEDIA_BUS_FMT_YVYU8_2X8,
88 }, { 88 }, {
89 .name = "YUV 4:4:4 planar, YCbYCr", 89 .name = "YUV 4:4:4 planar, YCbYCr",
90 .pixelformat = V4L2_PIX_FMT_YUV32, 90 .pixelformat = V4L2_PIX_FMT_YUV32,
@@ -319,21 +319,22 @@ int gsc_enum_fmt_mplane(struct v4l2_fmtdesc *f)
319 return 0; 319 return 0;
320} 320}
321 321
322static u32 get_plane_info(struct gsc_frame *frm, u32 addr, u32 *index) 322static int get_plane_info(struct gsc_frame *frm, u32 addr, u32 *index, u32 *ret_addr)
323{ 323{
324 if (frm->addr.y == addr) { 324 if (frm->addr.y == addr) {
325 *index = 0; 325 *index = 0;
326 return frm->addr.y; 326 *ret_addr = frm->addr.y;
327 } else if (frm->addr.cb == addr) { 327 } else if (frm->addr.cb == addr) {
328 *index = 1; 328 *index = 1;
329 return frm->addr.cb; 329 *ret_addr = frm->addr.cb;
330 } else if (frm->addr.cr == addr) { 330 } else if (frm->addr.cr == addr) {
331 *index = 2; 331 *index = 2;
332 return frm->addr.cr; 332 *ret_addr = frm->addr.cr;
333 } else { 333 } else {
334 pr_err("Plane address is wrong"); 334 pr_err("Plane address is wrong");
335 return -EINVAL; 335 return -EINVAL;
336 } 336 }
337 return 0;
337} 338}
338 339
339void gsc_set_prefbuf(struct gsc_dev *gsc, struct gsc_frame *frm) 340void gsc_set_prefbuf(struct gsc_dev *gsc, struct gsc_frame *frm)
@@ -352,9 +353,11 @@ void gsc_set_prefbuf(struct gsc_dev *gsc, struct gsc_frame *frm)
352 u32 t_min, t_max; 353 u32 t_min, t_max;
353 354
354 t_min = min3(frm->addr.y, frm->addr.cb, frm->addr.cr); 355 t_min = min3(frm->addr.y, frm->addr.cb, frm->addr.cr);
355 low_addr = get_plane_info(frm, t_min, &low_plane); 356 if (get_plane_info(frm, t_min, &low_plane, &low_addr))
357 return;
356 t_max = max3(frm->addr.y, frm->addr.cb, frm->addr.cr); 358 t_max = max3(frm->addr.y, frm->addr.cb, frm->addr.cr);
357 high_addr = get_plane_info(frm, t_max, &high_plane); 359 if (get_plane_info(frm, t_max, &high_plane, &high_addr))
360 return;
358 361
359 mid_plane = 3 - (low_plane + high_plane); 362 mid_plane = 3 - (low_plane + high_plane);
360 if (mid_plane == 0) 363 if (mid_plane == 0)
diff --git a/drivers/media/platform/exynos-gsc/gsc-core.h b/drivers/media/platform/exynos-gsc/gsc-core.h
index ef0a6564cef9..0abdb17fb19c 100644
--- a/drivers/media/platform/exynos-gsc/gsc-core.h
+++ b/drivers/media/platform/exynos-gsc/gsc-core.h
@@ -117,7 +117,7 @@ enum gsc_yuv_fmt {
117 * @flags: flags indicating which operation mode format applies to 117 * @flags: flags indicating which operation mode format applies to
118 */ 118 */
119struct gsc_fmt { 119struct gsc_fmt {
120 enum v4l2_mbus_pixelcode mbus_code; 120 u32 mbus_code;
121 char *name; 121 char *name;
122 u32 pixelformat; 122 u32 pixelformat;
123 u32 color; 123 u32 color;
diff --git a/drivers/media/platform/exynos4-is/fimc-capture.c b/drivers/media/platform/exynos4-is/fimc-capture.c
index 3d2babd5067a..8a2fd8c33d42 100644
--- a/drivers/media/platform/exynos4-is/fimc-capture.c
+++ b/drivers/media/platform/exynos4-is/fimc-capture.c
@@ -749,7 +749,7 @@ static int fimc_cap_enum_fmt_mplane(struct file *file, void *priv,
749 return -EINVAL; 749 return -EINVAL;
750 strncpy(f->description, fmt->name, sizeof(f->description) - 1); 750 strncpy(f->description, fmt->name, sizeof(f->description) - 1);
751 f->pixelformat = fmt->fourcc; 751 f->pixelformat = fmt->fourcc;
752 if (fmt->fourcc == V4L2_MBUS_FMT_JPEG_1X8) 752 if (fmt->fourcc == MEDIA_BUS_FMT_JPEG_1X8)
753 f->flags |= V4L2_FMT_FLAG_COMPRESSED; 753 f->flags |= V4L2_FMT_FLAG_COMPRESSED;
754 return 0; 754 return 0;
755} 755}
diff --git a/drivers/media/platform/exynos4-is/fimc-core.c b/drivers/media/platform/exynos4-is/fimc-core.c
index aee92d908e49..2acc19573b6b 100644
--- a/drivers/media/platform/exynos4-is/fimc-core.c
+++ b/drivers/media/platform/exynos4-is/fimc-core.c
@@ -81,7 +81,7 @@ static struct fimc_fmt fimc_formats[] = {
81 .flags = FMT_FLAGS_M2M_OUT | FMT_HAS_ALPHA, 81 .flags = FMT_FLAGS_M2M_OUT | FMT_HAS_ALPHA,
82 }, { 82 }, {
83 .name = "YUV 4:4:4", 83 .name = "YUV 4:4:4",
84 .mbus_code = V4L2_MBUS_FMT_YUV10_1X30, 84 .mbus_code = MEDIA_BUS_FMT_YUV10_1X30,
85 .flags = FMT_FLAGS_WRITEBACK, 85 .flags = FMT_FLAGS_WRITEBACK,
86 }, { 86 }, {
87 .name = "YUV 4:2:2 packed, YCbYCr", 87 .name = "YUV 4:2:2 packed, YCbYCr",
@@ -90,7 +90,7 @@ static struct fimc_fmt fimc_formats[] = {
90 .color = FIMC_FMT_YCBYCR422, 90 .color = FIMC_FMT_YCBYCR422,
91 .memplanes = 1, 91 .memplanes = 1,
92 .colplanes = 1, 92 .colplanes = 1,
93 .mbus_code = V4L2_MBUS_FMT_YUYV8_2X8, 93 .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
94 .flags = FMT_FLAGS_M2M | FMT_FLAGS_CAM, 94 .flags = FMT_FLAGS_M2M | FMT_FLAGS_CAM,
95 }, { 95 }, {
96 .name = "YUV 4:2:2 packed, CbYCrY", 96 .name = "YUV 4:2:2 packed, CbYCrY",
@@ -99,7 +99,7 @@ static struct fimc_fmt fimc_formats[] = {
99 .color = FIMC_FMT_CBYCRY422, 99 .color = FIMC_FMT_CBYCRY422,
100 .memplanes = 1, 100 .memplanes = 1,
101 .colplanes = 1, 101 .colplanes = 1,
102 .mbus_code = V4L2_MBUS_FMT_UYVY8_2X8, 102 .mbus_code = MEDIA_BUS_FMT_UYVY8_2X8,
103 .flags = FMT_FLAGS_M2M | FMT_FLAGS_CAM, 103 .flags = FMT_FLAGS_M2M | FMT_FLAGS_CAM,
104 }, { 104 }, {
105 .name = "YUV 4:2:2 packed, CrYCbY", 105 .name = "YUV 4:2:2 packed, CrYCbY",
@@ -108,7 +108,7 @@ static struct fimc_fmt fimc_formats[] = {
108 .color = FIMC_FMT_CRYCBY422, 108 .color = FIMC_FMT_CRYCBY422,
109 .memplanes = 1, 109 .memplanes = 1,
110 .colplanes = 1, 110 .colplanes = 1,
111 .mbus_code = V4L2_MBUS_FMT_VYUY8_2X8, 111 .mbus_code = MEDIA_BUS_FMT_VYUY8_2X8,
112 .flags = FMT_FLAGS_M2M | FMT_FLAGS_CAM, 112 .flags = FMT_FLAGS_M2M | FMT_FLAGS_CAM,
113 }, { 113 }, {
114 .name = "YUV 4:2:2 packed, YCrYCb", 114 .name = "YUV 4:2:2 packed, YCrYCb",
@@ -117,7 +117,7 @@ static struct fimc_fmt fimc_formats[] = {
117 .color = FIMC_FMT_YCRYCB422, 117 .color = FIMC_FMT_YCRYCB422,
118 .memplanes = 1, 118 .memplanes = 1,
119 .colplanes = 1, 119 .colplanes = 1,
120 .mbus_code = V4L2_MBUS_FMT_YVYU8_2X8, 120 .mbus_code = MEDIA_BUS_FMT_YVYU8_2X8,
121 .flags = FMT_FLAGS_M2M | FMT_FLAGS_CAM, 121 .flags = FMT_FLAGS_M2M | FMT_FLAGS_CAM,
122 }, { 122 }, {
123 .name = "YUV 4:2:2 planar, Y/Cb/Cr", 123 .name = "YUV 4:2:2 planar, Y/Cb/Cr",
@@ -190,7 +190,7 @@ static struct fimc_fmt fimc_formats[] = {
190 .depth = { 8 }, 190 .depth = { 8 },
191 .memplanes = 1, 191 .memplanes = 1,
192 .colplanes = 1, 192 .colplanes = 1,
193 .mbus_code = V4L2_MBUS_FMT_JPEG_1X8, 193 .mbus_code = MEDIA_BUS_FMT_JPEG_1X8,
194 .flags = FMT_FLAGS_CAM | FMT_FLAGS_COMPRESSED, 194 .flags = FMT_FLAGS_CAM | FMT_FLAGS_COMPRESSED,
195 }, { 195 }, {
196 .name = "S5C73MX interleaved UYVY/JPEG", 196 .name = "S5C73MX interleaved UYVY/JPEG",
@@ -200,7 +200,7 @@ static struct fimc_fmt fimc_formats[] = {
200 .memplanes = 2, 200 .memplanes = 2,
201 .colplanes = 1, 201 .colplanes = 1,
202 .mdataplanes = 0x2, /* plane 1 holds frame meta data */ 202 .mdataplanes = 0x2, /* plane 1 holds frame meta data */
203 .mbus_code = V4L2_MBUS_FMT_S5C_UYVY_JPEG_1X8, 203 .mbus_code = MEDIA_BUS_FMT_S5C_UYVY_JPEG_1X8,
204 .flags = FMT_FLAGS_CAM | FMT_FLAGS_COMPRESSED, 204 .flags = FMT_FLAGS_CAM | FMT_FLAGS_COMPRESSED,
205 }, 205 },
206}; 206};
@@ -832,7 +832,7 @@ err:
832 return -ENXIO; 832 return -ENXIO;
833} 833}
834 834
835#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP) 835#ifdef CONFIG_PM
836static int fimc_m2m_suspend(struct fimc_dev *fimc) 836static int fimc_m2m_suspend(struct fimc_dev *fimc)
837{ 837{
838 unsigned long flags; 838 unsigned long flags;
@@ -871,7 +871,7 @@ static int fimc_m2m_resume(struct fimc_dev *fimc)
871 871
872 return 0; 872 return 0;
873} 873}
874#endif /* CONFIG_PM_RUNTIME || CONFIG_PM_SLEEP */ 874#endif /* CONFIG_PM */
875 875
876static const struct of_device_id fimc_of_match[]; 876static const struct of_device_id fimc_of_match[];
877 877
@@ -1039,7 +1039,7 @@ err_sclk:
1039 return ret; 1039 return ret;
1040} 1040}
1041 1041
1042#ifdef CONFIG_PM_RUNTIME 1042#ifdef CONFIG_PM
1043static int fimc_runtime_resume(struct device *dev) 1043static int fimc_runtime_resume(struct device *dev)
1044{ 1044{
1045 struct fimc_dev *fimc = dev_get_drvdata(dev); 1045 struct fimc_dev *fimc = dev_get_drvdata(dev);
diff --git a/drivers/media/platform/exynos4-is/fimc-core.h b/drivers/media/platform/exynos4-is/fimc-core.h
index 6c75c6ced1f7..7328f0845065 100644
--- a/drivers/media/platform/exynos4-is/fimc-core.h
+++ b/drivers/media/platform/exynos4-is/fimc-core.h
@@ -579,8 +579,8 @@ static inline bool fimc_jpeg_fourcc(u32 pixelformat)
579 579
580static inline bool fimc_user_defined_mbus_fmt(u32 code) 580static inline bool fimc_user_defined_mbus_fmt(u32 code)
581{ 581{
582 return (code == V4L2_MBUS_FMT_JPEG_1X8 || 582 return (code == MEDIA_BUS_FMT_JPEG_1X8 ||
583 code == V4L2_MBUS_FMT_S5C_UYVY_JPEG_1X8); 583 code == MEDIA_BUS_FMT_S5C_UYVY_JPEG_1X8);
584} 584}
585 585
586/* Return the alpha component bit mask */ 586/* Return the alpha component bit mask */
diff --git a/drivers/media/platform/exynos4-is/fimc-is-i2c.c b/drivers/media/platform/exynos4-is/fimc-is-i2c.c
index 371cad4fcce9..d83033170789 100644
--- a/drivers/media/platform/exynos4-is/fimc-is-i2c.c
+++ b/drivers/media/platform/exynos4-is/fimc-is-i2c.c
@@ -81,7 +81,7 @@ static int fimc_is_i2c_remove(struct platform_device *pdev)
81 return 0; 81 return 0;
82} 82}
83 83
84#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP) 84#ifdef CONFIG_PM
85static int fimc_is_i2c_runtime_suspend(struct device *dev) 85static int fimc_is_i2c_runtime_suspend(struct device *dev)
86{ 86{
87 struct fimc_is_i2c *isp_i2c = dev_get_drvdata(dev); 87 struct fimc_is_i2c *isp_i2c = dev_get_drvdata(dev);
diff --git a/drivers/media/platform/exynos4-is/fimc-is.c b/drivers/media/platform/exynos4-is/fimc-is.c
index 22162b2567da..2a0cbeff0324 100644
--- a/drivers/media/platform/exynos4-is/fimc-is.c
+++ b/drivers/media/platform/exynos4-is/fimc-is.c
@@ -428,8 +428,7 @@ static void fimc_is_load_firmware(const struct firmware *fw, void *context)
428 * needed around for copying to the IS working memory every 428 * needed around for copying to the IS working memory every
429 * time before the Cortex-A5 is restarted. 429 * time before the Cortex-A5 is restarted.
430 */ 430 */
431 if (is->fw.f_w) 431 release_firmware(is->fw.f_w);
432 release_firmware(is->fw.f_w);
433 is->fw.f_w = fw; 432 is->fw.f_w = fw;
434done: 433done:
435 mutex_unlock(&is->lock); 434 mutex_unlock(&is->lock);
@@ -814,9 +813,9 @@ static int fimc_is_probe(struct platform_device *pdev)
814 return -ENOMEM; 813 return -ENOMEM;
815 814
816 is->irq = irq_of_parse_and_map(dev->of_node, 0); 815 is->irq = irq_of_parse_and_map(dev->of_node, 0);
817 if (is->irq < 0) { 816 if (!is->irq) {
818 dev_err(dev, "no irq found\n"); 817 dev_err(dev, "no irq found\n");
819 return is->irq; 818 return -EINVAL;
820 } 819 }
821 820
822 ret = fimc_is_get_clocks(is); 821 ret = fimc_is_get_clocks(is);
@@ -937,8 +936,7 @@ static int fimc_is_remove(struct platform_device *pdev)
937 vb2_dma_contig_cleanup_ctx(is->alloc_ctx); 936 vb2_dma_contig_cleanup_ctx(is->alloc_ctx);
938 fimc_is_put_clocks(is); 937 fimc_is_put_clocks(is);
939 fimc_is_debugfs_remove(is); 938 fimc_is_debugfs_remove(is);
940 if (is->fw.f_w) 939 release_firmware(is->fw.f_w);
941 release_firmware(is->fw.f_w);
942 fimc_is_free_cpu_memory(is); 940 fimc_is_free_cpu_memory(is);
943 941
944 return 0; 942 return 0;
diff --git a/drivers/media/platform/exynos4-is/fimc-isp.c b/drivers/media/platform/exynos4-is/fimc-isp.c
index be62d6b9ac48..60c744915549 100644
--- a/drivers/media/platform/exynos4-is/fimc-isp.c
+++ b/drivers/media/platform/exynos4-is/fimc-isp.c
@@ -41,21 +41,21 @@ static const struct fimc_fmt fimc_isp_formats[FIMC_ISP_NUM_FORMATS] = {
41 .depth = { 8 }, 41 .depth = { 8 },
42 .color = FIMC_FMT_RAW8, 42 .color = FIMC_FMT_RAW8,
43 .memplanes = 1, 43 .memplanes = 1,
44 .mbus_code = V4L2_MBUS_FMT_SGRBG8_1X8, 44 .mbus_code = MEDIA_BUS_FMT_SGRBG8_1X8,
45 }, { 45 }, {
46 .name = "RAW10 (GRBG)", 46 .name = "RAW10 (GRBG)",
47 .fourcc = V4L2_PIX_FMT_SGRBG10, 47 .fourcc = V4L2_PIX_FMT_SGRBG10,
48 .depth = { 10 }, 48 .depth = { 10 },
49 .color = FIMC_FMT_RAW10, 49 .color = FIMC_FMT_RAW10,
50 .memplanes = 1, 50 .memplanes = 1,
51 .mbus_code = V4L2_MBUS_FMT_SGRBG10_1X10, 51 .mbus_code = MEDIA_BUS_FMT_SGRBG10_1X10,
52 }, { 52 }, {
53 .name = "RAW12 (GRBG)", 53 .name = "RAW12 (GRBG)",
54 .fourcc = V4L2_PIX_FMT_SGRBG12, 54 .fourcc = V4L2_PIX_FMT_SGRBG12,
55 .depth = { 12 }, 55 .depth = { 12 },
56 .color = FIMC_FMT_RAW12, 56 .color = FIMC_FMT_RAW12,
57 .memplanes = 1, 57 .memplanes = 1,
58 .mbus_code = V4L2_MBUS_FMT_SGRBG12_1X12, 58 .mbus_code = MEDIA_BUS_FMT_SGRBG12_1X12,
59 }, 59 },
60}; 60};
61 61
@@ -149,7 +149,7 @@ static int fimc_isp_subdev_get_fmt(struct v4l2_subdev *sd,
149 149
150 if (fmt->pad == FIMC_ISP_SD_PAD_SRC_FIFO) { 150 if (fmt->pad == FIMC_ISP_SD_PAD_SRC_FIFO) {
151 mf->colorspace = V4L2_COLORSPACE_JPEG; 151 mf->colorspace = V4L2_COLORSPACE_JPEG;
152 mf->code = V4L2_MBUS_FMT_YUV10_1X30; 152 mf->code = MEDIA_BUS_FMT_YUV10_1X30;
153 } 153 }
154 } 154 }
155 155
@@ -175,7 +175,7 @@ static void __isp_subdev_try_format(struct fimc_isp *isp,
175 FIMC_ISP_SINK_WIDTH_MAX, 0, 175 FIMC_ISP_SINK_WIDTH_MAX, 0,
176 &mf->height, FIMC_ISP_SINK_HEIGHT_MIN, 176 &mf->height, FIMC_ISP_SINK_HEIGHT_MIN,
177 FIMC_ISP_SINK_HEIGHT_MAX, 0, 0); 177 FIMC_ISP_SINK_HEIGHT_MAX, 0, 0);
178 mf->code = V4L2_MBUS_FMT_SGRBG10_1X10; 178 mf->code = MEDIA_BUS_FMT_SGRBG10_1X10;
179 } else { 179 } else {
180 if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) 180 if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
181 format = v4l2_subdev_get_try_format(fh, 181 format = v4l2_subdev_get_try_format(fh,
@@ -188,7 +188,7 @@ static void __isp_subdev_try_format(struct fimc_isp *isp,
188 mf->height = format->height - FIMC_ISP_CAC_MARGIN_HEIGHT; 188 mf->height = format->height - FIMC_ISP_CAC_MARGIN_HEIGHT;
189 189
190 if (fmt->pad == FIMC_ISP_SD_PAD_SRC_FIFO) { 190 if (fmt->pad == FIMC_ISP_SD_PAD_SRC_FIFO) {
191 mf->code = V4L2_MBUS_FMT_YUV10_1X30; 191 mf->code = MEDIA_BUS_FMT_YUV10_1X30;
192 mf->colorspace = V4L2_COLORSPACE_JPEG; 192 mf->colorspace = V4L2_COLORSPACE_JPEG;
193 } else { 193 } else {
194 mf->code = format->code; 194 mf->code = format->code;
@@ -680,11 +680,11 @@ static void __isp_subdev_set_default_format(struct fimc_isp *isp)
680 FIMC_ISP_CAC_MARGIN_WIDTH; 680 FIMC_ISP_CAC_MARGIN_WIDTH;
681 isp->sink_fmt.height = DEFAULT_PREVIEW_STILL_HEIGHT + 681 isp->sink_fmt.height = DEFAULT_PREVIEW_STILL_HEIGHT +
682 FIMC_ISP_CAC_MARGIN_HEIGHT; 682 FIMC_ISP_CAC_MARGIN_HEIGHT;
683 isp->sink_fmt.code = V4L2_MBUS_FMT_SGRBG10_1X10; 683 isp->sink_fmt.code = MEDIA_BUS_FMT_SGRBG10_1X10;
684 684
685 isp->src_fmt.width = DEFAULT_PREVIEW_STILL_WIDTH; 685 isp->src_fmt.width = DEFAULT_PREVIEW_STILL_WIDTH;
686 isp->src_fmt.height = DEFAULT_PREVIEW_STILL_HEIGHT; 686 isp->src_fmt.height = DEFAULT_PREVIEW_STILL_HEIGHT;
687 isp->src_fmt.code = V4L2_MBUS_FMT_SGRBG10_1X10; 687 isp->src_fmt.code = MEDIA_BUS_FMT_SGRBG10_1X10;
688 __is_set_frame_size(is, &isp->src_fmt); 688 __is_set_frame_size(is, &isp->src_fmt);
689} 689}
690 690
diff --git a/drivers/media/platform/exynos4-is/fimc-lite-reg.c b/drivers/media/platform/exynos4-is/fimc-lite-reg.c
index bc3ec7d25a32..0477716a20db 100644
--- a/drivers/media/platform/exynos4-is/fimc-lite-reg.c
+++ b/drivers/media/platform/exynos4-is/fimc-lite-reg.c
@@ -112,24 +112,24 @@ void flite_hw_set_test_pattern(struct fimc_lite *dev, bool on)
112} 112}
113 113
114static const u32 src_pixfmt_map[8][3] = { 114static const u32 src_pixfmt_map[8][3] = {
115 { V4L2_MBUS_FMT_YUYV8_2X8, FLITE_REG_CISRCSIZE_ORDER422_IN_YCBYCR, 115 { MEDIA_BUS_FMT_YUYV8_2X8, FLITE_REG_CISRCSIZE_ORDER422_IN_YCBYCR,
116 FLITE_REG_CIGCTRL_YUV422_1P }, 116 FLITE_REG_CIGCTRL_YUV422_1P },
117 { V4L2_MBUS_FMT_YVYU8_2X8, FLITE_REG_CISRCSIZE_ORDER422_IN_YCRYCB, 117 { MEDIA_BUS_FMT_YVYU8_2X8, FLITE_REG_CISRCSIZE_ORDER422_IN_YCRYCB,
118 FLITE_REG_CIGCTRL_YUV422_1P }, 118 FLITE_REG_CIGCTRL_YUV422_1P },
119 { V4L2_MBUS_FMT_UYVY8_2X8, FLITE_REG_CISRCSIZE_ORDER422_IN_CBYCRY, 119 { MEDIA_BUS_FMT_UYVY8_2X8, FLITE_REG_CISRCSIZE_ORDER422_IN_CBYCRY,
120 FLITE_REG_CIGCTRL_YUV422_1P }, 120 FLITE_REG_CIGCTRL_YUV422_1P },
121 { V4L2_MBUS_FMT_VYUY8_2X8, FLITE_REG_CISRCSIZE_ORDER422_IN_CRYCBY, 121 { MEDIA_BUS_FMT_VYUY8_2X8, FLITE_REG_CISRCSIZE_ORDER422_IN_CRYCBY,
122 FLITE_REG_CIGCTRL_YUV422_1P }, 122 FLITE_REG_CIGCTRL_YUV422_1P },
123 { V4L2_MBUS_FMT_SGRBG8_1X8, 0, FLITE_REG_CIGCTRL_RAW8 }, 123 { MEDIA_BUS_FMT_SGRBG8_1X8, 0, FLITE_REG_CIGCTRL_RAW8 },
124 { V4L2_MBUS_FMT_SGRBG10_1X10, 0, FLITE_REG_CIGCTRL_RAW10 }, 124 { MEDIA_BUS_FMT_SGRBG10_1X10, 0, FLITE_REG_CIGCTRL_RAW10 },
125 { V4L2_MBUS_FMT_SGRBG12_1X12, 0, FLITE_REG_CIGCTRL_RAW12 }, 125 { MEDIA_BUS_FMT_SGRBG12_1X12, 0, FLITE_REG_CIGCTRL_RAW12 },
126 { V4L2_MBUS_FMT_JPEG_1X8, 0, FLITE_REG_CIGCTRL_USER(1) }, 126 { MEDIA_BUS_FMT_JPEG_1X8, 0, FLITE_REG_CIGCTRL_USER(1) },
127}; 127};
128 128
129/* Set camera input pixel format and resolution */ 129/* Set camera input pixel format and resolution */
130void flite_hw_set_source_format(struct fimc_lite *dev, struct flite_frame *f) 130void flite_hw_set_source_format(struct fimc_lite *dev, struct flite_frame *f)
131{ 131{
132 enum v4l2_mbus_pixelcode pixelcode = f->fmt->mbus_code; 132 u32 pixelcode = f->fmt->mbus_code;
133 int i = ARRAY_SIZE(src_pixfmt_map); 133 int i = ARRAY_SIZE(src_pixfmt_map);
134 u32 cfg; 134 u32 cfg;
135 135
@@ -232,10 +232,10 @@ static void flite_hw_set_pack12(struct fimc_lite *dev, int on)
232static void flite_hw_set_out_order(struct fimc_lite *dev, struct flite_frame *f) 232static void flite_hw_set_out_order(struct fimc_lite *dev, struct flite_frame *f)
233{ 233{
234 static const u32 pixcode[4][2] = { 234 static const u32 pixcode[4][2] = {
235 { V4L2_MBUS_FMT_YUYV8_2X8, FLITE_REG_CIODMAFMT_YCBYCR }, 235 { MEDIA_BUS_FMT_YUYV8_2X8, FLITE_REG_CIODMAFMT_YCBYCR },
236 { V4L2_MBUS_FMT_YVYU8_2X8, FLITE_REG_CIODMAFMT_YCRYCB }, 236 { MEDIA_BUS_FMT_YVYU8_2X8, FLITE_REG_CIODMAFMT_YCRYCB },
237 { V4L2_MBUS_FMT_UYVY8_2X8, FLITE_REG_CIODMAFMT_CBYCRY }, 237 { MEDIA_BUS_FMT_UYVY8_2X8, FLITE_REG_CIODMAFMT_CBYCRY },
238 { V4L2_MBUS_FMT_VYUY8_2X8, FLITE_REG_CIODMAFMT_CRYCBY }, 238 { MEDIA_BUS_FMT_VYUY8_2X8, FLITE_REG_CIODMAFMT_CRYCBY },
239 }; 239 };
240 u32 cfg = readl(dev->regs + FLITE_REG_CIODMAFMT); 240 u32 cfg = readl(dev->regs + FLITE_REG_CIODMAFMT);
241 int i = ARRAY_SIZE(pixcode); 241 int i = ARRAY_SIZE(pixcode);
diff --git a/drivers/media/platform/exynos4-is/fimc-lite.c b/drivers/media/platform/exynos4-is/fimc-lite.c
index a97d2352f1d7..7e8687338155 100644
--- a/drivers/media/platform/exynos4-is/fimc-lite.c
+++ b/drivers/media/platform/exynos4-is/fimc-lite.c
@@ -48,7 +48,7 @@ static const struct fimc_fmt fimc_lite_formats[] = {
48 .depth = { 16 }, 48 .depth = { 16 },
49 .color = FIMC_FMT_YCBYCR422, 49 .color = FIMC_FMT_YCBYCR422,
50 .memplanes = 1, 50 .memplanes = 1,
51 .mbus_code = V4L2_MBUS_FMT_YUYV8_2X8, 51 .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
52 .flags = FMT_FLAGS_YUV, 52 .flags = FMT_FLAGS_YUV,
53 }, { 53 }, {
54 .name = "YUV 4:2:2 packed, CbYCrY", 54 .name = "YUV 4:2:2 packed, CbYCrY",
@@ -57,7 +57,7 @@ static const struct fimc_fmt fimc_lite_formats[] = {
57 .depth = { 16 }, 57 .depth = { 16 },
58 .color = FIMC_FMT_CBYCRY422, 58 .color = FIMC_FMT_CBYCRY422,
59 .memplanes = 1, 59 .memplanes = 1,
60 .mbus_code = V4L2_MBUS_FMT_UYVY8_2X8, 60 .mbus_code = MEDIA_BUS_FMT_UYVY8_2X8,
61 .flags = FMT_FLAGS_YUV, 61 .flags = FMT_FLAGS_YUV,
62 }, { 62 }, {
63 .name = "YUV 4:2:2 packed, CrYCbY", 63 .name = "YUV 4:2:2 packed, CrYCbY",
@@ -66,7 +66,7 @@ static const struct fimc_fmt fimc_lite_formats[] = {
66 .depth = { 16 }, 66 .depth = { 16 },
67 .color = FIMC_FMT_CRYCBY422, 67 .color = FIMC_FMT_CRYCBY422,
68 .memplanes = 1, 68 .memplanes = 1,
69 .mbus_code = V4L2_MBUS_FMT_VYUY8_2X8, 69 .mbus_code = MEDIA_BUS_FMT_VYUY8_2X8,
70 .flags = FMT_FLAGS_YUV, 70 .flags = FMT_FLAGS_YUV,
71 }, { 71 }, {
72 .name = "YUV 4:2:2 packed, YCrYCb", 72 .name = "YUV 4:2:2 packed, YCrYCb",
@@ -75,7 +75,7 @@ static const struct fimc_fmt fimc_lite_formats[] = {
75 .depth = { 16 }, 75 .depth = { 16 },
76 .color = FIMC_FMT_YCRYCB422, 76 .color = FIMC_FMT_YCRYCB422,
77 .memplanes = 1, 77 .memplanes = 1,
78 .mbus_code = V4L2_MBUS_FMT_YVYU8_2X8, 78 .mbus_code = MEDIA_BUS_FMT_YVYU8_2X8,
79 .flags = FMT_FLAGS_YUV, 79 .flags = FMT_FLAGS_YUV,
80 }, { 80 }, {
81 .name = "RAW8 (GRBG)", 81 .name = "RAW8 (GRBG)",
@@ -84,7 +84,7 @@ static const struct fimc_fmt fimc_lite_formats[] = {
84 .depth = { 8 }, 84 .depth = { 8 },
85 .color = FIMC_FMT_RAW8, 85 .color = FIMC_FMT_RAW8,
86 .memplanes = 1, 86 .memplanes = 1,
87 .mbus_code = V4L2_MBUS_FMT_SGRBG8_1X8, 87 .mbus_code = MEDIA_BUS_FMT_SGRBG8_1X8,
88 .flags = FMT_FLAGS_RAW_BAYER, 88 .flags = FMT_FLAGS_RAW_BAYER,
89 }, { 89 }, {
90 .name = "RAW10 (GRBG)", 90 .name = "RAW10 (GRBG)",
@@ -93,7 +93,7 @@ static const struct fimc_fmt fimc_lite_formats[] = {
93 .depth = { 16 }, 93 .depth = { 16 },
94 .color = FIMC_FMT_RAW10, 94 .color = FIMC_FMT_RAW10,
95 .memplanes = 1, 95 .memplanes = 1,
96 .mbus_code = V4L2_MBUS_FMT_SGRBG10_1X10, 96 .mbus_code = MEDIA_BUS_FMT_SGRBG10_1X10,
97 .flags = FMT_FLAGS_RAW_BAYER, 97 .flags = FMT_FLAGS_RAW_BAYER,
98 }, { 98 }, {
99 .name = "RAW12 (GRBG)", 99 .name = "RAW12 (GRBG)",
@@ -102,7 +102,7 @@ static const struct fimc_fmt fimc_lite_formats[] = {
102 .depth = { 16 }, 102 .depth = { 16 },
103 .color = FIMC_FMT_RAW12, 103 .color = FIMC_FMT_RAW12,
104 .memplanes = 1, 104 .memplanes = 1,
105 .mbus_code = V4L2_MBUS_FMT_SGRBG12_1X12, 105 .mbus_code = MEDIA_BUS_FMT_SGRBG12_1X12,
106 .flags = FMT_FLAGS_RAW_BAYER, 106 .flags = FMT_FLAGS_RAW_BAYER,
107 }, 107 },
108}; 108};
@@ -1588,7 +1588,7 @@ err_clk_put:
1588 return ret; 1588 return ret;
1589} 1589}
1590 1590
1591#ifdef CONFIG_PM_RUNTIME 1591#ifdef CONFIG_PM
1592static int fimc_lite_runtime_resume(struct device *dev) 1592static int fimc_lite_runtime_resume(struct device *dev)
1593{ 1593{
1594 struct fimc_lite *fimc = dev_get_drvdata(dev); 1594 struct fimc_lite *fimc = dev_get_drvdata(dev);
diff --git a/drivers/media/platform/exynos4-is/fimc-reg.c b/drivers/media/platform/exynos4-is/fimc-reg.c
index 2d77fd8f440a..df0cbcb69b6b 100644
--- a/drivers/media/platform/exynos4-is/fimc-reg.c
+++ b/drivers/media/platform/exynos4-is/fimc-reg.c
@@ -592,10 +592,10 @@ struct mbus_pixfmt_desc {
592}; 592};
593 593
594static const struct mbus_pixfmt_desc pix_desc[] = { 594static const struct mbus_pixfmt_desc pix_desc[] = {
595 { V4L2_MBUS_FMT_YUYV8_2X8, FIMC_REG_CISRCFMT_ORDER422_YCBYCR, 8 }, 595 { MEDIA_BUS_FMT_YUYV8_2X8, FIMC_REG_CISRCFMT_ORDER422_YCBYCR, 8 },
596 { V4L2_MBUS_FMT_YVYU8_2X8, FIMC_REG_CISRCFMT_ORDER422_YCRYCB, 8 }, 596 { MEDIA_BUS_FMT_YVYU8_2X8, FIMC_REG_CISRCFMT_ORDER422_YCRYCB, 8 },
597 { V4L2_MBUS_FMT_VYUY8_2X8, FIMC_REG_CISRCFMT_ORDER422_CRYCBY, 8 }, 597 { MEDIA_BUS_FMT_VYUY8_2X8, FIMC_REG_CISRCFMT_ORDER422_CRYCBY, 8 },
598 { V4L2_MBUS_FMT_UYVY8_2X8, FIMC_REG_CISRCFMT_ORDER422_CBYCRY, 8 }, 598 { MEDIA_BUS_FMT_UYVY8_2X8, FIMC_REG_CISRCFMT_ORDER422_CBYCRY, 8 },
599}; 599};
600 600
601int fimc_hw_set_camera_source(struct fimc_dev *fimc, 601int fimc_hw_set_camera_source(struct fimc_dev *fimc,
@@ -689,11 +689,11 @@ int fimc_hw_set_camera_type(struct fimc_dev *fimc,
689 689
690 /* TODO: add remaining supported formats. */ 690 /* TODO: add remaining supported formats. */
691 switch (vid_cap->ci_fmt.code) { 691 switch (vid_cap->ci_fmt.code) {
692 case V4L2_MBUS_FMT_VYUY8_2X8: 692 case MEDIA_BUS_FMT_VYUY8_2X8:
693 tmp = FIMC_REG_CSIIMGFMT_YCBCR422_8BIT; 693 tmp = FIMC_REG_CSIIMGFMT_YCBCR422_8BIT;
694 break; 694 break;
695 case V4L2_MBUS_FMT_JPEG_1X8: 695 case MEDIA_BUS_FMT_JPEG_1X8:
696 case V4L2_MBUS_FMT_S5C_UYVY_JPEG_1X8: 696 case MEDIA_BUS_FMT_S5C_UYVY_JPEG_1X8:
697 tmp = FIMC_REG_CSIIMGFMT_USER(1); 697 tmp = FIMC_REG_CSIIMGFMT_USER(1);
698 cfg |= FIMC_REG_CIGCTRL_CAM_JPEG; 698 cfg |= FIMC_REG_CIGCTRL_CAM_JPEG;
699 break; 699 break;
diff --git a/drivers/media/platform/exynos4-is/mipi-csis.c b/drivers/media/platform/exynos4-is/mipi-csis.c
index db6fd14d1936..9e1056d60c7e 100644
--- a/drivers/media/platform/exynos4-is/mipi-csis.c
+++ b/drivers/media/platform/exynos4-is/mipi-csis.c
@@ -238,34 +238,34 @@ struct csis_state {
238 */ 238 */
239struct csis_pix_format { 239struct csis_pix_format {
240 unsigned int pix_width_alignment; 240 unsigned int pix_width_alignment;
241 enum v4l2_mbus_pixelcode code; 241 u32 code;
242 u32 fmt_reg; 242 u32 fmt_reg;
243 u8 data_alignment; 243 u8 data_alignment;
244}; 244};
245 245
246static const struct csis_pix_format s5pcsis_formats[] = { 246static const struct csis_pix_format s5pcsis_formats[] = {
247 { 247 {
248 .code = V4L2_MBUS_FMT_VYUY8_2X8, 248 .code = MEDIA_BUS_FMT_VYUY8_2X8,
249 .fmt_reg = S5PCSIS_CFG_FMT_YCBCR422_8BIT, 249 .fmt_reg = S5PCSIS_CFG_FMT_YCBCR422_8BIT,
250 .data_alignment = 32, 250 .data_alignment = 32,
251 }, { 251 }, {
252 .code = V4L2_MBUS_FMT_JPEG_1X8, 252 .code = MEDIA_BUS_FMT_JPEG_1X8,
253 .fmt_reg = S5PCSIS_CFG_FMT_USER(1), 253 .fmt_reg = S5PCSIS_CFG_FMT_USER(1),
254 .data_alignment = 32, 254 .data_alignment = 32,
255 }, { 255 }, {
256 .code = V4L2_MBUS_FMT_S5C_UYVY_JPEG_1X8, 256 .code = MEDIA_BUS_FMT_S5C_UYVY_JPEG_1X8,
257 .fmt_reg = S5PCSIS_CFG_FMT_USER(1), 257 .fmt_reg = S5PCSIS_CFG_FMT_USER(1),
258 .data_alignment = 32, 258 .data_alignment = 32,
259 }, { 259 }, {
260 .code = V4L2_MBUS_FMT_SGRBG8_1X8, 260 .code = MEDIA_BUS_FMT_SGRBG8_1X8,
261 .fmt_reg = S5PCSIS_CFG_FMT_RAW8, 261 .fmt_reg = S5PCSIS_CFG_FMT_RAW8,
262 .data_alignment = 24, 262 .data_alignment = 24,
263 }, { 263 }, {
264 .code = V4L2_MBUS_FMT_SGRBG10_1X10, 264 .code = MEDIA_BUS_FMT_SGRBG10_1X10,
265 .fmt_reg = S5PCSIS_CFG_FMT_RAW10, 265 .fmt_reg = S5PCSIS_CFG_FMT_RAW10,
266 .data_alignment = 24, 266 .data_alignment = 24,
267 }, { 267 }, {
268 .code = V4L2_MBUS_FMT_SGRBG12_1X12, 268 .code = MEDIA_BUS_FMT_SGRBG12_1X12,
269 .fmt_reg = S5PCSIS_CFG_FMT_RAW12, 269 .fmt_reg = S5PCSIS_CFG_FMT_RAW12,
270 .data_alignment = 24, 270 .data_alignment = 24,
271 } 271 }
@@ -978,7 +978,7 @@ static int s5pcsis_resume(struct device *dev)
978} 978}
979#endif 979#endif
980 980
981#ifdef CONFIG_PM_RUNTIME 981#ifdef CONFIG_PM
982static int s5pcsis_runtime_suspend(struct device *dev) 982static int s5pcsis_runtime_suspend(struct device *dev)
983{ 983{
984 return s5pcsis_pm_suspend(dev, true); 984 return s5pcsis_pm_suspend(dev, true);
diff --git a/drivers/media/platform/fsl-viu.c b/drivers/media/platform/fsl-viu.c
index d5dc198502ef..8afee3c17c1c 100644
--- a/drivers/media/platform/fsl-viu.c
+++ b/drivers/media/platform/fsl-viu.c
@@ -604,10 +604,11 @@ static int vidioc_querycap(struct file *file, void *priv,
604{ 604{
605 strcpy(cap->driver, "viu"); 605 strcpy(cap->driver, "viu");
606 strcpy(cap->card, "viu"); 606 strcpy(cap->card, "viu");
607 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | 607 cap->device_caps = V4L2_CAP_VIDEO_CAPTURE |
608 V4L2_CAP_STREAMING | 608 V4L2_CAP_STREAMING |
609 V4L2_CAP_VIDEO_OVERLAY | 609 V4L2_CAP_VIDEO_OVERLAY |
610 V4L2_CAP_READWRITE; 610 V4L2_CAP_READWRITE;
611 cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
611 return 0; 612 return 0;
612} 613}
613 614
diff --git a/drivers/media/platform/marvell-ccic/mcam-core.c b/drivers/media/platform/marvell-ccic/mcam-core.c
index 7a86c77bffa0..193373ff268d 100644
--- a/drivers/media/platform/marvell-ccic/mcam-core.c
+++ b/drivers/media/platform/marvell-ccic/mcam-core.c
@@ -106,61 +106,61 @@ static struct mcam_format_struct {
106 __u32 pixelformat; 106 __u32 pixelformat;
107 int bpp; /* Bytes per pixel */ 107 int bpp; /* Bytes per pixel */
108 bool planar; 108 bool planar;
109 enum v4l2_mbus_pixelcode mbus_code; 109 u32 mbus_code;
110} mcam_formats[] = { 110} mcam_formats[] = {
111 { 111 {
112 .desc = "YUYV 4:2:2", 112 .desc = "YUYV 4:2:2",
113 .pixelformat = V4L2_PIX_FMT_YUYV, 113 .pixelformat = V4L2_PIX_FMT_YUYV,
114 .mbus_code = V4L2_MBUS_FMT_YUYV8_2X8, 114 .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
115 .bpp = 2, 115 .bpp = 2,
116 .planar = false, 116 .planar = false,
117 }, 117 },
118 { 118 {
119 .desc = "UYVY 4:2:2", 119 .desc = "UYVY 4:2:2",
120 .pixelformat = V4L2_PIX_FMT_UYVY, 120 .pixelformat = V4L2_PIX_FMT_UYVY,
121 .mbus_code = V4L2_MBUS_FMT_YUYV8_2X8, 121 .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
122 .bpp = 2, 122 .bpp = 2,
123 .planar = false, 123 .planar = false,
124 }, 124 },
125 { 125 {
126 .desc = "YUV 4:2:2 PLANAR", 126 .desc = "YUV 4:2:2 PLANAR",
127 .pixelformat = V4L2_PIX_FMT_YUV422P, 127 .pixelformat = V4L2_PIX_FMT_YUV422P,
128 .mbus_code = V4L2_MBUS_FMT_YUYV8_2X8, 128 .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
129 .bpp = 2, 129 .bpp = 2,
130 .planar = true, 130 .planar = true,
131 }, 131 },
132 { 132 {
133 .desc = "YUV 4:2:0 PLANAR", 133 .desc = "YUV 4:2:0 PLANAR",
134 .pixelformat = V4L2_PIX_FMT_YUV420, 134 .pixelformat = V4L2_PIX_FMT_YUV420,
135 .mbus_code = V4L2_MBUS_FMT_YUYV8_2X8, 135 .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
136 .bpp = 2, 136 .bpp = 2,
137 .planar = true, 137 .planar = true,
138 }, 138 },
139 { 139 {
140 .desc = "YVU 4:2:0 PLANAR", 140 .desc = "YVU 4:2:0 PLANAR",
141 .pixelformat = V4L2_PIX_FMT_YVU420, 141 .pixelformat = V4L2_PIX_FMT_YVU420,
142 .mbus_code = V4L2_MBUS_FMT_YUYV8_2X8, 142 .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
143 .bpp = 2, 143 .bpp = 2,
144 .planar = true, 144 .planar = true,
145 }, 145 },
146 { 146 {
147 .desc = "RGB 444", 147 .desc = "RGB 444",
148 .pixelformat = V4L2_PIX_FMT_RGB444, 148 .pixelformat = V4L2_PIX_FMT_RGB444,
149 .mbus_code = V4L2_MBUS_FMT_RGB444_2X8_PADHI_LE, 149 .mbus_code = MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE,
150 .bpp = 2, 150 .bpp = 2,
151 .planar = false, 151 .planar = false,
152 }, 152 },
153 { 153 {
154 .desc = "RGB 565", 154 .desc = "RGB 565",
155 .pixelformat = V4L2_PIX_FMT_RGB565, 155 .pixelformat = V4L2_PIX_FMT_RGB565,
156 .mbus_code = V4L2_MBUS_FMT_RGB565_2X8_LE, 156 .mbus_code = MEDIA_BUS_FMT_RGB565_2X8_LE,
157 .bpp = 2, 157 .bpp = 2,
158 .planar = false, 158 .planar = false,
159 }, 159 },
160 { 160 {
161 .desc = "Raw RGB Bayer", 161 .desc = "Raw RGB Bayer",
162 .pixelformat = V4L2_PIX_FMT_SBGGR8, 162 .pixelformat = V4L2_PIX_FMT_SBGGR8,
163 .mbus_code = V4L2_MBUS_FMT_SBGGR8_1X8, 163 .mbus_code = MEDIA_BUS_FMT_SBGGR8_1X8,
164 .bpp = 1, 164 .bpp = 1,
165 .planar = false, 165 .planar = false,
166 }, 166 },
@@ -190,8 +190,7 @@ static const struct v4l2_pix_format mcam_def_pix_format = {
190 .sizeimage = VGA_WIDTH*VGA_HEIGHT*2, 190 .sizeimage = VGA_WIDTH*VGA_HEIGHT*2,
191}; 191};
192 192
193static const enum v4l2_mbus_pixelcode mcam_def_mbus_code = 193static const u32 mcam_def_mbus_code = MEDIA_BUS_FMT_YUYV8_2X8;
194 V4L2_MBUS_FMT_YUYV8_2X8;
195 194
196 195
197/* 196/*
@@ -1080,6 +1079,8 @@ static int mcam_vb_queue_setup(struct vb2_queue *vq,
1080 *nbufs = minbufs; 1079 *nbufs = minbufs;
1081 if (cam->buffer_mode == B_DMA_contig) 1080 if (cam->buffer_mode == B_DMA_contig)
1082 alloc_ctxs[0] = cam->vb_alloc_ctx; 1081 alloc_ctxs[0] = cam->vb_alloc_ctx;
1082 else if (cam->buffer_mode == B_DMA_sg)
1083 alloc_ctxs[0] = cam->vb_alloc_ctx_sg;
1083 return 0; 1084 return 0;
1084} 1085}
1085 1086
@@ -1101,26 +1102,6 @@ static void mcam_vb_buf_queue(struct vb2_buffer *vb)
1101 mcam_read_setup(cam); 1102 mcam_read_setup(cam);
1102} 1103}
1103 1104
1104
1105/*
1106 * vb2 uses these to release the mutex when waiting in dqbuf. I'm
1107 * not actually sure we need to do this (I'm not sure that vb2_dqbuf() needs
1108 * to be called with the mutex held), but better safe than sorry.
1109 */
1110static void mcam_vb_wait_prepare(struct vb2_queue *vq)
1111{
1112 struct mcam_camera *cam = vb2_get_drv_priv(vq);
1113
1114 mutex_unlock(&cam->s_mutex);
1115}
1116
1117static void mcam_vb_wait_finish(struct vb2_queue *vq)
1118{
1119 struct mcam_camera *cam = vb2_get_drv_priv(vq);
1120
1121 mutex_lock(&cam->s_mutex);
1122}
1123
1124/* 1105/*
1125 * These need to be called with the mutex held from vb2 1106 * These need to be called with the mutex held from vb2
1126 */ 1107 */
@@ -1190,8 +1171,8 @@ static const struct vb2_ops mcam_vb2_ops = {
1190 .buf_queue = mcam_vb_buf_queue, 1171 .buf_queue = mcam_vb_buf_queue,
1191 .start_streaming = mcam_vb_start_streaming, 1172 .start_streaming = mcam_vb_start_streaming,
1192 .stop_streaming = mcam_vb_stop_streaming, 1173 .stop_streaming = mcam_vb_stop_streaming,
1193 .wait_prepare = mcam_vb_wait_prepare, 1174 .wait_prepare = vb2_ops_wait_prepare,
1194 .wait_finish = mcam_vb_wait_finish, 1175 .wait_finish = vb2_ops_wait_finish,
1195}; 1176};
1196 1177
1197 1178
@@ -1219,17 +1200,12 @@ static int mcam_vb_sg_buf_init(struct vb2_buffer *vb)
1219static int mcam_vb_sg_buf_prepare(struct vb2_buffer *vb) 1200static int mcam_vb_sg_buf_prepare(struct vb2_buffer *vb)
1220{ 1201{
1221 struct mcam_vb_buffer *mvb = vb_to_mvb(vb); 1202 struct mcam_vb_buffer *mvb = vb_to_mvb(vb);
1222 struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue);
1223 struct sg_table *sg_table = vb2_dma_sg_plane_desc(vb, 0); 1203 struct sg_table *sg_table = vb2_dma_sg_plane_desc(vb, 0);
1224 struct mcam_dma_desc *desc = mvb->dma_desc; 1204 struct mcam_dma_desc *desc = mvb->dma_desc;
1225 struct scatterlist *sg; 1205 struct scatterlist *sg;
1226 int i; 1206 int i;
1227 1207
1228 mvb->dma_desc_nent = dma_map_sg(cam->dev, sg_table->sgl, 1208 for_each_sg(sg_table->sgl, sg, sg_table->nents, i) {
1229 sg_table->nents, DMA_FROM_DEVICE);
1230 if (mvb->dma_desc_nent <= 0)
1231 return -EIO; /* Not sure what's right here */
1232 for_each_sg(sg_table->sgl, sg, mvb->dma_desc_nent, i) {
1233 desc->dma_addr = sg_dma_address(sg); 1209 desc->dma_addr = sg_dma_address(sg);
1234 desc->segment_len = sg_dma_len(sg); 1210 desc->segment_len = sg_dma_len(sg);
1235 desc++; 1211 desc++;
@@ -1237,16 +1213,6 @@ static int mcam_vb_sg_buf_prepare(struct vb2_buffer *vb)
1237 return 0; 1213 return 0;
1238} 1214}
1239 1215
1240static void mcam_vb_sg_buf_finish(struct vb2_buffer *vb)
1241{
1242 struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue);
1243 struct sg_table *sg_table = vb2_dma_sg_plane_desc(vb, 0);
1244
1245 if (sg_table)
1246 dma_unmap_sg(cam->dev, sg_table->sgl,
1247 sg_table->nents, DMA_FROM_DEVICE);
1248}
1249
1250static void mcam_vb_sg_buf_cleanup(struct vb2_buffer *vb) 1216static void mcam_vb_sg_buf_cleanup(struct vb2_buffer *vb)
1251{ 1217{
1252 struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue); 1218 struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue);
@@ -1263,12 +1229,11 @@ static const struct vb2_ops mcam_vb2_sg_ops = {
1263 .buf_init = mcam_vb_sg_buf_init, 1229 .buf_init = mcam_vb_sg_buf_init,
1264 .buf_prepare = mcam_vb_sg_buf_prepare, 1230 .buf_prepare = mcam_vb_sg_buf_prepare,
1265 .buf_queue = mcam_vb_buf_queue, 1231 .buf_queue = mcam_vb_buf_queue,
1266 .buf_finish = mcam_vb_sg_buf_finish,
1267 .buf_cleanup = mcam_vb_sg_buf_cleanup, 1232 .buf_cleanup = mcam_vb_sg_buf_cleanup,
1268 .start_streaming = mcam_vb_start_streaming, 1233 .start_streaming = mcam_vb_start_streaming,
1269 .stop_streaming = mcam_vb_stop_streaming, 1234 .stop_streaming = mcam_vb_stop_streaming,
1270 .wait_prepare = mcam_vb_wait_prepare, 1235 .wait_prepare = vb2_ops_wait_prepare,
1271 .wait_finish = mcam_vb_wait_finish, 1236 .wait_finish = vb2_ops_wait_finish,
1272}; 1237};
1273 1238
1274#endif /* MCAM_MODE_DMA_SG */ 1239#endif /* MCAM_MODE_DMA_SG */
@@ -1280,6 +1245,7 @@ static int mcam_setup_vb2(struct mcam_camera *cam)
1280 memset(vq, 0, sizeof(*vq)); 1245 memset(vq, 0, sizeof(*vq));
1281 vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; 1246 vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
1282 vq->drv_priv = cam; 1247 vq->drv_priv = cam;
1248 vq->lock = &cam->s_mutex;
1283 INIT_LIST_HEAD(&cam->buffers); 1249 INIT_LIST_HEAD(&cam->buffers);
1284 switch (cam->buffer_mode) { 1250 switch (cam->buffer_mode) {
1285 case B_DMA_contig: 1251 case B_DMA_contig:
@@ -1287,10 +1253,12 @@ static int mcam_setup_vb2(struct mcam_camera *cam)
1287 vq->ops = &mcam_vb2_ops; 1253 vq->ops = &mcam_vb2_ops;
1288 vq->mem_ops = &vb2_dma_contig_memops; 1254 vq->mem_ops = &vb2_dma_contig_memops;
1289 vq->buf_struct_size = sizeof(struct mcam_vb_buffer); 1255 vq->buf_struct_size = sizeof(struct mcam_vb_buffer);
1290 cam->vb_alloc_ctx = vb2_dma_contig_init_ctx(cam->dev);
1291 vq->io_modes = VB2_MMAP | VB2_USERPTR; 1256 vq->io_modes = VB2_MMAP | VB2_USERPTR;
1292 cam->dma_setup = mcam_ctlr_dma_contig; 1257 cam->dma_setup = mcam_ctlr_dma_contig;
1293 cam->frame_complete = mcam_dma_contig_done; 1258 cam->frame_complete = mcam_dma_contig_done;
1259 cam->vb_alloc_ctx = vb2_dma_contig_init_ctx(cam->dev);
1260 if (IS_ERR(cam->vb_alloc_ctx))
1261 return PTR_ERR(cam->vb_alloc_ctx);
1294#endif 1262#endif
1295 break; 1263 break;
1296 case B_DMA_sg: 1264 case B_DMA_sg:
@@ -1301,6 +1269,9 @@ static int mcam_setup_vb2(struct mcam_camera *cam)
1301 vq->io_modes = VB2_MMAP | VB2_USERPTR; 1269 vq->io_modes = VB2_MMAP | VB2_USERPTR;
1302 cam->dma_setup = mcam_ctlr_dma_sg; 1270 cam->dma_setup = mcam_ctlr_dma_sg;
1303 cam->frame_complete = mcam_dma_sg_done; 1271 cam->frame_complete = mcam_dma_sg_done;
1272 cam->vb_alloc_ctx_sg = vb2_dma_sg_init_ctx(cam->dev);
1273 if (IS_ERR(cam->vb_alloc_ctx_sg))
1274 return PTR_ERR(cam->vb_alloc_ctx_sg);
1304#endif 1275#endif
1305 break; 1276 break;
1306 case B_vmalloc: 1277 case B_vmalloc:
@@ -1326,6 +1297,10 @@ static void mcam_cleanup_vb2(struct mcam_camera *cam)
1326 if (cam->buffer_mode == B_DMA_contig) 1297 if (cam->buffer_mode == B_DMA_contig)
1327 vb2_dma_contig_cleanup_ctx(cam->vb_alloc_ctx); 1298 vb2_dma_contig_cleanup_ctx(cam->vb_alloc_ctx);
1328#endif 1299#endif
1300#ifdef MCAM_MODE_DMA_SG
1301 if (cam->buffer_mode == B_DMA_sg)
1302 vb2_dma_sg_cleanup_ctx(cam->vb_alloc_ctx_sg);
1303#endif
1329} 1304}
1330 1305
1331 1306
@@ -1414,9 +1389,9 @@ static int mcam_vidioc_querycap(struct file *file, void *priv,
1414{ 1389{
1415 strcpy(cap->driver, "marvell_ccic"); 1390 strcpy(cap->driver, "marvell_ccic");
1416 strcpy(cap->card, "marvell_ccic"); 1391 strcpy(cap->card, "marvell_ccic");
1417 cap->version = 1; 1392 cap->device_caps = V4L2_CAP_VIDEO_CAPTURE |
1418 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE |
1419 V4L2_CAP_READWRITE | V4L2_CAP_STREAMING; 1393 V4L2_CAP_READWRITE | V4L2_CAP_STREAMING;
1394 cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
1420 return 0; 1395 return 0;
1421} 1396}
1422 1397
diff --git a/drivers/media/platform/marvell-ccic/mcam-core.h b/drivers/media/platform/marvell-ccic/mcam-core.h
index e0e628cb98f9..aa0c6eac254a 100644
--- a/drivers/media/platform/marvell-ccic/mcam-core.h
+++ b/drivers/media/platform/marvell-ccic/mcam-core.h
@@ -176,6 +176,7 @@ struct mcam_camera {
176 /* DMA buffers - DMA modes */ 176 /* DMA buffers - DMA modes */
177 struct mcam_vb_buffer *vb_bufs[MAX_DMA_BUFS]; 177 struct mcam_vb_buffer *vb_bufs[MAX_DMA_BUFS];
178 struct vb2_alloc_ctx *vb_alloc_ctx; 178 struct vb2_alloc_ctx *vb_alloc_ctx;
179 struct vb2_alloc_ctx *vb_alloc_ctx_sg;
179 180
180 /* Mode-specific ops, set at open time */ 181 /* Mode-specific ops, set at open time */
181 void (*dma_setup)(struct mcam_camera *cam); 182 void (*dma_setup)(struct mcam_camera *cam);
@@ -183,7 +184,7 @@ struct mcam_camera {
183 184
184 /* Current operating parameters */ 185 /* Current operating parameters */
185 struct v4l2_pix_format pix_format; 186 struct v4l2_pix_format pix_format;
186 enum v4l2_mbus_pixelcode mbus_code; 187 u32 mbus_code;
187 188
188 /* Locks */ 189 /* Locks */
189 struct mutex s_mutex; /* Access to this structure */ 190 struct mutex s_mutex; /* Access to this structure */
diff --git a/drivers/media/platform/mx2_emmaprp.c b/drivers/media/platform/mx2_emmaprp.c
index 4971ff21f82b..f923d1bc43a0 100644
--- a/drivers/media/platform/mx2_emmaprp.c
+++ b/drivers/media/platform/mx2_emmaprp.c
@@ -402,13 +402,8 @@ static int vidioc_querycap(struct file *file, void *priv,
402{ 402{
403 strncpy(cap->driver, MEM2MEM_NAME, sizeof(cap->driver) - 1); 403 strncpy(cap->driver, MEM2MEM_NAME, sizeof(cap->driver) - 1);
404 strncpy(cap->card, MEM2MEM_NAME, sizeof(cap->card) - 1); 404 strncpy(cap->card, MEM2MEM_NAME, sizeof(cap->card) - 1);
405 /* 405 cap->device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING;
406 * This is only a mem-to-mem video device. The capture and output 406 cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
407 * device capability flags are left only for backward compatibility
408 * and are scheduled for removal.
409 */
410 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT |
411 V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING;
412 return 0; 407 return 0;
413} 408}
414 409
diff --git a/drivers/media/platform/omap/Kconfig b/drivers/media/platform/omap/Kconfig
index 05de442d24e4..dc2aaab54aef 100644
--- a/drivers/media/platform/omap/Kconfig
+++ b/drivers/media/platform/omap/Kconfig
@@ -3,7 +3,8 @@ config VIDEO_OMAP2_VOUT_VRFB
3 3
4config VIDEO_OMAP2_VOUT 4config VIDEO_OMAP2_VOUT
5 tristate "OMAP2/OMAP3 V4L2-Display driver" 5 tristate "OMAP2/OMAP3 V4L2-Display driver"
6 depends on ARCH_OMAP2 || ARCH_OMAP3 || (COMPILE_TEST && HAS_MMU) 6 depends on MMU
7 depends on ARCH_OMAP2 || ARCH_OMAP3
7 select VIDEOBUF_GEN 8 select VIDEOBUF_GEN
8 select VIDEOBUF_DMA_CONTIG 9 select VIDEOBUF_DMA_CONTIG
9 select OMAP2_DSS if HAS_IOMEM && ARCH_OMAP2PLUS 10 select OMAP2_DSS if HAS_IOMEM && ARCH_OMAP2PLUS
diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
index 64ab6fb06b9c..ba2d8f973d58 100644
--- a/drivers/media/platform/omap/omap_vout.c
+++ b/drivers/media/platform/omap/omap_vout.c
@@ -198,7 +198,7 @@ static int omap_vout_try_format(struct v4l2_pix_format *pix)
198 * omap_vout_uservirt_to_phys: This inline function is used to convert user 198 * omap_vout_uservirt_to_phys: This inline function is used to convert user
199 * space virtual address to physical address. 199 * space virtual address to physical address.
200 */ 200 */
201static u32 omap_vout_uservirt_to_phys(u32 virtp) 201static unsigned long omap_vout_uservirt_to_phys(unsigned long virtp)
202{ 202{
203 unsigned long physp = 0; 203 unsigned long physp = 0;
204 struct vm_area_struct *vma; 204 struct vm_area_struct *vma;
@@ -418,10 +418,10 @@ static int omapvid_setup_overlay(struct omap_vout_device *vout,
418 } 418 }
419 419
420 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, 420 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev,
421 "%s enable=%d addr=%x width=%d\n height=%d color_mode=%d\n" 421 "%s enable=%d addr=%pad width=%d\n height=%d color_mode=%d\n"
422 "rotation=%d mirror=%d posx=%d posy=%d out_width = %d \n" 422 "rotation=%d mirror=%d posx=%d posy=%d out_width = %d \n"
423 "out_height=%d rotation_type=%d screen_width=%d\n", 423 "out_height=%d rotation_type=%d screen_width=%d\n",
424 __func__, ovl->is_enabled(ovl), info.paddr, info.width, info.height, 424 __func__, ovl->is_enabled(ovl), &info.paddr, info.width, info.height,
425 info.color_mode, info.rotation, info.mirror, info.pos_x, 425 info.color_mode, info.rotation, info.mirror, info.pos_x,
426 info.pos_y, info.out_width, info.out_height, info.rotation_type, 426 info.pos_y, info.out_width, info.out_height, info.rotation_type,
427 info.screen_width); 427 info.screen_width);
@@ -794,7 +794,7 @@ static int omap_vout_buffer_prepare(struct videobuf_queue *q,
794 vout->queued_buf_addr[vb->i] = (u8 *) 794 vout->queued_buf_addr[vb->i] = (u8 *)
795 omap_vout_uservirt_to_phys(vb->baddr); 795 omap_vout_uservirt_to_phys(vb->baddr);
796 } else { 796 } else {
797 u32 addr, dma_addr; 797 unsigned long addr, dma_addr;
798 unsigned long size; 798 unsigned long size;
799 799
800 addr = (unsigned long) vout->buf_virt_addr[vb->i]; 800 addr = (unsigned long) vout->buf_virt_addr[vb->i];
@@ -1054,8 +1054,9 @@ static int vidioc_querycap(struct file *file, void *fh,
1054 strlcpy(cap->driver, VOUT_NAME, sizeof(cap->driver)); 1054 strlcpy(cap->driver, VOUT_NAME, sizeof(cap->driver));
1055 strlcpy(cap->card, vout->vfd->name, sizeof(cap->card)); 1055 strlcpy(cap->card, vout->vfd->name, sizeof(cap->card));
1056 cap->bus_info[0] = '\0'; 1056 cap->bus_info[0] = '\0';
1057 cap->capabilities = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_OUTPUT | 1057 cap->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_OUTPUT |
1058 V4L2_CAP_VIDEO_OUTPUT_OVERLAY; 1058 V4L2_CAP_VIDEO_OUTPUT_OVERLAY;
1059 cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
1059 1060
1060 return 0; 1061 return 0;
1061} 1062}
diff --git a/drivers/media/platform/omap3isp/ispccdc.c b/drivers/media/platform/omap3isp/ispccdc.c
index 81a9dc053d58..587489a072d5 100644
--- a/drivers/media/platform/omap3isp/ispccdc.c
+++ b/drivers/media/platform/omap3isp/ispccdc.c
@@ -36,23 +36,23 @@ __ccdc_get_format(struct isp_ccdc_device *ccdc, struct v4l2_subdev_fh *fh,
36 unsigned int pad, enum v4l2_subdev_format_whence which); 36 unsigned int pad, enum v4l2_subdev_format_whence which);
37 37
38static const unsigned int ccdc_fmts[] = { 38static const unsigned int ccdc_fmts[] = {
39 V4L2_MBUS_FMT_Y8_1X8, 39 MEDIA_BUS_FMT_Y8_1X8,
40 V4L2_MBUS_FMT_Y10_1X10, 40 MEDIA_BUS_FMT_Y10_1X10,
41 V4L2_MBUS_FMT_Y12_1X12, 41 MEDIA_BUS_FMT_Y12_1X12,
42 V4L2_MBUS_FMT_SGRBG8_1X8, 42 MEDIA_BUS_FMT_SGRBG8_1X8,
43 V4L2_MBUS_FMT_SRGGB8_1X8, 43 MEDIA_BUS_FMT_SRGGB8_1X8,
44 V4L2_MBUS_FMT_SBGGR8_1X8, 44 MEDIA_BUS_FMT_SBGGR8_1X8,
45 V4L2_MBUS_FMT_SGBRG8_1X8, 45 MEDIA_BUS_FMT_SGBRG8_1X8,
46 V4L2_MBUS_FMT_SGRBG10_1X10, 46 MEDIA_BUS_FMT_SGRBG10_1X10,
47 V4L2_MBUS_FMT_SRGGB10_1X10, 47 MEDIA_BUS_FMT_SRGGB10_1X10,
48 V4L2_MBUS_FMT_SBGGR10_1X10, 48 MEDIA_BUS_FMT_SBGGR10_1X10,
49 V4L2_MBUS_FMT_SGBRG10_1X10, 49 MEDIA_BUS_FMT_SGBRG10_1X10,
50 V4L2_MBUS_FMT_SGRBG12_1X12, 50 MEDIA_BUS_FMT_SGRBG12_1X12,
51 V4L2_MBUS_FMT_SRGGB12_1X12, 51 MEDIA_BUS_FMT_SRGGB12_1X12,
52 V4L2_MBUS_FMT_SBGGR12_1X12, 52 MEDIA_BUS_FMT_SBGGR12_1X12,
53 V4L2_MBUS_FMT_SGBRG12_1X12, 53 MEDIA_BUS_FMT_SGBRG12_1X12,
54 V4L2_MBUS_FMT_YUYV8_2X8, 54 MEDIA_BUS_FMT_YUYV8_2X8,
55 V4L2_MBUS_FMT_UYVY8_2X8, 55 MEDIA_BUS_FMT_UYVY8_2X8,
56}; 56};
57 57
58/* 58/*
@@ -266,10 +266,10 @@ static int __ccdc_lsc_enable(struct isp_ccdc_device *ccdc, int enable)
266 __ccdc_get_format(ccdc, NULL, CCDC_PAD_SINK, 266 __ccdc_get_format(ccdc, NULL, CCDC_PAD_SINK,
267 V4L2_SUBDEV_FORMAT_ACTIVE); 267 V4L2_SUBDEV_FORMAT_ACTIVE);
268 268
269 if ((format->code != V4L2_MBUS_FMT_SGRBG10_1X10) && 269 if ((format->code != MEDIA_BUS_FMT_SGRBG10_1X10) &&
270 (format->code != V4L2_MBUS_FMT_SRGGB10_1X10) && 270 (format->code != MEDIA_BUS_FMT_SRGGB10_1X10) &&
271 (format->code != V4L2_MBUS_FMT_SBGGR10_1X10) && 271 (format->code != MEDIA_BUS_FMT_SBGGR10_1X10) &&
272 (format->code != V4L2_MBUS_FMT_SGBRG10_1X10)) 272 (format->code != MEDIA_BUS_FMT_SGBRG10_1X10))
273 return -EINVAL; 273 return -EINVAL;
274 274
275 if (enable) 275 if (enable)
@@ -971,8 +971,8 @@ static void ccdc_config_sync_if(struct isp_ccdc_device *ccdc,
971 971
972 format = &ccdc->formats[CCDC_PAD_SINK]; 972 format = &ccdc->formats[CCDC_PAD_SINK];
973 973
974 if (format->code == V4L2_MBUS_FMT_YUYV8_2X8 || 974 if (format->code == MEDIA_BUS_FMT_YUYV8_2X8 ||
975 format->code == V4L2_MBUS_FMT_UYVY8_2X8) { 975 format->code == MEDIA_BUS_FMT_UYVY8_2X8) {
976 /* According to the OMAP3 TRM the input mode only affects SYNC 976 /* According to the OMAP3 TRM the input mode only affects SYNC
977 * mode, enabling BT.656 mode should take precedence. However, 977 * mode, enabling BT.656 mode should take precedence. However,
978 * in practice setting the input mode to YCbCr data on 8 bits 978 * in practice setting the input mode to YCbCr data on 8 bits
@@ -1020,7 +1020,7 @@ static void ccdc_config_sync_if(struct isp_ccdc_device *ccdc,
1020 /* The CCDC_CFG.Y8POS bit is used in YCbCr8 input mode only. The 1020 /* The CCDC_CFG.Y8POS bit is used in YCbCr8 input mode only. The
1021 * hardware seems to ignore it in all other input modes. 1021 * hardware seems to ignore it in all other input modes.
1022 */ 1022 */
1023 if (format->code == V4L2_MBUS_FMT_UYVY8_2X8) 1023 if (format->code == MEDIA_BUS_FMT_UYVY8_2X8)
1024 isp_reg_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CFG, 1024 isp_reg_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CFG,
1025 ISPCCDC_CFG_Y8POS); 1025 ISPCCDC_CFG_Y8POS);
1026 else 1026 else
@@ -1168,9 +1168,9 @@ static void ccdc_configure(struct isp_ccdc_device *ccdc)
1168 1168
1169 if (ccdc->bt656) 1169 if (ccdc->bt656)
1170 bridge = ISPCTRL_PAR_BRIDGE_DISABLE; 1170 bridge = ISPCTRL_PAR_BRIDGE_DISABLE;
1171 else if (fmt_info->code == V4L2_MBUS_FMT_YUYV8_2X8) 1171 else if (fmt_info->code == MEDIA_BUS_FMT_YUYV8_2X8)
1172 bridge = ISPCTRL_PAR_BRIDGE_LENDIAN; 1172 bridge = ISPCTRL_PAR_BRIDGE_LENDIAN;
1173 else if (fmt_info->code == V4L2_MBUS_FMT_UYVY8_2X8) 1173 else if (fmt_info->code == MEDIA_BUS_FMT_UYVY8_2X8)
1174 bridge = ISPCTRL_PAR_BRIDGE_BENDIAN; 1174 bridge = ISPCTRL_PAR_BRIDGE_BENDIAN;
1175 else 1175 else
1176 bridge = ISPCTRL_PAR_BRIDGE_DISABLE; 1176 bridge = ISPCTRL_PAR_BRIDGE_DISABLE;
@@ -1199,16 +1199,16 @@ static void ccdc_configure(struct isp_ccdc_device *ccdc)
1199 1199
1200 /* Mosaic filter */ 1200 /* Mosaic filter */
1201 switch (format->code) { 1201 switch (format->code) {
1202 case V4L2_MBUS_FMT_SRGGB10_1X10: 1202 case MEDIA_BUS_FMT_SRGGB10_1X10:
1203 case V4L2_MBUS_FMT_SRGGB12_1X12: 1203 case MEDIA_BUS_FMT_SRGGB12_1X12:
1204 ccdc_pattern = ccdc_srggb_pattern; 1204 ccdc_pattern = ccdc_srggb_pattern;
1205 break; 1205 break;
1206 case V4L2_MBUS_FMT_SBGGR10_1X10: 1206 case MEDIA_BUS_FMT_SBGGR10_1X10:
1207 case V4L2_MBUS_FMT_SBGGR12_1X12: 1207 case MEDIA_BUS_FMT_SBGGR12_1X12:
1208 ccdc_pattern = ccdc_sbggr_pattern; 1208 ccdc_pattern = ccdc_sbggr_pattern;
1209 break; 1209 break;
1210 case V4L2_MBUS_FMT_SGBRG10_1X10: 1210 case MEDIA_BUS_FMT_SGBRG10_1X10:
1211 case V4L2_MBUS_FMT_SGBRG12_1X12: 1211 case MEDIA_BUS_FMT_SGBRG12_1X12:
1212 ccdc_pattern = ccdc_sgbrg_pattern; 1212 ccdc_pattern = ccdc_sgbrg_pattern;
1213 break; 1213 break;
1214 default: 1214 default:
@@ -1267,7 +1267,7 @@ static void ccdc_configure(struct isp_ccdc_device *ccdc)
1267 /* The CCDC outputs data in UYVY order by default. Swap bytes to get 1267 /* The CCDC outputs data in UYVY order by default. Swap bytes to get
1268 * YUYV. 1268 * YUYV.
1269 */ 1269 */
1270 if (format->code == V4L2_MBUS_FMT_YUYV8_1X16) 1270 if (format->code == MEDIA_BUS_FMT_YUYV8_1X16)
1271 isp_reg_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CFG, 1271 isp_reg_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CFG,
1272 ISPCCDC_CFG_BSWD); 1272 ISPCCDC_CFG_BSWD);
1273 else 1273 else
@@ -1967,7 +1967,7 @@ ccdc_try_format(struct isp_ccdc_device *ccdc, struct v4l2_subdev_fh *fh,
1967 enum v4l2_subdev_format_whence which) 1967 enum v4l2_subdev_format_whence which)
1968{ 1968{
1969 const struct isp_format_info *info; 1969 const struct isp_format_info *info;
1970 enum v4l2_mbus_pixelcode pixelcode; 1970 u32 pixelcode;
1971 unsigned int width = fmt->width; 1971 unsigned int width = fmt->width;
1972 unsigned int height = fmt->height; 1972 unsigned int height = fmt->height;
1973 struct v4l2_rect *crop; 1973 struct v4l2_rect *crop;
@@ -1983,7 +1983,7 @@ ccdc_try_format(struct isp_ccdc_device *ccdc, struct v4l2_subdev_fh *fh,
1983 1983
1984 /* If not found, use SGRBG10 as default */ 1984 /* If not found, use SGRBG10 as default */
1985 if (i >= ARRAY_SIZE(ccdc_fmts)) 1985 if (i >= ARRAY_SIZE(ccdc_fmts))
1986 fmt->code = V4L2_MBUS_FMT_SGRBG10_1X10; 1986 fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
1987 1987
1988 /* Clamp the input size. */ 1988 /* Clamp the input size. */
1989 fmt->width = clamp_t(u32, width, 32, 4096); 1989 fmt->width = clamp_t(u32, width, 32, 4096);
@@ -2007,19 +2007,19 @@ ccdc_try_format(struct isp_ccdc_device *ccdc, struct v4l2_subdev_fh *fh,
2007 * configured to pack bytes in BT.656, hiding the inaccuracy. 2007 * configured to pack bytes in BT.656, hiding the inaccuracy.
2008 * In all cases bytes can be swapped. 2008 * In all cases bytes can be swapped.
2009 */ 2009 */
2010 if (fmt->code == V4L2_MBUS_FMT_YUYV8_2X8 || 2010 if (fmt->code == MEDIA_BUS_FMT_YUYV8_2X8 ||
2011 fmt->code == V4L2_MBUS_FMT_UYVY8_2X8) { 2011 fmt->code == MEDIA_BUS_FMT_UYVY8_2X8) {
2012 /* Use the user requested format if YUV. */ 2012 /* Use the user requested format if YUV. */
2013 if (pixelcode == V4L2_MBUS_FMT_YUYV8_2X8 || 2013 if (pixelcode == MEDIA_BUS_FMT_YUYV8_2X8 ||
2014 pixelcode == V4L2_MBUS_FMT_UYVY8_2X8 || 2014 pixelcode == MEDIA_BUS_FMT_UYVY8_2X8 ||
2015 pixelcode == V4L2_MBUS_FMT_YUYV8_1X16 || 2015 pixelcode == MEDIA_BUS_FMT_YUYV8_1X16 ||
2016 pixelcode == V4L2_MBUS_FMT_UYVY8_1X16) 2016 pixelcode == MEDIA_BUS_FMT_UYVY8_1X16)
2017 fmt->code = pixelcode; 2017 fmt->code = pixelcode;
2018 2018
2019 if (fmt->code == V4L2_MBUS_FMT_YUYV8_2X8) 2019 if (fmt->code == MEDIA_BUS_FMT_YUYV8_2X8)
2020 fmt->code = V4L2_MBUS_FMT_YUYV8_1X16; 2020 fmt->code = MEDIA_BUS_FMT_YUYV8_1X16;
2021 else if (fmt->code == V4L2_MBUS_FMT_UYVY8_2X8) 2021 else if (fmt->code == MEDIA_BUS_FMT_UYVY8_2X8)
2022 fmt->code = V4L2_MBUS_FMT_UYVY8_1X16; 2022 fmt->code = MEDIA_BUS_FMT_UYVY8_1X16;
2023 } 2023 }
2024 2024
2025 /* Hardcode the output size to the crop rectangle size. */ 2025 /* Hardcode the output size to the crop rectangle size. */
@@ -2047,8 +2047,8 @@ ccdc_try_format(struct isp_ccdc_device *ccdc, struct v4l2_subdev_fh *fh,
2047 fmt->code = info->truncated; 2047 fmt->code = info->truncated;
2048 2048
2049 /* YUV formats are not supported by the video port. */ 2049 /* YUV formats are not supported by the video port. */
2050 if (fmt->code == V4L2_MBUS_FMT_YUYV8_2X8 || 2050 if (fmt->code == MEDIA_BUS_FMT_YUYV8_2X8 ||
2051 fmt->code == V4L2_MBUS_FMT_UYVY8_2X8) 2051 fmt->code == MEDIA_BUS_FMT_UYVY8_2X8)
2052 fmt->code = 0; 2052 fmt->code = 0;
2053 2053
2054 /* The number of lines that can be clocked out from the video 2054 /* The number of lines that can be clocked out from the video
@@ -2083,7 +2083,7 @@ static void ccdc_try_crop(struct isp_ccdc_device *ccdc,
2083 * to keep the Bayer pattern. 2083 * to keep the Bayer pattern.
2084 */ 2084 */
2085 info = omap3isp_video_format_info(sink->code); 2085 info = omap3isp_video_format_info(sink->code);
2086 if (info->flavor != V4L2_MBUS_FMT_Y8_1X8) { 2086 if (info->flavor != MEDIA_BUS_FMT_Y8_1X8) {
2087 crop->left &= ~1; 2087 crop->left &= ~1;
2088 crop->top &= ~1; 2088 crop->top &= ~1;
2089 } 2089 }
@@ -2103,7 +2103,7 @@ static void ccdc_try_crop(struct isp_ccdc_device *ccdc,
2103 sink->height - crop->top); 2103 sink->height - crop->top);
2104 2104
2105 /* Odd width/height values don't make sense for Bayer formats. */ 2105 /* Odd width/height values don't make sense for Bayer formats. */
2106 if (info->flavor != V4L2_MBUS_FMT_Y8_1X8) { 2106 if (info->flavor != MEDIA_BUS_FMT_Y8_1X8) {
2107 crop->width &= ~1; 2107 crop->width &= ~1;
2108 crop->height &= ~1; 2108 crop->height &= ~1;
2109 } 2109 }
@@ -2135,13 +2135,13 @@ static int ccdc_enum_mbus_code(struct v4l2_subdev *sd,
2135 format = __ccdc_get_format(ccdc, fh, code->pad, 2135 format = __ccdc_get_format(ccdc, fh, code->pad,
2136 V4L2_SUBDEV_FORMAT_TRY); 2136 V4L2_SUBDEV_FORMAT_TRY);
2137 2137
2138 if (format->code == V4L2_MBUS_FMT_YUYV8_2X8 || 2138 if (format->code == MEDIA_BUS_FMT_YUYV8_2X8 ||
2139 format->code == V4L2_MBUS_FMT_UYVY8_2X8) { 2139 format->code == MEDIA_BUS_FMT_UYVY8_2X8) {
2140 /* In YUV mode the CCDC can swap bytes. */ 2140 /* In YUV mode the CCDC can swap bytes. */
2141 if (code->index == 0) 2141 if (code->index == 0)
2142 code->code = V4L2_MBUS_FMT_YUYV8_1X16; 2142 code->code = MEDIA_BUS_FMT_YUYV8_1X16;
2143 else if (code->index == 1) 2143 else if (code->index == 1)
2144 code->code = V4L2_MBUS_FMT_UYVY8_1X16; 2144 code->code = MEDIA_BUS_FMT_UYVY8_1X16;
2145 else 2145 else
2146 return -EINVAL; 2146 return -EINVAL;
2147 } else { 2147 } else {
@@ -2383,9 +2383,7 @@ static int ccdc_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
2383 * return true if the combination is possible 2383 * return true if the combination is possible
2384 * return false otherwise 2384 * return false otherwise
2385 */ 2385 */
2386static bool ccdc_is_shiftable(enum v4l2_mbus_pixelcode in, 2386static bool ccdc_is_shiftable(u32 in, u32 out, unsigned int additional_shift)
2387 enum v4l2_mbus_pixelcode out,
2388 unsigned int additional_shift)
2389{ 2387{
2390 const struct isp_format_info *in_info, *out_info; 2388 const struct isp_format_info *in_info, *out_info;
2391 2389
@@ -2452,7 +2450,7 @@ static int ccdc_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
2452 memset(&format, 0, sizeof(format)); 2450 memset(&format, 0, sizeof(format));
2453 format.pad = CCDC_PAD_SINK; 2451 format.pad = CCDC_PAD_SINK;
2454 format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE; 2452 format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
2455 format.format.code = V4L2_MBUS_FMT_SGRBG10_1X10; 2453 format.format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
2456 format.format.width = 4096; 2454 format.format.width = 4096;
2457 format.format.height = 4096; 2455 format.format.height = 4096;
2458 ccdc_set_format(sd, fh, &format); 2456 ccdc_set_format(sd, fh, &format);
diff --git a/drivers/media/platform/omap3isp/ispccp2.c b/drivers/media/platform/omap3isp/ispccp2.c
index 9cb49b3c04bd..f4aedb37e41e 100644
--- a/drivers/media/platform/omap3isp/ispccp2.c
+++ b/drivers/media/platform/omap3isp/ispccp2.c
@@ -289,10 +289,10 @@ static void ccp2_lcx_config(struct isp_ccp2_device *ccp2,
289 u32 val, format; 289 u32 val, format;
290 290
291 switch (config->format) { 291 switch (config->format) {
292 case V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8: 292 case MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8:
293 format = ISPCCP2_LCx_CTRL_FORMAT_RAW8_DPCM10_VP; 293 format = ISPCCP2_LCx_CTRL_FORMAT_RAW8_DPCM10_VP;
294 break; 294 break;
295 case V4L2_MBUS_FMT_SGRBG10_1X10: 295 case MEDIA_BUS_FMT_SGRBG10_1X10:
296 default: 296 default:
297 format = ISPCCP2_LCx_CTRL_FORMAT_RAW10_VP; /* RAW10+VP */ 297 format = ISPCCP2_LCx_CTRL_FORMAT_RAW10_VP; /* RAW10+VP */
298 break; 298 break;
@@ -438,7 +438,7 @@ static void ccp2_mem_configure(struct isp_ccp2_device *ccp2,
438 u32 val, hwords; 438 u32 val, hwords;
439 439
440 if (sink_pixcode != source_pixcode && 440 if (sink_pixcode != source_pixcode &&
441 sink_pixcode == V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8) 441 sink_pixcode == MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8)
442 dpcm_decompress = 1; 442 dpcm_decompress = 1;
443 443
444 ccp2_pwr_cfg(ccp2); 444 ccp2_pwr_cfg(ccp2);
@@ -604,8 +604,8 @@ void omap3isp_ccp2_isr(struct isp_ccp2_device *ccp2)
604 */ 604 */
605 605
606static const unsigned int ccp2_fmts[] = { 606static const unsigned int ccp2_fmts[] = {
607 V4L2_MBUS_FMT_SGRBG10_1X10, 607 MEDIA_BUS_FMT_SGRBG10_1X10,
608 V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8, 608 MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8,
609}; 609};
610 610
611/* 611/*
@@ -643,8 +643,8 @@ static void ccp2_try_format(struct isp_ccp2_device *ccp2,
643 643
644 switch (pad) { 644 switch (pad) {
645 case CCP2_PAD_SINK: 645 case CCP2_PAD_SINK:
646 if (fmt->code != V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8) 646 if (fmt->code != MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8)
647 fmt->code = V4L2_MBUS_FMT_SGRBG10_1X10; 647 fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
648 648
649 if (ccp2->input == CCP2_INPUT_SENSOR) { 649 if (ccp2->input == CCP2_INPUT_SENSOR) {
650 fmt->width = clamp_t(u32, fmt->width, 650 fmt->width = clamp_t(u32, fmt->width,
@@ -671,7 +671,7 @@ static void ccp2_try_format(struct isp_ccp2_device *ccp2,
671 */ 671 */
672 format = __ccp2_get_format(ccp2, fh, CCP2_PAD_SINK, which); 672 format = __ccp2_get_format(ccp2, fh, CCP2_PAD_SINK, which);
673 memcpy(fmt, format, sizeof(*fmt)); 673 memcpy(fmt, format, sizeof(*fmt));
674 fmt->code = V4L2_MBUS_FMT_SGRBG10_1X10; 674 fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
675 break; 675 break;
676 } 676 }
677 677
@@ -808,7 +808,7 @@ static int ccp2_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
808 memset(&format, 0, sizeof(format)); 808 memset(&format, 0, sizeof(format));
809 format.pad = CCP2_PAD_SINK; 809 format.pad = CCP2_PAD_SINK;
810 format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE; 810 format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
811 format.format.code = V4L2_MBUS_FMT_SGRBG10_1X10; 811 format.format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
812 format.format.width = 4096; 812 format.format.width = 4096;
813 format.format.height = 4096; 813 format.format.height = 4096;
814 ccp2_set_format(sd, fh, &format); 814 ccp2_set_format(sd, fh, &format);
diff --git a/drivers/media/platform/omap3isp/ispcsi2.c b/drivers/media/platform/omap3isp/ispcsi2.c
index 6530b255f103..09c686d96ae8 100644
--- a/drivers/media/platform/omap3isp/ispcsi2.c
+++ b/drivers/media/platform/omap3isp/ispcsi2.c
@@ -78,15 +78,15 @@ static void csi2_recv_config(struct isp_device *isp,
78} 78}
79 79
80static const unsigned int csi2_input_fmts[] = { 80static const unsigned int csi2_input_fmts[] = {
81 V4L2_MBUS_FMT_SGRBG10_1X10, 81 MEDIA_BUS_FMT_SGRBG10_1X10,
82 V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8, 82 MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8,
83 V4L2_MBUS_FMT_SRGGB10_1X10, 83 MEDIA_BUS_FMT_SRGGB10_1X10,
84 V4L2_MBUS_FMT_SRGGB10_DPCM8_1X8, 84 MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8,
85 V4L2_MBUS_FMT_SBGGR10_1X10, 85 MEDIA_BUS_FMT_SBGGR10_1X10,
86 V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8, 86 MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8,
87 V4L2_MBUS_FMT_SGBRG10_1X10, 87 MEDIA_BUS_FMT_SGBRG10_1X10,
88 V4L2_MBUS_FMT_SGBRG10_DPCM8_1X8, 88 MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8,
89 V4L2_MBUS_FMT_YUYV8_2X8, 89 MEDIA_BUS_FMT_YUYV8_2X8,
90}; 90};
91 91
92/* To set the format on the CSI2 requires a mapping function that takes 92/* To set the format on the CSI2 requires a mapping function that takes
@@ -171,19 +171,19 @@ static u16 csi2_ctx_map_format(struct isp_csi2_device *csi2)
171 int fmtidx, destidx, is_3630; 171 int fmtidx, destidx, is_3630;
172 172
173 switch (fmt->code) { 173 switch (fmt->code) {
174 case V4L2_MBUS_FMT_SGRBG10_1X10: 174 case MEDIA_BUS_FMT_SGRBG10_1X10:
175 case V4L2_MBUS_FMT_SRGGB10_1X10: 175 case MEDIA_BUS_FMT_SRGGB10_1X10:
176 case V4L2_MBUS_FMT_SBGGR10_1X10: 176 case MEDIA_BUS_FMT_SBGGR10_1X10:
177 case V4L2_MBUS_FMT_SGBRG10_1X10: 177 case MEDIA_BUS_FMT_SGBRG10_1X10:
178 fmtidx = 0; 178 fmtidx = 0;
179 break; 179 break;
180 case V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8: 180 case MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8:
181 case V4L2_MBUS_FMT_SRGGB10_DPCM8_1X8: 181 case MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8:
182 case V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8: 182 case MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8:
183 case V4L2_MBUS_FMT_SGBRG10_DPCM8_1X8: 183 case MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8:
184 fmtidx = 1; 184 fmtidx = 1;
185 break; 185 break;
186 case V4L2_MBUS_FMT_YUYV8_2X8: 186 case MEDIA_BUS_FMT_YUYV8_2X8:
187 fmtidx = 2; 187 fmtidx = 2;
188 break; 188 break;
189 default: 189 default:
@@ -843,7 +843,7 @@ csi2_try_format(struct isp_csi2_device *csi2, struct v4l2_subdev_fh *fh,
843 unsigned int pad, struct v4l2_mbus_framefmt *fmt, 843 unsigned int pad, struct v4l2_mbus_framefmt *fmt,
844 enum v4l2_subdev_format_whence which) 844 enum v4l2_subdev_format_whence which)
845{ 845{
846 enum v4l2_mbus_pixelcode pixelcode; 846 u32 pixelcode;
847 struct v4l2_mbus_framefmt *format; 847 struct v4l2_mbus_framefmt *format;
848 const struct isp_format_info *info; 848 const struct isp_format_info *info;
849 unsigned int i; 849 unsigned int i;
@@ -858,7 +858,7 @@ csi2_try_format(struct isp_csi2_device *csi2, struct v4l2_subdev_fh *fh,
858 858
859 /* If not found, use SGRBG10 as default */ 859 /* If not found, use SGRBG10 as default */
860 if (i >= ARRAY_SIZE(csi2_input_fmts)) 860 if (i >= ARRAY_SIZE(csi2_input_fmts))
861 fmt->code = V4L2_MBUS_FMT_SGRBG10_1X10; 861 fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
862 862
863 fmt->width = clamp_t(u32, fmt->width, 1, 8191); 863 fmt->width = clamp_t(u32, fmt->width, 1, 8191);
864 fmt->height = clamp_t(u32, fmt->height, 1, 8191); 864 fmt->height = clamp_t(u32, fmt->height, 1, 8191);
@@ -1029,7 +1029,7 @@ static int csi2_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
1029 memset(&format, 0, sizeof(format)); 1029 memset(&format, 0, sizeof(format));
1030 format.pad = CSI2_PAD_SINK; 1030 format.pad = CSI2_PAD_SINK;
1031 format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE; 1031 format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
1032 format.format.code = V4L2_MBUS_FMT_SGRBG10_1X10; 1032 format.format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
1033 format.format.width = 4096; 1033 format.format.width = 4096;
1034 format.format.height = 4096; 1034 format.format.height = 4096;
1035 csi2_set_format(sd, fh, &format); 1035 csi2_set_format(sd, fh, &format);
diff --git a/drivers/media/platform/omap3isp/isppreview.c b/drivers/media/platform/omap3isp/isppreview.c
index 605f57ef0a49..dd9eed45d853 100644
--- a/drivers/media/platform/omap3isp/isppreview.c
+++ b/drivers/media/platform/omap3isp/isppreview.c
@@ -964,18 +964,16 @@ static void preview_setup_hw(struct isp_prev_device *prev, u32 update,
964 * @prev: pointer to previewer private structure 964 * @prev: pointer to previewer private structure
965 * @pixelcode: pixel code 965 * @pixelcode: pixel code
966 */ 966 */
967static void 967static void preview_config_ycpos(struct isp_prev_device *prev, u32 pixelcode)
968preview_config_ycpos(struct isp_prev_device *prev,
969 enum v4l2_mbus_pixelcode pixelcode)
970{ 968{
971 struct isp_device *isp = to_isp_device(prev); 969 struct isp_device *isp = to_isp_device(prev);
972 enum preview_ycpos_mode mode; 970 enum preview_ycpos_mode mode;
973 971
974 switch (pixelcode) { 972 switch (pixelcode) {
975 case V4L2_MBUS_FMT_YUYV8_1X16: 973 case MEDIA_BUS_FMT_YUYV8_1X16:
976 mode = YCPOS_CrYCbY; 974 mode = YCPOS_CrYCbY;
977 break; 975 break;
978 case V4L2_MBUS_FMT_UYVY8_1X16: 976 case MEDIA_BUS_FMT_UYVY8_1X16:
979 mode = YCPOS_YCrYCb; 977 mode = YCPOS_YCrYCb;
980 break; 978 break;
981 default: 979 default:
@@ -1028,16 +1026,16 @@ static void preview_config_input_format(struct isp_prev_device *prev,
1028 ISPPRV_PCR_WIDTH); 1026 ISPPRV_PCR_WIDTH);
1029 1027
1030 switch (info->flavor) { 1028 switch (info->flavor) {
1031 case V4L2_MBUS_FMT_SGRBG8_1X8: 1029 case MEDIA_BUS_FMT_SGRBG8_1X8:
1032 prev->params.cfa_order = 0; 1030 prev->params.cfa_order = 0;
1033 break; 1031 break;
1034 case V4L2_MBUS_FMT_SRGGB8_1X8: 1032 case MEDIA_BUS_FMT_SRGGB8_1X8:
1035 prev->params.cfa_order = 1; 1033 prev->params.cfa_order = 1;
1036 break; 1034 break;
1037 case V4L2_MBUS_FMT_SBGGR8_1X8: 1035 case MEDIA_BUS_FMT_SBGGR8_1X8:
1038 prev->params.cfa_order = 2; 1036 prev->params.cfa_order = 2;
1039 break; 1037 break;
1040 case V4L2_MBUS_FMT_SGBRG8_1X8: 1038 case MEDIA_BUS_FMT_SGBRG8_1X8:
1041 prev->params.cfa_order = 3; 1039 prev->params.cfa_order = 3;
1042 break; 1040 break;
1043 default: 1041 default:
@@ -1078,8 +1076,8 @@ static void preview_config_input_size(struct isp_prev_device *prev, u32 active)
1078 unsigned int elv = prev->crop.top + prev->crop.height - 1; 1076 unsigned int elv = prev->crop.top + prev->crop.height - 1;
1079 u32 features; 1077 u32 features;
1080 1078
1081 if (format->code != V4L2_MBUS_FMT_Y8_1X8 && 1079 if (format->code != MEDIA_BUS_FMT_Y8_1X8 &&
1082 format->code != V4L2_MBUS_FMT_Y10_1X10) { 1080 format->code != MEDIA_BUS_FMT_Y10_1X10) {
1083 sph -= 2; 1081 sph -= 2;
1084 eph += 2; 1082 eph += 2;
1085 slv -= 2; 1083 slv -= 2;
@@ -1709,21 +1707,21 @@ __preview_get_crop(struct isp_prev_device *prev, struct v4l2_subdev_fh *fh,
1709 1707
1710/* previewer format descriptions */ 1708/* previewer format descriptions */
1711static const unsigned int preview_input_fmts[] = { 1709static const unsigned int preview_input_fmts[] = {
1712 V4L2_MBUS_FMT_Y8_1X8, 1710 MEDIA_BUS_FMT_Y8_1X8,
1713 V4L2_MBUS_FMT_SGRBG8_1X8, 1711 MEDIA_BUS_FMT_SGRBG8_1X8,
1714 V4L2_MBUS_FMT_SRGGB8_1X8, 1712 MEDIA_BUS_FMT_SRGGB8_1X8,
1715 V4L2_MBUS_FMT_SBGGR8_1X8, 1713 MEDIA_BUS_FMT_SBGGR8_1X8,
1716 V4L2_MBUS_FMT_SGBRG8_1X8, 1714 MEDIA_BUS_FMT_SGBRG8_1X8,
1717 V4L2_MBUS_FMT_Y10_1X10, 1715 MEDIA_BUS_FMT_Y10_1X10,
1718 V4L2_MBUS_FMT_SGRBG10_1X10, 1716 MEDIA_BUS_FMT_SGRBG10_1X10,
1719 V4L2_MBUS_FMT_SRGGB10_1X10, 1717 MEDIA_BUS_FMT_SRGGB10_1X10,
1720 V4L2_MBUS_FMT_SBGGR10_1X10, 1718 MEDIA_BUS_FMT_SBGGR10_1X10,
1721 V4L2_MBUS_FMT_SGBRG10_1X10, 1719 MEDIA_BUS_FMT_SGBRG10_1X10,
1722}; 1720};
1723 1721
1724static const unsigned int preview_output_fmts[] = { 1722static const unsigned int preview_output_fmts[] = {
1725 V4L2_MBUS_FMT_UYVY8_1X16, 1723 MEDIA_BUS_FMT_UYVY8_1X16,
1726 V4L2_MBUS_FMT_YUYV8_1X16, 1724 MEDIA_BUS_FMT_YUYV8_1X16,
1727}; 1725};
1728 1726
1729/* 1727/*
@@ -1742,7 +1740,7 @@ static void preview_try_format(struct isp_prev_device *prev,
1742 struct v4l2_mbus_framefmt *fmt, 1740 struct v4l2_mbus_framefmt *fmt,
1743 enum v4l2_subdev_format_whence which) 1741 enum v4l2_subdev_format_whence which)
1744{ 1742{
1745 enum v4l2_mbus_pixelcode pixelcode; 1743 u32 pixelcode;
1746 struct v4l2_rect *crop; 1744 struct v4l2_rect *crop;
1747 unsigned int i; 1745 unsigned int i;
1748 1746
@@ -1774,7 +1772,7 @@ static void preview_try_format(struct isp_prev_device *prev,
1774 1772
1775 /* If not found, use SGRBG10 as default */ 1773 /* If not found, use SGRBG10 as default */
1776 if (i >= ARRAY_SIZE(preview_input_fmts)) 1774 if (i >= ARRAY_SIZE(preview_input_fmts))
1777 fmt->code = V4L2_MBUS_FMT_SGRBG10_1X10; 1775 fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
1778 break; 1776 break;
1779 1777
1780 case PREV_PAD_SOURCE: 1778 case PREV_PAD_SOURCE:
@@ -1782,13 +1780,13 @@ static void preview_try_format(struct isp_prev_device *prev,
1782 *fmt = *__preview_get_format(prev, fh, PREV_PAD_SINK, which); 1780 *fmt = *__preview_get_format(prev, fh, PREV_PAD_SINK, which);
1783 1781
1784 switch (pixelcode) { 1782 switch (pixelcode) {
1785 case V4L2_MBUS_FMT_YUYV8_1X16: 1783 case MEDIA_BUS_FMT_YUYV8_1X16:
1786 case V4L2_MBUS_FMT_UYVY8_1X16: 1784 case MEDIA_BUS_FMT_UYVY8_1X16:
1787 fmt->code = pixelcode; 1785 fmt->code = pixelcode;
1788 break; 1786 break;
1789 1787
1790 default: 1788 default:
1791 fmt->code = V4L2_MBUS_FMT_YUYV8_1X16; 1789 fmt->code = MEDIA_BUS_FMT_YUYV8_1X16;
1792 break; 1790 break;
1793 } 1791 }
1794 1792
@@ -1843,8 +1841,8 @@ static void preview_try_crop(struct isp_prev_device *prev,
1843 * and no columns in other modes. Increase the margins based on the sink 1841 * and no columns in other modes. Increase the margins based on the sink
1844 * format. 1842 * format.
1845 */ 1843 */
1846 if (sink->code != V4L2_MBUS_FMT_Y8_1X8 && 1844 if (sink->code != MEDIA_BUS_FMT_Y8_1X8 &&
1847 sink->code != V4L2_MBUS_FMT_Y10_1X10) { 1845 sink->code != MEDIA_BUS_FMT_Y10_1X10) {
1848 left += 2; 1846 left += 2;
1849 right -= 2; 1847 right -= 2;
1850 top += 2; 1848 top += 2;
@@ -2092,7 +2090,7 @@ static int preview_init_formats(struct v4l2_subdev *sd,
2092 memset(&format, 0, sizeof(format)); 2090 memset(&format, 0, sizeof(format));
2093 format.pad = PREV_PAD_SINK; 2091 format.pad = PREV_PAD_SINK;
2094 format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE; 2092 format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
2095 format.format.code = V4L2_MBUS_FMT_SGRBG10_1X10; 2093 format.format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
2096 format.format.width = 4096; 2094 format.format.width = 4096;
2097 format.format.height = 4096; 2095 format.format.height = 4096;
2098 preview_set_format(sd, fh, &format); 2096 preview_set_format(sd, fh, &format);
diff --git a/drivers/media/platform/omap3isp/ispresizer.c b/drivers/media/platform/omap3isp/ispresizer.c
index 05d1ace57451..2b9bc4839876 100644
--- a/drivers/media/platform/omap3isp/ispresizer.c
+++ b/drivers/media/platform/omap3isp/ispresizer.c
@@ -198,17 +198,16 @@ static void resizer_set_bilinear(struct isp_res_device *res,
198 * @res: Device context. 198 * @res: Device context.
199 * @pixelcode: pixel code. 199 * @pixelcode: pixel code.
200 */ 200 */
201static void resizer_set_ycpos(struct isp_res_device *res, 201static void resizer_set_ycpos(struct isp_res_device *res, u32 pixelcode)
202 enum v4l2_mbus_pixelcode pixelcode)
203{ 202{
204 struct isp_device *isp = to_isp_device(res); 203 struct isp_device *isp = to_isp_device(res);
205 204
206 switch (pixelcode) { 205 switch (pixelcode) {
207 case V4L2_MBUS_FMT_YUYV8_1X16: 206 case MEDIA_BUS_FMT_YUYV8_1X16:
208 isp_reg_set(isp, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_CNT, 207 isp_reg_set(isp, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_CNT,
209 ISPRSZ_CNT_YCPOS); 208 ISPRSZ_CNT_YCPOS);
210 break; 209 break;
211 case V4L2_MBUS_FMT_UYVY8_1X16: 210 case MEDIA_BUS_FMT_UYVY8_1X16:
212 isp_reg_clr(isp, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_CNT, 211 isp_reg_clr(isp, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_CNT,
213 ISPRSZ_CNT_YCPOS); 212 ISPRSZ_CNT_YCPOS);
214 break; 213 break;
@@ -1348,8 +1347,8 @@ static int resizer_set_selection(struct v4l2_subdev *sd,
1348 1347
1349/* resizer pixel formats */ 1348/* resizer pixel formats */
1350static const unsigned int resizer_formats[] = { 1349static const unsigned int resizer_formats[] = {
1351 V4L2_MBUS_FMT_UYVY8_1X16, 1350 MEDIA_BUS_FMT_UYVY8_1X16,
1352 V4L2_MBUS_FMT_YUYV8_1X16, 1351 MEDIA_BUS_FMT_YUYV8_1X16,
1353}; 1352};
1354 1353
1355static unsigned int resizer_max_in_width(struct isp_res_device *res) 1354static unsigned int resizer_max_in_width(struct isp_res_device *res)
@@ -1385,9 +1384,9 @@ static void resizer_try_format(struct isp_res_device *res,
1385 1384
1386 switch (pad) { 1385 switch (pad) {
1387 case RESZ_PAD_SINK: 1386 case RESZ_PAD_SINK:
1388 if (fmt->code != V4L2_MBUS_FMT_YUYV8_1X16 && 1387 if (fmt->code != MEDIA_BUS_FMT_YUYV8_1X16 &&
1389 fmt->code != V4L2_MBUS_FMT_UYVY8_1X16) 1388 fmt->code != MEDIA_BUS_FMT_UYVY8_1X16)
1390 fmt->code = V4L2_MBUS_FMT_YUYV8_1X16; 1389 fmt->code = MEDIA_BUS_FMT_YUYV8_1X16;
1391 1390
1392 fmt->width = clamp_t(u32, fmt->width, MIN_IN_WIDTH, 1391 fmt->width = clamp_t(u32, fmt->width, MIN_IN_WIDTH,
1393 resizer_max_in_width(res)); 1392 resizer_max_in_width(res));
@@ -1571,7 +1570,7 @@ static int resizer_init_formats(struct v4l2_subdev *sd,
1571 memset(&format, 0, sizeof(format)); 1570 memset(&format, 0, sizeof(format));
1572 format.pad = RESZ_PAD_SINK; 1571 format.pad = RESZ_PAD_SINK;
1573 format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE; 1572 format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
1574 format.format.code = V4L2_MBUS_FMT_YUYV8_1X16; 1573 format.format.code = MEDIA_BUS_FMT_YUYV8_1X16;
1575 format.format.width = 4096; 1574 format.format.width = 4096;
1576 format.format.height = 4096; 1575 format.format.height = 4096;
1577 resizer_set_format(sd, fh, &format); 1576 resizer_set_format(sd, fh, &format);
diff --git a/drivers/media/platform/omap3isp/ispvideo.c b/drivers/media/platform/omap3isp/ispvideo.c
index bc38c88c7bd9..b463fe172d16 100644
--- a/drivers/media/platform/omap3isp/ispvideo.c
+++ b/drivers/media/platform/omap3isp/ispvideo.c
@@ -39,74 +39,74 @@
39 * corresponding in-memory formats to the table below!!! 39 * corresponding in-memory formats to the table below!!!
40 */ 40 */
41static struct isp_format_info formats[] = { 41static struct isp_format_info formats[] = {
42 { V4L2_MBUS_FMT_Y8_1X8, V4L2_MBUS_FMT_Y8_1X8, 42 { MEDIA_BUS_FMT_Y8_1X8, MEDIA_BUS_FMT_Y8_1X8,
43 V4L2_MBUS_FMT_Y8_1X8, V4L2_MBUS_FMT_Y8_1X8, 43 MEDIA_BUS_FMT_Y8_1X8, MEDIA_BUS_FMT_Y8_1X8,
44 V4L2_PIX_FMT_GREY, 8, 1, }, 44 V4L2_PIX_FMT_GREY, 8, 1, },
45 { V4L2_MBUS_FMT_Y10_1X10, V4L2_MBUS_FMT_Y10_1X10, 45 { MEDIA_BUS_FMT_Y10_1X10, MEDIA_BUS_FMT_Y10_1X10,
46 V4L2_MBUS_FMT_Y10_1X10, V4L2_MBUS_FMT_Y8_1X8, 46 MEDIA_BUS_FMT_Y10_1X10, MEDIA_BUS_FMT_Y8_1X8,
47 V4L2_PIX_FMT_Y10, 10, 2, }, 47 V4L2_PIX_FMT_Y10, 10, 2, },
48 { V4L2_MBUS_FMT_Y12_1X12, V4L2_MBUS_FMT_Y10_1X10, 48 { MEDIA_BUS_FMT_Y12_1X12, MEDIA_BUS_FMT_Y10_1X10,
49 V4L2_MBUS_FMT_Y12_1X12, V4L2_MBUS_FMT_Y8_1X8, 49 MEDIA_BUS_FMT_Y12_1X12, MEDIA_BUS_FMT_Y8_1X8,
50 V4L2_PIX_FMT_Y12, 12, 2, }, 50 V4L2_PIX_FMT_Y12, 12, 2, },
51 { V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_MBUS_FMT_SBGGR8_1X8, 51 { MEDIA_BUS_FMT_SBGGR8_1X8, MEDIA_BUS_FMT_SBGGR8_1X8,
52 V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_MBUS_FMT_SBGGR8_1X8, 52 MEDIA_BUS_FMT_SBGGR8_1X8, MEDIA_BUS_FMT_SBGGR8_1X8,
53 V4L2_PIX_FMT_SBGGR8, 8, 1, }, 53 V4L2_PIX_FMT_SBGGR8, 8, 1, },
54 { V4L2_MBUS_FMT_SGBRG8_1X8, V4L2_MBUS_FMT_SGBRG8_1X8, 54 { MEDIA_BUS_FMT_SGBRG8_1X8, MEDIA_BUS_FMT_SGBRG8_1X8,
55 V4L2_MBUS_FMT_SGBRG8_1X8, V4L2_MBUS_FMT_SGBRG8_1X8, 55 MEDIA_BUS_FMT_SGBRG8_1X8, MEDIA_BUS_FMT_SGBRG8_1X8,
56 V4L2_PIX_FMT_SGBRG8, 8, 1, }, 56 V4L2_PIX_FMT_SGBRG8, 8, 1, },
57 { V4L2_MBUS_FMT_SGRBG8_1X8, V4L2_MBUS_FMT_SGRBG8_1X8, 57 { MEDIA_BUS_FMT_SGRBG8_1X8, MEDIA_BUS_FMT_SGRBG8_1X8,
58 V4L2_MBUS_FMT_SGRBG8_1X8, V4L2_MBUS_FMT_SGRBG8_1X8, 58 MEDIA_BUS_FMT_SGRBG8_1X8, MEDIA_BUS_FMT_SGRBG8_1X8,
59 V4L2_PIX_FMT_SGRBG8, 8, 1, }, 59 V4L2_PIX_FMT_SGRBG8, 8, 1, },
60 { V4L2_MBUS_FMT_SRGGB8_1X8, V4L2_MBUS_FMT_SRGGB8_1X8, 60 { MEDIA_BUS_FMT_SRGGB8_1X8, MEDIA_BUS_FMT_SRGGB8_1X8,
61 V4L2_MBUS_FMT_SRGGB8_1X8, V4L2_MBUS_FMT_SRGGB8_1X8, 61 MEDIA_BUS_FMT_SRGGB8_1X8, MEDIA_BUS_FMT_SRGGB8_1X8,
62 V4L2_PIX_FMT_SRGGB8, 8, 1, }, 62 V4L2_PIX_FMT_SRGGB8, 8, 1, },
63 { V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8, V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8, 63 { MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8, MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8,
64 V4L2_MBUS_FMT_SBGGR10_1X10, 0, 64 MEDIA_BUS_FMT_SBGGR10_1X10, 0,
65 V4L2_PIX_FMT_SBGGR10DPCM8, 8, 1, }, 65 V4L2_PIX_FMT_SBGGR10DPCM8, 8, 1, },
66 { V4L2_MBUS_FMT_SGBRG10_DPCM8_1X8, V4L2_MBUS_FMT_SGBRG10_DPCM8_1X8, 66 { MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8, MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8,
67 V4L2_MBUS_FMT_SGBRG10_1X10, 0, 67 MEDIA_BUS_FMT_SGBRG10_1X10, 0,
68 V4L2_PIX_FMT_SGBRG10DPCM8, 8, 1, }, 68 V4L2_PIX_FMT_SGBRG10DPCM8, 8, 1, },
69 { V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8, V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8, 69 { MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8, MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8,
70 V4L2_MBUS_FMT_SGRBG10_1X10, 0, 70 MEDIA_BUS_FMT_SGRBG10_1X10, 0,
71 V4L2_PIX_FMT_SGRBG10DPCM8, 8, 1, }, 71 V4L2_PIX_FMT_SGRBG10DPCM8, 8, 1, },
72 { V4L2_MBUS_FMT_SRGGB10_DPCM8_1X8, V4L2_MBUS_FMT_SRGGB10_DPCM8_1X8, 72 { MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8, MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8,
73 V4L2_MBUS_FMT_SRGGB10_1X10, 0, 73 MEDIA_BUS_FMT_SRGGB10_1X10, 0,
74 V4L2_PIX_FMT_SRGGB10DPCM8, 8, 1, }, 74 V4L2_PIX_FMT_SRGGB10DPCM8, 8, 1, },
75 { V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_MBUS_FMT_SBGGR10_1X10, 75 { MEDIA_BUS_FMT_SBGGR10_1X10, MEDIA_BUS_FMT_SBGGR10_1X10,
76 V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_MBUS_FMT_SBGGR8_1X8, 76 MEDIA_BUS_FMT_SBGGR10_1X10, MEDIA_BUS_FMT_SBGGR8_1X8,
77 V4L2_PIX_FMT_SBGGR10, 10, 2, }, 77 V4L2_PIX_FMT_SBGGR10, 10, 2, },
78 { V4L2_MBUS_FMT_SGBRG10_1X10, V4L2_MBUS_FMT_SGBRG10_1X10, 78 { MEDIA_BUS_FMT_SGBRG10_1X10, MEDIA_BUS_FMT_SGBRG10_1X10,
79 V4L2_MBUS_FMT_SGBRG10_1X10, V4L2_MBUS_FMT_SGBRG8_1X8, 79 MEDIA_BUS_FMT_SGBRG10_1X10, MEDIA_BUS_FMT_SGBRG8_1X8,
80 V4L2_PIX_FMT_SGBRG10, 10, 2, }, 80 V4L2_PIX_FMT_SGBRG10, 10, 2, },
81 { V4L2_MBUS_FMT_SGRBG10_1X10, V4L2_MBUS_FMT_SGRBG10_1X10, 81 { MEDIA_BUS_FMT_SGRBG10_1X10, MEDIA_BUS_FMT_SGRBG10_1X10,
82 V4L2_MBUS_FMT_SGRBG10_1X10, V4L2_MBUS_FMT_SGRBG8_1X8, 82 MEDIA_BUS_FMT_SGRBG10_1X10, MEDIA_BUS_FMT_SGRBG8_1X8,
83 V4L2_PIX_FMT_SGRBG10, 10, 2, }, 83 V4L2_PIX_FMT_SGRBG10, 10, 2, },
84 { V4L2_MBUS_FMT_SRGGB10_1X10, V4L2_MBUS_FMT_SRGGB10_1X10, 84 { MEDIA_BUS_FMT_SRGGB10_1X10, MEDIA_BUS_FMT_SRGGB10_1X10,
85 V4L2_MBUS_FMT_SRGGB10_1X10, V4L2_MBUS_FMT_SRGGB8_1X8, 85 MEDIA_BUS_FMT_SRGGB10_1X10, MEDIA_BUS_FMT_SRGGB8_1X8,
86 V4L2_PIX_FMT_SRGGB10, 10, 2, }, 86 V4L2_PIX_FMT_SRGGB10, 10, 2, },
87 { V4L2_MBUS_FMT_SBGGR12_1X12, V4L2_MBUS_FMT_SBGGR10_1X10, 87 { MEDIA_BUS_FMT_SBGGR12_1X12, MEDIA_BUS_FMT_SBGGR10_1X10,
88 V4L2_MBUS_FMT_SBGGR12_1X12, V4L2_MBUS_FMT_SBGGR8_1X8, 88 MEDIA_BUS_FMT_SBGGR12_1X12, MEDIA_BUS_FMT_SBGGR8_1X8,
89 V4L2_PIX_FMT_SBGGR12, 12, 2, }, 89 V4L2_PIX_FMT_SBGGR12, 12, 2, },
90 { V4L2_MBUS_FMT_SGBRG12_1X12, V4L2_MBUS_FMT_SGBRG10_1X10, 90 { MEDIA_BUS_FMT_SGBRG12_1X12, MEDIA_BUS_FMT_SGBRG10_1X10,
91 V4L2_MBUS_FMT_SGBRG12_1X12, V4L2_MBUS_FMT_SGBRG8_1X8, 91 MEDIA_BUS_FMT_SGBRG12_1X12, MEDIA_BUS_FMT_SGBRG8_1X8,
92 V4L2_PIX_FMT_SGBRG12, 12, 2, }, 92 V4L2_PIX_FMT_SGBRG12, 12, 2, },
93 { V4L2_MBUS_FMT_SGRBG12_1X12, V4L2_MBUS_FMT_SGRBG10_1X10, 93 { MEDIA_BUS_FMT_SGRBG12_1X12, MEDIA_BUS_FMT_SGRBG10_1X10,
94 V4L2_MBUS_FMT_SGRBG12_1X12, V4L2_MBUS_FMT_SGRBG8_1X8, 94 MEDIA_BUS_FMT_SGRBG12_1X12, MEDIA_BUS_FMT_SGRBG8_1X8,
95 V4L2_PIX_FMT_SGRBG12, 12, 2, }, 95 V4L2_PIX_FMT_SGRBG12, 12, 2, },
96 { V4L2_MBUS_FMT_SRGGB12_1X12, V4L2_MBUS_FMT_SRGGB10_1X10, 96 { MEDIA_BUS_FMT_SRGGB12_1X12, MEDIA_BUS_FMT_SRGGB10_1X10,
97 V4L2_MBUS_FMT_SRGGB12_1X12, V4L2_MBUS_FMT_SRGGB8_1X8, 97 MEDIA_BUS_FMT_SRGGB12_1X12, MEDIA_BUS_FMT_SRGGB8_1X8,
98 V4L2_PIX_FMT_SRGGB12, 12, 2, }, 98 V4L2_PIX_FMT_SRGGB12, 12, 2, },
99 { V4L2_MBUS_FMT_UYVY8_1X16, V4L2_MBUS_FMT_UYVY8_1X16, 99 { MEDIA_BUS_FMT_UYVY8_1X16, MEDIA_BUS_FMT_UYVY8_1X16,
100 V4L2_MBUS_FMT_UYVY8_1X16, 0, 100 MEDIA_BUS_FMT_UYVY8_1X16, 0,
101 V4L2_PIX_FMT_UYVY, 16, 2, }, 101 V4L2_PIX_FMT_UYVY, 16, 2, },
102 { V4L2_MBUS_FMT_YUYV8_1X16, V4L2_MBUS_FMT_YUYV8_1X16, 102 { MEDIA_BUS_FMT_YUYV8_1X16, MEDIA_BUS_FMT_YUYV8_1X16,
103 V4L2_MBUS_FMT_YUYV8_1X16, 0, 103 MEDIA_BUS_FMT_YUYV8_1X16, 0,
104 V4L2_PIX_FMT_YUYV, 16, 2, }, 104 V4L2_PIX_FMT_YUYV, 16, 2, },
105 { V4L2_MBUS_FMT_UYVY8_2X8, V4L2_MBUS_FMT_UYVY8_2X8, 105 { MEDIA_BUS_FMT_UYVY8_2X8, MEDIA_BUS_FMT_UYVY8_2X8,
106 V4L2_MBUS_FMT_UYVY8_2X8, 0, 106 MEDIA_BUS_FMT_UYVY8_2X8, 0,
107 V4L2_PIX_FMT_UYVY, 8, 2, }, 107 V4L2_PIX_FMT_UYVY, 8, 2, },
108 { V4L2_MBUS_FMT_YUYV8_2X8, V4L2_MBUS_FMT_YUYV8_2X8, 108 { MEDIA_BUS_FMT_YUYV8_2X8, MEDIA_BUS_FMT_YUYV8_2X8,
109 V4L2_MBUS_FMT_YUYV8_2X8, 0, 109 MEDIA_BUS_FMT_YUYV8_2X8, 0,
110 V4L2_PIX_FMT_YUYV, 8, 2, }, 110 V4L2_PIX_FMT_YUYV, 8, 2, },
111 /* Empty entry to catch the unsupported pixel code (0) used by the CCDC 111 /* Empty entry to catch the unsupported pixel code (0) used by the CCDC
112 * module and avoid NULL pointer dereferences. 112 * module and avoid NULL pointer dereferences.
@@ -114,8 +114,7 @@ static struct isp_format_info formats[] = {
114 { 0, } 114 { 0, }
115}; 115};
116 116
117const struct isp_format_info * 117const struct isp_format_info *omap3isp_video_format_info(u32 code)
118omap3isp_video_format_info(enum v4l2_mbus_pixelcode code)
119{ 118{
120 unsigned int i; 119 unsigned int i;
121 120
diff --git a/drivers/media/platform/omap3isp/ispvideo.h b/drivers/media/platform/omap3isp/ispvideo.h
index 0b7efedc3da9..4071dd7060ea 100644
--- a/drivers/media/platform/omap3isp/ispvideo.h
+++ b/drivers/media/platform/omap3isp/ispvideo.h
@@ -44,10 +44,10 @@ struct v4l2_pix_format;
44 * @bpp: Bytes per pixel (when stored in memory) 44 * @bpp: Bytes per pixel (when stored in memory)
45 */ 45 */
46struct isp_format_info { 46struct isp_format_info {
47 enum v4l2_mbus_pixelcode code; 47 u32 code;
48 enum v4l2_mbus_pixelcode truncated; 48 u32 truncated;
49 enum v4l2_mbus_pixelcode uncompressed; 49 u32 uncompressed;
50 enum v4l2_mbus_pixelcode flavor; 50 u32 flavor;
51 u32 pixelformat; 51 u32 pixelformat;
52 unsigned int width; 52 unsigned int width;
53 unsigned int bpp; 53 unsigned int bpp;
@@ -206,6 +206,6 @@ void omap3isp_video_resume(struct isp_video *video, int continuous);
206struct media_pad *omap3isp_video_remote_pad(struct isp_video *video); 206struct media_pad *omap3isp_video_remote_pad(struct isp_video *video);
207 207
208const struct isp_format_info * 208const struct isp_format_info *
209omap3isp_video_format_info(enum v4l2_mbus_pixelcode code); 209omap3isp_video_format_info(u32 code);
210 210
211#endif /* OMAP3_ISP_VIDEO_H */ 211#endif /* OMAP3_ISP_VIDEO_H */
diff --git a/drivers/media/platform/s3c-camif/camif-capture.c b/drivers/media/platform/s3c-camif/camif-capture.c
index 4f81b4c9d113..aa40c8269ab8 100644
--- a/drivers/media/platform/s3c-camif/camif-capture.c
+++ b/drivers/media/platform/s3c-camif/camif-capture.c
@@ -1218,11 +1218,11 @@ void s3c_camif_unregister_video_node(struct camif_dev *camif, int idx)
1218} 1218}
1219 1219
1220/* Media bus pixel formats supported at the camif input */ 1220/* Media bus pixel formats supported at the camif input */
1221static const enum v4l2_mbus_pixelcode camif_mbus_formats[] = { 1221static const u32 camif_mbus_formats[] = {
1222 V4L2_MBUS_FMT_YUYV8_2X8, 1222 MEDIA_BUS_FMT_YUYV8_2X8,
1223 V4L2_MBUS_FMT_YVYU8_2X8, 1223 MEDIA_BUS_FMT_YVYU8_2X8,
1224 V4L2_MBUS_FMT_UYVY8_2X8, 1224 MEDIA_BUS_FMT_UYVY8_2X8,
1225 V4L2_MBUS_FMT_VYUY8_2X8, 1225 MEDIA_BUS_FMT_VYUY8_2X8,
1226}; 1226};
1227 1227
1228/* 1228/*
diff --git a/drivers/media/platform/s3c-camif/camif-core.c b/drivers/media/platform/s3c-camif/camif-core.c
index b38574702fe9..3b09b5b6ad51 100644
--- a/drivers/media/platform/s3c-camif/camif-core.c
+++ b/drivers/media/platform/s3c-camif/camif-core.c
@@ -256,8 +256,7 @@ static void camif_unregister_sensor(struct camif_dev *camif)
256 v4l2_device_unregister_subdev(sd); 256 v4l2_device_unregister_subdev(sd);
257 camif->sensor.sd = NULL; 257 camif->sensor.sd = NULL;
258 i2c_unregister_device(client); 258 i2c_unregister_device(client);
259 if (adapter) 259 i2c_put_adapter(adapter);
260 i2c_put_adapter(adapter);
261} 260}
262 261
263static int camif_create_media_links(struct camif_dev *camif) 262static int camif_create_media_links(struct camif_dev *camif)
diff --git a/drivers/media/platform/s3c-camif/camif-regs.c b/drivers/media/platform/s3c-camif/camif-regs.c
index 6e0c9988a191..812fb3a7c4e3 100644
--- a/drivers/media/platform/s3c-camif/camif-regs.c
+++ b/drivers/media/platform/s3c-camif/camif-regs.c
@@ -96,10 +96,10 @@ void camif_hw_set_effect(struct camif_dev *camif, unsigned int effect,
96} 96}
97 97
98static const u32 src_pixfmt_map[8][2] = { 98static const u32 src_pixfmt_map[8][2] = {
99 { V4L2_MBUS_FMT_YUYV8_2X8, CISRCFMT_ORDER422_YCBYCR }, 99 { MEDIA_BUS_FMT_YUYV8_2X8, CISRCFMT_ORDER422_YCBYCR },
100 { V4L2_MBUS_FMT_YVYU8_2X8, CISRCFMT_ORDER422_YCRYCB }, 100 { MEDIA_BUS_FMT_YVYU8_2X8, CISRCFMT_ORDER422_YCRYCB },
101 { V4L2_MBUS_FMT_UYVY8_2X8, CISRCFMT_ORDER422_CBYCRY }, 101 { MEDIA_BUS_FMT_UYVY8_2X8, CISRCFMT_ORDER422_CBYCRY },
102 { V4L2_MBUS_FMT_VYUY8_2X8, CISRCFMT_ORDER422_CRYCBY }, 102 { MEDIA_BUS_FMT_VYUY8_2X8, CISRCFMT_ORDER422_CRYCBY },
103}; 103};
104 104
105/* Set camera input pixel format and resolution */ 105/* Set camera input pixel format and resolution */
diff --git a/drivers/media/platform/s5p-g2d/g2d.c b/drivers/media/platform/s5p-g2d/g2d.c
index d79e214ce8ce..51e4edc92d28 100644
--- a/drivers/media/platform/s5p-g2d/g2d.c
+++ b/drivers/media/platform/s5p-g2d/g2d.c
@@ -297,14 +297,8 @@ static int vidioc_querycap(struct file *file, void *priv,
297 strncpy(cap->driver, G2D_NAME, sizeof(cap->driver) - 1); 297 strncpy(cap->driver, G2D_NAME, sizeof(cap->driver) - 1);
298 strncpy(cap->card, G2D_NAME, sizeof(cap->card) - 1); 298 strncpy(cap->card, G2D_NAME, sizeof(cap->card) - 1);
299 cap->bus_info[0] = 0; 299 cap->bus_info[0] = 0;
300 cap->version = KERNEL_VERSION(1, 0, 0); 300 cap->device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING;
301 /* 301 cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
302 * This is only a mem-to-mem video device. The capture and output
303 * device capability flags are left only for backward compatibility
304 * and are scheduled for removal.
305 */
306 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT |
307 V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING;
308 return 0; 302 return 0;
309} 303}
310 304
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c
index 6fcc7f072ace..3ecf0e06a4dd 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.c
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c
@@ -1001,13 +1001,8 @@ static int s5p_jpeg_querycap(struct file *file, void *priv,
1001 sizeof(cap->card)); 1001 sizeof(cap->card));
1002 } 1002 }
1003 cap->bus_info[0] = 0; 1003 cap->bus_info[0] = 0;
1004 /* 1004 cap->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M;
1005 * This is only a mem-to-mem video device. The capture and output 1005 cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
1006 * device capability flags are left only for backward compatibility
1007 * and are scheduled for removal.
1008 */
1009 cap->capabilities = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M |
1010 V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT;
1011 return 0; 1006 return 0;
1012} 1007}
1013 1008
@@ -2632,7 +2627,7 @@ static int s5p_jpeg_remove(struct platform_device *pdev)
2632 return 0; 2627 return 0;
2633} 2628}
2634 2629
2635#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP) 2630#ifdef CONFIG_PM
2636static int s5p_jpeg_runtime_suspend(struct device *dev) 2631static int s5p_jpeg_runtime_suspend(struct device *dev)
2637{ 2632{
2638 struct s5p_jpeg *jpeg = dev_get_drvdata(dev); 2633 struct s5p_jpeg *jpeg = dev_get_drvdata(dev);
@@ -2682,7 +2677,7 @@ static int s5p_jpeg_runtime_resume(struct device *dev)
2682 2677
2683 return 0; 2678 return 0;
2684} 2679}
2685#endif /* CONFIG_PM_RUNTIME || CONFIG_PM_SLEEP */ 2680#endif /* CONFIG_PM */
2686 2681
2687#ifdef CONFIG_PM_SLEEP 2682#ifdef CONFIG_PM_SLEEP
2688static int s5p_jpeg_suspend(struct device *dev) 2683static int s5p_jpeg_suspend(struct device *dev)
diff --git a/drivers/media/platform/s5p-mfc/regs-mfc-v6.h b/drivers/media/platform/s5p-mfc/regs-mfc-v6.h
index 51cb2dd0e13a..83e01f3466e9 100644
--- a/drivers/media/platform/s5p-mfc/regs-mfc-v6.h
+++ b/drivers/media/platform/s5p-mfc/regs-mfc-v6.h
@@ -71,6 +71,7 @@
71#define S5P_FIMV_R2H_CMD_ENC_BUFFER_FUL_RET_V6 16 71#define S5P_FIMV_R2H_CMD_ENC_BUFFER_FUL_RET_V6 16
72#define S5P_FIMV_R2H_CMD_ERR_RET_V6 32 72#define S5P_FIMV_R2H_CMD_ERR_RET_V6 32
73 73
74#define S5P_FIMV_MFC_BUS_RESET_CTRL 0x7110
74#define S5P_FIMV_FW_VERSION_V6 0xf000 75#define S5P_FIMV_FW_VERSION_V6 0xf000
75 76
76#define S5P_FIMV_INSTANCE_ID_V6 0xf008 77#define S5P_FIMV_INSTANCE_ID_V6 0xf008
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index 165bc86c5962..b80a576164f1 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -159,6 +159,10 @@ static void s5p_mfc_watchdog_worker(struct work_struct *work)
159 } 159 }
160 clear_bit(0, &dev->hw_lock); 160 clear_bit(0, &dev->hw_lock);
161 spin_unlock_irqrestore(&dev->irqlock, flags); 161 spin_unlock_irqrestore(&dev->irqlock, flags);
162
163 /* De-init MFC */
164 s5p_mfc_deinit_hw(dev);
165
162 /* Double check if there is at least one instance running. 166 /* Double check if there is at least one instance running.
163 * If no instance is in memory than no firmware should be present */ 167 * If no instance is in memory than no firmware should be present */
164 if (dev->num_inst > 0) { 168 if (dev->num_inst > 0) {
@@ -220,11 +224,14 @@ static void s5p_mfc_handle_frame_copy_time(struct s5p_mfc_ctx *ctx)
220 size_t dec_y_addr; 224 size_t dec_y_addr;
221 unsigned int frame_type; 225 unsigned int frame_type;
222 226
223 dec_y_addr = s5p_mfc_hw_call(dev->mfc_ops, get_dec_y_adr, dev); 227 /* Make sure we actually have a new frame before continuing. */
224 frame_type = s5p_mfc_hw_call(dev->mfc_ops, get_dec_frame_type, dev); 228 frame_type = s5p_mfc_hw_call(dev->mfc_ops, get_dec_frame_type, dev);
229 if (frame_type == S5P_FIMV_DECODE_FRAME_SKIPPED)
230 return;
231 dec_y_addr = s5p_mfc_hw_call(dev->mfc_ops, get_dec_y_adr, dev);
225 232
226 /* Copy timestamp / timecode from decoded src to dst and set 233 /* Copy timestamp / timecode from decoded src to dst and set
227 appropriate flags */ 234 appropriate flags. */
228 src_buf = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list); 235 src_buf = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
229 list_for_each_entry(dst_buf, &ctx->dst_queue, list) { 236 list_for_each_entry(dst_buf, &ctx->dst_queue, list) {
230 if (vb2_dma_contig_plane_dma_addr(dst_buf->b, 0) == dec_y_addr) { 237 if (vb2_dma_contig_plane_dma_addr(dst_buf->b, 0) == dec_y_addr) {
@@ -250,6 +257,11 @@ static void s5p_mfc_handle_frame_copy_time(struct s5p_mfc_ctx *ctx)
250 dst_buf->b->v4l2_buf.flags |= 257 dst_buf->b->v4l2_buf.flags |=
251 V4L2_BUF_FLAG_BFRAME; 258 V4L2_BUF_FLAG_BFRAME;
252 break; 259 break;
260 default:
261 /* Don't know how to handle
262 S5P_FIMV_DECODE_FRAME_OTHER_FRAME. */
263 mfc_debug(2, "Unexpected frame type: %d\n",
264 frame_type);
253 } 265 }
254 break; 266 break;
255 } 267 }
@@ -334,8 +346,7 @@ static void s5p_mfc_handle_frame(struct s5p_mfc_ctx *ctx,
334 ctx->state = MFCINST_RES_CHANGE_INIT; 346 ctx->state = MFCINST_RES_CHANGE_INIT;
335 s5p_mfc_hw_call_void(dev->mfc_ops, clear_int_flags, dev); 347 s5p_mfc_hw_call_void(dev->mfc_ops, clear_int_flags, dev);
336 wake_up_ctx(ctx, reason, err); 348 wake_up_ctx(ctx, reason, err);
337 if (test_and_clear_bit(0, &dev->hw_lock) == 0) 349 WARN_ON(test_and_clear_bit(0, &dev->hw_lock) == 0);
338 BUG();
339 s5p_mfc_clock_off(); 350 s5p_mfc_clock_off();
340 s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev); 351 s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev);
341 return; 352 return;
@@ -407,8 +418,7 @@ leave_handle_frame:
407 clear_work_bit(ctx); 418 clear_work_bit(ctx);
408 s5p_mfc_hw_call_void(dev->mfc_ops, clear_int_flags, dev); 419 s5p_mfc_hw_call_void(dev->mfc_ops, clear_int_flags, dev);
409 wake_up_ctx(ctx, reason, err); 420 wake_up_ctx(ctx, reason, err);
410 if (test_and_clear_bit(0, &dev->hw_lock) == 0) 421 WARN_ON(test_and_clear_bit(0, &dev->hw_lock) == 0);
411 BUG();
412 s5p_mfc_clock_off(); 422 s5p_mfc_clock_off();
413 /* if suspending, wake up device and do not try_run again*/ 423 /* if suspending, wake up device and do not try_run again*/
414 if (test_bit(0, &dev->enter_suspend)) 424 if (test_bit(0, &dev->enter_suspend))
@@ -455,8 +465,7 @@ static void s5p_mfc_handle_error(struct s5p_mfc_dev *dev,
455 break; 465 break;
456 } 466 }
457 } 467 }
458 if (test_and_clear_bit(0, &dev->hw_lock) == 0) 468 WARN_ON(test_and_clear_bit(0, &dev->hw_lock) == 0);
459 BUG();
460 s5p_mfc_hw_call_void(dev->mfc_ops, clear_int_flags, dev); 469 s5p_mfc_hw_call_void(dev->mfc_ops, clear_int_flags, dev);
461 s5p_mfc_clock_off(); 470 s5p_mfc_clock_off();
462 wake_up_dev(dev, reason, err); 471 wake_up_dev(dev, reason, err);
@@ -510,8 +519,7 @@ static void s5p_mfc_handle_seq_done(struct s5p_mfc_ctx *ctx,
510 } 519 }
511 s5p_mfc_hw_call_void(dev->mfc_ops, clear_int_flags, dev); 520 s5p_mfc_hw_call_void(dev->mfc_ops, clear_int_flags, dev);
512 clear_work_bit(ctx); 521 clear_work_bit(ctx);
513 if (test_and_clear_bit(0, &dev->hw_lock) == 0) 522 WARN_ON(test_and_clear_bit(0, &dev->hw_lock) == 0);
514 BUG();
515 s5p_mfc_clock_off(); 523 s5p_mfc_clock_off();
516 s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev); 524 s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev);
517 wake_up_ctx(ctx, reason, err); 525 wake_up_ctx(ctx, reason, err);
@@ -549,16 +557,14 @@ static void s5p_mfc_handle_init_buffers(struct s5p_mfc_ctx *ctx,
549 } else { 557 } else {
550 ctx->dpb_flush_flag = 0; 558 ctx->dpb_flush_flag = 0;
551 } 559 }
552 if (test_and_clear_bit(0, &dev->hw_lock) == 0) 560 WARN_ON(test_and_clear_bit(0, &dev->hw_lock) == 0);
553 BUG();
554 561
555 s5p_mfc_clock_off(); 562 s5p_mfc_clock_off();
556 563
557 wake_up(&ctx->queue); 564 wake_up(&ctx->queue);
558 s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev); 565 s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev);
559 } else { 566 } else {
560 if (test_and_clear_bit(0, &dev->hw_lock) == 0) 567 WARN_ON(test_and_clear_bit(0, &dev->hw_lock) == 0);
561 BUG();
562 568
563 s5p_mfc_clock_off(); 569 s5p_mfc_clock_off();
564 570
@@ -635,8 +641,7 @@ static irqreturn_t s5p_mfc_irq(int irq, void *priv)
635 mfc_err("post_frame_start() failed\n"); 641 mfc_err("post_frame_start() failed\n");
636 s5p_mfc_hw_call_void(dev->mfc_ops, clear_int_flags, dev); 642 s5p_mfc_hw_call_void(dev->mfc_ops, clear_int_flags, dev);
637 wake_up_ctx(ctx, reason, err); 643 wake_up_ctx(ctx, reason, err);
638 if (test_and_clear_bit(0, &dev->hw_lock) == 0) 644 WARN_ON(test_and_clear_bit(0, &dev->hw_lock) == 0);
639 BUG();
640 s5p_mfc_clock_off(); 645 s5p_mfc_clock_off();
641 s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev); 646 s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev);
642 } else { 647 } else {
@@ -815,7 +820,7 @@ static int s5p_mfc_open(struct file *file)
815 ret = -ENOENT; 820 ret = -ENOENT;
816 goto err_queue_init; 821 goto err_queue_init;
817 } 822 }
818 q->mem_ops = (struct vb2_mem_ops *)&vb2_dma_contig_memops; 823 q->mem_ops = &vb2_dma_contig_memops;
819 q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY; 824 q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
820 ret = vb2_queue_init(q); 825 ret = vb2_queue_init(q);
821 if (ret) { 826 if (ret) {
@@ -837,7 +842,7 @@ static int s5p_mfc_open(struct file *file)
837 ret = -ENOENT; 842 ret = -ENOENT;
838 goto err_queue_init; 843 goto err_queue_init;
839 } 844 }
840 q->mem_ops = (struct vb2_mem_ops *)&vb2_dma_contig_memops; 845 q->mem_ops = &vb2_dma_contig_memops;
841 q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY; 846 q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
842 ret = vb2_queue_init(q); 847 ret = vb2_queue_init(q);
843 if (ret) { 848 if (ret) {
@@ -1284,11 +1289,17 @@ static int s5p_mfc_suspend(struct device *dev)
1284 m_dev->int_cond, msecs_to_jiffies(MFC_INT_TIMEOUT)); 1289 m_dev->int_cond, msecs_to_jiffies(MFC_INT_TIMEOUT));
1285 if (ret == 0) { 1290 if (ret == 0) {
1286 mfc_err("Waiting for hardware to finish timed out\n"); 1291 mfc_err("Waiting for hardware to finish timed out\n");
1292 clear_bit(0, &m_dev->enter_suspend);
1287 return -EIO; 1293 return -EIO;
1288 } 1294 }
1289 } 1295 }
1290 1296
1291 return s5p_mfc_sleep(m_dev); 1297 ret = s5p_mfc_sleep(m_dev);
1298 if (ret) {
1299 clear_bit(0, &m_dev->enter_suspend);
1300 clear_bit(0, &m_dev->hw_lock);
1301 }
1302 return ret;
1292} 1303}
1293 1304
1294static int s5p_mfc_resume(struct device *dev) 1305static int s5p_mfc_resume(struct device *dev)
@@ -1302,7 +1313,7 @@ static int s5p_mfc_resume(struct device *dev)
1302} 1313}
1303#endif 1314#endif
1304 1315
1305#ifdef CONFIG_PM_RUNTIME 1316#ifdef CONFIG_PM
1306static int s5p_mfc_runtime_suspend(struct device *dev) 1317static int s5p_mfc_runtime_suspend(struct device *dev)
1307{ 1318{
1308 struct platform_device *pdev = to_platform_device(dev); 1319 struct platform_device *pdev = to_platform_device(dev);
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
index 3e41ca1293ed..15f7663dd9f5 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
@@ -237,8 +237,6 @@ struct s5p_mfc_variant {
237 237
238/** 238/**
239 * struct s5p_mfc_priv_buf - represents internal used buffer 239 * struct s5p_mfc_priv_buf - represents internal used buffer
240 * @alloc: allocation-specific context for each buffer
241 * (videobuf2 allocator)
242 * @ofs: offset of each buffer, will be used for MFC 240 * @ofs: offset of each buffer, will be used for MFC
243 * @virt: kernel virtual address, only valid when the 241 * @virt: kernel virtual address, only valid when the
244 * buffer accessed by driver 242 * buffer accessed by driver
@@ -246,7 +244,6 @@ struct s5p_mfc_variant {
246 * @size: size of the buffer 244 * @size: size of the buffer
247 */ 245 */
248struct s5p_mfc_priv_buf { 246struct s5p_mfc_priv_buf {
249 void *alloc;
250 unsigned long ofs; 247 unsigned long ofs;
251 void *virt; 248 void *virt;
252 dma_addr_t dma; 249 dma_addr_t dma;
@@ -340,6 +337,7 @@ struct s5p_mfc_dev {
340 struct s5p_mfc_hw_cmds *mfc_cmds; 337 struct s5p_mfc_hw_cmds *mfc_cmds;
341 const struct s5p_mfc_regs *mfc_regs; 338 const struct s5p_mfc_regs *mfc_regs;
342 enum s5p_mfc_fw_ver fw_ver; 339 enum s5p_mfc_fw_ver fw_ver;
340 bool risc_on; /* indicates if RISC is on or off */
343}; 341};
344 342
345/** 343/**
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
index 0c885a8a0e9f..40d8a03a141d 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
@@ -129,6 +129,25 @@ int s5p_mfc_release_firmware(struct s5p_mfc_dev *dev)
129 return 0; 129 return 0;
130} 130}
131 131
132static int s5p_mfc_bus_reset(struct s5p_mfc_dev *dev)
133{
134 unsigned int status;
135 unsigned long timeout;
136
137 /* Reset */
138 mfc_write(dev, 0x1, S5P_FIMV_MFC_BUS_RESET_CTRL);
139 timeout = jiffies + msecs_to_jiffies(MFC_BW_TIMEOUT);
140 /* Check bus status */
141 do {
142 if (time_after(jiffies, timeout)) {
143 mfc_err("Timeout while resetting MFC.\n");
144 return -EIO;
145 }
146 status = mfc_read(dev, S5P_FIMV_MFC_BUS_RESET_CTRL);
147 } while ((status & 0x2) == 0);
148 return 0;
149}
150
132/* Reset the device */ 151/* Reset the device */
133int s5p_mfc_reset(struct s5p_mfc_dev *dev) 152int s5p_mfc_reset(struct s5p_mfc_dev *dev)
134{ 153{
@@ -139,12 +158,6 @@ int s5p_mfc_reset(struct s5p_mfc_dev *dev)
139 mfc_debug_enter(); 158 mfc_debug_enter();
140 159
141 if (IS_MFCV6_PLUS(dev)) { 160 if (IS_MFCV6_PLUS(dev)) {
142 /* Reset IP */
143 /* except RISC, reset */
144 mfc_write(dev, 0xFEE, S5P_FIMV_MFC_RESET_V6);
145 /* reset release */
146 mfc_write(dev, 0x0, S5P_FIMV_MFC_RESET_V6);
147
148 /* Zero Initialization of MFC registers */ 161 /* Zero Initialization of MFC registers */
149 mfc_write(dev, 0, S5P_FIMV_RISC2HOST_CMD_V6); 162 mfc_write(dev, 0, S5P_FIMV_RISC2HOST_CMD_V6);
150 mfc_write(dev, 0, S5P_FIMV_HOST2RISC_CMD_V6); 163 mfc_write(dev, 0, S5P_FIMV_HOST2RISC_CMD_V6);
@@ -153,8 +166,17 @@ int s5p_mfc_reset(struct s5p_mfc_dev *dev)
153 for (i = 0; i < S5P_FIMV_REG_CLEAR_COUNT_V6; i++) 166 for (i = 0; i < S5P_FIMV_REG_CLEAR_COUNT_V6; i++)
154 mfc_write(dev, 0, S5P_FIMV_REG_CLEAR_BEGIN_V6 + (i*4)); 167 mfc_write(dev, 0, S5P_FIMV_REG_CLEAR_BEGIN_V6 + (i*4));
155 168
156 /* Reset */ 169 /* check bus reset control before reset */
157 mfc_write(dev, 0, S5P_FIMV_RISC_ON_V6); 170 if (dev->risc_on)
171 if (s5p_mfc_bus_reset(dev))
172 return -EIO;
173 /* Reset
174 * set RISC_ON to 0 during power_on & wake_up.
175 * V6 needs RISC_ON set to 0 during reset also.
176 */
177 if ((!dev->risc_on) || (!IS_MFCV7_PLUS(dev)))
178 mfc_write(dev, 0, S5P_FIMV_RISC_ON_V6);
179
158 mfc_write(dev, 0x1FFF, S5P_FIMV_MFC_RESET_V6); 180 mfc_write(dev, 0x1FFF, S5P_FIMV_MFC_RESET_V6);
159 mfc_write(dev, 0, S5P_FIMV_MFC_RESET_V6); 181 mfc_write(dev, 0, S5P_FIMV_MFC_RESET_V6);
160 } else { 182 } else {
@@ -226,6 +248,7 @@ int s5p_mfc_init_hw(struct s5p_mfc_dev *dev)
226 /* 0. MFC reset */ 248 /* 0. MFC reset */
227 mfc_debug(2, "MFC reset..\n"); 249 mfc_debug(2, "MFC reset..\n");
228 s5p_mfc_clock_on(); 250 s5p_mfc_clock_on();
251 dev->risc_on = 0;
229 ret = s5p_mfc_reset(dev); 252 ret = s5p_mfc_reset(dev);
230 if (ret) { 253 if (ret) {
231 mfc_err("Failed to reset MFC - timeout\n"); 254 mfc_err("Failed to reset MFC - timeout\n");
@@ -238,8 +261,10 @@ int s5p_mfc_init_hw(struct s5p_mfc_dev *dev)
238 s5p_mfc_clear_cmds(dev); 261 s5p_mfc_clear_cmds(dev);
239 /* 3. Release reset signal to the RISC */ 262 /* 3. Release reset signal to the RISC */
240 s5p_mfc_clean_dev_int_flags(dev); 263 s5p_mfc_clean_dev_int_flags(dev);
241 if (IS_MFCV6_PLUS(dev)) 264 if (IS_MFCV6_PLUS(dev)) {
265 dev->risc_on = 1;
242 mfc_write(dev, 0x1, S5P_FIMV_RISC_ON_V6); 266 mfc_write(dev, 0x1, S5P_FIMV_RISC_ON_V6);
267 }
243 else 268 else
244 mfc_write(dev, 0x3ff, S5P_FIMV_SW_RESET); 269 mfc_write(dev, 0x3ff, S5P_FIMV_SW_RESET);
245 mfc_debug(2, "Will now wait for completion of firmware transfer\n"); 270 mfc_debug(2, "Will now wait for completion of firmware transfer\n");
@@ -328,6 +353,58 @@ int s5p_mfc_sleep(struct s5p_mfc_dev *dev)
328 return ret; 353 return ret;
329} 354}
330 355
356static int s5p_mfc_v8_wait_wakeup(struct s5p_mfc_dev *dev)
357{
358 int ret;
359
360 /* Release reset signal to the RISC */
361 dev->risc_on = 1;
362 mfc_write(dev, 0x1, S5P_FIMV_RISC_ON_V6);
363
364 if (s5p_mfc_wait_for_done_dev(dev, S5P_MFC_R2H_CMD_FW_STATUS_RET)) {
365 mfc_err("Failed to reset MFCV8\n");
366 return -EIO;
367 }
368 mfc_debug(2, "Write command to wakeup MFCV8\n");
369 ret = s5p_mfc_hw_call(dev->mfc_cmds, wakeup_cmd, dev);
370 if (ret) {
371 mfc_err("Failed to send command to MFCV8 - timeout\n");
372 return ret;
373 }
374
375 if (s5p_mfc_wait_for_done_dev(dev, S5P_MFC_R2H_CMD_WAKEUP_RET)) {
376 mfc_err("Failed to wakeup MFC\n");
377 return -EIO;
378 }
379 return ret;
380}
381
382static int s5p_mfc_wait_wakeup(struct s5p_mfc_dev *dev)
383{
384 int ret;
385
386 /* Send MFC wakeup command */
387 ret = s5p_mfc_hw_call(dev->mfc_cmds, wakeup_cmd, dev);
388 if (ret) {
389 mfc_err("Failed to send command to MFC - timeout\n");
390 return ret;
391 }
392
393 /* Release reset signal to the RISC */
394 if (IS_MFCV6_PLUS(dev)) {
395 dev->risc_on = 1;
396 mfc_write(dev, 0x1, S5P_FIMV_RISC_ON_V6);
397 } else {
398 mfc_write(dev, 0x3ff, S5P_FIMV_SW_RESET);
399 }
400
401 if (s5p_mfc_wait_for_done_dev(dev, S5P_MFC_R2H_CMD_WAKEUP_RET)) {
402 mfc_err("Failed to wakeup MFC\n");
403 return -EIO;
404 }
405 return ret;
406}
407
331int s5p_mfc_wakeup(struct s5p_mfc_dev *dev) 408int s5p_mfc_wakeup(struct s5p_mfc_dev *dev)
332{ 409{
333 int ret; 410 int ret;
@@ -336,9 +413,11 @@ int s5p_mfc_wakeup(struct s5p_mfc_dev *dev)
336 /* 0. MFC reset */ 413 /* 0. MFC reset */
337 mfc_debug(2, "MFC reset..\n"); 414 mfc_debug(2, "MFC reset..\n");
338 s5p_mfc_clock_on(); 415 s5p_mfc_clock_on();
416 dev->risc_on = 0;
339 ret = s5p_mfc_reset(dev); 417 ret = s5p_mfc_reset(dev);
340 if (ret) { 418 if (ret) {
341 mfc_err("Failed to reset MFC - timeout\n"); 419 mfc_err("Failed to reset MFC - timeout\n");
420 s5p_mfc_clock_off();
342 return ret; 421 return ret;
343 } 422 }
344 mfc_debug(2, "Done MFC reset..\n"); 423 mfc_debug(2, "Done MFC reset..\n");
@@ -347,23 +426,16 @@ int s5p_mfc_wakeup(struct s5p_mfc_dev *dev)
347 /* 2. Initialize registers of channel I/F */ 426 /* 2. Initialize registers of channel I/F */
348 s5p_mfc_clear_cmds(dev); 427 s5p_mfc_clear_cmds(dev);
349 s5p_mfc_clean_dev_int_flags(dev); 428 s5p_mfc_clean_dev_int_flags(dev);
350 /* 3. Initialize firmware */ 429 /* 3. Send MFC wakeup command and wait for completion*/
351 ret = s5p_mfc_hw_call(dev->mfc_cmds, wakeup_cmd, dev); 430 if (IS_MFCV8(dev))
352 if (ret) { 431 ret = s5p_mfc_v8_wait_wakeup(dev);
353 mfc_err("Failed to send command to MFC - timeout\n");
354 return ret;
355 }
356 /* 4. Release reset signal to the RISC */
357 if (IS_MFCV6_PLUS(dev))
358 mfc_write(dev, 0x1, S5P_FIMV_RISC_ON_V6);
359 else 432 else
360 mfc_write(dev, 0x3ff, S5P_FIMV_SW_RESET); 433 ret = s5p_mfc_wait_wakeup(dev);
361 mfc_debug(2, "Ok, now will write a command to wakeup the system\n"); 434
362 if (s5p_mfc_wait_for_done_dev(dev, S5P_MFC_R2H_CMD_WAKEUP_RET)) {
363 mfc_err("Failed to load firmware\n");
364 return -EIO;
365 }
366 s5p_mfc_clock_off(); 435 s5p_mfc_clock_off();
436 if (ret)
437 return ret;
438
367 dev->int_cond = 0; 439 dev->int_cond = 0;
368 if (dev->int_err != 0 || dev->int_type != 440 if (dev->int_err != 0 || dev->int_type !=
369 S5P_MFC_R2H_CMD_WAKEUP_RET) { 441 S5P_MFC_R2H_CMD_WAKEUP_RET) {
@@ -396,7 +468,6 @@ int s5p_mfc_open_mfc_inst(struct s5p_mfc_dev *dev, struct s5p_mfc_ctx *ctx)
396 } 468 }
397 469
398 set_work_bit_irqsave(ctx); 470 set_work_bit_irqsave(ctx);
399 s5p_mfc_clean_ctx_int_flags(ctx);
400 s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev); 471 s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev);
401 if (s5p_mfc_wait_for_done_ctx(ctx, 472 if (s5p_mfc_wait_for_done_ctx(ctx,
402 S5P_MFC_R2H_CMD_OPEN_INSTANCE_RET, 0)) { 473 S5P_MFC_R2H_CMD_OPEN_INSTANCE_RET, 0)) {
@@ -422,7 +493,6 @@ void s5p_mfc_close_mfc_inst(struct s5p_mfc_dev *dev, struct s5p_mfc_ctx *ctx)
422{ 493{
423 ctx->state = MFCINST_RETURN_INST; 494 ctx->state = MFCINST_RETURN_INST;
424 set_work_bit_irqsave(ctx); 495 set_work_bit_irqsave(ctx);
425 s5p_mfc_clean_ctx_int_flags(ctx);
426 s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev); 496 s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev);
427 /* Wait until instance is returned or timeout occurred */ 497 /* Wait until instance is returned or timeout occurred */
428 if (s5p_mfc_wait_for_done_ctx(ctx, 498 if (s5p_mfc_wait_for_done_ctx(ctx,
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
index a98fe023deaf..c6c3452ccca1 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
@@ -269,15 +269,13 @@ static int vidioc_querycap(struct file *file, void *priv,
269 strncpy(cap->driver, dev->plat_dev->name, sizeof(cap->driver) - 1); 269 strncpy(cap->driver, dev->plat_dev->name, sizeof(cap->driver) - 1);
270 strncpy(cap->card, dev->plat_dev->name, sizeof(cap->card) - 1); 270 strncpy(cap->card, dev->plat_dev->name, sizeof(cap->card) - 1);
271 cap->bus_info[0] = 0; 271 cap->bus_info[0] = 0;
272 cap->version = KERNEL_VERSION(1, 0, 0);
273 /* 272 /*
274 * This is only a mem-to-mem video device. The capture and output 273 * This is only a mem-to-mem video device. The capture and output
275 * device capability flags are left only for backward compatibility 274 * device capability flags are left only for backward compatibility
276 * and are scheduled for removal. 275 * and are scheduled for removal.
277 */ 276 */
278 cap->capabilities = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING | 277 cap->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
279 V4L2_CAP_VIDEO_CAPTURE_MPLANE | 278 cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
280 V4L2_CAP_VIDEO_OUTPUT_MPLANE;
281 return 0; 279 return 0;
282} 280}
283 281
@@ -334,7 +332,6 @@ static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
334 MFCINST_RES_CHANGE_END)) { 332 MFCINST_RES_CHANGE_END)) {
335 /* If the MFC is parsing the header, 333 /* If the MFC is parsing the header,
336 * so wait until it is finished */ 334 * so wait until it is finished */
337 s5p_mfc_clean_ctx_int_flags(ctx);
338 s5p_mfc_wait_for_done_ctx(ctx, S5P_MFC_R2H_CMD_SEQ_DONE_RET, 335 s5p_mfc_wait_for_done_ctx(ctx, S5P_MFC_R2H_CMD_SEQ_DONE_RET,
339 0); 336 0);
340 } 337 }
@@ -740,12 +737,12 @@ static int s5p_mfc_dec_g_v_ctrl(struct v4l2_ctrl *ctrl)
740 ctx->state < MFCINST_ABORT) { 737 ctx->state < MFCINST_ABORT) {
741 ctrl->val = ctx->pb_count; 738 ctrl->val = ctx->pb_count;
742 break; 739 break;
743 } else if (ctx->state != MFCINST_INIT) { 740 } else if (ctx->state != MFCINST_INIT &&
741 ctx->state != MFCINST_RES_CHANGE_END) {
744 v4l2_err(&dev->v4l2_dev, "Decoding not initialised\n"); 742 v4l2_err(&dev->v4l2_dev, "Decoding not initialised\n");
745 return -EINVAL; 743 return -EINVAL;
746 } 744 }
747 /* Should wait for the header to be parsed */ 745 /* Should wait for the header to be parsed */
748 s5p_mfc_clean_ctx_int_flags(ctx);
749 s5p_mfc_wait_for_done_ctx(ctx, 746 s5p_mfc_wait_for_done_ctx(ctx,
750 S5P_MFC_R2H_CMD_SEQ_DONE_RET, 0); 747 S5P_MFC_R2H_CMD_SEQ_DONE_RET, 0);
751 if (ctx->state >= MFCINST_HEAD_PARSED && 748 if (ctx->state >= MFCINST_HEAD_PARSED &&
@@ -1057,7 +1054,6 @@ static void s5p_mfc_stop_streaming(struct vb2_queue *q)
1057 if (IS_MFCV6_PLUS(dev) && (ctx->state == MFCINST_RUNNING)) { 1054 if (IS_MFCV6_PLUS(dev) && (ctx->state == MFCINST_RUNNING)) {
1058 ctx->state = MFCINST_FLUSH; 1055 ctx->state = MFCINST_FLUSH;
1059 set_work_bit_irqsave(ctx); 1056 set_work_bit_irqsave(ctx);
1060 s5p_mfc_clean_ctx_int_flags(ctx);
1061 s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev); 1057 s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev);
1062 if (s5p_mfc_wait_for_done_ctx(ctx, 1058 if (s5p_mfc_wait_for_done_ctx(ctx,
1063 S5P_MFC_R2H_CMD_DPB_FLUSH_RET, 0)) 1059 S5P_MFC_R2H_CMD_DPB_FLUSH_RET, 0))
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
index a904a1c7bb21..bd64f1dcbdb5 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
@@ -32,7 +32,7 @@
32#include "s5p_mfc_intr.h" 32#include "s5p_mfc_intr.h"
33#include "s5p_mfc_opr.h" 33#include "s5p_mfc_opr.h"
34 34
35#define DEF_SRC_FMT_ENC V4L2_PIX_FMT_NV12MT 35#define DEF_SRC_FMT_ENC V4L2_PIX_FMT_NV12M
36#define DEF_DST_FMT_ENC V4L2_PIX_FMT_H264 36#define DEF_DST_FMT_ENC V4L2_PIX_FMT_H264
37 37
38static struct s5p_mfc_fmt formats[] = { 38static struct s5p_mfc_fmt formats[] = {
@@ -67,8 +67,7 @@ static struct s5p_mfc_fmt formats[] = {
67 .codec_mode = S5P_MFC_CODEC_NONE, 67 .codec_mode = S5P_MFC_CODEC_NONE,
68 .type = MFC_FMT_RAW, 68 .type = MFC_FMT_RAW,
69 .num_planes = 2, 69 .num_planes = 2,
70 .versions = MFC_V5_BIT | MFC_V6_BIT | MFC_V7_BIT | 70 .versions = MFC_V6_BIT | MFC_V7_BIT | MFC_V8_BIT,
71 MFC_V8_BIT,
72 }, 71 },
73 { 72 {
74 .name = "H264 Encoded Stream", 73 .name = "H264 Encoded Stream",
@@ -690,6 +689,16 @@ static struct mfc_control controls[] = {
690 .step = 1, 689 .step = 1,
691 .default_value = 0, 690 .default_value = 0,
692 }, 691 },
692 {
693 .id = V4L2_CID_MIN_BUFFERS_FOR_OUTPUT,
694 .type = V4L2_CTRL_TYPE_INTEGER,
695 .name = "Minimum number of output bufs",
696 .minimum = 1,
697 .maximum = 32,
698 .step = 1,
699 .default_value = 1,
700 .is_volatile = 1,
701 },
693}; 702};
694 703
695#define NUM_CTRLS ARRAY_SIZE(controls) 704#define NUM_CTRLS ARRAY_SIZE(controls)
@@ -938,15 +947,13 @@ static int vidioc_querycap(struct file *file, void *priv,
938 strncpy(cap->driver, dev->plat_dev->name, sizeof(cap->driver) - 1); 947 strncpy(cap->driver, dev->plat_dev->name, sizeof(cap->driver) - 1);
939 strncpy(cap->card, dev->plat_dev->name, sizeof(cap->card) - 1); 948 strncpy(cap->card, dev->plat_dev->name, sizeof(cap->card) - 1);
940 cap->bus_info[0] = 0; 949 cap->bus_info[0] = 0;
941 cap->version = KERNEL_VERSION(1, 0, 0);
942 /* 950 /*
943 * This is only a mem-to-mem video device. The capture and output 951 * This is only a mem-to-mem video device. The capture and output
944 * device capability flags are left only for backward compatibility 952 * device capability flags are left only for backward compatibility
945 * and are scheduled for removal. 953 * and are scheduled for removal.
946 */ 954 */
947 cap->capabilities = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING | 955 cap->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
948 V4L2_CAP_VIDEO_CAPTURE_MPLANE | 956 cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
949 V4L2_CAP_VIDEO_OUTPUT_MPLANE;
950 return 0; 957 return 0;
951} 958}
952 959
@@ -1137,6 +1144,11 @@ static int vidioc_reqbufs(struct file *file, void *priv,
1137 (reqbufs->memory != V4L2_MEMORY_USERPTR)) 1144 (reqbufs->memory != V4L2_MEMORY_USERPTR))
1138 return -EINVAL; 1145 return -EINVAL;
1139 if (reqbufs->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { 1146 if (reqbufs->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
1147 if (reqbufs->count == 0) {
1148 ret = vb2_reqbufs(&ctx->vq_dst, reqbufs);
1149 ctx->capture_state = QUEUE_FREE;
1150 return ret;
1151 }
1140 if (ctx->capture_state != QUEUE_FREE) { 1152 if (ctx->capture_state != QUEUE_FREE) {
1141 mfc_err("invalid capture state: %d\n", 1153 mfc_err("invalid capture state: %d\n",
1142 ctx->capture_state); 1154 ctx->capture_state);
@@ -1158,6 +1170,14 @@ static int vidioc_reqbufs(struct file *file, void *priv,
1158 return -ENOMEM; 1170 return -ENOMEM;
1159 } 1171 }
1160 } else if (reqbufs->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { 1172 } else if (reqbufs->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
1173 if (reqbufs->count == 0) {
1174 mfc_debug(2, "Freeing buffers\n");
1175 ret = vb2_reqbufs(&ctx->vq_src, reqbufs);
1176 s5p_mfc_hw_call_void(dev->mfc_ops, release_codec_buffers,
1177 ctx);
1178 ctx->output_state = QUEUE_FREE;
1179 return ret;
1180 }
1161 if (ctx->output_state != QUEUE_FREE) { 1181 if (ctx->output_state != QUEUE_FREE) {
1162 mfc_err("invalid output state: %d\n", 1182 mfc_err("invalid output state: %d\n",
1163 ctx->output_state); 1183 ctx->output_state);
@@ -1624,8 +1644,39 @@ static int s5p_mfc_enc_s_ctrl(struct v4l2_ctrl *ctrl)
1624 return ret; 1644 return ret;
1625} 1645}
1626 1646
1647static int s5p_mfc_enc_g_v_ctrl(struct v4l2_ctrl *ctrl)
1648{
1649 struct s5p_mfc_ctx *ctx = ctrl_to_ctx(ctrl);
1650 struct s5p_mfc_dev *dev = ctx->dev;
1651
1652 switch (ctrl->id) {
1653 case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT:
1654 if (ctx->state >= MFCINST_HEAD_PARSED &&
1655 ctx->state < MFCINST_ABORT) {
1656 ctrl->val = ctx->pb_count;
1657 break;
1658 } else if (ctx->state != MFCINST_INIT) {
1659 v4l2_err(&dev->v4l2_dev, "Encoding not initialised\n");
1660 return -EINVAL;
1661 }
1662 /* Should wait for the header to be produced */
1663 s5p_mfc_wait_for_done_ctx(ctx,
1664 S5P_MFC_R2H_CMD_SEQ_DONE_RET, 0);
1665 if (ctx->state >= MFCINST_HEAD_PARSED &&
1666 ctx->state < MFCINST_ABORT) {
1667 ctrl->val = ctx->pb_count;
1668 } else {
1669 v4l2_err(&dev->v4l2_dev, "Encoding not initialised\n");
1670 return -EINVAL;
1671 }
1672 break;
1673 }
1674 return 0;
1675}
1676
1627static const struct v4l2_ctrl_ops s5p_mfc_enc_ctrl_ops = { 1677static const struct v4l2_ctrl_ops s5p_mfc_enc_ctrl_ops = {
1628 .s_ctrl = s5p_mfc_enc_s_ctrl, 1678 .s_ctrl = s5p_mfc_enc_s_ctrl,
1679 .g_volatile_ctrl = s5p_mfc_enc_g_v_ctrl,
1629}; 1680};
1630 1681
1631static int vidioc_s_parm(struct file *file, void *priv, 1682static int vidioc_s_parm(struct file *file, void *priv,
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
index 7cf07963187d..0c4fcf2dfd09 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
@@ -1178,7 +1178,6 @@ static void s5p_mfc_run_res_change(struct s5p_mfc_ctx *ctx)
1178 1178
1179 s5p_mfc_set_dec_stream_buffer_v5(ctx, 0, 0, 0); 1179 s5p_mfc_set_dec_stream_buffer_v5(ctx, 0, 0, 0);
1180 dev->curr_ctx = ctx->num; 1180 dev->curr_ctx = ctx->num;
1181 s5p_mfc_clean_ctx_int_flags(ctx);
1182 s5p_mfc_decode_one_frame_v5(ctx, MFC_DEC_RES_CHANGE); 1181 s5p_mfc_decode_one_frame_v5(ctx, MFC_DEC_RES_CHANGE);
1183} 1182}
1184 1183
@@ -1192,7 +1191,6 @@ static int s5p_mfc_run_dec_frame(struct s5p_mfc_ctx *ctx, int last_frame)
1192 last_frame = MFC_DEC_LAST_FRAME; 1191 last_frame = MFC_DEC_LAST_FRAME;
1193 s5p_mfc_set_dec_stream_buffer_v5(ctx, 0, 0, 0); 1192 s5p_mfc_set_dec_stream_buffer_v5(ctx, 0, 0, 0);
1194 dev->curr_ctx = ctx->num; 1193 dev->curr_ctx = ctx->num;
1195 s5p_mfc_clean_ctx_int_flags(ctx);
1196 s5p_mfc_decode_one_frame_v5(ctx, last_frame); 1194 s5p_mfc_decode_one_frame_v5(ctx, last_frame);
1197 return 0; 1195 return 0;
1198 } 1196 }
@@ -1212,7 +1210,6 @@ static int s5p_mfc_run_dec_frame(struct s5p_mfc_ctx *ctx, int last_frame)
1212 ctx->consumed_stream, temp_vb->b->v4l2_planes[0].bytesused); 1210 ctx->consumed_stream, temp_vb->b->v4l2_planes[0].bytesused);
1213 spin_unlock_irqrestore(&dev->irqlock, flags); 1211 spin_unlock_irqrestore(&dev->irqlock, flags);
1214 dev->curr_ctx = ctx->num; 1212 dev->curr_ctx = ctx->num;
1215 s5p_mfc_clean_ctx_int_flags(ctx);
1216 if (temp_vb->b->v4l2_planes[0].bytesused == 0) { 1213 if (temp_vb->b->v4l2_planes[0].bytesused == 0) {
1217 last_frame = MFC_DEC_LAST_FRAME; 1214 last_frame = MFC_DEC_LAST_FRAME;
1218 mfc_debug(2, "Setting ctx->state to FINISHING\n"); 1215 mfc_debug(2, "Setting ctx->state to FINISHING\n");
@@ -1273,7 +1270,6 @@ static int s5p_mfc_run_enc_frame(struct s5p_mfc_ctx *ctx)
1273 s5p_mfc_set_enc_stream_buffer_v5(ctx, dst_addr, dst_size); 1270 s5p_mfc_set_enc_stream_buffer_v5(ctx, dst_addr, dst_size);
1274 spin_unlock_irqrestore(&dev->irqlock, flags); 1271 spin_unlock_irqrestore(&dev->irqlock, flags);
1275 dev->curr_ctx = ctx->num; 1272 dev->curr_ctx = ctx->num;
1276 s5p_mfc_clean_ctx_int_flags(ctx);
1277 mfc_debug(2, "encoding buffer with index=%d state=%d\n", 1273 mfc_debug(2, "encoding buffer with index=%d state=%d\n",
1278 src_mb ? src_mb->b->v4l2_buf.index : -1, ctx->state); 1274 src_mb ? src_mb->b->v4l2_buf.index : -1, ctx->state);
1279 s5p_mfc_encode_one_frame_v5(ctx); 1275 s5p_mfc_encode_one_frame_v5(ctx);
@@ -1297,7 +1293,6 @@ static void s5p_mfc_run_init_dec(struct s5p_mfc_ctx *ctx)
1297 0, temp_vb->b->v4l2_planes[0].bytesused); 1293 0, temp_vb->b->v4l2_planes[0].bytesused);
1298 spin_unlock_irqrestore(&dev->irqlock, flags); 1294 spin_unlock_irqrestore(&dev->irqlock, flags);
1299 dev->curr_ctx = ctx->num; 1295 dev->curr_ctx = ctx->num;
1300 s5p_mfc_clean_ctx_int_flags(ctx);
1301 s5p_mfc_init_decode_v5(ctx); 1296 s5p_mfc_init_decode_v5(ctx);
1302} 1297}
1303 1298
@@ -1317,7 +1312,6 @@ static void s5p_mfc_run_init_enc(struct s5p_mfc_ctx *ctx)
1317 s5p_mfc_set_enc_stream_buffer_v5(ctx, dst_addr, dst_size); 1312 s5p_mfc_set_enc_stream_buffer_v5(ctx, dst_addr, dst_size);
1318 spin_unlock_irqrestore(&dev->irqlock, flags); 1313 spin_unlock_irqrestore(&dev->irqlock, flags);
1319 dev->curr_ctx = ctx->num; 1314 dev->curr_ctx = ctx->num;
1320 s5p_mfc_clean_ctx_int_flags(ctx);
1321 s5p_mfc_init_encode_v5(ctx); 1315 s5p_mfc_init_encode_v5(ctx);
1322} 1316}
1323 1317
@@ -1352,7 +1346,6 @@ static int s5p_mfc_run_init_dec_buffers(struct s5p_mfc_ctx *ctx)
1352 0, temp_vb->b->v4l2_planes[0].bytesused); 1346 0, temp_vb->b->v4l2_planes[0].bytesused);
1353 spin_unlock_irqrestore(&dev->irqlock, flags); 1347 spin_unlock_irqrestore(&dev->irqlock, flags);
1354 dev->curr_ctx = ctx->num; 1348 dev->curr_ctx = ctx->num;
1355 s5p_mfc_clean_ctx_int_flags(ctx);
1356 ret = s5p_mfc_set_dec_frame_buffer_v5(ctx); 1349 ret = s5p_mfc_set_dec_frame_buffer_v5(ctx);
1357 if (ret) { 1350 if (ret) {
1358 mfc_err("Failed to alloc frame mem\n"); 1351 mfc_err("Failed to alloc frame mem\n");
@@ -1396,6 +1389,8 @@ static void s5p_mfc_try_run_v5(struct s5p_mfc_dev *dev)
1396 * Now obtaining frames from MFC buffer 1389 * Now obtaining frames from MFC buffer
1397 */ 1390 */
1398 s5p_mfc_clock_on(); 1391 s5p_mfc_clock_on();
1392 s5p_mfc_clean_ctx_int_flags(ctx);
1393
1399 if (ctx->type == MFCINST_DECODER) { 1394 if (ctx->type == MFCINST_DECODER) {
1400 s5p_mfc_set_dec_desc_buffer(ctx); 1395 s5p_mfc_set_dec_desc_buffer(ctx);
1401 switch (ctx->state) { 1396 switch (ctx->state) {
@@ -1406,12 +1401,10 @@ static void s5p_mfc_try_run_v5(struct s5p_mfc_dev *dev)
1406 ret = s5p_mfc_run_dec_frame(ctx, MFC_DEC_FRAME); 1401 ret = s5p_mfc_run_dec_frame(ctx, MFC_DEC_FRAME);
1407 break; 1402 break;
1408 case MFCINST_INIT: 1403 case MFCINST_INIT:
1409 s5p_mfc_clean_ctx_int_flags(ctx);
1410 ret = s5p_mfc_hw_call(dev->mfc_cmds, open_inst_cmd, 1404 ret = s5p_mfc_hw_call(dev->mfc_cmds, open_inst_cmd,
1411 ctx); 1405 ctx);
1412 break; 1406 break;
1413 case MFCINST_RETURN_INST: 1407 case MFCINST_RETURN_INST:
1414 s5p_mfc_clean_ctx_int_flags(ctx);
1415 ret = s5p_mfc_hw_call(dev->mfc_cmds, close_inst_cmd, 1408 ret = s5p_mfc_hw_call(dev->mfc_cmds, close_inst_cmd,
1416 ctx); 1409 ctx);
1417 break; 1410 break;
@@ -1444,12 +1437,10 @@ static void s5p_mfc_try_run_v5(struct s5p_mfc_dev *dev)
1444 ret = s5p_mfc_run_enc_frame(ctx); 1437 ret = s5p_mfc_run_enc_frame(ctx);
1445 break; 1438 break;
1446 case MFCINST_INIT: 1439 case MFCINST_INIT:
1447 s5p_mfc_clean_ctx_int_flags(ctx);
1448 ret = s5p_mfc_hw_call(dev->mfc_cmds, open_inst_cmd, 1440 ret = s5p_mfc_hw_call(dev->mfc_cmds, open_inst_cmd,
1449 ctx); 1441 ctx);
1450 break; 1442 break;
1451 case MFCINST_RETURN_INST: 1443 case MFCINST_RETURN_INST:
1452 s5p_mfc_clean_ctx_int_flags(ctx);
1453 ret = s5p_mfc_hw_call(dev->mfc_cmds, close_inst_cmd, 1444 ret = s5p_mfc_hw_call(dev->mfc_cmds, close_inst_cmd,
1454 ctx); 1445 ctx);
1455 break; 1446 break;
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
index 8798b14bacce..9aea179943ce 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
@@ -1394,7 +1394,6 @@ static inline void s5p_mfc_set_flush(struct s5p_mfc_ctx *ctx, int flush)
1394 1394
1395 if (flush) { 1395 if (flush) {
1396 dev->curr_ctx = ctx->num; 1396 dev->curr_ctx = ctx->num;
1397 s5p_mfc_clean_ctx_int_flags(ctx);
1398 writel(ctx->inst_no, mfc_regs->instance_id); 1397 writel(ctx->inst_no, mfc_regs->instance_id);
1399 s5p_mfc_hw_call_void(dev->mfc_cmds, cmd_host2risc, dev, 1398 s5p_mfc_hw_call_void(dev->mfc_cmds, cmd_host2risc, dev,
1400 S5P_FIMV_H2R_CMD_FLUSH_V6, NULL); 1399 S5P_FIMV_H2R_CMD_FLUSH_V6, NULL);
@@ -1532,27 +1531,10 @@ static inline int s5p_mfc_get_new_ctx(struct s5p_mfc_dev *dev)
1532static inline void s5p_mfc_run_dec_last_frames(struct s5p_mfc_ctx *ctx) 1531static inline void s5p_mfc_run_dec_last_frames(struct s5p_mfc_ctx *ctx)
1533{ 1532{
1534 struct s5p_mfc_dev *dev = ctx->dev; 1533 struct s5p_mfc_dev *dev = ctx->dev;
1535 struct s5p_mfc_buf *temp_vb;
1536 unsigned long flags;
1537
1538 spin_lock_irqsave(&dev->irqlock, flags);
1539
1540 /* Frames are being decoded */
1541 if (list_empty(&ctx->src_queue)) {
1542 mfc_debug(2, "No src buffers.\n");
1543 spin_unlock_irqrestore(&dev->irqlock, flags);
1544 return;
1545 }
1546 /* Get the next source buffer */
1547 temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
1548 temp_vb->flags |= MFC_BUF_FLAG_USED;
1549 s5p_mfc_set_dec_stream_buffer_v6(ctx,
1550 vb2_dma_contig_plane_dma_addr(temp_vb->b, 0), 0, 0);
1551 spin_unlock_irqrestore(&dev->irqlock, flags);
1552 1534
1535 s5p_mfc_set_dec_stream_buffer_v6(ctx, 0, 0, 0);
1553 dev->curr_ctx = ctx->num; 1536 dev->curr_ctx = ctx->num;
1554 s5p_mfc_clean_ctx_int_flags(ctx); 1537 s5p_mfc_decode_one_frame_v6(ctx, MFC_DEC_LAST_FRAME);
1555 s5p_mfc_decode_one_frame_v6(ctx, 1);
1556} 1538}
1557 1539
1558static inline int s5p_mfc_run_dec_frame(struct s5p_mfc_ctx *ctx) 1540static inline int s5p_mfc_run_dec_frame(struct s5p_mfc_ctx *ctx)
@@ -1588,7 +1570,6 @@ static inline int s5p_mfc_run_dec_frame(struct s5p_mfc_ctx *ctx)
1588 spin_unlock_irqrestore(&dev->irqlock, flags); 1570 spin_unlock_irqrestore(&dev->irqlock, flags);
1589 1571
1590 dev->curr_ctx = ctx->num; 1572 dev->curr_ctx = ctx->num;
1591 s5p_mfc_clean_ctx_int_flags(ctx);
1592 if (temp_vb->b->v4l2_planes[0].bytesused == 0) { 1573 if (temp_vb->b->v4l2_planes[0].bytesused == 0) {
1593 last_frame = 1; 1574 last_frame = 1;
1594 mfc_debug(2, "Setting ctx->state to FINISHING\n"); 1575 mfc_debug(2, "Setting ctx->state to FINISHING\n");
@@ -1645,7 +1626,6 @@ static inline int s5p_mfc_run_enc_frame(struct s5p_mfc_ctx *ctx)
1645 spin_unlock_irqrestore(&dev->irqlock, flags); 1626 spin_unlock_irqrestore(&dev->irqlock, flags);
1646 1627
1647 dev->curr_ctx = ctx->num; 1628 dev->curr_ctx = ctx->num;
1648 s5p_mfc_clean_ctx_int_flags(ctx);
1649 s5p_mfc_encode_one_frame_v6(ctx); 1629 s5p_mfc_encode_one_frame_v6(ctx);
1650 1630
1651 return 0; 1631 return 0;
@@ -1667,7 +1647,6 @@ static inline void s5p_mfc_run_init_dec(struct s5p_mfc_ctx *ctx)
1667 temp_vb->b->v4l2_planes[0].bytesused); 1647 temp_vb->b->v4l2_planes[0].bytesused);
1668 spin_unlock_irqrestore(&dev->irqlock, flags); 1648 spin_unlock_irqrestore(&dev->irqlock, flags);
1669 dev->curr_ctx = ctx->num; 1649 dev->curr_ctx = ctx->num;
1670 s5p_mfc_clean_ctx_int_flags(ctx);
1671 s5p_mfc_init_decode_v6(ctx); 1650 s5p_mfc_init_decode_v6(ctx);
1672} 1651}
1673 1652
@@ -1687,7 +1666,6 @@ static inline void s5p_mfc_run_init_enc(struct s5p_mfc_ctx *ctx)
1687 s5p_mfc_set_enc_stream_buffer_v6(ctx, dst_addr, dst_size); 1666 s5p_mfc_set_enc_stream_buffer_v6(ctx, dst_addr, dst_size);
1688 spin_unlock_irqrestore(&dev->irqlock, flags); 1667 spin_unlock_irqrestore(&dev->irqlock, flags);
1689 dev->curr_ctx = ctx->num; 1668 dev->curr_ctx = ctx->num;
1690 s5p_mfc_clean_ctx_int_flags(ctx);
1691 s5p_mfc_init_encode_v6(ctx); 1669 s5p_mfc_init_encode_v6(ctx);
1692} 1670}
1693 1671
@@ -1707,7 +1685,6 @@ static inline int s5p_mfc_run_init_dec_buffers(struct s5p_mfc_ctx *ctx)
1707 } 1685 }
1708 1686
1709 dev->curr_ctx = ctx->num; 1687 dev->curr_ctx = ctx->num;
1710 s5p_mfc_clean_ctx_int_flags(ctx);
1711 ret = s5p_mfc_set_dec_frame_buffer_v6(ctx); 1688 ret = s5p_mfc_set_dec_frame_buffer_v6(ctx);
1712 if (ret) { 1689 if (ret) {
1713 mfc_err("Failed to alloc frame mem.\n"); 1690 mfc_err("Failed to alloc frame mem.\n");
@@ -1722,7 +1699,6 @@ static inline int s5p_mfc_run_init_enc_buffers(struct s5p_mfc_ctx *ctx)
1722 int ret; 1699 int ret;
1723 1700
1724 dev->curr_ctx = ctx->num; 1701 dev->curr_ctx = ctx->num;
1725 s5p_mfc_clean_ctx_int_flags(ctx);
1726 ret = s5p_mfc_set_enc_ref_buffer_v6(ctx); 1702 ret = s5p_mfc_set_enc_ref_buffer_v6(ctx);
1727 if (ret) { 1703 if (ret) {
1728 mfc_err("Failed to alloc frame mem.\n"); 1704 mfc_err("Failed to alloc frame mem.\n");
@@ -1771,6 +1747,8 @@ static void s5p_mfc_try_run_v6(struct s5p_mfc_dev *dev)
1771 * Now obtaining frames from MFC buffer */ 1747 * Now obtaining frames from MFC buffer */
1772 1748
1773 s5p_mfc_clock_on(); 1749 s5p_mfc_clock_on();
1750 s5p_mfc_clean_ctx_int_flags(ctx);
1751
1774 if (ctx->type == MFCINST_DECODER) { 1752 if (ctx->type == MFCINST_DECODER) {
1775 switch (ctx->state) { 1753 switch (ctx->state) {
1776 case MFCINST_FINISHING: 1754 case MFCINST_FINISHING:
@@ -1780,12 +1758,10 @@ static void s5p_mfc_try_run_v6(struct s5p_mfc_dev *dev)
1780 ret = s5p_mfc_run_dec_frame(ctx); 1758 ret = s5p_mfc_run_dec_frame(ctx);
1781 break; 1759 break;
1782 case MFCINST_INIT: 1760 case MFCINST_INIT:
1783 s5p_mfc_clean_ctx_int_flags(ctx);
1784 ret = s5p_mfc_hw_call(dev->mfc_cmds, open_inst_cmd, 1761 ret = s5p_mfc_hw_call(dev->mfc_cmds, open_inst_cmd,
1785 ctx); 1762 ctx);
1786 break; 1763 break;
1787 case MFCINST_RETURN_INST: 1764 case MFCINST_RETURN_INST:
1788 s5p_mfc_clean_ctx_int_flags(ctx);
1789 ret = s5p_mfc_hw_call(dev->mfc_cmds, close_inst_cmd, 1765 ret = s5p_mfc_hw_call(dev->mfc_cmds, close_inst_cmd,
1790 ctx); 1766 ctx);
1791 break; 1767 break;
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
index 826c48945bf5..5f97a3398c11 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
@@ -13,9 +13,7 @@
13#include <linux/clk.h> 13#include <linux/clk.h>
14#include <linux/err.h> 14#include <linux/err.h>
15#include <linux/platform_device.h> 15#include <linux/platform_device.h>
16#ifdef CONFIG_PM_RUNTIME
17#include <linux/pm_runtime.h> 16#include <linux/pm_runtime.h>
18#endif
19#include "s5p_mfc_common.h" 17#include "s5p_mfc_common.h"
20#include "s5p_mfc_debug.h" 18#include "s5p_mfc_debug.h"
21#include "s5p_mfc_pm.h" 19#include "s5p_mfc_pm.h"
@@ -67,7 +65,7 @@ int s5p_mfc_init_pm(struct s5p_mfc_dev *dev)
67 } 65 }
68 66
69 atomic_set(&pm->power, 0); 67 atomic_set(&pm->power, 0);
70#ifdef CONFIG_PM_RUNTIME 68#ifdef CONFIG_PM
71 pm->device = &dev->plat_dev->dev; 69 pm->device = &dev->plat_dev->dev;
72 pm_runtime_enable(pm->device); 70 pm_runtime_enable(pm->device);
73#endif 71#endif
@@ -93,7 +91,7 @@ void s5p_mfc_final_pm(struct s5p_mfc_dev *dev)
93 } 91 }
94 clk_unprepare(pm->clock_gate); 92 clk_unprepare(pm->clock_gate);
95 clk_put(pm->clock_gate); 93 clk_put(pm->clock_gate);
96#ifdef CONFIG_PM_RUNTIME 94#ifdef CONFIG_PM
97 pm_runtime_disable(pm->device); 95 pm_runtime_disable(pm->device);
98#endif 96#endif
99} 97}
@@ -120,7 +118,7 @@ void s5p_mfc_clock_off(void)
120 118
121int s5p_mfc_power_on(void) 119int s5p_mfc_power_on(void)
122{ 120{
123#ifdef CONFIG_PM_RUNTIME 121#ifdef CONFIG_PM
124 return pm_runtime_get_sync(pm->device); 122 return pm_runtime_get_sync(pm->device);
125#else 123#else
126 atomic_set(&pm->power, 1); 124 atomic_set(&pm->power, 1);
@@ -130,7 +128,7 @@ int s5p_mfc_power_on(void)
130 128
131int s5p_mfc_power_off(void) 129int s5p_mfc_power_off(void)
132{ 130{
133#ifdef CONFIG_PM_RUNTIME 131#ifdef CONFIG_PM
134 return pm_runtime_put_sync(pm->device); 132 return pm_runtime_put_sync(pm->device);
135#else 133#else
136 atomic_set(&pm->power, 0); 134 atomic_set(&pm->power, 0);
diff --git a/drivers/media/platform/s5p-tv/hdmi_drv.c b/drivers/media/platform/s5p-tv/hdmi_drv.c
index 37c8bd694c5f..1d1ef211e113 100644
--- a/drivers/media/platform/s5p-tv/hdmi_drv.c
+++ b/drivers/media/platform/s5p-tv/hdmi_drv.c
@@ -660,7 +660,7 @@ static int hdmi_g_mbus_fmt(struct v4l2_subdev *sd,
660 memset(fmt, 0, sizeof(*fmt)); 660 memset(fmt, 0, sizeof(*fmt));
661 fmt->width = t->hact.end - t->hact.beg; 661 fmt->width = t->hact.end - t->hact.beg;
662 fmt->height = t->vact[0].end - t->vact[0].beg; 662 fmt->height = t->vact[0].end - t->vact[0].beg;
663 fmt->code = V4L2_MBUS_FMT_FIXED; /* means RGB888 */ 663 fmt->code = MEDIA_BUS_FMT_FIXED; /* means RGB888 */
664 fmt->colorspace = V4L2_COLORSPACE_SRGB; 664 fmt->colorspace = V4L2_COLORSPACE_SRGB;
665 if (t->interlaced) { 665 if (t->interlaced) {
666 fmt->field = V4L2_FIELD_INTERLACED; 666 fmt->field = V4L2_FIELD_INTERLACED;
diff --git a/drivers/media/platform/s5p-tv/sdo_drv.c b/drivers/media/platform/s5p-tv/sdo_drv.c
index 72cf892dd008..46f4d56aaa1b 100644
--- a/drivers/media/platform/s5p-tv/sdo_drv.c
+++ b/drivers/media/platform/s5p-tv/sdo_drv.c
@@ -170,7 +170,7 @@ static int sdo_g_mbus_fmt(struct v4l2_subdev *sd,
170 /* all modes are 720 pixels wide */ 170 /* all modes are 720 pixels wide */
171 fmt->width = 720; 171 fmt->width = 720;
172 fmt->height = sdev->fmt->height; 172 fmt->height = sdev->fmt->height;
173 fmt->code = V4L2_MBUS_FMT_FIXED; 173 fmt->code = MEDIA_BUS_FMT_FIXED;
174 fmt->field = V4L2_FIELD_INTERLACED; 174 fmt->field = V4L2_FIELD_INTERLACED;
175 fmt->colorspace = V4L2_COLORSPACE_JPEG; 175 fmt->colorspace = V4L2_COLORSPACE_JPEG;
176 return 0; 176 return 0;
diff --git a/drivers/media/platform/sh_vou.c b/drivers/media/platform/sh_vou.c
index e5f1d4c14f2c..154ef0b6b8ab 100644
--- a/drivers/media/platform/sh_vou.c
+++ b/drivers/media/platform/sh_vou.c
@@ -396,7 +396,8 @@ static int sh_vou_querycap(struct file *file, void *priv,
396 dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__); 396 dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
397 397
398 strlcpy(cap->card, "SuperH VOU", sizeof(cap->card)); 398 strlcpy(cap->card, "SuperH VOU", sizeof(cap->card));
399 cap->capabilities = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING; 399 cap->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
400 cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
400 return 0; 401 return 0;
401} 402}
402 403
@@ -680,7 +681,7 @@ static int sh_vou_s_fmt_vid_out(struct file *file, void *priv,
680 struct sh_vou_geometry geo; 681 struct sh_vou_geometry geo;
681 struct v4l2_mbus_framefmt mbfmt = { 682 struct v4l2_mbus_framefmt mbfmt = {
682 /* Revisit: is this the correct code? */ 683 /* Revisit: is this the correct code? */
683 .code = V4L2_MBUS_FMT_YUYV8_2X8, 684 .code = MEDIA_BUS_FMT_YUYV8_2X8,
684 .field = V4L2_FIELD_INTERLACED, 685 .field = V4L2_FIELD_INTERLACED,
685 .colorspace = V4L2_COLORSPACE_SMPTE170M, 686 .colorspace = V4L2_COLORSPACE_SMPTE170M,
686 }; 687 };
@@ -733,7 +734,7 @@ static int sh_vou_s_fmt_vid_out(struct file *file, void *priv,
733 /* Sanity checks */ 734 /* Sanity checks */
734 if ((unsigned)mbfmt.width > VOU_MAX_IMAGE_WIDTH || 735 if ((unsigned)mbfmt.width > VOU_MAX_IMAGE_WIDTH ||
735 (unsigned)mbfmt.height > img_height_max || 736 (unsigned)mbfmt.height > img_height_max ||
736 mbfmt.code != V4L2_MBUS_FMT_YUYV8_2X8) 737 mbfmt.code != MEDIA_BUS_FMT_YUYV8_2X8)
737 return -EIO; 738 return -EIO;
738 739
739 if (mbfmt.width != geo.output.width || 740 if (mbfmt.width != geo.output.width ||
@@ -943,7 +944,7 @@ static int sh_vou_s_crop(struct file *file, void *fh, const struct v4l2_crop *a)
943 struct sh_vou_geometry geo; 944 struct sh_vou_geometry geo;
944 struct v4l2_mbus_framefmt mbfmt = { 945 struct v4l2_mbus_framefmt mbfmt = {
945 /* Revisit: is this the correct code? */ 946 /* Revisit: is this the correct code? */
946 .code = V4L2_MBUS_FMT_YUYV8_2X8, 947 .code = MEDIA_BUS_FMT_YUYV8_2X8,
947 .field = V4L2_FIELD_INTERLACED, 948 .field = V4L2_FIELD_INTERLACED,
948 .colorspace = V4L2_COLORSPACE_SMPTE170M, 949 .colorspace = V4L2_COLORSPACE_SMPTE170M,
949 }; 950 };
@@ -994,7 +995,7 @@ static int sh_vou_s_crop(struct file *file, void *fh, const struct v4l2_crop *a)
994 /* Sanity checks */ 995 /* Sanity checks */
995 if ((unsigned)mbfmt.width > VOU_MAX_IMAGE_WIDTH || 996 if ((unsigned)mbfmt.width > VOU_MAX_IMAGE_WIDTH ||
996 (unsigned)mbfmt.height > img_height_max || 997 (unsigned)mbfmt.height > img_height_max ||
997 mbfmt.code != V4L2_MBUS_FMT_YUYV8_2X8) 998 mbfmt.code != MEDIA_BUS_FMT_YUYV8_2X8)
998 return -EIO; 999 return -EIO;
999 1000
1000 geo.output.width = mbfmt.width; 1001 geo.output.width = mbfmt.width;
diff --git a/drivers/media/platform/soc_camera/atmel-isi.c b/drivers/media/platform/soc_camera/atmel-isi.c
index c5291b001057..ee5650f4ea2d 100644
--- a/drivers/media/platform/soc_camera/atmel-isi.c
+++ b/drivers/media/platform/soc_camera/atmel-isi.c
@@ -105,25 +105,25 @@ static u32 isi_readl(struct atmel_isi *isi, u32 reg)
105} 105}
106 106
107static int configure_geometry(struct atmel_isi *isi, u32 width, 107static int configure_geometry(struct atmel_isi *isi, u32 width,
108 u32 height, enum v4l2_mbus_pixelcode code) 108 u32 height, u32 code)
109{ 109{
110 u32 cfg2, cr; 110 u32 cfg2, cr;
111 111
112 switch (code) { 112 switch (code) {
113 /* YUV, including grey */ 113 /* YUV, including grey */
114 case V4L2_MBUS_FMT_Y8_1X8: 114 case MEDIA_BUS_FMT_Y8_1X8:
115 cr = ISI_CFG2_GRAYSCALE; 115 cr = ISI_CFG2_GRAYSCALE;
116 break; 116 break;
117 case V4L2_MBUS_FMT_VYUY8_2X8: 117 case MEDIA_BUS_FMT_VYUY8_2X8:
118 cr = ISI_CFG2_YCC_SWAP_MODE_3; 118 cr = ISI_CFG2_YCC_SWAP_MODE_3;
119 break; 119 break;
120 case V4L2_MBUS_FMT_UYVY8_2X8: 120 case MEDIA_BUS_FMT_UYVY8_2X8:
121 cr = ISI_CFG2_YCC_SWAP_MODE_2; 121 cr = ISI_CFG2_YCC_SWAP_MODE_2;
122 break; 122 break;
123 case V4L2_MBUS_FMT_YVYU8_2X8: 123 case MEDIA_BUS_FMT_YVYU8_2X8:
124 cr = ISI_CFG2_YCC_SWAP_MODE_1; 124 cr = ISI_CFG2_YCC_SWAP_MODE_1;
125 break; 125 break;
126 case V4L2_MBUS_FMT_YUYV8_2X8: 126 case MEDIA_BUS_FMT_YUYV8_2X8:
127 cr = ISI_CFG2_YCC_SWAP_DEFAULT; 127 cr = ISI_CFG2_YCC_SWAP_DEFAULT;
128 break; 128 break;
129 /* RGB, TODO */ 129 /* RGB, TODO */
@@ -645,7 +645,7 @@ static int isi_camera_get_formats(struct soc_camera_device *icd,
645 struct v4l2_subdev *sd = soc_camera_to_subdev(icd); 645 struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
646 int formats = 0, ret; 646 int formats = 0, ret;
647 /* sensor format */ 647 /* sensor format */
648 enum v4l2_mbus_pixelcode code; 648 u32 code;
649 /* soc camera host format */ 649 /* soc camera host format */
650 const struct soc_mbus_pixelfmt *fmt; 650 const struct soc_mbus_pixelfmt *fmt;
651 651
@@ -670,10 +670,10 @@ static int isi_camera_get_formats(struct soc_camera_device *icd,
670 } 670 }
671 671
672 switch (code) { 672 switch (code) {
673 case V4L2_MBUS_FMT_UYVY8_2X8: 673 case MEDIA_BUS_FMT_UYVY8_2X8:
674 case V4L2_MBUS_FMT_VYUY8_2X8: 674 case MEDIA_BUS_FMT_VYUY8_2X8:
675 case V4L2_MBUS_FMT_YUYV8_2X8: 675 case MEDIA_BUS_FMT_YUYV8_2X8:
676 case V4L2_MBUS_FMT_YVYU8_2X8: 676 case MEDIA_BUS_FMT_YVYU8_2X8:
677 formats++; 677 formats++;
678 if (xlate) { 678 if (xlate) {
679 xlate->host_fmt = &isi_camera_formats[0]; 679 xlate->host_fmt = &isi_camera_formats[0];
diff --git a/drivers/media/platform/soc_camera/mx2_camera.c b/drivers/media/platform/soc_camera/mx2_camera.c
index 2347612a4cc1..ce72bd26a6ac 100644
--- a/drivers/media/platform/soc_camera/mx2_camera.c
+++ b/drivers/media/platform/soc_camera/mx2_camera.c
@@ -211,7 +211,7 @@ struct emma_prp_resize {
211 211
212/* prp configuration for a client-host fmt pair */ 212/* prp configuration for a client-host fmt pair */
213struct mx2_fmt_cfg { 213struct mx2_fmt_cfg {
214 enum v4l2_mbus_pixelcode in_fmt; 214 u32 in_fmt;
215 u32 out_fmt; 215 u32 out_fmt;
216 struct mx2_prp_cfg cfg; 216 struct mx2_prp_cfg cfg;
217}; 217};
@@ -309,7 +309,7 @@ static struct mx2_fmt_cfg mx27_emma_prp_table[] = {
309 } 309 }
310 }, 310 },
311 { 311 {
312 .in_fmt = V4L2_MBUS_FMT_UYVY8_2X8, 312 .in_fmt = MEDIA_BUS_FMT_UYVY8_2X8,
313 .out_fmt = V4L2_PIX_FMT_YUYV, 313 .out_fmt = V4L2_PIX_FMT_YUYV,
314 .cfg = { 314 .cfg = {
315 .channel = 1, 315 .channel = 1,
@@ -323,7 +323,7 @@ static struct mx2_fmt_cfg mx27_emma_prp_table[] = {
323 } 323 }
324 }, 324 },
325 { 325 {
326 .in_fmt = V4L2_MBUS_FMT_YUYV8_2X8, 326 .in_fmt = MEDIA_BUS_FMT_YUYV8_2X8,
327 .out_fmt = V4L2_PIX_FMT_YUYV, 327 .out_fmt = V4L2_PIX_FMT_YUYV,
328 .cfg = { 328 .cfg = {
329 .channel = 1, 329 .channel = 1,
@@ -337,7 +337,7 @@ static struct mx2_fmt_cfg mx27_emma_prp_table[] = {
337 } 337 }
338 }, 338 },
339 { 339 {
340 .in_fmt = V4L2_MBUS_FMT_YUYV8_2X8, 340 .in_fmt = MEDIA_BUS_FMT_YUYV8_2X8,
341 .out_fmt = V4L2_PIX_FMT_YUV420, 341 .out_fmt = V4L2_PIX_FMT_YUV420,
342 .cfg = { 342 .cfg = {
343 .channel = 2, 343 .channel = 2,
@@ -351,7 +351,7 @@ static struct mx2_fmt_cfg mx27_emma_prp_table[] = {
351 } 351 }
352 }, 352 },
353 { 353 {
354 .in_fmt = V4L2_MBUS_FMT_UYVY8_2X8, 354 .in_fmt = MEDIA_BUS_FMT_UYVY8_2X8,
355 .out_fmt = V4L2_PIX_FMT_YUV420, 355 .out_fmt = V4L2_PIX_FMT_YUV420,
356 .cfg = { 356 .cfg = {
357 .channel = 2, 357 .channel = 2,
@@ -366,9 +366,7 @@ static struct mx2_fmt_cfg mx27_emma_prp_table[] = {
366 }, 366 },
367}; 367};
368 368
369static struct mx2_fmt_cfg *mx27_emma_prp_get_format( 369static struct mx2_fmt_cfg *mx27_emma_prp_get_format(u32 in_fmt, u32 out_fmt)
370 enum v4l2_mbus_pixelcode in_fmt,
371 u32 out_fmt)
372{ 370{
373 int i; 371 int i;
374 372
@@ -945,7 +943,7 @@ static int mx2_camera_get_formats(struct soc_camera_device *icd,
945 struct v4l2_subdev *sd = soc_camera_to_subdev(icd); 943 struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
946 const struct soc_mbus_pixelfmt *fmt; 944 const struct soc_mbus_pixelfmt *fmt;
947 struct device *dev = icd->parent; 945 struct device *dev = icd->parent;
948 enum v4l2_mbus_pixelcode code; 946 u32 code;
949 int ret, formats = 0; 947 int ret, formats = 0;
950 948
951 ret = v4l2_subdev_call(sd, video, enum_mbus_fmt, idx, &code); 949 ret = v4l2_subdev_call(sd, video, enum_mbus_fmt, idx, &code);
@@ -959,8 +957,8 @@ static int mx2_camera_get_formats(struct soc_camera_device *icd,
959 return 0; 957 return 0;
960 } 958 }
961 959
962 if (code == V4L2_MBUS_FMT_YUYV8_2X8 || 960 if (code == MEDIA_BUS_FMT_YUYV8_2X8 ||
963 code == V4L2_MBUS_FMT_UYVY8_2X8) { 961 code == MEDIA_BUS_FMT_UYVY8_2X8) {
964 formats++; 962 formats++;
965 if (xlate) { 963 if (xlate) {
966 /* 964 /*
@@ -968,7 +966,7 @@ static int mx2_camera_get_formats(struct soc_camera_device *icd,
968 * soc_mediabus.c 966 * soc_mediabus.c
969 */ 967 */
970 xlate->host_fmt = 968 xlate->host_fmt =
971 soc_mbus_get_fmtdesc(V4L2_MBUS_FMT_YUYV8_1_5X8); 969 soc_mbus_get_fmtdesc(MEDIA_BUS_FMT_YUYV8_1_5X8);
972 xlate->code = code; 970 xlate->code = code;
973 dev_dbg(dev, "Providing host format %s for sensor code %d\n", 971 dev_dbg(dev, "Providing host format %s for sensor code %d\n",
974 xlate->host_fmt->name, code); 972 xlate->host_fmt->name, code);
@@ -976,11 +974,11 @@ static int mx2_camera_get_formats(struct soc_camera_device *icd,
976 } 974 }
977 } 975 }
978 976
979 if (code == V4L2_MBUS_FMT_UYVY8_2X8) { 977 if (code == MEDIA_BUS_FMT_UYVY8_2X8) {
980 formats++; 978 formats++;
981 if (xlate) { 979 if (xlate) {
982 xlate->host_fmt = 980 xlate->host_fmt =
983 soc_mbus_get_fmtdesc(V4L2_MBUS_FMT_YUYV8_2X8); 981 soc_mbus_get_fmtdesc(MEDIA_BUS_FMT_YUYV8_2X8);
984 xlate->code = code; 982 xlate->code = code;
985 dev_dbg(dev, "Providing host format %s for sensor code %d\n", 983 dev_dbg(dev, "Providing host format %s for sensor code %d\n",
986 xlate->host_fmt->name, code); 984 xlate->host_fmt->name, code);
diff --git a/drivers/media/platform/soc_camera/mx3_camera.c b/drivers/media/platform/soc_camera/mx3_camera.c
index 7696a873510d..8e52ccce66de 100644
--- a/drivers/media/platform/soc_camera/mx3_camera.c
+++ b/drivers/media/platform/soc_camera/mx3_camera.c
@@ -656,7 +656,7 @@ static int mx3_camera_get_formats(struct soc_camera_device *icd, unsigned int id
656 struct v4l2_subdev *sd = soc_camera_to_subdev(icd); 656 struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
657 struct device *dev = icd->parent; 657 struct device *dev = icd->parent;
658 int formats = 0, ret; 658 int formats = 0, ret;
659 enum v4l2_mbus_pixelcode code; 659 u32 code;
660 const struct soc_mbus_pixelfmt *fmt; 660 const struct soc_mbus_pixelfmt *fmt;
661 661
662 ret = v4l2_subdev_call(sd, video, enum_mbus_fmt, idx, &code); 662 ret = v4l2_subdev_call(sd, video, enum_mbus_fmt, idx, &code);
@@ -677,7 +677,7 @@ static int mx3_camera_get_formats(struct soc_camera_device *icd, unsigned int id
677 return 0; 677 return 0;
678 678
679 switch (code) { 679 switch (code) {
680 case V4L2_MBUS_FMT_SBGGR10_1X10: 680 case MEDIA_BUS_FMT_SBGGR10_1X10:
681 formats++; 681 formats++;
682 if (xlate) { 682 if (xlate) {
683 xlate->host_fmt = &mx3_camera_formats[0]; 683 xlate->host_fmt = &mx3_camera_formats[0];
@@ -687,7 +687,7 @@ static int mx3_camera_get_formats(struct soc_camera_device *icd, unsigned int id
687 mx3_camera_formats[0].name, code); 687 mx3_camera_formats[0].name, code);
688 } 688 }
689 break; 689 break;
690 case V4L2_MBUS_FMT_Y10_1X10: 690 case MEDIA_BUS_FMT_Y10_1X10:
691 formats++; 691 formats++;
692 if (xlate) { 692 if (xlate) {
693 xlate->host_fmt = &mx3_camera_formats[1]; 693 xlate->host_fmt = &mx3_camera_formats[1];
diff --git a/drivers/media/platform/soc_camera/omap1_camera.c b/drivers/media/platform/soc_camera/omap1_camera.c
index 74ce8b6b79fa..e6b93281f246 100644
--- a/drivers/media/platform/soc_camera/omap1_camera.c
+++ b/drivers/media/platform/soc_camera/omap1_camera.c
@@ -140,7 +140,7 @@
140/* buffer for one video frame */ 140/* buffer for one video frame */
141struct omap1_cam_buf { 141struct omap1_cam_buf {
142 struct videobuf_buffer vb; 142 struct videobuf_buffer vb;
143 enum v4l2_mbus_pixelcode code; 143 u32 code;
144 int inwork; 144 int inwork;
145 struct scatterlist *sgbuf; 145 struct scatterlist *sgbuf;
146 int sgcount; 146 int sgcount;
@@ -980,7 +980,7 @@ static void omap1_cam_clock_stop(struct soc_camera_host *ici)
980/* Duplicate standard formats based on host capability of byte swapping */ 980/* Duplicate standard formats based on host capability of byte swapping */
981static const struct soc_mbus_lookup omap1_cam_formats[] = { 981static const struct soc_mbus_lookup omap1_cam_formats[] = {
982{ 982{
983 .code = V4L2_MBUS_FMT_UYVY8_2X8, 983 .code = MEDIA_BUS_FMT_UYVY8_2X8,
984 .fmt = { 984 .fmt = {
985 .fourcc = V4L2_PIX_FMT_YUYV, 985 .fourcc = V4L2_PIX_FMT_YUYV,
986 .name = "YUYV", 986 .name = "YUYV",
@@ -990,7 +990,7 @@ static const struct soc_mbus_lookup omap1_cam_formats[] = {
990 .layout = SOC_MBUS_LAYOUT_PACKED, 990 .layout = SOC_MBUS_LAYOUT_PACKED,
991 }, 991 },
992}, { 992}, {
993 .code = V4L2_MBUS_FMT_VYUY8_2X8, 993 .code = MEDIA_BUS_FMT_VYUY8_2X8,
994 .fmt = { 994 .fmt = {
995 .fourcc = V4L2_PIX_FMT_YVYU, 995 .fourcc = V4L2_PIX_FMT_YVYU,
996 .name = "YVYU", 996 .name = "YVYU",
@@ -1000,7 +1000,7 @@ static const struct soc_mbus_lookup omap1_cam_formats[] = {
1000 .layout = SOC_MBUS_LAYOUT_PACKED, 1000 .layout = SOC_MBUS_LAYOUT_PACKED,
1001 }, 1001 },
1002}, { 1002}, {
1003 .code = V4L2_MBUS_FMT_YUYV8_2X8, 1003 .code = MEDIA_BUS_FMT_YUYV8_2X8,
1004 .fmt = { 1004 .fmt = {
1005 .fourcc = V4L2_PIX_FMT_UYVY, 1005 .fourcc = V4L2_PIX_FMT_UYVY,
1006 .name = "UYVY", 1006 .name = "UYVY",
@@ -1010,7 +1010,7 @@ static const struct soc_mbus_lookup omap1_cam_formats[] = {
1010 .layout = SOC_MBUS_LAYOUT_PACKED, 1010 .layout = SOC_MBUS_LAYOUT_PACKED,
1011 }, 1011 },
1012}, { 1012}, {
1013 .code = V4L2_MBUS_FMT_YVYU8_2X8, 1013 .code = MEDIA_BUS_FMT_YVYU8_2X8,
1014 .fmt = { 1014 .fmt = {
1015 .fourcc = V4L2_PIX_FMT_VYUY, 1015 .fourcc = V4L2_PIX_FMT_VYUY,
1016 .name = "VYUY", 1016 .name = "VYUY",
@@ -1020,7 +1020,7 @@ static const struct soc_mbus_lookup omap1_cam_formats[] = {
1020 .layout = SOC_MBUS_LAYOUT_PACKED, 1020 .layout = SOC_MBUS_LAYOUT_PACKED,
1021 }, 1021 },
1022}, { 1022}, {
1023 .code = V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE, 1023 .code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE,
1024 .fmt = { 1024 .fmt = {
1025 .fourcc = V4L2_PIX_FMT_RGB555, 1025 .fourcc = V4L2_PIX_FMT_RGB555,
1026 .name = "RGB555", 1026 .name = "RGB555",
@@ -1030,7 +1030,7 @@ static const struct soc_mbus_lookup omap1_cam_formats[] = {
1030 .layout = SOC_MBUS_LAYOUT_PACKED, 1030 .layout = SOC_MBUS_LAYOUT_PACKED,
1031 }, 1031 },
1032}, { 1032}, {
1033 .code = V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE, 1033 .code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE,
1034 .fmt = { 1034 .fmt = {
1035 .fourcc = V4L2_PIX_FMT_RGB555X, 1035 .fourcc = V4L2_PIX_FMT_RGB555X,
1036 .name = "RGB555X", 1036 .name = "RGB555X",
@@ -1040,7 +1040,7 @@ static const struct soc_mbus_lookup omap1_cam_formats[] = {
1040 .layout = SOC_MBUS_LAYOUT_PACKED, 1040 .layout = SOC_MBUS_LAYOUT_PACKED,
1041 }, 1041 },
1042}, { 1042}, {
1043 .code = V4L2_MBUS_FMT_RGB565_2X8_BE, 1043 .code = MEDIA_BUS_FMT_RGB565_2X8_BE,
1044 .fmt = { 1044 .fmt = {
1045 .fourcc = V4L2_PIX_FMT_RGB565, 1045 .fourcc = V4L2_PIX_FMT_RGB565,
1046 .name = "RGB565", 1046 .name = "RGB565",
@@ -1050,7 +1050,7 @@ static const struct soc_mbus_lookup omap1_cam_formats[] = {
1050 .layout = SOC_MBUS_LAYOUT_PACKED, 1050 .layout = SOC_MBUS_LAYOUT_PACKED,
1051 }, 1051 },
1052}, { 1052}, {
1053 .code = V4L2_MBUS_FMT_RGB565_2X8_LE, 1053 .code = MEDIA_BUS_FMT_RGB565_2X8_LE,
1054 .fmt = { 1054 .fmt = {
1055 .fourcc = V4L2_PIX_FMT_RGB565X, 1055 .fourcc = V4L2_PIX_FMT_RGB565X,
1056 .name = "RGB565X", 1056 .name = "RGB565X",
@@ -1068,7 +1068,7 @@ static int omap1_cam_get_formats(struct soc_camera_device *icd,
1068 struct v4l2_subdev *sd = soc_camera_to_subdev(icd); 1068 struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
1069 struct device *dev = icd->parent; 1069 struct device *dev = icd->parent;
1070 int formats = 0, ret; 1070 int formats = 0, ret;
1071 enum v4l2_mbus_pixelcode code; 1071 u32 code;
1072 const struct soc_mbus_pixelfmt *fmt; 1072 const struct soc_mbus_pixelfmt *fmt;
1073 1073
1074 ret = v4l2_subdev_call(sd, video, enum_mbus_fmt, idx, &code); 1074 ret = v4l2_subdev_call(sd, video, enum_mbus_fmt, idx, &code);
@@ -1088,14 +1088,14 @@ static int omap1_cam_get_formats(struct soc_camera_device *icd,
1088 return 0; 1088 return 0;
1089 1089
1090 switch (code) { 1090 switch (code) {
1091 case V4L2_MBUS_FMT_YUYV8_2X8: 1091 case MEDIA_BUS_FMT_YUYV8_2X8:
1092 case V4L2_MBUS_FMT_YVYU8_2X8: 1092 case MEDIA_BUS_FMT_YVYU8_2X8:
1093 case V4L2_MBUS_FMT_UYVY8_2X8: 1093 case MEDIA_BUS_FMT_UYVY8_2X8:
1094 case V4L2_MBUS_FMT_VYUY8_2X8: 1094 case MEDIA_BUS_FMT_VYUY8_2X8:
1095 case V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE: 1095 case MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE:
1096 case V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE: 1096 case MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE:
1097 case V4L2_MBUS_FMT_RGB565_2X8_BE: 1097 case MEDIA_BUS_FMT_RGB565_2X8_BE:
1098 case V4L2_MBUS_FMT_RGB565_2X8_LE: 1098 case MEDIA_BUS_FMT_RGB565_2X8_LE:
1099 formats++; 1099 formats++;
1100 if (xlate) { 1100 if (xlate) {
1101 xlate->host_fmt = soc_mbus_find_fmtdesc(code, 1101 xlate->host_fmt = soc_mbus_find_fmtdesc(code,
diff --git a/drivers/media/platform/soc_camera/pxa_camera.c b/drivers/media/platform/soc_camera/pxa_camera.c
index 66178fc9f9eb..951226af0eba 100644
--- a/drivers/media/platform/soc_camera/pxa_camera.c
+++ b/drivers/media/platform/soc_camera/pxa_camera.c
@@ -187,7 +187,7 @@ struct pxa_cam_dma {
187struct pxa_buffer { 187struct pxa_buffer {
188 /* common v4l buffer stuff -- must be first */ 188 /* common v4l buffer stuff -- must be first */
189 struct videobuf_buffer vb; 189 struct videobuf_buffer vb;
190 enum v4l2_mbus_pixelcode code; 190 u32 code;
191 /* our descriptor lists for Y, U and V channels */ 191 /* our descriptor lists for Y, U and V channels */
192 struct pxa_cam_dma dmas[3]; 192 struct pxa_cam_dma dmas[3];
193 int inwork; 193 int inwork;
@@ -1253,7 +1253,7 @@ static int pxa_camera_get_formats(struct soc_camera_device *icd, unsigned int id
1253 struct device *dev = icd->parent; 1253 struct device *dev = icd->parent;
1254 int formats = 0, ret; 1254 int formats = 0, ret;
1255 struct pxa_cam *cam; 1255 struct pxa_cam *cam;
1256 enum v4l2_mbus_pixelcode code; 1256 u32 code;
1257 const struct soc_mbus_pixelfmt *fmt; 1257 const struct soc_mbus_pixelfmt *fmt;
1258 1258
1259 ret = v4l2_subdev_call(sd, video, enum_mbus_fmt, idx, &code); 1259 ret = v4l2_subdev_call(sd, video, enum_mbus_fmt, idx, &code);
@@ -1283,7 +1283,7 @@ static int pxa_camera_get_formats(struct soc_camera_device *icd, unsigned int id
1283 } 1283 }
1284 1284
1285 switch (code) { 1285 switch (code) {
1286 case V4L2_MBUS_FMT_UYVY8_2X8: 1286 case MEDIA_BUS_FMT_UYVY8_2X8:
1287 formats++; 1287 formats++;
1288 if (xlate) { 1288 if (xlate) {
1289 xlate->host_fmt = &pxa_camera_formats[0]; 1289 xlate->host_fmt = &pxa_camera_formats[0];
@@ -1292,11 +1292,11 @@ static int pxa_camera_get_formats(struct soc_camera_device *icd, unsigned int id
1292 dev_dbg(dev, "Providing format %s using code %d\n", 1292 dev_dbg(dev, "Providing format %s using code %d\n",
1293 pxa_camera_formats[0].name, code); 1293 pxa_camera_formats[0].name, code);
1294 } 1294 }
1295 case V4L2_MBUS_FMT_VYUY8_2X8: 1295 case MEDIA_BUS_FMT_VYUY8_2X8:
1296 case V4L2_MBUS_FMT_YUYV8_2X8: 1296 case MEDIA_BUS_FMT_YUYV8_2X8:
1297 case V4L2_MBUS_FMT_YVYU8_2X8: 1297 case MEDIA_BUS_FMT_YVYU8_2X8:
1298 case V4L2_MBUS_FMT_RGB565_2X8_LE: 1298 case MEDIA_BUS_FMT_RGB565_2X8_LE:
1299 case V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE: 1299 case MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE:
1300 if (xlate) 1300 if (xlate)
1301 dev_dbg(dev, "Providing format %s packed\n", 1301 dev_dbg(dev, "Providing format %s packed\n",
1302 fmt->name); 1302 fmt->name);
diff --git a/drivers/media/platform/soc_camera/rcar_vin.c b/drivers/media/platform/soc_camera/rcar_vin.c
index 20defcb8b31b..8d8438b10b87 100644
--- a/drivers/media/platform/soc_camera/rcar_vin.c
+++ b/drivers/media/platform/soc_camera/rcar_vin.c
@@ -272,16 +272,16 @@ static int rcar_vin_setup(struct rcar_vin_priv *priv)
272 272
273 /* input interface */ 273 /* input interface */
274 switch (icd->current_fmt->code) { 274 switch (icd->current_fmt->code) {
275 case V4L2_MBUS_FMT_YUYV8_1X16: 275 case MEDIA_BUS_FMT_YUYV8_1X16:
276 /* BT.601/BT.1358 16bit YCbCr422 */ 276 /* BT.601/BT.1358 16bit YCbCr422 */
277 vnmc |= VNMC_INF_YUV16; 277 vnmc |= VNMC_INF_YUV16;
278 break; 278 break;
279 case V4L2_MBUS_FMT_YUYV8_2X8: 279 case MEDIA_BUS_FMT_YUYV8_2X8:
280 /* BT.656 8bit YCbCr422 or BT.601 8bit YCbCr422 */ 280 /* BT.656 8bit YCbCr422 or BT.601 8bit YCbCr422 */
281 vnmc |= priv->pdata_flags & RCAR_VIN_BT656 ? 281 vnmc |= priv->pdata_flags & RCAR_VIN_BT656 ?
282 VNMC_INF_YUV8_BT656 : VNMC_INF_YUV8_BT601; 282 VNMC_INF_YUV8_BT656 : VNMC_INF_YUV8_BT601;
283 break; 283 break;
284 case V4L2_MBUS_FMT_YUYV10_2X10: 284 case MEDIA_BUS_FMT_YUYV10_2X10:
285 /* BT.656 10bit YCbCr422 or BT.601 10bit YCbCr422 */ 285 /* BT.656 10bit YCbCr422 or BT.601 10bit YCbCr422 */
286 vnmc |= priv->pdata_flags & RCAR_VIN_BT656 ? 286 vnmc |= priv->pdata_flags & RCAR_VIN_BT656 ?
287 VNMC_INF_YUV10_BT656 : VNMC_INF_YUV10_BT601; 287 VNMC_INF_YUV10_BT656 : VNMC_INF_YUV10_BT601;
@@ -921,7 +921,7 @@ static int rcar_vin_get_formats(struct soc_camera_device *icd, unsigned int idx,
921 int ret, k, n; 921 int ret, k, n;
922 int formats = 0; 922 int formats = 0;
923 struct rcar_vin_cam *cam; 923 struct rcar_vin_cam *cam;
924 enum v4l2_mbus_pixelcode code; 924 u32 code;
925 const struct soc_mbus_pixelfmt *fmt; 925 const struct soc_mbus_pixelfmt *fmt;
926 926
927 ret = v4l2_subdev_call(sd, video, enum_mbus_fmt, idx, &code); 927 ret = v4l2_subdev_call(sd, video, enum_mbus_fmt, idx, &code);
@@ -1010,9 +1010,9 @@ static int rcar_vin_get_formats(struct soc_camera_device *icd, unsigned int idx,
1010 cam->extra_fmt = NULL; 1010 cam->extra_fmt = NULL;
1011 1011
1012 switch (code) { 1012 switch (code) {
1013 case V4L2_MBUS_FMT_YUYV8_1X16: 1013 case MEDIA_BUS_FMT_YUYV8_1X16:
1014 case V4L2_MBUS_FMT_YUYV8_2X8: 1014 case MEDIA_BUS_FMT_YUYV8_2X8:
1015 case V4L2_MBUS_FMT_YUYV10_2X10: 1015 case MEDIA_BUS_FMT_YUYV10_2X10:
1016 if (cam->extra_fmt) 1016 if (cam->extra_fmt)
1017 break; 1017 break;
1018 1018
diff --git a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
index 20ad4a571d37..5f58ed995320 100644
--- a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
+++ b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
@@ -149,7 +149,7 @@ struct sh_mobile_ceu_cam {
149 /* Camera cropping rectangle */ 149 /* Camera cropping rectangle */
150 struct v4l2_rect rect; 150 struct v4l2_rect rect;
151 const struct soc_mbus_pixelfmt *extra_fmt; 151 const struct soc_mbus_pixelfmt *extra_fmt;
152 enum v4l2_mbus_pixelcode code; 152 u32 code;
153}; 153};
154 154
155static struct sh_mobile_ceu_buffer *to_ceu_vb(struct vb2_buffer *vb) 155static struct sh_mobile_ceu_buffer *to_ceu_vb(struct vb2_buffer *vb)
@@ -861,16 +861,16 @@ static int sh_mobile_ceu_set_bus_param(struct soc_camera_device *icd)
861 case V4L2_PIX_FMT_NV16: 861 case V4L2_PIX_FMT_NV16:
862 case V4L2_PIX_FMT_NV61: 862 case V4L2_PIX_FMT_NV61:
863 switch (cam->code) { 863 switch (cam->code) {
864 case V4L2_MBUS_FMT_UYVY8_2X8: 864 case MEDIA_BUS_FMT_UYVY8_2X8:
865 value = 0x00000000; /* Cb0, Y0, Cr0, Y1 */ 865 value = 0x00000000; /* Cb0, Y0, Cr0, Y1 */
866 break; 866 break;
867 case V4L2_MBUS_FMT_VYUY8_2X8: 867 case MEDIA_BUS_FMT_VYUY8_2X8:
868 value = 0x00000100; /* Cr0, Y0, Cb0, Y1 */ 868 value = 0x00000100; /* Cr0, Y0, Cb0, Y1 */
869 break; 869 break;
870 case V4L2_MBUS_FMT_YUYV8_2X8: 870 case MEDIA_BUS_FMT_YUYV8_2X8:
871 value = 0x00000200; /* Y0, Cb0, Y1, Cr0 */ 871 value = 0x00000200; /* Y0, Cb0, Y1, Cr0 */
872 break; 872 break;
873 case V4L2_MBUS_FMT_YVYU8_2X8: 873 case MEDIA_BUS_FMT_YVYU8_2X8:
874 value = 0x00000300; /* Y0, Cr0, Y1, Cb0 */ 874 value = 0x00000300; /* Y0, Cr0, Y1, Cb0 */
875 break; 875 break;
876 default: 876 default:
@@ -1048,7 +1048,7 @@ static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, unsigned int
1048 int ret, k, n; 1048 int ret, k, n;
1049 int formats = 0; 1049 int formats = 0;
1050 struct sh_mobile_ceu_cam *cam; 1050 struct sh_mobile_ceu_cam *cam;
1051 enum v4l2_mbus_pixelcode code; 1051 u32 code;
1052 const struct soc_mbus_pixelfmt *fmt; 1052 const struct soc_mbus_pixelfmt *fmt;
1053 1053
1054 ret = v4l2_subdev_call(sd, video, enum_mbus_fmt, idx, &code); 1054 ret = v4l2_subdev_call(sd, video, enum_mbus_fmt, idx, &code);
@@ -1141,10 +1141,10 @@ static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, unsigned int
1141 cam->extra_fmt = NULL; 1141 cam->extra_fmt = NULL;
1142 1142
1143 switch (code) { 1143 switch (code) {
1144 case V4L2_MBUS_FMT_UYVY8_2X8: 1144 case MEDIA_BUS_FMT_UYVY8_2X8:
1145 case V4L2_MBUS_FMT_VYUY8_2X8: 1145 case MEDIA_BUS_FMT_VYUY8_2X8:
1146 case V4L2_MBUS_FMT_YUYV8_2X8: 1146 case MEDIA_BUS_FMT_YUYV8_2X8:
1147 case V4L2_MBUS_FMT_YVYU8_2X8: 1147 case MEDIA_BUS_FMT_YVYU8_2X8:
1148 if (cam->extra_fmt) 1148 if (cam->extra_fmt)
1149 break; 1149 break;
1150 1150
diff --git a/drivers/media/platform/soc_camera/sh_mobile_csi2.c b/drivers/media/platform/soc_camera/sh_mobile_csi2.c
index 05dd21a35d63..c738e27a75d7 100644
--- a/drivers/media/platform/soc_camera/sh_mobile_csi2.c
+++ b/drivers/media/platform/soc_camera/sh_mobile_csi2.c
@@ -59,28 +59,28 @@ static int sh_csi2_try_fmt(struct v4l2_subdev *sd,
59 switch (pdata->type) { 59 switch (pdata->type) {
60 case SH_CSI2C: 60 case SH_CSI2C:
61 switch (mf->code) { 61 switch (mf->code) {
62 case V4L2_MBUS_FMT_UYVY8_2X8: /* YUV422 */ 62 case MEDIA_BUS_FMT_UYVY8_2X8: /* YUV422 */
63 case V4L2_MBUS_FMT_YUYV8_1_5X8: /* YUV420 */ 63 case MEDIA_BUS_FMT_YUYV8_1_5X8: /* YUV420 */
64 case V4L2_MBUS_FMT_Y8_1X8: /* RAW8 */ 64 case MEDIA_BUS_FMT_Y8_1X8: /* RAW8 */
65 case V4L2_MBUS_FMT_SBGGR8_1X8: 65 case MEDIA_BUS_FMT_SBGGR8_1X8:
66 case V4L2_MBUS_FMT_SGRBG8_1X8: 66 case MEDIA_BUS_FMT_SGRBG8_1X8:
67 break; 67 break;
68 default: 68 default:
69 /* All MIPI CSI-2 devices must support one of primary formats */ 69 /* All MIPI CSI-2 devices must support one of primary formats */
70 mf->code = V4L2_MBUS_FMT_YUYV8_2X8; 70 mf->code = MEDIA_BUS_FMT_YUYV8_2X8;
71 } 71 }
72 break; 72 break;
73 case SH_CSI2I: 73 case SH_CSI2I:
74 switch (mf->code) { 74 switch (mf->code) {
75 case V4L2_MBUS_FMT_Y8_1X8: /* RAW8 */ 75 case MEDIA_BUS_FMT_Y8_1X8: /* RAW8 */
76 case V4L2_MBUS_FMT_SBGGR8_1X8: 76 case MEDIA_BUS_FMT_SBGGR8_1X8:
77 case V4L2_MBUS_FMT_SGRBG8_1X8: 77 case MEDIA_BUS_FMT_SGRBG8_1X8:
78 case V4L2_MBUS_FMT_SBGGR10_1X10: /* RAW10 */ 78 case MEDIA_BUS_FMT_SBGGR10_1X10: /* RAW10 */
79 case V4L2_MBUS_FMT_SBGGR12_1X12: /* RAW12 */ 79 case MEDIA_BUS_FMT_SBGGR12_1X12: /* RAW12 */
80 break; 80 break;
81 default: 81 default:
82 /* All MIPI CSI-2 devices must support one of primary formats */ 82 /* All MIPI CSI-2 devices must support one of primary formats */
83 mf->code = V4L2_MBUS_FMT_SBGGR8_1X8; 83 mf->code = MEDIA_BUS_FMT_SBGGR8_1X8;
84 } 84 }
85 break; 85 break;
86 } 86 }
@@ -104,21 +104,21 @@ static int sh_csi2_s_fmt(struct v4l2_subdev *sd,
104 return -EINVAL; 104 return -EINVAL;
105 105
106 switch (mf->code) { 106 switch (mf->code) {
107 case V4L2_MBUS_FMT_UYVY8_2X8: 107 case MEDIA_BUS_FMT_UYVY8_2X8:
108 tmp |= 0x1e; /* YUV422 8 bit */ 108 tmp |= 0x1e; /* YUV422 8 bit */
109 break; 109 break;
110 case V4L2_MBUS_FMT_YUYV8_1_5X8: 110 case MEDIA_BUS_FMT_YUYV8_1_5X8:
111 tmp |= 0x18; /* YUV420 8 bit */ 111 tmp |= 0x18; /* YUV420 8 bit */
112 break; 112 break;
113 case V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE: 113 case MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE:
114 tmp |= 0x21; /* RGB555 */ 114 tmp |= 0x21; /* RGB555 */
115 break; 115 break;
116 case V4L2_MBUS_FMT_RGB565_2X8_BE: 116 case MEDIA_BUS_FMT_RGB565_2X8_BE:
117 tmp |= 0x22; /* RGB565 */ 117 tmp |= 0x22; /* RGB565 */
118 break; 118 break;
119 case V4L2_MBUS_FMT_Y8_1X8: 119 case MEDIA_BUS_FMT_Y8_1X8:
120 case V4L2_MBUS_FMT_SBGGR8_1X8: 120 case MEDIA_BUS_FMT_SBGGR8_1X8:
121 case V4L2_MBUS_FMT_SGRBG8_1X8: 121 case MEDIA_BUS_FMT_SGRBG8_1X8:
122 tmp |= 0x2a; /* RAW8 */ 122 tmp |= 0x2a; /* RAW8 */
123 break; 123 break;
124 default: 124 default:
diff --git a/drivers/media/platform/soc_camera/soc_camera.c b/drivers/media/platform/soc_camera/soc_camera.c
index 8e61b976da19..f4be2a1c659a 100644
--- a/drivers/media/platform/soc_camera/soc_camera.c
+++ b/drivers/media/platform/soc_camera/soc_camera.c
@@ -460,7 +460,7 @@ static int soc_camera_init_user_formats(struct soc_camera_device *icd)
460 struct soc_camera_host *ici = to_soc_camera_host(icd->parent); 460 struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
461 unsigned int i, fmts = 0, raw_fmts = 0; 461 unsigned int i, fmts = 0, raw_fmts = 0;
462 int ret; 462 int ret;
463 enum v4l2_mbus_pixelcode code; 463 u32 code;
464 464
465 while (!v4l2_subdev_call(sd, video, enum_mbus_fmt, raw_fmts, &code)) 465 while (!v4l2_subdev_call(sd, video, enum_mbus_fmt, raw_fmts, &code))
466 raw_fmts++; 466 raw_fmts++;
diff --git a/drivers/media/platform/soc_camera/soc_camera_platform.c b/drivers/media/platform/soc_camera/soc_camera_platform.c
index ceaddfb85e49..f2ce1ab06d53 100644
--- a/drivers/media/platform/soc_camera/soc_camera_platform.c
+++ b/drivers/media/platform/soc_camera/soc_camera_platform.c
@@ -62,7 +62,7 @@ static struct v4l2_subdev_core_ops platform_subdev_core_ops = {
62}; 62};
63 63
64static int soc_camera_platform_enum_fmt(struct v4l2_subdev *sd, unsigned int index, 64static int soc_camera_platform_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
65 enum v4l2_mbus_pixelcode *code) 65 u32 *code)
66{ 66{
67 struct soc_camera_platform_info *p = v4l2_get_subdevdata(sd); 67 struct soc_camera_platform_info *p = v4l2_get_subdevdata(sd);
68 68
diff --git a/drivers/media/platform/soc_camera/soc_mediabus.c b/drivers/media/platform/soc_camera/soc_mediabus.c
index dc02deca7563..1dbcd426683c 100644
--- a/drivers/media/platform/soc_camera/soc_mediabus.c
+++ b/drivers/media/platform/soc_camera/soc_mediabus.c
@@ -17,7 +17,7 @@
17 17
18static const struct soc_mbus_lookup mbus_fmt[] = { 18static const struct soc_mbus_lookup mbus_fmt[] = {
19{ 19{
20 .code = V4L2_MBUS_FMT_YUYV8_2X8, 20 .code = MEDIA_BUS_FMT_YUYV8_2X8,
21 .fmt = { 21 .fmt = {
22 .fourcc = V4L2_PIX_FMT_YUYV, 22 .fourcc = V4L2_PIX_FMT_YUYV,
23 .name = "YUYV", 23 .name = "YUYV",
@@ -27,7 +27,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
27 .layout = SOC_MBUS_LAYOUT_PACKED, 27 .layout = SOC_MBUS_LAYOUT_PACKED,
28 }, 28 },
29}, { 29}, {
30 .code = V4L2_MBUS_FMT_YVYU8_2X8, 30 .code = MEDIA_BUS_FMT_YVYU8_2X8,
31 .fmt = { 31 .fmt = {
32 .fourcc = V4L2_PIX_FMT_YVYU, 32 .fourcc = V4L2_PIX_FMT_YVYU,
33 .name = "YVYU", 33 .name = "YVYU",
@@ -37,7 +37,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
37 .layout = SOC_MBUS_LAYOUT_PACKED, 37 .layout = SOC_MBUS_LAYOUT_PACKED,
38 }, 38 },
39}, { 39}, {
40 .code = V4L2_MBUS_FMT_UYVY8_2X8, 40 .code = MEDIA_BUS_FMT_UYVY8_2X8,
41 .fmt = { 41 .fmt = {
42 .fourcc = V4L2_PIX_FMT_UYVY, 42 .fourcc = V4L2_PIX_FMT_UYVY,
43 .name = "UYVY", 43 .name = "UYVY",
@@ -47,7 +47,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
47 .layout = SOC_MBUS_LAYOUT_PACKED, 47 .layout = SOC_MBUS_LAYOUT_PACKED,
48 }, 48 },
49}, { 49}, {
50 .code = V4L2_MBUS_FMT_VYUY8_2X8, 50 .code = MEDIA_BUS_FMT_VYUY8_2X8,
51 .fmt = { 51 .fmt = {
52 .fourcc = V4L2_PIX_FMT_VYUY, 52 .fourcc = V4L2_PIX_FMT_VYUY,
53 .name = "VYUY", 53 .name = "VYUY",
@@ -57,7 +57,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
57 .layout = SOC_MBUS_LAYOUT_PACKED, 57 .layout = SOC_MBUS_LAYOUT_PACKED,
58 }, 58 },
59}, { 59}, {
60 .code = V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE, 60 .code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE,
61 .fmt = { 61 .fmt = {
62 .fourcc = V4L2_PIX_FMT_RGB555, 62 .fourcc = V4L2_PIX_FMT_RGB555,
63 .name = "RGB555", 63 .name = "RGB555",
@@ -67,7 +67,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
67 .layout = SOC_MBUS_LAYOUT_PACKED, 67 .layout = SOC_MBUS_LAYOUT_PACKED,
68 }, 68 },
69}, { 69}, {
70 .code = V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE, 70 .code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE,
71 .fmt = { 71 .fmt = {
72 .fourcc = V4L2_PIX_FMT_RGB555X, 72 .fourcc = V4L2_PIX_FMT_RGB555X,
73 .name = "RGB555X", 73 .name = "RGB555X",
@@ -77,7 +77,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
77 .layout = SOC_MBUS_LAYOUT_PACKED, 77 .layout = SOC_MBUS_LAYOUT_PACKED,
78 }, 78 },
79}, { 79}, {
80 .code = V4L2_MBUS_FMT_RGB565_2X8_LE, 80 .code = MEDIA_BUS_FMT_RGB565_2X8_LE,
81 .fmt = { 81 .fmt = {
82 .fourcc = V4L2_PIX_FMT_RGB565, 82 .fourcc = V4L2_PIX_FMT_RGB565,
83 .name = "RGB565", 83 .name = "RGB565",
@@ -87,7 +87,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
87 .layout = SOC_MBUS_LAYOUT_PACKED, 87 .layout = SOC_MBUS_LAYOUT_PACKED,
88 }, 88 },
89}, { 89}, {
90 .code = V4L2_MBUS_FMT_RGB565_2X8_BE, 90 .code = MEDIA_BUS_FMT_RGB565_2X8_BE,
91 .fmt = { 91 .fmt = {
92 .fourcc = V4L2_PIX_FMT_RGB565X, 92 .fourcc = V4L2_PIX_FMT_RGB565X,
93 .name = "RGB565X", 93 .name = "RGB565X",
@@ -97,7 +97,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
97 .layout = SOC_MBUS_LAYOUT_PACKED, 97 .layout = SOC_MBUS_LAYOUT_PACKED,
98 }, 98 },
99}, { 99}, {
100 .code = V4L2_MBUS_FMT_RGB666_1X18, 100 .code = MEDIA_BUS_FMT_RGB666_1X18,
101 .fmt = { 101 .fmt = {
102 .fourcc = V4L2_PIX_FMT_RGB32, 102 .fourcc = V4L2_PIX_FMT_RGB32,
103 .name = "RGB666/32bpp", 103 .name = "RGB666/32bpp",
@@ -106,7 +106,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
106 .order = SOC_MBUS_ORDER_LE, 106 .order = SOC_MBUS_ORDER_LE,
107 }, 107 },
108}, { 108}, {
109 .code = V4L2_MBUS_FMT_RGB888_1X24, 109 .code = MEDIA_BUS_FMT_RGB888_1X24,
110 .fmt = { 110 .fmt = {
111 .fourcc = V4L2_PIX_FMT_RGB32, 111 .fourcc = V4L2_PIX_FMT_RGB32,
112 .name = "RGB888/32bpp", 112 .name = "RGB888/32bpp",
@@ -115,7 +115,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
115 .order = SOC_MBUS_ORDER_LE, 115 .order = SOC_MBUS_ORDER_LE,
116 }, 116 },
117}, { 117}, {
118 .code = V4L2_MBUS_FMT_RGB888_2X12_BE, 118 .code = MEDIA_BUS_FMT_RGB888_2X12_BE,
119 .fmt = { 119 .fmt = {
120 .fourcc = V4L2_PIX_FMT_RGB32, 120 .fourcc = V4L2_PIX_FMT_RGB32,
121 .name = "RGB888/32bpp", 121 .name = "RGB888/32bpp",
@@ -124,7 +124,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
124 .order = SOC_MBUS_ORDER_BE, 124 .order = SOC_MBUS_ORDER_BE,
125 }, 125 },
126}, { 126}, {
127 .code = V4L2_MBUS_FMT_RGB888_2X12_LE, 127 .code = MEDIA_BUS_FMT_RGB888_2X12_LE,
128 .fmt = { 128 .fmt = {
129 .fourcc = V4L2_PIX_FMT_RGB32, 129 .fourcc = V4L2_PIX_FMT_RGB32,
130 .name = "RGB888/32bpp", 130 .name = "RGB888/32bpp",
@@ -133,7 +133,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
133 .order = SOC_MBUS_ORDER_LE, 133 .order = SOC_MBUS_ORDER_LE,
134 }, 134 },
135}, { 135}, {
136 .code = V4L2_MBUS_FMT_SBGGR8_1X8, 136 .code = MEDIA_BUS_FMT_SBGGR8_1X8,
137 .fmt = { 137 .fmt = {
138 .fourcc = V4L2_PIX_FMT_SBGGR8, 138 .fourcc = V4L2_PIX_FMT_SBGGR8,
139 .name = "Bayer 8 BGGR", 139 .name = "Bayer 8 BGGR",
@@ -143,7 +143,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
143 .layout = SOC_MBUS_LAYOUT_PACKED, 143 .layout = SOC_MBUS_LAYOUT_PACKED,
144 }, 144 },
145}, { 145}, {
146 .code = V4L2_MBUS_FMT_SBGGR10_1X10, 146 .code = MEDIA_BUS_FMT_SBGGR10_1X10,
147 .fmt = { 147 .fmt = {
148 .fourcc = V4L2_PIX_FMT_SBGGR10, 148 .fourcc = V4L2_PIX_FMT_SBGGR10,
149 .name = "Bayer 10 BGGR", 149 .name = "Bayer 10 BGGR",
@@ -153,7 +153,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
153 .layout = SOC_MBUS_LAYOUT_PACKED, 153 .layout = SOC_MBUS_LAYOUT_PACKED,
154 }, 154 },
155}, { 155}, {
156 .code = V4L2_MBUS_FMT_Y8_1X8, 156 .code = MEDIA_BUS_FMT_Y8_1X8,
157 .fmt = { 157 .fmt = {
158 .fourcc = V4L2_PIX_FMT_GREY, 158 .fourcc = V4L2_PIX_FMT_GREY,
159 .name = "Grey", 159 .name = "Grey",
@@ -163,7 +163,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
163 .layout = SOC_MBUS_LAYOUT_PACKED, 163 .layout = SOC_MBUS_LAYOUT_PACKED,
164 }, 164 },
165}, { 165}, {
166 .code = V4L2_MBUS_FMT_Y10_1X10, 166 .code = MEDIA_BUS_FMT_Y10_1X10,
167 .fmt = { 167 .fmt = {
168 .fourcc = V4L2_PIX_FMT_Y10, 168 .fourcc = V4L2_PIX_FMT_Y10,
169 .name = "Grey 10bit", 169 .name = "Grey 10bit",
@@ -173,7 +173,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
173 .layout = SOC_MBUS_LAYOUT_PACKED, 173 .layout = SOC_MBUS_LAYOUT_PACKED,
174 }, 174 },
175}, { 175}, {
176 .code = V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE, 176 .code = MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE,
177 .fmt = { 177 .fmt = {
178 .fourcc = V4L2_PIX_FMT_SBGGR10, 178 .fourcc = V4L2_PIX_FMT_SBGGR10,
179 .name = "Bayer 10 BGGR", 179 .name = "Bayer 10 BGGR",
@@ -183,7 +183,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
183 .layout = SOC_MBUS_LAYOUT_PACKED, 183 .layout = SOC_MBUS_LAYOUT_PACKED,
184 }, 184 },
185}, { 185}, {
186 .code = V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_LE, 186 .code = MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_LE,
187 .fmt = { 187 .fmt = {
188 .fourcc = V4L2_PIX_FMT_SBGGR10, 188 .fourcc = V4L2_PIX_FMT_SBGGR10,
189 .name = "Bayer 10 BGGR", 189 .name = "Bayer 10 BGGR",
@@ -193,7 +193,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
193 .layout = SOC_MBUS_LAYOUT_PACKED, 193 .layout = SOC_MBUS_LAYOUT_PACKED,
194 }, 194 },
195}, { 195}, {
196 .code = V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_BE, 196 .code = MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_BE,
197 .fmt = { 197 .fmt = {
198 .fourcc = V4L2_PIX_FMT_SBGGR10, 198 .fourcc = V4L2_PIX_FMT_SBGGR10,
199 .name = "Bayer 10 BGGR", 199 .name = "Bayer 10 BGGR",
@@ -203,7 +203,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
203 .layout = SOC_MBUS_LAYOUT_PACKED, 203 .layout = SOC_MBUS_LAYOUT_PACKED,
204 }, 204 },
205}, { 205}, {
206 .code = V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_BE, 206 .code = MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_BE,
207 .fmt = { 207 .fmt = {
208 .fourcc = V4L2_PIX_FMT_SBGGR10, 208 .fourcc = V4L2_PIX_FMT_SBGGR10,
209 .name = "Bayer 10 BGGR", 209 .name = "Bayer 10 BGGR",
@@ -213,7 +213,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
213 .layout = SOC_MBUS_LAYOUT_PACKED, 213 .layout = SOC_MBUS_LAYOUT_PACKED,
214 }, 214 },
215}, { 215}, {
216 .code = V4L2_MBUS_FMT_JPEG_1X8, 216 .code = MEDIA_BUS_FMT_JPEG_1X8,
217 .fmt = { 217 .fmt = {
218 .fourcc = V4L2_PIX_FMT_JPEG, 218 .fourcc = V4L2_PIX_FMT_JPEG,
219 .name = "JPEG", 219 .name = "JPEG",
@@ -223,7 +223,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
223 .layout = SOC_MBUS_LAYOUT_PACKED, 223 .layout = SOC_MBUS_LAYOUT_PACKED,
224 }, 224 },
225}, { 225}, {
226 .code = V4L2_MBUS_FMT_RGB444_2X8_PADHI_BE, 226 .code = MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE,
227 .fmt = { 227 .fmt = {
228 .fourcc = V4L2_PIX_FMT_RGB444, 228 .fourcc = V4L2_PIX_FMT_RGB444,
229 .name = "RGB444", 229 .name = "RGB444",
@@ -233,7 +233,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
233 .layout = SOC_MBUS_LAYOUT_PACKED, 233 .layout = SOC_MBUS_LAYOUT_PACKED,
234 }, 234 },
235}, { 235}, {
236 .code = V4L2_MBUS_FMT_YUYV8_1_5X8, 236 .code = MEDIA_BUS_FMT_YUYV8_1_5X8,
237 .fmt = { 237 .fmt = {
238 .fourcc = V4L2_PIX_FMT_YUV420, 238 .fourcc = V4L2_PIX_FMT_YUV420,
239 .name = "YUYV 4:2:0", 239 .name = "YUYV 4:2:0",
@@ -243,7 +243,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
243 .layout = SOC_MBUS_LAYOUT_PACKED, 243 .layout = SOC_MBUS_LAYOUT_PACKED,
244 }, 244 },
245}, { 245}, {
246 .code = V4L2_MBUS_FMT_YVYU8_1_5X8, 246 .code = MEDIA_BUS_FMT_YVYU8_1_5X8,
247 .fmt = { 247 .fmt = {
248 .fourcc = V4L2_PIX_FMT_YVU420, 248 .fourcc = V4L2_PIX_FMT_YVU420,
249 .name = "YVYU 4:2:0", 249 .name = "YVYU 4:2:0",
@@ -253,7 +253,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
253 .layout = SOC_MBUS_LAYOUT_PACKED, 253 .layout = SOC_MBUS_LAYOUT_PACKED,
254 }, 254 },
255}, { 255}, {
256 .code = V4L2_MBUS_FMT_UYVY8_1X16, 256 .code = MEDIA_BUS_FMT_UYVY8_1X16,
257 .fmt = { 257 .fmt = {
258 .fourcc = V4L2_PIX_FMT_UYVY, 258 .fourcc = V4L2_PIX_FMT_UYVY,
259 .name = "UYVY 16bit", 259 .name = "UYVY 16bit",
@@ -263,7 +263,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
263 .layout = SOC_MBUS_LAYOUT_PACKED, 263 .layout = SOC_MBUS_LAYOUT_PACKED,
264 }, 264 },
265}, { 265}, {
266 .code = V4L2_MBUS_FMT_VYUY8_1X16, 266 .code = MEDIA_BUS_FMT_VYUY8_1X16,
267 .fmt = { 267 .fmt = {
268 .fourcc = V4L2_PIX_FMT_VYUY, 268 .fourcc = V4L2_PIX_FMT_VYUY,
269 .name = "VYUY 16bit", 269 .name = "VYUY 16bit",
@@ -273,7 +273,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
273 .layout = SOC_MBUS_LAYOUT_PACKED, 273 .layout = SOC_MBUS_LAYOUT_PACKED,
274 }, 274 },
275}, { 275}, {
276 .code = V4L2_MBUS_FMT_YUYV8_1X16, 276 .code = MEDIA_BUS_FMT_YUYV8_1X16,
277 .fmt = { 277 .fmt = {
278 .fourcc = V4L2_PIX_FMT_YUYV, 278 .fourcc = V4L2_PIX_FMT_YUYV,
279 .name = "YUYV 16bit", 279 .name = "YUYV 16bit",
@@ -283,7 +283,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
283 .layout = SOC_MBUS_LAYOUT_PACKED, 283 .layout = SOC_MBUS_LAYOUT_PACKED,
284 }, 284 },
285}, { 285}, {
286 .code = V4L2_MBUS_FMT_YVYU8_1X16, 286 .code = MEDIA_BUS_FMT_YVYU8_1X16,
287 .fmt = { 287 .fmt = {
288 .fourcc = V4L2_PIX_FMT_YVYU, 288 .fourcc = V4L2_PIX_FMT_YVYU,
289 .name = "YVYU 16bit", 289 .name = "YVYU 16bit",
@@ -293,7 +293,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
293 .layout = SOC_MBUS_LAYOUT_PACKED, 293 .layout = SOC_MBUS_LAYOUT_PACKED,
294 }, 294 },
295}, { 295}, {
296 .code = V4L2_MBUS_FMT_SGRBG8_1X8, 296 .code = MEDIA_BUS_FMT_SGRBG8_1X8,
297 .fmt = { 297 .fmt = {
298 .fourcc = V4L2_PIX_FMT_SGRBG8, 298 .fourcc = V4L2_PIX_FMT_SGRBG8,
299 .name = "Bayer 8 GRBG", 299 .name = "Bayer 8 GRBG",
@@ -303,7 +303,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
303 .layout = SOC_MBUS_LAYOUT_PACKED, 303 .layout = SOC_MBUS_LAYOUT_PACKED,
304 }, 304 },
305}, { 305}, {
306 .code = V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8, 306 .code = MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8,
307 .fmt = { 307 .fmt = {
308 .fourcc = V4L2_PIX_FMT_SGRBG10DPCM8, 308 .fourcc = V4L2_PIX_FMT_SGRBG10DPCM8,
309 .name = "Bayer 10 BGGR DPCM 8", 309 .name = "Bayer 10 BGGR DPCM 8",
@@ -313,7 +313,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
313 .layout = SOC_MBUS_LAYOUT_PACKED, 313 .layout = SOC_MBUS_LAYOUT_PACKED,
314 }, 314 },
315}, { 315}, {
316 .code = V4L2_MBUS_FMT_SGBRG10_1X10, 316 .code = MEDIA_BUS_FMT_SGBRG10_1X10,
317 .fmt = { 317 .fmt = {
318 .fourcc = V4L2_PIX_FMT_SGBRG10, 318 .fourcc = V4L2_PIX_FMT_SGBRG10,
319 .name = "Bayer 10 GBRG", 319 .name = "Bayer 10 GBRG",
@@ -323,7 +323,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
323 .layout = SOC_MBUS_LAYOUT_PACKED, 323 .layout = SOC_MBUS_LAYOUT_PACKED,
324 }, 324 },
325}, { 325}, {
326 .code = V4L2_MBUS_FMT_SGRBG10_1X10, 326 .code = MEDIA_BUS_FMT_SGRBG10_1X10,
327 .fmt = { 327 .fmt = {
328 .fourcc = V4L2_PIX_FMT_SGRBG10, 328 .fourcc = V4L2_PIX_FMT_SGRBG10,
329 .name = "Bayer 10 GRBG", 329 .name = "Bayer 10 GRBG",
@@ -333,7 +333,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
333 .layout = SOC_MBUS_LAYOUT_PACKED, 333 .layout = SOC_MBUS_LAYOUT_PACKED,
334 }, 334 },
335}, { 335}, {
336 .code = V4L2_MBUS_FMT_SRGGB10_1X10, 336 .code = MEDIA_BUS_FMT_SRGGB10_1X10,
337 .fmt = { 337 .fmt = {
338 .fourcc = V4L2_PIX_FMT_SRGGB10, 338 .fourcc = V4L2_PIX_FMT_SRGGB10,
339 .name = "Bayer 10 RGGB", 339 .name = "Bayer 10 RGGB",
@@ -343,7 +343,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
343 .layout = SOC_MBUS_LAYOUT_PACKED, 343 .layout = SOC_MBUS_LAYOUT_PACKED,
344 }, 344 },
345}, { 345}, {
346 .code = V4L2_MBUS_FMT_SBGGR12_1X12, 346 .code = MEDIA_BUS_FMT_SBGGR12_1X12,
347 .fmt = { 347 .fmt = {
348 .fourcc = V4L2_PIX_FMT_SBGGR12, 348 .fourcc = V4L2_PIX_FMT_SBGGR12,
349 .name = "Bayer 12 BGGR", 349 .name = "Bayer 12 BGGR",
@@ -353,7 +353,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
353 .layout = SOC_MBUS_LAYOUT_PACKED, 353 .layout = SOC_MBUS_LAYOUT_PACKED,
354 }, 354 },
355}, { 355}, {
356 .code = V4L2_MBUS_FMT_SGBRG12_1X12, 356 .code = MEDIA_BUS_FMT_SGBRG12_1X12,
357 .fmt = { 357 .fmt = {
358 .fourcc = V4L2_PIX_FMT_SGBRG12, 358 .fourcc = V4L2_PIX_FMT_SGBRG12,
359 .name = "Bayer 12 GBRG", 359 .name = "Bayer 12 GBRG",
@@ -363,7 +363,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
363 .layout = SOC_MBUS_LAYOUT_PACKED, 363 .layout = SOC_MBUS_LAYOUT_PACKED,
364 }, 364 },
365}, { 365}, {
366 .code = V4L2_MBUS_FMT_SGRBG12_1X12, 366 .code = MEDIA_BUS_FMT_SGRBG12_1X12,
367 .fmt = { 367 .fmt = {
368 .fourcc = V4L2_PIX_FMT_SGRBG12, 368 .fourcc = V4L2_PIX_FMT_SGRBG12,
369 .name = "Bayer 12 GRBG", 369 .name = "Bayer 12 GRBG",
@@ -373,7 +373,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
373 .layout = SOC_MBUS_LAYOUT_PACKED, 373 .layout = SOC_MBUS_LAYOUT_PACKED,
374 }, 374 },
375}, { 375}, {
376 .code = V4L2_MBUS_FMT_SRGGB12_1X12, 376 .code = MEDIA_BUS_FMT_SRGGB12_1X12,
377 .fmt = { 377 .fmt = {
378 .fourcc = V4L2_PIX_FMT_SRGGB12, 378 .fourcc = V4L2_PIX_FMT_SRGGB12,
379 .name = "Bayer 12 RGGB", 379 .name = "Bayer 12 RGGB",
@@ -458,7 +458,7 @@ s32 soc_mbus_image_size(const struct soc_mbus_pixelfmt *mf,
458EXPORT_SYMBOL(soc_mbus_image_size); 458EXPORT_SYMBOL(soc_mbus_image_size);
459 459
460const struct soc_mbus_pixelfmt *soc_mbus_find_fmtdesc( 460const struct soc_mbus_pixelfmt *soc_mbus_find_fmtdesc(
461 enum v4l2_mbus_pixelcode code, 461 u32 code,
462 const struct soc_mbus_lookup *lookup, 462 const struct soc_mbus_lookup *lookup,
463 int n) 463 int n)
464{ 464{
@@ -473,7 +473,7 @@ const struct soc_mbus_pixelfmt *soc_mbus_find_fmtdesc(
473EXPORT_SYMBOL(soc_mbus_find_fmtdesc); 473EXPORT_SYMBOL(soc_mbus_find_fmtdesc);
474 474
475const struct soc_mbus_pixelfmt *soc_mbus_get_fmtdesc( 475const struct soc_mbus_pixelfmt *soc_mbus_get_fmtdesc(
476 enum v4l2_mbus_pixelcode code) 476 u32 code)
477{ 477{
478 return soc_mbus_find_fmtdesc(code, mbus_fmt, ARRAY_SIZE(mbus_fmt)); 478 return soc_mbus_find_fmtdesc(code, mbus_fmt, ARRAY_SIZE(mbus_fmt));
479} 479}
diff --git a/drivers/media/platform/ti-vpe/csc.c b/drivers/media/platform/ti-vpe/csc.c
index 940df4000c42..bec674994752 100644
--- a/drivers/media/platform/ti-vpe/csc.c
+++ b/drivers/media/platform/ti-vpe/csc.c
@@ -93,12 +93,8 @@ void csc_dump_regs(struct csc_data *csc)
93{ 93{
94 struct device *dev = &csc->pdev->dev; 94 struct device *dev = &csc->pdev->dev;
95 95
96 u32 read_reg(struct csc_data *csc, int offset) 96#define DUMPREG(r) dev_dbg(dev, "%-35s %08x\n", #r, \
97 { 97 ioread32(csc->base + CSC_##r))
98 return ioread32(csc->base + offset);
99 }
100
101#define DUMPREG(r) dev_dbg(dev, "%-35s %08x\n", #r, read_reg(csc, CSC_##r))
102 98
103 DUMPREG(CSC00); 99 DUMPREG(CSC00);
104 DUMPREG(CSC01); 100 DUMPREG(CSC01);
@@ -189,7 +185,7 @@ struct csc_data *csc_create(struct platform_device *pdev)
189 csc->base = devm_ioremap_resource(&pdev->dev, csc->res); 185 csc->base = devm_ioremap_resource(&pdev->dev, csc->res);
190 if (IS_ERR(csc->base)) { 186 if (IS_ERR(csc->base)) {
191 dev_err(&pdev->dev, "failed to ioremap\n"); 187 dev_err(&pdev->dev, "failed to ioremap\n");
192 return csc->base; 188 return ERR_CAST(csc->base);
193 } 189 }
194 190
195 return csc; 191 return csc;
diff --git a/drivers/media/platform/ti-vpe/sc.c b/drivers/media/platform/ti-vpe/sc.c
index 6314171ffe9b..f82d1c7f667f 100644
--- a/drivers/media/platform/ti-vpe/sc.c
+++ b/drivers/media/platform/ti-vpe/sc.c
@@ -24,12 +24,8 @@ void sc_dump_regs(struct sc_data *sc)
24{ 24{
25 struct device *dev = &sc->pdev->dev; 25 struct device *dev = &sc->pdev->dev;
26 26
27 u32 read_reg(struct sc_data *sc, int offset) 27#define DUMPREG(r) dev_dbg(dev, "%-35s %08x\n", #r, \
28 { 28 ioread32(sc->base + CFG_##r))
29 return ioread32(sc->base + offset);
30 }
31
32#define DUMPREG(r) dev_dbg(dev, "%-35s %08x\n", #r, read_reg(sc, CFG_##r))
33 29
34 DUMPREG(SC0); 30 DUMPREG(SC0);
35 DUMPREG(SC1); 31 DUMPREG(SC1);
@@ -304,7 +300,7 @@ struct sc_data *sc_create(struct platform_device *pdev)
304 sc->base = devm_ioremap_resource(&pdev->dev, sc->res); 300 sc->base = devm_ioremap_resource(&pdev->dev, sc->res);
305 if (IS_ERR(sc->base)) { 301 if (IS_ERR(sc->base)) {
306 dev_err(&pdev->dev, "failed to ioremap\n"); 302 dev_err(&pdev->dev, "failed to ioremap\n");
307 return sc->base; 303 return ERR_CAST(sc->base);
308 } 304 }
309 305
310 return sc; 306 return sc;
diff --git a/drivers/media/platform/via-camera.c b/drivers/media/platform/via-camera.c
index ae6870cb8339..86989d86abfa 100644
--- a/drivers/media/platform/via-camera.c
+++ b/drivers/media/platform/via-camera.c
@@ -101,7 +101,7 @@ struct via_camera {
101 */ 101 */
102 struct v4l2_pix_format sensor_format; 102 struct v4l2_pix_format sensor_format;
103 struct v4l2_pix_format user_format; 103 struct v4l2_pix_format user_format;
104 enum v4l2_mbus_pixelcode mbus_code; 104 u32 mbus_code;
105}; 105};
106 106
107/* 107/*
@@ -143,12 +143,12 @@ static struct via_format {
143 __u8 *desc; 143 __u8 *desc;
144 __u32 pixelformat; 144 __u32 pixelformat;
145 int bpp; /* Bytes per pixel */ 145 int bpp; /* Bytes per pixel */
146 enum v4l2_mbus_pixelcode mbus_code; 146 u32 mbus_code;
147} via_formats[] = { 147} via_formats[] = {
148 { 148 {
149 .desc = "YUYV 4:2:2", 149 .desc = "YUYV 4:2:2",
150 .pixelformat = V4L2_PIX_FMT_YUYV, 150 .pixelformat = V4L2_PIX_FMT_YUYV,
151 .mbus_code = V4L2_MBUS_FMT_YUYV8_2X8, 151 .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
152 .bpp = 2, 152 .bpp = 2,
153 }, 153 },
154 /* RGB444 and Bayer should be doable, but have never been 154 /* RGB444 and Bayer should be doable, but have never been
@@ -849,7 +849,7 @@ static const struct v4l2_pix_format viacam_def_pix_format = {
849 .sizeimage = VGA_WIDTH * VGA_HEIGHT * 2, 849 .sizeimage = VGA_WIDTH * VGA_HEIGHT * 2,
850}; 850};
851 851
852static const enum v4l2_mbus_pixelcode via_def_mbus_code = V4L2_MBUS_FMT_YUYV8_2X8; 852static const u32 via_def_mbus_code = MEDIA_BUS_FMT_YUYV8_2X8;
853 853
854static int viacam_enum_fmt_vid_cap(struct file *filp, void *priv, 854static int viacam_enum_fmt_vid_cap(struct file *filp, void *priv,
855 struct v4l2_fmtdesc *fmt) 855 struct v4l2_fmtdesc *fmt)
@@ -985,9 +985,9 @@ static int viacam_querycap(struct file *filp, void *priv,
985{ 985{
986 strcpy(cap->driver, "via-camera"); 986 strcpy(cap->driver, "via-camera");
987 strcpy(cap->card, "via-camera"); 987 strcpy(cap->card, "via-camera");
988 cap->version = 1; 988 cap->device_caps = V4L2_CAP_VIDEO_CAPTURE |
989 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE |
990 V4L2_CAP_READWRITE | V4L2_CAP_STREAMING; 989 V4L2_CAP_READWRITE | V4L2_CAP_STREAMING;
990 cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
991 return 0; 991 return 0;
992} 992}
993 993
diff --git a/drivers/media/platform/mem2mem_testdev.c b/drivers/media/platform/vim2m.c
index c1b03cfd6ded..1105c111cc10 100644
--- a/drivers/media/platform/mem2mem_testdev.c
+++ b/drivers/media/platform/vim2m.c
@@ -31,12 +31,11 @@
31#include <media/v4l2-event.h> 31#include <media/v4l2-event.h>
32#include <media/videobuf2-vmalloc.h> 32#include <media/videobuf2-vmalloc.h>
33 33
34#define MEM2MEM_TEST_MODULE_NAME "mem2mem-testdev"
35
36MODULE_DESCRIPTION("Virtual device for mem2mem framework testing"); 34MODULE_DESCRIPTION("Virtual device for mem2mem framework testing");
37MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>"); 35MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>");
38MODULE_LICENSE("GPL"); 36MODULE_LICENSE("GPL");
39MODULE_VERSION("0.1.1"); 37MODULE_VERSION("0.1.1");
38MODULE_ALIAS("mem2mem_testdev");
40 39
41static unsigned debug; 40static unsigned debug;
42module_param(debug, uint, 0644); 41module_param(debug, uint, 0644);
@@ -52,7 +51,7 @@ MODULE_PARM_DESC(debug, "activates debug info");
52#define MEM2MEM_CAPTURE (1 << 0) 51#define MEM2MEM_CAPTURE (1 << 0)
53#define MEM2MEM_OUTPUT (1 << 1) 52#define MEM2MEM_OUTPUT (1 << 1)
54 53
55#define MEM2MEM_NAME "m2m-testdev" 54#define MEM2MEM_NAME "vim2m"
56 55
57/* Per queue */ 56/* Per queue */
58#define MEM2MEM_DEF_NUM_BUFS VIDEO_MAX_FRAME 57#define MEM2MEM_DEF_NUM_BUFS VIDEO_MAX_FRAME
@@ -72,15 +71,15 @@ MODULE_PARM_DESC(debug, "activates debug info");
72 v4l2_dbg(1, debug, &dev->v4l2_dev, "%s: " fmt, __func__, ## arg) 71 v4l2_dbg(1, debug, &dev->v4l2_dev, "%s: " fmt, __func__, ## arg)
73 72
74 73
75static void m2mtest_dev_release(struct device *dev) 74static void vim2m_dev_release(struct device *dev)
76{} 75{}
77 76
78static struct platform_device m2mtest_pdev = { 77static struct platform_device vim2m_pdev = {
79 .name = MEM2MEM_NAME, 78 .name = MEM2MEM_NAME,
80 .dev.release = m2mtest_dev_release, 79 .dev.release = vim2m_dev_release,
81}; 80};
82 81
83struct m2mtest_fmt { 82struct vim2m_fmt {
84 char *name; 83 char *name;
85 u32 fourcc; 84 u32 fourcc;
86 int depth; 85 int depth;
@@ -88,7 +87,7 @@ struct m2mtest_fmt {
88 u32 types; 87 u32 types;
89}; 88};
90 89
91static struct m2mtest_fmt formats[] = { 90static struct vim2m_fmt formats[] = {
92 { 91 {
93 .name = "RGB565 (BE)", 92 .name = "RGB565 (BE)",
94 .fourcc = V4L2_PIX_FMT_RGB565X, /* rrrrrggg gggbbbbb */ 93 .fourcc = V4L2_PIX_FMT_RGB565X, /* rrrrrggg gggbbbbb */
@@ -108,12 +107,12 @@ static struct m2mtest_fmt formats[] = {
108#define NUM_FORMATS ARRAY_SIZE(formats) 107#define NUM_FORMATS ARRAY_SIZE(formats)
109 108
110/* Per-queue, driver-specific private data */ 109/* Per-queue, driver-specific private data */
111struct m2mtest_q_data { 110struct vim2m_q_data {
112 unsigned int width; 111 unsigned int width;
113 unsigned int height; 112 unsigned int height;
114 unsigned int sizeimage; 113 unsigned int sizeimage;
115 unsigned int sequence; 114 unsigned int sequence;
116 struct m2mtest_fmt *fmt; 115 struct vim2m_fmt *fmt;
117}; 116};
118 117
119enum { 118enum {
@@ -124,9 +123,9 @@ enum {
124#define V4L2_CID_TRANS_TIME_MSEC (V4L2_CID_USER_BASE + 0x1000) 123#define V4L2_CID_TRANS_TIME_MSEC (V4L2_CID_USER_BASE + 0x1000)
125#define V4L2_CID_TRANS_NUM_BUFS (V4L2_CID_USER_BASE + 0x1001) 124#define V4L2_CID_TRANS_NUM_BUFS (V4L2_CID_USER_BASE + 0x1001)
126 125
127static struct m2mtest_fmt *find_format(struct v4l2_format *f) 126static struct vim2m_fmt *find_format(struct v4l2_format *f)
128{ 127{
129 struct m2mtest_fmt *fmt; 128 struct vim2m_fmt *fmt;
130 unsigned int k; 129 unsigned int k;
131 130
132 for (k = 0; k < NUM_FORMATS; k++) { 131 for (k = 0; k < NUM_FORMATS; k++) {
@@ -141,7 +140,7 @@ static struct m2mtest_fmt *find_format(struct v4l2_format *f)
141 return &formats[k]; 140 return &formats[k];
142} 141}
143 142
144struct m2mtest_dev { 143struct vim2m_dev {
145 struct v4l2_device v4l2_dev; 144 struct v4l2_device v4l2_dev;
146 struct video_device *vfd; 145 struct video_device *vfd;
147 146
@@ -154,9 +153,9 @@ struct m2mtest_dev {
154 struct v4l2_m2m_dev *m2m_dev; 153 struct v4l2_m2m_dev *m2m_dev;
155}; 154};
156 155
157struct m2mtest_ctx { 156struct vim2m_ctx {
158 struct v4l2_fh fh; 157 struct v4l2_fh fh;
159 struct m2mtest_dev *dev; 158 struct vim2m_dev *dev;
160 159
161 struct v4l2_ctrl_handler hdl; 160 struct v4l2_ctrl_handler hdl;
162 161
@@ -177,15 +176,15 @@ struct m2mtest_ctx {
177 enum v4l2_colorspace colorspace; 176 enum v4l2_colorspace colorspace;
178 177
179 /* Source and destination queue data */ 178 /* Source and destination queue data */
180 struct m2mtest_q_data q_data[2]; 179 struct vim2m_q_data q_data[2];
181}; 180};
182 181
183static inline struct m2mtest_ctx *file2ctx(struct file *file) 182static inline struct vim2m_ctx *file2ctx(struct file *file)
184{ 183{
185 return container_of(file->private_data, struct m2mtest_ctx, fh); 184 return container_of(file->private_data, struct vim2m_ctx, fh);
186} 185}
187 186
188static struct m2mtest_q_data *get_q_data(struct m2mtest_ctx *ctx, 187static struct vim2m_q_data *get_q_data(struct vim2m_ctx *ctx,
189 enum v4l2_buf_type type) 188 enum v4l2_buf_type type)
190{ 189{
191 switch (type) { 190 switch (type) {
@@ -200,12 +199,12 @@ static struct m2mtest_q_data *get_q_data(struct m2mtest_ctx *ctx,
200} 199}
201 200
202 201
203static int device_process(struct m2mtest_ctx *ctx, 202static int device_process(struct vim2m_ctx *ctx,
204 struct vb2_buffer *in_vb, 203 struct vb2_buffer *in_vb,
205 struct vb2_buffer *out_vb) 204 struct vb2_buffer *out_vb)
206{ 205{
207 struct m2mtest_dev *dev = ctx->dev; 206 struct vim2m_dev *dev = ctx->dev;
208 struct m2mtest_q_data *q_data; 207 struct vim2m_q_data *q_data;
209 u8 *p_in, *p_out; 208 u8 *p_in, *p_out;
210 int x, y, t, w; 209 int x, y, t, w;
211 int tile_w, bytes_left; 210 int tile_w, bytes_left;
@@ -334,7 +333,7 @@ static int device_process(struct m2mtest_ctx *ctx,
334 return 0; 333 return 0;
335} 334}
336 335
337static void schedule_irq(struct m2mtest_dev *dev, int msec_timeout) 336static void schedule_irq(struct vim2m_dev *dev, int msec_timeout)
338{ 337{
339 dprintk(dev, "Scheduling a simulated irq\n"); 338 dprintk(dev, "Scheduling a simulated irq\n");
340 mod_timer(&dev->timer, jiffies + msecs_to_jiffies(msec_timeout)); 339 mod_timer(&dev->timer, jiffies + msecs_to_jiffies(msec_timeout));
@@ -349,7 +348,7 @@ static void schedule_irq(struct m2mtest_dev *dev, int msec_timeout)
349 */ 348 */
350static int job_ready(void *priv) 349static int job_ready(void *priv)
351{ 350{
352 struct m2mtest_ctx *ctx = priv; 351 struct vim2m_ctx *ctx = priv;
353 352
354 if (v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) < ctx->translen 353 if (v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) < ctx->translen
355 || v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx) < ctx->translen) { 354 || v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx) < ctx->translen) {
@@ -362,7 +361,7 @@ static int job_ready(void *priv)
362 361
363static void job_abort(void *priv) 362static void job_abort(void *priv)
364{ 363{
365 struct m2mtest_ctx *ctx = priv; 364 struct vim2m_ctx *ctx = priv;
366 365
367 /* Will cancel the transaction in the next interrupt handler */ 366 /* Will cancel the transaction in the next interrupt handler */
368 ctx->aborting = 1; 367 ctx->aborting = 1;
@@ -376,8 +375,8 @@ static void job_abort(void *priv)
376 */ 375 */
377static void device_run(void *priv) 376static void device_run(void *priv)
378{ 377{
379 struct m2mtest_ctx *ctx = priv; 378 struct vim2m_ctx *ctx = priv;
380 struct m2mtest_dev *dev = ctx->dev; 379 struct vim2m_dev *dev = ctx->dev;
381 struct vb2_buffer *src_buf, *dst_buf; 380 struct vb2_buffer *src_buf, *dst_buf;
382 381
383 src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); 382 src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
@@ -391,12 +390,12 @@ static void device_run(void *priv)
391 390
392static void device_isr(unsigned long priv) 391static void device_isr(unsigned long priv)
393{ 392{
394 struct m2mtest_dev *m2mtest_dev = (struct m2mtest_dev *)priv; 393 struct vim2m_dev *vim2m_dev = (struct vim2m_dev *)priv;
395 struct m2mtest_ctx *curr_ctx; 394 struct vim2m_ctx *curr_ctx;
396 struct vb2_buffer *src_vb, *dst_vb; 395 struct vb2_buffer *src_vb, *dst_vb;
397 unsigned long flags; 396 unsigned long flags;
398 397
399 curr_ctx = v4l2_m2m_get_curr_priv(m2mtest_dev->m2m_dev); 398 curr_ctx = v4l2_m2m_get_curr_priv(vim2m_dev->m2m_dev);
400 399
401 if (NULL == curr_ctx) { 400 if (NULL == curr_ctx) {
402 pr_err("Instance released before the end of transaction\n"); 401 pr_err("Instance released before the end of transaction\n");
@@ -408,16 +407,16 @@ static void device_isr(unsigned long priv)
408 407
409 curr_ctx->num_processed++; 408 curr_ctx->num_processed++;
410 409
411 spin_lock_irqsave(&m2mtest_dev->irqlock, flags); 410 spin_lock_irqsave(&vim2m_dev->irqlock, flags);
412 v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_DONE); 411 v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_DONE);
413 v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_DONE); 412 v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_DONE);
414 spin_unlock_irqrestore(&m2mtest_dev->irqlock, flags); 413 spin_unlock_irqrestore(&vim2m_dev->irqlock, flags);
415 414
416 if (curr_ctx->num_processed == curr_ctx->translen 415 if (curr_ctx->num_processed == curr_ctx->translen
417 || curr_ctx->aborting) { 416 || curr_ctx->aborting) {
418 dprintk(curr_ctx->dev, "Finishing transaction\n"); 417 dprintk(curr_ctx->dev, "Finishing transaction\n");
419 curr_ctx->num_processed = 0; 418 curr_ctx->num_processed = 0;
420 v4l2_m2m_job_finish(m2mtest_dev->m2m_dev, curr_ctx->fh.m2m_ctx); 419 v4l2_m2m_job_finish(vim2m_dev->m2m_dev, curr_ctx->fh.m2m_ctx);
421 } else { 420 } else {
422 device_run(curr_ctx); 421 device_run(curr_ctx);
423 } 422 }
@@ -441,7 +440,7 @@ static int vidioc_querycap(struct file *file, void *priv,
441static int enum_fmt(struct v4l2_fmtdesc *f, u32 type) 440static int enum_fmt(struct v4l2_fmtdesc *f, u32 type)
442{ 441{
443 int i, num; 442 int i, num;
444 struct m2mtest_fmt *fmt; 443 struct vim2m_fmt *fmt;
445 444
446 num = 0; 445 num = 0;
447 446
@@ -480,10 +479,10 @@ static int vidioc_enum_fmt_vid_out(struct file *file, void *priv,
480 return enum_fmt(f, MEM2MEM_OUTPUT); 479 return enum_fmt(f, MEM2MEM_OUTPUT);
481} 480}
482 481
483static int vidioc_g_fmt(struct m2mtest_ctx *ctx, struct v4l2_format *f) 482static int vidioc_g_fmt(struct vim2m_ctx *ctx, struct v4l2_format *f)
484{ 483{
485 struct vb2_queue *vq; 484 struct vb2_queue *vq;
486 struct m2mtest_q_data *q_data; 485 struct vim2m_q_data *q_data;
487 486
488 vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type); 487 vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
489 if (!vq) 488 if (!vq)
@@ -514,7 +513,7 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
514 return vidioc_g_fmt(file2ctx(file), f); 513 return vidioc_g_fmt(file2ctx(file), f);
515} 514}
516 515
517static int vidioc_try_fmt(struct v4l2_format *f, struct m2mtest_fmt *fmt) 516static int vidioc_try_fmt(struct v4l2_format *f, struct vim2m_fmt *fmt)
518{ 517{
519 /* V4L2 specification suggests the driver corrects the format struct 518 /* V4L2 specification suggests the driver corrects the format struct
520 * if any of the dimensions is unsupported */ 519 * if any of the dimensions is unsupported */
@@ -539,8 +538,8 @@ static int vidioc_try_fmt(struct v4l2_format *f, struct m2mtest_fmt *fmt)
539static int vidioc_try_fmt_vid_cap(struct file *file, void *priv, 538static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
540 struct v4l2_format *f) 539 struct v4l2_format *f)
541{ 540{
542 struct m2mtest_fmt *fmt; 541 struct vim2m_fmt *fmt;
543 struct m2mtest_ctx *ctx = file2ctx(file); 542 struct vim2m_ctx *ctx = file2ctx(file);
544 543
545 fmt = find_format(f); 544 fmt = find_format(f);
546 if (!fmt) { 545 if (!fmt) {
@@ -561,8 +560,8 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
561static int vidioc_try_fmt_vid_out(struct file *file, void *priv, 560static int vidioc_try_fmt_vid_out(struct file *file, void *priv,
562 struct v4l2_format *f) 561 struct v4l2_format *f)
563{ 562{
564 struct m2mtest_fmt *fmt; 563 struct vim2m_fmt *fmt;
565 struct m2mtest_ctx *ctx = file2ctx(file); 564 struct vim2m_ctx *ctx = file2ctx(file);
566 565
567 fmt = find_format(f); 566 fmt = find_format(f);
568 if (!fmt) { 567 if (!fmt) {
@@ -581,9 +580,9 @@ static int vidioc_try_fmt_vid_out(struct file *file, void *priv,
581 return vidioc_try_fmt(f, fmt); 580 return vidioc_try_fmt(f, fmt);
582} 581}
583 582
584static int vidioc_s_fmt(struct m2mtest_ctx *ctx, struct v4l2_format *f) 583static int vidioc_s_fmt(struct vim2m_ctx *ctx, struct v4l2_format *f)
585{ 584{
586 struct m2mtest_q_data *q_data; 585 struct vim2m_q_data *q_data;
587 struct vb2_queue *vq; 586 struct vb2_queue *vq;
588 587
589 vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type); 588 vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
@@ -627,7 +626,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
627static int vidioc_s_fmt_vid_out(struct file *file, void *priv, 626static int vidioc_s_fmt_vid_out(struct file *file, void *priv,
628 struct v4l2_format *f) 627 struct v4l2_format *f)
629{ 628{
630 struct m2mtest_ctx *ctx = file2ctx(file); 629 struct vim2m_ctx *ctx = file2ctx(file);
631 int ret; 630 int ret;
632 631
633 ret = vidioc_try_fmt_vid_out(file, priv, f); 632 ret = vidioc_try_fmt_vid_out(file, priv, f);
@@ -640,10 +639,10 @@ static int vidioc_s_fmt_vid_out(struct file *file, void *priv,
640 return ret; 639 return ret;
641} 640}
642 641
643static int m2mtest_s_ctrl(struct v4l2_ctrl *ctrl) 642static int vim2m_s_ctrl(struct v4l2_ctrl *ctrl)
644{ 643{
645 struct m2mtest_ctx *ctx = 644 struct vim2m_ctx *ctx =
646 container_of(ctrl->handler, struct m2mtest_ctx, hdl); 645 container_of(ctrl->handler, struct vim2m_ctx, hdl);
647 646
648 switch (ctrl->id) { 647 switch (ctrl->id) {
649 case V4L2_CID_HFLIP: 648 case V4L2_CID_HFLIP:
@@ -676,12 +675,12 @@ static int m2mtest_s_ctrl(struct v4l2_ctrl *ctrl)
676 return 0; 675 return 0;
677} 676}
678 677
679static const struct v4l2_ctrl_ops m2mtest_ctrl_ops = { 678static const struct v4l2_ctrl_ops vim2m_ctrl_ops = {
680 .s_ctrl = m2mtest_s_ctrl, 679 .s_ctrl = vim2m_s_ctrl,
681}; 680};
682 681
683 682
684static const struct v4l2_ioctl_ops m2mtest_ioctl_ops = { 683static const struct v4l2_ioctl_ops vim2m_ioctl_ops = {
685 .vidioc_querycap = vidioc_querycap, 684 .vidioc_querycap = vidioc_querycap,
686 685
687 .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap, 686 .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
@@ -698,6 +697,7 @@ static const struct v4l2_ioctl_ops m2mtest_ioctl_ops = {
698 .vidioc_querybuf = v4l2_m2m_ioctl_querybuf, 697 .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
699 .vidioc_qbuf = v4l2_m2m_ioctl_qbuf, 698 .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
700 .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf, 699 .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
700 .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
701 701
702 .vidioc_streamon = v4l2_m2m_ioctl_streamon, 702 .vidioc_streamon = v4l2_m2m_ioctl_streamon,
703 .vidioc_streamoff = v4l2_m2m_ioctl_streamoff, 703 .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
@@ -711,13 +711,13 @@ static const struct v4l2_ioctl_ops m2mtest_ioctl_ops = {
711 * Queue operations 711 * Queue operations
712 */ 712 */
713 713
714static int m2mtest_queue_setup(struct vb2_queue *vq, 714static int vim2m_queue_setup(struct vb2_queue *vq,
715 const struct v4l2_format *fmt, 715 const struct v4l2_format *fmt,
716 unsigned int *nbuffers, unsigned int *nplanes, 716 unsigned int *nbuffers, unsigned int *nplanes,
717 unsigned int sizes[], void *alloc_ctxs[]) 717 unsigned int sizes[], void *alloc_ctxs[])
718{ 718{
719 struct m2mtest_ctx *ctx = vb2_get_drv_priv(vq); 719 struct vim2m_ctx *ctx = vb2_get_drv_priv(vq);
720 struct m2mtest_q_data *q_data; 720 struct vim2m_q_data *q_data;
721 unsigned int size, count = *nbuffers; 721 unsigned int size, count = *nbuffers;
722 722
723 q_data = get_q_data(ctx, vq->type); 723 q_data = get_q_data(ctx, vq->type);
@@ -741,10 +741,10 @@ static int m2mtest_queue_setup(struct vb2_queue *vq,
741 return 0; 741 return 0;
742} 742}
743 743
744static int m2mtest_buf_prepare(struct vb2_buffer *vb) 744static int vim2m_buf_prepare(struct vb2_buffer *vb)
745{ 745{
746 struct m2mtest_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); 746 struct vim2m_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
747 struct m2mtest_q_data *q_data; 747 struct vim2m_q_data *q_data;
748 748
749 dprintk(ctx->dev, "type: %d\n", vb->vb2_queue->type); 749 dprintk(ctx->dev, "type: %d\n", vb->vb2_queue->type);
750 750
@@ -770,25 +770,25 @@ static int m2mtest_buf_prepare(struct vb2_buffer *vb)
770 return 0; 770 return 0;
771} 771}
772 772
773static void m2mtest_buf_queue(struct vb2_buffer *vb) 773static void vim2m_buf_queue(struct vb2_buffer *vb)
774{ 774{
775 struct m2mtest_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); 775 struct vim2m_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
776 776
777 v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vb); 777 v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vb);
778} 778}
779 779
780static int m2mtest_start_streaming(struct vb2_queue *q, unsigned count) 780static int vim2m_start_streaming(struct vb2_queue *q, unsigned count)
781{ 781{
782 struct m2mtest_ctx *ctx = vb2_get_drv_priv(q); 782 struct vim2m_ctx *ctx = vb2_get_drv_priv(q);
783 struct m2mtest_q_data *q_data = get_q_data(ctx, q->type); 783 struct vim2m_q_data *q_data = get_q_data(ctx, q->type);
784 784
785 q_data->sequence = 0; 785 q_data->sequence = 0;
786 return 0; 786 return 0;
787} 787}
788 788
789static void m2mtest_stop_streaming(struct vb2_queue *q) 789static void vim2m_stop_streaming(struct vb2_queue *q)
790{ 790{
791 struct m2mtest_ctx *ctx = vb2_get_drv_priv(q); 791 struct vim2m_ctx *ctx = vb2_get_drv_priv(q);
792 struct vb2_buffer *vb; 792 struct vb2_buffer *vb;
793 unsigned long flags; 793 unsigned long flags;
794 794
@@ -805,26 +805,26 @@ static void m2mtest_stop_streaming(struct vb2_queue *q)
805 } 805 }
806} 806}
807 807
808static struct vb2_ops m2mtest_qops = { 808static struct vb2_ops vim2m_qops = {
809 .queue_setup = m2mtest_queue_setup, 809 .queue_setup = vim2m_queue_setup,
810 .buf_prepare = m2mtest_buf_prepare, 810 .buf_prepare = vim2m_buf_prepare,
811 .buf_queue = m2mtest_buf_queue, 811 .buf_queue = vim2m_buf_queue,
812 .start_streaming = m2mtest_start_streaming, 812 .start_streaming = vim2m_start_streaming,
813 .stop_streaming = m2mtest_stop_streaming, 813 .stop_streaming = vim2m_stop_streaming,
814 .wait_prepare = vb2_ops_wait_prepare, 814 .wait_prepare = vb2_ops_wait_prepare,
815 .wait_finish = vb2_ops_wait_finish, 815 .wait_finish = vb2_ops_wait_finish,
816}; 816};
817 817
818static int queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq) 818static int queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)
819{ 819{
820 struct m2mtest_ctx *ctx = priv; 820 struct vim2m_ctx *ctx = priv;
821 int ret; 821 int ret;
822 822
823 src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT; 823 src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
824 src_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF; 824 src_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
825 src_vq->drv_priv = ctx; 825 src_vq->drv_priv = ctx;
826 src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer); 826 src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
827 src_vq->ops = &m2mtest_qops; 827 src_vq->ops = &vim2m_qops;
828 src_vq->mem_ops = &vb2_vmalloc_memops; 828 src_vq->mem_ops = &vb2_vmalloc_memops;
829 src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY; 829 src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
830 src_vq->lock = &ctx->dev->dev_mutex; 830 src_vq->lock = &ctx->dev->dev_mutex;
@@ -837,7 +837,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *ds
837 dst_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF; 837 dst_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
838 dst_vq->drv_priv = ctx; 838 dst_vq->drv_priv = ctx;
839 dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer); 839 dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
840 dst_vq->ops = &m2mtest_qops; 840 dst_vq->ops = &vim2m_qops;
841 dst_vq->mem_ops = &vb2_vmalloc_memops; 841 dst_vq->mem_ops = &vb2_vmalloc_memops;
842 dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY; 842 dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
843 dst_vq->lock = &ctx->dev->dev_mutex; 843 dst_vq->lock = &ctx->dev->dev_mutex;
@@ -845,8 +845,8 @@ static int queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *ds
845 return vb2_queue_init(dst_vq); 845 return vb2_queue_init(dst_vq);
846} 846}
847 847
848static const struct v4l2_ctrl_config m2mtest_ctrl_trans_time_msec = { 848static const struct v4l2_ctrl_config vim2m_ctrl_trans_time_msec = {
849 .ops = &m2mtest_ctrl_ops, 849 .ops = &vim2m_ctrl_ops,
850 .id = V4L2_CID_TRANS_TIME_MSEC, 850 .id = V4L2_CID_TRANS_TIME_MSEC,
851 .name = "Transaction Time (msec)", 851 .name = "Transaction Time (msec)",
852 .type = V4L2_CTRL_TYPE_INTEGER, 852 .type = V4L2_CTRL_TYPE_INTEGER,
@@ -856,8 +856,8 @@ static const struct v4l2_ctrl_config m2mtest_ctrl_trans_time_msec = {
856 .step = 1, 856 .step = 1,
857}; 857};
858 858
859static const struct v4l2_ctrl_config m2mtest_ctrl_trans_num_bufs = { 859static const struct v4l2_ctrl_config vim2m_ctrl_trans_num_bufs = {
860 .ops = &m2mtest_ctrl_ops, 860 .ops = &vim2m_ctrl_ops,
861 .id = V4L2_CID_TRANS_NUM_BUFS, 861 .id = V4L2_CID_TRANS_NUM_BUFS,
862 .name = "Buffers Per Transaction", 862 .name = "Buffers Per Transaction",
863 .type = V4L2_CTRL_TYPE_INTEGER, 863 .type = V4L2_CTRL_TYPE_INTEGER,
@@ -870,10 +870,10 @@ static const struct v4l2_ctrl_config m2mtest_ctrl_trans_num_bufs = {
870/* 870/*
871 * File operations 871 * File operations
872 */ 872 */
873static int m2mtest_open(struct file *file) 873static int vim2m_open(struct file *file)
874{ 874{
875 struct m2mtest_dev *dev = video_drvdata(file); 875 struct vim2m_dev *dev = video_drvdata(file);
876 struct m2mtest_ctx *ctx = NULL; 876 struct vim2m_ctx *ctx = NULL;
877 struct v4l2_ctrl_handler *hdl; 877 struct v4l2_ctrl_handler *hdl;
878 int rc = 0; 878 int rc = 0;
879 879
@@ -890,10 +890,10 @@ static int m2mtest_open(struct file *file)
890 ctx->dev = dev; 890 ctx->dev = dev;
891 hdl = &ctx->hdl; 891 hdl = &ctx->hdl;
892 v4l2_ctrl_handler_init(hdl, 4); 892 v4l2_ctrl_handler_init(hdl, 4);
893 v4l2_ctrl_new_std(hdl, &m2mtest_ctrl_ops, V4L2_CID_HFLIP, 0, 1, 1, 0); 893 v4l2_ctrl_new_std(hdl, &vim2m_ctrl_ops, V4L2_CID_HFLIP, 0, 1, 1, 0);
894 v4l2_ctrl_new_std(hdl, &m2mtest_ctrl_ops, V4L2_CID_VFLIP, 0, 1, 1, 0); 894 v4l2_ctrl_new_std(hdl, &vim2m_ctrl_ops, V4L2_CID_VFLIP, 0, 1, 1, 0);
895 v4l2_ctrl_new_custom(hdl, &m2mtest_ctrl_trans_time_msec, NULL); 895 v4l2_ctrl_new_custom(hdl, &vim2m_ctrl_trans_time_msec, NULL);
896 v4l2_ctrl_new_custom(hdl, &m2mtest_ctrl_trans_num_bufs, NULL); 896 v4l2_ctrl_new_custom(hdl, &vim2m_ctrl_trans_num_bufs, NULL);
897 if (hdl->error) { 897 if (hdl->error) {
898 rc = hdl->error; 898 rc = hdl->error;
899 v4l2_ctrl_handler_free(hdl); 899 v4l2_ctrl_handler_free(hdl);
@@ -933,10 +933,10 @@ open_unlock:
933 return rc; 933 return rc;
934} 934}
935 935
936static int m2mtest_release(struct file *file) 936static int vim2m_release(struct file *file)
937{ 937{
938 struct m2mtest_dev *dev = video_drvdata(file); 938 struct vim2m_dev *dev = video_drvdata(file);
939 struct m2mtest_ctx *ctx = file2ctx(file); 939 struct vim2m_ctx *ctx = file2ctx(file);
940 940
941 dprintk(dev, "Releasing instance %p\n", ctx); 941 dprintk(dev, "Releasing instance %p\n", ctx);
942 942
@@ -953,20 +953,20 @@ static int m2mtest_release(struct file *file)
953 return 0; 953 return 0;
954} 954}
955 955
956static const struct v4l2_file_operations m2mtest_fops = { 956static const struct v4l2_file_operations vim2m_fops = {
957 .owner = THIS_MODULE, 957 .owner = THIS_MODULE,
958 .open = m2mtest_open, 958 .open = vim2m_open,
959 .release = m2mtest_release, 959 .release = vim2m_release,
960 .poll = v4l2_m2m_fop_poll, 960 .poll = v4l2_m2m_fop_poll,
961 .unlocked_ioctl = video_ioctl2, 961 .unlocked_ioctl = video_ioctl2,
962 .mmap = v4l2_m2m_fop_mmap, 962 .mmap = v4l2_m2m_fop_mmap,
963}; 963};
964 964
965static struct video_device m2mtest_videodev = { 965static struct video_device vim2m_videodev = {
966 .name = MEM2MEM_NAME, 966 .name = MEM2MEM_NAME,
967 .vfl_dir = VFL_DIR_M2M, 967 .vfl_dir = VFL_DIR_M2M,
968 .fops = &m2mtest_fops, 968 .fops = &vim2m_fops,
969 .ioctl_ops = &m2mtest_ioctl_ops, 969 .ioctl_ops = &vim2m_ioctl_ops,
970 .minor = -1, 970 .minor = -1,
971 .release = video_device_release, 971 .release = video_device_release,
972}; 972};
@@ -977,9 +977,9 @@ static struct v4l2_m2m_ops m2m_ops = {
977 .job_abort = job_abort, 977 .job_abort = job_abort,
978}; 978};
979 979
980static int m2mtest_probe(struct platform_device *pdev) 980static int vim2m_probe(struct platform_device *pdev)
981{ 981{
982 struct m2mtest_dev *dev; 982 struct vim2m_dev *dev;
983 struct video_device *vfd; 983 struct video_device *vfd;
984 int ret; 984 int ret;
985 985
@@ -1003,7 +1003,7 @@ static int m2mtest_probe(struct platform_device *pdev)
1003 goto unreg_dev; 1003 goto unreg_dev;
1004 } 1004 }
1005 1005
1006 *vfd = m2mtest_videodev; 1006 *vfd = vim2m_videodev;
1007 vfd->lock = &dev->dev_mutex; 1007 vfd->lock = &dev->dev_mutex;
1008 vfd->v4l2_dev = &dev->v4l2_dev; 1008 vfd->v4l2_dev = &dev->v4l2_dev;
1009 1009
@@ -1014,7 +1014,7 @@ static int m2mtest_probe(struct platform_device *pdev)
1014 } 1014 }
1015 1015
1016 video_set_drvdata(vfd, dev); 1016 video_set_drvdata(vfd, dev);
1017 snprintf(vfd->name, sizeof(vfd->name), "%s", m2mtest_videodev.name); 1017 snprintf(vfd->name, sizeof(vfd->name), "%s", vim2m_videodev.name);
1018 dev->vfd = vfd; 1018 dev->vfd = vfd;
1019 v4l2_info(&dev->v4l2_dev, 1019 v4l2_info(&dev->v4l2_dev,
1020 "Device registered as /dev/video%d\n", vfd->num); 1020 "Device registered as /dev/video%d\n", vfd->num);
@@ -1042,11 +1042,11 @@ unreg_dev:
1042 return ret; 1042 return ret;
1043} 1043}
1044 1044
1045static int m2mtest_remove(struct platform_device *pdev) 1045static int vim2m_remove(struct platform_device *pdev)
1046{ 1046{
1047 struct m2mtest_dev *dev = platform_get_drvdata(pdev); 1047 struct vim2m_dev *dev = platform_get_drvdata(pdev);
1048 1048
1049 v4l2_info(&dev->v4l2_dev, "Removing " MEM2MEM_TEST_MODULE_NAME); 1049 v4l2_info(&dev->v4l2_dev, "Removing " MEM2MEM_NAME);
1050 v4l2_m2m_release(dev->m2m_dev); 1050 v4l2_m2m_release(dev->m2m_dev);
1051 del_timer_sync(&dev->timer); 1051 del_timer_sync(&dev->timer);
1052 video_unregister_device(dev->vfd); 1052 video_unregister_device(dev->vfd);
@@ -1055,35 +1055,35 @@ static int m2mtest_remove(struct platform_device *pdev)
1055 return 0; 1055 return 0;
1056} 1056}
1057 1057
1058static struct platform_driver m2mtest_pdrv = { 1058static struct platform_driver vim2m_pdrv = {
1059 .probe = m2mtest_probe, 1059 .probe = vim2m_probe,
1060 .remove = m2mtest_remove, 1060 .remove = vim2m_remove,
1061 .driver = { 1061 .driver = {
1062 .name = MEM2MEM_NAME, 1062 .name = MEM2MEM_NAME,
1063 .owner = THIS_MODULE, 1063 .owner = THIS_MODULE,
1064 }, 1064 },
1065}; 1065};
1066 1066
1067static void __exit m2mtest_exit(void) 1067static void __exit vim2m_exit(void)
1068{ 1068{
1069 platform_driver_unregister(&m2mtest_pdrv); 1069 platform_driver_unregister(&vim2m_pdrv);
1070 platform_device_unregister(&m2mtest_pdev); 1070 platform_device_unregister(&vim2m_pdev);
1071} 1071}
1072 1072
1073static int __init m2mtest_init(void) 1073static int __init vim2m_init(void)
1074{ 1074{
1075 int ret; 1075 int ret;
1076 1076
1077 ret = platform_device_register(&m2mtest_pdev); 1077 ret = platform_device_register(&vim2m_pdev);
1078 if (ret) 1078 if (ret)
1079 return ret; 1079 return ret;
1080 1080
1081 ret = platform_driver_register(&m2mtest_pdrv); 1081 ret = platform_driver_register(&vim2m_pdrv);
1082 if (ret) 1082 if (ret)
1083 platform_device_unregister(&m2mtest_pdev); 1083 platform_device_unregister(&vim2m_pdev);
1084 1084
1085 return 0; 1085 return 0;
1086} 1086}
1087 1087
1088module_init(m2mtest_init); 1088module_init(vim2m_init);
1089module_exit(m2mtest_exit); 1089module_exit(vim2m_exit);
diff --git a/drivers/media/platform/vino.c b/drivers/media/platform/vino.c
index 91d44ea16f27..2c85357f774d 100644
--- a/drivers/media/platform/vino.c
+++ b/drivers/media/platform/vino.c
@@ -2932,10 +2932,8 @@ static int vino_querycap(struct file *file, void *__fh,
2932 strcpy(cap->driver, vino_driver_name); 2932 strcpy(cap->driver, vino_driver_name);
2933 strcpy(cap->card, vino_driver_description); 2933 strcpy(cap->card, vino_driver_description);
2934 strcpy(cap->bus_info, vino_bus_name); 2934 strcpy(cap->bus_info, vino_bus_name);
2935 cap->capabilities = 2935 cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
2936 V4L2_CAP_VIDEO_CAPTURE | 2936 cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
2937 V4L2_CAP_STREAMING;
2938 // V4L2_CAP_OVERLAY, V4L2_CAP_READWRITE
2939 return 0; 2937 return 0;
2940} 2938}
2941 2939
diff --git a/drivers/media/platform/vivid/vivid-core.c b/drivers/media/platform/vivid/vivid-core.c
index 686c3c2ad05b..a7e033a5d291 100644
--- a/drivers/media/platform/vivid/vivid-core.c
+++ b/drivers/media/platform/vivid/vivid-core.c
@@ -195,20 +195,6 @@ static const u8 vivid_hdmi_edid[256] = {
195 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd7 195 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd7
196}; 196};
197 197
198void vivid_lock(struct vb2_queue *vq)
199{
200 struct vivid_dev *dev = vb2_get_drv_priv(vq);
201
202 mutex_lock(&dev->mutex);
203}
204
205void vivid_unlock(struct vb2_queue *vq)
206{
207 struct vivid_dev *dev = vb2_get_drv_priv(vq);
208
209 mutex_unlock(&dev->mutex);
210}
211
212static int vidioc_querycap(struct file *file, void *priv, 198static int vidioc_querycap(struct file *file, void *priv,
213 struct v4l2_capability *cap) 199 struct v4l2_capability *cap)
214{ 200{
@@ -586,7 +572,7 @@ static const struct v4l2_ioctl_ops vivid_ioctl_ops = {
586 .vidioc_querybuf = vb2_ioctl_querybuf, 572 .vidioc_querybuf = vb2_ioctl_querybuf,
587 .vidioc_qbuf = vb2_ioctl_qbuf, 573 .vidioc_qbuf = vb2_ioctl_qbuf,
588 .vidioc_dqbuf = vb2_ioctl_dqbuf, 574 .vidioc_dqbuf = vb2_ioctl_dqbuf,
589/* Not yet .vidioc_expbuf = vb2_ioctl_expbuf,*/ 575 .vidioc_expbuf = vb2_ioctl_expbuf,
590 .vidioc_streamon = vb2_ioctl_streamon, 576 .vidioc_streamon = vb2_ioctl_streamon,
591 .vidioc_streamoff = vb2_ioctl_streamoff, 577 .vidioc_streamoff = vb2_ioctl_streamoff,
592 578
@@ -1018,6 +1004,7 @@ static int __init vivid_create_instance(int inst)
1018 q->mem_ops = &vb2_vmalloc_memops; 1004 q->mem_ops = &vb2_vmalloc_memops;
1019 q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; 1005 q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
1020 q->min_buffers_needed = 2; 1006 q->min_buffers_needed = 2;
1007 q->lock = &dev->mutex;
1021 1008
1022 ret = vb2_queue_init(q); 1009 ret = vb2_queue_init(q);
1023 if (ret) 1010 if (ret)
@@ -1036,6 +1023,7 @@ static int __init vivid_create_instance(int inst)
1036 q->mem_ops = &vb2_vmalloc_memops; 1023 q->mem_ops = &vb2_vmalloc_memops;
1037 q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; 1024 q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
1038 q->min_buffers_needed = 2; 1025 q->min_buffers_needed = 2;
1026 q->lock = &dev->mutex;
1039 1027
1040 ret = vb2_queue_init(q); 1028 ret = vb2_queue_init(q);
1041 if (ret) 1029 if (ret)
@@ -1054,6 +1042,7 @@ static int __init vivid_create_instance(int inst)
1054 q->mem_ops = &vb2_vmalloc_memops; 1042 q->mem_ops = &vb2_vmalloc_memops;
1055 q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; 1043 q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
1056 q->min_buffers_needed = 2; 1044 q->min_buffers_needed = 2;
1045 q->lock = &dev->mutex;
1057 1046
1058 ret = vb2_queue_init(q); 1047 ret = vb2_queue_init(q);
1059 if (ret) 1048 if (ret)
@@ -1072,6 +1061,7 @@ static int __init vivid_create_instance(int inst)
1072 q->mem_ops = &vb2_vmalloc_memops; 1061 q->mem_ops = &vb2_vmalloc_memops;
1073 q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; 1062 q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
1074 q->min_buffers_needed = 2; 1063 q->min_buffers_needed = 2;
1064 q->lock = &dev->mutex;
1075 1065
1076 ret = vb2_queue_init(q); 1066 ret = vb2_queue_init(q);
1077 if (ret) 1067 if (ret)
@@ -1089,6 +1079,7 @@ static int __init vivid_create_instance(int inst)
1089 q->mem_ops = &vb2_vmalloc_memops; 1079 q->mem_ops = &vb2_vmalloc_memops;
1090 q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; 1080 q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
1091 q->min_buffers_needed = 8; 1081 q->min_buffers_needed = 8;
1082 q->lock = &dev->mutex;
1092 1083
1093 ret = vb2_queue_init(q); 1084 ret = vb2_queue_init(q);
1094 if (ret) 1085 if (ret)
diff --git a/drivers/media/platform/vivid/vivid-core.h b/drivers/media/platform/vivid/vivid-core.h
index 811c286491a5..4b497df4b6a4 100644
--- a/drivers/media/platform/vivid/vivid-core.h
+++ b/drivers/media/platform/vivid/vivid-core.h
@@ -116,6 +116,17 @@ enum vivid_signal_mode {
116 CUSTOM_DV_TIMINGS, 116 CUSTOM_DV_TIMINGS,
117}; 117};
118 118
119enum vivid_colorspace {
120 VIVID_CS_170M,
121 VIVID_CS_709,
122 VIVID_CS_SRGB,
123 VIVID_CS_ADOBERGB,
124 VIVID_CS_2020,
125 VIVID_CS_240M,
126 VIVID_CS_SYS_M,
127 VIVID_CS_SYS_BG,
128};
129
119#define VIVID_INVALID_SIGNAL(mode) \ 130#define VIVID_INVALID_SIGNAL(mode) \
120 ((mode) == NO_SIGNAL || (mode) == NO_LOCK || (mode) == OUT_OF_RANGE) 131 ((mode) == NO_SIGNAL || (mode) == NO_LOCK || (mode) == OUT_OF_RANGE)
121 132
@@ -318,6 +329,8 @@ struct vivid_dev {
318 v4l2_std_id std_out; 329 v4l2_std_id std_out;
319 struct v4l2_dv_timings dv_timings_out; 330 struct v4l2_dv_timings dv_timings_out;
320 u32 colorspace_out; 331 u32 colorspace_out;
332 u32 ycbcr_enc_out;
333 u32 quantization_out;
321 u32 service_set_out; 334 u32 service_set_out;
322 u32 bytesperline_out[2]; 335 u32 bytesperline_out[2];
323 unsigned tv_field_out; 336 unsigned tv_field_out;
@@ -514,7 +527,4 @@ static inline bool vivid_is_hdmi_out(const struct vivid_dev *dev)
514 return dev->output_type[dev->output] == HDMI; 527 return dev->output_type[dev->output] == HDMI;
515} 528}
516 529
517void vivid_lock(struct vb2_queue *vq);
518void vivid_unlock(struct vb2_queue *vq);
519
520#endif 530#endif
diff --git a/drivers/media/platform/vivid/vivid-ctrls.c b/drivers/media/platform/vivid/vivid-ctrls.c
index d5cbf0038f24..857e7866e8bc 100644
--- a/drivers/media/platform/vivid/vivid-ctrls.c
+++ b/drivers/media/platform/vivid/vivid-ctrls.c
@@ -40,6 +40,9 @@
40#define VIVID_CID_STRING (VIVID_CID_CUSTOM_BASE + 5) 40#define VIVID_CID_STRING (VIVID_CID_CUSTOM_BASE + 5)
41#define VIVID_CID_BITMASK (VIVID_CID_CUSTOM_BASE + 6) 41#define VIVID_CID_BITMASK (VIVID_CID_CUSTOM_BASE + 6)
42#define VIVID_CID_INTMENU (VIVID_CID_CUSTOM_BASE + 7) 42#define VIVID_CID_INTMENU (VIVID_CID_CUSTOM_BASE + 7)
43#define VIVID_CID_U32_ARRAY (VIVID_CID_CUSTOM_BASE + 8)
44#define VIVID_CID_U16_MATRIX (VIVID_CID_CUSTOM_BASE + 9)
45#define VIVID_CID_U8_4D_ARRAY (VIVID_CID_CUSTOM_BASE + 10)
43 46
44#define VIVID_CID_VIVID_BASE (0x00f00000 | 0xf000) 47#define VIVID_CID_VIVID_BASE (0x00f00000 | 0xf000)
45#define VIVID_CID_VIVID_CLASS (0x00f00000 | 1) 48#define VIVID_CID_VIVID_CLASS (0x00f00000 | 1)
@@ -59,19 +62,21 @@
59#define VIVID_CID_DV_TIMINGS_ASPECT_RATIO (VIVID_CID_VIVID_BASE + 23) 62#define VIVID_CID_DV_TIMINGS_ASPECT_RATIO (VIVID_CID_VIVID_BASE + 23)
60#define VIVID_CID_TSTAMP_SRC (VIVID_CID_VIVID_BASE + 24) 63#define VIVID_CID_TSTAMP_SRC (VIVID_CID_VIVID_BASE + 24)
61#define VIVID_CID_COLORSPACE (VIVID_CID_VIVID_BASE + 25) 64#define VIVID_CID_COLORSPACE (VIVID_CID_VIVID_BASE + 25)
62#define VIVID_CID_LIMITED_RGB_RANGE (VIVID_CID_VIVID_BASE + 26) 65#define VIVID_CID_YCBCR_ENC (VIVID_CID_VIVID_BASE + 26)
63#define VIVID_CID_ALPHA_MODE (VIVID_CID_VIVID_BASE + 27) 66#define VIVID_CID_QUANTIZATION (VIVID_CID_VIVID_BASE + 27)
64#define VIVID_CID_HAS_CROP_CAP (VIVID_CID_VIVID_BASE + 28) 67#define VIVID_CID_LIMITED_RGB_RANGE (VIVID_CID_VIVID_BASE + 28)
65#define VIVID_CID_HAS_COMPOSE_CAP (VIVID_CID_VIVID_BASE + 29) 68#define VIVID_CID_ALPHA_MODE (VIVID_CID_VIVID_BASE + 29)
66#define VIVID_CID_HAS_SCALER_CAP (VIVID_CID_VIVID_BASE + 30) 69#define VIVID_CID_HAS_CROP_CAP (VIVID_CID_VIVID_BASE + 30)
67#define VIVID_CID_HAS_CROP_OUT (VIVID_CID_VIVID_BASE + 31) 70#define VIVID_CID_HAS_COMPOSE_CAP (VIVID_CID_VIVID_BASE + 31)
68#define VIVID_CID_HAS_COMPOSE_OUT (VIVID_CID_VIVID_BASE + 32) 71#define VIVID_CID_HAS_SCALER_CAP (VIVID_CID_VIVID_BASE + 32)
69#define VIVID_CID_HAS_SCALER_OUT (VIVID_CID_VIVID_BASE + 33) 72#define VIVID_CID_HAS_CROP_OUT (VIVID_CID_VIVID_BASE + 33)
70#define VIVID_CID_LOOP_VIDEO (VIVID_CID_VIVID_BASE + 34) 73#define VIVID_CID_HAS_COMPOSE_OUT (VIVID_CID_VIVID_BASE + 34)
71#define VIVID_CID_SEQ_WRAP (VIVID_CID_VIVID_BASE + 35) 74#define VIVID_CID_HAS_SCALER_OUT (VIVID_CID_VIVID_BASE + 35)
72#define VIVID_CID_TIME_WRAP (VIVID_CID_VIVID_BASE + 36) 75#define VIVID_CID_LOOP_VIDEO (VIVID_CID_VIVID_BASE + 36)
73#define VIVID_CID_MAX_EDID_BLOCKS (VIVID_CID_VIVID_BASE + 37) 76#define VIVID_CID_SEQ_WRAP (VIVID_CID_VIVID_BASE + 37)
74#define VIVID_CID_PERCENTAGE_FILL (VIVID_CID_VIVID_BASE + 38) 77#define VIVID_CID_TIME_WRAP (VIVID_CID_VIVID_BASE + 38)
78#define VIVID_CID_MAX_EDID_BLOCKS (VIVID_CID_VIVID_BASE + 39)
79#define VIVID_CID_PERCENTAGE_FILL (VIVID_CID_VIVID_BASE + 40)
75 80
76#define VIVID_CID_STD_SIGNAL_MODE (VIVID_CID_VIVID_BASE + 60) 81#define VIVID_CID_STD_SIGNAL_MODE (VIVID_CID_VIVID_BASE + 60)
77#define VIVID_CID_STANDARD (VIVID_CID_VIVID_BASE + 61) 82#define VIVID_CID_STANDARD (VIVID_CID_VIVID_BASE + 61)
@@ -163,6 +168,42 @@ static const struct v4l2_ctrl_config vivid_ctrl_int64 = {
163 .step = 1, 168 .step = 1,
164}; 169};
165 170
171static const struct v4l2_ctrl_config vivid_ctrl_u32_array = {
172 .ops = &vivid_user_gen_ctrl_ops,
173 .id = VIVID_CID_U32_ARRAY,
174 .name = "U32 1 Element Array",
175 .type = V4L2_CTRL_TYPE_U32,
176 .def = 0x18,
177 .min = 0x10,
178 .max = 0x20000,
179 .step = 1,
180 .dims = { 1 },
181};
182
183static const struct v4l2_ctrl_config vivid_ctrl_u16_matrix = {
184 .ops = &vivid_user_gen_ctrl_ops,
185 .id = VIVID_CID_U16_MATRIX,
186 .name = "U16 8x16 Matrix",
187 .type = V4L2_CTRL_TYPE_U16,
188 .def = 0x18,
189 .min = 0x10,
190 .max = 0x2000,
191 .step = 1,
192 .dims = { 8, 16 },
193};
194
195static const struct v4l2_ctrl_config vivid_ctrl_u8_4d_array = {
196 .ops = &vivid_user_gen_ctrl_ops,
197 .id = VIVID_CID_U8_4D_ARRAY,
198 .name = "U8 2x3x4x5 Array",
199 .type = V4L2_CTRL_TYPE_U8,
200 .def = 0x18,
201 .min = 0x10,
202 .max = 0x20,
203 .step = 1,
204 .dims = { 2, 3, 4, 5 },
205};
206
166static const char * const vivid_ctrl_menu_strings[] = { 207static const char * const vivid_ctrl_menu_strings[] = {
167 "Menu Item 0 (Skipped)", 208 "Menu Item 0 (Skipped)",
168 "Menu Item 1", 209 "Menu Item 1",
@@ -294,6 +335,16 @@ static const struct v4l2_ctrl_ops vivid_user_vid_ctrl_ops = {
294 335
295static int vivid_vid_cap_s_ctrl(struct v4l2_ctrl *ctrl) 336static int vivid_vid_cap_s_ctrl(struct v4l2_ctrl *ctrl)
296{ 337{
338 static const u32 colorspaces[] = {
339 V4L2_COLORSPACE_SMPTE170M,
340 V4L2_COLORSPACE_REC709,
341 V4L2_COLORSPACE_SRGB,
342 V4L2_COLORSPACE_ADOBERGB,
343 V4L2_COLORSPACE_BT2020,
344 V4L2_COLORSPACE_SMPTE240M,
345 V4L2_COLORSPACE_470_SYSTEM_M,
346 V4L2_COLORSPACE_470_SYSTEM_BG,
347 };
297 struct vivid_dev *dev = container_of(ctrl->handler, struct vivid_dev, ctrl_hdl_vid_cap); 348 struct vivid_dev *dev = container_of(ctrl->handler, struct vivid_dev, ctrl_hdl_vid_cap);
298 unsigned i; 349 unsigned i;
299 350
@@ -303,7 +354,21 @@ static int vivid_vid_cap_s_ctrl(struct v4l2_ctrl *ctrl)
303 tpg_s_pattern(&dev->tpg, ctrl->val); 354 tpg_s_pattern(&dev->tpg, ctrl->val);
304 break; 355 break;
305 case VIVID_CID_COLORSPACE: 356 case VIVID_CID_COLORSPACE:
306 tpg_s_colorspace(&dev->tpg, ctrl->val); 357 tpg_s_colorspace(&dev->tpg, colorspaces[ctrl->val]);
358 vivid_send_source_change(dev, TV);
359 vivid_send_source_change(dev, SVID);
360 vivid_send_source_change(dev, HDMI);
361 vivid_send_source_change(dev, WEBCAM);
362 break;
363 case VIVID_CID_YCBCR_ENC:
364 tpg_s_ycbcr_enc(&dev->tpg, ctrl->val);
365 vivid_send_source_change(dev, TV);
366 vivid_send_source_change(dev, SVID);
367 vivid_send_source_change(dev, HDMI);
368 vivid_send_source_change(dev, WEBCAM);
369 break;
370 case VIVID_CID_QUANTIZATION:
371 tpg_s_quantization(&dev->tpg, ctrl->val);
307 vivid_send_source_change(dev, TV); 372 vivid_send_source_change(dev, TV);
308 vivid_send_source_change(dev, SVID); 373 vivid_send_source_change(dev, SVID);
309 vivid_send_source_change(dev, HDMI); 374 vivid_send_source_change(dev, HDMI);
@@ -623,15 +688,14 @@ static const struct v4l2_ctrl_config vivid_ctrl_max_edid_blocks = {
623}; 688};
624 689
625static const char * const vivid_ctrl_colorspace_strings[] = { 690static const char * const vivid_ctrl_colorspace_strings[] = {
626 "",
627 "SMPTE 170M", 691 "SMPTE 170M",
628 "SMPTE 240M",
629 "REC 709", 692 "REC 709",
630 "", /* Skip Bt878 entry */ 693 "sRGB",
694 "AdobeRGB",
695 "BT.2020",
696 "SMPTE 240M",
631 "470 System M", 697 "470 System M",
632 "470 System BG", 698 "470 System BG",
633 "", /* Skip JPEG entry */
634 "sRGB",
635 NULL, 699 NULL,
636}; 700};
637 701
@@ -640,13 +704,49 @@ static const struct v4l2_ctrl_config vivid_ctrl_colorspace = {
640 .id = VIVID_CID_COLORSPACE, 704 .id = VIVID_CID_COLORSPACE,
641 .name = "Colorspace", 705 .name = "Colorspace",
642 .type = V4L2_CTRL_TYPE_MENU, 706 .type = V4L2_CTRL_TYPE_MENU,
643 .min = 1, 707 .max = 7,
644 .max = 8, 708 .def = 2,
645 .menu_skip_mask = (1 << 4) | (1 << 7),
646 .def = 8,
647 .qmenu = vivid_ctrl_colorspace_strings, 709 .qmenu = vivid_ctrl_colorspace_strings,
648}; 710};
649 711
712static const char * const vivid_ctrl_ycbcr_enc_strings[] = {
713 "Default",
714 "ITU-R 601",
715 "Rec. 709",
716 "xvYCC 601",
717 "xvYCC 709",
718 "sYCC",
719 "BT.2020 Non-Constant Luminance",
720 "BT.2020 Constant Luminance",
721 "SMPTE 240M",
722 NULL,
723};
724
725static const struct v4l2_ctrl_config vivid_ctrl_ycbcr_enc = {
726 .ops = &vivid_vid_cap_ctrl_ops,
727 .id = VIVID_CID_YCBCR_ENC,
728 .name = "Y'CbCr Encoding",
729 .type = V4L2_CTRL_TYPE_MENU,
730 .max = 8,
731 .qmenu = vivid_ctrl_ycbcr_enc_strings,
732};
733
734static const char * const vivid_ctrl_quantization_strings[] = {
735 "Default",
736 "Full Range",
737 "Limited Range",
738 NULL,
739};
740
741static const struct v4l2_ctrl_config vivid_ctrl_quantization = {
742 .ops = &vivid_vid_cap_ctrl_ops,
743 .id = VIVID_CID_QUANTIZATION,
744 .name = "Quantization",
745 .type = V4L2_CTRL_TYPE_MENU,
746 .max = 2,
747 .qmenu = vivid_ctrl_quantization_strings,
748};
749
650static const struct v4l2_ctrl_config vivid_ctrl_alpha_mode = { 750static const struct v4l2_ctrl_config vivid_ctrl_alpha_mode = {
651 .ops = &vivid_vid_cap_ctrl_ops, 751 .ops = &vivid_vid_cap_ctrl_ops,
652 .id = VIVID_CID_ALPHA_MODE, 752 .id = VIVID_CID_ALPHA_MODE,
@@ -723,8 +823,12 @@ static int vivid_vid_out_s_ctrl(struct v4l2_ctrl *ctrl)
723 dev->colorspace_out = V4L2_COLORSPACE_SMPTE170M; 823 dev->colorspace_out = V4L2_COLORSPACE_SMPTE170M;
724 else 824 else
725 dev->colorspace_out = V4L2_COLORSPACE_REC709; 825 dev->colorspace_out = V4L2_COLORSPACE_REC709;
826 dev->quantization_out = V4L2_QUANTIZATION_DEFAULT;
726 } else { 827 } else {
727 dev->colorspace_out = V4L2_COLORSPACE_SRGB; 828 dev->colorspace_out = V4L2_COLORSPACE_SRGB;
829 dev->quantization_out = dev->dvi_d_out ?
830 V4L2_QUANTIZATION_LIM_RANGE :
831 V4L2_QUANTIZATION_DEFAULT;
728 } 832 }
729 if (dev->loop_video) 833 if (dev->loop_video)
730 vivid_send_source_change(dev, HDMI); 834 vivid_send_source_change(dev, HDMI);
@@ -792,15 +896,15 @@ static int vivid_streaming_s_ctrl(struct v4l2_ctrl *ctrl)
792 dev->start_streaming_error = true; 896 dev->start_streaming_error = true;
793 break; 897 break;
794 case VIVID_CID_QUEUE_ERROR: 898 case VIVID_CID_QUEUE_ERROR:
795 if (dev->vb_vid_cap_q.start_streaming_called) 899 if (vb2_start_streaming_called(&dev->vb_vid_cap_q))
796 vb2_queue_error(&dev->vb_vid_cap_q); 900 vb2_queue_error(&dev->vb_vid_cap_q);
797 if (dev->vb_vbi_cap_q.start_streaming_called) 901 if (vb2_start_streaming_called(&dev->vb_vbi_cap_q))
798 vb2_queue_error(&dev->vb_vbi_cap_q); 902 vb2_queue_error(&dev->vb_vbi_cap_q);
799 if (dev->vb_vid_out_q.start_streaming_called) 903 if (vb2_start_streaming_called(&dev->vb_vid_out_q))
800 vb2_queue_error(&dev->vb_vid_out_q); 904 vb2_queue_error(&dev->vb_vid_out_q);
801 if (dev->vb_vbi_out_q.start_streaming_called) 905 if (vb2_start_streaming_called(&dev->vb_vbi_out_q))
802 vb2_queue_error(&dev->vb_vbi_out_q); 906 vb2_queue_error(&dev->vb_vbi_out_q);
803 if (dev->vb_sdr_cap_q.start_streaming_called) 907 if (vb2_start_streaming_called(&dev->vb_sdr_cap_q))
804 vb2_queue_error(&dev->vb_sdr_cap_q); 908 vb2_queue_error(&dev->vb_sdr_cap_q);
805 break; 909 break;
806 case VIVID_CID_SEQ_WRAP: 910 case VIVID_CID_SEQ_WRAP:
@@ -1222,6 +1326,9 @@ int vivid_create_controls(struct vivid_dev *dev, bool show_ccs_cap,
1222 dev->string = v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_string, NULL); 1326 dev->string = v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_string, NULL);
1223 dev->bitmask = v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_bitmask, NULL); 1327 dev->bitmask = v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_bitmask, NULL);
1224 dev->int_menu = v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_int_menu, NULL); 1328 dev->int_menu = v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_int_menu, NULL);
1329 v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_u32_array, NULL);
1330 v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_u16_matrix, NULL);
1331 v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_u8_4d_array, NULL);
1225 1332
1226 if (dev->has_vid_cap) { 1333 if (dev->has_vid_cap) {
1227 /* Image Processing Controls */ 1334 /* Image Processing Controls */
@@ -1258,6 +1365,8 @@ int vivid_create_controls(struct vivid_dev *dev, bool show_ccs_cap,
1258 v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_tstamp_src, NULL); 1365 v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_tstamp_src, NULL);
1259 dev->colorspace = v4l2_ctrl_new_custom(hdl_vid_cap, 1366 dev->colorspace = v4l2_ctrl_new_custom(hdl_vid_cap,
1260 &vivid_ctrl_colorspace, NULL); 1367 &vivid_ctrl_colorspace, NULL);
1368 v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_ycbcr_enc, NULL);
1369 v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_quantization, NULL);
1261 v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_alpha_mode, NULL); 1370 v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_alpha_mode, NULL);
1262 } 1371 }
1263 1372
diff --git a/drivers/media/platform/vivid/vivid-sdr-cap.c b/drivers/media/platform/vivid/vivid-sdr-cap.c
index 8c5d661cfc49..4af55f18829f 100644
--- a/drivers/media/platform/vivid/vivid-sdr-cap.c
+++ b/drivers/media/platform/vivid/vivid-sdr-cap.c
@@ -297,8 +297,8 @@ const struct vb2_ops vivid_sdr_cap_qops = {
297 .buf_queue = sdr_cap_buf_queue, 297 .buf_queue = sdr_cap_buf_queue,
298 .start_streaming = sdr_cap_start_streaming, 298 .start_streaming = sdr_cap_start_streaming,
299 .stop_streaming = sdr_cap_stop_streaming, 299 .stop_streaming = sdr_cap_stop_streaming,
300 .wait_prepare = vivid_unlock, 300 .wait_prepare = vb2_ops_wait_prepare,
301 .wait_finish = vivid_lock, 301 .wait_finish = vb2_ops_wait_finish,
302}; 302};
303 303
304int vivid_sdr_enum_freq_bands(struct file *file, void *fh, struct v4l2_frequency_band *band) 304int vivid_sdr_enum_freq_bands(struct file *file, void *fh, struct v4l2_frequency_band *band)
diff --git a/drivers/media/platform/vivid/vivid-tpg-colors.c b/drivers/media/platform/vivid/vivid-tpg-colors.c
index 2adddc0ca662..424aa7abc723 100644
--- a/drivers/media/platform/vivid/vivid-tpg-colors.c
+++ b/drivers/media/platform/vivid/vivid-tpg-colors.c
@@ -12,7 +12,7 @@
12 * This source also contains the code used to generate the tpg_csc_colors 12 * This source also contains the code used to generate the tpg_csc_colors
13 * table. Run the following command to compile it: 13 * table. Run the following command to compile it:
14 * 14 *
15 * gcc vivid-colors.c -DCOMPILE_APP -o gen-colors -lm 15 * gcc vivid-tpg-colors.c -DCOMPILE_APP -o gen-colors -lm
16 * 16 *
17 * and run the utility. 17 * and run the utility.
18 * 18 *
@@ -78,22 +78,542 @@ const struct color tpg_colors[TPG_COLOR_MAX] = {
78#ifndef COMPILE_APP 78#ifndef COMPILE_APP
79 79
80/* Generated table */ 80/* Generated table */
81const struct color16 tpg_csc_colors[V4L2_COLORSPACE_SRGB + 1][TPG_COLOR_CSC_BLACK + 1] = { 81const unsigned short tpg_rec709_to_linear[255 * 16 + 1] = {
82 [V4L2_COLORSPACE_SMPTE170M][0] = { 2953, 2939, 2939 }, 82 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3,
83 [V4L2_COLORSPACE_SMPTE170M][1] = { 2954, 2963, 585 }, 83 4, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 6, 7, 7,
84 [V4L2_COLORSPACE_SMPTE170M][2] = { 84, 2967, 2937 }, 84 7, 7, 8, 8, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 10,
85 [V4L2_COLORSPACE_SMPTE170M][3] = { 93, 2990, 575 }, 85 11, 11, 11, 11, 12, 12, 12, 12, 12, 13, 13, 13, 13, 14, 14, 14,
86 [V4L2_COLORSPACE_SMPTE170M][4] = { 3030, 259, 2933 }, 86 14, 14, 15, 15, 15, 15, 16, 16, 16, 16, 16, 17, 17, 17, 17, 18,
87 [V4L2_COLORSPACE_SMPTE170M][5] = { 3031, 406, 557 }, 87 18, 18, 18, 18, 19, 19, 19, 19, 20, 20, 20, 20, 20, 21, 21, 21,
88 [V4L2_COLORSPACE_SMPTE170M][6] = { 544, 428, 2931 }, 88 21, 22, 22, 22, 22, 22, 23, 23, 23, 23, 24, 24, 24, 24, 24, 25,
89 [V4L2_COLORSPACE_SMPTE170M][7] = { 551, 547, 547 }, 89 25, 25, 25, 26, 26, 26, 26, 26, 27, 27, 27, 27, 28, 28, 28, 28,
90 28, 29, 29, 29, 29, 30, 30, 30, 30, 30, 31, 31, 31, 31, 32, 32,
91 32, 32, 32, 33, 33, 33, 33, 34, 34, 34, 34, 34, 35, 35, 35, 35,
92 36, 36, 36, 36, 36, 37, 37, 37, 37, 38, 38, 38, 38, 38, 39, 39,
93 39, 39, 40, 40, 40, 40, 40, 41, 41, 41, 41, 42, 42, 42, 42, 42,
94 43, 43, 43, 43, 44, 44, 44, 44, 44, 45, 45, 45, 45, 46, 46, 46,
95 46, 46, 47, 47, 47, 47, 48, 48, 48, 48, 48, 49, 49, 49, 49, 50,
96 50, 50, 50, 50, 51, 51, 51, 51, 52, 52, 52, 52, 52, 53, 53, 53,
97 53, 54, 54, 54, 54, 54, 55, 55, 55, 55, 56, 56, 56, 56, 56, 57,
98 57, 57, 57, 58, 58, 58, 58, 58, 59, 59, 59, 59, 60, 60, 60, 60,
99 60, 61, 61, 61, 61, 62, 62, 62, 62, 62, 63, 63, 63, 63, 64, 64,
100 64, 64, 64, 65, 65, 65, 65, 66, 66, 66, 66, 66, 67, 67, 67, 67,
101 68, 68, 68, 68, 68, 69, 69, 69, 69, 70, 70, 70, 70, 70, 71, 71,
102 71, 71, 72, 72, 72, 72, 72, 73, 73, 73, 73, 73, 74, 74, 74, 74,
103 74, 75, 75, 75, 75, 76, 76, 76, 76, 76, 77, 77, 77, 77, 78, 78,
104 78, 78, 79, 79, 79, 79, 79, 80, 80, 80, 80, 81, 81, 81, 81, 82,
105 82, 82, 82, 82, 83, 83, 83, 83, 84, 84, 84, 84, 85, 85, 85, 85,
106 86, 86, 86, 86, 87, 87, 87, 87, 88, 88, 88, 88, 89, 89, 89, 89,
107 90, 90, 90, 90, 91, 91, 91, 91, 92, 92, 92, 92, 93, 93, 93, 93,
108 94, 94, 94, 94, 95, 95, 95, 95, 96, 96, 96, 96, 97, 97, 97, 97,
109 98, 98, 98, 98, 99, 99, 99, 99, 100, 100, 100, 101, 101, 101, 101, 102,
110 102, 102, 102, 103, 103, 103, 103, 104, 104, 104, 105, 105, 105, 105, 106, 106,
111 106, 106, 107, 107, 107, 107, 108, 108, 108, 109, 109, 109, 109, 110, 110, 110,
112 111, 111, 111, 111, 112, 112, 112, 112, 113, 113, 113, 114, 114, 114, 114, 115,
113 115, 115, 116, 116, 116, 116, 117, 117, 117, 118, 118, 118, 118, 119, 119, 119,
114 120, 120, 120, 120, 121, 121, 121, 122, 122, 122, 123, 123, 123, 123, 124, 124,
115 124, 125, 125, 125, 125, 126, 126, 126, 127, 127, 127, 128, 128, 128, 128, 129,
116 129, 129, 130, 130, 130, 131, 131, 131, 132, 132, 132, 132, 133, 133, 133, 134,
117 134, 134, 135, 135, 135, 136, 136, 136, 136, 137, 137, 137, 138, 138, 138, 139,
118 139, 139, 140, 140, 140, 141, 141, 141, 142, 142, 142, 142, 143, 143, 143, 144,
119 144, 144, 145, 145, 145, 146, 146, 146, 147, 147, 147, 148, 148, 148, 149, 149,
120 149, 150, 150, 150, 151, 151, 151, 152, 152, 152, 153, 153, 153, 154, 154, 154,
121 155, 155, 155, 156, 156, 156, 157, 157, 157, 158, 158, 158, 159, 159, 159, 160,
122 160, 160, 161, 161, 161, 162, 162, 162, 163, 163, 163, 164, 164, 164, 165, 165,
123 165, 166, 166, 167, 167, 167, 168, 168, 168, 169, 169, 169, 170, 170, 170, 171,
124 171, 171, 172, 172, 172, 173, 173, 174, 174, 174, 175, 175, 175, 176, 176, 176,
125 177, 177, 177, 178, 178, 179, 179, 179, 180, 180, 180, 181, 181, 181, 182, 182,
126 183, 183, 183, 184, 184, 184, 185, 185, 186, 186, 186, 187, 187, 187, 188, 188,
127 188, 189, 189, 190, 190, 190, 191, 191, 191, 192, 192, 193, 193, 193, 194, 194,
128 194, 195, 195, 196, 196, 196, 197, 197, 198, 198, 198, 199, 199, 199, 200, 200,
129 201, 201, 201, 202, 202, 203, 203, 203, 204, 204, 204, 205, 205, 206, 206, 206,
130 207, 207, 208, 208, 208, 209, 209, 210, 210, 210, 211, 211, 212, 212, 212, 213,
131 213, 214, 214, 214, 215, 215, 216, 216, 216, 217, 217, 218, 218, 218, 219, 219,
132 220, 220, 220, 221, 221, 222, 222, 222, 223, 223, 224, 224, 224, 225, 225, 226,
133 226, 227, 227, 227, 228, 228, 229, 229, 229, 230, 230, 231, 231, 232, 232, 232,
134 233, 233, 234, 234, 234, 235, 235, 236, 236, 237, 237, 237, 238, 238, 239, 239,
135 240, 240, 240, 241, 241, 242, 242, 243, 243, 243, 244, 244, 245, 245, 246, 246,
136 246, 247, 247, 248, 248, 249, 249, 249, 250, 250, 251, 251, 252, 252, 252, 253,
137 253, 254, 254, 255, 255, 256, 256, 256, 257, 257, 258, 258, 259, 259, 260, 260,
138 260, 261, 261, 262, 262, 263, 263, 264, 264, 264, 265, 265, 266, 266, 267, 267,
139 268, 268, 269, 269, 269, 270, 270, 271, 271, 272, 272, 273, 273, 274, 274, 274,
140 275, 275, 276, 276, 277, 277, 278, 278, 279, 279, 279, 280, 280, 281, 281, 282,
141 282, 283, 283, 284, 284, 285, 285, 286, 286, 286, 287, 287, 288, 288, 289, 289,
142 290, 290, 291, 291, 292, 292, 293, 293, 294, 294, 295, 295, 295, 296, 296, 297,
143 297, 298, 298, 299, 299, 300, 300, 301, 301, 302, 302, 303, 303, 304, 304, 305,
144 305, 306, 306, 307, 307, 308, 308, 309, 309, 309, 310, 310, 311, 311, 312, 312,
145 313, 313, 314, 314, 315, 315, 316, 316, 317, 317, 318, 318, 319, 319, 320, 320,
146 321, 321, 322, 322, 323, 323, 324, 324, 325, 325, 326, 326, 327, 327, 328, 328,
147 329, 329, 330, 330, 331, 331, 332, 332, 333, 333, 334, 335, 335, 336, 336, 337,
148 337, 338, 338, 339, 339, 340, 340, 341, 341, 342, 342, 343, 343, 344, 344, 345,
149 345, 346, 346, 347, 347, 348, 348, 349, 349, 350, 351, 351, 352, 352, 353, 353,
150 354, 354, 355, 355, 356, 356, 357, 357, 358, 358, 359, 360, 360, 361, 361, 362,
151 362, 363, 363, 364, 364, 365, 365, 366, 366, 367, 368, 368, 369, 369, 370, 370,
152 371, 371, 372, 372, 373, 373, 374, 375, 375, 376, 376, 377, 377, 378, 378, 379,
153 379, 380, 381, 381, 382, 382, 383, 383, 384, 384, 385, 386, 386, 387, 387, 388,
154 388, 389, 389, 390, 391, 391, 392, 392, 393, 393, 394, 394, 395, 396, 396, 397,
155 397, 398, 398, 399, 399, 400, 401, 401, 402, 402, 403, 403, 404, 405, 405, 406,
156 406, 407, 407, 408, 409, 409, 410, 410, 411, 411, 412, 413, 413, 414, 414, 415,
157 415, 416, 417, 417, 418, 418, 419, 419, 420, 421, 421, 422, 422, 423, 424, 424,
158 425, 425, 426, 426, 427, 428, 428, 429, 429, 430, 431, 431, 432, 432, 433, 433,
159 434, 435, 435, 436, 436, 437, 438, 438, 439, 439, 440, 441, 441, 442, 442, 443,
160 444, 444, 445, 445, 446, 447, 447, 448, 448, 449, 450, 450, 451, 451, 452, 453,
161 453, 454, 454, 455, 456, 456, 457, 457, 458, 459, 459, 460, 460, 461, 462, 462,
162 463, 463, 464, 465, 465, 466, 467, 467, 468, 468, 469, 470, 470, 471, 471, 472,
163 473, 473, 474, 475, 475, 476, 476, 477, 478, 478, 479, 480, 480, 481, 481, 482,
164 483, 483, 484, 485, 485, 486, 486, 487, 488, 488, 489, 490, 490, 491, 491, 492,
165 493, 493, 494, 495, 495, 496, 497, 497, 498, 498, 499, 500, 500, 501, 502, 502,
166 503, 504, 504, 505, 505, 506, 507, 507, 508, 509, 509, 510, 511, 511, 512, 513,
167 513, 514, 514, 515, 516, 516, 517, 518, 518, 519, 520, 520, 521, 522, 522, 523,
168 524, 524, 525, 526, 526, 527, 528, 528, 529, 529, 530, 531, 531, 532, 533, 533,
169 534, 535, 535, 536, 537, 537, 538, 539, 539, 540, 541, 541, 542, 543, 543, 544,
170 545, 545, 546, 547, 547, 548, 549, 549, 550, 551, 551, 552, 553, 553, 554, 555,
171 555, 556, 557, 557, 558, 559, 560, 560, 561, 562, 562, 563, 564, 564, 565, 566,
172 566, 567, 568, 568, 569, 570, 570, 571, 572, 572, 573, 574, 575, 575, 576, 577,
173 577, 578, 579, 579, 580, 581, 581, 582, 583, 584, 584, 585, 586, 586, 587, 588,
174 588, 589, 590, 590, 591, 592, 593, 593, 594, 595, 595, 596, 597, 598, 598, 599,
175 600, 600, 601, 602, 602, 603, 604, 605, 605, 606, 607, 607, 608, 609, 610, 610,
176 611, 612, 612, 613, 614, 615, 615, 616, 617, 617, 618, 619, 620, 620, 621, 622,
177 622, 623, 624, 625, 625, 626, 627, 627, 628, 629, 630, 630, 631, 632, 632, 633,
178 634, 635, 635, 636, 637, 638, 638, 639, 640, 640, 641, 642, 643, 643, 644, 645,
179 646, 646, 647, 648, 649, 649, 650, 651, 652, 652, 653, 654, 654, 655, 656, 657,
180 657, 658, 659, 660, 660, 661, 662, 663, 663, 664, 665, 666, 666, 667, 668, 669,
181 669, 670, 671, 672, 672, 673, 674, 675, 675, 676, 677, 678, 678, 679, 680, 681,
182 681, 682, 683, 684, 684, 685, 686, 687, 687, 688, 689, 690, 690, 691, 692, 693,
183 694, 694, 695, 696, 697, 697, 698, 699, 700, 700, 701, 702, 703, 703, 704, 705,
184 706, 707, 707, 708, 709, 710, 710, 711, 712, 713, 714, 714, 715, 716, 717, 717,
185 718, 719, 720, 720, 721, 722, 723, 724, 724, 725, 726, 727, 728, 728, 729, 730,
186 731, 731, 732, 733, 734, 735, 735, 736, 737, 738, 739, 739, 740, 741, 742, 742,
187 743, 744, 745, 746, 746, 747, 748, 749, 750, 750, 751, 752, 753, 754, 754, 755,
188 756, 757, 758, 758, 759, 760, 761, 762, 762, 763, 764, 765, 766, 766, 767, 768,
189 769, 770, 771, 771, 772, 773, 774, 775, 775, 776, 777, 778, 779, 779, 780, 781,
190 782, 783, 783, 784, 785, 786, 787, 788, 788, 789, 790, 791, 792, 793, 793, 794,
191 795, 796, 797, 797, 798, 799, 800, 801, 802, 802, 803, 804, 805, 806, 807, 807,
192 808, 809, 810, 811, 812, 812, 813, 814, 815, 816, 817, 817, 818, 819, 820, 821,
193 822, 822, 823, 824, 825, 826, 827, 827, 828, 829, 830, 831, 832, 832, 833, 834,
194 835, 836, 837, 838, 838, 839, 840, 841, 842, 843, 843, 844, 845, 846, 847, 848,
195 849, 849, 850, 851, 852, 853, 854, 855, 855, 856, 857, 858, 859, 860, 861, 861,
196 862, 863, 864, 865, 866, 867, 867, 868, 869, 870, 871, 872, 873, 873, 874, 875,
197 876, 877, 878, 879, 880, 880, 881, 882, 883, 884, 885, 886, 887, 887, 888, 889,
198 890, 891, 892, 893, 894, 894, 895, 896, 897, 898, 899, 900, 901, 901, 902, 903,
199 904, 905, 906, 907, 908, 909, 909, 910, 911, 912, 913, 914, 915, 916, 916, 917,
200 918, 919, 920, 921, 922, 923, 924, 925, 925, 926, 927, 928, 929, 930, 931, 932,
201 933, 933, 934, 935, 936, 937, 938, 939, 940, 941, 942, 942, 943, 944, 945, 946,
202 947, 948, 949, 950, 951, 952, 952, 953, 954, 955, 956, 957, 958, 959, 960, 961,
203 962, 962, 963, 964, 965, 966, 967, 968, 969, 970, 971, 972, 973, 973, 974, 975,
204 976, 977, 978, 979, 980, 981, 982, 983, 984, 985, 985, 986, 987, 988, 989, 990,
205 991, 992, 993, 994, 995, 996, 997, 998, 998, 999, 1000, 1001, 1002, 1003, 1004, 1005,
206 1006, 1007, 1008, 1009, 1010, 1011, 1012, 1013, 1013, 1014, 1015, 1016, 1017, 1018, 1019, 1020,
207 1021, 1022, 1023, 1024, 1025, 1026, 1027, 1028, 1029, 1030, 1030, 1031, 1032, 1033, 1034, 1035,
208 1036, 1037, 1038, 1039, 1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047, 1048, 1049, 1050, 1050,
209 1051, 1052, 1053, 1054, 1055, 1056, 1057, 1058, 1059, 1060, 1061, 1062, 1063, 1064, 1065, 1066,
210 1067, 1068, 1069, 1070, 1071, 1072, 1073, 1074, 1075, 1076, 1077, 1078, 1078, 1079, 1080, 1081,
211 1082, 1083, 1084, 1085, 1086, 1087, 1088, 1089, 1090, 1091, 1092, 1093, 1094, 1095, 1096, 1097,
212 1098, 1099, 1100, 1101, 1102, 1103, 1104, 1105, 1106, 1107, 1108, 1109, 1110, 1111, 1112, 1113,
213 1114, 1115, 1116, 1117, 1118, 1119, 1120, 1121, 1122, 1123, 1124, 1125, 1126, 1127, 1128, 1129,
214 1130, 1131, 1132, 1133, 1134, 1135, 1136, 1137, 1138, 1139, 1140, 1141, 1142, 1143, 1144, 1145,
215 1146, 1147, 1148, 1149, 1150, 1151, 1152, 1153, 1154, 1155, 1156, 1157, 1158, 1159, 1160, 1161,
216 1162, 1163, 1164, 1165, 1166, 1167, 1168, 1169, 1170, 1171, 1172, 1173, 1174, 1175, 1176, 1177,
217 1178, 1179, 1180, 1181, 1182, 1183, 1184, 1185, 1186, 1187, 1188, 1189, 1190, 1191, 1193, 1194,
218 1195, 1196, 1197, 1198, 1199, 1200, 1201, 1202, 1203, 1204, 1205, 1206, 1207, 1208, 1209, 1210,
219 1211, 1212, 1213, 1214, 1215, 1216, 1217, 1218, 1219, 1220, 1221, 1223, 1224, 1225, 1226, 1227,
220 1228, 1229, 1230, 1231, 1232, 1233, 1234, 1235, 1236, 1237, 1238, 1239, 1240, 1241, 1242, 1243,
221 1245, 1246, 1247, 1248, 1249, 1250, 1251, 1252, 1253, 1254, 1255, 1256, 1257, 1258, 1259, 1260,
222 1261, 1262, 1264, 1265, 1266, 1267, 1268, 1269, 1270, 1271, 1272, 1273, 1274, 1275, 1276, 1277,
223 1278, 1280, 1281, 1282, 1283, 1284, 1285, 1286, 1287, 1288, 1289, 1290, 1291, 1292, 1293, 1295,
224 1296, 1297, 1298, 1299, 1300, 1301, 1302, 1303, 1304, 1305, 1306, 1307, 1309, 1310, 1311, 1312,
225 1313, 1314, 1315, 1316, 1317, 1318, 1319, 1320, 1322, 1323, 1324, 1325, 1326, 1327, 1328, 1329,
226 1330, 1331, 1332, 1334, 1335, 1336, 1337, 1338, 1339, 1340, 1341, 1342, 1343, 1345, 1346, 1347,
227 1348, 1349, 1350, 1351, 1352, 1353, 1354, 1356, 1357, 1358, 1359, 1360, 1361, 1362, 1363, 1364,
228 1365, 1367, 1368, 1369, 1370, 1371, 1372, 1373, 1374, 1375, 1377, 1378, 1379, 1380, 1381, 1382,
229 1383, 1384, 1385, 1387, 1388, 1389, 1390, 1391, 1392, 1393, 1394, 1396, 1397, 1398, 1399, 1400,
230 1401, 1402, 1403, 1405, 1406, 1407, 1408, 1409, 1410, 1411, 1412, 1414, 1415, 1416, 1417, 1418,
231 1419, 1420, 1421, 1423, 1424, 1425, 1426, 1427, 1428, 1429, 1431, 1432, 1433, 1434, 1435, 1436,
232 1437, 1439, 1440, 1441, 1442, 1443, 1444, 1445, 1446, 1448, 1449, 1450, 1451, 1452, 1453, 1455,
233 1456, 1457, 1458, 1459, 1460, 1461, 1463, 1464, 1465, 1466, 1467, 1468, 1469, 1471, 1472, 1473,
234 1474, 1475, 1476, 1478, 1479, 1480, 1481, 1482, 1483, 1484, 1486, 1487, 1488, 1489, 1490, 1491,
235 1493, 1494, 1495, 1496, 1497, 1498, 1500, 1501, 1502, 1503, 1504, 1505, 1507, 1508, 1509, 1510,
236 1511, 1512, 1514, 1515, 1516, 1517, 1518, 1519, 1521, 1522, 1523, 1524, 1525, 1527, 1528, 1529,
237 1530, 1531, 1532, 1534, 1535, 1536, 1537, 1538, 1540, 1541, 1542, 1543, 1544, 1545, 1547, 1548,
238 1549, 1550, 1551, 1553, 1554, 1555, 1556, 1557, 1559, 1560, 1561, 1562, 1563, 1564, 1566, 1567,
239 1568, 1569, 1570, 1572, 1573, 1574, 1575, 1576, 1578, 1579, 1580, 1581, 1582, 1584, 1585, 1586,
240 1587, 1588, 1590, 1591, 1592, 1593, 1594, 1596, 1597, 1598, 1599, 1601, 1602, 1603, 1604, 1605,
241 1607, 1608, 1609, 1610, 1611, 1613, 1614, 1615, 1616, 1617, 1619, 1620, 1621, 1622, 1624, 1625,
242 1626, 1627, 1628, 1630, 1631, 1632, 1633, 1635, 1636, 1637, 1638, 1639, 1641, 1642, 1643, 1644,
243 1646, 1647, 1648, 1649, 1650, 1652, 1653, 1654, 1655, 1657, 1658, 1659, 1660, 1662, 1663, 1664,
244 1665, 1667, 1668, 1669, 1670, 1671, 1673, 1674, 1675, 1676, 1678, 1679, 1680, 1681, 1683, 1684,
245 1685, 1686, 1688, 1689, 1690, 1691, 1693, 1694, 1695, 1696, 1698, 1699, 1700, 1701, 1703, 1704,
246 1705, 1706, 1708, 1709, 1710, 1711, 1713, 1714, 1715, 1716, 1718, 1719, 1720, 1721, 1723, 1724,
247 1725, 1726, 1728, 1729, 1730, 1731, 1733, 1734, 1735, 1737, 1738, 1739, 1740, 1742, 1743, 1744,
248 1745, 1747, 1748, 1749, 1750, 1752, 1753, 1754, 1756, 1757, 1758, 1759, 1761, 1762, 1763, 1764,
249 1766, 1767, 1768, 1770, 1771, 1772, 1773, 1775, 1776, 1777, 1778, 1780, 1781, 1782, 1784, 1785,
250 1786, 1787, 1789, 1790, 1791, 1793, 1794, 1795, 1796, 1798, 1799, 1800, 1802, 1803, 1804, 1806,
251 1807, 1808, 1809, 1811, 1812, 1813, 1815, 1816, 1817, 1818, 1820, 1821, 1822, 1824, 1825, 1826,
252 1828, 1829, 1830, 1831, 1833, 1834, 1835, 1837, 1838, 1839, 1841, 1842, 1843, 1844, 1846, 1847,
253 1848, 1850, 1851, 1852, 1854, 1855, 1856, 1858, 1859, 1860, 1862, 1863, 1864, 1865, 1867, 1868,
254 1869, 1871, 1872, 1873, 1875, 1876, 1877, 1879, 1880, 1881, 1883, 1884, 1885, 1887, 1888, 1889,
255 1891, 1892, 1893, 1894, 1896, 1897, 1898, 1900, 1901, 1902, 1904, 1905, 1906, 1908, 1909, 1910,
256 1912, 1913, 1914, 1916, 1917, 1918, 1920, 1921, 1922, 1924, 1925, 1926, 1928, 1929, 1930, 1932,
257 1933, 1935, 1936, 1937, 1939, 1940, 1941, 1943, 1944, 1945, 1947, 1948, 1949, 1951, 1952, 1953,
258 1955, 1956, 1957, 1959, 1960, 1961, 1963, 1964, 1965, 1967, 1968, 1970, 1971, 1972, 1974, 1975,
259 1976, 1978, 1979, 1980, 1982, 1983, 1984, 1986, 1987, 1989, 1990, 1991, 1993, 1994, 1995, 1997,
260 1998, 1999, 2001, 2002, 2004, 2005, 2006, 2008, 2009, 2010, 2012, 2013, 2015, 2016, 2017, 2019,
261 2020, 2021, 2023, 2024, 2026, 2027, 2028, 2030, 2031, 2032, 2034, 2035, 2037, 2038, 2039, 2041,
262 2042, 2043, 2045, 2046, 2048, 2049, 2050, 2052, 2053, 2055, 2056, 2057, 2059, 2060, 2061, 2063,
263 2064, 2066, 2067, 2068, 2070, 2071, 2073, 2074, 2075, 2077, 2078, 2080, 2081, 2082, 2084, 2085,
264 2087, 2088, 2089, 2091, 2092, 2094, 2095, 2096, 2098, 2099, 2101, 2102, 2103, 2105, 2106, 2108,
265 2109, 2110, 2112, 2113, 2115, 2116, 2117, 2119, 2120, 2122, 2123, 2124, 2126, 2127, 2129, 2130,
266 2132, 2133, 2134, 2136, 2137, 2139, 2140, 2141, 2143, 2144, 2146, 2147, 2149, 2150, 2151, 2153,
267 2154, 2156, 2157, 2159, 2160, 2161, 2163, 2164, 2166, 2167, 2169, 2170, 2171, 2173, 2174, 2176,
268 2177, 2179, 2180, 2181, 2183, 2184, 2186, 2187, 2189, 2190, 2191, 2193, 2194, 2196, 2197, 2199,
269 2200, 2202, 2203, 2204, 2206, 2207, 2209, 2210, 2212, 2213, 2214, 2216, 2217, 2219, 2220, 2222,
270 2223, 2225, 2226, 2228, 2229, 2230, 2232, 2233, 2235, 2236, 2238, 2239, 2241, 2242, 2243, 2245,
271 2246, 2248, 2249, 2251, 2252, 2254, 2255, 2257, 2258, 2260, 2261, 2262, 2264, 2265, 2267, 2268,
272 2270, 2271, 2273, 2274, 2276, 2277, 2279, 2280, 2282, 2283, 2284, 2286, 2287, 2289, 2290, 2292,
273 2293, 2295, 2296, 2298, 2299, 2301, 2302, 2304, 2305, 2307, 2308, 2310, 2311, 2312, 2314, 2315,
274 2317, 2318, 2320, 2321, 2323, 2324, 2326, 2327, 2329, 2330, 2332, 2333, 2335, 2336, 2338, 2339,
275 2341, 2342, 2344, 2345, 2347, 2348, 2350, 2351, 2353, 2354, 2356, 2357, 2359, 2360, 2362, 2363,
276 2365, 2366, 2368, 2369, 2371, 2372, 2374, 2375, 2377, 2378, 2380, 2381, 2383, 2384, 2386, 2387,
277 2389, 2390, 2392, 2393, 2395, 2396, 2398, 2399, 2401, 2402, 2404, 2405, 2407, 2408, 2410, 2411,
278 2413, 2414, 2416, 2417, 2419, 2420, 2422, 2423, 2425, 2426, 2428, 2429, 2431, 2433, 2434, 2436,
279 2437, 2439, 2440, 2442, 2443, 2445, 2446, 2448, 2449, 2451, 2452, 2454, 2455, 2457, 2458, 2460,
280 2462, 2463, 2465, 2466, 2468, 2469, 2471, 2472, 2474, 2475, 2477, 2478, 2480, 2481, 2483, 2485,
281 2486, 2488, 2489, 2491, 2492, 2494, 2495, 2497, 2498, 2500, 2502, 2503, 2505, 2506, 2508, 2509,
282 2511, 2512, 2514, 2515, 2517, 2519, 2520, 2522, 2523, 2525, 2526, 2528, 2529, 2531, 2533, 2534,
283 2536, 2537, 2539, 2540, 2542, 2543, 2545, 2547, 2548, 2550, 2551, 2553, 2554, 2556, 2557, 2559,
284 2561, 2562, 2564, 2565, 2567, 2568, 2570, 2572, 2573, 2575, 2576, 2578, 2579, 2581, 2583, 2584,
285 2586, 2587, 2589, 2590, 2592, 2594, 2595, 2597, 2598, 2600, 2601, 2603, 2605, 2606, 2608, 2609,
286 2611, 2613, 2614, 2616, 2617, 2619, 2620, 2622, 2624, 2625, 2627, 2628, 2630, 2632, 2633, 2635,
287 2636, 2638, 2640, 2641, 2643, 2644, 2646, 2647, 2649, 2651, 2652, 2654, 2655, 2657, 2659, 2660,
288 2662, 2663, 2665, 2667, 2668, 2670, 2671, 2673, 2675, 2676, 2678, 2679, 2681, 2683, 2684, 2686,
289 2687, 2689, 2691, 2692, 2694, 2696, 2697, 2699, 2700, 2702, 2704, 2705, 2707, 2708, 2710, 2712,
290 2713, 2715, 2716, 2718, 2720, 2721, 2723, 2725, 2726, 2728, 2729, 2731, 2733, 2734, 2736, 2738,
291 2739, 2741, 2742, 2744, 2746, 2747, 2749, 2751, 2752, 2754, 2755, 2757, 2759, 2760, 2762, 2764,
292 2765, 2767, 2769, 2770, 2772, 2773, 2775, 2777, 2778, 2780, 2782, 2783, 2785, 2787, 2788, 2790,
293 2791, 2793, 2795, 2796, 2798, 2800, 2801, 2803, 2805, 2806, 2808, 2810, 2811, 2813, 2814, 2816,
294 2818, 2819, 2821, 2823, 2824, 2826, 2828, 2829, 2831, 2833, 2834, 2836, 2838, 2839, 2841, 2843,
295 2844, 2846, 2848, 2849, 2851, 2853, 2854, 2856, 2857, 2859, 2861, 2862, 2864, 2866, 2867, 2869,
296 2871, 2872, 2874, 2876, 2877, 2879, 2881, 2882, 2884, 2886, 2888, 2889, 2891, 2893, 2894, 2896,
297 2898, 2899, 2901, 2903, 2904, 2906, 2908, 2909, 2911, 2913, 2914, 2916, 2918, 2919, 2921, 2923,
298 2924, 2926, 2928, 2929, 2931, 2933, 2935, 2936, 2938, 2940, 2941, 2943, 2945, 2946, 2948, 2950,
299 2951, 2953, 2955, 2956, 2958, 2960, 2962, 2963, 2965, 2967, 2968, 2970, 2972, 2973, 2975, 2977,
300 2979, 2980, 2982, 2984, 2985, 2987, 2989, 2990, 2992, 2994, 2996, 2997, 2999, 3001, 3002, 3004,
301 3006, 3008, 3009, 3011, 3013, 3014, 3016, 3018, 3020, 3021, 3023, 3025, 3026, 3028, 3030, 3032,
302 3033, 3035, 3037, 3038, 3040, 3042, 3044, 3045, 3047, 3049, 3050, 3052, 3054, 3056, 3057, 3059,
303 3061, 3063, 3064, 3066, 3068, 3069, 3071, 3073, 3075, 3076, 3078, 3080, 3082, 3083, 3085, 3087,
304 3089, 3090, 3092, 3094, 3095, 3097, 3099, 3101, 3102, 3104, 3106, 3108, 3109, 3111, 3113, 3115,
305 3116, 3118, 3120, 3122, 3123, 3125, 3127, 3129, 3130, 3132, 3134, 3136, 3137, 3139, 3141, 3143,
306 3144, 3146, 3148, 3150, 3151, 3153, 3155, 3157, 3158, 3160, 3162, 3164, 3165, 3167, 3169, 3171,
307 3172, 3174, 3176, 3178, 3179, 3181, 3183, 3185, 3187, 3188, 3190, 3192, 3194, 3195, 3197, 3199,
308 3201, 3202, 3204, 3206, 3208, 3209, 3211, 3213, 3215, 3217, 3218, 3220, 3222, 3224, 3225, 3227,
309 3229, 3231, 3233, 3234, 3236, 3238, 3240, 3241, 3243, 3245, 3247, 3249, 3250, 3252, 3254, 3256,
310 3258, 3259, 3261, 3263, 3265, 3266, 3268, 3270, 3272, 3274, 3275, 3277, 3279, 3281, 3283, 3284,
311 3286, 3288, 3290, 3292, 3293, 3295, 3297, 3299, 3301, 3302, 3304, 3306, 3308, 3310, 3311, 3313,
312 3315, 3317, 3319, 3320, 3322, 3324, 3326, 3328, 3329, 3331, 3333, 3335, 3337, 3338, 3340, 3342,
313 3344, 3346, 3348, 3349, 3351, 3353, 3355, 3357, 3358, 3360, 3362, 3364, 3366, 3368, 3369, 3371,
314 3373, 3375, 3377, 3378, 3380, 3382, 3384, 3386, 3388, 3389, 3391, 3393, 3395, 3397, 3399, 3400,
315 3402, 3404, 3406, 3408, 3410, 3411, 3413, 3415, 3417, 3419, 3421, 3422, 3424, 3426, 3428, 3430,
316 3432, 3433, 3435, 3437, 3439, 3441, 3443, 3444, 3446, 3448, 3450, 3452, 3454, 3455, 3457, 3459,
317 3461, 3463, 3465, 3467, 3468, 3470, 3472, 3474, 3476, 3478, 3480, 3481, 3483, 3485, 3487, 3489,
318 3491, 3492, 3494, 3496, 3498, 3500, 3502, 3504, 3506, 3507, 3509, 3511, 3513, 3515, 3517, 3519,
319 3520, 3522, 3524, 3526, 3528, 3530, 3532, 3533, 3535, 3537, 3539, 3541, 3543, 3545, 3547, 3548,
320 3550, 3552, 3554, 3556, 3558, 3560, 3562, 3563, 3565, 3567, 3569, 3571, 3573, 3575, 3577, 3578,
321 3580, 3582, 3584, 3586, 3588, 3590, 3592, 3594, 3595, 3597, 3599, 3601, 3603, 3605, 3607, 3609,
322 3611, 3612, 3614, 3616, 3618, 3620, 3622, 3624, 3626, 3628, 3629, 3631, 3633, 3635, 3637, 3639,
323 3641, 3643, 3645, 3647, 3648, 3650, 3652, 3654, 3656, 3658, 3660, 3662, 3664, 3666, 3667, 3669,
324 3671, 3673, 3675, 3677, 3679, 3681, 3683, 3685, 3687, 3688, 3690, 3692, 3694, 3696, 3698, 3700,
325 3702, 3704, 3706, 3708, 3710, 3711, 3713, 3715, 3717, 3719, 3721, 3723, 3725, 3727, 3729, 3731,
326 3733, 3735, 3736, 3738, 3740, 3742, 3744, 3746, 3748, 3750, 3752, 3754, 3756, 3758, 3760, 3762,
327 3764, 3765, 3767, 3769, 3771, 3773, 3775, 3777, 3779, 3781, 3783, 3785, 3787, 3789, 3791, 3793,
328 3795, 3796, 3798, 3800, 3802, 3804, 3806, 3808, 3810, 3812, 3814, 3816, 3818, 3820, 3822, 3824,
329 3826, 3828, 3830, 3832, 3833, 3835, 3837, 3839, 3841, 3843, 3845, 3847, 3849, 3851, 3853, 3855,
330 3857, 3859, 3861, 3863, 3865, 3867, 3869, 3871, 3873, 3875, 3877, 3879, 3881, 3883, 3884, 3886,
331 3888, 3890, 3892, 3894, 3896, 3898, 3900, 3902, 3904, 3906, 3908, 3910, 3912, 3914, 3916, 3918,
332 3920, 3922, 3924, 3926, 3928, 3930, 3932, 3934, 3936, 3938, 3940, 3942, 3944, 3946, 3948, 3950,
333 3952, 3954, 3956, 3958, 3960, 3962, 3964, 3966, 3968, 3970, 3972, 3974, 3976, 3978, 3980, 3982,
334 3984, 3986, 3988, 3990, 3992, 3994, 3996, 3998, 4000, 4002, 4004, 4006, 4008, 4010, 4012, 4014,
335 4016, 4018, 4020, 4022, 4024, 4026, 4028, 4030, 4032, 4034, 4036, 4038, 4040, 4042, 4044, 4046,
336 4048, 4050, 4052, 4054, 4056, 4058, 4060, 4062, 4064, 4066, 4068, 4070, 4072, 4074, 4076, 4078,
337 4080,
338};
339
340/* Generated table */
341const unsigned short tpg_linear_to_rec709[255 * 16 + 1] = {
342 0, 5, 9, 14, 18, 22, 27, 32, 36, 41, 45, 50, 54, 59, 63, 68,
343 72, 77, 81, 86, 90, 95, 99, 104, 108, 113, 117, 122, 126, 131, 135, 139,
344 144, 149, 153, 158, 162, 167, 171, 176, 180, 185, 189, 194, 198, 203, 207, 212,
345 216, 221, 225, 230, 234, 239, 243, 248, 252, 257, 261, 266, 270, 275, 279, 284,
346 288, 293, 297, 302, 306, 311, 315, 320, 324, 328, 334, 338, 343, 347, 352, 356,
347 360, 365, 369, 373, 377, 381, 386, 390, 394, 398, 402, 406, 410, 414, 418, 422,
348 426, 430, 433, 437, 441, 445, 449, 452, 456, 460, 464, 467, 471, 475, 478, 482,
349 485, 489, 492, 496, 499, 503, 506, 510, 513, 517, 520, 524, 527, 530, 534, 537,
350 540, 544, 547, 550, 554, 557, 560, 563, 566, 570, 573, 576, 579, 582, 586, 589,
351 592, 595, 598, 601, 604, 607, 610, 613, 616, 619, 622, 625, 628, 631, 634, 637,
352 640, 643, 646, 649, 652, 655, 658, 660, 663, 666, 669, 672, 675, 677, 680, 683,
353 686, 689, 691, 694, 697, 700, 702, 705, 708, 711, 713, 716, 719, 721, 724, 727,
354 729, 732, 735, 737, 740, 743, 745, 748, 750, 753, 756, 758, 761, 763, 766, 768,
355 771, 773, 776, 779, 781, 784, 786, 789, 791, 794, 796, 799, 801, 803, 806, 808,
356 811, 813, 816, 818, 821, 823, 825, 828, 830, 833, 835, 837, 840, 842, 844, 847,
357 849, 851, 854, 856, 858, 861, 863, 865, 868, 870, 872, 875, 877, 879, 881, 884,
358 886, 888, 891, 893, 895, 897, 900, 902, 904, 906, 908, 911, 913, 915, 917, 919,
359 922, 924, 926, 928, 930, 933, 935, 937, 939, 941, 943, 946, 948, 950, 952, 954,
360 956, 958, 960, 963, 965, 967, 969, 971, 973, 975, 977, 979, 981, 984, 986, 988,
361 990, 992, 994, 996, 998, 1000, 1002, 1004, 1006, 1008, 1010, 1012, 1014, 1016, 1018, 1020,
362 1022, 1024, 1026, 1028, 1030, 1032, 1034, 1036, 1038, 1040, 1042, 1044, 1046, 1048, 1050, 1052,
363 1054, 1056, 1058, 1060, 1062, 1064, 1066, 1068, 1069, 1071, 1073, 1075, 1077, 1079, 1081, 1083,
364 1085, 1087, 1089, 1090, 1092, 1094, 1096, 1098, 1100, 1102, 1104, 1106, 1107, 1109, 1111, 1113,
365 1115, 1117, 1119, 1120, 1122, 1124, 1126, 1128, 1130, 1131, 1133, 1135, 1137, 1139, 1141, 1142,
366 1144, 1146, 1148, 1150, 1151, 1153, 1155, 1157, 1159, 1160, 1162, 1164, 1166, 1168, 1169, 1171,
367 1173, 1175, 1176, 1178, 1180, 1182, 1184, 1185, 1187, 1189, 1191, 1192, 1194, 1196, 1198, 1199,
368 1201, 1203, 1204, 1206, 1208, 1210, 1211, 1213, 1215, 1217, 1218, 1220, 1222, 1223, 1225, 1227,
369 1228, 1230, 1232, 1234, 1235, 1237, 1239, 1240, 1242, 1244, 1245, 1247, 1249, 1250, 1252, 1254,
370 1255, 1257, 1259, 1260, 1262, 1264, 1265, 1267, 1269, 1270, 1272, 1274, 1275, 1277, 1279, 1280,
371 1282, 1283, 1285, 1287, 1288, 1290, 1292, 1293, 1295, 1296, 1298, 1300, 1301, 1303, 1305, 1306,
372 1308, 1309, 1311, 1313, 1314, 1316, 1317, 1319, 1321, 1322, 1324, 1325, 1327, 1328, 1330, 1332,
373 1333, 1335, 1336, 1338, 1339, 1341, 1343, 1344, 1346, 1347, 1349, 1350, 1352, 1354, 1355, 1357,
374 1358, 1360, 1361, 1363, 1364, 1366, 1367, 1369, 1371, 1372, 1374, 1375, 1377, 1378, 1380, 1381,
375 1383, 1384, 1386, 1387, 1389, 1390, 1392, 1393, 1395, 1396, 1398, 1399, 1401, 1402, 1404, 1405,
376 1407, 1408, 1410, 1411, 1413, 1414, 1416, 1417, 1419, 1420, 1422, 1423, 1425, 1426, 1428, 1429,
377 1431, 1432, 1434, 1435, 1437, 1438, 1440, 1441, 1442, 1444, 1445, 1447, 1448, 1450, 1451, 1453,
378 1454, 1456, 1457, 1458, 1460, 1461, 1463, 1464, 1466, 1467, 1469, 1470, 1471, 1473, 1474, 1476,
379 1477, 1479, 1480, 1481, 1483, 1484, 1486, 1487, 1489, 1490, 1491, 1493, 1494, 1496, 1497, 1498,
380 1500, 1501, 1503, 1504, 1505, 1507, 1508, 1510, 1511, 1512, 1514, 1515, 1517, 1518, 1519, 1521,
381 1522, 1524, 1525, 1526, 1528, 1529, 1531, 1532, 1533, 1535, 1536, 1537, 1539, 1540, 1542, 1543,
382 1544, 1546, 1547, 1548, 1550, 1551, 1553, 1554, 1555, 1557, 1558, 1559, 1561, 1562, 1563, 1565,
383 1566, 1567, 1569, 1570, 1571, 1573, 1574, 1576, 1577, 1578, 1580, 1581, 1582, 1584, 1585, 1586,
384 1588, 1589, 1590, 1592, 1593, 1594, 1596, 1597, 1598, 1600, 1601, 1602, 1603, 1605, 1606, 1607,
385 1609, 1610, 1611, 1613, 1614, 1615, 1617, 1618, 1619, 1621, 1622, 1623, 1624, 1626, 1627, 1628,
386 1630, 1631, 1632, 1634, 1635, 1636, 1637, 1639, 1640, 1641, 1643, 1644, 1645, 1647, 1648, 1649,
387 1650, 1652, 1653, 1654, 1655, 1657, 1658, 1659, 1661, 1662, 1663, 1664, 1666, 1667, 1668, 1670,
388 1671, 1672, 1673, 1675, 1676, 1677, 1678, 1680, 1681, 1682, 1683, 1685, 1686, 1687, 1688, 1690,
389 1691, 1692, 1693, 1695, 1696, 1697, 1698, 1700, 1701, 1702, 1703, 1705, 1706, 1707, 1708, 1710,
390 1711, 1712, 1713, 1715, 1716, 1717, 1718, 1720, 1721, 1722, 1723, 1724, 1726, 1727, 1728, 1729,
391 1731, 1732, 1733, 1734, 1736, 1737, 1738, 1739, 1740, 1742, 1743, 1744, 1745, 1746, 1748, 1749,
392 1750, 1751, 1753, 1754, 1755, 1756, 1757, 1759, 1760, 1761, 1762, 1763, 1765, 1766, 1767, 1768,
393 1769, 1771, 1772, 1773, 1774, 1775, 1777, 1778, 1779, 1780, 1781, 1783, 1784, 1785, 1786, 1787,
394 1788, 1790, 1791, 1792, 1793, 1794, 1796, 1797, 1798, 1799, 1800, 1801, 1803, 1804, 1805, 1806,
395 1807, 1809, 1810, 1811, 1812, 1813, 1814, 1816, 1817, 1818, 1819, 1820, 1821, 1823, 1824, 1825,
396 1826, 1827, 1828, 1829, 1831, 1832, 1833, 1834, 1835, 1836, 1838, 1839, 1840, 1841, 1842, 1843,
397 1845, 1846, 1847, 1848, 1849, 1850, 1851, 1853, 1854, 1855, 1856, 1857, 1858, 1859, 1861, 1862,
398 1863, 1864, 1865, 1866, 1867, 1868, 1870, 1871, 1872, 1873, 1874, 1875, 1876, 1878, 1879, 1880,
399 1881, 1882, 1883, 1884, 1885, 1887, 1888, 1889, 1890, 1891, 1892, 1893, 1894, 1896, 1897, 1898,
400 1899, 1900, 1901, 1902, 1903, 1904, 1906, 1907, 1908, 1909, 1910, 1911, 1912, 1913, 1914, 1916,
401 1917, 1918, 1919, 1920, 1921, 1922, 1923, 1924, 1925, 1927, 1928, 1929, 1930, 1931, 1932, 1933,
402 1934, 1935, 1936, 1938, 1939, 1940, 1941, 1942, 1943, 1944, 1945, 1946, 1947, 1948, 1950, 1951,
403 1952, 1953, 1954, 1955, 1956, 1957, 1958, 1959, 1960, 1961, 1963, 1964, 1965, 1966, 1967, 1968,
404 1969, 1970, 1971, 1972, 1973, 1974, 1975, 1977, 1978, 1979, 1980, 1981, 1982, 1983, 1984, 1985,
405 1986, 1987, 1988, 1989, 1990, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
406 2003, 2004, 2005, 2006, 2007, 2008, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019,
407 2020, 2021, 2022, 2023, 2024, 2025, 2026, 2027, 2028, 2029, 2031, 2032, 2033, 2034, 2035, 2036,
408 2037, 2038, 2039, 2040, 2041, 2042, 2043, 2044, 2045, 2046, 2047, 2048, 2049, 2050, 2051, 2052,
409 2053, 2054, 2055, 2056, 2057, 2058, 2060, 2061, 2062, 2063, 2064, 2065, 2066, 2067, 2068, 2069,
410 2070, 2071, 2072, 2073, 2074, 2075, 2076, 2077, 2078, 2079, 2080, 2081, 2082, 2083, 2084, 2085,
411 2086, 2087, 2088, 2089, 2090, 2091, 2092, 2093, 2094, 2095, 2096, 2097, 2098, 2099, 2100, 2101,
412 2102, 2103, 2104, 2105, 2106, 2107, 2108, 2109, 2110, 2111, 2112, 2113, 2114, 2115, 2116, 2117,
413 2118, 2119, 2120, 2121, 2122, 2123, 2124, 2125, 2126, 2127, 2128, 2129, 2130, 2131, 2132, 2133,
414 2134, 2135, 2136, 2137, 2138, 2139, 2140, 2141, 2142, 2143, 2144, 2145, 2146, 2147, 2148, 2149,
415 2150, 2151, 2152, 2153, 2154, 2155, 2156, 2157, 2158, 2159, 2160, 2161, 2162, 2163, 2164, 2165,
416 2166, 2167, 2168, 2169, 2170, 2171, 2172, 2173, 2173, 2174, 2175, 2176, 2177, 2178, 2179, 2180,
417 2181, 2182, 2183, 2184, 2185, 2186, 2187, 2188, 2189, 2190, 2191, 2192, 2193, 2194, 2195, 2196,
418 2197, 2198, 2199, 2200, 2201, 2202, 2202, 2203, 2204, 2205, 2206, 2207, 2208, 2209, 2210, 2211,
419 2212, 2213, 2214, 2215, 2216, 2217, 2218, 2219, 2220, 2221, 2222, 2223, 2224, 2224, 2225, 2226,
420 2227, 2228, 2229, 2230, 2231, 2232, 2233, 2234, 2235, 2236, 2237, 2238, 2239, 2240, 2241, 2241,
421 2242, 2243, 2244, 2245, 2246, 2247, 2248, 2249, 2250, 2251, 2252, 2253, 2254, 2255, 2256, 2257,
422 2257, 2258, 2259, 2260, 2261, 2262, 2263, 2264, 2265, 2266, 2267, 2268, 2269, 2270, 2271, 2271,
423 2272, 2273, 2274, 2275, 2276, 2277, 2278, 2279, 2280, 2281, 2282, 2283, 2283, 2284, 2285, 2286,
424 2287, 2288, 2289, 2290, 2291, 2292, 2293, 2294, 2295, 2295, 2296, 2297, 2298, 2299, 2300, 2301,
425 2302, 2303, 2304, 2305, 2306, 2306, 2307, 2308, 2309, 2310, 2311, 2312, 2313, 2314, 2315, 2316,
426 2317, 2317, 2318, 2319, 2320, 2321, 2322, 2323, 2324, 2325, 2326, 2327, 2327, 2328, 2329, 2330,
427 2331, 2332, 2333, 2334, 2335, 2336, 2336, 2337, 2338, 2339, 2340, 2341, 2342, 2343, 2344, 2345,
428 2345, 2346, 2347, 2348, 2349, 2350, 2351, 2352, 2353, 2354, 2354, 2355, 2356, 2357, 2358, 2359,
429 2360, 2361, 2362, 2363, 2363, 2364, 2365, 2366, 2367, 2368, 2369, 2370, 2371, 2371, 2372, 2373,
430 2374, 2375, 2376, 2377, 2378, 2379, 2379, 2380, 2381, 2382, 2383, 2384, 2385, 2386, 2386, 2387,
431 2388, 2389, 2390, 2391, 2392, 2393, 2394, 2394, 2395, 2396, 2397, 2398, 2399, 2400, 2401, 2401,
432 2402, 2403, 2404, 2405, 2406, 2407, 2408, 2408, 2409, 2410, 2411, 2412, 2413, 2414, 2415, 2415,
433 2416, 2417, 2418, 2419, 2420, 2421, 2422, 2422, 2423, 2424, 2425, 2426, 2427, 2428, 2428, 2429,
434 2430, 2431, 2432, 2433, 2434, 2435, 2435, 2436, 2437, 2438, 2439, 2440, 2441, 2441, 2442, 2443,
435 2444, 2445, 2446, 2447, 2447, 2448, 2449, 2450, 2451, 2452, 2453, 2453, 2454, 2455, 2456, 2457,
436 2458, 2459, 2459, 2460, 2461, 2462, 2463, 2464, 2465, 2465, 2466, 2467, 2468, 2469, 2470, 2471,
437 2471, 2472, 2473, 2474, 2475, 2476, 2477, 2477, 2478, 2479, 2480, 2481, 2482, 2482, 2483, 2484,
438 2485, 2486, 2487, 2488, 2488, 2489, 2490, 2491, 2492, 2493, 2493, 2494, 2495, 2496, 2497, 2498,
439 2499, 2499, 2500, 2501, 2502, 2503, 2504, 2504, 2505, 2506, 2507, 2508, 2509, 2509, 2510, 2511,
440 2512, 2513, 2514, 2514, 2515, 2516, 2517, 2518, 2519, 2519, 2520, 2521, 2522, 2523, 2524, 2524,
441 2525, 2526, 2527, 2528, 2529, 2529, 2530, 2531, 2532, 2533, 2534, 2534, 2535, 2536, 2537, 2538,
442 2539, 2539, 2540, 2541, 2542, 2543, 2544, 2544, 2545, 2546, 2547, 2548, 2548, 2549, 2550, 2551,
443 2552, 2553, 2553, 2554, 2555, 2556, 2557, 2558, 2558, 2559, 2560, 2561, 2562, 2562, 2563, 2564,
444 2565, 2566, 2567, 2567, 2568, 2569, 2570, 2571, 2571, 2572, 2573, 2574, 2575, 2576, 2576, 2577,
445 2578, 2579, 2580, 2580, 2581, 2582, 2583, 2584, 2584, 2585, 2586, 2587, 2588, 2589, 2589, 2590,
446 2591, 2592, 2593, 2593, 2594, 2595, 2596, 2597, 2597, 2598, 2599, 2600, 2601, 2601, 2602, 2603,
447 2604, 2605, 2605, 2606, 2607, 2608, 2609, 2610, 2610, 2611, 2612, 2613, 2614, 2614, 2615, 2616,
448 2617, 2618, 2618, 2619, 2620, 2621, 2622, 2622, 2623, 2624, 2625, 2626, 2626, 2627, 2628, 2629,
449 2630, 2630, 2631, 2632, 2633, 2634, 2634, 2635, 2636, 2637, 2637, 2638, 2639, 2640, 2641, 2641,
450 2642, 2643, 2644, 2645, 2645, 2646, 2647, 2648, 2649, 2649, 2650, 2651, 2652, 2653, 2653, 2654,
451 2655, 2656, 2656, 2657, 2658, 2659, 2660, 2660, 2661, 2662, 2663, 2664, 2664, 2665, 2666, 2667,
452 2668, 2668, 2669, 2670, 2671, 2671, 2672, 2673, 2674, 2675, 2675, 2676, 2677, 2678, 2678, 2679,
453 2680, 2681, 2682, 2682, 2683, 2684, 2685, 2686, 2686, 2687, 2688, 2689, 2689, 2690, 2691, 2692,
454 2693, 2693, 2694, 2695, 2696, 2696, 2697, 2698, 2699, 2700, 2700, 2701, 2702, 2703, 2703, 2704,
455 2705, 2706, 2706, 2707, 2708, 2709, 2710, 2710, 2711, 2712, 2713, 2713, 2714, 2715, 2716, 2717,
456 2717, 2718, 2719, 2720, 2720, 2721, 2722, 2723, 2723, 2724, 2725, 2726, 2727, 2727, 2728, 2729,
457 2730, 2730, 2731, 2732, 2733, 2733, 2734, 2735, 2736, 2736, 2737, 2738, 2739, 2740, 2740, 2741,
458 2742, 2743, 2743, 2744, 2745, 2746, 2746, 2747, 2748, 2749, 2749, 2750, 2751, 2752, 2752, 2753,
459 2754, 2755, 2755, 2756, 2757, 2758, 2759, 2759, 2760, 2761, 2762, 2762, 2763, 2764, 2765, 2765,
460 2766, 2767, 2768, 2768, 2769, 2770, 2771, 2771, 2772, 2773, 2774, 2774, 2775, 2776, 2777, 2777,
461 2778, 2779, 2780, 2780, 2781, 2782, 2783, 2783, 2784, 2785, 2786, 2786, 2787, 2788, 2789, 2789,
462 2790, 2791, 2792, 2792, 2793, 2794, 2795, 2795, 2796, 2797, 2798, 2798, 2799, 2800, 2801, 2801,
463 2802, 2803, 2804, 2804, 2805, 2806, 2807, 2807, 2808, 2809, 2810, 2810, 2811, 2812, 2813, 2813,
464 2814, 2815, 2815, 2816, 2817, 2818, 2818, 2819, 2820, 2821, 2821, 2822, 2823, 2824, 2824, 2825,
465 2826, 2827, 2827, 2828, 2829, 2830, 2830, 2831, 2832, 2832, 2833, 2834, 2835, 2835, 2836, 2837,
466 2838, 2838, 2839, 2840, 2841, 2841, 2842, 2843, 2844, 2844, 2845, 2846, 2846, 2847, 2848, 2849,
467 2849, 2850, 2851, 2852, 2852, 2853, 2854, 2855, 2855, 2856, 2857, 2857, 2858, 2859, 2860, 2860,
468 2861, 2862, 2863, 2863, 2864, 2865, 2865, 2866, 2867, 2868, 2868, 2869, 2870, 2871, 2871, 2872,
469 2873, 2873, 2874, 2875, 2876, 2876, 2877, 2878, 2879, 2879, 2880, 2881, 2881, 2882, 2883, 2884,
470 2884, 2885, 2886, 2886, 2887, 2888, 2889, 2889, 2890, 2891, 2892, 2892, 2893, 2894, 2894, 2895,
471 2896, 2897, 2897, 2898, 2899, 2899, 2900, 2901, 2902, 2902, 2903, 2904, 2904, 2905, 2906, 2907,
472 2907, 2908, 2909, 2909, 2910, 2911, 2912, 2912, 2913, 2914, 2914, 2915, 2916, 2917, 2917, 2918,
473 2919, 2919, 2920, 2921, 2922, 2922, 2923, 2924, 2924, 2925, 2926, 2927, 2927, 2928, 2929, 2929,
474 2930, 2931, 2932, 2932, 2933, 2934, 2934, 2935, 2936, 2937, 2937, 2938, 2939, 2939, 2940, 2941,
475 2941, 2942, 2943, 2944, 2944, 2945, 2946, 2946, 2947, 2948, 2949, 2949, 2950, 2951, 2951, 2952,
476 2953, 2953, 2954, 2955, 2956, 2956, 2957, 2958, 2958, 2959, 2960, 2961, 2961, 2962, 2963, 2963,
477 2964, 2965, 2965, 2966, 2967, 2968, 2968, 2969, 2970, 2970, 2971, 2972, 2972, 2973, 2974, 2975,
478 2975, 2976, 2977, 2977, 2978, 2979, 2979, 2980, 2981, 2982, 2982, 2983, 2984, 2984, 2985, 2986,
479 2986, 2987, 2988, 2988, 2989, 2990, 2991, 2991, 2992, 2993, 2993, 2994, 2995, 2995, 2996, 2997,
480 2998, 2998, 2999, 3000, 3000, 3001, 3002, 3002, 3003, 3004, 3004, 3005, 3006, 3006, 3007, 3008,
481 3009, 3009, 3010, 3011, 3011, 3012, 3013, 3013, 3014, 3015, 3015, 3016, 3017, 3018, 3018, 3019,
482 3020, 3020, 3021, 3022, 3022, 3023, 3024, 3024, 3025, 3026, 3026, 3027, 3028, 3029, 3029, 3030,
483 3031, 3031, 3032, 3033, 3033, 3034, 3035, 3035, 3036, 3037, 3037, 3038, 3039, 3039, 3040, 3041,
484 3042, 3042, 3043, 3044, 3044, 3045, 3046, 3046, 3047, 3048, 3048, 3049, 3050, 3050, 3051, 3052,
485 3052, 3053, 3054, 3054, 3055, 3056, 3056, 3057, 3058, 3059, 3059, 3060, 3061, 3061, 3062, 3063,
486 3063, 3064, 3065, 3065, 3066, 3067, 3067, 3068, 3069, 3069, 3070, 3071, 3071, 3072, 3073, 3073,
487 3074, 3075, 3075, 3076, 3077, 3077, 3078, 3079, 3079, 3080, 3081, 3081, 3082, 3083, 3084, 3084,
488 3085, 3086, 3086, 3087, 3088, 3088, 3089, 3090, 3090, 3091, 3092, 3092, 3093, 3094, 3094, 3095,
489 3096, 3096, 3097, 3098, 3098, 3099, 3100, 3100, 3101, 3102, 3102, 3103, 3104, 3104, 3105, 3106,
490 3106, 3107, 3108, 3108, 3109, 3110, 3110, 3111, 3112, 3112, 3113, 3114, 3114, 3115, 3116, 3116,
491 3117, 3118, 3118, 3119, 3120, 3120, 3121, 3122, 3122, 3123, 3124, 3124, 3125, 3126, 3126, 3127,
492 3128, 3128, 3129, 3130, 3130, 3131, 3132, 3132, 3133, 3134, 3134, 3135, 3135, 3136, 3137, 3137,
493 3138, 3139, 3139, 3140, 3141, 3141, 3142, 3143, 3143, 3144, 3145, 3145, 3146, 3147, 3147, 3148,
494 3149, 3149, 3150, 3151, 3151, 3152, 3153, 3153, 3154, 3155, 3155, 3156, 3157, 3157, 3158, 3159,
495 3159, 3160, 3160, 3161, 3162, 3162, 3163, 3164, 3164, 3165, 3166, 3166, 3167, 3168, 3168, 3169,
496 3170, 3170, 3171, 3172, 3172, 3173, 3174, 3174, 3175, 3175, 3176, 3177, 3177, 3178, 3179, 3179,
497 3180, 3181, 3181, 3182, 3183, 3183, 3184, 3185, 3185, 3186, 3187, 3187, 3188, 3188, 3189, 3190,
498 3190, 3191, 3192, 3192, 3193, 3194, 3194, 3195, 3196, 3196, 3197, 3198, 3198, 3199, 3199, 3200,
499 3201, 3201, 3202, 3203, 3203, 3204, 3205, 3205, 3206, 3207, 3207, 3208, 3209, 3209, 3210, 3210,
500 3211, 3212, 3212, 3213, 3214, 3214, 3215, 3216, 3216, 3217, 3218, 3218, 3219, 3219, 3220, 3221,
501 3221, 3222, 3223, 3223, 3224, 3225, 3225, 3226, 3227, 3227, 3228, 3228, 3229, 3230, 3230, 3231,
502 3232, 3232, 3233, 3234, 3234, 3235, 3235, 3236, 3237, 3237, 3238, 3239, 3239, 3240, 3241, 3241,
503 3242, 3242, 3243, 3244, 3244, 3245, 3246, 3246, 3247, 3248, 3248, 3249, 3249, 3250, 3251, 3251,
504 3252, 3253, 3253, 3254, 3255, 3255, 3256, 3256, 3257, 3258, 3258, 3259, 3260, 3260, 3261, 3262,
505 3262, 3263, 3263, 3264, 3265, 3265, 3266, 3267, 3267, 3268, 3268, 3269, 3270, 3270, 3271, 3272,
506 3272, 3273, 3274, 3274, 3275, 3275, 3276, 3277, 3277, 3278, 3279, 3279, 3280, 3280, 3281, 3282,
507 3282, 3283, 3284, 3284, 3285, 3285, 3286, 3287, 3287, 3288, 3289, 3289, 3290, 3290, 3291, 3292,
508 3292, 3293, 3294, 3294, 3295, 3295, 3296, 3297, 3297, 3298, 3299, 3299, 3300, 3300, 3301, 3302,
509 3302, 3303, 3304, 3304, 3305, 3305, 3306, 3307, 3307, 3308, 3309, 3309, 3310, 3310, 3311, 3312,
510 3312, 3313, 3314, 3314, 3315, 3315, 3316, 3317, 3317, 3318, 3319, 3319, 3320, 3320, 3321, 3322,
511 3322, 3323, 3323, 3324, 3325, 3325, 3326, 3327, 3327, 3328, 3328, 3329, 3330, 3330, 3331, 3332,
512 3332, 3333, 3333, 3334, 3335, 3335, 3336, 3336, 3337, 3338, 3338, 3339, 3340, 3340, 3341, 3341,
513 3342, 3343, 3343, 3344, 3345, 3345, 3346, 3346, 3347, 3348, 3348, 3349, 3349, 3350, 3351, 3351,
514 3352, 3352, 3353, 3354, 3354, 3355, 3356, 3356, 3357, 3357, 3358, 3359, 3359, 3360, 3360, 3361,
515 3362, 3362, 3363, 3364, 3364, 3365, 3365, 3366, 3367, 3367, 3368, 3368, 3369, 3370, 3370, 3371,
516 3371, 3372, 3373, 3373, 3374, 3375, 3375, 3376, 3376, 3377, 3378, 3378, 3379, 3379, 3380, 3381,
517 3381, 3382, 3382, 3383, 3384, 3384, 3385, 3385, 3386, 3387, 3387, 3388, 3389, 3389, 3390, 3390,
518 3391, 3392, 3392, 3393, 3393, 3394, 3395, 3395, 3396, 3396, 3397, 3398, 3398, 3399, 3399, 3400,
519 3401, 3401, 3402, 3402, 3403, 3404, 3404, 3405, 3405, 3406, 3407, 3407, 3408, 3408, 3409, 3410,
520 3410, 3411, 3411, 3412, 3413, 3413, 3414, 3414, 3415, 3416, 3416, 3417, 3418, 3418, 3419, 3419,
521 3420, 3421, 3421, 3422, 3422, 3423, 3424, 3424, 3425, 3425, 3426, 3427, 3427, 3428, 3428, 3429,
522 3430, 3430, 3431, 3431, 3432, 3433, 3433, 3434, 3434, 3435, 3435, 3436, 3437, 3437, 3438, 3438,
523 3439, 3440, 3440, 3441, 3441, 3442, 3443, 3443, 3444, 3444, 3445, 3446, 3446, 3447, 3447, 3448,
524 3449, 3449, 3450, 3450, 3451, 3452, 3452, 3453, 3453, 3454, 3455, 3455, 3456, 3456, 3457, 3458,
525 3458, 3459, 3459, 3460, 3461, 3461, 3462, 3462, 3463, 3463, 3464, 3465, 3465, 3466, 3466, 3467,
526 3468, 3468, 3469, 3469, 3470, 3471, 3471, 3472, 3472, 3473, 3474, 3474, 3475, 3475, 3476, 3476,
527 3477, 3478, 3478, 3479, 3479, 3480, 3481, 3481, 3482, 3482, 3483, 3484, 3484, 3485, 3485, 3486,
528 3486, 3487, 3488, 3488, 3489, 3489, 3490, 3491, 3491, 3492, 3492, 3493, 3494, 3494, 3495, 3495,
529 3496, 3496, 3497, 3498, 3498, 3499, 3499, 3500, 3501, 3501, 3502, 3502, 3503, 3504, 3504, 3505,
530 3505, 3506, 3506, 3507, 3508, 3508, 3509, 3509, 3510, 3511, 3511, 3512, 3512, 3513, 3513, 3514,
531 3515, 3515, 3516, 3516, 3517, 3518, 3518, 3519, 3519, 3520, 3520, 3521, 3522, 3522, 3523, 3523,
532 3524, 3525, 3525, 3526, 3526, 3527, 3527, 3528, 3529, 3529, 3530, 3530, 3531, 3531, 3532, 3533,
533 3533, 3534, 3534, 3535, 3536, 3536, 3537, 3537, 3538, 3538, 3539, 3540, 3540, 3541, 3541, 3542,
534 3542, 3543, 3544, 3544, 3545, 3545, 3546, 3547, 3547, 3548, 3548, 3549, 3549, 3550, 3551, 3551,
535 3552, 3552, 3553, 3553, 3554, 3555, 3555, 3556, 3556, 3557, 3557, 3558, 3559, 3559, 3560, 3560,
536 3561, 3561, 3562, 3563, 3563, 3564, 3564, 3565, 3566, 3566, 3567, 3567, 3568, 3568, 3569, 3570,
537 3570, 3571, 3571, 3572, 3572, 3573, 3574, 3574, 3575, 3575, 3576, 3576, 3577, 3578, 3578, 3579,
538 3579, 3580, 3580, 3581, 3582, 3582, 3583, 3583, 3584, 3584, 3585, 3586, 3586, 3587, 3587, 3588,
539 3588, 3589, 3590, 3590, 3591, 3591, 3592, 3592, 3593, 3594, 3594, 3595, 3595, 3596, 3596, 3597,
540 3597, 3598, 3599, 3599, 3600, 3600, 3601, 3601, 3602, 3603, 3603, 3604, 3604, 3605, 3605, 3606,
541 3607, 3607, 3608, 3608, 3609, 3609, 3610, 3611, 3611, 3612, 3612, 3613, 3613, 3614, 3615, 3615,
542 3616, 3616, 3617, 3617, 3618, 3618, 3619, 3620, 3620, 3621, 3621, 3622, 3622, 3623, 3624, 3624,
543 3625, 3625, 3626, 3626, 3627, 3627, 3628, 3629, 3629, 3630, 3630, 3631, 3631, 3632, 3633, 3633,
544 3634, 3634, 3635, 3635, 3636, 3636, 3637, 3638, 3638, 3639, 3639, 3640, 3640, 3641, 3642, 3642,
545 3643, 3643, 3644, 3644, 3645, 3645, 3646, 3647, 3647, 3648, 3648, 3649, 3649, 3650, 3650, 3651,
546 3652, 3652, 3653, 3653, 3654, 3654, 3655, 3656, 3656, 3657, 3657, 3658, 3658, 3659, 3659, 3660,
547 3661, 3661, 3662, 3662, 3663, 3663, 3664, 3664, 3665, 3666, 3666, 3667, 3667, 3668, 3668, 3669,
548 3669, 3670, 3671, 3671, 3672, 3672, 3673, 3673, 3674, 3674, 3675, 3676, 3676, 3677, 3677, 3678,
549 3678, 3679, 3679, 3680, 3681, 3681, 3682, 3682, 3683, 3683, 3684, 3684, 3685, 3686, 3686, 3687,
550 3687, 3688, 3688, 3689, 3689, 3690, 3691, 3691, 3692, 3692, 3693, 3693, 3694, 3694, 3695, 3695,
551 3696, 3697, 3697, 3698, 3698, 3699, 3699, 3700, 3700, 3701, 3702, 3702, 3703, 3703, 3704, 3704,
552 3705, 3705, 3706, 3707, 3707, 3708, 3708, 3709, 3709, 3710, 3710, 3711, 3711, 3712, 3713, 3713,
553 3714, 3714, 3715, 3715, 3716, 3716, 3717, 3717, 3718, 3719, 3719, 3720, 3720, 3721, 3721, 3722,
554 3722, 3723, 3724, 3724, 3725, 3725, 3726, 3726, 3727, 3727, 3728, 3728, 3729, 3730, 3730, 3731,
555 3731, 3732, 3732, 3733, 3733, 3734, 3734, 3735, 3736, 3736, 3737, 3737, 3738, 3738, 3739, 3739,
556 3740, 3740, 3741, 3742, 3742, 3743, 3743, 3744, 3744, 3745, 3745, 3746, 3746, 3747, 3748, 3748,
557 3749, 3749, 3750, 3750, 3751, 3751, 3752, 3752, 3753, 3753, 3754, 3755, 3755, 3756, 3756, 3757,
558 3757, 3758, 3758, 3759, 3759, 3760, 3761, 3761, 3762, 3762, 3763, 3763, 3764, 3764, 3765, 3765,
559 3766, 3766, 3767, 3768, 3768, 3769, 3769, 3770, 3770, 3771, 3771, 3772, 3772, 3773, 3773, 3774,
560 3775, 3775, 3776, 3776, 3777, 3777, 3778, 3778, 3779, 3779, 3780, 3781, 3781, 3782, 3782, 3783,
561 3783, 3784, 3784, 3785, 3785, 3786, 3786, 3787, 3787, 3788, 3789, 3789, 3790, 3790, 3791, 3791,
562 3792, 3792, 3793, 3793, 3794, 3794, 3795, 3796, 3796, 3797, 3797, 3798, 3798, 3799, 3799, 3800,
563 3800, 3801, 3801, 3802, 3802, 3803, 3804, 3804, 3805, 3805, 3806, 3806, 3807, 3807, 3808, 3808,
564 3809, 3809, 3810, 3811, 3811, 3812, 3812, 3813, 3813, 3814, 3814, 3815, 3815, 3816, 3816, 3817,
565 3817, 3818, 3819, 3819, 3820, 3820, 3821, 3821, 3822, 3822, 3823, 3823, 3824, 3824, 3825, 3825,
566 3826, 3826, 3827, 3828, 3828, 3829, 3829, 3830, 3830, 3831, 3831, 3832, 3832, 3833, 3833, 3834,
567 3834, 3835, 3835, 3836, 3837, 3837, 3838, 3838, 3839, 3839, 3840, 3840, 3841, 3841, 3842, 3842,
568 3843, 3843, 3844, 3844, 3845, 3846, 3846, 3847, 3847, 3848, 3848, 3849, 3849, 3850, 3850, 3851,
569 3851, 3852, 3852, 3853, 3853, 3854, 3855, 3855, 3856, 3856, 3857, 3857, 3858, 3858, 3859, 3859,
570 3860, 3860, 3861, 3861, 3862, 3862, 3863, 3863, 3864, 3864, 3865, 3866, 3866, 3867, 3867, 3868,
571 3868, 3869, 3869, 3870, 3870, 3871, 3871, 3872, 3872, 3873, 3873, 3874, 3874, 3875, 3876, 3876,
572 3877, 3877, 3878, 3878, 3879, 3879, 3880, 3880, 3881, 3881, 3882, 3882, 3883, 3883, 3884, 3884,
573 3885, 3885, 3886, 3886, 3887, 3888, 3888, 3889, 3889, 3890, 3890, 3891, 3891, 3892, 3892, 3893,
574 3893, 3894, 3894, 3895, 3895, 3896, 3896, 3897, 3897, 3898, 3898, 3899, 3900, 3900, 3901, 3901,
575 3902, 3902, 3903, 3903, 3904, 3904, 3905, 3905, 3906, 3906, 3907, 3907, 3908, 3908, 3909, 3909,
576 3910, 3910, 3911, 3911, 3912, 3912, 3913, 3914, 3914, 3915, 3915, 3916, 3916, 3917, 3917, 3918,
577 3918, 3919, 3919, 3920, 3920, 3921, 3921, 3922, 3922, 3923, 3923, 3924, 3924, 3925, 3925, 3926,
578 3926, 3927, 3927, 3928, 3929, 3929, 3930, 3930, 3931, 3931, 3932, 3932, 3933, 3933, 3934, 3934,
579 3935, 3935, 3936, 3936, 3937, 3937, 3938, 3938, 3939, 3939, 3940, 3940, 3941, 3941, 3942, 3942,
580 3943, 3943, 3944, 3944, 3945, 3945, 3946, 3947, 3947, 3948, 3948, 3949, 3949, 3950, 3950, 3951,
581 3951, 3952, 3952, 3953, 3953, 3954, 3954, 3955, 3955, 3956, 3956, 3957, 3957, 3958, 3958, 3959,
582 3959, 3960, 3960, 3961, 3961, 3962, 3962, 3963, 3963, 3964, 3964, 3965, 3965, 3966, 3966, 3967,
583 3967, 3968, 3969, 3969, 3970, 3970, 3971, 3971, 3972, 3972, 3973, 3973, 3974, 3974, 3975, 3975,
584 3976, 3976, 3977, 3977, 3978, 3978, 3979, 3979, 3980, 3980, 3981, 3981, 3982, 3982, 3983, 3983,
585 3984, 3984, 3985, 3985, 3986, 3986, 3987, 3987, 3988, 3988, 3989, 3989, 3990, 3990, 3991, 3991,
586 3992, 3992, 3993, 3993, 3994, 3994, 3995, 3995, 3996, 3996, 3997, 3997, 3998, 3998, 3999, 3999,
587 4000, 4001, 4001, 4002, 4002, 4003, 4003, 4004, 4004, 4005, 4005, 4006, 4006, 4007, 4007, 4008,
588 4008, 4009, 4009, 4010, 4010, 4011, 4011, 4012, 4012, 4013, 4013, 4014, 4014, 4015, 4015, 4016,
589 4016, 4017, 4017, 4018, 4018, 4019, 4019, 4020, 4020, 4021, 4021, 4022, 4022, 4023, 4023, 4024,
590 4024, 4025, 4025, 4026, 4026, 4027, 4027, 4028, 4028, 4029, 4029, 4030, 4030, 4031, 4031, 4032,
591 4032, 4033, 4033, 4034, 4034, 4035, 4035, 4036, 4036, 4037, 4037, 4038, 4038, 4039, 4039, 4040,
592 4040, 4041, 4041, 4042, 4042, 4043, 4043, 4044, 4044, 4045, 4045, 4046, 4046, 4047, 4047, 4048,
593 4048, 4049, 4049, 4050, 4050, 4051, 4051, 4052, 4052, 4053, 4053, 4054, 4054, 4055, 4055, 4056,
594 4056, 4057, 4057, 4058, 4058, 4059, 4059, 4060, 4060, 4061, 4061, 4062, 4062, 4063, 4063, 4064,
595 4064, 4065, 4065, 4066, 4066, 4067, 4067, 4068, 4068, 4069, 4069, 4070, 4070, 4071, 4071, 4072,
596 4072, 4073, 4073, 4074, 4074, 4075, 4075, 4076, 4076, 4077, 4077, 4078, 4078, 4079, 4079, 4080,
597 4080,
598};
599
600/* Generated table */
601const struct color16 tpg_csc_colors[V4L2_COLORSPACE_BT2020 + 1][TPG_COLOR_CSC_BLACK + 1] = {
602 [V4L2_COLORSPACE_SMPTE170M][0] = { 2939, 2939, 2939 },
603 [V4L2_COLORSPACE_SMPTE170M][1] = { 2953, 2963, 586 },
604 [V4L2_COLORSPACE_SMPTE170M][2] = { 0, 2967, 2937 },
605 [V4L2_COLORSPACE_SMPTE170M][3] = { 88, 2990, 575 },
606 [V4L2_COLORSPACE_SMPTE170M][4] = { 3016, 259, 2933 },
607 [V4L2_COLORSPACE_SMPTE170M][5] = { 3030, 405, 558 },
608 [V4L2_COLORSPACE_SMPTE170M][6] = { 478, 428, 2931 },
609 [V4L2_COLORSPACE_SMPTE170M][7] = { 547, 547, 547 },
90 [V4L2_COLORSPACE_SMPTE240M][0] = { 2926, 2926, 2926 }, 610 [V4L2_COLORSPACE_SMPTE240M][0] = { 2926, 2926, 2926 },
91 [V4L2_COLORSPACE_SMPTE240M][1] = { 2926, 2926, 857 }, 611 [V4L2_COLORSPACE_SMPTE240M][1] = { 2941, 2950, 546 },
92 [V4L2_COLORSPACE_SMPTE240M][2] = { 1594, 2901, 2901 }, 612 [V4L2_COLORSPACE_SMPTE240M][2] = { 0, 2954, 2924 },
93 [V4L2_COLORSPACE_SMPTE240M][3] = { 1594, 2901, 774 }, 613 [V4L2_COLORSPACE_SMPTE240M][3] = { 78, 2978, 536 },
94 [V4L2_COLORSPACE_SMPTE240M][4] = { 2484, 618, 2858 }, 614 [V4L2_COLORSPACE_SMPTE240M][4] = { 3004, 230, 2920 },
95 [V4L2_COLORSPACE_SMPTE240M][5] = { 2484, 618, 617 }, 615 [V4L2_COLORSPACE_SMPTE240M][5] = { 3018, 363, 518 },
96 [V4L2_COLORSPACE_SMPTE240M][6] = { 507, 507, 2832 }, 616 [V4L2_COLORSPACE_SMPTE240M][6] = { 437, 387, 2918 },
97 [V4L2_COLORSPACE_SMPTE240M][7] = { 507, 507, 507 }, 617 [V4L2_COLORSPACE_SMPTE240M][7] = { 507, 507, 507 },
98 [V4L2_COLORSPACE_REC709][0] = { 2939, 2939, 2939 }, 618 [V4L2_COLORSPACE_REC709][0] = { 2939, 2939, 2939 },
99 [V4L2_COLORSPACE_REC709][1] = { 2939, 2939, 547 }, 619 [V4L2_COLORSPACE_REC709][1] = { 2939, 2939, 547 },
@@ -103,21 +623,21 @@ const struct color16 tpg_csc_colors[V4L2_COLORSPACE_SRGB + 1][TPG_COLOR_CSC_BLAC
103 [V4L2_COLORSPACE_REC709][5] = { 2939, 547, 547 }, 623 [V4L2_COLORSPACE_REC709][5] = { 2939, 547, 547 },
104 [V4L2_COLORSPACE_REC709][6] = { 547, 547, 2939 }, 624 [V4L2_COLORSPACE_REC709][6] = { 547, 547, 2939 },
105 [V4L2_COLORSPACE_REC709][7] = { 547, 547, 547 }, 625 [V4L2_COLORSPACE_REC709][7] = { 547, 547, 547 },
106 [V4L2_COLORSPACE_470_SYSTEM_M][0] = { 2894, 2988, 2808 }, 626 [V4L2_COLORSPACE_470_SYSTEM_M][0] = { 2892, 2988, 2807 },
107 [V4L2_COLORSPACE_470_SYSTEM_M][1] = { 2847, 3070, 843 }, 627 [V4L2_COLORSPACE_470_SYSTEM_M][1] = { 2846, 3070, 843 },
108 [V4L2_COLORSPACE_470_SYSTEM_M][2] = { 1656, 2962, 2783 }, 628 [V4L2_COLORSPACE_470_SYSTEM_M][2] = { 1656, 2962, 2783 },
109 [V4L2_COLORSPACE_470_SYSTEM_M][3] = { 1572, 3045, 763 }, 629 [V4L2_COLORSPACE_470_SYSTEM_M][3] = { 1572, 3045, 763 },
110 [V4L2_COLORSPACE_470_SYSTEM_M][4] = { 2477, 229, 2743 }, 630 [V4L2_COLORSPACE_470_SYSTEM_M][4] = { 2476, 229, 2742 },
111 [V4L2_COLORSPACE_470_SYSTEM_M][5] = { 2422, 672, 614 }, 631 [V4L2_COLORSPACE_470_SYSTEM_M][5] = { 2420, 672, 614 },
112 [V4L2_COLORSPACE_470_SYSTEM_M][6] = { 725, 63, 2718 }, 632 [V4L2_COLORSPACE_470_SYSTEM_M][6] = { 725, 63, 2718 },
113 [V4L2_COLORSPACE_470_SYSTEM_M][7] = { 534, 561, 509 }, 633 [V4L2_COLORSPACE_470_SYSTEM_M][7] = { 534, 561, 509 },
114 [V4L2_COLORSPACE_470_SYSTEM_BG][0] = { 2939, 2939, 2939 }, 634 [V4L2_COLORSPACE_470_SYSTEM_BG][0] = { 2939, 2939, 2939 },
115 [V4L2_COLORSPACE_470_SYSTEM_BG][1] = { 2939, 2939, 621 }, 635 [V4L2_COLORSPACE_470_SYSTEM_BG][1] = { 2939, 2939, 464 },
116 [V4L2_COLORSPACE_470_SYSTEM_BG][2] = { 786, 2939, 2939 }, 636 [V4L2_COLORSPACE_470_SYSTEM_BG][2] = { 786, 2939, 2939 },
117 [V4L2_COLORSPACE_470_SYSTEM_BG][3] = { 786, 2939, 621 }, 637 [V4L2_COLORSPACE_470_SYSTEM_BG][3] = { 786, 2939, 464 },
118 [V4L2_COLORSPACE_470_SYSTEM_BG][4] = { 2879, 547, 2923 }, 638 [V4L2_COLORSPACE_470_SYSTEM_BG][4] = { 2879, 547, 2956 },
119 [V4L2_COLORSPACE_470_SYSTEM_BG][5] = { 2879, 547, 547 }, 639 [V4L2_COLORSPACE_470_SYSTEM_BG][5] = { 2879, 547, 547 },
120 [V4L2_COLORSPACE_470_SYSTEM_BG][6] = { 547, 547, 2923 }, 640 [V4L2_COLORSPACE_470_SYSTEM_BG][6] = { 547, 547, 2956 },
121 [V4L2_COLORSPACE_470_SYSTEM_BG][7] = { 547, 547, 547 }, 641 [V4L2_COLORSPACE_470_SYSTEM_BG][7] = { 547, 547, 547 },
122 [V4L2_COLORSPACE_SRGB][0] = { 3056, 3056, 3056 }, 642 [V4L2_COLORSPACE_SRGB][0] = { 3056, 3056, 3056 },
123 [V4L2_COLORSPACE_SRGB][1] = { 3056, 3056, 800 }, 643 [V4L2_COLORSPACE_SRGB][1] = { 3056, 3056, 800 },
@@ -127,6 +647,22 @@ const struct color16 tpg_csc_colors[V4L2_COLORSPACE_SRGB + 1][TPG_COLOR_CSC_BLAC
127 [V4L2_COLORSPACE_SRGB][5] = { 3056, 800, 800 }, 647 [V4L2_COLORSPACE_SRGB][5] = { 3056, 800, 800 },
128 [V4L2_COLORSPACE_SRGB][6] = { 800, 800, 3056 }, 648 [V4L2_COLORSPACE_SRGB][6] = { 800, 800, 3056 },
129 [V4L2_COLORSPACE_SRGB][7] = { 800, 800, 800 }, 649 [V4L2_COLORSPACE_SRGB][7] = { 800, 800, 800 },
650 [V4L2_COLORSPACE_ADOBERGB][0] = { 3033, 3033, 3033 },
651 [V4L2_COLORSPACE_ADOBERGB][1] = { 3033, 3033, 1063 },
652 [V4L2_COLORSPACE_ADOBERGB][2] = { 1828, 3033, 3033 },
653 [V4L2_COLORSPACE_ADOBERGB][3] = { 1828, 3033, 1063 },
654 [V4L2_COLORSPACE_ADOBERGB][4] = { 2633, 851, 2979 },
655 [V4L2_COLORSPACE_ADOBERGB][5] = { 2633, 851, 851 },
656 [V4L2_COLORSPACE_ADOBERGB][6] = { 851, 851, 2979 },
657 [V4L2_COLORSPACE_ADOBERGB][7] = { 851, 851, 851 },
658 [V4L2_COLORSPACE_BT2020][0] = { 2939, 2939, 2939 },
659 [V4L2_COLORSPACE_BT2020][1] = { 2877, 2923, 1058 },
660 [V4L2_COLORSPACE_BT2020][2] = { 1837, 2840, 2916 },
661 [V4L2_COLORSPACE_BT2020][3] = { 1734, 2823, 993 },
662 [V4L2_COLORSPACE_BT2020][4] = { 2427, 961, 2812 },
663 [V4L2_COLORSPACE_BT2020][5] = { 2351, 912, 648 },
664 [V4L2_COLORSPACE_BT2020][6] = { 792, 618, 2788 },
665 [V4L2_COLORSPACE_BT2020][7] = { 547, 547, 547 },
130}; 666};
131 667
132#else 668#else
@@ -138,29 +674,40 @@ const struct color16 tpg_csc_colors[V4L2_COLORSPACE_SRGB + 1][TPG_COLOR_CSC_BLAC
138#include <stdlib.h> 674#include <stdlib.h>
139 675
140static const double rec709_to_ntsc1953[3][3] = { 676static const double rec709_to_ntsc1953[3][3] = {
141 { 0.6698, 0.2678, 0.0323 }, 677 { 0.6689794, 0.2678309, 0.0323187 },
142 { 0.0185, 1.0742, -0.0603 }, 678 { 0.0184901, 1.0742442, -0.0602820 },
143 { 0.0162, 0.0432, 0.8551 } 679 { 0.0162259, 0.0431716, 0.8549253 }
144}; 680};
145 681
146static const double rec709_to_ebu[3][3] = { 682static const double rec709_to_ebu[3][3] = {
147 { 0.9578, 0.0422, 0 }, 683 { 0.9578221, 0.0421779, -0.0000000 },
148 { 0 , 1 , 0 }, 684 { -0.0000000, 1.0000000, 0.0000000 },
149 { 0 , 0.0118, 0.9882 } 685 { -0.0000000, -0.0119367, 1.0119367 }
150}; 686};
151 687
152static const double rec709_to_170m[3][3] = { 688static const double rec709_to_170m[3][3] = {
153 { 1.0654, -0.0554, -0.0010 }, 689 { 1.0653640, -0.0553900, -0.0099740 },
154 { -0.0196, 1.0364, -0.0167 }, 690 { -0.0196361, 1.0363630, -0.0167269 },
155 { 0.0016, 0.0044, 0.9940 } 691 { 0.0016327, 0.0044133, 0.9939540 },
156}; 692};
157 693
158static const double rec709_to_240m[3][3] = { 694static const double rec709_to_240m[3][3] = {
159 { 0.7151, 0.2849, 0 }, 695 { 1.0653640, -0.0553900, -0.0099740 },
160 { 0.0179, 0.9821, 0 }, 696 { -0.0196361, 1.0363630, -0.0167269 },
161 { 0.0177, 0.0472, 0.9350 } 697 { 0.0016327, 0.0044133, 0.9939540 },
698};
699
700static const double rec709_to_adobergb[3][3] = {
701 { 0.7151627, 0.2848373, -0.0000000 },
702 { 0.0000000, 1.0000000, 0.0000000 },
703 { -0.0000000, 0.0411705, 0.9588295 },
162}; 704};
163 705
706static const double rec709_to_bt2020[3][3] = {
707 { 0.6274524, 0.3292485, 0.0432991 },
708 { 0.0691092, 0.9195311, 0.0113597 },
709 { 0.0163976, 0.0880301, 0.8955723 },
710};
164 711
165static void mult_matrix(double *r, double *g, double *b, const double m[3][3]) 712static void mult_matrix(double *r, double *g, double *b, const double m[3][3])
166{ 713{
@@ -176,7 +723,18 @@ static void mult_matrix(double *r, double *g, double *b, const double m[3][3])
176 723
177static double transfer_srgb_to_rgb(double v) 724static double transfer_srgb_to_rgb(double v)
178{ 725{
179 return (v <= 0.03928) ? v / 12.92 : pow((v + 0.055) / 1.055, 2.4); 726 if (v < -0.04045)
727 return pow((-v + 0.055) / 1.055, 2.4);
728 return (v <= 0.04045) ? v / 12.92 : pow((v + 0.055) / 1.055, 2.4);
729}
730
731static double transfer_rgb_to_srgb(double v)
732{
733 if (v <= -0.0031308)
734 return -1.055 * pow(-v, 1.0 / 2.4) + 0.055;
735 if (v <= 0.0031308)
736 return v * 12.92;
737 return 1.055 * pow(v, 1.0 / 2.4) - 0.055;
180} 738}
181 739
182static double transfer_rgb_to_smpte240m(double v) 740static double transfer_rgb_to_smpte240m(double v)
@@ -186,9 +744,21 @@ static double transfer_rgb_to_smpte240m(double v)
186 744
187static double transfer_rgb_to_rec709(double v) 745static double transfer_rgb_to_rec709(double v)
188{ 746{
747 if (v <= -0.018)
748 return -1.099 * pow(-v, 0.45) + 0.099;
189 return (v < 0.018) ? v * 4.5 : 1.099 * pow(v, 0.45) - 0.099; 749 return (v < 0.018) ? v * 4.5 : 1.099 * pow(v, 0.45) - 0.099;
190} 750}
191 751
752static double transfer_rec709_to_rgb(double v)
753{
754 return (v < 0.081) ? v / 4.5 : pow((v + 0.099) / 1.099, 1.0 / 0.45);
755}
756
757static double transfer_rgb_to_adobergb(double v)
758{
759 return pow(v, 1.0 / 2.19921875);
760}
761
192static double transfer_srgb_to_rec709(double v) 762static double transfer_srgb_to_rec709(double v)
193{ 763{
194 return transfer_rgb_to_rec709(transfer_srgb_to_rgb(v)); 764 return transfer_rgb_to_rec709(transfer_srgb_to_rgb(v));
@@ -196,6 +766,8 @@ static double transfer_srgb_to_rec709(double v)
196 766
197static void csc(enum v4l2_colorspace colorspace, double *r, double *g, double *b) 767static void csc(enum v4l2_colorspace colorspace, double *r, double *g, double *b)
198{ 768{
769 int clamp = 1;
770
199 /* Convert the primaries of Rec. 709 Linear RGB */ 771 /* Convert the primaries of Rec. 709 Linear RGB */
200 switch (colorspace) { 772 switch (colorspace) {
201 case V4L2_COLORSPACE_SMPTE240M: 773 case V4L2_COLORSPACE_SMPTE240M:
@@ -222,15 +794,29 @@ static void csc(enum v4l2_colorspace colorspace, double *r, double *g, double *b
222 *b = transfer_srgb_to_rgb(*b); 794 *b = transfer_srgb_to_rgb(*b);
223 mult_matrix(r, g, b, rec709_to_ntsc1953); 795 mult_matrix(r, g, b, rec709_to_ntsc1953);
224 break; 796 break;
797 case V4L2_COLORSPACE_ADOBERGB:
798 *r = transfer_srgb_to_rgb(*r);
799 *g = transfer_srgb_to_rgb(*g);
800 *b = transfer_srgb_to_rgb(*b);
801 mult_matrix(r, g, b, rec709_to_adobergb);
802 break;
803 case V4L2_COLORSPACE_BT2020:
804 *r = transfer_srgb_to_rgb(*r);
805 *g = transfer_srgb_to_rgb(*g);
806 *b = transfer_srgb_to_rgb(*b);
807 mult_matrix(r, g, b, rec709_to_bt2020);
808 break;
225 case V4L2_COLORSPACE_SRGB: 809 case V4L2_COLORSPACE_SRGB:
226 case V4L2_COLORSPACE_REC709: 810 case V4L2_COLORSPACE_REC709:
227 default: 811 default:
228 break; 812 break;
229 } 813 }
230 814
231 *r = ((*r) < 0) ? 0 : (((*r) > 1) ? 1 : (*r)); 815 if (clamp) {
232 *g = ((*g) < 0) ? 0 : (((*g) > 1) ? 1 : (*g)); 816 *r = ((*r) < 0) ? 0 : (((*r) > 1) ? 1 : (*r));
233 *b = ((*b) < 0) ? 0 : (((*b) > 1) ? 1 : (*b)); 817 *g = ((*g) < 0) ? 0 : (((*g) > 1) ? 1 : (*g));
818 *b = ((*b) < 0) ? 0 : (((*b) > 1) ? 1 : (*b));
819 }
234 820
235 /* Encode to gamma corrected colorspace */ 821 /* Encode to gamma corrected colorspace */
236 switch (colorspace) { 822 switch (colorspace) {
@@ -242,12 +828,18 @@ static void csc(enum v4l2_colorspace colorspace, double *r, double *g, double *b
242 case V4L2_COLORSPACE_SMPTE170M: 828 case V4L2_COLORSPACE_SMPTE170M:
243 case V4L2_COLORSPACE_470_SYSTEM_M: 829 case V4L2_COLORSPACE_470_SYSTEM_M:
244 case V4L2_COLORSPACE_470_SYSTEM_BG: 830 case V4L2_COLORSPACE_470_SYSTEM_BG:
831 case V4L2_COLORSPACE_BT2020:
245 *r = transfer_rgb_to_rec709(*r); 832 *r = transfer_rgb_to_rec709(*r);
246 *g = transfer_rgb_to_rec709(*g); 833 *g = transfer_rgb_to_rec709(*g);
247 *b = transfer_rgb_to_rec709(*b); 834 *b = transfer_rgb_to_rec709(*b);
248 break; 835 break;
249 case V4L2_COLORSPACE_SRGB: 836 case V4L2_COLORSPACE_SRGB:
250 break; 837 break;
838 case V4L2_COLORSPACE_ADOBERGB:
839 *r = transfer_rgb_to_adobergb(*r);
840 *g = transfer_rgb_to_adobergb(*g);
841 *b = transfer_rgb_to_adobergb(*b);
842 break;
251 case V4L2_COLORSPACE_REC709: 843 case V4L2_COLORSPACE_REC709:
252 default: 844 default:
253 *r = transfer_srgb_to_rec709(*r); 845 *r = transfer_srgb_to_rec709(*r);
@@ -269,6 +861,8 @@ int main(int argc, char **argv)
269 V4L2_COLORSPACE_470_SYSTEM_BG, 861 V4L2_COLORSPACE_470_SYSTEM_BG,
270 0, 862 0,
271 V4L2_COLORSPACE_SRGB, 863 V4L2_COLORSPACE_SRGB,
864 V4L2_COLORSPACE_ADOBERGB,
865 V4L2_COLORSPACE_BT2020,
272 }; 866 };
273 static const char * const colorspace_names[] = { 867 static const char * const colorspace_names[] = {
274 "", 868 "",
@@ -280,13 +874,39 @@ int main(int argc, char **argv)
280 "V4L2_COLORSPACE_470_SYSTEM_BG", 874 "V4L2_COLORSPACE_470_SYSTEM_BG",
281 "", 875 "",
282 "V4L2_COLORSPACE_SRGB", 876 "V4L2_COLORSPACE_SRGB",
877 "V4L2_COLORSPACE_ADOBERGB",
878 "V4L2_COLORSPACE_BT2020",
283 }; 879 };
284 int i; 880 int i;
285 int c; 881 int c;
286 882
287 printf("/* Generated table */\n"); 883 printf("/* Generated table */\n");
288 printf("const struct color16 tpg_csc_colors[V4L2_COLORSPACE_SRGB + 1][TPG_COLOR_CSC_BLACK + 1] = {\n"); 884 printf("const unsigned short tpg_rec709_to_linear[255 * 16 + 1] = {");
289 for (c = 0; c <= V4L2_COLORSPACE_SRGB; c++) { 885 for (i = 0; i <= 255 * 16; i++) {
886 if (i % 16 == 0)
887 printf("\n\t");
888 printf("%4d,%s",
889 (int)(0.5 + 16.0 * 255.0 *
890 transfer_rec709_to_rgb(i / (16.0 * 255.0))),
891 i % 16 == 15 || i == 255 * 16 ? "" : " ");
892 }
893 printf("\n};\n\n");
894
895 printf("/* Generated table */\n");
896 printf("const unsigned short tpg_linear_to_rec709[255 * 16 + 1] = {");
897 for (i = 0; i <= 255 * 16; i++) {
898 if (i % 16 == 0)
899 printf("\n\t");
900 printf("%4d,%s",
901 (int)(0.5 + 16.0 * 255.0 *
902 transfer_rgb_to_rec709(i / (16.0 * 255.0))),
903 i % 16 == 15 || i == 255 * 16 ? "" : " ");
904 }
905 printf("\n};\n\n");
906
907 printf("/* Generated table */\n");
908 printf("const struct color16 tpg_csc_colors[V4L2_COLORSPACE_BT2020 + 1][TPG_COLOR_CSC_BLACK + 1] = {\n");
909 for (c = 0; c <= V4L2_COLORSPACE_BT2020; c++) {
290 for (i = 0; i <= TPG_COLOR_CSC_BLACK; i++) { 910 for (i = 0; i <= TPG_COLOR_CSC_BLACK; i++) {
291 double r, g, b; 911 double r, g, b;
292 912
diff --git a/drivers/media/platform/vivid/vivid-tpg-colors.h b/drivers/media/platform/vivid/vivid-tpg-colors.h
index a2678fbec256..2c333356451c 100644
--- a/drivers/media/platform/vivid/vivid-tpg-colors.h
+++ b/drivers/media/platform/vivid/vivid-tpg-colors.h
@@ -59,6 +59,8 @@ enum tpg_color {
59}; 59};
60 60
61extern const struct color tpg_colors[TPG_COLOR_MAX]; 61extern const struct color tpg_colors[TPG_COLOR_MAX];
62extern const struct color16 tpg_csc_colors[V4L2_COLORSPACE_SRGB + 1][TPG_COLOR_CSC_BLACK + 1]; 62extern const unsigned short tpg_rec709_to_linear[255 * 16 + 1];
63extern const unsigned short tpg_linear_to_rec709[255 * 16 + 1];
64extern const struct color16 tpg_csc_colors[V4L2_COLORSPACE_BT2020 + 1][TPG_COLOR_CSC_BLACK + 1];
63 65
64#endif 66#endif
diff --git a/drivers/media/platform/vivid/vivid-tpg.c b/drivers/media/platform/vivid/vivid-tpg.c
index cbcd6250e7b2..fc9c6536ba02 100644
--- a/drivers/media/platform/vivid/vivid-tpg.c
+++ b/drivers/media/platform/vivid/vivid-tpg.c
@@ -296,127 +296,193 @@ static enum tpg_color tpg_get_textfg_color(struct tpg_data *tpg)
296 } 296 }
297} 297}
298 298
299static u16 color_to_y(struct tpg_data *tpg, int r, int g, int b) 299static inline int rec709_to_linear(int v)
300{ 300{
301 switch (tpg->colorspace) { 301 v = clamp(v, 0, 0xff0);
302 case V4L2_COLORSPACE_SMPTE170M: 302 return tpg_rec709_to_linear[v];
303 case V4L2_COLORSPACE_470_SYSTEM_M:
304 case V4L2_COLORSPACE_470_SYSTEM_BG:
305 return ((16829 * r + 33039 * g + 6416 * b + 16 * 32768) >> 16) + (16 << 4);
306 case V4L2_COLORSPACE_SMPTE240M:
307 return ((11932 * r + 39455 * g + 4897 * b + 16 * 32768) >> 16) + (16 << 4);
308 case V4L2_COLORSPACE_REC709:
309 case V4L2_COLORSPACE_SRGB:
310 default:
311 return ((11966 * r + 40254 * g + 4064 * b + 16 * 32768) >> 16) + (16 << 4);
312 }
313} 303}
314 304
315static u16 color_to_cb(struct tpg_data *tpg, int r, int g, int b) 305static inline int linear_to_rec709(int v)
316{ 306{
317 switch (tpg->colorspace) { 307 v = clamp(v, 0, 0xff0);
318 case V4L2_COLORSPACE_SMPTE170M: 308 return tpg_linear_to_rec709[v];
319 case V4L2_COLORSPACE_470_SYSTEM_M:
320 case V4L2_COLORSPACE_470_SYSTEM_BG:
321 return ((-9714 * r - 19070 * g + 28784 * b + 16 * 32768) >> 16) + (128 << 4);
322 case V4L2_COLORSPACE_SMPTE240M:
323 return ((-6684 * r - 22100 * g + 28784 * b + 16 * 32768) >> 16) + (128 << 4);
324 case V4L2_COLORSPACE_REC709:
325 case V4L2_COLORSPACE_SRGB:
326 default:
327 return ((-6596 * r - 22189 * g + 28784 * b + 16 * 32768) >> 16) + (128 << 4);
328 }
329} 309}
330 310
331static u16 color_to_cr(struct tpg_data *tpg, int r, int g, int b) 311static void rgb2ycbcr(const int m[3][3], int r, int g, int b,
312 int y_offset, int *y, int *cb, int *cr)
332{ 313{
333 switch (tpg->colorspace) { 314 *y = ((m[0][0] * r + m[0][1] * g + m[0][2] * b) >> 16) + (y_offset << 4);
334 case V4L2_COLORSPACE_SMPTE170M: 315 *cb = ((m[1][0] * r + m[1][1] * g + m[1][2] * b) >> 16) + (128 << 4);
335 case V4L2_COLORSPACE_470_SYSTEM_M: 316 *cr = ((m[2][0] * r + m[2][1] * g + m[2][2] * b) >> 16) + (128 << 4);
336 case V4L2_COLORSPACE_470_SYSTEM_BG:
337 return ((28784 * r - 24103 * g - 4681 * b + 16 * 32768) >> 16) + (128 << 4);
338 case V4L2_COLORSPACE_SMPTE240M:
339 return ((28784 * r - 25606 * g - 3178 * b + 16 * 32768) >> 16) + (128 << 4);
340 case V4L2_COLORSPACE_REC709:
341 case V4L2_COLORSPACE_SRGB:
342 default:
343 return ((28784 * r - 26145 * g - 2639 * b + 16 * 32768) >> 16) + (128 << 4);
344 }
345} 317}
346 318
347static u16 ycbcr_to_r(struct tpg_data *tpg, int y, int cb, int cr) 319static void color_to_ycbcr(struct tpg_data *tpg, int r, int g, int b,
320 int *y, int *cb, int *cr)
348{ 321{
349 int r; 322#define COEFF(v, r) ((int)(0.5 + (v) * (r) * 256.0))
350 323
351 y -= 16 << 4; 324 static const int bt601[3][3] = {
352 cb -= 128 << 4; 325 { COEFF(0.299, 219), COEFF(0.587, 219), COEFF(0.114, 219) },
353 cr -= 128 << 4; 326 { COEFF(-0.169, 224), COEFF(-0.331, 224), COEFF(0.5, 224) },
354 switch (tpg->colorspace) { 327 { COEFF(0.5, 224), COEFF(-0.419, 224), COEFF(-0.081, 224) },
355 case V4L2_COLORSPACE_SMPTE170M: 328 };
356 case V4L2_COLORSPACE_470_SYSTEM_M: 329 static const int bt601_full[3][3] = {
357 case V4L2_COLORSPACE_470_SYSTEM_BG: 330 { COEFF(0.299, 255), COEFF(0.587, 255), COEFF(0.114, 255) },
358 r = 4769 * y + 6537 * cr; 331 { COEFF(-0.169, 255), COEFF(-0.331, 255), COEFF(0.5, 255) },
332 { COEFF(0.5, 255), COEFF(-0.419, 255), COEFF(-0.081, 255) },
333 };
334 static const int rec709[3][3] = {
335 { COEFF(0.2126, 219), COEFF(0.7152, 219), COEFF(0.0722, 219) },
336 { COEFF(-0.1146, 224), COEFF(-0.3854, 224), COEFF(0.5, 224) },
337 { COEFF(0.5, 224), COEFF(-0.4542, 224), COEFF(-0.0458, 224) },
338 };
339 static const int rec709_full[3][3] = {
340 { COEFF(0.2126, 255), COEFF(0.7152, 255), COEFF(0.0722, 255) },
341 { COEFF(-0.1146, 255), COEFF(-0.3854, 255), COEFF(0.5, 255) },
342 { COEFF(0.5, 255), COEFF(-0.4542, 255), COEFF(-0.0458, 255) },
343 };
344 static const int smpte240m[3][3] = {
345 { COEFF(0.212, 219), COEFF(0.701, 219), COEFF(0.087, 219) },
346 { COEFF(-0.116, 224), COEFF(-0.384, 224), COEFF(0.5, 224) },
347 { COEFF(0.5, 224), COEFF(-0.445, 224), COEFF(-0.055, 224) },
348 };
349 static const int bt2020[3][3] = {
350 { COEFF(0.2726, 219), COEFF(0.6780, 219), COEFF(0.0593, 219) },
351 { COEFF(-0.1396, 224), COEFF(-0.3604, 224), COEFF(0.5, 224) },
352 { COEFF(0.5, 224), COEFF(-0.4629, 224), COEFF(-0.0405, 224) },
353 };
354 bool full = tpg->real_quantization == V4L2_QUANTIZATION_FULL_RANGE;
355 int lin_y, yc;
356
357 switch (tpg->real_ycbcr_enc) {
358 case V4L2_YCBCR_ENC_601:
359 case V4L2_YCBCR_ENC_XV601:
360 case V4L2_YCBCR_ENC_SYCC:
361 rgb2ycbcr(full ? bt601_full : bt601, r, g, b, 16, y, cb, cr);
362 break;
363 case V4L2_YCBCR_ENC_BT2020:
364 rgb2ycbcr(bt2020, r, g, b, 16, y, cb, cr);
359 break; 365 break;
360 case V4L2_COLORSPACE_SMPTE240M: 366 case V4L2_YCBCR_ENC_BT2020_CONST_LUM:
361 r = 4769 * y + 7376 * cr; 367 lin_y = (COEFF(0.2627, 255) * rec709_to_linear(r) +
368 COEFF(0.6780, 255) * rec709_to_linear(g) +
369 COEFF(0.0593, 255) * rec709_to_linear(b)) >> 16;
370 yc = linear_to_rec709(lin_y);
371 *y = (yc * 219) / 255 + (16 << 4);
372 if (b <= yc)
373 *cb = (((b - yc) * COEFF(1.0 / 1.9404, 224)) >> 16) + (128 << 4);
374 else
375 *cb = (((b - yc) * COEFF(1.0 / 1.5816, 224)) >> 16) + (128 << 4);
376 if (r <= yc)
377 *cr = (((r - yc) * COEFF(1.0 / 1.7184, 224)) >> 16) + (128 << 4);
378 else
379 *cr = (((r - yc) * COEFF(1.0 / 0.9936, 224)) >> 16) + (128 << 4);
362 break; 380 break;
363 case V4L2_COLORSPACE_REC709: 381 case V4L2_YCBCR_ENC_SMPTE240M:
364 case V4L2_COLORSPACE_SRGB: 382 rgb2ycbcr(smpte240m, r, g, b, 16, y, cb, cr);
383 break;
384 case V4L2_YCBCR_ENC_709:
385 case V4L2_YCBCR_ENC_XV709:
365 default: 386 default:
366 r = 4769 * y + 7343 * cr; 387 rgb2ycbcr(full ? rec709_full : rec709, r, g, b, 0, y, cb, cr);
367 break; 388 break;
368 } 389 }
369 return clamp(r >> 12, 0, 0xff0);
370} 390}
371 391
372static u16 ycbcr_to_g(struct tpg_data *tpg, int y, int cb, int cr) 392static void ycbcr2rgb(const int m[3][3], int y, int cb, int cr,
393 int y_offset, int *r, int *g, int *b)
373{ 394{
374 int g; 395 y -= y_offset << 4;
375
376 y -= 16 << 4;
377 cb -= 128 << 4; 396 cb -= 128 << 4;
378 cr -= 128 << 4; 397 cr -= 128 << 4;
379 switch (tpg->colorspace) { 398 *r = m[0][0] * y + m[0][1] * cb + m[0][2] * cr;
380 case V4L2_COLORSPACE_SMPTE170M: 399 *g = m[1][0] * y + m[1][1] * cb + m[1][2] * cr;
381 case V4L2_COLORSPACE_470_SYSTEM_M: 400 *b = m[2][0] * y + m[2][1] * cb + m[2][2] * cr;
382 case V4L2_COLORSPACE_470_SYSTEM_BG: 401 *r = clamp(*r >> 12, 0, 0xff0);
383 g = 4769 * y - 1605 * cb - 3330 * cr; 402 *g = clamp(*g >> 12, 0, 0xff0);
384 break; 403 *b = clamp(*b >> 12, 0, 0xff0);
385 case V4L2_COLORSPACE_SMPTE240M:
386 g = 4769 * y - 1055 * cb - 2341 * cr;
387 break;
388 case V4L2_COLORSPACE_REC709:
389 case V4L2_COLORSPACE_SRGB:
390 default:
391 g = 4769 * y - 873 * cb - 2183 * cr;
392 break;
393 }
394 return clamp(g >> 12, 0, 0xff0);
395} 404}
396 405
397static u16 ycbcr_to_b(struct tpg_data *tpg, int y, int cb, int cr) 406static void ycbcr_to_color(struct tpg_data *tpg, int y, int cb, int cr,
407 int *r, int *g, int *b)
398{ 408{
399 int b; 409#undef COEFF
410#define COEFF(v, r) ((int)(0.5 + (v) * ((255.0 * 255.0 * 16.0) / (r))))
411 static const int bt601[3][3] = {
412 { COEFF(1, 219), COEFF(0, 224), COEFF(1.4020, 224) },
413 { COEFF(1, 219), COEFF(-0.3441, 224), COEFF(-0.7141, 224) },
414 { COEFF(1, 219), COEFF(1.7720, 224), COEFF(0, 224) },
415 };
416 static const int bt601_full[3][3] = {
417 { COEFF(1, 255), COEFF(0, 255), COEFF(1.4020, 255) },
418 { COEFF(1, 255), COEFF(-0.3441, 255), COEFF(-0.7141, 255) },
419 { COEFF(1, 255), COEFF(1.7720, 255), COEFF(0, 255) },
420 };
421 static const int rec709[3][3] = {
422 { COEFF(1, 219), COEFF(0, 224), COEFF(1.5748, 224) },
423 { COEFF(1, 219), COEFF(-0.1873, 224), COEFF(-0.4681, 224) },
424 { COEFF(1, 219), COEFF(1.8556, 224), COEFF(0, 224) },
425 };
426 static const int rec709_full[3][3] = {
427 { COEFF(1, 255), COEFF(0, 255), COEFF(1.5748, 255) },
428 { COEFF(1, 255), COEFF(-0.1873, 255), COEFF(-0.4681, 255) },
429 { COEFF(1, 255), COEFF(1.8556, 255), COEFF(0, 255) },
430 };
431 static const int smpte240m[3][3] = {
432 { COEFF(1, 219), COEFF(0, 224), COEFF(1.5756, 224) },
433 { COEFF(1, 219), COEFF(-0.2253, 224), COEFF(-0.4767, 224) },
434 { COEFF(1, 219), COEFF(1.8270, 224), COEFF(0, 224) },
435 };
436 static const int bt2020[3][3] = {
437 { COEFF(1, 219), COEFF(0, 224), COEFF(1.4746, 224) },
438 { COEFF(1, 219), COEFF(-0.1646, 224), COEFF(-0.5714, 224) },
439 { COEFF(1, 219), COEFF(1.8814, 224), COEFF(0, 224) },
440 };
441 bool full = tpg->real_quantization == V4L2_QUANTIZATION_FULL_RANGE;
442 int lin_r, lin_g, lin_b, lin_y;
443
444 switch (tpg->real_ycbcr_enc) {
445 case V4L2_YCBCR_ENC_601:
446 case V4L2_YCBCR_ENC_XV601:
447 case V4L2_YCBCR_ENC_SYCC:
448 ycbcr2rgb(full ? bt601_full : bt601, y, cb, cr, 16, r, g, b);
449 break;
450 case V4L2_YCBCR_ENC_BT2020:
451 ycbcr2rgb(bt2020, y, cb, cr, 16, r, g, b);
452 break;
453 case V4L2_YCBCR_ENC_BT2020_CONST_LUM:
454 y -= 16 << 4;
455 cb -= 128 << 4;
456 cr -= 128 << 4;
400 457
401 y -= 16 << 4; 458 if (cb <= 0)
402 cb -= 128 << 4; 459 *b = COEFF(1.0, 219) * y + COEFF(1.9404, 224) * cb;
403 cr -= 128 << 4; 460 else
404 switch (tpg->colorspace) { 461 *b = COEFF(1.0, 219) * y + COEFF(1.5816, 224) * cb;
405 case V4L2_COLORSPACE_SMPTE170M: 462 *b = *b >> 12;
406 case V4L2_COLORSPACE_470_SYSTEM_M: 463 if (cr <= 0)
407 case V4L2_COLORSPACE_470_SYSTEM_BG: 464 *r = COEFF(1.0, 219) * y + COEFF(1.7184, 224) * cr;
408 b = 4769 * y + 7343 * cb; 465 else
466 *r = COEFF(1.0, 219) * y + COEFF(0.9936, 224) * cr;
467 *r = *r >> 12;
468 lin_r = rec709_to_linear(*r);
469 lin_b = rec709_to_linear(*b);
470 lin_y = rec709_to_linear((y * 255) / 219);
471
472 lin_g = COEFF(1.0 / 0.6780, 255) * lin_y -
473 COEFF(0.2627 / 0.6780, 255) * lin_r -
474 COEFF(0.0593 / 0.6780, 255) * lin_b;
475 *g = linear_to_rec709(lin_g >> 12);
409 break; 476 break;
410 case V4L2_COLORSPACE_SMPTE240M: 477 case V4L2_YCBCR_ENC_SMPTE240M:
411 b = 4769 * y + 8552 * cb; 478 ycbcr2rgb(smpte240m, y, cb, cr, 16, r, g, b);
412 break; 479 break;
413 case V4L2_COLORSPACE_REC709: 480 case V4L2_YCBCR_ENC_709:
414 case V4L2_COLORSPACE_SRGB: 481 case V4L2_YCBCR_ENC_XV709:
415 default: 482 default:
416 b = 4769 * y + 8652 * cb; 483 ycbcr2rgb(full ? rec709_full : rec709, y, cb, cr, 16, r, g, b);
417 break; 484 break;
418 } 485 }
419 return clamp(b >> 12, 0, 0xff0);
420} 486}
421 487
422/* precalculate color bar values to speed up rendering */ 488/* precalculate color bar values to speed up rendering */
@@ -456,18 +522,17 @@ static void precalculate_color(struct tpg_data *tpg, int k)
456 g <<= 4; 522 g <<= 4;
457 b <<= 4; 523 b <<= 4;
458 } 524 }
459 if (tpg->qual == TPG_QUAL_GRAY) 525 if (tpg->qual == TPG_QUAL_GRAY) {
460 r = g = b = color_to_y(tpg, r, g, b); 526 /* Rec. 709 Luma function */
527 /* (0.2126, 0.7152, 0.0722) * (255 * 256) */
528 r = g = b = ((13879 * r + 46688 * g + 4713 * b) >> 16) + (16 << 4);
529 }
461 530
462 /* 531 /*
463 * The assumption is that the RGB output is always full range, 532 * The assumption is that the RGB output is always full range,
464 * so only if the rgb_range overrides the 'real' rgb range do 533 * so only if the rgb_range overrides the 'real' rgb range do
465 * we need to convert the RGB values. 534 * we need to convert the RGB values.
466 * 535 *
467 * Currently there is no way of signalling to userspace if you
468 * are actually giving it limited range RGB (or full range
469 * YUV for that matter).
470 *
471 * Remember that r, g and b are still in the 0 - 0xff0 range. 536 * Remember that r, g and b are still in the 0 - 0xff0 range.
472 */ 537 */
473 if (tpg->real_rgb_range == V4L2_DV_RGB_RANGE_LIMITED && 538 if (tpg->real_rgb_range == V4L2_DV_RGB_RANGE_LIMITED &&
@@ -497,12 +562,12 @@ static void precalculate_color(struct tpg_data *tpg, int k)
497 if (tpg->brightness != 128 || tpg->contrast != 128 || 562 if (tpg->brightness != 128 || tpg->contrast != 128 ||
498 tpg->saturation != 128 || tpg->hue) { 563 tpg->saturation != 128 || tpg->hue) {
499 /* Implement these operations */ 564 /* Implement these operations */
565 int y, cb, cr;
566 int tmp_cb, tmp_cr;
500 567
501 /* First convert to YCbCr */ 568 /* First convert to YCbCr */
502 int y = color_to_y(tpg, r, g, b); /* Luma */ 569
503 int cb = color_to_cb(tpg, r, g, b); /* Cb */ 570 color_to_ycbcr(tpg, r, g, b, &y, &cb, &cr);
504 int cr = color_to_cr(tpg, r, g, b); /* Cr */
505 int tmp_cb, tmp_cr;
506 571
507 y = (16 << 4) + ((y - (16 << 4)) * tpg->contrast) / 128; 572 y = (16 << 4) + ((y - (16 << 4)) * tpg->contrast) / 128;
508 y += (tpg->brightness << 4) - (128 << 4); 573 y += (tpg->brightness << 4) - (128 << 4);
@@ -520,21 +585,29 @@ static void precalculate_color(struct tpg_data *tpg, int k)
520 tpg->colors[k][2] = clamp(cr >> 4, 1, 254); 585 tpg->colors[k][2] = clamp(cr >> 4, 1, 254);
521 return; 586 return;
522 } 587 }
523 r = ycbcr_to_r(tpg, y, cb, cr); 588 ycbcr_to_color(tpg, y, cb, cr, &r, &g, &b);
524 g = ycbcr_to_g(tpg, y, cb, cr);
525 b = ycbcr_to_b(tpg, y, cb, cr);
526 } 589 }
527 590
528 if (tpg->is_yuv) { 591 if (tpg->is_yuv) {
529 /* Convert to YCbCr */ 592 /* Convert to YCbCr */
530 u16 y = color_to_y(tpg, r, g, b); /* Luma */ 593 int y, cb, cr;
531 u16 cb = color_to_cb(tpg, r, g, b); /* Cb */ 594
532 u16 cr = color_to_cr(tpg, r, g, b); /* Cr */ 595 color_to_ycbcr(tpg, r, g, b, &y, &cb, &cr);
533 596
597 if (tpg->real_quantization == V4L2_QUANTIZATION_LIM_RANGE) {
598 y = clamp(y, 16 << 4, 235 << 4);
599 cb = clamp(cb, 16 << 4, 240 << 4);
600 cr = clamp(cr, 16 << 4, 240 << 4);
601 }
534 tpg->colors[k][0] = clamp(y >> 4, 1, 254); 602 tpg->colors[k][0] = clamp(y >> 4, 1, 254);
535 tpg->colors[k][1] = clamp(cb >> 4, 1, 254); 603 tpg->colors[k][1] = clamp(cb >> 4, 1, 254);
536 tpg->colors[k][2] = clamp(cr >> 4, 1, 254); 604 tpg->colors[k][2] = clamp(cr >> 4, 1, 254);
537 } else { 605 } else {
606 if (tpg->real_quantization == V4L2_QUANTIZATION_LIM_RANGE) {
607 r = (r * 219) / 255 + (16 << 4);
608 g = (g * 219) / 255 + (16 << 4);
609 b = (b * 219) / 255 + (16 << 4);
610 }
538 switch (tpg->fourcc) { 611 switch (tpg->fourcc) {
539 case V4L2_PIX_FMT_RGB565: 612 case V4L2_PIX_FMT_RGB565:
540 case V4L2_PIX_FMT_RGB565X: 613 case V4L2_PIX_FMT_RGB565X:
@@ -1152,6 +1225,46 @@ static void tpg_recalc(struct tpg_data *tpg)
1152 if (tpg->recalc_colors) { 1225 if (tpg->recalc_colors) {
1153 tpg->recalc_colors = false; 1226 tpg->recalc_colors = false;
1154 tpg->recalc_lines = true; 1227 tpg->recalc_lines = true;
1228 tpg->real_ycbcr_enc = tpg->ycbcr_enc;
1229 tpg->real_quantization = tpg->quantization;
1230 if (tpg->ycbcr_enc == V4L2_YCBCR_ENC_DEFAULT) {
1231 switch (tpg->colorspace) {
1232 case V4L2_COLORSPACE_REC709:
1233 tpg->real_ycbcr_enc = V4L2_YCBCR_ENC_709;
1234 break;
1235 case V4L2_COLORSPACE_SRGB:
1236 tpg->real_ycbcr_enc = V4L2_YCBCR_ENC_SYCC;
1237 break;
1238 case V4L2_COLORSPACE_BT2020:
1239 tpg->real_ycbcr_enc = V4L2_YCBCR_ENC_BT2020;
1240 break;
1241 case V4L2_COLORSPACE_SMPTE240M:
1242 tpg->real_ycbcr_enc = V4L2_YCBCR_ENC_SMPTE240M;
1243 break;
1244 case V4L2_COLORSPACE_SMPTE170M:
1245 case V4L2_COLORSPACE_470_SYSTEM_M:
1246 case V4L2_COLORSPACE_470_SYSTEM_BG:
1247 case V4L2_COLORSPACE_ADOBERGB:
1248 default:
1249 tpg->real_ycbcr_enc = V4L2_YCBCR_ENC_601;
1250 break;
1251 }
1252 }
1253 if (tpg->quantization == V4L2_QUANTIZATION_DEFAULT) {
1254 tpg->real_quantization = V4L2_QUANTIZATION_FULL_RANGE;
1255 if (tpg->is_yuv) {
1256 switch (tpg->real_ycbcr_enc) {
1257 case V4L2_YCBCR_ENC_SYCC:
1258 case V4L2_YCBCR_ENC_XV601:
1259 case V4L2_YCBCR_ENC_XV709:
1260 break;
1261 default:
1262 tpg->real_quantization =
1263 V4L2_QUANTIZATION_LIM_RANGE;
1264 break;
1265 }
1266 }
1267 }
1155 tpg_precalculate_colors(tpg); 1268 tpg_precalculate_colors(tpg);
1156 } 1269 }
1157 if (tpg->recalc_square_border) { 1270 if (tpg->recalc_square_border) {
diff --git a/drivers/media/platform/vivid/vivid-tpg.h b/drivers/media/platform/vivid/vivid-tpg.h
index 8ef3e52ba3be..9dc463a40ed3 100644
--- a/drivers/media/platform/vivid/vivid-tpg.h
+++ b/drivers/media/platform/vivid/vivid-tpg.h
@@ -119,6 +119,18 @@ struct tpg_data {
119 u32 fourcc; 119 u32 fourcc;
120 bool is_yuv; 120 bool is_yuv;
121 u32 colorspace; 121 u32 colorspace;
122 u32 ycbcr_enc;
123 /*
124 * Stores the actual Y'CbCr encoding, i.e. will never be
125 * V4L2_YCBCR_ENC_DEFAULT.
126 */
127 u32 real_ycbcr_enc;
128 u32 quantization;
129 /*
130 * Stores the actual quantization, i.e. will never be
131 * V4L2_QUANTIZATION_DEFAULT.
132 */
133 u32 real_quantization;
122 enum tpg_video_aspect vid_aspect; 134 enum tpg_video_aspect vid_aspect;
123 enum tpg_pixel_aspect pix_aspect; 135 enum tpg_pixel_aspect pix_aspect;
124 unsigned rgb_range; 136 unsigned rgb_range;
@@ -286,6 +298,32 @@ static inline u32 tpg_g_colorspace(const struct tpg_data *tpg)
286 return tpg->colorspace; 298 return tpg->colorspace;
287} 299}
288 300
301static inline void tpg_s_ycbcr_enc(struct tpg_data *tpg, u32 ycbcr_enc)
302{
303 if (tpg->ycbcr_enc == ycbcr_enc)
304 return;
305 tpg->ycbcr_enc = ycbcr_enc;
306 tpg->recalc_colors = true;
307}
308
309static inline u32 tpg_g_ycbcr_enc(const struct tpg_data *tpg)
310{
311 return tpg->ycbcr_enc;
312}
313
314static inline void tpg_s_quantization(struct tpg_data *tpg, u32 quantization)
315{
316 if (tpg->quantization == quantization)
317 return;
318 tpg->quantization = quantization;
319 tpg->recalc_colors = true;
320}
321
322static inline u32 tpg_g_quantization(const struct tpg_data *tpg)
323{
324 return tpg->quantization;
325}
326
289static inline unsigned tpg_g_planes(const struct tpg_data *tpg) 327static inline unsigned tpg_g_planes(const struct tpg_data *tpg)
290{ 328{
291 return tpg->planes; 329 return tpg->planes;
diff --git a/drivers/media/platform/vivid/vivid-vbi-cap.c b/drivers/media/platform/vivid/vivid-vbi-cap.c
index 2166d0bf6fe2..ef81b01b53d2 100644
--- a/drivers/media/platform/vivid/vivid-vbi-cap.c
+++ b/drivers/media/platform/vivid/vivid-vbi-cap.c
@@ -236,8 +236,8 @@ const struct vb2_ops vivid_vbi_cap_qops = {
236 .buf_queue = vbi_cap_buf_queue, 236 .buf_queue = vbi_cap_buf_queue,
237 .start_streaming = vbi_cap_start_streaming, 237 .start_streaming = vbi_cap_start_streaming,
238 .stop_streaming = vbi_cap_stop_streaming, 238 .stop_streaming = vbi_cap_stop_streaming,
239 .wait_prepare = vivid_unlock, 239 .wait_prepare = vb2_ops_wait_prepare,
240 .wait_finish = vivid_lock, 240 .wait_finish = vb2_ops_wait_finish,
241}; 241};
242 242
243int vidioc_g_fmt_vbi_cap(struct file *file, void *priv, 243int vidioc_g_fmt_vbi_cap(struct file *file, void *priv,
diff --git a/drivers/media/platform/vivid/vivid-vbi-out.c b/drivers/media/platform/vivid/vivid-vbi-out.c
index 9d00a07ecdcd..4e4c70e1e04a 100644
--- a/drivers/media/platform/vivid/vivid-vbi-out.c
+++ b/drivers/media/platform/vivid/vivid-vbi-out.c
@@ -131,8 +131,8 @@ const struct vb2_ops vivid_vbi_out_qops = {
131 .buf_queue = vbi_out_buf_queue, 131 .buf_queue = vbi_out_buf_queue,
132 .start_streaming = vbi_out_start_streaming, 132 .start_streaming = vbi_out_start_streaming,
133 .stop_streaming = vbi_out_stop_streaming, 133 .stop_streaming = vbi_out_stop_streaming,
134 .wait_prepare = vivid_unlock, 134 .wait_prepare = vb2_ops_wait_prepare,
135 .wait_finish = vivid_lock, 135 .wait_finish = vb2_ops_wait_finish,
136}; 136};
137 137
138int vidioc_g_fmt_vbi_out(struct file *file, void *priv, 138int vidioc_g_fmt_vbi_out(struct file *file, void *priv,
diff --git a/drivers/media/platform/vivid/vivid-vid-cap.c b/drivers/media/platform/vivid/vivid-vid-cap.c
index 331c54429b40..867a29a6d18f 100644
--- a/drivers/media/platform/vivid/vivid-vid-cap.c
+++ b/drivers/media/platform/vivid/vivid-vid-cap.c
@@ -288,8 +288,8 @@ const struct vb2_ops vivid_vid_cap_qops = {
288 .buf_queue = vid_cap_buf_queue, 288 .buf_queue = vid_cap_buf_queue,
289 .start_streaming = vid_cap_start_streaming, 289 .start_streaming = vid_cap_start_streaming,
290 .stop_streaming = vid_cap_stop_streaming, 290 .stop_streaming = vid_cap_stop_streaming,
291 .wait_prepare = vivid_unlock, 291 .wait_prepare = vb2_ops_wait_prepare,
292 .wait_finish = vivid_lock, 292 .wait_finish = vb2_ops_wait_finish,
293}; 293};
294 294
295/* 295/*
@@ -443,12 +443,12 @@ void vivid_update_format_cap(struct vivid_dev *dev, bool keep_controls)
443 break; 443 break;
444 if (bt->standards & V4L2_DV_BT_STD_CEA861) { 444 if (bt->standards & V4L2_DV_BT_STD_CEA861) {
445 if (bt->width == 720 && bt->height <= 576) 445 if (bt->width == 720 && bt->height <= 576)
446 v4l2_ctrl_s_ctrl(dev->colorspace, V4L2_COLORSPACE_SMPTE170M); 446 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_170M);
447 else 447 else
448 v4l2_ctrl_s_ctrl(dev->colorspace, V4L2_COLORSPACE_REC709); 448 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_709);
449 v4l2_ctrl_s_ctrl(dev->real_rgb_range_cap, 1); 449 v4l2_ctrl_s_ctrl(dev->real_rgb_range_cap, 1);
450 } else { 450 } else {
451 v4l2_ctrl_s_ctrl(dev->colorspace, V4L2_COLORSPACE_SRGB); 451 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_SRGB);
452 v4l2_ctrl_s_ctrl(dev->real_rgb_range_cap, 0); 452 v4l2_ctrl_s_ctrl(dev->real_rgb_range_cap, 0);
453 } 453 }
454 tpg_s_rgb_range(&dev->tpg, v4l2_ctrl_g_ctrl(dev->rgb_range_cap)); 454 tpg_s_rgb_range(&dev->tpg, v4l2_ctrl_g_ctrl(dev->rgb_range_cap));
@@ -498,6 +498,20 @@ static unsigned vivid_colorspace_cap(struct vivid_dev *dev)
498 return dev->colorspace_out; 498 return dev->colorspace_out;
499} 499}
500 500
501static unsigned vivid_ycbcr_enc_cap(struct vivid_dev *dev)
502{
503 if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev))
504 return tpg_g_ycbcr_enc(&dev->tpg);
505 return dev->ycbcr_enc_out;
506}
507
508static unsigned vivid_quantization_cap(struct vivid_dev *dev)
509{
510 if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev))
511 return tpg_g_quantization(&dev->tpg);
512 return dev->quantization_out;
513}
514
501int vivid_g_fmt_vid_cap(struct file *file, void *priv, 515int vivid_g_fmt_vid_cap(struct file *file, void *priv,
502 struct v4l2_format *f) 516 struct v4l2_format *f)
503{ 517{
@@ -510,6 +524,8 @@ int vivid_g_fmt_vid_cap(struct file *file, void *priv,
510 mp->field = dev->field_cap; 524 mp->field = dev->field_cap;
511 mp->pixelformat = dev->fmt_cap->fourcc; 525 mp->pixelformat = dev->fmt_cap->fourcc;
512 mp->colorspace = vivid_colorspace_cap(dev); 526 mp->colorspace = vivid_colorspace_cap(dev);
527 mp->ycbcr_enc = vivid_ycbcr_enc_cap(dev);
528 mp->quantization = vivid_quantization_cap(dev);
513 mp->num_planes = dev->fmt_cap->planes; 529 mp->num_planes = dev->fmt_cap->planes;
514 for (p = 0; p < mp->num_planes; p++) { 530 for (p = 0; p < mp->num_planes; p++) {
515 mp->plane_fmt[p].bytesperline = tpg_g_bytesperline(&dev->tpg, p); 531 mp->plane_fmt[p].bytesperline = tpg_g_bytesperline(&dev->tpg, p);
@@ -595,6 +611,8 @@ int vivid_try_fmt_vid_cap(struct file *file, void *priv,
595 memset(pfmt[p].reserved, 0, sizeof(pfmt[p].reserved)); 611 memset(pfmt[p].reserved, 0, sizeof(pfmt[p].reserved));
596 } 612 }
597 mp->colorspace = vivid_colorspace_cap(dev); 613 mp->colorspace = vivid_colorspace_cap(dev);
614 mp->ycbcr_enc = vivid_ycbcr_enc_cap(dev);
615 mp->quantization = vivid_quantization_cap(dev);
598 memset(mp->reserved, 0, sizeof(mp->reserved)); 616 memset(mp->reserved, 0, sizeof(mp->reserved));
599 return 0; 617 return 0;
600} 618}
@@ -1307,20 +1325,20 @@ int vidioc_s_input(struct file *file, void *priv, unsigned i)
1307 if (dev->colorspace) { 1325 if (dev->colorspace) {
1308 switch (dev->input_type[i]) { 1326 switch (dev->input_type[i]) {
1309 case WEBCAM: 1327 case WEBCAM:
1310 v4l2_ctrl_s_ctrl(dev->colorspace, V4L2_COLORSPACE_SRGB); 1328 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_SRGB);
1311 break; 1329 break;
1312 case TV: 1330 case TV:
1313 case SVID: 1331 case SVID:
1314 v4l2_ctrl_s_ctrl(dev->colorspace, V4L2_COLORSPACE_SMPTE170M); 1332 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_170M);
1315 break; 1333 break;
1316 case HDMI: 1334 case HDMI:
1317 if (bt->standards & V4L2_DV_BT_STD_CEA861) { 1335 if (bt->standards & V4L2_DV_BT_STD_CEA861) {
1318 if (dev->src_rect.width == 720 && dev->src_rect.height <= 576) 1336 if (dev->src_rect.width == 720 && dev->src_rect.height <= 576)
1319 v4l2_ctrl_s_ctrl(dev->colorspace, V4L2_COLORSPACE_SMPTE170M); 1337 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_170M);
1320 else 1338 else
1321 v4l2_ctrl_s_ctrl(dev->colorspace, V4L2_COLORSPACE_REC709); 1339 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_709);
1322 } else { 1340 } else {
1323 v4l2_ctrl_s_ctrl(dev->colorspace, V4L2_COLORSPACE_SRGB); 1341 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_SRGB);
1324 } 1342 }
1325 break; 1343 break;
1326 } 1344 }
diff --git a/drivers/media/platform/vivid/vivid-vid-common.c b/drivers/media/platform/vivid/vivid-vid-common.c
index 16cd6d2d2ed6..6bef1e6d6788 100644
--- a/drivers/media/platform/vivid/vivid-vid-common.c
+++ b/drivers/media/platform/vivid/vivid-vid-common.c
@@ -259,6 +259,8 @@ void fmt_sp2mp(const struct v4l2_format *sp_fmt, struct v4l2_format *mp_fmt)
259 mp->pixelformat = pix->pixelformat; 259 mp->pixelformat = pix->pixelformat;
260 mp->field = pix->field; 260 mp->field = pix->field;
261 mp->colorspace = pix->colorspace; 261 mp->colorspace = pix->colorspace;
262 mp->ycbcr_enc = pix->ycbcr_enc;
263 mp->quantization = pix->quantization;
262 mp->num_planes = 1; 264 mp->num_planes = 1;
263 mp->flags = pix->flags; 265 mp->flags = pix->flags;
264 ppix->sizeimage = pix->sizeimage; 266 ppix->sizeimage = pix->sizeimage;
@@ -285,6 +287,8 @@ int fmt_sp2mp_func(struct file *file, void *priv,
285 pix->pixelformat = mp->pixelformat; 287 pix->pixelformat = mp->pixelformat;
286 pix->field = mp->field; 288 pix->field = mp->field;
287 pix->colorspace = mp->colorspace; 289 pix->colorspace = mp->colorspace;
290 pix->ycbcr_enc = mp->ycbcr_enc;
291 pix->quantization = mp->quantization;
288 pix->sizeimage = ppix->sizeimage; 292 pix->sizeimage = ppix->sizeimage;
289 pix->bytesperline = ppix->bytesperline; 293 pix->bytesperline = ppix->bytesperline;
290 pix->flags = mp->flags; 294 pix->flags = mp->flags;
diff --git a/drivers/media/platform/vivid/vivid-vid-out.c b/drivers/media/platform/vivid/vivid-vid-out.c
index 69c2dbd2d165..ee5c3992b276 100644
--- a/drivers/media/platform/vivid/vivid-vid-out.c
+++ b/drivers/media/platform/vivid/vivid-vid-out.c
@@ -209,8 +209,8 @@ const struct vb2_ops vivid_vid_out_qops = {
209 .buf_queue = vid_out_buf_queue, 209 .buf_queue = vid_out_buf_queue,
210 .start_streaming = vid_out_start_streaming, 210 .start_streaming = vid_out_start_streaming,
211 .stop_streaming = vid_out_stop_streaming, 211 .stop_streaming = vid_out_stop_streaming,
212 .wait_prepare = vivid_unlock, 212 .wait_prepare = vb2_ops_wait_prepare,
213 .wait_finish = vivid_lock, 213 .wait_finish = vb2_ops_wait_finish,
214}; 214};
215 215
216/* 216/*
@@ -259,6 +259,8 @@ void vivid_update_format_out(struct vivid_dev *dev)
259 } 259 }
260 break; 260 break;
261 } 261 }
262 dev->ycbcr_enc_out = V4L2_YCBCR_ENC_DEFAULT;
263 dev->quantization_out = V4L2_QUANTIZATION_DEFAULT;
262 dev->compose_out = dev->sink_rect; 264 dev->compose_out = dev->sink_rect;
263 dev->compose_bounds_out = dev->sink_rect; 265 dev->compose_bounds_out = dev->sink_rect;
264 dev->crop_out = dev->compose_out; 266 dev->crop_out = dev->compose_out;
@@ -318,6 +320,8 @@ int vivid_g_fmt_vid_out(struct file *file, void *priv,
318 mp->field = dev->field_out; 320 mp->field = dev->field_out;
319 mp->pixelformat = dev->fmt_out->fourcc; 321 mp->pixelformat = dev->fmt_out->fourcc;
320 mp->colorspace = dev->colorspace_out; 322 mp->colorspace = dev->colorspace_out;
323 mp->ycbcr_enc = dev->ycbcr_enc_out;
324 mp->quantization = dev->quantization_out;
321 mp->num_planes = dev->fmt_out->planes; 325 mp->num_planes = dev->fmt_out->planes;
322 for (p = 0; p < mp->num_planes; p++) { 326 for (p = 0; p < mp->num_planes; p++) {
323 mp->plane_fmt[p].bytesperline = dev->bytesperline_out[p]; 327 mp->plane_fmt[p].bytesperline = dev->bytesperline_out[p];
@@ -394,16 +398,23 @@ int vivid_try_fmt_vid_out(struct file *file, void *priv,
394 pfmt[p].sizeimage = pfmt[p].bytesperline * mp->height; 398 pfmt[p].sizeimage = pfmt[p].bytesperline * mp->height;
395 memset(pfmt[p].reserved, 0, sizeof(pfmt[p].reserved)); 399 memset(pfmt[p].reserved, 0, sizeof(pfmt[p].reserved));
396 } 400 }
397 if (vivid_is_svid_out(dev)) 401 mp->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
402 mp->quantization = V4L2_QUANTIZATION_DEFAULT;
403 if (vivid_is_svid_out(dev)) {
398 mp->colorspace = V4L2_COLORSPACE_SMPTE170M; 404 mp->colorspace = V4L2_COLORSPACE_SMPTE170M;
399 else if (dev->dvi_d_out || !(bt->standards & V4L2_DV_BT_STD_CEA861)) 405 } else if (dev->dvi_d_out || !(bt->standards & V4L2_DV_BT_STD_CEA861)) {
400 mp->colorspace = V4L2_COLORSPACE_SRGB; 406 mp->colorspace = V4L2_COLORSPACE_SRGB;
401 else if (bt->width == 720 && bt->height <= 576) 407 if (dev->dvi_d_out)
408 mp->quantization = V4L2_QUANTIZATION_LIM_RANGE;
409 } else if (bt->width == 720 && bt->height <= 576) {
402 mp->colorspace = V4L2_COLORSPACE_SMPTE170M; 410 mp->colorspace = V4L2_COLORSPACE_SMPTE170M;
403 else if (mp->colorspace != V4L2_COLORSPACE_SMPTE170M && 411 } else if (mp->colorspace != V4L2_COLORSPACE_SMPTE170M &&
404 mp->colorspace != V4L2_COLORSPACE_REC709 && 412 mp->colorspace != V4L2_COLORSPACE_REC709 &&
405 mp->colorspace != V4L2_COLORSPACE_SRGB) 413 mp->colorspace != V4L2_COLORSPACE_ADOBERGB &&
414 mp->colorspace != V4L2_COLORSPACE_BT2020 &&
415 mp->colorspace != V4L2_COLORSPACE_SRGB) {
406 mp->colorspace = V4L2_COLORSPACE_REC709; 416 mp->colorspace = V4L2_COLORSPACE_REC709;
417 }
407 memset(mp->reserved, 0, sizeof(mp->reserved)); 418 memset(mp->reserved, 0, sizeof(mp->reserved));
408 return 0; 419 return 0;
409} 420}
@@ -522,6 +533,8 @@ int vivid_s_fmt_vid_out(struct file *file, void *priv,
522 533
523set_colorspace: 534set_colorspace:
524 dev->colorspace_out = mp->colorspace; 535 dev->colorspace_out = mp->colorspace;
536 dev->ycbcr_enc_out = mp->ycbcr_enc;
537 dev->quantization_out = mp->quantization;
525 if (dev->loop_video) { 538 if (dev->loop_video) {
526 vivid_send_source_change(dev, SVID); 539 vivid_send_source_change(dev, SVID);
527 vivid_send_source_change(dev, HDMI); 540 vivid_send_source_change(dev, HDMI);
diff --git a/drivers/media/platform/vsp1/vsp1_bru.c b/drivers/media/platform/vsp1/vsp1_bru.c
index a0c1984c733e..b21f381a9862 100644
--- a/drivers/media/platform/vsp1/vsp1_bru.c
+++ b/drivers/media/platform/vsp1/vsp1_bru.c
@@ -187,8 +187,8 @@ static int bru_enum_mbus_code(struct v4l2_subdev *subdev,
187 struct v4l2_subdev_mbus_code_enum *code) 187 struct v4l2_subdev_mbus_code_enum *code)
188{ 188{
189 static const unsigned int codes[] = { 189 static const unsigned int codes[] = {
190 V4L2_MBUS_FMT_ARGB8888_1X32, 190 MEDIA_BUS_FMT_ARGB8888_1X32,
191 V4L2_MBUS_FMT_AYUV8_1X32, 191 MEDIA_BUS_FMT_AYUV8_1X32,
192 }; 192 };
193 struct v4l2_mbus_framefmt *format; 193 struct v4l2_mbus_framefmt *format;
194 194
@@ -215,8 +215,8 @@ static int bru_enum_frame_size(struct v4l2_subdev *subdev,
215 if (fse->index) 215 if (fse->index)
216 return -EINVAL; 216 return -EINVAL;
217 217
218 if (fse->code != V4L2_MBUS_FMT_ARGB8888_1X32 && 218 if (fse->code != MEDIA_BUS_FMT_ARGB8888_1X32 &&
219 fse->code != V4L2_MBUS_FMT_AYUV8_1X32) 219 fse->code != MEDIA_BUS_FMT_AYUV8_1X32)
220 return -EINVAL; 220 return -EINVAL;
221 221
222 fse->min_width = BRU_MIN_SIZE; 222 fse->min_width = BRU_MIN_SIZE;
@@ -261,9 +261,9 @@ static void bru_try_format(struct vsp1_bru *bru, struct v4l2_subdev_fh *fh,
261 switch (pad) { 261 switch (pad) {
262 case BRU_PAD_SINK(0): 262 case BRU_PAD_SINK(0):
263 /* Default to YUV if the requested format is not supported. */ 263 /* Default to YUV if the requested format is not supported. */
264 if (fmt->code != V4L2_MBUS_FMT_ARGB8888_1X32 && 264 if (fmt->code != MEDIA_BUS_FMT_ARGB8888_1X32 &&
265 fmt->code != V4L2_MBUS_FMT_AYUV8_1X32) 265 fmt->code != MEDIA_BUS_FMT_AYUV8_1X32)
266 fmt->code = V4L2_MBUS_FMT_AYUV8_1X32; 266 fmt->code = MEDIA_BUS_FMT_AYUV8_1X32;
267 break; 267 break;
268 268
269 default: 269 default:
diff --git a/drivers/media/platform/vsp1/vsp1_hsit.c b/drivers/media/platform/vsp1/vsp1_hsit.c
index db2950a73c60..80bedc554ee3 100644
--- a/drivers/media/platform/vsp1/vsp1_hsit.c
+++ b/drivers/media/platform/vsp1/vsp1_hsit.c
@@ -70,9 +70,9 @@ static int hsit_enum_mbus_code(struct v4l2_subdev *subdev,
70 70
71 if ((code->pad == HSIT_PAD_SINK && !hsit->inverse) | 71 if ((code->pad == HSIT_PAD_SINK && !hsit->inverse) |
72 (code->pad == HSIT_PAD_SOURCE && hsit->inverse)) 72 (code->pad == HSIT_PAD_SOURCE && hsit->inverse))
73 code->code = V4L2_MBUS_FMT_ARGB8888_1X32; 73 code->code = MEDIA_BUS_FMT_ARGB8888_1X32;
74 else 74 else
75 code->code = V4L2_MBUS_FMT_AHSV8888_1X32; 75 code->code = MEDIA_BUS_FMT_AHSV8888_1X32;
76 76
77 return 0; 77 return 0;
78} 78}
@@ -136,8 +136,8 @@ static int hsit_set_format(struct v4l2_subdev *subdev,
136 return 0; 136 return 0;
137 } 137 }
138 138
139 format->code = hsit->inverse ? V4L2_MBUS_FMT_AHSV8888_1X32 139 format->code = hsit->inverse ? MEDIA_BUS_FMT_AHSV8888_1X32
140 : V4L2_MBUS_FMT_ARGB8888_1X32; 140 : MEDIA_BUS_FMT_ARGB8888_1X32;
141 format->width = clamp_t(unsigned int, fmt->format.width, 141 format->width = clamp_t(unsigned int, fmt->format.width,
142 HSIT_MIN_SIZE, HSIT_MAX_SIZE); 142 HSIT_MIN_SIZE, HSIT_MAX_SIZE);
143 format->height = clamp_t(unsigned int, fmt->format.height, 143 format->height = clamp_t(unsigned int, fmt->format.height,
@@ -151,8 +151,8 @@ static int hsit_set_format(struct v4l2_subdev *subdev,
151 format = vsp1_entity_get_pad_format(&hsit->entity, fh, HSIT_PAD_SOURCE, 151 format = vsp1_entity_get_pad_format(&hsit->entity, fh, HSIT_PAD_SOURCE,
152 fmt->which); 152 fmt->which);
153 *format = fmt->format; 153 *format = fmt->format;
154 format->code = hsit->inverse ? V4L2_MBUS_FMT_ARGB8888_1X32 154 format->code = hsit->inverse ? MEDIA_BUS_FMT_ARGB8888_1X32
155 : V4L2_MBUS_FMT_AHSV8888_1X32; 155 : MEDIA_BUS_FMT_AHSV8888_1X32;
156 156
157 return 0; 157 return 0;
158} 158}
diff --git a/drivers/media/platform/vsp1/vsp1_lif.c b/drivers/media/platform/vsp1/vsp1_lif.c
index d4fb23e9c4a8..17a6ca7dafe6 100644
--- a/drivers/media/platform/vsp1/vsp1_lif.c
+++ b/drivers/media/platform/vsp1/vsp1_lif.c
@@ -78,8 +78,8 @@ static int lif_enum_mbus_code(struct v4l2_subdev *subdev,
78 struct v4l2_subdev_mbus_code_enum *code) 78 struct v4l2_subdev_mbus_code_enum *code)
79{ 79{
80 static const unsigned int codes[] = { 80 static const unsigned int codes[] = {
81 V4L2_MBUS_FMT_ARGB8888_1X32, 81 MEDIA_BUS_FMT_ARGB8888_1X32,
82 V4L2_MBUS_FMT_AYUV8_1X32, 82 MEDIA_BUS_FMT_AYUV8_1X32,
83 }; 83 };
84 84
85 if (code->pad == LIF_PAD_SINK) { 85 if (code->pad == LIF_PAD_SINK) {
@@ -147,9 +147,9 @@ static int lif_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh,
147 struct v4l2_mbus_framefmt *format; 147 struct v4l2_mbus_framefmt *format;
148 148
149 /* Default to YUV if the requested format is not supported. */ 149 /* Default to YUV if the requested format is not supported. */
150 if (fmt->format.code != V4L2_MBUS_FMT_ARGB8888_1X32 && 150 if (fmt->format.code != MEDIA_BUS_FMT_ARGB8888_1X32 &&
151 fmt->format.code != V4L2_MBUS_FMT_AYUV8_1X32) 151 fmt->format.code != MEDIA_BUS_FMT_AYUV8_1X32)
152 fmt->format.code = V4L2_MBUS_FMT_AYUV8_1X32; 152 fmt->format.code = MEDIA_BUS_FMT_AYUV8_1X32;
153 153
154 format = vsp1_entity_get_pad_format(&lif->entity, fh, fmt->pad, 154 format = vsp1_entity_get_pad_format(&lif->entity, fh, fmt->pad,
155 fmt->which); 155 fmt->which);
diff --git a/drivers/media/platform/vsp1/vsp1_lut.c b/drivers/media/platform/vsp1/vsp1_lut.c
index fea36ebe2565..6f185c3621fe 100644
--- a/drivers/media/platform/vsp1/vsp1_lut.c
+++ b/drivers/media/platform/vsp1/vsp1_lut.c
@@ -86,9 +86,9 @@ static int lut_enum_mbus_code(struct v4l2_subdev *subdev,
86 struct v4l2_subdev_mbus_code_enum *code) 86 struct v4l2_subdev_mbus_code_enum *code)
87{ 87{
88 static const unsigned int codes[] = { 88 static const unsigned int codes[] = {
89 V4L2_MBUS_FMT_ARGB8888_1X32, 89 MEDIA_BUS_FMT_ARGB8888_1X32,
90 V4L2_MBUS_FMT_AHSV8888_1X32, 90 MEDIA_BUS_FMT_AHSV8888_1X32,
91 V4L2_MBUS_FMT_AYUV8_1X32, 91 MEDIA_BUS_FMT_AYUV8_1X32,
92 }; 92 };
93 struct v4l2_mbus_framefmt *format; 93 struct v4l2_mbus_framefmt *format;
94 94
@@ -158,10 +158,10 @@ static int lut_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh,
158 struct v4l2_mbus_framefmt *format; 158 struct v4l2_mbus_framefmt *format;
159 159
160 /* Default to YUV if the requested format is not supported. */ 160 /* Default to YUV if the requested format is not supported. */
161 if (fmt->format.code != V4L2_MBUS_FMT_ARGB8888_1X32 && 161 if (fmt->format.code != MEDIA_BUS_FMT_ARGB8888_1X32 &&
162 fmt->format.code != V4L2_MBUS_FMT_AHSV8888_1X32 && 162 fmt->format.code != MEDIA_BUS_FMT_AHSV8888_1X32 &&
163 fmt->format.code != V4L2_MBUS_FMT_AYUV8_1X32) 163 fmt->format.code != MEDIA_BUS_FMT_AYUV8_1X32)
164 fmt->format.code = V4L2_MBUS_FMT_AYUV8_1X32; 164 fmt->format.code = MEDIA_BUS_FMT_AYUV8_1X32;
165 165
166 format = vsp1_entity_get_pad_format(&lut->entity, fh, fmt->pad, 166 format = vsp1_entity_get_pad_format(&lut->entity, fh, fmt->pad,
167 fmt->which); 167 fmt->which);
diff --git a/drivers/media/platform/vsp1/vsp1_rwpf.c b/drivers/media/platform/vsp1/vsp1_rwpf.c
index ec3dab6a9b9b..1f1ba26a834a 100644
--- a/drivers/media/platform/vsp1/vsp1_rwpf.c
+++ b/drivers/media/platform/vsp1/vsp1_rwpf.c
@@ -29,8 +29,8 @@ int vsp1_rwpf_enum_mbus_code(struct v4l2_subdev *subdev,
29 struct v4l2_subdev_mbus_code_enum *code) 29 struct v4l2_subdev_mbus_code_enum *code)
30{ 30{
31 static const unsigned int codes[] = { 31 static const unsigned int codes[] = {
32 V4L2_MBUS_FMT_ARGB8888_1X32, 32 MEDIA_BUS_FMT_ARGB8888_1X32,
33 V4L2_MBUS_FMT_AYUV8_1X32, 33 MEDIA_BUS_FMT_AYUV8_1X32,
34 }; 34 };
35 35
36 if (code->index >= ARRAY_SIZE(codes)) 36 if (code->index >= ARRAY_SIZE(codes))
@@ -103,9 +103,9 @@ int vsp1_rwpf_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh,
103 struct v4l2_rect *crop; 103 struct v4l2_rect *crop;
104 104
105 /* Default to YUV if the requested format is not supported. */ 105 /* Default to YUV if the requested format is not supported. */
106 if (fmt->format.code != V4L2_MBUS_FMT_ARGB8888_1X32 && 106 if (fmt->format.code != MEDIA_BUS_FMT_ARGB8888_1X32 &&
107 fmt->format.code != V4L2_MBUS_FMT_AYUV8_1X32) 107 fmt->format.code != MEDIA_BUS_FMT_AYUV8_1X32)
108 fmt->format.code = V4L2_MBUS_FMT_AYUV8_1X32; 108 fmt->format.code = MEDIA_BUS_FMT_AYUV8_1X32;
109 109
110 format = vsp1_entity_get_pad_format(&rwpf->entity, fh, fmt->pad, 110 format = vsp1_entity_get_pad_format(&rwpf->entity, fh, fmt->pad,
111 fmt->which); 111 fmt->which);
diff --git a/drivers/media/platform/vsp1/vsp1_sru.c b/drivers/media/platform/vsp1/vsp1_sru.c
index b7d3c8b9f189..1129494c7cfc 100644
--- a/drivers/media/platform/vsp1/vsp1_sru.c
+++ b/drivers/media/platform/vsp1/vsp1_sru.c
@@ -139,7 +139,7 @@ static int sru_s_stream(struct v4l2_subdev *subdev, int enable)
139 input = &sru->entity.formats[SRU_PAD_SINK]; 139 input = &sru->entity.formats[SRU_PAD_SINK];
140 output = &sru->entity.formats[SRU_PAD_SOURCE]; 140 output = &sru->entity.formats[SRU_PAD_SOURCE];
141 141
142 if (input->code == V4L2_MBUS_FMT_ARGB8888_1X32) 142 if (input->code == MEDIA_BUS_FMT_ARGB8888_1X32)
143 ctrl0 = VI6_SRU_CTRL0_PARAM2 | VI6_SRU_CTRL0_PARAM3 143 ctrl0 = VI6_SRU_CTRL0_PARAM2 | VI6_SRU_CTRL0_PARAM3
144 | VI6_SRU_CTRL0_PARAM4; 144 | VI6_SRU_CTRL0_PARAM4;
145 else 145 else
@@ -170,8 +170,8 @@ static int sru_enum_mbus_code(struct v4l2_subdev *subdev,
170 struct v4l2_subdev_mbus_code_enum *code) 170 struct v4l2_subdev_mbus_code_enum *code)
171{ 171{
172 static const unsigned int codes[] = { 172 static const unsigned int codes[] = {
173 V4L2_MBUS_FMT_ARGB8888_1X32, 173 MEDIA_BUS_FMT_ARGB8888_1X32,
174 V4L2_MBUS_FMT_AYUV8_1X32, 174 MEDIA_BUS_FMT_AYUV8_1X32,
175 }; 175 };
176 struct v4l2_mbus_framefmt *format; 176 struct v4l2_mbus_framefmt *format;
177 177
@@ -248,9 +248,9 @@ static void sru_try_format(struct vsp1_sru *sru, struct v4l2_subdev_fh *fh,
248 switch (pad) { 248 switch (pad) {
249 case SRU_PAD_SINK: 249 case SRU_PAD_SINK:
250 /* Default to YUV if the requested format is not supported. */ 250 /* Default to YUV if the requested format is not supported. */
251 if (fmt->code != V4L2_MBUS_FMT_ARGB8888_1X32 && 251 if (fmt->code != MEDIA_BUS_FMT_ARGB8888_1X32 &&
252 fmt->code != V4L2_MBUS_FMT_AYUV8_1X32) 252 fmt->code != MEDIA_BUS_FMT_AYUV8_1X32)
253 fmt->code = V4L2_MBUS_FMT_AYUV8_1X32; 253 fmt->code = MEDIA_BUS_FMT_AYUV8_1X32;
254 254
255 fmt->width = clamp(fmt->width, SRU_MIN_SIZE, SRU_MAX_SIZE); 255 fmt->width = clamp(fmt->width, SRU_MIN_SIZE, SRU_MAX_SIZE);
256 fmt->height = clamp(fmt->height, SRU_MIN_SIZE, SRU_MAX_SIZE); 256 fmt->height = clamp(fmt->height, SRU_MIN_SIZE, SRU_MAX_SIZE);
diff --git a/drivers/media/platform/vsp1/vsp1_uds.c b/drivers/media/platform/vsp1/vsp1_uds.c
index de92ef4944b3..a4afec133800 100644
--- a/drivers/media/platform/vsp1/vsp1_uds.c
+++ b/drivers/media/platform/vsp1/vsp1_uds.c
@@ -173,8 +173,8 @@ static int uds_enum_mbus_code(struct v4l2_subdev *subdev,
173 struct v4l2_subdev_mbus_code_enum *code) 173 struct v4l2_subdev_mbus_code_enum *code)
174{ 174{
175 static const unsigned int codes[] = { 175 static const unsigned int codes[] = {
176 V4L2_MBUS_FMT_ARGB8888_1X32, 176 MEDIA_BUS_FMT_ARGB8888_1X32,
177 V4L2_MBUS_FMT_AYUV8_1X32, 177 MEDIA_BUS_FMT_AYUV8_1X32,
178 }; 178 };
179 179
180 if (code->pad == UDS_PAD_SINK) { 180 if (code->pad == UDS_PAD_SINK) {
@@ -246,9 +246,9 @@ static void uds_try_format(struct vsp1_uds *uds, struct v4l2_subdev_fh *fh,
246 switch (pad) { 246 switch (pad) {
247 case UDS_PAD_SINK: 247 case UDS_PAD_SINK:
248 /* Default to YUV if the requested format is not supported. */ 248 /* Default to YUV if the requested format is not supported. */
249 if (fmt->code != V4L2_MBUS_FMT_ARGB8888_1X32 && 249 if (fmt->code != MEDIA_BUS_FMT_ARGB8888_1X32 &&
250 fmt->code != V4L2_MBUS_FMT_AYUV8_1X32) 250 fmt->code != MEDIA_BUS_FMT_AYUV8_1X32)
251 fmt->code = V4L2_MBUS_FMT_AYUV8_1X32; 251 fmt->code = MEDIA_BUS_FMT_AYUV8_1X32;
252 252
253 fmt->width = clamp(fmt->width, UDS_MIN_SIZE, UDS_MAX_SIZE); 253 fmt->width = clamp(fmt->width, UDS_MIN_SIZE, UDS_MAX_SIZE);
254 fmt->height = clamp(fmt->height, UDS_MIN_SIZE, UDS_MAX_SIZE); 254 fmt->height = clamp(fmt->height, UDS_MIN_SIZE, UDS_MAX_SIZE);
diff --git a/drivers/media/platform/vsp1/vsp1_video.c b/drivers/media/platform/vsp1/vsp1_video.c
index 915a20eb003e..d91f19a9e1c1 100644
--- a/drivers/media/platform/vsp1/vsp1_video.c
+++ b/drivers/media/platform/vsp1/vsp1_video.c
@@ -48,85 +48,85 @@
48 */ 48 */
49 49
50static const struct vsp1_format_info vsp1_video_formats[] = { 50static const struct vsp1_format_info vsp1_video_formats[] = {
51 { V4L2_PIX_FMT_RGB332, V4L2_MBUS_FMT_ARGB8888_1X32, 51 { V4L2_PIX_FMT_RGB332, MEDIA_BUS_FMT_ARGB8888_1X32,
52 VI6_FMT_RGB_332, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS | 52 VI6_FMT_RGB_332, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
53 VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS, 53 VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
54 1, { 8, 0, 0 }, false, false, 1, 1, false }, 54 1, { 8, 0, 0 }, false, false, 1, 1, false },
55 { V4L2_PIX_FMT_ARGB444, V4L2_MBUS_FMT_ARGB8888_1X32, 55 { V4L2_PIX_FMT_ARGB444, MEDIA_BUS_FMT_ARGB8888_1X32,
56 VI6_FMT_ARGB_4444, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS | 56 VI6_FMT_ARGB_4444, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
57 VI6_RPF_DSWAP_P_WDS, 57 VI6_RPF_DSWAP_P_WDS,
58 1, { 16, 0, 0 }, false, false, 1, 1, true }, 58 1, { 16, 0, 0 }, false, false, 1, 1, true },
59 { V4L2_PIX_FMT_XRGB444, V4L2_MBUS_FMT_ARGB8888_1X32, 59 { V4L2_PIX_FMT_XRGB444, MEDIA_BUS_FMT_ARGB8888_1X32,
60 VI6_FMT_XRGB_4444, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS | 60 VI6_FMT_XRGB_4444, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
61 VI6_RPF_DSWAP_P_WDS, 61 VI6_RPF_DSWAP_P_WDS,
62 1, { 16, 0, 0 }, false, false, 1, 1, true }, 62 1, { 16, 0, 0 }, false, false, 1, 1, true },
63 { V4L2_PIX_FMT_ARGB555, V4L2_MBUS_FMT_ARGB8888_1X32, 63 { V4L2_PIX_FMT_ARGB555, MEDIA_BUS_FMT_ARGB8888_1X32,
64 VI6_FMT_ARGB_1555, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS | 64 VI6_FMT_ARGB_1555, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
65 VI6_RPF_DSWAP_P_WDS, 65 VI6_RPF_DSWAP_P_WDS,
66 1, { 16, 0, 0 }, false, false, 1, 1, true }, 66 1, { 16, 0, 0 }, false, false, 1, 1, true },
67 { V4L2_PIX_FMT_XRGB555, V4L2_MBUS_FMT_ARGB8888_1X32, 67 { V4L2_PIX_FMT_XRGB555, MEDIA_BUS_FMT_ARGB8888_1X32,
68 VI6_FMT_XRGB_1555, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS | 68 VI6_FMT_XRGB_1555, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
69 VI6_RPF_DSWAP_P_WDS, 69 VI6_RPF_DSWAP_P_WDS,
70 1, { 16, 0, 0 }, false, false, 1, 1, false }, 70 1, { 16, 0, 0 }, false, false, 1, 1, false },
71 { V4L2_PIX_FMT_RGB565, V4L2_MBUS_FMT_ARGB8888_1X32, 71 { V4L2_PIX_FMT_RGB565, MEDIA_BUS_FMT_ARGB8888_1X32,
72 VI6_FMT_RGB_565, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS | 72 VI6_FMT_RGB_565, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
73 VI6_RPF_DSWAP_P_WDS, 73 VI6_RPF_DSWAP_P_WDS,
74 1, { 16, 0, 0 }, false, false, 1, 1, false }, 74 1, { 16, 0, 0 }, false, false, 1, 1, false },
75 { V4L2_PIX_FMT_BGR24, V4L2_MBUS_FMT_ARGB8888_1X32, 75 { V4L2_PIX_FMT_BGR24, MEDIA_BUS_FMT_ARGB8888_1X32,
76 VI6_FMT_BGR_888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS | 76 VI6_FMT_BGR_888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
77 VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS, 77 VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
78 1, { 24, 0, 0 }, false, false, 1, 1, false }, 78 1, { 24, 0, 0 }, false, false, 1, 1, false },
79 { V4L2_PIX_FMT_RGB24, V4L2_MBUS_FMT_ARGB8888_1X32, 79 { V4L2_PIX_FMT_RGB24, MEDIA_BUS_FMT_ARGB8888_1X32,
80 VI6_FMT_RGB_888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS | 80 VI6_FMT_RGB_888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
81 VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS, 81 VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
82 1, { 24, 0, 0 }, false, false, 1, 1, false }, 82 1, { 24, 0, 0 }, false, false, 1, 1, false },
83 { V4L2_PIX_FMT_ABGR32, V4L2_MBUS_FMT_ARGB8888_1X32, 83 { V4L2_PIX_FMT_ABGR32, MEDIA_BUS_FMT_ARGB8888_1X32,
84 VI6_FMT_ARGB_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS, 84 VI6_FMT_ARGB_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS,
85 1, { 32, 0, 0 }, false, false, 1, 1, true }, 85 1, { 32, 0, 0 }, false, false, 1, 1, true },
86 { V4L2_PIX_FMT_XBGR32, V4L2_MBUS_FMT_ARGB8888_1X32, 86 { V4L2_PIX_FMT_XBGR32, MEDIA_BUS_FMT_ARGB8888_1X32,
87 VI6_FMT_ARGB_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS, 87 VI6_FMT_ARGB_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS,
88 1, { 32, 0, 0 }, false, false, 1, 1, false }, 88 1, { 32, 0, 0 }, false, false, 1, 1, false },
89 { V4L2_PIX_FMT_ARGB32, V4L2_MBUS_FMT_ARGB8888_1X32, 89 { V4L2_PIX_FMT_ARGB32, MEDIA_BUS_FMT_ARGB8888_1X32,
90 VI6_FMT_ARGB_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS | 90 VI6_FMT_ARGB_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
91 VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS, 91 VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
92 1, { 32, 0, 0 }, false, false, 1, 1, true }, 92 1, { 32, 0, 0 }, false, false, 1, 1, true },
93 { V4L2_PIX_FMT_XRGB32, V4L2_MBUS_FMT_ARGB8888_1X32, 93 { V4L2_PIX_FMT_XRGB32, MEDIA_BUS_FMT_ARGB8888_1X32,
94 VI6_FMT_ARGB_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS | 94 VI6_FMT_ARGB_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
95 VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS, 95 VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
96 1, { 32, 0, 0 }, false, false, 1, 1, false }, 96 1, { 32, 0, 0 }, false, false, 1, 1, false },
97 { V4L2_PIX_FMT_UYVY, V4L2_MBUS_FMT_AYUV8_1X32, 97 { V4L2_PIX_FMT_UYVY, MEDIA_BUS_FMT_AYUV8_1X32,
98 VI6_FMT_YUYV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS | 98 VI6_FMT_YUYV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
99 VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS, 99 VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
100 1, { 16, 0, 0 }, false, false, 2, 1, false }, 100 1, { 16, 0, 0 }, false, false, 2, 1, false },
101 { V4L2_PIX_FMT_VYUY, V4L2_MBUS_FMT_AYUV8_1X32, 101 { V4L2_PIX_FMT_VYUY, MEDIA_BUS_FMT_AYUV8_1X32,
102 VI6_FMT_YUYV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS | 102 VI6_FMT_YUYV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
103 VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS, 103 VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
104 1, { 16, 0, 0 }, false, true, 2, 1, false }, 104 1, { 16, 0, 0 }, false, true, 2, 1, false },
105 { V4L2_PIX_FMT_YUYV, V4L2_MBUS_FMT_AYUV8_1X32, 105 { V4L2_PIX_FMT_YUYV, MEDIA_BUS_FMT_AYUV8_1X32,
106 VI6_FMT_YUYV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS | 106 VI6_FMT_YUYV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
107 VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS, 107 VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
108 1, { 16, 0, 0 }, true, false, 2, 1, false }, 108 1, { 16, 0, 0 }, true, false, 2, 1, false },
109 { V4L2_PIX_FMT_YVYU, V4L2_MBUS_FMT_AYUV8_1X32, 109 { V4L2_PIX_FMT_YVYU, MEDIA_BUS_FMT_AYUV8_1X32,
110 VI6_FMT_YUYV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS | 110 VI6_FMT_YUYV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
111 VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS, 111 VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
112 1, { 16, 0, 0 }, true, true, 2, 1, false }, 112 1, { 16, 0, 0 }, true, true, 2, 1, false },
113 { V4L2_PIX_FMT_NV12M, V4L2_MBUS_FMT_AYUV8_1X32, 113 { V4L2_PIX_FMT_NV12M, MEDIA_BUS_FMT_AYUV8_1X32,
114 VI6_FMT_Y_UV_420, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS | 114 VI6_FMT_Y_UV_420, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
115 VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS, 115 VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
116 2, { 8, 16, 0 }, false, false, 2, 2, false }, 116 2, { 8, 16, 0 }, false, false, 2, 2, false },
117 { V4L2_PIX_FMT_NV21M, V4L2_MBUS_FMT_AYUV8_1X32, 117 { V4L2_PIX_FMT_NV21M, MEDIA_BUS_FMT_AYUV8_1X32,
118 VI6_FMT_Y_UV_420, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS | 118 VI6_FMT_Y_UV_420, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
119 VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS, 119 VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
120 2, { 8, 16, 0 }, false, true, 2, 2, false }, 120 2, { 8, 16, 0 }, false, true, 2, 2, false },
121 { V4L2_PIX_FMT_NV16M, V4L2_MBUS_FMT_AYUV8_1X32, 121 { V4L2_PIX_FMT_NV16M, MEDIA_BUS_FMT_AYUV8_1X32,
122 VI6_FMT_Y_UV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS | 122 VI6_FMT_Y_UV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
123 VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS, 123 VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
124 2, { 8, 16, 0 }, false, false, 2, 1, false }, 124 2, { 8, 16, 0 }, false, false, 2, 1, false },
125 { V4L2_PIX_FMT_NV61M, V4L2_MBUS_FMT_AYUV8_1X32, 125 { V4L2_PIX_FMT_NV61M, MEDIA_BUS_FMT_AYUV8_1X32,
126 VI6_FMT_Y_UV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS | 126 VI6_FMT_Y_UV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
127 VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS, 127 VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
128 2, { 8, 16, 0 }, false, true, 2, 1, false }, 128 2, { 8, 16, 0 }, false, true, 2, 1, false },
129 { V4L2_PIX_FMT_YUV420M, V4L2_MBUS_FMT_AYUV8_1X32, 129 { V4L2_PIX_FMT_YUV420M, MEDIA_BUS_FMT_AYUV8_1X32,
130 VI6_FMT_Y_U_V_420, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS | 130 VI6_FMT_Y_U_V_420, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
131 VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS, 131 VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
132 3, { 8, 8, 8 }, false, false, 2, 2, false }, 132 3, { 8, 8, 8 }, false, false, 2, 2, false },
diff --git a/drivers/media/radio/radio-wl1273.c b/drivers/media/radio/radio-wl1273.c
index 9cf6731fb816..284f789265e0 100644
--- a/drivers/media/radio/radio-wl1273.c
+++ b/drivers/media/radio/radio-wl1273.c
@@ -1279,10 +1279,12 @@ static int wl1273_fm_vidioc_querycap(struct file *file, void *priv,
1279 strlcpy(capability->bus_info, radio->bus_type, 1279 strlcpy(capability->bus_info, radio->bus_type,
1280 sizeof(capability->bus_info)); 1280 sizeof(capability->bus_info));
1281 1281
1282 capability->capabilities = V4L2_CAP_HW_FREQ_SEEK | 1282 capability->device_caps = V4L2_CAP_HW_FREQ_SEEK |
1283 V4L2_CAP_TUNER | V4L2_CAP_RADIO | V4L2_CAP_AUDIO | 1283 V4L2_CAP_TUNER | V4L2_CAP_RADIO | V4L2_CAP_AUDIO |
1284 V4L2_CAP_RDS_CAPTURE | V4L2_CAP_MODULATOR | 1284 V4L2_CAP_RDS_CAPTURE | V4L2_CAP_MODULATOR |
1285 V4L2_CAP_RDS_OUTPUT; 1285 V4L2_CAP_RDS_OUTPUT;
1286 capability->capabilities = capability->device_caps |
1287 V4L2_CAP_DEVICE_CAPS;
1286 1288
1287 return 0; 1289 return 0;
1288} 1290}
diff --git a/drivers/media/radio/si4713/radio-platform-si4713.c b/drivers/media/radio/si4713/radio-platform-si4713.c
index a47502a330f0..2de5439b9c79 100644
--- a/drivers/media/radio/si4713/radio-platform-si4713.c
+++ b/drivers/media/radio/si4713/radio-platform-si4713.c
@@ -34,7 +34,7 @@
34#include <media/v4l2-fh.h> 34#include <media/v4l2-fh.h>
35#include <media/v4l2-ctrls.h> 35#include <media/v4l2-ctrls.h>
36#include <media/v4l2-event.h> 36#include <media/v4l2-event.h>
37#include <media/radio-si4713.h> 37#include "si4713.h"
38 38
39/* module parameters */ 39/* module parameters */
40static int radio_nr = -1; /* radio device minor (-1 ==> auto assign) */ 40static int radio_nr = -1; /* radio device minor (-1 ==> auto assign) */
@@ -153,7 +153,6 @@ static int radio_si4713_pdriver_probe(struct platform_device *pdev)
153{ 153{
154 struct radio_si4713_platform_data *pdata = pdev->dev.platform_data; 154 struct radio_si4713_platform_data *pdata = pdev->dev.platform_data;
155 struct radio_si4713_device *rsdev; 155 struct radio_si4713_device *rsdev;
156 struct i2c_adapter *adapter;
157 struct v4l2_subdev *sd; 156 struct v4l2_subdev *sd;
158 int rval = 0; 157 int rval = 0;
159 158
@@ -177,20 +176,11 @@ static int radio_si4713_pdriver_probe(struct platform_device *pdev)
177 goto exit; 176 goto exit;
178 } 177 }
179 178
180 adapter = i2c_get_adapter(pdata->i2c_bus); 179 sd = i2c_get_clientdata(pdata->subdev);
181 if (!adapter) { 180 rval = v4l2_device_register_subdev(&rsdev->v4l2_dev, sd);
182 dev_err(&pdev->dev, "Cannot get i2c adapter %d\n", 181 if (rval) {
183 pdata->i2c_bus);
184 rval = -ENODEV;
185 goto unregister_v4l2_dev;
186 }
187
188 sd = v4l2_i2c_new_subdev_board(&rsdev->v4l2_dev, adapter,
189 pdata->subdev_board_info, NULL);
190 if (!sd) {
191 dev_err(&pdev->dev, "Cannot get v4l2 subdevice\n"); 182 dev_err(&pdev->dev, "Cannot get v4l2 subdevice\n");
192 rval = -ENODEV; 183 goto unregister_v4l2_dev;
193 goto put_adapter;
194 } 184 }
195 185
196 rsdev->radio_dev = radio_si4713_vdev_template; 186 rsdev->radio_dev = radio_si4713_vdev_template;
@@ -202,14 +192,12 @@ static int radio_si4713_pdriver_probe(struct platform_device *pdev)
202 if (video_register_device(&rsdev->radio_dev, VFL_TYPE_RADIO, radio_nr)) { 192 if (video_register_device(&rsdev->radio_dev, VFL_TYPE_RADIO, radio_nr)) {
203 dev_err(&pdev->dev, "Could not register video device.\n"); 193 dev_err(&pdev->dev, "Could not register video device.\n");
204 rval = -EIO; 194 rval = -EIO;
205 goto put_adapter; 195 goto unregister_v4l2_dev;
206 } 196 }
207 dev_info(&pdev->dev, "New device successfully probed\n"); 197 dev_info(&pdev->dev, "New device successfully probed\n");
208 198
209 goto exit; 199 goto exit;
210 200
211put_adapter:
212 i2c_put_adapter(adapter);
213unregister_v4l2_dev: 201unregister_v4l2_dev:
214 v4l2_device_unregister(&rsdev->v4l2_dev); 202 v4l2_device_unregister(&rsdev->v4l2_dev);
215exit: 203exit:
@@ -220,14 +208,10 @@ exit:
220static int radio_si4713_pdriver_remove(struct platform_device *pdev) 208static int radio_si4713_pdriver_remove(struct platform_device *pdev)
221{ 209{
222 struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev); 210 struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev);
223 struct v4l2_subdev *sd = list_entry(v4l2_dev->subdevs.next,
224 struct v4l2_subdev, list);
225 struct i2c_client *client = v4l2_get_subdevdata(sd);
226 struct radio_si4713_device *rsdev; 211 struct radio_si4713_device *rsdev;
227 212
228 rsdev = container_of(v4l2_dev, struct radio_si4713_device, v4l2_dev); 213 rsdev = container_of(v4l2_dev, struct radio_si4713_device, v4l2_dev);
229 video_unregister_device(&rsdev->radio_dev); 214 video_unregister_device(&rsdev->radio_dev);
230 i2c_put_adapter(client->adapter);
231 v4l2_device_unregister(&rsdev->v4l2_dev); 215 v4l2_device_unregister(&rsdev->v4l2_dev);
232 216
233 return 0; 217 return 0;
diff --git a/drivers/media/radio/si4713/si4713.c b/drivers/media/radio/si4713/si4713.c
index b5765557ea3d..c90004dac170 100644
--- a/drivers/media/radio/si4713/si4713.c
+++ b/drivers/media/radio/si4713/si4713.c
@@ -23,6 +23,7 @@
23 23
24#include <linux/completion.h> 24#include <linux/completion.h>
25#include <linux/delay.h> 25#include <linux/delay.h>
26#include <linux/err.h>
26#include <linux/interrupt.h> 27#include <linux/interrupt.h>
27#include <linux/i2c.h> 28#include <linux/i2c.h>
28#include <linux/slab.h> 29#include <linux/slab.h>
@@ -366,16 +367,25 @@ static int si4713_powerup(struct si4713_device *sdev)
366 if (sdev->power_state) 367 if (sdev->power_state)
367 return 0; 368 return 0;
368 369
369 if (sdev->supplies) { 370 if (sdev->vdd) {
370 err = regulator_bulk_enable(sdev->supplies, sdev->supply_data); 371 err = regulator_enable(sdev->vdd);
371 if (err) { 372 if (err) {
372 v4l2_err(&sdev->sd, "Failed to enable supplies: %d\n", err); 373 v4l2_err(&sdev->sd, "Failed to enable vdd: %d\n", err);
373 return err; 374 return err;
374 } 375 }
375 } 376 }
376 if (gpio_is_valid(sdev->gpio_reset)) { 377
378 if (sdev->vio) {
379 err = regulator_enable(sdev->vio);
380 if (err) {
381 v4l2_err(&sdev->sd, "Failed to enable vio: %d\n", err);
382 return err;
383 }
384 }
385
386 if (!IS_ERR(sdev->gpio_reset)) {
377 udelay(50); 387 udelay(50);
378 gpio_set_value(sdev->gpio_reset, 1); 388 gpiod_set_value(sdev->gpio_reset, 1);
379 } 389 }
380 390
381 if (client->irq) 391 if (client->irq)
@@ -397,13 +407,20 @@ static int si4713_powerup(struct si4713_device *sdev)
397 SI4713_STC_INT | SI4713_CTS); 407 SI4713_STC_INT | SI4713_CTS);
398 return err; 408 return err;
399 } 409 }
400 if (gpio_is_valid(sdev->gpio_reset)) 410 if (!IS_ERR(sdev->gpio_reset))
401 gpio_set_value(sdev->gpio_reset, 0); 411 gpiod_set_value(sdev->gpio_reset, 0);
402 if (sdev->supplies) { 412
403 err = regulator_bulk_disable(sdev->supplies, sdev->supply_data); 413
414 if (sdev->vdd) {
415 err = regulator_disable(sdev->vdd);
404 if (err) 416 if (err)
405 v4l2_err(&sdev->sd, 417 v4l2_err(&sdev->sd, "Failed to disable vdd: %d\n", err);
406 "Failed to disable supplies: %d\n", err); 418 }
419
420 if (sdev->vio) {
421 err = regulator_disable(sdev->vio);
422 if (err)
423 v4l2_err(&sdev->sd, "Failed to disable vio: %d\n", err);
407 } 424 }
408 425
409 return err; 426 return err;
@@ -430,14 +447,23 @@ static int si4713_powerdown(struct si4713_device *sdev)
430 v4l2_dbg(1, debug, &sdev->sd, "Power down response: 0x%02x\n", 447 v4l2_dbg(1, debug, &sdev->sd, "Power down response: 0x%02x\n",
431 resp[0]); 448 resp[0]);
432 v4l2_dbg(1, debug, &sdev->sd, "Device in reset mode\n"); 449 v4l2_dbg(1, debug, &sdev->sd, "Device in reset mode\n");
433 if (gpio_is_valid(sdev->gpio_reset)) 450 if (!IS_ERR(sdev->gpio_reset))
434 gpio_set_value(sdev->gpio_reset, 0); 451 gpiod_set_value(sdev->gpio_reset, 0);
435 if (sdev->supplies) { 452
436 err = regulator_bulk_disable(sdev->supplies, 453 if (sdev->vdd) {
437 sdev->supply_data); 454 err = regulator_disable(sdev->vdd);
438 if (err) 455 if (err) {
439 v4l2_err(&sdev->sd, 456 v4l2_err(&sdev->sd,
440 "Failed to disable supplies: %d\n", err); 457 "Failed to disable vdd: %d\n", err);
458 }
459 }
460
461 if (sdev->vio) {
462 err = regulator_disable(sdev->vio);
463 if (err) {
464 v4l2_err(&sdev->sd,
465 "Failed to disable vio: %d\n", err);
466 }
441 } 467 }
442 sdev->power_state = POWER_OFF; 468 sdev->power_state = POWER_OFF;
443 } 469 }
@@ -1420,38 +1446,51 @@ static int si4713_probe(struct i2c_client *client,
1420 const struct i2c_device_id *id) 1446 const struct i2c_device_id *id)
1421{ 1447{
1422 struct si4713_device *sdev; 1448 struct si4713_device *sdev;
1423 struct si4713_platform_data *pdata = client->dev.platform_data;
1424 struct v4l2_ctrl_handler *hdl; 1449 struct v4l2_ctrl_handler *hdl;
1425 int rval, i; 1450 struct si4713_platform_data *pdata = client->dev.platform_data;
1451 struct device_node *np = client->dev.of_node;
1452 struct radio_si4713_platform_data si4713_pdev_pdata;
1453 struct platform_device *si4713_pdev;
1454 int rval;
1426 1455
1427 sdev = kzalloc(sizeof(*sdev), GFP_KERNEL); 1456 sdev = devm_kzalloc(&client->dev, sizeof(*sdev), GFP_KERNEL);
1428 if (!sdev) { 1457 if (!sdev) {
1429 dev_err(&client->dev, "Failed to alloc video device.\n"); 1458 dev_err(&client->dev, "Failed to alloc video device.\n");
1430 rval = -ENOMEM; 1459 rval = -ENOMEM;
1431 goto exit; 1460 goto exit;
1432 } 1461 }
1433 1462
1434 sdev->gpio_reset = -1; 1463 sdev->gpio_reset = devm_gpiod_get(&client->dev, "reset");
1435 if (pdata && gpio_is_valid(pdata->gpio_reset)) { 1464 if (!IS_ERR(sdev->gpio_reset)) {
1436 rval = gpio_request(pdata->gpio_reset, "si4713 reset"); 1465 gpiod_direction_output(sdev->gpio_reset, 0);
1437 if (rval) { 1466 } else if (PTR_ERR(sdev->gpio_reset) == -ENOENT) {
1438 dev_err(&client->dev, 1467 dev_dbg(&client->dev, "No reset GPIO assigned\n");
1439 "Failed to request gpio: %d\n", rval); 1468 } else if (PTR_ERR(sdev->gpio_reset) == -ENOSYS) {
1440 goto free_sdev; 1469 dev_dbg(&client->dev, "No reset GPIO support\n");
1441 } 1470 } else {
1442 sdev->gpio_reset = pdata->gpio_reset; 1471 rval = PTR_ERR(sdev->gpio_reset);
1443 gpio_direction_output(sdev->gpio_reset, 0); 1472 dev_err(&client->dev, "Failed to request gpio: %d\n", rval);
1444 sdev->supplies = pdata->supplies; 1473 goto exit;
1445 } 1474 }
1446 1475
1447 for (i = 0; i < sdev->supplies; i++) 1476 sdev->vdd = devm_regulator_get_optional(&client->dev, "vdd");
1448 sdev->supply_data[i].supply = pdata->supply_names[i]; 1477 if (IS_ERR(sdev->vdd)) {
1478 rval = PTR_ERR(sdev->vdd);
1479 if (rval == -EPROBE_DEFER)
1480 goto exit;
1449 1481
1450 rval = regulator_bulk_get(&client->dev, sdev->supplies, 1482 dev_dbg(&client->dev, "no vdd regulator found: %d\n", rval);
1451 sdev->supply_data); 1483 sdev->vdd = NULL;
1452 if (rval) { 1484 }
1453 dev_err(&client->dev, "Cannot get regulators: %d\n", rval); 1485
1454 goto free_gpio; 1486 sdev->vio = devm_regulator_get_optional(&client->dev, "vio");
1487 if (IS_ERR(sdev->vio)) {
1488 rval = PTR_ERR(sdev->vio);
1489 if (rval == -EPROBE_DEFER)
1490 goto exit;
1491
1492 dev_dbg(&client->dev, "no vio regulator found: %d\n", rval);
1493 sdev->vio = NULL;
1455 } 1494 }
1456 1495
1457 v4l2_i2c_subdev_init(&sdev->sd, client, &si4713_subdev_ops); 1496 v4l2_i2c_subdev_init(&sdev->sd, client, &si4713_subdev_ops);
@@ -1554,12 +1593,12 @@ static int si4713_probe(struct i2c_client *client,
1554 sdev->sd.ctrl_handler = hdl; 1593 sdev->sd.ctrl_handler = hdl;
1555 1594
1556 if (client->irq) { 1595 if (client->irq) {
1557 rval = request_irq(client->irq, 1596 rval = devm_request_irq(&client->dev, client->irq,
1558 si4713_handler, IRQF_TRIGGER_FALLING, 1597 si4713_handler, IRQF_TRIGGER_FALLING,
1559 client->name, sdev); 1598 client->name, sdev);
1560 if (rval < 0) { 1599 if (rval < 0) {
1561 v4l2_err(&sdev->sd, "Could not request IRQ\n"); 1600 v4l2_err(&sdev->sd, "Could not request IRQ\n");
1562 goto put_reg; 1601 goto free_ctrls;
1563 } 1602 }
1564 v4l2_dbg(1, debug, &sdev->sd, "IRQ requested.\n"); 1603 v4l2_dbg(1, debug, &sdev->sd, "IRQ requested.\n");
1565 } else { 1604 } else {
@@ -1569,23 +1608,35 @@ static int si4713_probe(struct i2c_client *client,
1569 rval = si4713_initialize(sdev); 1608 rval = si4713_initialize(sdev);
1570 if (rval < 0) { 1609 if (rval < 0) {
1571 v4l2_err(&sdev->sd, "Failed to probe device information.\n"); 1610 v4l2_err(&sdev->sd, "Failed to probe device information.\n");
1572 goto free_irq; 1611 goto free_ctrls;
1573 } 1612 }
1574 1613
1614 if (!np && (!pdata || !pdata->is_platform_device))
1615 return 0;
1616
1617 si4713_pdev = platform_device_alloc("radio-si4713", -1);
1618 if (!si4713_pdev)
1619 goto put_main_pdev;
1620
1621 si4713_pdev_pdata.subdev = client;
1622 rval = platform_device_add_data(si4713_pdev, &si4713_pdev_pdata,
1623 sizeof(si4713_pdev_pdata));
1624 if (rval)
1625 goto put_main_pdev;
1626
1627 rval = platform_device_add(si4713_pdev);
1628 if (rval)
1629 goto put_main_pdev;
1630
1631 sdev->pd = si4713_pdev;
1632
1575 return 0; 1633 return 0;
1576 1634
1577free_irq: 1635put_main_pdev:
1578 if (client->irq) 1636 platform_device_put(si4713_pdev);
1579 free_irq(client->irq, sdev); 1637 v4l2_device_unregister_subdev(&sdev->sd);
1580free_ctrls: 1638free_ctrls:
1581 v4l2_ctrl_handler_free(hdl); 1639 v4l2_ctrl_handler_free(hdl);
1582put_reg:
1583 regulator_bulk_free(sdev->supplies, sdev->supply_data);
1584free_gpio:
1585 if (gpio_is_valid(sdev->gpio_reset))
1586 gpio_free(sdev->gpio_reset);
1587free_sdev:
1588 kfree(sdev);
1589exit: 1640exit:
1590 return rval; 1641 return rval;
1591} 1642}
@@ -1596,18 +1647,13 @@ static int si4713_remove(struct i2c_client *client)
1596 struct v4l2_subdev *sd = i2c_get_clientdata(client); 1647 struct v4l2_subdev *sd = i2c_get_clientdata(client);
1597 struct si4713_device *sdev = to_si4713_device(sd); 1648 struct si4713_device *sdev = to_si4713_device(sd);
1598 1649
1650 platform_device_unregister(sdev->pd);
1651
1599 if (sdev->power_state) 1652 if (sdev->power_state)
1600 si4713_set_power_state(sdev, POWER_DOWN); 1653 si4713_set_power_state(sdev, POWER_DOWN);
1601 1654
1602 if (client->irq > 0)
1603 free_irq(client->irq, sdev);
1604
1605 v4l2_device_unregister_subdev(sd); 1655 v4l2_device_unregister_subdev(sd);
1606 v4l2_ctrl_handler_free(sd->ctrl_handler); 1656 v4l2_ctrl_handler_free(sd->ctrl_handler);
1607 regulator_bulk_free(sdev->supplies, sdev->supply_data);
1608 if (gpio_is_valid(sdev->gpio_reset))
1609 gpio_free(sdev->gpio_reset);
1610 kfree(sdev);
1611 1657
1612 return 0; 1658 return 0;
1613} 1659}
diff --git a/drivers/media/radio/si4713/si4713.h b/drivers/media/radio/si4713/si4713.h
index ed700e387605..8a376e142188 100644
--- a/drivers/media/radio/si4713/si4713.h
+++ b/drivers/media/radio/si4713/si4713.h
@@ -15,7 +15,9 @@
15#ifndef SI4713_I2C_H 15#ifndef SI4713_I2C_H
16#define SI4713_I2C_H 16#define SI4713_I2C_H
17 17
18#include <linux/platform_device.h>
18#include <linux/regulator/consumer.h> 19#include <linux/regulator/consumer.h>
20#include <linux/gpio/consumer.h>
19#include <media/v4l2-subdev.h> 21#include <media/v4l2-subdev.h>
20#include <media/v4l2-ctrls.h> 22#include <media/v4l2-ctrls.h>
21#include <media/si4713.h> 23#include <media/si4713.h>
@@ -190,8 +192,6 @@
190#define MIN_ACOMP_THRESHOLD (-40) 192#define MIN_ACOMP_THRESHOLD (-40)
191#define MAX_ACOMP_GAIN 20 193#define MAX_ACOMP_GAIN 20
192 194
193#define SI4713_NUM_SUPPLIES 2
194
195/* 195/*
196 * si4713_device - private data 196 * si4713_device - private data
197 */ 197 */
@@ -236,9 +236,10 @@ struct si4713_device {
236 struct v4l2_ctrl *tune_ant_cap; 236 struct v4l2_ctrl *tune_ant_cap;
237 }; 237 };
238 struct completion work; 238 struct completion work;
239 unsigned supplies; 239 struct regulator *vdd;
240 struct regulator_bulk_data supply_data[SI4713_NUM_SUPPLIES]; 240 struct regulator *vio;
241 int gpio_reset; 241 struct gpio_desc *gpio_reset;
242 struct platform_device *pd;
242 u32 power_state; 243 u32 power_state;
243 u32 rds_enabled; 244 u32 rds_enabled;
244 u32 frequency; 245 u32 frequency;
@@ -246,4 +247,8 @@ struct si4713_device {
246 u32 stereo; 247 u32 stereo;
247 u32 tune_rnl; 248 u32 tune_rnl;
248}; 249};
250
251struct radio_si4713_platform_data {
252 struct i2c_client *subdev;
253};
249#endif /* ifndef SI4713_I2C_H */ 254#endif /* ifndef SI4713_I2C_H */
diff --git a/drivers/media/radio/wl128x/fmdrv_v4l2.c b/drivers/media/radio/wl128x/fmdrv_v4l2.c
index b55012c11842..a5bd3f674bbd 100644
--- a/drivers/media/radio/wl128x/fmdrv_v4l2.c
+++ b/drivers/media/radio/wl128x/fmdrv_v4l2.c
@@ -198,10 +198,12 @@ static int fm_v4l2_vidioc_querycap(struct file *file, void *priv,
198 strlcpy(capability->card, FM_DRV_CARD_SHORT_NAME, 198 strlcpy(capability->card, FM_DRV_CARD_SHORT_NAME,
199 sizeof(capability->card)); 199 sizeof(capability->card));
200 sprintf(capability->bus_info, "UART"); 200 sprintf(capability->bus_info, "UART");
201 capability->capabilities = V4L2_CAP_HW_FREQ_SEEK | V4L2_CAP_TUNER | 201 capability->device_caps = V4L2_CAP_HW_FREQ_SEEK | V4L2_CAP_TUNER |
202 V4L2_CAP_RADIO | V4L2_CAP_MODULATOR | 202 V4L2_CAP_RADIO | V4L2_CAP_MODULATOR |
203 V4L2_CAP_AUDIO | V4L2_CAP_READWRITE | 203 V4L2_CAP_AUDIO | V4L2_CAP_READWRITE |
204 V4L2_CAP_RDS_CAPTURE; 204 V4L2_CAP_RDS_CAPTURE;
205 capability->capabilities = capability->device_caps |
206 V4L2_CAP_DEVICE_CAPS;
205 207
206 return 0; 208 return 0;
207} 209}
diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig
index 8ce08107a69d..ddfab256b9a5 100644
--- a/drivers/media/rc/Kconfig
+++ b/drivers/media/rc/Kconfig
@@ -223,6 +223,17 @@ config IR_FINTEK
223 To compile this driver as a module, choose M here: the 223 To compile this driver as a module, choose M here: the
224 module will be called fintek-cir. 224 module will be called fintek-cir.
225 225
226config IR_MESON
227 tristate "Amlogic Meson IR remote receiver"
228 depends on RC_CORE
229 depends on ARCH_MESON || COMPILE_TEST
230 ---help---
231 Say Y if you want to use the IR remote receiver available
232 on Amlogic Meson SoCs.
233
234 To compile this driver as a module, choose M here: the
235 module will be called meson-ir.
236
226config IR_NUVOTON 237config IR_NUVOTON
227 tristate "Nuvoton w836x7hg Consumer Infrared Transceiver" 238 tristate "Nuvoton w836x7hg Consumer Infrared Transceiver"
228 depends on PNP 239 depends on PNP
@@ -277,6 +288,21 @@ config IR_WINBOND_CIR
277 To compile this driver as a module, choose M here: the module will 288 To compile this driver as a module, choose M here: the module will
278 be called winbond_cir. 289 be called winbond_cir.
279 290
291config IR_IGORPLUGUSB
292 tristate "IgorPlug-USB IR Receiver"
293 depends on USB_ARCH_HAS_HCD
294 depends on RC_CORE
295 select USB
296 ---help---
297 Say Y here if you want to use the IgorPlug-USB IR Receiver by
298 Igor Cesko. This device is included on the Fit-PC2.
299
300 Note that this device can only record bursts of 36 IR pulses and
301 spaces, which is not enough for the NEC, Sanyo and RC-6 protocol.
302
303 To compile this driver as a module, choose M here: the module will
304 be called igorplugusb.
305
280config IR_IGUANA 306config IR_IGUANA
281 tristate "IguanaWorks USB IR Transceiver" 307 tristate "IguanaWorks USB IR Transceiver"
282 depends on USB_ARCH_HAS_HCD 308 depends on USB_ARCH_HAS_HCD
diff --git a/drivers/media/rc/Makefile b/drivers/media/rc/Makefile
index 0989f940e9cf..379a5c0f1379 100644
--- a/drivers/media/rc/Makefile
+++ b/drivers/media/rc/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_IR_IMON) += imon.o
22obj-$(CONFIG_IR_ITE_CIR) += ite-cir.o 22obj-$(CONFIG_IR_ITE_CIR) += ite-cir.o
23obj-$(CONFIG_IR_MCEUSB) += mceusb.o 23obj-$(CONFIG_IR_MCEUSB) += mceusb.o
24obj-$(CONFIG_IR_FINTEK) += fintek-cir.o 24obj-$(CONFIG_IR_FINTEK) += fintek-cir.o
25obj-$(CONFIG_IR_MESON) += meson-ir.o
25obj-$(CONFIG_IR_NUVOTON) += nuvoton-cir.o 26obj-$(CONFIG_IR_NUVOTON) += nuvoton-cir.o
26obj-$(CONFIG_IR_ENE) += ene_ir.o 27obj-$(CONFIG_IR_ENE) += ene_ir.o
27obj-$(CONFIG_IR_REDRAT3) += redrat3.o 28obj-$(CONFIG_IR_REDRAT3) += redrat3.o
@@ -30,6 +31,7 @@ obj-$(CONFIG_IR_STREAMZAP) += streamzap.o
30obj-$(CONFIG_IR_WINBOND_CIR) += winbond-cir.o 31obj-$(CONFIG_IR_WINBOND_CIR) += winbond-cir.o
31obj-$(CONFIG_RC_LOOPBACK) += rc-loopback.o 32obj-$(CONFIG_RC_LOOPBACK) += rc-loopback.o
32obj-$(CONFIG_IR_GPIO_CIR) += gpio-ir-recv.o 33obj-$(CONFIG_IR_GPIO_CIR) += gpio-ir-recv.o
34obj-$(CONFIG_IR_IGORPLUGUSB) += igorplugusb.o
33obj-$(CONFIG_IR_IGUANA) += iguanair.o 35obj-$(CONFIG_IR_IGUANA) += iguanair.o
34obj-$(CONFIG_IR_TTUSBIR) += ttusbir.o 36obj-$(CONFIG_IR_TTUSBIR) += ttusbir.o
35obj-$(CONFIG_RC_ST) += st_rc.o 37obj-$(CONFIG_RC_ST) += st_rc.o
diff --git a/drivers/media/rc/igorplugusb.c b/drivers/media/rc/igorplugusb.c
new file mode 100644
index 000000000000..b36e51576f8e
--- /dev/null
+++ b/drivers/media/rc/igorplugusb.c
@@ -0,0 +1,261 @@
1/*
2 * IgorPlug-USB IR Receiver
3 *
4 * Copyright (C) 2014 Sean Young <sean@mess.org>
5 *
6 * Supports the standard homebrew IgorPlugUSB receiver with Igor's firmware.
7 * See http://www.cesko.host.sk/IgorPlugUSB/IgorPlug-USB%20(AVR)_eng.htm
8 *
9 * Based on the lirc_igorplugusb.c driver:
10 * Copyright (C) 2004 Jan M. Hochstein
11 * <hochstein@algo.informatik.tu-darmstadt.de>
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 */
23#include <linux/device.h>
24#include <linux/kernel.h>
25#include <linux/module.h>
26#include <linux/usb.h>
27#include <linux/usb/input.h>
28#include <media/rc-core.h>
29
30#define DRIVER_DESC "IgorPlug-USB IR Receiver"
31#define DRIVER_NAME "igorplugusb"
32
33#define HEADERLEN 3
34#define BUFLEN 36
35#define MAX_PACKET (HEADERLEN + BUFLEN)
36
37#define SET_INFRABUFFER_EMPTY 1
38#define GET_INFRACODE 2
39
40
41struct igorplugusb {
42 struct rc_dev *rc;
43 struct device *dev;
44
45 struct urb *urb;
46 struct usb_ctrlrequest request;
47
48 struct timer_list timer;
49
50 uint8_t buf_in[MAX_PACKET];
51
52 char phys[64];
53};
54
55static void igorplugusb_cmd(struct igorplugusb *ir, int cmd);
56
57static void igorplugusb_irdata(struct igorplugusb *ir, unsigned len)
58{
59 DEFINE_IR_RAW_EVENT(rawir);
60 unsigned i, start, overflow;
61
62 dev_dbg(ir->dev, "irdata: %*ph (len=%u)", len, ir->buf_in, len);
63
64 /*
65 * If more than 36 pulses and spaces follow each other, the igorplugusb
66 * overwrites its buffer from the beginning. The overflow value is the
67 * last offset which was not overwritten. Everything from this offset
68 * onwards occurred before everything until this offset.
69 */
70 overflow = ir->buf_in[2];
71 i = start = overflow + HEADERLEN;
72
73 if (start >= len) {
74 dev_err(ir->dev, "receive overflow invalid: %u", overflow);
75 } else {
76 if (overflow > 0)
77 dev_warn(ir->dev, "receive overflow, at least %u lost",
78 overflow);
79
80 do {
81 rawir.duration = ir->buf_in[i] * 85333;
82 rawir.pulse = i & 1;
83
84 ir_raw_event_store_with_filter(ir->rc, &rawir);
85
86 if (++i == len)
87 i = HEADERLEN;
88 } while (i != start);
89
90 /* add a trailing space */
91 rawir.duration = ir->rc->timeout;
92 rawir.pulse = false;
93 ir_raw_event_store_with_filter(ir->rc, &rawir);
94
95 ir_raw_event_handle(ir->rc);
96 }
97
98 igorplugusb_cmd(ir, SET_INFRABUFFER_EMPTY);
99}
100
101static void igorplugusb_callback(struct urb *urb)
102{
103 struct usb_ctrlrequest *req;
104 struct igorplugusb *ir = urb->context;
105
106 req = (struct usb_ctrlrequest *)urb->setup_packet;
107
108 switch (urb->status) {
109 case 0:
110 if (req->bRequest == GET_INFRACODE &&
111 urb->actual_length > HEADERLEN)
112 igorplugusb_irdata(ir, urb->actual_length);
113 else /* request IR */
114 mod_timer(&ir->timer, jiffies + msecs_to_jiffies(50));
115 break;
116 case -EPROTO:
117 case -ECONNRESET:
118 case -ENOENT:
119 case -ESHUTDOWN:
120 usb_unlink_urb(urb);
121 return;
122 default:
123 dev_warn(ir->dev, "Error: urb status = %d\n", urb->status);
124 igorplugusb_cmd(ir, SET_INFRABUFFER_EMPTY);
125 break;
126 }
127}
128
129static void igorplugusb_cmd(struct igorplugusb *ir, int cmd)
130{
131 int ret;
132
133 ir->request.bRequest = cmd;
134 ir->urb->transfer_flags = 0;
135 ret = usb_submit_urb(ir->urb, GFP_ATOMIC);
136 if (ret)
137 dev_err(ir->dev, "submit urb failed: %d", ret);
138}
139
140static void igorplugusb_timer(unsigned long data)
141{
142 struct igorplugusb *ir = (struct igorplugusb *)data;
143
144 igorplugusb_cmd(ir, GET_INFRACODE);
145}
146
147static int igorplugusb_probe(struct usb_interface *intf,
148 const struct usb_device_id *id)
149{
150 struct usb_device *udev;
151 struct usb_host_interface *idesc;
152 struct usb_endpoint_descriptor *ep;
153 struct igorplugusb *ir;
154 struct rc_dev *rc;
155 int ret;
156
157 udev = interface_to_usbdev(intf);
158 idesc = intf->cur_altsetting;
159
160 if (idesc->desc.bNumEndpoints != 1) {
161 dev_err(&intf->dev, "incorrect number of endpoints");
162 return -ENODEV;
163 }
164
165 ep = &idesc->endpoint[0].desc;
166 if (!usb_endpoint_dir_in(ep) || !usb_endpoint_xfer_control(ep)) {
167 dev_err(&intf->dev, "endpoint incorrect");
168 return -ENODEV;
169 }
170
171 ir = devm_kzalloc(&intf->dev, sizeof(*ir), GFP_KERNEL);
172 if (!ir)
173 return -ENOMEM;
174
175 ir->dev = &intf->dev;
176
177 setup_timer(&ir->timer, igorplugusb_timer, (unsigned long)ir);
178
179 ir->request.bRequest = GET_INFRACODE;
180 ir->request.bRequestType = USB_TYPE_VENDOR | USB_DIR_IN;
181 ir->request.wLength = cpu_to_le16(sizeof(ir->buf_in));
182
183 ir->urb = usb_alloc_urb(0, GFP_KERNEL);
184 if (!ir->urb)
185 return -ENOMEM;
186
187 usb_fill_control_urb(ir->urb, udev,
188 usb_rcvctrlpipe(udev, 0), (uint8_t *)&ir->request,
189 ir->buf_in, sizeof(ir->buf_in), igorplugusb_callback, ir);
190
191 usb_make_path(udev, ir->phys, sizeof(ir->phys));
192
193 rc = rc_allocate_device();
194 rc->input_name = DRIVER_DESC;
195 rc->input_phys = ir->phys;
196 usb_to_input_id(udev, &rc->input_id);
197 rc->dev.parent = &intf->dev;
198 rc->driver_type = RC_DRIVER_IR_RAW;
199 /*
200 * This device can only store 36 pulses + spaces, which is not enough
201 * for the NEC protocol and many others.
202 */
203 rc->allowed_protocols = RC_BIT_ALL & ~(RC_BIT_NEC | RC_BIT_RC6_6A_20 |
204 RC_BIT_RC6_6A_24 | RC_BIT_RC6_6A_32 | RC_BIT_RC6_MCE |
205 RC_BIT_SONY20 | RC_BIT_MCE_KBD | RC_BIT_SANYO);
206
207 rc->priv = ir;
208 rc->driver_name = DRIVER_NAME;
209 rc->map_name = RC_MAP_HAUPPAUGE;
210 rc->timeout = MS_TO_NS(100);
211 rc->rx_resolution = 85333;
212
213 ir->rc = rc;
214 ret = rc_register_device(rc);
215 if (ret) {
216 dev_err(&intf->dev, "failed to register rc device: %d", ret);
217 rc_free_device(rc);
218 usb_free_urb(ir->urb);
219 return ret;
220 }
221
222 usb_set_intfdata(intf, ir);
223
224 igorplugusb_cmd(ir, SET_INFRABUFFER_EMPTY);
225
226 return 0;
227}
228
229static void igorplugusb_disconnect(struct usb_interface *intf)
230{
231 struct igorplugusb *ir = usb_get_intfdata(intf);
232
233 rc_unregister_device(ir->rc);
234 del_timer_sync(&ir->timer);
235 usb_set_intfdata(intf, NULL);
236 usb_kill_urb(ir->urb);
237 usb_free_urb(ir->urb);
238}
239
240static struct usb_device_id igorplugusb_table[] = {
241 /* Igor Plug USB (Atmel's Manufact. ID) */
242 { USB_DEVICE(0x03eb, 0x0002) },
243 /* Fit PC2 Infrared Adapter */
244 { USB_DEVICE(0x03eb, 0x21fe) },
245 /* Terminating entry */
246 { }
247};
248
249static struct usb_driver igorplugusb_driver = {
250 .name = DRIVER_NAME,
251 .probe = igorplugusb_probe,
252 .disconnect = igorplugusb_disconnect,
253 .id_table = igorplugusb_table
254};
255
256module_usb_driver(igorplugusb_driver);
257
258MODULE_DESCRIPTION(DRIVER_DESC);
259MODULE_AUTHOR("Sean Young <sean@mess.org>");
260MODULE_LICENSE("GPL");
261MODULE_DEVICE_TABLE(usb, igorplugusb_table);
diff --git a/drivers/media/rc/img-ir/Kconfig b/drivers/media/rc/img-ir/Kconfig
index 03ba9fc170fb..580715c7fc5e 100644
--- a/drivers/media/rc/img-ir/Kconfig
+++ b/drivers/media/rc/img-ir/Kconfig
@@ -1,6 +1,7 @@
1config IR_IMG 1config IR_IMG
2 tristate "ImgTec IR Decoder" 2 tristate "ImgTec IR Decoder"
3 depends on RC_CORE 3 depends on RC_CORE
4 depends on METAG || MIPS || COMPILE_TEST
4 select IR_IMG_HW if !IR_IMG_RAW 5 select IR_IMG_HW if !IR_IMG_RAW
5 help 6 help
6 Say Y or M here if you want to use the ImgTec infrared decoder 7 Say Y or M here if you want to use the ImgTec infrared decoder
diff --git a/drivers/media/rc/img-ir/img-ir-core.c b/drivers/media/rc/img-ir/img-ir-core.c
index a0cac2f09109..77c78de4f5bf 100644
--- a/drivers/media/rc/img-ir/img-ir-core.c
+++ b/drivers/media/rc/img-ir/img-ir-core.c
@@ -166,7 +166,6 @@ MODULE_DEVICE_TABLE(of, img_ir_match);
166static struct platform_driver img_ir_driver = { 166static struct platform_driver img_ir_driver = {
167 .driver = { 167 .driver = {
168 .name = "img-ir", 168 .name = "img-ir",
169 .owner = THIS_MODULE,
170 .of_match_table = img_ir_match, 169 .of_match_table = img_ir_match,
171 .pm = &img_ir_pmops, 170 .pm = &img_ir_pmops,
172 }, 171 },
diff --git a/drivers/media/rc/img-ir/img-ir-hw.c b/drivers/media/rc/img-ir/img-ir-hw.c
index ec49f94425fc..2fd47c9bf5d8 100644
--- a/drivers/media/rc/img-ir/img-ir-hw.c
+++ b/drivers/media/rc/img-ir/img-ir-hw.c
@@ -530,6 +530,22 @@ static void img_ir_set_decoder(struct img_ir_priv *priv,
530 u32 ir_status, irq_en; 530 u32 ir_status, irq_en;
531 spin_lock_irq(&priv->lock); 531 spin_lock_irq(&priv->lock);
532 532
533 /*
534 * First record that the protocol is being stopped so that the end timer
535 * isn't restarted while we're trying to stop it.
536 */
537 hw->stopping = true;
538
539 /*
540 * Release the lock to stop the end timer, since the end timer handler
541 * acquires the lock and we don't want to deadlock waiting for it.
542 */
543 spin_unlock_irq(&priv->lock);
544 del_timer_sync(&hw->end_timer);
545 spin_lock_irq(&priv->lock);
546
547 hw->stopping = false;
548
533 /* switch off and disable interrupts */ 549 /* switch off and disable interrupts */
534 img_ir_write(priv, IMG_IR_CONTROL, 0); 550 img_ir_write(priv, IMG_IR_CONTROL, 0);
535 irq_en = img_ir_read(priv, IMG_IR_IRQ_ENABLE); 551 irq_en = img_ir_read(priv, IMG_IR_IRQ_ENABLE);
@@ -541,12 +557,13 @@ static void img_ir_set_decoder(struct img_ir_priv *priv,
541 if (ir_status & (IMG_IR_RXDVAL | IMG_IR_RXDVALD2)) { 557 if (ir_status & (IMG_IR_RXDVAL | IMG_IR_RXDVALD2)) {
542 ir_status &= ~(IMG_IR_RXDVAL | IMG_IR_RXDVALD2); 558 ir_status &= ~(IMG_IR_RXDVAL | IMG_IR_RXDVALD2);
543 img_ir_write(priv, IMG_IR_STATUS, ir_status); 559 img_ir_write(priv, IMG_IR_STATUS, ir_status);
544 img_ir_read(priv, IMG_IR_DATA_LW);
545 img_ir_read(priv, IMG_IR_DATA_UP);
546 } 560 }
547 561
548 /* stop the end timer and switch back to normal mode */ 562 /* always read data to clear buffer if IR wakes the device */
549 del_timer_sync(&hw->end_timer); 563 img_ir_read(priv, IMG_IR_DATA_LW);
564 img_ir_read(priv, IMG_IR_DATA_UP);
565
566 /* switch back to normal mode */
550 hw->mode = IMG_IR_M_NORMAL; 567 hw->mode = IMG_IR_M_NORMAL;
551 568
552 /* clear the wakeup scancode filter */ 569 /* clear the wakeup scancode filter */
@@ -817,7 +834,8 @@ static void img_ir_handle_data(struct img_ir_priv *priv, u32 len, u64 raw)
817 } 834 }
818 835
819 836
820 if (dec->repeat) { 837 /* we mustn't update the end timer while trying to stop it */
838 if (dec->repeat && !hw->stopping) {
821 unsigned long interval; 839 unsigned long interval;
822 840
823 img_ir_begin_repeat(priv); 841 img_ir_begin_repeat(priv);
diff --git a/drivers/media/rc/img-ir/img-ir-hw.h b/drivers/media/rc/img-ir/img-ir-hw.h
index 8fcc16c32c5b..5c2b216c5fe3 100644
--- a/drivers/media/rc/img-ir/img-ir-hw.h
+++ b/drivers/media/rc/img-ir/img-ir-hw.h
@@ -186,9 +186,6 @@ struct img_ir_reg_timings {
186 struct img_ir_timing_regvals rtimings; 186 struct img_ir_timing_regvals rtimings;
187}; 187};
188 188
189int img_ir_register_decoder(struct img_ir_decoder *dec);
190void img_ir_unregister_decoder(struct img_ir_decoder *dec);
191
192struct img_ir_priv; 189struct img_ir_priv;
193 190
194#ifdef CONFIG_IR_IMG_HW 191#ifdef CONFIG_IR_IMG_HW
@@ -214,6 +211,8 @@ enum img_ir_mode {
214 * @flags: IMG_IR_F_*. 211 * @flags: IMG_IR_F_*.
215 * @filters: HW filters (derived from scancode filters). 212 * @filters: HW filters (derived from scancode filters).
216 * @mode: Current decode mode. 213 * @mode: Current decode mode.
214 * @stopping: Indicates that decoder is being taken down and timers
215 * should not be restarted.
217 * @suspend_irqen: Saved IRQ enable mask over suspend. 216 * @suspend_irqen: Saved IRQ enable mask over suspend.
218 */ 217 */
219struct img_ir_priv_hw { 218struct img_ir_priv_hw {
@@ -229,6 +228,7 @@ struct img_ir_priv_hw {
229 struct img_ir_filter filters[RC_FILTER_MAX]; 228 struct img_ir_filter filters[RC_FILTER_MAX];
230 229
231 enum img_ir_mode mode; 230 enum img_ir_mode mode;
231 bool stopping;
232 u32 suspend_irqen; 232 u32 suspend_irqen;
233}; 233};
234 234
diff --git a/drivers/media/rc/ir-lirc-codec.c b/drivers/media/rc/ir-lirc-codec.c
index ed2c8a1ed8ca..98893a8332c7 100644
--- a/drivers/media/rc/ir-lirc-codec.c
+++ b/drivers/media/rc/ir-lirc-codec.c
@@ -42,11 +42,17 @@ static int ir_lirc_decode(struct rc_dev *dev, struct ir_raw_event ev)
42 return -EINVAL; 42 return -EINVAL;
43 43
44 /* Packet start */ 44 /* Packet start */
45 if (ev.reset) 45 if (ev.reset) {
46 return 0; 46 /* Userspace expects a long space event before the start of
47 * the signal to use as a sync. This may be done with repeat
48 * packets and normal samples. But if a reset has been sent
49 * then we assume that a long time has passed, so we send a
50 * space with the maximum time value. */
51 sample = LIRC_SPACE(LIRC_VALUE_MASK);
52 IR_dprintk(2, "delivering reset sync space to lirc_dev\n");
47 53
48 /* Carrier reports */ 54 /* Carrier reports */
49 if (ev.carrier_report) { 55 } else if (ev.carrier_report) {
50 sample = LIRC_FREQUENCY(ev.carrier); 56 sample = LIRC_FREQUENCY(ev.carrier);
51 IR_dprintk(2, "carrier report (freq: %d)\n", sample); 57 IR_dprintk(2, "carrier report (freq: %d)\n", sample);
52 58
diff --git a/drivers/media/rc/ir-rc6-decoder.c b/drivers/media/rc/ir-rc6-decoder.c
index f1f098e22f7e..d16bc67af732 100644
--- a/drivers/media/rc/ir-rc6-decoder.c
+++ b/drivers/media/rc/ir-rc6-decoder.c
@@ -259,8 +259,8 @@ again:
259 case 32: 259 case 32:
260 if ((scancode & RC6_6A_LCC_MASK) == RC6_6A_MCE_CC) { 260 if ((scancode & RC6_6A_LCC_MASK) == RC6_6A_MCE_CC) {
261 protocol = RC_TYPE_RC6_MCE; 261 protocol = RC_TYPE_RC6_MCE;
262 scancode &= ~RC6_6A_MCE_TOGGLE_MASK;
263 toggle = !!(scancode & RC6_6A_MCE_TOGGLE_MASK); 262 toggle = !!(scancode & RC6_6A_MCE_TOGGLE_MASK);
263 scancode &= ~RC6_6A_MCE_TOGGLE_MASK;
264 } else { 264 } else {
265 protocol = RC_BIT_RC6_6A_32; 265 protocol = RC_BIT_RC6_6A_32;
266 toggle = 0; 266 toggle = 0;
diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
index 249d2fbc8f37..1e0545a67959 100644
--- a/drivers/media/rc/lirc_dev.c
+++ b/drivers/media/rc/lirc_dev.c
@@ -518,8 +518,7 @@ int lirc_dev_fop_close(struct inode *inode, struct file *file)
518 518
519 WARN_ON(mutex_lock_killable(&lirc_dev_lock)); 519 WARN_ON(mutex_lock_killable(&lirc_dev_lock));
520 520
521 if (ir->d.rdev) 521 rc_close(ir->d.rdev);
522 rc_close(ir->d.rdev);
523 522
524 ir->open--; 523 ir->open--;
525 if (ir->attached) { 524 if (ir->attached) {
diff --git a/drivers/media/rc/meson-ir.c b/drivers/media/rc/meson-ir.c
new file mode 100644
index 000000000000..fcc3b82d1454
--- /dev/null
+++ b/drivers/media/rc/meson-ir.c
@@ -0,0 +1,216 @@
1/*
2 * Driver for Amlogic Meson IR remote receiver
3 *
4 * Copyright (C) 2014 Beniamino Galvani <b.galvani@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * version 2 as published by the Free Software Foundation.
9 *
10 * You should have received a copy of the GNU General Public License
11 * along with this program. If not, see <http://www.gnu.org/licenses/>.
12 */
13
14#include <linux/device.h>
15#include <linux/err.h>
16#include <linux/interrupt.h>
17#include <linux/io.h>
18#include <linux/module.h>
19#include <linux/of_platform.h>
20#include <linux/platform_device.h>
21#include <linux/spinlock.h>
22
23#include <media/rc-core.h>
24
25#define DRIVER_NAME "meson-ir"
26
27#define IR_DEC_LDR_ACTIVE 0x00
28#define IR_DEC_LDR_IDLE 0x04
29#define IR_DEC_LDR_REPEAT 0x08
30#define IR_DEC_BIT_0 0x0c
31#define IR_DEC_REG0 0x10
32#define IR_DEC_FRAME 0x14
33#define IR_DEC_STATUS 0x18
34#define IR_DEC_REG1 0x1c
35
36#define REG0_RATE_MASK (BIT(11) - 1)
37
38#define REG1_MODE_MASK (BIT(7) | BIT(8))
39#define REG1_MODE_NEC (0 << 7)
40#define REG1_MODE_GENERAL (2 << 7)
41
42#define REG1_TIME_IV_SHIFT 16
43#define REG1_TIME_IV_MASK ((BIT(13) - 1) << REG1_TIME_IV_SHIFT)
44
45#define REG1_IRQSEL_MASK (BIT(2) | BIT(3))
46#define REG1_IRQSEL_NEC_MODE (0 << 2)
47#define REG1_IRQSEL_RISE_FALL (1 << 2)
48#define REG1_IRQSEL_FALL (2 << 2)
49#define REG1_IRQSEL_RISE (3 << 2)
50
51#define REG1_RESET BIT(0)
52#define REG1_ENABLE BIT(15)
53
54#define STATUS_IR_DEC_IN BIT(8)
55
56#define MESON_TRATE 10 /* us */
57
58struct meson_ir {
59 void __iomem *reg;
60 struct rc_dev *rc;
61 int irq;
62 spinlock_t lock;
63};
64
65static void meson_ir_set_mask(struct meson_ir *ir, unsigned int reg,
66 u32 mask, u32 value)
67{
68 u32 data;
69
70 data = readl(ir->reg + reg);
71 data &= ~mask;
72 data |= (value & mask);
73 writel(data, ir->reg + reg);
74}
75
76static irqreturn_t meson_ir_irq(int irqno, void *dev_id)
77{
78 struct meson_ir *ir = dev_id;
79 u32 duration;
80 DEFINE_IR_RAW_EVENT(rawir);
81
82 spin_lock(&ir->lock);
83
84 duration = readl(ir->reg + IR_DEC_REG1);
85 duration = (duration & REG1_TIME_IV_MASK) >> REG1_TIME_IV_SHIFT;
86 rawir.duration = US_TO_NS(duration * MESON_TRATE);
87
88 rawir.pulse = !!(readl(ir->reg + IR_DEC_STATUS) & STATUS_IR_DEC_IN);
89
90 ir_raw_event_store_with_filter(ir->rc, &rawir);
91 ir_raw_event_handle(ir->rc);
92
93 spin_unlock(&ir->lock);
94
95 return IRQ_HANDLED;
96}
97
98static int meson_ir_probe(struct platform_device *pdev)
99{
100 struct device *dev = &pdev->dev;
101 struct device_node *node = dev->of_node;
102 struct resource *res;
103 const char *map_name;
104 struct meson_ir *ir;
105 int ret;
106
107 ir = devm_kzalloc(dev, sizeof(struct meson_ir), GFP_KERNEL);
108 if (!ir)
109 return -ENOMEM;
110
111 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
112 ir->reg = devm_ioremap_resource(dev, res);
113 if (IS_ERR(ir->reg)) {
114 dev_err(dev, "failed to map registers\n");
115 return PTR_ERR(ir->reg);
116 }
117
118 ir->irq = platform_get_irq(pdev, 0);
119 if (ir->irq < 0) {
120 dev_err(dev, "no irq resource\n");
121 return ir->irq;
122 }
123
124 ir->rc = rc_allocate_device();
125 if (!ir->rc) {
126 dev_err(dev, "failed to allocate rc device\n");
127 return -ENOMEM;
128 }
129
130 ir->rc->priv = ir;
131 ir->rc->input_name = DRIVER_NAME;
132 ir->rc->input_phys = DRIVER_NAME "/input0";
133 ir->rc->input_id.bustype = BUS_HOST;
134 map_name = of_get_property(node, "linux,rc-map-name", NULL);
135 ir->rc->map_name = map_name ? map_name : RC_MAP_EMPTY;
136 ir->rc->dev.parent = dev;
137 ir->rc->driver_type = RC_DRIVER_IR_RAW;
138 ir->rc->allowed_protocols = RC_BIT_ALL;
139 ir->rc->rx_resolution = US_TO_NS(MESON_TRATE);
140 ir->rc->timeout = MS_TO_NS(200);
141 ir->rc->driver_name = DRIVER_NAME;
142
143 spin_lock_init(&ir->lock);
144 platform_set_drvdata(pdev, ir);
145
146 ret = rc_register_device(ir->rc);
147 if (ret) {
148 dev_err(dev, "failed to register rc device\n");
149 goto out_free;
150 }
151
152 ret = devm_request_irq(dev, ir->irq, meson_ir_irq, 0, "ir-meson", ir);
153 if (ret) {
154 dev_err(dev, "failed to request irq\n");
155 goto out_unreg;
156 }
157
158 /* Reset the decoder */
159 meson_ir_set_mask(ir, IR_DEC_REG1, REG1_RESET, REG1_RESET);
160 meson_ir_set_mask(ir, IR_DEC_REG1, REG1_RESET, 0);
161 /* Set general operation mode */
162 meson_ir_set_mask(ir, IR_DEC_REG1, REG1_MODE_MASK, REG1_MODE_GENERAL);
163 /* Set rate */
164 meson_ir_set_mask(ir, IR_DEC_REG0, REG0_RATE_MASK, MESON_TRATE - 1);
165 /* IRQ on rising and falling edges */
166 meson_ir_set_mask(ir, IR_DEC_REG1, REG1_IRQSEL_MASK,
167 REG1_IRQSEL_RISE_FALL);
168 /* Enable the decoder */
169 meson_ir_set_mask(ir, IR_DEC_REG1, REG1_ENABLE, REG1_ENABLE);
170
171 dev_info(dev, "receiver initialized\n");
172
173 return 0;
174out_unreg:
175 rc_unregister_device(ir->rc);
176 ir->rc = NULL;
177out_free:
178 rc_free_device(ir->rc);
179
180 return ret;
181}
182
183static int meson_ir_remove(struct platform_device *pdev)
184{
185 struct meson_ir *ir = platform_get_drvdata(pdev);
186 unsigned long flags;
187
188 /* Disable the decoder */
189 spin_lock_irqsave(&ir->lock, flags);
190 meson_ir_set_mask(ir, IR_DEC_REG1, REG1_ENABLE, 0);
191 spin_unlock_irqrestore(&ir->lock, flags);
192
193 rc_unregister_device(ir->rc);
194
195 return 0;
196}
197
198static const struct of_device_id meson_ir_match[] = {
199 { .compatible = "amlogic,meson6-ir" },
200 { },
201};
202
203static struct platform_driver meson_ir_driver = {
204 .probe = meson_ir_probe,
205 .remove = meson_ir_remove,
206 .driver = {
207 .name = DRIVER_NAME,
208 .of_match_table = meson_ir_match,
209 },
210};
211
212module_platform_driver(meson_ir_driver);
213
214MODULE_DESCRIPTION("Amlogic Meson IR remote receiver driver");
215MODULE_AUTHOR("Beniamino Galvani <b.galvani@gmail.com>");
216MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 8d3b74c5a717..86ffcd54339e 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -1298,8 +1298,7 @@ void rc_free_device(struct rc_dev *dev)
1298 if (!dev) 1298 if (!dev)
1299 return; 1299 return;
1300 1300
1301 if (dev->input_dev) 1301 input_free_device(dev->input_dev);
1302 input_free_device(dev->input_dev);
1303 1302
1304 put_device(&dev->dev); 1303 put_device(&dev->dev);
1305 1304
@@ -1414,13 +1413,16 @@ int rc_register_device(struct rc_dev *dev)
1414 ir_raw_init(); 1413 ir_raw_init();
1415 raw_init = true; 1414 raw_init = true;
1416 } 1415 }
1416 /* calls ir_register_device so unlock mutex here*/
1417 mutex_unlock(&dev->lock);
1417 rc = ir_raw_event_register(dev); 1418 rc = ir_raw_event_register(dev);
1419 mutex_lock(&dev->lock);
1418 if (rc < 0) 1420 if (rc < 0)
1419 goto out_input; 1421 goto out_input;
1420 } 1422 }
1421 1423
1422 if (dev->change_protocol) { 1424 if (dev->change_protocol) {
1423 u64 rc_type = (1 << rc_map->rc_type); 1425 u64 rc_type = (1ll << rc_map->rc_type);
1424 if (dev->driver_type == RC_DRIVER_IR_RAW) 1426 if (dev->driver_type == RC_DRIVER_IR_RAW)
1425 rc_type |= RC_BIT_LIRC; 1427 rc_type |= RC_BIT_LIRC;
1426 rc = dev->change_protocol(dev, &rc_type); 1428 rc = dev->change_protocol(dev, &rc_type);
diff --git a/drivers/media/rc/redrat3.c b/drivers/media/rc/redrat3.c
index 795b394a5d84..c4def66f9aa2 100644
--- a/drivers/media/rc/redrat3.c
+++ b/drivers/media/rc/redrat3.c
@@ -966,7 +966,7 @@ static int redrat3_dev_probe(struct usb_interface *intf,
966 966
967 rr3->ep_in = ep_in; 967 rr3->ep_in = ep_in;
968 rr3->bulk_in_buf = usb_alloc_coherent(udev, 968 rr3->bulk_in_buf = usb_alloc_coherent(udev,
969 le16_to_cpu(ep_in->wMaxPacketSize), GFP_ATOMIC, &rr3->dma_in); 969 le16_to_cpu(ep_in->wMaxPacketSize), GFP_KERNEL, &rr3->dma_in);
970 if (!rr3->bulk_in_buf) { 970 if (!rr3->bulk_in_buf) {
971 dev_err(dev, "Read buffer allocation failure\n"); 971 dev_err(dev, "Read buffer allocation failure\n");
972 goto error; 972 goto error;
@@ -975,6 +975,8 @@ static int redrat3_dev_probe(struct usb_interface *intf,
975 pipe = usb_rcvbulkpipe(udev, ep_in->bEndpointAddress); 975 pipe = usb_rcvbulkpipe(udev, ep_in->bEndpointAddress);
976 usb_fill_bulk_urb(rr3->read_urb, udev, pipe, rr3->bulk_in_buf, 976 usb_fill_bulk_urb(rr3->read_urb, udev, pipe, rr3->bulk_in_buf,
977 le16_to_cpu(ep_in->wMaxPacketSize), redrat3_handle_async, rr3); 977 le16_to_cpu(ep_in->wMaxPacketSize), redrat3_handle_async, rr3);
978 rr3->read_urb->transfer_dma = rr3->dma_in;
979 rr3->read_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
978 980
979 rr3->ep_out = ep_out; 981 rr3->ep_out = ep_out;
980 rr3->udev = udev; 982 rr3->udev = udev;
diff --git a/drivers/media/tuners/Kconfig b/drivers/media/tuners/Kconfig
index f039dc2a21cf..42e5a01b9192 100644
--- a/drivers/media/tuners/Kconfig
+++ b/drivers/media/tuners/Kconfig
@@ -232,6 +232,14 @@ config MEDIA_TUNER_M88TS2022
232 help 232 help
233 Montage M88TS2022 silicon tuner driver. 233 Montage M88TS2022 silicon tuner driver.
234 234
235config MEDIA_TUNER_M88RS6000T
236 tristate "Montage M88RS6000 internal tuner"
237 depends on MEDIA_SUPPORT && I2C
238 select REGMAP_I2C
239 default m if !MEDIA_SUBDRV_AUTOSELECT
240 help
241 Montage M88RS6000 internal tuner.
242
235config MEDIA_TUNER_TUA9001 243config MEDIA_TUNER_TUA9001
236 tristate "Infineon TUA 9001 silicon tuner" 244 tristate "Infineon TUA 9001 silicon tuner"
237 depends on MEDIA_SUPPORT && I2C 245 depends on MEDIA_SUPPORT && I2C
diff --git a/drivers/media/tuners/Makefile b/drivers/media/tuners/Makefile
index 49fcf8033848..da4fe6ef73e7 100644
--- a/drivers/media/tuners/Makefile
+++ b/drivers/media/tuners/Makefile
@@ -41,6 +41,7 @@ obj-$(CONFIG_MEDIA_TUNER_IT913X) += it913x.o
41obj-$(CONFIG_MEDIA_TUNER_R820T) += r820t.o 41obj-$(CONFIG_MEDIA_TUNER_R820T) += r820t.o
42obj-$(CONFIG_MEDIA_TUNER_MXL301RF) += mxl301rf.o 42obj-$(CONFIG_MEDIA_TUNER_MXL301RF) += mxl301rf.o
43obj-$(CONFIG_MEDIA_TUNER_QM1D1C0042) += qm1d1c0042.o 43obj-$(CONFIG_MEDIA_TUNER_QM1D1C0042) += qm1d1c0042.o
44obj-$(CONFIG_MEDIA_TUNER_M88RS6000T) += m88rs6000t.o
44 45
45ccflags-y += -I$(srctree)/drivers/media/dvb-core 46ccflags-y += -I$(srctree)/drivers/media/dvb-core
46ccflags-y += -I$(srctree)/drivers/media/dvb-frontends 47ccflags-y += -I$(srctree)/drivers/media/dvb-frontends
diff --git a/drivers/media/tuners/m88rs6000t.c b/drivers/media/tuners/m88rs6000t.c
new file mode 100644
index 000000000000..d4c13fe6e7b3
--- /dev/null
+++ b/drivers/media/tuners/m88rs6000t.c
@@ -0,0 +1,744 @@
1/*
2 * Driver for the internal tuner of Montage M88RS6000
3 *
4 * Copyright (C) 2014 Max nibble <nibble.max@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16
17#include "m88rs6000t.h"
18#include <linux/regmap.h>
19
20struct m88rs6000t_dev {
21 struct m88rs6000t_config cfg;
22 struct i2c_client *client;
23 struct regmap *regmap;
24 u32 frequency_khz;
25};
26
27struct m88rs6000t_reg_val {
28 u8 reg;
29 u8 val;
30};
31
32/* set demod main mclk and ts mclk */
33static int m88rs6000t_set_demod_mclk(struct dvb_frontend *fe)
34{
35 struct m88rs6000t_dev *dev = fe->tuner_priv;
36 struct dtv_frontend_properties *c = &fe->dtv_property_cache;
37 u8 reg11, reg15, reg16, reg1D, reg1E, reg1F;
38 u8 N, f0 = 0, f1 = 0, f2 = 0, f3 = 0;
39 u16 pll_div_fb;
40 u32 div, ts_mclk;
41 unsigned int utmp;
42 int ret;
43
44 /* select demod main mclk */
45 ret = regmap_read(dev->regmap, 0x15, &utmp);
46 if (ret)
47 goto err;
48 reg15 = utmp;
49 if (c->symbol_rate > 45010000) {
50 reg11 = 0x0E;
51 reg15 |= 0x02;
52 reg16 = 115; /* mclk = 110.25MHz */
53 } else {
54 reg11 = 0x0A;
55 reg15 &= ~0x02;
56 reg16 = 96; /* mclk = 96MHz */
57 }
58
59 /* set ts mclk */
60 if (c->delivery_system == SYS_DVBS)
61 ts_mclk = 96000;
62 else
63 ts_mclk = 144000;
64
65 pll_div_fb = (reg15 & 0x01) << 8;
66 pll_div_fb += reg16;
67 pll_div_fb += 32;
68
69 div = 36000 * pll_div_fb;
70 div /= ts_mclk;
71
72 if (div <= 32) {
73 N = 2;
74 f0 = 0;
75 f1 = div / 2;
76 f2 = div - f1;
77 f3 = 0;
78 } else if (div <= 48) {
79 N = 3;
80 f0 = div / 3;
81 f1 = (div - f0) / 2;
82 f2 = div - f0 - f1;
83 f3 = 0;
84 } else if (div <= 64) {
85 N = 4;
86 f0 = div / 4;
87 f1 = (div - f0) / 3;
88 f2 = (div - f0 - f1) / 2;
89 f3 = div - f0 - f1 - f2;
90 } else {
91 N = 4;
92 f0 = 16;
93 f1 = 16;
94 f2 = 16;
95 f3 = 16;
96 }
97
98 if (f0 == 16)
99 f0 = 0;
100 if (f1 == 16)
101 f1 = 0;
102 if (f2 == 16)
103 f2 = 0;
104 if (f3 == 16)
105 f3 = 0;
106
107 ret = regmap_read(dev->regmap, 0x1D, &utmp);
108 if (ret)
109 goto err;
110 reg1D = utmp;
111 reg1D &= ~0x03;
112 reg1D |= N - 1;
113 reg1E = ((f3 << 4) + f2) & 0xFF;
114 reg1F = ((f1 << 4) + f0) & 0xFF;
115
116 /* program and recalibrate demod PLL */
117 ret = regmap_write(dev->regmap, 0x05, 0x40);
118 if (ret)
119 goto err;
120 ret = regmap_write(dev->regmap, 0x11, 0x08);
121 if (ret)
122 goto err;
123 ret = regmap_write(dev->regmap, 0x15, reg15);
124 if (ret)
125 goto err;
126 ret = regmap_write(dev->regmap, 0x16, reg16);
127 if (ret)
128 goto err;
129 ret = regmap_write(dev->regmap, 0x1D, reg1D);
130 if (ret)
131 goto err;
132 ret = regmap_write(dev->regmap, 0x1E, reg1E);
133 if (ret)
134 goto err;
135 ret = regmap_write(dev->regmap, 0x1F, reg1F);
136 if (ret)
137 goto err;
138 ret = regmap_write(dev->regmap, 0x17, 0xc1);
139 if (ret)
140 goto err;
141 ret = regmap_write(dev->regmap, 0x17, 0x81);
142 if (ret)
143 goto err;
144 usleep_range(5000, 50000);
145 ret = regmap_write(dev->regmap, 0x05, 0x00);
146 if (ret)
147 goto err;
148 ret = regmap_write(dev->regmap, 0x11, reg11);
149 if (ret)
150 goto err;
151 usleep_range(5000, 50000);
152err:
153 if (ret)
154 dev_dbg(&dev->client->dev, "failed=%d\n", ret);
155 return ret;
156}
157
158static int m88rs6000t_set_pll_freq(struct m88rs6000t_dev *dev,
159 u32 tuner_freq_MHz)
160{
161 u32 fcry_KHz, ulNDiv1, ulNDiv2, ulNDiv;
162 u8 refDiv, ucLoDiv1, ucLomod1, ucLoDiv2, ucLomod2, ucLoDiv, ucLomod;
163 u8 reg27, reg29, reg42, reg42buf;
164 unsigned int utmp;
165 int ret;
166
167 fcry_KHz = 27000; /* in kHz */
168 refDiv = 27;
169
170 ret = regmap_write(dev->regmap, 0x36, (refDiv - 8));
171 if (ret)
172 goto err;
173 ret = regmap_write(dev->regmap, 0x31, 0x00);
174 if (ret)
175 goto err;
176 ret = regmap_write(dev->regmap, 0x2c, 0x02);
177 if (ret)
178 goto err;
179
180 if (tuner_freq_MHz >= 1550) {
181 ucLoDiv1 = 2;
182 ucLomod1 = 0;
183 ucLoDiv2 = 2;
184 ucLomod2 = 0;
185 } else if (tuner_freq_MHz >= 1380) {
186 ucLoDiv1 = 3;
187 ucLomod1 = 16;
188 ucLoDiv2 = 2;
189 ucLomod2 = 0;
190 } else if (tuner_freq_MHz >= 1070) {
191 ucLoDiv1 = 3;
192 ucLomod1 = 16;
193 ucLoDiv2 = 3;
194 ucLomod2 = 16;
195 } else if (tuner_freq_MHz >= 1000) {
196 ucLoDiv1 = 3;
197 ucLomod1 = 16;
198 ucLoDiv2 = 4;
199 ucLomod2 = 64;
200 } else if (tuner_freq_MHz >= 775) {
201 ucLoDiv1 = 4;
202 ucLomod1 = 64;
203 ucLoDiv2 = 4;
204 ucLomod2 = 64;
205 } else if (tuner_freq_MHz >= 700) {
206 ucLoDiv1 = 6;
207 ucLomod1 = 48;
208 ucLoDiv2 = 4;
209 ucLomod2 = 64;
210 } else if (tuner_freq_MHz >= 520) {
211 ucLoDiv1 = 6;
212 ucLomod1 = 48;
213 ucLoDiv2 = 6;
214 ucLomod2 = 48;
215 } else {
216 ucLoDiv1 = 8;
217 ucLomod1 = 96;
218 ucLoDiv2 = 8;
219 ucLomod2 = 96;
220 }
221
222 ulNDiv1 = ((tuner_freq_MHz * ucLoDiv1 * 1000) * refDiv
223 / fcry_KHz - 1024) / 2;
224 ulNDiv2 = ((tuner_freq_MHz * ucLoDiv2 * 1000) * refDiv
225 / fcry_KHz - 1024) / 2;
226
227 reg27 = (((ulNDiv1 >> 8) & 0x0F) + ucLomod1) & 0x7F;
228 ret = regmap_write(dev->regmap, 0x27, reg27);
229 if (ret)
230 goto err;
231 ret = regmap_write(dev->regmap, 0x28, (u8)(ulNDiv1 & 0xFF));
232 if (ret)
233 goto err;
234 reg29 = (((ulNDiv2 >> 8) & 0x0F) + ucLomod2) & 0x7f;
235 ret = regmap_write(dev->regmap, 0x29, reg29);
236 if (ret)
237 goto err;
238 ret = regmap_write(dev->regmap, 0x2a, (u8)(ulNDiv2 & 0xFF));
239 if (ret)
240 goto err;
241 ret = regmap_write(dev->regmap, 0x2F, 0xf5);
242 if (ret)
243 goto err;
244 ret = regmap_write(dev->regmap, 0x30, 0x05);
245 if (ret)
246 goto err;
247 ret = regmap_write(dev->regmap, 0x08, 0x1f);
248 if (ret)
249 goto err;
250 ret = regmap_write(dev->regmap, 0x08, 0x3f);
251 if (ret)
252 goto err;
253 ret = regmap_write(dev->regmap, 0x09, 0x20);
254 if (ret)
255 goto err;
256 ret = regmap_write(dev->regmap, 0x09, 0x00);
257 if (ret)
258 goto err;
259 ret = regmap_write(dev->regmap, 0x3e, 0x11);
260 if (ret)
261 goto err;
262 ret = regmap_write(dev->regmap, 0x08, 0x2f);
263 if (ret)
264 goto err;
265 ret = regmap_write(dev->regmap, 0x08, 0x3f);
266 if (ret)
267 goto err;
268 ret = regmap_write(dev->regmap, 0x09, 0x10);
269 if (ret)
270 goto err;
271 ret = regmap_write(dev->regmap, 0x09, 0x00);
272 if (ret)
273 goto err;
274 usleep_range(2000, 50000);
275
276 ret = regmap_read(dev->regmap, 0x42, &utmp);
277 if (ret)
278 goto err;
279 reg42 = utmp;
280
281 ret = regmap_write(dev->regmap, 0x3e, 0x10);
282 if (ret)
283 goto err;
284 ret = regmap_write(dev->regmap, 0x08, 0x2f);
285 if (ret)
286 goto err;
287 ret = regmap_write(dev->regmap, 0x08, 0x3f);
288 if (ret)
289 goto err;
290 ret = regmap_write(dev->regmap, 0x09, 0x10);
291 if (ret)
292 goto err;
293 ret = regmap_write(dev->regmap, 0x09, 0x00);
294 if (ret)
295 goto err;
296 usleep_range(2000, 50000);
297
298 ret = regmap_read(dev->regmap, 0x42, &utmp);
299 if (ret)
300 goto err;
301 reg42buf = utmp;
302 if (reg42buf < reg42) {
303 ret = regmap_write(dev->regmap, 0x3e, 0x11);
304 if (ret)
305 goto err;
306 }
307 usleep_range(5000, 50000);
308
309 ret = regmap_read(dev->regmap, 0x2d, &utmp);
310 if (ret)
311 goto err;
312 ret = regmap_write(dev->regmap, 0x2d, utmp);
313 if (ret)
314 goto err;
315 ret = regmap_read(dev->regmap, 0x2e, &utmp);
316 if (ret)
317 goto err;
318 ret = regmap_write(dev->regmap, 0x2e, utmp);
319 if (ret)
320 goto err;
321
322 ret = regmap_read(dev->regmap, 0x27, &utmp);
323 if (ret)
324 goto err;
325 reg27 = utmp & 0x70;
326 ret = regmap_read(dev->regmap, 0x83, &utmp);
327 if (ret)
328 goto err;
329 if (reg27 == (utmp & 0x70)) {
330 ucLoDiv = ucLoDiv1;
331 ulNDiv = ulNDiv1;
332 ucLomod = ucLomod1 / 16;
333 } else {
334 ucLoDiv = ucLoDiv2;
335 ulNDiv = ulNDiv2;
336 ucLomod = ucLomod2 / 16;
337 }
338
339 if ((ucLoDiv == 3) || (ucLoDiv == 6)) {
340 refDiv = 18;
341 ret = regmap_write(dev->regmap, 0x36, (refDiv - 8));
342 if (ret)
343 goto err;
344 ulNDiv = ((tuner_freq_MHz * ucLoDiv * 1000) * refDiv
345 / fcry_KHz - 1024) / 2;
346 }
347
348 reg27 = (0x80 + ((ucLomod << 4) & 0x70)
349 + ((ulNDiv >> 8) & 0x0F)) & 0xFF;
350 ret = regmap_write(dev->regmap, 0x27, reg27);
351 if (ret)
352 goto err;
353 ret = regmap_write(dev->regmap, 0x28, (u8)(ulNDiv & 0xFF));
354 if (ret)
355 goto err;
356 ret = regmap_write(dev->regmap, 0x29, 0x80);
357 if (ret)
358 goto err;
359 ret = regmap_write(dev->regmap, 0x31, 0x03);
360 if (ret)
361 goto err;
362
363 if (ucLoDiv == 3)
364 utmp = 0xCE;
365 else
366 utmp = 0x8A;
367 ret = regmap_write(dev->regmap, 0x3b, utmp);
368 if (ret)
369 goto err;
370
371 dev->frequency_khz = fcry_KHz * (ulNDiv * 2 + 1024) / refDiv / ucLoDiv;
372
373 dev_dbg(&dev->client->dev,
374 "actual tune frequency=%d\n", dev->frequency_khz);
375err:
376 if (ret)
377 dev_dbg(&dev->client->dev, "failed=%d\n", ret);
378 return ret;
379}
380
381static int m88rs6000t_set_bb(struct m88rs6000t_dev *dev,
382 u32 symbol_rate_KSs, s32 lpf_offset_KHz)
383{
384 u32 f3dB;
385 u8 reg40;
386
387 f3dB = symbol_rate_KSs * 9 / 14 + 2000;
388 f3dB += lpf_offset_KHz;
389 f3dB = clamp_val(f3dB, 6000U, 43000U);
390 reg40 = f3dB / 1000;
391 return regmap_write(dev->regmap, 0x40, reg40);
392}
393
394static int m88rs6000t_set_params(struct dvb_frontend *fe)
395{
396 struct m88rs6000t_dev *dev = fe->tuner_priv;
397 struct dtv_frontend_properties *c = &fe->dtv_property_cache;
398 int ret;
399 s32 lpf_offset_KHz;
400 u32 realFreq, freq_MHz;
401
402 dev_dbg(&dev->client->dev,
403 "frequency=%d symbol_rate=%d\n",
404 c->frequency, c->symbol_rate);
405
406 if (c->symbol_rate < 5000000)
407 lpf_offset_KHz = 3000;
408 else
409 lpf_offset_KHz = 0;
410
411 realFreq = c->frequency + lpf_offset_KHz;
412 /* set tuner pll.*/
413 freq_MHz = (realFreq + 500) / 1000;
414 ret = m88rs6000t_set_pll_freq(dev, freq_MHz);
415 if (ret)
416 goto err;
417 ret = m88rs6000t_set_bb(dev, c->symbol_rate / 1000, lpf_offset_KHz);
418 if (ret)
419 goto err;
420 ret = regmap_write(dev->regmap, 0x00, 0x01);
421 if (ret)
422 goto err;
423 ret = regmap_write(dev->regmap, 0x00, 0x00);
424 if (ret)
425 goto err;
426 /* set demod mlck */
427 ret = m88rs6000t_set_demod_mclk(fe);
428err:
429 if (ret)
430 dev_dbg(&dev->client->dev, "failed=%d\n", ret);
431 return ret;
432}
433
434static int m88rs6000t_init(struct dvb_frontend *fe)
435{
436 struct m88rs6000t_dev *dev = fe->tuner_priv;
437 int ret;
438
439 dev_dbg(&dev->client->dev, "%s:\n", __func__);
440
441 ret = regmap_update_bits(dev->regmap, 0x11, 0x08, 0x08);
442 if (ret)
443 goto err;
444 usleep_range(5000, 50000);
445 ret = regmap_update_bits(dev->regmap, 0x10, 0x01, 0x01);
446 if (ret)
447 goto err;
448 usleep_range(10000, 50000);
449 ret = regmap_write(dev->regmap, 0x07, 0x7d);
450err:
451 if (ret)
452 dev_dbg(&dev->client->dev, "failed=%d\n", ret);
453 return ret;
454}
455
456static int m88rs6000t_sleep(struct dvb_frontend *fe)
457{
458 struct m88rs6000t_dev *dev = fe->tuner_priv;
459 int ret;
460
461 dev_dbg(&dev->client->dev, "%s:\n", __func__);
462
463 ret = regmap_write(dev->regmap, 0x07, 0x6d);
464 if (ret)
465 goto err;
466 usleep_range(5000, 10000);
467err:
468 if (ret)
469 dev_dbg(&dev->client->dev, "failed=%d\n", ret);
470 return ret;
471}
472
473static int m88rs6000t_get_frequency(struct dvb_frontend *fe, u32 *frequency)
474{
475 struct m88rs6000t_dev *dev = fe->tuner_priv;
476
477 dev_dbg(&dev->client->dev, "\n");
478
479 *frequency = dev->frequency_khz;
480 return 0;
481}
482
483static int m88rs6000t_get_if_frequency(struct dvb_frontend *fe, u32 *frequency)
484{
485 struct m88rs6000t_dev *dev = fe->tuner_priv;
486
487 dev_dbg(&dev->client->dev, "\n");
488
489 *frequency = 0; /* Zero-IF */
490 return 0;
491}
492
493
494static int m88rs6000t_get_rf_strength(struct dvb_frontend *fe, u16 *strength)
495{
496 struct m88rs6000t_dev *dev = fe->tuner_priv;
497 unsigned int val, i;
498 int ret;
499 u16 gain;
500 u32 PGA2_cri_GS = 46, PGA2_crf_GS = 290, TIA_GS = 290;
501 u32 RF_GC = 1200, IF_GC = 1100, BB_GC = 300;
502 u32 PGA2_GC = 300, TIA_GC = 300, PGA2_cri = 0, PGA2_crf = 0;
503 u32 RFG = 0, IFG = 0, BBG = 0, PGA2G = 0, TIAG = 0;
504 u32 RFGS[13] = {0, 245, 266, 268, 270, 285,
505 298, 295, 283, 285, 285, 300, 300};
506 u32 IFGS[12] = {0, 300, 230, 270, 270, 285,
507 295, 285, 290, 295, 295, 310};
508 u32 BBGS[14] = {0, 286, 275, 290, 294, 300, 290,
509 290, 285, 283, 260, 295, 290, 260};
510
511 ret = regmap_read(dev->regmap, 0x5A, &val);
512 if (ret)
513 goto err;
514 RF_GC = val & 0x0f;
515
516 ret = regmap_read(dev->regmap, 0x5F, &val);
517 if (ret)
518 goto err;
519 IF_GC = val & 0x0f;
520
521 ret = regmap_read(dev->regmap, 0x3F, &val);
522 if (ret)
523 goto err;
524 TIA_GC = (val >> 4) & 0x07;
525
526 ret = regmap_read(dev->regmap, 0x77, &val);
527 if (ret)
528 goto err;
529 BB_GC = (val >> 4) & 0x0f;
530
531 ret = regmap_read(dev->regmap, 0x76, &val);
532 if (ret)
533 goto err;
534 PGA2_GC = val & 0x3f;
535 PGA2_cri = PGA2_GC >> 2;
536 PGA2_crf = PGA2_GC & 0x03;
537
538 for (i = 0; i <= RF_GC; i++)
539 RFG += RFGS[i];
540
541 if (RF_GC == 0)
542 RFG += 400;
543 if (RF_GC == 1)
544 RFG += 300;
545 if (RF_GC == 2)
546 RFG += 200;
547 if (RF_GC == 3)
548 RFG += 100;
549
550 for (i = 0; i <= IF_GC; i++)
551 IFG += IFGS[i];
552
553 TIAG = TIA_GC * TIA_GS;
554
555 for (i = 0; i <= BB_GC; i++)
556 BBG += BBGS[i];
557
558 PGA2G = PGA2_cri * PGA2_cri_GS + PGA2_crf * PGA2_crf_GS;
559
560 gain = RFG + IFG - TIAG + BBG + PGA2G;
561
562 /* scale value to 0x0000-0xffff */
563 gain = clamp_val(gain, 1000U, 10500U);
564 *strength = (10500 - gain) * 0xffff / (10500 - 1000);
565err:
566 if (ret)
567 dev_dbg(&dev->client->dev, "failed=%d\n", ret);
568 return ret;
569}
570
571static const struct dvb_tuner_ops m88rs6000t_tuner_ops = {
572 .info = {
573 .name = "Montage M88RS6000 Internal Tuner",
574 .frequency_min = 950000,
575 .frequency_max = 2150000,
576 },
577
578 .init = m88rs6000t_init,
579 .sleep = m88rs6000t_sleep,
580 .set_params = m88rs6000t_set_params,
581 .get_frequency = m88rs6000t_get_frequency,
582 .get_if_frequency = m88rs6000t_get_if_frequency,
583 .get_rf_strength = m88rs6000t_get_rf_strength,
584};
585
586static int m88rs6000t_probe(struct i2c_client *client,
587 const struct i2c_device_id *id)
588{
589 struct m88rs6000t_config *cfg = client->dev.platform_data;
590 struct dvb_frontend *fe = cfg->fe;
591 struct m88rs6000t_dev *dev;
592 int ret, i;
593 unsigned int utmp;
594 static const struct regmap_config regmap_config = {
595 .reg_bits = 8,
596 .val_bits = 8,
597 };
598 static const struct m88rs6000t_reg_val reg_vals[] = {
599 {0x10, 0xfb},
600 {0x24, 0x38},
601 {0x11, 0x0a},
602 {0x12, 0x00},
603 {0x2b, 0x1c},
604 {0x44, 0x48},
605 {0x54, 0x24},
606 {0x55, 0x06},
607 {0x59, 0x00},
608 {0x5b, 0x4c},
609 {0x60, 0x8b},
610 {0x61, 0xf4},
611 {0x65, 0x07},
612 {0x6d, 0x6f},
613 {0x6e, 0x31},
614 {0x3c, 0xf3},
615 {0x37, 0x0f},
616 {0x48, 0x28},
617 {0x49, 0xd8},
618 {0x70, 0x66},
619 {0x71, 0xCF},
620 {0x72, 0x81},
621 {0x73, 0xA7},
622 {0x74, 0x4F},
623 {0x75, 0xFC},
624 };
625
626 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
627 if (!dev) {
628 ret = -ENOMEM;
629 dev_err(&client->dev, "kzalloc() failed\n");
630 goto err;
631 }
632
633 memcpy(&dev->cfg, cfg, sizeof(struct m88rs6000t_config));
634 dev->client = client;
635 dev->regmap = devm_regmap_init_i2c(client, &regmap_config);
636 if (IS_ERR(dev->regmap)) {
637 ret = PTR_ERR(dev->regmap);
638 goto err;
639 }
640
641 ret = regmap_update_bits(dev->regmap, 0x11, 0x08, 0x08);
642 if (ret)
643 goto err;
644 usleep_range(5000, 50000);
645 ret = regmap_update_bits(dev->regmap, 0x10, 0x01, 0x01);
646 if (ret)
647 goto err;
648 usleep_range(10000, 50000);
649 ret = regmap_write(dev->regmap, 0x07, 0x7d);
650 if (ret)
651 goto err;
652 ret = regmap_write(dev->regmap, 0x04, 0x01);
653 if (ret)
654 goto err;
655
656 /* check tuner chip id */
657 ret = regmap_read(dev->regmap, 0x01, &utmp);
658 if (ret)
659 goto err;
660 dev_info(&dev->client->dev, "chip_id=%02x\n", utmp);
661 if (utmp != 0x64) {
662 ret = -ENODEV;
663 goto err;
664 }
665
666 /* tuner init. */
667 ret = regmap_write(dev->regmap, 0x05, 0x40);
668 if (ret)
669 goto err;
670 ret = regmap_write(dev->regmap, 0x11, 0x08);
671 if (ret)
672 goto err;
673 ret = regmap_write(dev->regmap, 0x15, 0x6c);
674 if (ret)
675 goto err;
676 ret = regmap_write(dev->regmap, 0x17, 0xc1);
677 if (ret)
678 goto err;
679 ret = regmap_write(dev->regmap, 0x17, 0x81);
680 if (ret)
681 goto err;
682 usleep_range(10000, 50000);
683 ret = regmap_write(dev->regmap, 0x05, 0x00);
684 if (ret)
685 goto err;
686 ret = regmap_write(dev->regmap, 0x11, 0x0a);
687 if (ret)
688 goto err;
689
690 for (i = 0; i < ARRAY_SIZE(reg_vals); i++) {
691 ret = regmap_write(dev->regmap,
692 reg_vals[i].reg, reg_vals[i].val);
693 if (ret)
694 goto err;
695 }
696
697 dev_info(&dev->client->dev, "Montage M88RS6000 internal tuner successfully identified\n");
698
699 fe->tuner_priv = dev;
700 memcpy(&fe->ops.tuner_ops, &m88rs6000t_tuner_ops,
701 sizeof(struct dvb_tuner_ops));
702 i2c_set_clientdata(client, dev);
703 return 0;
704err:
705 dev_dbg(&client->dev, "failed=%d\n", ret);
706 kfree(dev);
707 return ret;
708}
709
710static int m88rs6000t_remove(struct i2c_client *client)
711{
712 struct m88rs6000t_dev *dev = i2c_get_clientdata(client);
713 struct dvb_frontend *fe = dev->cfg.fe;
714
715 dev_dbg(&client->dev, "\n");
716
717 memset(&fe->ops.tuner_ops, 0, sizeof(struct dvb_tuner_ops));
718 fe->tuner_priv = NULL;
719 kfree(dev);
720
721 return 0;
722}
723
724static const struct i2c_device_id m88rs6000t_id[] = {
725 {"m88rs6000t", 0},
726 {}
727};
728MODULE_DEVICE_TABLE(i2c, m88rs6000t_id);
729
730static struct i2c_driver m88rs6000t_driver = {
731 .driver = {
732 .owner = THIS_MODULE,
733 .name = "m88rs6000t",
734 },
735 .probe = m88rs6000t_probe,
736 .remove = m88rs6000t_remove,
737 .id_table = m88rs6000t_id,
738};
739
740module_i2c_driver(m88rs6000t_driver);
741
742MODULE_AUTHOR("Max nibble <nibble.max@gmail.com>");
743MODULE_DESCRIPTION("Montage M88RS6000 internal tuner driver");
744MODULE_LICENSE("GPL");
diff --git a/drivers/media/tuners/m88rs6000t.h b/drivers/media/tuners/m88rs6000t.h
new file mode 100644
index 000000000000..264c13e2cd39
--- /dev/null
+++ b/drivers/media/tuners/m88rs6000t.h
@@ -0,0 +1,29 @@
1/*
2 * Driver for the internal tuner of Montage M88RS6000
3 *
4 * Copyright (C) 2014 Max nibble <nibble.max@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16
17#ifndef _M88RS6000T_H_
18#define _M88RS6000T_H_
19
20#include "dvb_frontend.h"
21
22struct m88rs6000t_config {
23 /*
24 * pointer to DVB frontend
25 */
26 struct dvb_frontend *fe;
27};
28
29#endif
diff --git a/drivers/media/tuners/m88ts2022.c b/drivers/media/tuners/m88ts2022.c
index caa542346891..066e5431da93 100644
--- a/drivers/media/tuners/m88ts2022.c
+++ b/drivers/media/tuners/m88ts2022.c
@@ -488,6 +488,7 @@ static int m88ts2022_probe(struct i2c_client *client,
488 case 0x83: 488 case 0x83:
489 break; 489 break;
490 default: 490 default:
491 ret = -ENODEV;
491 goto err; 492 goto err;
492 } 493 }
493 494
@@ -505,6 +506,7 @@ static int m88ts2022_probe(struct i2c_client *client,
505 u8tmp = 0x6c; 506 u8tmp = 0x6c;
506 break; 507 break;
507 default: 508 default:
509 ret = -EINVAL;
508 goto err; 510 goto err;
509 } 511 }
510 512
diff --git a/drivers/media/tuners/mxl5007t.c b/drivers/media/tuners/mxl5007t.c
index 69e453ef0a1a..1810ad66888e 100644
--- a/drivers/media/tuners/mxl5007t.c
+++ b/drivers/media/tuners/mxl5007t.c
@@ -374,7 +374,6 @@ static struct reg_pair_t *mxl5007t_calc_init_regs(struct mxl5007t_state *state,
374 mxl5007t_set_if_freq_bits(state, cfg->if_freq_hz, cfg->invert_if); 374 mxl5007t_set_if_freq_bits(state, cfg->if_freq_hz, cfg->invert_if);
375 mxl5007t_set_xtal_freq_bits(state, cfg->xtal_freq_hz); 375 mxl5007t_set_xtal_freq_bits(state, cfg->xtal_freq_hz);
376 376
377 set_reg_bits(state->tab_init, 0x04, 0x01, cfg->loop_thru_enable);
378 set_reg_bits(state->tab_init, 0x03, 0x08, cfg->clk_out_enable << 3); 377 set_reg_bits(state->tab_init, 0x03, 0x08, cfg->clk_out_enable << 3);
379 set_reg_bits(state->tab_init, 0x03, 0x07, cfg->clk_out_amp); 378 set_reg_bits(state->tab_init, 0x03, 0x07, cfg->clk_out_amp);
380 379
@@ -531,10 +530,6 @@ static int mxl5007t_tuner_init(struct mxl5007t_state *state,
531 struct reg_pair_t *init_regs; 530 struct reg_pair_t *init_regs;
532 int ret; 531 int ret;
533 532
534 ret = mxl5007t_soft_reset(state);
535 if (mxl_fail(ret))
536 goto fail;
537
538 /* calculate initialization reg array */ 533 /* calculate initialization reg array */
539 init_regs = mxl5007t_calc_init_regs(state, mode); 534 init_regs = mxl5007t_calc_init_regs(state, mode);
540 535
@@ -900,7 +895,32 @@ struct dvb_frontend *mxl5007t_attach(struct dvb_frontend *fe,
900 /* existing tuner instance */ 895 /* existing tuner instance */
901 break; 896 break;
902 } 897 }
898
899 if (fe->ops.i2c_gate_ctrl)
900 fe->ops.i2c_gate_ctrl(fe, 1);
901
902 ret = mxl5007t_soft_reset(state);
903
904 if (fe->ops.i2c_gate_ctrl)
905 fe->ops.i2c_gate_ctrl(fe, 0);
906
907 if (mxl_fail(ret))
908 goto fail;
909
910 if (fe->ops.i2c_gate_ctrl)
911 fe->ops.i2c_gate_ctrl(fe, 1);
912
913 ret = mxl5007t_write_reg(state, 0x04,
914 state->config->loop_thru_enable);
915
916 if (fe->ops.i2c_gate_ctrl)
917 fe->ops.i2c_gate_ctrl(fe, 0);
918
919 if (mxl_fail(ret))
920 goto fail;
921
903 fe->tuner_priv = state; 922 fe->tuner_priv = state;
923
904 mutex_unlock(&mxl5007t_list_mutex); 924 mutex_unlock(&mxl5007t_list_mutex);
905 925
906 memcpy(&fe->ops.tuner_ops, &mxl5007t_tuner_ops, 926 memcpy(&fe->ops.tuner_ops, &mxl5007t_tuner_ops,
diff --git a/drivers/media/tuners/r820t.c b/drivers/media/tuners/r820t.c
index a759742cae7b..8e040cf9cf13 100644
--- a/drivers/media/tuners/r820t.c
+++ b/drivers/media/tuners/r820t.c
@@ -959,6 +959,18 @@ static int r820t_set_tv_standard(struct r820t_priv *priv,
959 lt_att = 0x00; /* r31[7], lt att enable */ 959 lt_att = 0x00; /* r31[7], lt att enable */
960 flt_ext_widest = 0x00; /* r15[7]: flt_ext_wide off */ 960 flt_ext_widest = 0x00; /* r15[7]: flt_ext_wide off */
961 polyfil_cur = 0x60; /* r25[6:5]:min */ 961 polyfil_cur = 0x60; /* r25[6:5]:min */
962 } else if (delsys == SYS_DVBC_ANNEX_A) {
963 if_khz = 5070;
964 filt_cal_lo = 73500;
965 filt_gain = 0x10; /* +3db, 6mhz on */
966 img_r = 0x00; /* image negative */
967 filt_q = 0x10; /* r10[4]:low q(1'b1) */
968 hp_cor = 0x0b; /* 1.7m disable, +0cap, 1.0mhz */
969 ext_enable = 0x40; /* r30[6]=1 ext enable; r30[5]:1 ext at lna max-1 */
970 loop_through = 0x00; /* r5[7], lt on */
971 lt_att = 0x00; /* r31[7], lt att enable */
972 flt_ext_widest = 0x00; /* r15[7]: flt_ext_wide off */
973 polyfil_cur = 0x60; /* r25[6:5]:min */
962 } else { 974 } else {
963 if (bw <= 6) { 975 if (bw <= 6) {
964 if_khz = 3570; 976 if_khz = 3570;
diff --git a/drivers/media/tuners/si2157.c b/drivers/media/tuners/si2157.c
index cf97142e01e6..2180de9d654a 100644
--- a/drivers/media/tuners/si2157.c
+++ b/drivers/media/tuners/si2157.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Silicon Labs Si2147/2157/2158 silicon tuner driver 2 * Silicon Labs Si2146/2147/2148/2157/2158 silicon tuner driver
3 * 3 *
4 * Copyright (C) 2014 Antti Palosaari <crope@iki.fi> 4 * Copyright (C) 2014 Antti Palosaari <crope@iki.fi>
5 * 5 *
@@ -93,8 +93,13 @@ static int si2157_init(struct dvb_frontend *fe)
93 goto warm; 93 goto warm;
94 94
95 /* power up */ 95 /* power up */
96 memcpy(cmd.args, "\xc0\x00\x0c\x00\x00\x01\x01\x01\x01\x01\x01\x02\x00\x00\x01", 15); 96 if (s->chiptype == SI2157_CHIPTYPE_SI2146) {
97 cmd.wlen = 15; 97 memcpy(cmd.args, "\xc0\x05\x01\x00\x00\x0b\x00\x00\x01", 9);
98 cmd.wlen = 9;
99 } else {
100 memcpy(cmd.args, "\xc0\x00\x0c\x00\x00\x01\x01\x01\x01\x01\x01\x02\x00\x00\x01", 15);
101 cmd.wlen = 15;
102 }
98 cmd.rlen = 1; 103 cmd.rlen = 1;
99 ret = si2157_cmd_execute(s, &cmd); 104 ret = si2157_cmd_execute(s, &cmd);
100 if (ret) 105 if (ret)
@@ -112,17 +117,20 @@ static int si2157_init(struct dvb_frontend *fe)
112 cmd.args[4] << 0; 117 cmd.args[4] << 0;
113 118
114 #define SI2158_A20 ('A' << 24 | 58 << 16 | '2' << 8 | '0' << 0) 119 #define SI2158_A20 ('A' << 24 | 58 << 16 | '2' << 8 | '0' << 0)
120 #define SI2148_A20 ('A' << 24 | 48 << 16 | '2' << 8 | '0' << 0)
115 #define SI2157_A30 ('A' << 24 | 57 << 16 | '3' << 8 | '0' << 0) 121 #define SI2157_A30 ('A' << 24 | 57 << 16 | '3' << 8 | '0' << 0)
116 #define SI2147_A30 ('A' << 24 | 47 << 16 | '3' << 8 | '0' << 0) 122 #define SI2147_A30 ('A' << 24 | 47 << 16 | '3' << 8 | '0' << 0)
123 #define SI2146_A10 ('A' << 24 | 46 << 16 | '1' << 8 | '0' << 0)
117 124
118 switch (chip_id) { 125 switch (chip_id) {
119 case SI2158_A20: 126 case SI2158_A20:
127 case SI2148_A20:
120 fw_file = SI2158_A20_FIRMWARE; 128 fw_file = SI2158_A20_FIRMWARE;
121 break; 129 break;
122 case SI2157_A30: 130 case SI2157_A30:
123 case SI2147_A30: 131 case SI2147_A30:
132 case SI2146_A10:
124 goto skip_fw_download; 133 goto skip_fw_download;
125 break;
126 default: 134 default:
127 dev_err(&s->client->dev, 135 dev_err(&s->client->dev,
128 "unknown chip version Si21%d-%c%c%c\n", 136 "unknown chip version Si21%d-%c%c%c\n",
@@ -149,7 +157,7 @@ static int si2157_init(struct dvb_frontend *fe)
149 dev_err(&s->client->dev, "firmware file '%s' is invalid\n", 157 dev_err(&s->client->dev, "firmware file '%s' is invalid\n",
150 fw_file); 158 fw_file);
151 ret = -EINVAL; 159 ret = -EINVAL;
152 goto err; 160 goto fw_release_exit;
153 } 161 }
154 162
155 dev_info(&s->client->dev, "downloading firmware from file '%s'\n", 163 dev_info(&s->client->dev, "downloading firmware from file '%s'\n",
@@ -165,7 +173,7 @@ static int si2157_init(struct dvb_frontend *fe)
165 dev_err(&s->client->dev, 173 dev_err(&s->client->dev,
166 "firmware download failed=%d\n", 174 "firmware download failed=%d\n",
167 ret); 175 ret);
168 goto err; 176 goto fw_release_exit;
169 } 177 }
170 } 178 }
171 179
@@ -187,10 +195,9 @@ warm:
187 s->active = true; 195 s->active = true;
188 return 0; 196 return 0;
189 197
198fw_release_exit:
199 release_firmware(fw);
190err: 200err:
191 if (fw)
192 release_firmware(fw);
193
194 dev_dbg(&s->client->dev, "failed=%d\n", ret); 201 dev_dbg(&s->client->dev, "failed=%d\n", ret);
195 return ret; 202 return ret;
196} 203}
@@ -250,6 +257,9 @@ static int si2157_set_params(struct dvb_frontend *fe)
250 case SYS_ATSC: 257 case SYS_ATSC:
251 delivery_system = 0x00; 258 delivery_system = 0x00;
252 break; 259 break;
260 case SYS_DVBC_ANNEX_B:
261 delivery_system = 0x10;
262 break;
253 case SYS_DVBT: 263 case SYS_DVBT:
254 case SYS_DVBT2: /* it seems DVB-T and DVB-T2 both are 0x20 here */ 264 case SYS_DVBT2: /* it seems DVB-T and DVB-T2 both are 0x20 here */
255 delivery_system = 0x20; 265 delivery_system = 0x20;
@@ -272,7 +282,10 @@ static int si2157_set_params(struct dvb_frontend *fe)
272 if (ret) 282 if (ret)
273 goto err; 283 goto err;
274 284
275 memcpy(cmd.args, "\x14\x00\x02\x07\x01\x00", 6); 285 if (s->chiptype == SI2157_CHIPTYPE_SI2146)
286 memcpy(cmd.args, "\x14\x00\x02\x07\x00\x01", 6);
287 else
288 memcpy(cmd.args, "\x14\x00\x02\x07\x01\x00", 6);
276 cmd.wlen = 6; 289 cmd.wlen = 6;
277 cmd.rlen = 4; 290 cmd.rlen = 4;
278 ret = si2157_cmd_execute(s, &cmd); 291 ret = si2157_cmd_execute(s, &cmd);
@@ -305,7 +318,7 @@ static int si2157_get_if_frequency(struct dvb_frontend *fe, u32 *frequency)
305 318
306static const struct dvb_tuner_ops si2157_ops = { 319static const struct dvb_tuner_ops si2157_ops = {
307 .info = { 320 .info = {
308 .name = "Silicon Labs Si2157/Si2158", 321 .name = "Silicon Labs Si2146/2147/2148/2157/2158",
309 .frequency_min = 110000000, 322 .frequency_min = 110000000,
310 .frequency_max = 862000000, 323 .frequency_max = 862000000,
311 }, 324 },
@@ -336,6 +349,7 @@ static int si2157_probe(struct i2c_client *client,
336 s->fe = cfg->fe; 349 s->fe = cfg->fe;
337 s->inversion = cfg->inversion; 350 s->inversion = cfg->inversion;
338 s->fw_loaded = false; 351 s->fw_loaded = false;
352 s->chiptype = (u8)id->driver_data;
339 mutex_init(&s->i2c_mutex); 353 mutex_init(&s->i2c_mutex);
340 354
341 /* check if the tuner is there */ 355 /* check if the tuner is there */
@@ -352,7 +366,10 @@ static int si2157_probe(struct i2c_client *client,
352 i2c_set_clientdata(client, s); 366 i2c_set_clientdata(client, s);
353 367
354 dev_info(&s->client->dev, 368 dev_info(&s->client->dev,
355 "Silicon Labs Si2157/Si2158 successfully attached\n"); 369 "Silicon Labs %s successfully attached\n",
370 s->chiptype == SI2157_CHIPTYPE_SI2146 ?
371 "Si2146" : "Si2147/2148/2157/2158");
372
356 return 0; 373 return 0;
357err: 374err:
358 dev_dbg(&client->dev, "failed=%d\n", ret); 375 dev_dbg(&client->dev, "failed=%d\n", ret);
@@ -377,6 +394,7 @@ static int si2157_remove(struct i2c_client *client)
377 394
378static const struct i2c_device_id si2157_id[] = { 395static const struct i2c_device_id si2157_id[] = {
379 {"si2157", 0}, 396 {"si2157", 0},
397 {"si2146", 1},
380 {} 398 {}
381}; 399};
382MODULE_DEVICE_TABLE(i2c, si2157_id); 400MODULE_DEVICE_TABLE(i2c, si2157_id);
@@ -393,7 +411,7 @@ static struct i2c_driver si2157_driver = {
393 411
394module_i2c_driver(si2157_driver); 412module_i2c_driver(si2157_driver);
395 413
396MODULE_DESCRIPTION("Silicon Labs Si2157/Si2158 silicon tuner driver"); 414MODULE_DESCRIPTION("Silicon Labs Si2146/2147/2148/2157/2158 silicon tuner driver");
397MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>"); 415MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
398MODULE_LICENSE("GPL"); 416MODULE_LICENSE("GPL");
399MODULE_FIRMWARE(SI2158_A20_FIRMWARE); 417MODULE_FIRMWARE(SI2158_A20_FIRMWARE);
diff --git a/drivers/media/tuners/si2157.h b/drivers/media/tuners/si2157.h
index d3b19cadb4a1..a564c4a9fba7 100644
--- a/drivers/media/tuners/si2157.h
+++ b/drivers/media/tuners/si2157.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Silicon Labs Si2147/2157/2158 silicon tuner driver 2 * Silicon Labs Si2146/2147/2148/2157/2158 silicon tuner driver
3 * 3 *
4 * Copyright (C) 2014 Antti Palosaari <crope@iki.fi> 4 * Copyright (C) 2014 Antti Palosaari <crope@iki.fi>
5 * 5 *
diff --git a/drivers/media/tuners/si2157_priv.h b/drivers/media/tuners/si2157_priv.h
index e71ffafed951..d6e07cdd2a07 100644
--- a/drivers/media/tuners/si2157_priv.h
+++ b/drivers/media/tuners/si2157_priv.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Silicon Labs Si2147/2157/2158 silicon tuner driver 2 * Silicon Labs Si2146/2147/2148/2157/2158 silicon tuner driver
3 * 3 *
4 * Copyright (C) 2014 Antti Palosaari <crope@iki.fi> 4 * Copyright (C) 2014 Antti Palosaari <crope@iki.fi>
5 * 5 *
@@ -28,9 +28,13 @@ struct si2157 {
28 bool active; 28 bool active;
29 bool fw_loaded; 29 bool fw_loaded;
30 bool inversion; 30 bool inversion;
31 u8 chiptype;
31}; 32};
32 33
33/* firmare command struct */ 34#define SI2157_CHIPTYPE_SI2157 0
35#define SI2157_CHIPTYPE_SI2146 1
36
37/* firmware command struct */
34#define SI2157_ARGLEN 30 38#define SI2157_ARGLEN 30
35struct si2157_cmd { 39struct si2157_cmd {
36 u8 args[SI2157_ARGLEN]; 40 u8 args[SI2157_ARGLEN];
diff --git a/drivers/media/tuners/tda18271-common.c b/drivers/media/tuners/tda18271-common.c
index 86e5e3110118..a26bb33102b8 100644
--- a/drivers/media/tuners/tda18271-common.c
+++ b/drivers/media/tuners/tda18271-common.c
@@ -178,7 +178,7 @@ int tda18271_read_extended(struct dvb_frontend *fe)
178 (i != R_EB17) && 178 (i != R_EB17) &&
179 (i != R_EB19) && 179 (i != R_EB19) &&
180 (i != R_EB20)) 180 (i != R_EB20))
181 regs[i] = regdump[i]; 181 regs[i] = regdump[i];
182 } 182 }
183 183
184 if (tda18271_debug & DBG_REG) 184 if (tda18271_debug & DBG_REG)
diff --git a/drivers/media/tuners/xc5000.c b/drivers/media/tuners/xc5000.c
index 803a0e63d47e..2a039de8ab9a 100644
--- a/drivers/media/tuners/xc5000.c
+++ b/drivers/media/tuners/xc5000.c
@@ -62,6 +62,7 @@ struct xc5000_priv {
62 unsigned int mode; 62 unsigned int mode;
63 u8 rf_mode; 63 u8 rf_mode;
64 u8 radio_input; 64 u8 radio_input;
65 u16 output_amp;
65 66
66 int chip_id; 67 int chip_id;
67 u16 pll_register_no; 68 u16 pll_register_no;
@@ -744,7 +745,9 @@ static int xc5000_tune_digital(struct dvb_frontend *fe)
744 return -EIO; 745 return -EIO;
745 } 746 }
746 747
747 xc_write_reg(priv, XREG_OUTPUT_AMP, 0x8a); 748 dprintk(1, "%s() setting OUTPUT_AMP to 0x%x\n",
749 __func__, priv->output_amp);
750 xc_write_reg(priv, XREG_OUTPUT_AMP, priv->output_amp);
748 751
749 xc_tune_channel(priv, priv->freq_hz, XC_TUNE_DIGITAL); 752 xc_tune_channel(priv, priv->freq_hz, XC_TUNE_DIGITAL);
750 753
@@ -1333,8 +1336,7 @@ static int xc5000_release(struct dvb_frontend *fe)
1333 1336
1334 if (priv) { 1337 if (priv) {
1335 cancel_delayed_work(&priv->timer_sleep); 1338 cancel_delayed_work(&priv->timer_sleep);
1336 if (priv->firmware) 1339 release_firmware(priv->firmware);
1337 release_firmware(priv->firmware);
1338 hybrid_tuner_release_state(priv); 1340 hybrid_tuner_release_state(priv);
1339 } 1341 }
1340 1342
@@ -1358,6 +1360,9 @@ static int xc5000_set_config(struct dvb_frontend *fe, void *priv_cfg)
1358 if (p->radio_input) 1360 if (p->radio_input)
1359 priv->radio_input = p->radio_input; 1361 priv->radio_input = p->radio_input;
1360 1362
1363 if (p->output_amp)
1364 priv->output_amp = p->output_amp;
1365
1361 return 0; 1366 return 0;
1362} 1367}
1363 1368
@@ -1438,6 +1443,12 @@ struct dvb_frontend *xc5000_attach(struct dvb_frontend *fe,
1438 it can be overridden if this is a hybrid driver */ 1443 it can be overridden if this is a hybrid driver */
1439 priv->chip_id = (cfg->chip_id) ? cfg->chip_id : 0; 1444 priv->chip_id = (cfg->chip_id) ? cfg->chip_id : 0;
1440 1445
1446 /* don't override output_amp if it's already been set
1447 unless explicitly specified */
1448 if ((priv->output_amp == 0) || (cfg->output_amp))
1449 /* use default output_amp value if none specified */
1450 priv->output_amp = (cfg->output_amp) ? cfg->output_amp : 0x8a;
1451
1441 /* Check if firmware has been loaded. It is possible that another 1452 /* Check if firmware has been loaded. It is possible that another
1442 instance of the driver has loaded the firmware. 1453 instance of the driver has loaded the firmware.
1443 */ 1454 */
diff --git a/drivers/media/tuners/xc5000.h b/drivers/media/tuners/xc5000.h
index 7245cae19f0c..6aa534f17a30 100644
--- a/drivers/media/tuners/xc5000.h
+++ b/drivers/media/tuners/xc5000.h
@@ -36,6 +36,7 @@ struct xc5000_config {
36 u32 if_khz; 36 u32 if_khz;
37 u8 radio_input; 37 u8 radio_input;
38 u16 xtal_khz; 38 u16 xtal_khz;
39 u16 output_amp;
39 40
40 int chip_id; 41 int chip_id;
41}; 42};
diff --git a/drivers/media/usb/au0828/au0828-cards.c b/drivers/media/usb/au0828/au0828-cards.c
index 9eb77ac2153b..da87f1cc31a9 100644
--- a/drivers/media/usb/au0828/au0828-cards.c
+++ b/drivers/media/usb/au0828/au0828-cards.c
@@ -36,6 +36,11 @@ static void hvr950q_cs5340_audio(void *priv, int enable)
36 au0828_clear(dev, REG_000, 0x10); 36 au0828_clear(dev, REG_000, 0x10);
37} 37}
38 38
39/*
40 * WARNING: There's a quirks table at sound/usb/quirks-table.h
41 * that should also be updated every time a new device with V4L2 support
42 * is added here.
43 */
39struct au0828_board au0828_boards[] = { 44struct au0828_board au0828_boards[] = {
40 [AU0828_BOARD_UNKNOWN] = { 45 [AU0828_BOARD_UNKNOWN] = {
41 .name = "Unknown board", 46 .name = "Unknown board",
diff --git a/drivers/media/usb/au0828/au0828-core.c b/drivers/media/usb/au0828/au0828-core.c
index bc064803b6c7..082ae6ba492f 100644
--- a/drivers/media/usb/au0828/au0828-core.c
+++ b/drivers/media/usb/au0828/au0828-core.c
@@ -153,6 +153,14 @@ static void au0828_usb_disconnect(struct usb_interface *interface)
153 153
154 dprintk(1, "%s()\n", __func__); 154 dprintk(1, "%s()\n", __func__);
155 155
156 /* there is a small window after disconnect, before
157 dev->usbdev is NULL, for poll (e.g: IR) try to access
158 the device and fill the dmesg with error messages.
159 Set the status so poll routines can check and avoid
160 access after disconnect.
161 */
162 dev->dev_state = DEV_DISCONNECTED;
163
156 au0828_rc_unregister(dev); 164 au0828_rc_unregister(dev);
157 /* Digital TV */ 165 /* Digital TV */
158 au0828_dvb_unregister(dev); 166 au0828_dvb_unregister(dev);
diff --git a/drivers/media/usb/au0828/au0828-dvb.c b/drivers/media/usb/au0828/au0828-dvb.c
index 00ab1563d142..c267d76f5b3c 100644
--- a/drivers/media/usb/au0828/au0828-dvb.c
+++ b/drivers/media/usb/au0828/au0828-dvb.c
@@ -88,12 +88,14 @@ static struct xc5000_config hauppauge_xc5000a_config = {
88 .i2c_address = 0x61, 88 .i2c_address = 0x61,
89 .if_khz = 6000, 89 .if_khz = 6000,
90 .chip_id = XC5000A, 90 .chip_id = XC5000A,
91 .output_amp = 0x8f,
91}; 92};
92 93
93static struct xc5000_config hauppauge_xc5000c_config = { 94static struct xc5000_config hauppauge_xc5000c_config = {
94 .i2c_address = 0x61, 95 .i2c_address = 0x61,
95 .if_khz = 6000, 96 .if_khz = 6000,
96 .chip_id = XC5000C, 97 .chip_id = XC5000C,
98 .output_amp = 0x8f,
97}; 99};
98 100
99static struct mxl5007t_config mxl5007t_hvr950q_config = { 101static struct mxl5007t_config mxl5007t_hvr950q_config = {
diff --git a/drivers/media/usb/au0828/au0828-input.c b/drivers/media/usb/au0828/au0828-input.c
index 63995f97dc65..b0f067971979 100644
--- a/drivers/media/usb/au0828/au0828-input.c
+++ b/drivers/media/usb/au0828/au0828-input.c
@@ -129,6 +129,10 @@ static int au0828_get_key_au8522(struct au0828_rc *ir)
129 int prv_bit, bit, width; 129 int prv_bit, bit, width;
130 bool first = true; 130 bool first = true;
131 131
132 /* do nothing if device is disconnected */
133 if (ir->dev->dev_state == DEV_DISCONNECTED)
134 return 0;
135
132 /* Check IR int */ 136 /* Check IR int */
133 rc = au8522_rc_read(ir, 0xe1, -1, buf, 1); 137 rc = au8522_rc_read(ir, 0xe1, -1, buf, 1);
134 if (rc < 0 || !(buf[0] & (1 << 4))) { 138 if (rc < 0 || !(buf[0] & (1 << 4))) {
@@ -255,8 +259,11 @@ static void au0828_rc_stop(struct rc_dev *rc)
255 259
256 cancel_delayed_work_sync(&ir->work); 260 cancel_delayed_work_sync(&ir->work);
257 261
258 /* Disable IR */ 262 /* do nothing if device is disconnected */
259 au8522_rc_clear(ir, 0xe0, 1 << 4); 263 if (ir->dev->dev_state != DEV_DISCONNECTED) {
264 /* Disable IR */
265 au8522_rc_clear(ir, 0xe0, 1 << 4);
266 }
260} 267}
261 268
262static int au0828_probe_i2c_ir(struct au0828_dev *dev) 269static int au0828_probe_i2c_ir(struct au0828_dev *dev)
@@ -363,8 +370,7 @@ void au0828_rc_unregister(struct au0828_dev *dev)
363 if (!ir) 370 if (!ir)
364 return; 371 return;
365 372
366 if (ir->rc) 373 rc_unregister_device(ir->rc);
367 rc_unregister_device(ir->rc);
368 374
369 /* done */ 375 /* done */
370 kfree(ir); 376 kfree(ir);
diff --git a/drivers/media/usb/cx231xx/Kconfig b/drivers/media/usb/cx231xx/Kconfig
index 569aa298c03f..173c0e287a08 100644
--- a/drivers/media/usb/cx231xx/Kconfig
+++ b/drivers/media/usb/cx231xx/Kconfig
@@ -7,6 +7,7 @@ config VIDEO_CX231XX
7 select VIDEOBUF_VMALLOC 7 select VIDEOBUF_VMALLOC
8 select VIDEO_CX25840 8 select VIDEO_CX25840
9 select VIDEO_CX2341X 9 select VIDEO_CX2341X
10 select I2C_MUX
10 11
11 ---help--- 12 ---help---
12 This is a video4linux driver for Conexant 231xx USB based TV cards. 13 This is a video4linux driver for Conexant 231xx USB based TV cards.
diff --git a/drivers/media/usb/cx231xx/cx231xx-417.c b/drivers/media/usb/cx231xx/cx231xx-417.c
index 459bb0e98971..3f295b4d1a3d 100644
--- a/drivers/media/usb/cx231xx/cx231xx-417.c
+++ b/drivers/media/usb/cx231xx/cx231xx-417.c
@@ -24,6 +24,8 @@
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 */ 25 */
26 26
27#include "cx231xx.h"
28
27#include <linux/module.h> 29#include <linux/module.h>
28#include <linux/moduleparam.h> 30#include <linux/moduleparam.h>
29#include <linux/init.h> 31#include <linux/init.h>
@@ -37,9 +39,6 @@
37#include <media/v4l2-event.h> 39#include <media/v4l2-event.h>
38#include <media/cx2341x.h> 40#include <media/cx2341x.h>
39#include <media/tuner.h> 41#include <media/tuner.h>
40#include <linux/usb.h>
41
42#include "cx231xx.h"
43 42
44#define CX231xx_FIRM_IMAGE_SIZE 376836 43#define CX231xx_FIRM_IMAGE_SIZE 376836
45#define CX231xx_FIRM_IMAGE_NAME "v4l-cx23885-enc.fw" 44#define CX231xx_FIRM_IMAGE_NAME "v4l-cx23885-enc.fw"
@@ -90,10 +89,10 @@ static unsigned int v4l_debug = 1;
90module_param(v4l_debug, int, 0644); 89module_param(v4l_debug, int, 0644);
91MODULE_PARM_DESC(v4l_debug, "enable V4L debug messages"); 90MODULE_PARM_DESC(v4l_debug, "enable V4L debug messages");
92 91
93#define dprintk(level, fmt, arg...)\ 92#define dprintk(level, fmt, arg...) \
94 do { if (v4l_debug >= level) \ 93 do { \
95 pr_info("%s: " fmt, \ 94 if (v4l_debug >= level) \
96 (dev) ? dev->name : "cx231xx[?]", ## arg); \ 95 printk(KERN_DEBUG pr_fmt(fmt), ## arg); \
97 } while (0) 96 } while (0)
98 97
99static struct cx231xx_tvnorm cx231xx_tvnorms[] = { 98static struct cx231xx_tvnorm cx231xx_tvnorms[] = {
@@ -988,29 +987,34 @@ static int cx231xx_load_firmware(struct cx231xx *dev)
988 IVTV_REG_APU, 0); 987 IVTV_REG_APU, 0);
989 988
990 if (retval != 0) { 989 if (retval != 0) {
991 pr_err("%s: Error with mc417_register_write\n", __func__); 990 dev_err(dev->dev,
991 "%s: Error with mc417_register_write\n", __func__);
992 return -1; 992 return -1;
993 } 993 }
994 994
995 retval = request_firmware(&firmware, CX231xx_FIRM_IMAGE_NAME, 995 retval = request_firmware(&firmware, CX231xx_FIRM_IMAGE_NAME,
996 &dev->udev->dev); 996 dev->dev);
997 997
998 if (retval != 0) { 998 if (retval != 0) {
999 pr_err("ERROR: Hotplug firmware request failed (%s).\n", 999 dev_err(dev->dev,
1000 "ERROR: Hotplug firmware request failed (%s).\n",
1000 CX231xx_FIRM_IMAGE_NAME); 1001 CX231xx_FIRM_IMAGE_NAME);
1001 pr_err("Please fix your hotplug setup, the board will not work without firmware loaded!\n"); 1002 dev_err(dev->dev,
1003 "Please fix your hotplug setup, the board will not work without firmware loaded!\n");
1002 return -1; 1004 return -1;
1003 } 1005 }
1004 1006
1005 if (firmware->size != CX231xx_FIRM_IMAGE_SIZE) { 1007 if (firmware->size != CX231xx_FIRM_IMAGE_SIZE) {
1006 pr_err("ERROR: Firmware size mismatch (have %zd, expected %d)\n", 1008 dev_err(dev->dev,
1009 "ERROR: Firmware size mismatch (have %zd, expected %d)\n",
1007 firmware->size, CX231xx_FIRM_IMAGE_SIZE); 1010 firmware->size, CX231xx_FIRM_IMAGE_SIZE);
1008 release_firmware(firmware); 1011 release_firmware(firmware);
1009 return -1; 1012 return -1;
1010 } 1013 }
1011 1014
1012 if (0 != memcmp(firmware->data, magic, 8)) { 1015 if (0 != memcmp(firmware->data, magic, 8)) {
1013 pr_err("ERROR: Firmware magic mismatch, wrong file?\n"); 1016 dev_err(dev->dev,
1017 "ERROR: Firmware magic mismatch, wrong file?\n");
1014 release_firmware(firmware); 1018 release_firmware(firmware);
1015 return -1; 1019 return -1;
1016 } 1020 }
@@ -1057,7 +1061,8 @@ static int cx231xx_load_firmware(struct cx231xx *dev)
1057 retval |= mc417_register_write(dev, IVTV_REG_HW_BLOCKS, 1061 retval |= mc417_register_write(dev, IVTV_REG_HW_BLOCKS,
1058 IVTV_CMD_HW_BLOCKS_RST); 1062 IVTV_CMD_HW_BLOCKS_RST);
1059 if (retval < 0) { 1063 if (retval < 0) {
1060 pr_err("%s: Error with mc417_register_write\n", 1064 dev_err(dev->dev,
1065 "%s: Error with mc417_register_write\n",
1061 __func__); 1066 __func__);
1062 return retval; 1067 return retval;
1063 } 1068 }
@@ -1069,7 +1074,8 @@ static int cx231xx_load_firmware(struct cx231xx *dev)
1069 retval |= mc417_register_write(dev, IVTV_REG_VPU, value & 0xFFFFFFE8); 1074 retval |= mc417_register_write(dev, IVTV_REG_VPU, value & 0xFFFFFFE8);
1070 1075
1071 if (retval < 0) { 1076 if (retval < 0) {
1072 pr_err("%s: Error with mc417_register_write\n", 1077 dev_err(dev->dev,
1078 "%s: Error with mc417_register_write\n",
1073 __func__); 1079 __func__);
1074 return retval; 1080 return retval;
1075 } 1081 }
@@ -1114,28 +1120,31 @@ static int cx231xx_initialize_codec(struct cx231xx *dev)
1114 cx231xx_disable656(dev); 1120 cx231xx_disable656(dev);
1115 retval = cx231xx_api_cmd(dev, CX2341X_ENC_PING_FW, 0, 0); /* ping */ 1121 retval = cx231xx_api_cmd(dev, CX2341X_ENC_PING_FW, 0, 0); /* ping */
1116 if (retval < 0) { 1122 if (retval < 0) {
1117 dprintk(2, "%s() PING OK\n", __func__); 1123 dprintk(2, "%s: PING OK\n", __func__);
1118 retval = cx231xx_load_firmware(dev); 1124 retval = cx231xx_load_firmware(dev);
1119 if (retval < 0) { 1125 if (retval < 0) {
1120 pr_err("%s() f/w load failed\n", __func__); 1126 dev_err(dev->dev,
1127 "%s: f/w load failed\n", __func__);
1121 return retval; 1128 return retval;
1122 } 1129 }
1123 retval = cx231xx_find_mailbox(dev); 1130 retval = cx231xx_find_mailbox(dev);
1124 if (retval < 0) { 1131 if (retval < 0) {
1125 pr_err("%s() mailbox < 0, error\n", 1132 dev_err(dev->dev, "%s: mailbox < 0, error\n",
1126 __func__); 1133 __func__);
1127 return -1; 1134 return -1;
1128 } 1135 }
1129 dev->cx23417_mailbox = retval; 1136 dev->cx23417_mailbox = retval;
1130 retval = cx231xx_api_cmd(dev, CX2341X_ENC_PING_FW, 0, 0); 1137 retval = cx231xx_api_cmd(dev, CX2341X_ENC_PING_FW, 0, 0);
1131 if (retval < 0) { 1138 if (retval < 0) {
1132 pr_err("ERROR: cx23417 firmware ping failed!\n"); 1139 dev_err(dev->dev,
1140 "ERROR: cx23417 firmware ping failed!\n");
1133 return -1; 1141 return -1;
1134 } 1142 }
1135 retval = cx231xx_api_cmd(dev, CX2341X_ENC_GET_VERSION, 0, 1, 1143 retval = cx231xx_api_cmd(dev, CX2341X_ENC_GET_VERSION, 0, 1,
1136 &version); 1144 &version);
1137 if (retval < 0) { 1145 if (retval < 0) {
1138 pr_err("ERROR: cx23417 firmware get encoder: version failed!\n"); 1146 dev_err(dev->dev,
1147 "ERROR: cx23417 firmware get encoder: version failed!\n");
1139 return -1; 1148 return -1;
1140 } 1149 }
1141 dprintk(1, "cx23417 firmware version is 0x%08x\n", version); 1150 dprintk(1, "cx23417 firmware version is 0x%08x\n", version);
@@ -1416,8 +1425,9 @@ static int bb_buf_prepare(struct videobuf_queue *q,
1416 if (!dev->video_mode.bulk_ctl.num_bufs) 1425 if (!dev->video_mode.bulk_ctl.num_bufs)
1417 urb_init = 1; 1426 urb_init = 1;
1418 } 1427 }
1419 /*cx231xx_info("urb_init=%d dev->video_mode.max_pkt_size=%d\n", 1428 dev_dbg(dev->dev,
1420 urb_init, dev->video_mode.max_pkt_size);*/ 1429 "urb_init=%d dev->video_mode.max_pkt_size=%d\n",
1430 urb_init, dev->video_mode.max_pkt_size);
1421 dev->mode_tv = 1; 1431 dev->mode_tv = 1;
1422 1432
1423 if (urb_init) { 1433 if (urb_init) {
@@ -1688,7 +1698,7 @@ static int mpeg_open(struct file *file)
1688 sizeof(struct cx231xx_buffer), fh, &dev->lock); 1698 sizeof(struct cx231xx_buffer), fh, &dev->lock);
1689/* 1699/*
1690 videobuf_queue_sg_init(&fh->vidq, &cx231xx_qops, 1700 videobuf_queue_sg_init(&fh->vidq, &cx231xx_qops,
1691 &dev->udev->dev, &dev->ts1.slock, 1701 dev->dev, &dev->ts1.slock,
1692 V4L2_BUF_TYPE_VIDEO_CAPTURE, 1702 V4L2_BUF_TYPE_VIDEO_CAPTURE,
1693 V4L2_FIELD_INTERLACED, 1703 V4L2_FIELD_INTERLACED,
1694 sizeof(struct cx231xx_buffer), 1704 sizeof(struct cx231xx_buffer),
@@ -1798,7 +1808,6 @@ static unsigned int mpeg_poll(struct file *file,
1798static int mpeg_mmap(struct file *file, struct vm_area_struct *vma) 1808static int mpeg_mmap(struct file *file, struct vm_area_struct *vma)
1799{ 1809{
1800 struct cx231xx_fh *fh = file->private_data; 1810 struct cx231xx_fh *fh = file->private_data;
1801 struct cx231xx *dev = fh->dev;
1802 1811
1803 dprintk(2, "%s()\n", __func__); 1812 dprintk(2, "%s()\n", __func__);
1804 1813
@@ -1878,7 +1887,7 @@ static int cx231xx_s_video_encoding(struct cx2341x_handler *cxhdl, u32 val)
1878 /* fix videodecoder resolution */ 1887 /* fix videodecoder resolution */
1879 fmt.width = cxhdl->width / (is_mpeg1 ? 2 : 1); 1888 fmt.width = cxhdl->width / (is_mpeg1 ? 2 : 1);
1880 fmt.height = cxhdl->height; 1889 fmt.height = cxhdl->height;
1881 fmt.code = V4L2_MBUS_FMT_FIXED; 1890 fmt.code = MEDIA_BUS_FMT_FIXED;
1882 v4l2_subdev_call(dev->sd_cx25840, video, s_mbus_fmt, &fmt); 1891 v4l2_subdev_call(dev->sd_cx25840, video, s_mbus_fmt, &fmt);
1883 return 0; 1892 return 0;
1884} 1893}
diff --git a/drivers/media/usb/cx231xx/cx231xx-audio.c b/drivers/media/usb/cx231xx/cx231xx-audio.c
index 9b925874d392..de4ae5eb4830 100644
--- a/drivers/media/usb/cx231xx/cx231xx-audio.c
+++ b/drivers/media/usb/cx231xx/cx231xx-audio.c
@@ -20,8 +20,8 @@
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */ 21 */
22 22
23#include "cx231xx.h"
23#include <linux/kernel.h> 24#include <linux/kernel.h>
24#include <linux/usb.h>
25#include <linux/init.h> 25#include <linux/init.h>
26#include <linux/sound.h> 26#include <linux/sound.h>
27#include <linux/spinlock.h> 27#include <linux/spinlock.h>
@@ -37,25 +37,18 @@
37#include <sound/initval.h> 37#include <sound/initval.h>
38#include <sound/control.h> 38#include <sound/control.h>
39#include <media/v4l2-common.h> 39#include <media/v4l2-common.h>
40#include "cx231xx.h"
41 40
42static int debug; 41static int debug;
43module_param(debug, int, 0644); 42module_param(debug, int, 0644);
44MODULE_PARM_DESC(debug, "activates debug info"); 43MODULE_PARM_DESC(debug, "activates debug info");
45 44
46#define dprintk(fmt, arg...) do { \
47 if (debug) \
48 printk(KERN_INFO "cx231xx-audio %s: " fmt, \
49 __func__, ##arg); \
50 } while (0)
51
52static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; 45static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
53 46
54static int cx231xx_isoc_audio_deinit(struct cx231xx *dev) 47static int cx231xx_isoc_audio_deinit(struct cx231xx *dev)
55{ 48{
56 int i; 49 int i;
57 50
58 dprintk("Stopping isoc\n"); 51 dev_dbg(dev->dev, "Stopping isoc\n");
59 52
60 for (i = 0; i < CX231XX_AUDIO_BUFS; i++) { 53 for (i = 0; i < CX231XX_AUDIO_BUFS; i++) {
61 if (dev->adev.urb[i]) { 54 if (dev->adev.urb[i]) {
@@ -79,7 +72,7 @@ static int cx231xx_bulk_audio_deinit(struct cx231xx *dev)
79{ 72{
80 int i; 73 int i;
81 74
82 dprintk("Stopping bulk\n"); 75 dev_dbg(dev->dev, "Stopping bulk\n");
83 76
84 for (i = 0; i < CX231XX_AUDIO_BUFS; i++) { 77 for (i = 0; i < CX231XX_AUDIO_BUFS; i++) {
85 if (dev->adev.urb[i]) { 78 if (dev->adev.urb[i]) {
@@ -123,7 +116,8 @@ static void cx231xx_audio_isocirq(struct urb *urb)
123 case -ESHUTDOWN: 116 case -ESHUTDOWN:
124 return; 117 return;
125 default: /* error */ 118 default: /* error */
126 dprintk("urb completition error %d.\n", urb->status); 119 dev_dbg(dev->dev, "urb completition error %d.\n",
120 urb->status);
127 break; 121 break;
128 } 122 }
129 123
@@ -182,8 +176,9 @@ static void cx231xx_audio_isocirq(struct urb *urb)
182 176
183 status = usb_submit_urb(urb, GFP_ATOMIC); 177 status = usb_submit_urb(urb, GFP_ATOMIC);
184 if (status < 0) { 178 if (status < 0) {
185 cx231xx_errdev("resubmit of audio urb failed (error=%i)\n", 179 dev_err(dev->dev,
186 status); 180 "resubmit of audio urb failed (error=%i)\n",
181 status);
187 } 182 }
188 return; 183 return;
189} 184}
@@ -211,7 +206,8 @@ static void cx231xx_audio_bulkirq(struct urb *urb)
211 case -ESHUTDOWN: 206 case -ESHUTDOWN:
212 return; 207 return;
213 default: /* error */ 208 default: /* error */
214 dprintk("urb completition error %d.\n", urb->status); 209 dev_dbg(dev->dev, "urb completition error %d.\n",
210 urb->status);
215 break; 211 break;
216 } 212 }
217 213
@@ -266,8 +262,9 @@ static void cx231xx_audio_bulkirq(struct urb *urb)
266 262
267 status = usb_submit_urb(urb, GFP_ATOMIC); 263 status = usb_submit_urb(urb, GFP_ATOMIC);
268 if (status < 0) { 264 if (status < 0) {
269 cx231xx_errdev("resubmit of audio urb failed (error=%i)\n", 265 dev_err(dev->dev,
270 status); 266 "resubmit of audio urb failed (error=%i)\n",
267 status);
271 } 268 }
272 return; 269 return;
273} 270}
@@ -277,7 +274,8 @@ static int cx231xx_init_audio_isoc(struct cx231xx *dev)
277 int i, errCode; 274 int i, errCode;
278 int sb_size; 275 int sb_size;
279 276
280 cx231xx_info("%s: Starting ISO AUDIO transfers\n", __func__); 277 dev_dbg(dev->dev,
278 "%s: Starting ISO AUDIO transfers\n", __func__);
281 279
282 if (dev->state & DEV_DISCONNECTED) 280 if (dev->state & DEV_DISCONNECTED)
283 return -ENODEV; 281 return -ENODEV;
@@ -295,7 +293,7 @@ static int cx231xx_init_audio_isoc(struct cx231xx *dev)
295 memset(dev->adev.transfer_buffer[i], 0x80, sb_size); 293 memset(dev->adev.transfer_buffer[i], 0x80, sb_size);
296 urb = usb_alloc_urb(CX231XX_ISO_NUM_AUDIO_PACKETS, GFP_ATOMIC); 294 urb = usb_alloc_urb(CX231XX_ISO_NUM_AUDIO_PACKETS, GFP_ATOMIC);
297 if (!urb) { 295 if (!urb) {
298 cx231xx_errdev("usb_alloc_urb failed!\n"); 296 dev_err(dev->dev, "usb_alloc_urb failed!\n");
299 for (j = 0; j < i; j++) { 297 for (j = 0; j < i; j++) {
300 usb_free_urb(dev->adev.urb[j]); 298 usb_free_urb(dev->adev.urb[j]);
301 kfree(dev->adev.transfer_buffer[j]); 299 kfree(dev->adev.transfer_buffer[j]);
@@ -338,7 +336,8 @@ static int cx231xx_init_audio_bulk(struct cx231xx *dev)
338 int i, errCode; 336 int i, errCode;
339 int sb_size; 337 int sb_size;
340 338
341 cx231xx_info("%s: Starting BULK AUDIO transfers\n", __func__); 339 dev_dbg(dev->dev,
340 "%s: Starting BULK AUDIO transfers\n", __func__);
342 341
343 if (dev->state & DEV_DISCONNECTED) 342 if (dev->state & DEV_DISCONNECTED)
344 return -ENODEV; 343 return -ENODEV;
@@ -356,7 +355,7 @@ static int cx231xx_init_audio_bulk(struct cx231xx *dev)
356 memset(dev->adev.transfer_buffer[i], 0x80, sb_size); 355 memset(dev->adev.transfer_buffer[i], 0x80, sb_size);
357 urb = usb_alloc_urb(CX231XX_NUM_AUDIO_PACKETS, GFP_ATOMIC); 356 urb = usb_alloc_urb(CX231XX_NUM_AUDIO_PACKETS, GFP_ATOMIC);
358 if (!urb) { 357 if (!urb) {
359 cx231xx_errdev("usb_alloc_urb failed!\n"); 358 dev_err(dev->dev, "usb_alloc_urb failed!\n");
360 for (j = 0; j < i; j++) { 359 for (j = 0; j < i; j++) {
361 usb_free_urb(dev->adev.urb[j]); 360 usb_free_urb(dev->adev.urb[j]);
362 kfree(dev->adev.transfer_buffer[j]); 361 kfree(dev->adev.transfer_buffer[j]);
@@ -392,8 +391,9 @@ static int snd_pcm_alloc_vmalloc_buffer(struct snd_pcm_substream *subs,
392 size_t size) 391 size_t size)
393{ 392{
394 struct snd_pcm_runtime *runtime = subs->runtime; 393 struct snd_pcm_runtime *runtime = subs->runtime;
394 struct cx231xx *dev = snd_pcm_substream_chip(subs);
395 395
396 dprintk("Allocating vbuffer\n"); 396 dev_dbg(dev->dev, "Allocating vbuffer\n");
397 if (runtime->dma_area) { 397 if (runtime->dma_area) {
398 if (runtime->dma_bytes > size) 398 if (runtime->dma_bytes > size)
399 return 0; 399 return 0;
@@ -436,16 +436,12 @@ static int snd_cx231xx_capture_open(struct snd_pcm_substream *substream)
436 struct snd_pcm_runtime *runtime = substream->runtime; 436 struct snd_pcm_runtime *runtime = substream->runtime;
437 int ret = 0; 437 int ret = 0;
438 438
439 dprintk("opening device and trying to acquire exclusive lock\n"); 439 dev_dbg(dev->dev,
440 440 "opening device and trying to acquire exclusive lock\n");
441 if (!dev) {
442 cx231xx_errdev("BUG: cx231xx can't find device struct."
443 " Can't proceed with open\n");
444 return -ENODEV;
445 }
446 441
447 if (dev->state & DEV_DISCONNECTED) { 442 if (dev->state & DEV_DISCONNECTED) {
448 cx231xx_errdev("Can't open. the device was removed.\n"); 443 dev_err(dev->dev,
444 "Can't open. the device was removed.\n");
449 return -ENODEV; 445 return -ENODEV;
450 } 446 }
451 447
@@ -458,7 +454,8 @@ static int snd_cx231xx_capture_open(struct snd_pcm_substream *substream)
458 ret = cx231xx_set_alt_setting(dev, INDEX_AUDIO, 0); 454 ret = cx231xx_set_alt_setting(dev, INDEX_AUDIO, 0);
459 mutex_unlock(&dev->lock); 455 mutex_unlock(&dev->lock);
460 if (ret < 0) { 456 if (ret < 0) {
461 cx231xx_errdev("failed to set alternate setting !\n"); 457 dev_err(dev->dev,
458 "failed to set alternate setting !\n");
462 459
463 return ret; 460 return ret;
464 } 461 }
@@ -484,7 +481,7 @@ static int snd_cx231xx_pcm_close(struct snd_pcm_substream *substream)
484 int ret; 481 int ret;
485 struct cx231xx *dev = snd_pcm_substream_chip(substream); 482 struct cx231xx *dev = snd_pcm_substream_chip(substream);
486 483
487 dprintk("closing device\n"); 484 dev_dbg(dev->dev, "closing device\n");
488 485
489 /* inform hardware to stop streaming */ 486 /* inform hardware to stop streaming */
490 mutex_lock(&dev->lock); 487 mutex_lock(&dev->lock);
@@ -494,7 +491,8 @@ static int snd_cx231xx_pcm_close(struct snd_pcm_substream *substream)
494 /* 1 - 48000 samples per sec */ 491 /* 1 - 48000 samples per sec */
495 ret = cx231xx_set_alt_setting(dev, INDEX_AUDIO, 0); 492 ret = cx231xx_set_alt_setting(dev, INDEX_AUDIO, 0);
496 if (ret < 0) { 493 if (ret < 0) {
497 cx231xx_errdev("failed to set alternate setting !\n"); 494 dev_err(dev->dev,
495 "failed to set alternate setting !\n");
498 496
499 mutex_unlock(&dev->lock); 497 mutex_unlock(&dev->lock);
500 return ret; 498 return ret;
@@ -504,10 +502,10 @@ static int snd_cx231xx_pcm_close(struct snd_pcm_substream *substream)
504 mutex_unlock(&dev->lock); 502 mutex_unlock(&dev->lock);
505 503
506 if (dev->adev.users == 0 && dev->adev.shutdown == 1) { 504 if (dev->adev.users == 0 && dev->adev.shutdown == 1) {
507 dprintk("audio users: %d\n", dev->adev.users); 505 dev_dbg(dev->dev, "audio users: %d\n", dev->adev.users);
508 dprintk("disabling audio stream!\n"); 506 dev_dbg(dev->dev, "disabling audio stream!\n");
509 dev->adev.shutdown = 0; 507 dev->adev.shutdown = 0;
510 dprintk("released lock\n"); 508 dev_dbg(dev->dev, "released lock\n");
511 if (atomic_read(&dev->stream_started) > 0) { 509 if (atomic_read(&dev->stream_started) > 0) {
512 atomic_set(&dev->stream_started, 0); 510 atomic_set(&dev->stream_started, 0);
513 schedule_work(&dev->wq_trigger); 511 schedule_work(&dev->wq_trigger);
@@ -519,9 +517,10 @@ static int snd_cx231xx_pcm_close(struct snd_pcm_substream *substream)
519static int snd_cx231xx_hw_capture_params(struct snd_pcm_substream *substream, 517static int snd_cx231xx_hw_capture_params(struct snd_pcm_substream *substream,
520 struct snd_pcm_hw_params *hw_params) 518 struct snd_pcm_hw_params *hw_params)
521{ 519{
520 struct cx231xx *dev = snd_pcm_substream_chip(substream);
522 int ret; 521 int ret;
523 522
524 dprintk("Setting capture parameters\n"); 523 dev_dbg(dev->dev, "Setting capture parameters\n");
525 524
526 ret = snd_pcm_alloc_vmalloc_buffer(substream, 525 ret = snd_pcm_alloc_vmalloc_buffer(substream,
527 params_buffer_bytes(hw_params)); 526 params_buffer_bytes(hw_params));
@@ -543,7 +542,7 @@ static int snd_cx231xx_hw_capture_free(struct snd_pcm_substream *substream)
543{ 542{
544 struct cx231xx *dev = snd_pcm_substream_chip(substream); 543 struct cx231xx *dev = snd_pcm_substream_chip(substream);
545 544
546 dprintk("Stop capture, if needed\n"); 545 dev_dbg(dev->dev, "Stop capture, if needed\n");
547 546
548 if (atomic_read(&dev->stream_started) > 0) { 547 if (atomic_read(&dev->stream_started) > 0) {
549 atomic_set(&dev->stream_started, 0); 548 atomic_set(&dev->stream_started, 0);
@@ -568,7 +567,7 @@ static void audio_trigger(struct work_struct *work)
568 struct cx231xx *dev = container_of(work, struct cx231xx, wq_trigger); 567 struct cx231xx *dev = container_of(work, struct cx231xx, wq_trigger);
569 568
570 if (atomic_read(&dev->stream_started)) { 569 if (atomic_read(&dev->stream_started)) {
571 dprintk("starting capture"); 570 dev_dbg(dev->dev, "starting capture");
572 if (is_fw_load(dev) == 0) 571 if (is_fw_load(dev) == 0)
573 cx25840_call(dev, core, load_fw); 572 cx25840_call(dev, core, load_fw);
574 if (dev->USE_ISO) 573 if (dev->USE_ISO)
@@ -576,7 +575,7 @@ static void audio_trigger(struct work_struct *work)
576 else 575 else
577 cx231xx_init_audio_bulk(dev); 576 cx231xx_init_audio_bulk(dev);
578 } else { 577 } else {
579 dprintk("stopping capture"); 578 dev_dbg(dev->dev, "stopping capture");
580 cx231xx_isoc_audio_deinit(dev); 579 cx231xx_isoc_audio_deinit(dev);
581 } 580 }
582} 581}
@@ -662,10 +661,10 @@ static int cx231xx_audio_init(struct cx231xx *dev)
662 return 0; 661 return 0;
663 } 662 }
664 663
665 cx231xx_info("cx231xx-audio.c: probing for cx231xx " 664 dev_dbg(dev->dev,
666 "non standard usbaudio\n"); 665 "probing for cx231xx non standard usbaudio\n");
667 666
668 err = snd_card_new(&dev->udev->dev, index[devnr], "Cx231xx Audio", 667 err = snd_card_new(dev->dev, index[devnr], "Cx231xx Audio",
669 THIS_MODULE, 0, &card); 668 THIS_MODULE, 0, &card);
670 if (err < 0) 669 if (err < 0)
671 return err; 670 return err;
@@ -707,14 +706,13 @@ static int cx231xx_audio_init(struct cx231xx *dev)
707 bEndpointAddress; 706 bEndpointAddress;
708 707
709 adev->num_alt = uif->num_altsetting; 708 adev->num_alt = uif->num_altsetting;
710 cx231xx_info("EndPoint Addr 0x%x, Alternate settings: %i\n", 709 dev_info(dev->dev,
711 adev->end_point_addr, adev->num_alt); 710 "audio EndPoint Addr 0x%x, Alternate settings: %i\n",
711 adev->end_point_addr, adev->num_alt);
712 adev->alt_max_pkt_size = kmalloc(32 * adev->num_alt, GFP_KERNEL); 712 adev->alt_max_pkt_size = kmalloc(32 * adev->num_alt, GFP_KERNEL);
713 713
714 if (adev->alt_max_pkt_size == NULL) { 714 if (adev->alt_max_pkt_size == NULL)
715 cx231xx_errdev("out of memory!\n");
716 return -ENOMEM; 715 return -ENOMEM;
717 }
718 716
719 for (i = 0; i < adev->num_alt; i++) { 717 for (i = 0; i < adev->num_alt; i++) {
720 u16 tmp = 718 u16 tmp =
@@ -722,8 +720,9 @@ static int cx231xx_audio_init(struct cx231xx *dev)
722 wMaxPacketSize); 720 wMaxPacketSize);
723 adev->alt_max_pkt_size[i] = 721 adev->alt_max_pkt_size[i] =
724 (tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1); 722 (tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1);
725 cx231xx_info("Alternate setting %i, max size= %i\n", i, 723 dev_dbg(dev->dev,
726 adev->alt_max_pkt_size[i]); 724 "audio alternate setting %i, max size= %i\n", i,
725 adev->alt_max_pkt_size[i]);
727 } 726 }
728 727
729 return 0; 728 return 0;
diff --git a/drivers/media/usb/cx231xx/cx231xx-avcore.c b/drivers/media/usb/cx231xx/cx231xx-avcore.c
index 40a69879fc0a..39e887925e3d 100644
--- a/drivers/media/usb/cx231xx/cx231xx-avcore.c
+++ b/drivers/media/usb/cx231xx/cx231xx-avcore.c
@@ -22,12 +22,12 @@
22 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 22 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 */ 23 */
24 24
25#include "cx231xx.h"
25#include <linux/init.h> 26#include <linux/init.h>
26#include <linux/list.h> 27#include <linux/list.h>
27#include <linux/module.h> 28#include <linux/module.h>
28#include <linux/kernel.h> 29#include <linux/kernel.h>
29#include <linux/bitmap.h> 30#include <linux/bitmap.h>
30#include <linux/usb.h>
31#include <linux/i2c.h> 31#include <linux/i2c.h>
32#include <linux/mm.h> 32#include <linux/mm.h>
33#include <linux/mutex.h> 33#include <linux/mutex.h>
@@ -36,7 +36,6 @@
36#include <media/v4l2-common.h> 36#include <media/v4l2-common.h>
37#include <media/v4l2-ioctl.h> 37#include <media/v4l2-ioctl.h>
38 38
39#include "cx231xx.h"
40#include "cx231xx-dif.h" 39#include "cx231xx-dif.h"
41 40
42#define TUNER_MODE_FM_RADIO 0 41#define TUNER_MODE_FM_RADIO 0
@@ -83,10 +82,10 @@ void initGPIO(struct cx231xx *dev)
83 cx231xx_send_gpio_cmd(dev, _gpio_direction, (u8 *)&value, 4, 0, 0); 82 cx231xx_send_gpio_cmd(dev, _gpio_direction, (u8 *)&value, 4, 0, 0);
84 83
85 verve_read_byte(dev, 0x07, &val); 84 verve_read_byte(dev, 0x07, &val);
86 cx231xx_info(" verve_read_byte address0x07=0x%x\n", val); 85 dev_dbg(dev->dev, "verve_read_byte address0x07=0x%x\n", val);
87 verve_write_byte(dev, 0x07, 0xF4); 86 verve_write_byte(dev, 0x07, 0xF4);
88 verve_read_byte(dev, 0x07, &val); 87 verve_read_byte(dev, 0x07, &val);
89 cx231xx_info(" verve_read_byte address0x07=0x%x\n", val); 88 dev_dbg(dev->dev, "verve_read_byte address0x07=0x%x\n", val);
90 89
91 cx231xx_capture_start(dev, 1, Vbi); 90 cx231xx_capture_start(dev, 1, Vbi);
92 91
@@ -156,22 +155,25 @@ int cx231xx_afe_init_super_block(struct cx231xx *dev, u32 ref_count)
156 while (afe_power_status != 0x18) { 155 while (afe_power_status != 0x18) {
157 status = afe_write_byte(dev, SUP_BLK_PWRDN, 0x18); 156 status = afe_write_byte(dev, SUP_BLK_PWRDN, 0x18);
158 if (status < 0) { 157 if (status < 0) {
159 cx231xx_info( 158 dev_dbg(dev->dev,
160 ": Init Super Block failed in send cmd\n"); 159 "%s: Init Super Block failed in send cmd\n",
160 __func__);
161 break; 161 break;
162 } 162 }
163 163
164 status = afe_read_byte(dev, SUP_BLK_PWRDN, &afe_power_status); 164 status = afe_read_byte(dev, SUP_BLK_PWRDN, &afe_power_status);
165 afe_power_status &= 0xff; 165 afe_power_status &= 0xff;
166 if (status < 0) { 166 if (status < 0) {
167 cx231xx_info( 167 dev_dbg(dev->dev,
168 ": Init Super Block failed in receive cmd\n"); 168 "%s: Init Super Block failed in receive cmd\n",
169 __func__);
169 break; 170 break;
170 } 171 }
171 i++; 172 i++;
172 if (i == 10) { 173 if (i == 10) {
173 cx231xx_info( 174 dev_dbg(dev->dev,
174 ": Init Super Block force break in loop !!!!\n"); 175 "%s: Init Super Block force break in loop !!!!\n",
176 __func__);
175 status = -1; 177 status = -1;
176 break; 178 break;
177 } 179 }
@@ -410,7 +412,7 @@ int cx231xx_afe_update_power_control(struct cx231xx *dev,
410 status |= afe_write_byte(dev, ADC_PWRDN_CLAMP_CH3, 412 status |= afe_write_byte(dev, ADC_PWRDN_CLAMP_CH3,
411 0x00); 413 0x00);
412 } else { 414 } else {
413 cx231xx_info("Invalid AV mode input\n"); 415 dev_dbg(dev->dev, "Invalid AV mode input\n");
414 status = -1; 416 status = -1;
415 } 417 }
416 break; 418 break;
@@ -467,7 +469,7 @@ int cx231xx_afe_update_power_control(struct cx231xx *dev,
467 status |= afe_write_byte(dev, ADC_PWRDN_CLAMP_CH3, 469 status |= afe_write_byte(dev, ADC_PWRDN_CLAMP_CH3,
468 0x40); 470 0x40);
469 } else { 471 } else {
470 cx231xx_info("Invalid AV mode input\n"); 472 dev_dbg(dev->dev, "Invalid AV mode input\n");
471 status = -1; 473 status = -1;
472 } 474 }
473 } /* switch */ 475 } /* switch */
@@ -573,9 +575,9 @@ int cx231xx_set_video_input_mux(struct cx231xx *dev, u8 input)
573 status = cx231xx_set_power_mode(dev, 575 status = cx231xx_set_power_mode(dev,
574 POLARIS_AVMODE_ENXTERNAL_AV); 576 POLARIS_AVMODE_ENXTERNAL_AV);
575 if (status < 0) { 577 if (status < 0) {
576 cx231xx_errdev("%s: set_power_mode : Failed to" 578 dev_err(dev->dev,
577 " set Power - errCode [%d]!\n", 579 "%s: Failed to set Power - errCode [%d]!\n",
578 __func__, status); 580 __func__, status);
579 return status; 581 return status;
580 } 582 }
581 } 583 }
@@ -591,8 +593,8 @@ int cx231xx_set_video_input_mux(struct cx231xx *dev, u8 input)
591 status = cx231xx_set_power_mode(dev, 593 status = cx231xx_set_power_mode(dev,
592 POLARIS_AVMODE_ANALOGT_TV); 594 POLARIS_AVMODE_ANALOGT_TV);
593 if (status < 0) { 595 if (status < 0) {
594 cx231xx_errdev("%s: set_power_mode:Failed" 596 dev_err(dev->dev,
595 " to set Power - errCode [%d]!\n", 597 "%s: Failed to set Power - errCode [%d]!\n",
596 __func__, status); 598 __func__, status);
597 return status; 599 return status;
598 } 600 }
@@ -608,8 +610,8 @@ int cx231xx_set_video_input_mux(struct cx231xx *dev, u8 input)
608 610
609 break; 611 break;
610 default: 612 default:
611 cx231xx_errdev("%s: set_power_mode : Unknown Input %d !\n", 613 dev_err(dev->dev, "%s: Unknown Input %d !\n",
612 __func__, INPUT(input)->type); 614 __func__, INPUT(input)->type);
613 break; 615 break;
614 } 616 }
615 617
@@ -628,8 +630,8 @@ int cx231xx_set_decoder_video_input(struct cx231xx *dev,
628 if (pin_type != dev->video_input) { 630 if (pin_type != dev->video_input) {
629 status = cx231xx_afe_adjust_ref_count(dev, pin_type); 631 status = cx231xx_afe_adjust_ref_count(dev, pin_type);
630 if (status < 0) { 632 if (status < 0) {
631 cx231xx_errdev("%s: adjust_ref_count :Failed to set" 633 dev_err(dev->dev,
632 "AFE input mux - errCode [%d]!\n", 634 "%s: adjust_ref_count :Failed to set AFE input mux - errCode [%d]!\n",
633 __func__, status); 635 __func__, status);
634 return status; 636 return status;
635 } 637 }
@@ -638,9 +640,9 @@ int cx231xx_set_decoder_video_input(struct cx231xx *dev,
638 /* call afe block to set video inputs */ 640 /* call afe block to set video inputs */
639 status = cx231xx_afe_set_input_mux(dev, input); 641 status = cx231xx_afe_set_input_mux(dev, input);
640 if (status < 0) { 642 if (status < 0) {
641 cx231xx_errdev("%s: set_input_mux :Failed to set" 643 dev_err(dev->dev,
642 " AFE input mux - errCode [%d]!\n", 644 "%s: set_input_mux :Failed to set AFE input mux - errCode [%d]!\n",
643 __func__, status); 645 __func__, status);
644 return status; 646 return status;
645 } 647 }
646 648
@@ -670,8 +672,8 @@ int cx231xx_set_decoder_video_input(struct cx231xx *dev,
670 /* Tell DIF object to go to baseband mode */ 672 /* Tell DIF object to go to baseband mode */
671 status = cx231xx_dif_set_standard(dev, DIF_USE_BASEBAND); 673 status = cx231xx_dif_set_standard(dev, DIF_USE_BASEBAND);
672 if (status < 0) { 674 if (status < 0) {
673 cx231xx_errdev("%s: cx231xx_dif set to By pass" 675 dev_err(dev->dev,
674 " mode- errCode [%d]!\n", 676 "%s: cx231xx_dif set to By pass mode- errCode [%d]!\n",
675 __func__, status); 677 __func__, status);
676 return status; 678 return status;
677 } 679 }
@@ -715,8 +717,8 @@ int cx231xx_set_decoder_video_input(struct cx231xx *dev,
715 /* Tell DIF object to go to baseband mode */ 717 /* Tell DIF object to go to baseband mode */
716 status = cx231xx_dif_set_standard(dev, DIF_USE_BASEBAND); 718 status = cx231xx_dif_set_standard(dev, DIF_USE_BASEBAND);
717 if (status < 0) { 719 if (status < 0) {
718 cx231xx_errdev("%s: cx231xx_dif set to By pass" 720 dev_err(dev->dev,
719 " mode- errCode [%d]!\n", 721 "%s: cx231xx_dif set to By pass mode- errCode [%d]!\n",
720 __func__, status); 722 __func__, status);
721 return status; 723 return status;
722 } 724 }
@@ -790,9 +792,9 @@ int cx231xx_set_decoder_video_input(struct cx231xx *dev,
790 status = cx231xx_dif_set_standard(dev, 792 status = cx231xx_dif_set_standard(dev,
791 DIF_USE_BASEBAND); 793 DIF_USE_BASEBAND);
792 if (status < 0) { 794 if (status < 0) {
793 cx231xx_errdev("%s: cx231xx_dif set to By pass" 795 dev_err(dev->dev,
794 " mode- errCode [%d]!\n", 796 "%s: cx231xx_dif set to By pass mode- errCode [%d]!\n",
795 __func__, status); 797 __func__, status);
796 return status; 798 return status;
797 } 799 }
798 800
@@ -826,9 +828,9 @@ int cx231xx_set_decoder_video_input(struct cx231xx *dev,
826 /* Reinitialize the DIF */ 828 /* Reinitialize the DIF */
827 status = cx231xx_dif_set_standard(dev, dev->norm); 829 status = cx231xx_dif_set_standard(dev, dev->norm);
828 if (status < 0) { 830 if (status < 0) {
829 cx231xx_errdev("%s: cx231xx_dif set to By pass" 831 dev_err(dev->dev,
830 " mode- errCode [%d]!\n", 832 "%s: cx231xx_dif set to By pass mode- errCode [%d]!\n",
831 __func__, status); 833 __func__, status);
832 return status; 834 return status;
833 } 835 }
834 836
@@ -970,14 +972,14 @@ int cx231xx_do_mode_ctrl_overrides(struct cx231xx *dev)
970{ 972{
971 int status = 0; 973 int status = 0;
972 974
973 cx231xx_info("do_mode_ctrl_overrides : 0x%x\n", 975 dev_dbg(dev->dev, "%s: 0x%x\n",
974 (unsigned int)dev->norm); 976 __func__, (unsigned int)dev->norm);
975 977
976 /* Change the DFE_CTRL3 bp_percent to fix flagging */ 978 /* Change the DFE_CTRL3 bp_percent to fix flagging */
977 status = vid_blk_write_word(dev, DFE_CTRL3, 0xCD3F0280); 979 status = vid_blk_write_word(dev, DFE_CTRL3, 0xCD3F0280);
978 980
979 if (dev->norm & (V4L2_STD_NTSC | V4L2_STD_PAL_M)) { 981 if (dev->norm & (V4L2_STD_NTSC | V4L2_STD_PAL_M)) {
980 cx231xx_info("do_mode_ctrl_overrides NTSC\n"); 982 dev_dbg(dev->dev, "%s: NTSC\n", __func__);
981 983
982 /* Move the close caption lines out of active video, 984 /* Move the close caption lines out of active video,
983 adjust the active video start point */ 985 adjust the active video start point */
@@ -1004,7 +1006,7 @@ int cx231xx_do_mode_ctrl_overrides(struct cx231xx *dev)
1004 (FLD_HBLANK_CNT, 0x79)); 1006 (FLD_HBLANK_CNT, 0x79));
1005 1007
1006 } else if (dev->norm & V4L2_STD_SECAM) { 1008 } else if (dev->norm & V4L2_STD_SECAM) {
1007 cx231xx_info("do_mode_ctrl_overrides SECAM\n"); 1009 dev_dbg(dev->dev, "%s: SECAM\n", __func__);
1008 status = cx231xx_read_modify_write_i2c_dword(dev, 1010 status = cx231xx_read_modify_write_i2c_dword(dev,
1009 VID_BLK_I2C_ADDRESS, 1011 VID_BLK_I2C_ADDRESS,
1010 VERT_TIM_CTRL, 1012 VERT_TIM_CTRL,
@@ -1031,7 +1033,7 @@ int cx231xx_do_mode_ctrl_overrides(struct cx231xx *dev)
1031 cx231xx_set_field 1033 cx231xx_set_field
1032 (FLD_HBLANK_CNT, 0x85)); 1034 (FLD_HBLANK_CNT, 0x85));
1033 } else { 1035 } else {
1034 cx231xx_info("do_mode_ctrl_overrides PAL\n"); 1036 dev_dbg(dev->dev, "%s: PAL\n", __func__);
1035 status = cx231xx_read_modify_write_i2c_dword(dev, 1037 status = cx231xx_read_modify_write_i2c_dword(dev,
1036 VID_BLK_I2C_ADDRESS, 1038 VID_BLK_I2C_ADDRESS,
1037 VERT_TIM_CTRL, 1039 VERT_TIM_CTRL,
@@ -1206,7 +1208,8 @@ int cx231xx_set_audio_decoder_input(struct cx231xx *dev,
1206 /* This is just a casual suggestion to people adding 1208 /* This is just a casual suggestion to people adding
1207 new boards in case they use a tuner type we don't 1209 new boards in case they use a tuner type we don't
1208 currently know about */ 1210 currently know about */
1209 printk(KERN_INFO "Unknown tuner type configuring SIF"); 1211 dev_info(dev->dev,
1212 "Unknown tuner type configuring SIF");
1210 break; 1213 break;
1211 } 1214 }
1212 break; 1215 break;
@@ -1270,8 +1273,13 @@ int cx231xx_enable_i2c_port_3(struct cx231xx *dev, bool is_port_3)
1270 int status = 0; 1273 int status = 0;
1271 bool current_is_port_3; 1274 bool current_is_port_3;
1272 1275
1273 if (dev->board.dont_use_port_3) 1276 /*
1274 is_port_3 = false; 1277 * Should this code check dev->port_3_switch_enabled first
1278 * to skip unnecessary reading of the register?
1279 * If yes, the flag dev->port_3_switch_enabled must be initialized
1280 * correctly.
1281 */
1282
1275 status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, 1283 status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER,
1276 PWR_CTL_EN, value, 4); 1284 PWR_CTL_EN, value, 4);
1277 if (status < 0) 1285 if (status < 0)
@@ -1288,12 +1296,13 @@ int cx231xx_enable_i2c_port_3(struct cx231xx *dev, bool is_port_3)
1288 else 1296 else
1289 value[0] &= ~I2C_DEMOD_EN; 1297 value[0] &= ~I2C_DEMOD_EN;
1290 1298
1291 cx231xx_info("Changing the i2c master port to %d\n",
1292 is_port_3 ? 3 : 1);
1293
1294 status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, 1299 status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER,
1295 PWR_CTL_EN, value, 4); 1300 PWR_CTL_EN, value, 4);
1296 1301
1302 /* remember status of the switch for usage in is_tuner */
1303 if (status >= 0)
1304 dev->port_3_switch_enabled = is_port_3;
1305
1297 return status; 1306 return status;
1298 1307
1299} 1308}
@@ -1325,113 +1334,131 @@ void cx231xx_dump_HH_reg(struct cx231xx *dev)
1325 1334
1326 for (i = 0x100; i < 0x140; i++) { 1335 for (i = 0x100; i < 0x140; i++) {
1327 vid_blk_read_word(dev, i, &value); 1336 vid_blk_read_word(dev, i, &value);
1328 cx231xx_info("reg0x%x=0x%x\n", i, value); 1337 dev_dbg(dev->dev, "reg0x%x=0x%x\n", i, value);
1329 i = i+3; 1338 i = i+3;
1330 } 1339 }
1331 1340
1332 for (i = 0x300; i < 0x400; i++) { 1341 for (i = 0x300; i < 0x400; i++) {
1333 vid_blk_read_word(dev, i, &value); 1342 vid_blk_read_word(dev, i, &value);
1334 cx231xx_info("reg0x%x=0x%x\n", i, value); 1343 dev_dbg(dev->dev, "reg0x%x=0x%x\n", i, value);
1335 i = i+3; 1344 i = i+3;
1336 } 1345 }
1337 1346
1338 for (i = 0x400; i < 0x440; i++) { 1347 for (i = 0x400; i < 0x440; i++) {
1339 vid_blk_read_word(dev, i, &value); 1348 vid_blk_read_word(dev, i, &value);
1340 cx231xx_info("reg0x%x=0x%x\n", i, value); 1349 dev_dbg(dev->dev, "reg0x%x=0x%x\n", i, value);
1341 i = i+3; 1350 i = i+3;
1342 } 1351 }
1343 1352
1344 vid_blk_read_word(dev, AFE_CTRL_C2HH_SRC_CTRL, &value); 1353 vid_blk_read_word(dev, AFE_CTRL_C2HH_SRC_CTRL, &value);
1345 cx231xx_info("AFE_CTRL_C2HH_SRC_CTRL=0x%x\n", value); 1354 dev_dbg(dev->dev, "AFE_CTRL_C2HH_SRC_CTRL=0x%x\n", value);
1346 vid_blk_write_word(dev, AFE_CTRL_C2HH_SRC_CTRL, 0x4485D390); 1355 vid_blk_write_word(dev, AFE_CTRL_C2HH_SRC_CTRL, 0x4485D390);
1347 vid_blk_read_word(dev, AFE_CTRL_C2HH_SRC_CTRL, &value); 1356 vid_blk_read_word(dev, AFE_CTRL_C2HH_SRC_CTRL, &value);
1348 cx231xx_info("AFE_CTRL_C2HH_SRC_CTRL=0x%x\n", value); 1357 dev_dbg(dev->dev, "AFE_CTRL_C2HH_SRC_CTRL=0x%x\n", value);
1349} 1358}
1350 1359
1351void cx231xx_dump_SC_reg(struct cx231xx *dev) 1360#if 0
1361static void cx231xx_dump_SC_reg(struct cx231xx *dev)
1352{ 1362{
1353 u8 value[4] = { 0, 0, 0, 0 }; 1363 u8 value[4] = { 0, 0, 0, 0 };
1354 cx231xx_info("cx231xx_dump_SC_reg!\n"); 1364 dev_dbg(dev->dev, "%s!\n", __func__);
1355 1365
1356 cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, BOARD_CFG_STAT, 1366 cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, BOARD_CFG_STAT,
1357 value, 4); 1367 value, 4);
1358 cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", BOARD_CFG_STAT, value[0], 1368 dev_dbg(dev->dev,
1359 value[1], value[2], value[3]); 1369 "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", BOARD_CFG_STAT, value[0],
1370 value[1], value[2], value[3]);
1360 cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, TS_MODE_REG, 1371 cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, TS_MODE_REG,
1361 value, 4); 1372 value, 4);
1362 cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", TS_MODE_REG, value[0], 1373 dev_dbg(dev->dev,
1363 value[1], value[2], value[3]); 1374 "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", TS_MODE_REG, value[0],
1375 value[1], value[2], value[3]);
1364 cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, TS1_CFG_REG, 1376 cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, TS1_CFG_REG,
1365 value, 4); 1377 value, 4);
1366 cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", TS1_CFG_REG, value[0], 1378 dev_dbg(dev->dev,
1367 value[1], value[2], value[3]); 1379 "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", TS1_CFG_REG, value[0],
1380 value[1], value[2], value[3]);
1368 cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, TS1_LENGTH_REG, 1381 cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, TS1_LENGTH_REG,
1369 value, 4); 1382 value, 4);
1370 cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", TS1_LENGTH_REG, value[0], 1383 dev_dbg(dev->dev,
1371 value[1], value[2], value[3]); 1384 "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", TS1_LENGTH_REG, value[0],
1385 value[1], value[2], value[3]);
1372 1386
1373 cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, TS2_CFG_REG, 1387 cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, TS2_CFG_REG,
1374 value, 4); 1388 value, 4);
1375 cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", TS2_CFG_REG, value[0], 1389 dev_dbg(dev->dev,
1376 value[1], value[2], value[3]); 1390 "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", TS2_CFG_REG, value[0],
1391 value[1], value[2], value[3]);
1377 cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, TS2_LENGTH_REG, 1392 cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, TS2_LENGTH_REG,
1378 value, 4); 1393 value, 4);
1379 cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", TS2_LENGTH_REG, value[0], 1394 dev_dbg(dev->dev,
1380 value[1], value[2], value[3]); 1395 "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", TS2_LENGTH_REG, value[0],
1396 value[1], value[2], value[3]);
1381 cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, EP_MODE_SET, 1397 cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, EP_MODE_SET,
1382 value, 4); 1398 value, 4);
1383 cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", EP_MODE_SET, value[0], 1399 dev_dbg(dev->dev,
1384 value[1], value[2], value[3]); 1400 "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", EP_MODE_SET, value[0],
1401 value[1], value[2], value[3]);
1385 cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_PTN1, 1402 cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_PTN1,
1386 value, 4); 1403 value, 4);
1387 cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_PWR_PTN1, value[0], 1404 dev_dbg(dev->dev,
1388 value[1], value[2], value[3]); 1405 "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_PWR_PTN1, value[0],
1406 value[1], value[2], value[3]);
1389 1407
1390 cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_PTN2, 1408 cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_PTN2,
1391 value, 4); 1409 value, 4);
1392 cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_PWR_PTN2, value[0], 1410 dev_dbg(dev->dev,
1393 value[1], value[2], value[3]); 1411 "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_PWR_PTN2, value[0],
1412 value[1], value[2], value[3]);
1394 cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_PTN3, 1413 cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_PTN3,
1395 value, 4); 1414 value, 4);
1396 cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_PWR_PTN3, value[0], 1415 dev_dbg(dev->dev,
1397 value[1], value[2], value[3]); 1416 "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_PWR_PTN3, value[0],
1417 value[1], value[2], value[3]);
1398 cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_MASK0, 1418 cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_MASK0,
1399 value, 4); 1419 value, 4);
1400 cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_PWR_MASK0, value[0], 1420 dev_dbg(dev->dev,
1401 value[1], value[2], value[3]); 1421 "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_PWR_MASK0, value[0],
1422 value[1], value[2], value[3]);
1402 cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_MASK1, 1423 cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_MASK1,
1403 value, 4); 1424 value, 4);
1404 cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_PWR_MASK1, value[0], 1425 dev_dbg(dev->dev,
1405 value[1], value[2], value[3]); 1426 "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_PWR_MASK1, value[0],
1427 value[1], value[2], value[3]);
1406 1428
1407 cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_MASK2, 1429 cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_MASK2,
1408 value, 4); 1430 value, 4);
1409 cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_PWR_MASK2, value[0], 1431 dev_dbg(dev->dev,
1410 value[1], value[2], value[3]); 1432 "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_PWR_MASK2, value[0],
1433 value[1], value[2], value[3]);
1411 cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_GAIN, 1434 cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_GAIN,
1412 value, 4); 1435 value, 4);
1413 cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_GAIN, value[0], 1436 dev_dbg(dev->dev,
1414 value[1], value[2], value[3]); 1437 "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_GAIN, value[0],
1438 value[1], value[2], value[3]);
1415 cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_CAR_REG, 1439 cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_CAR_REG,
1416 value, 4); 1440 value, 4);
1417 cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_CAR_REG, value[0], 1441 dev_dbg(dev->dev,
1418 value[1], value[2], value[3]); 1442 "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_CAR_REG, value[0],
1443 value[1], value[2], value[3]);
1419 cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_OT_CFG1, 1444 cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_OT_CFG1,
1420 value, 4); 1445 value, 4);
1421 cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_OT_CFG1, value[0], 1446 dev_dbg(dev->dev,
1422 value[1], value[2], value[3]); 1447 "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_OT_CFG1, value[0],
1448 value[1], value[2], value[3]);
1423 1449
1424 cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_OT_CFG2, 1450 cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_OT_CFG2,
1425 value, 4); 1451 value, 4);
1426 cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_OT_CFG2, value[0], 1452 dev_dbg(dev->dev,
1427 value[1], value[2], value[3]); 1453 "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_OT_CFG2, value[0],
1454 value[1], value[2], value[3]);
1428 cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, PWR_CTL_EN, 1455 cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, PWR_CTL_EN,
1429 value, 4); 1456 value, 4);
1430 cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", PWR_CTL_EN, value[0], 1457 dev_dbg(dev->dev,
1431 value[1], value[2], value[3]); 1458 "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", PWR_CTL_EN, value[0],
1432 1459 value[1], value[2], value[3]);
1433
1434} 1460}
1461#endif
1435 1462
1436void cx231xx_Setup_AFE_for_LowIF(struct cx231xx *dev) 1463void cx231xx_Setup_AFE_for_LowIF(struct cx231xx *dev)
1437 1464
@@ -1497,7 +1524,7 @@ void cx231xx_set_Colibri_For_LowIF(struct cx231xx *dev, u32 if_freq,
1497 u32 standard = 0; 1524 u32 standard = 0;
1498 u8 value[4] = { 0, 0, 0, 0 }; 1525 u8 value[4] = { 0, 0, 0, 0 };
1499 1526
1500 cx231xx_info("Enter cx231xx_set_Colibri_For_LowIF()\n"); 1527 dev_dbg(dev->dev, "Enter cx231xx_set_Colibri_For_LowIF()\n");
1501 value[0] = (u8) 0x6F; 1528 value[0] = (u8) 0x6F;
1502 value[1] = (u8) 0x6F; 1529 value[1] = (u8) 0x6F;
1503 value[2] = (u8) 0x6F; 1530 value[2] = (u8) 0x6F;
@@ -1517,7 +1544,7 @@ void cx231xx_set_Colibri_For_LowIF(struct cx231xx *dev, u32 if_freq,
1517 colibri_carrier_offset = cx231xx_Get_Colibri_CarrierOffset(mode, 1544 colibri_carrier_offset = cx231xx_Get_Colibri_CarrierOffset(mode,
1518 standard); 1545 standard);
1519 1546
1520 cx231xx_info("colibri_carrier_offset=%d, standard=0x%x\n", 1547 dev_dbg(dev->dev, "colibri_carrier_offset=%d, standard=0x%x\n",
1521 colibri_carrier_offset, standard); 1548 colibri_carrier_offset, standard);
1522 1549
1523 /* Set the band Pass filter for DIF*/ 1550 /* Set the band Pass filter for DIF*/
@@ -1551,8 +1578,8 @@ void cx231xx_set_DIF_bandpass(struct cx231xx *dev, u32 if_freq,
1551 u64 pll_freq_u64 = 0; 1578 u64 pll_freq_u64 = 0;
1552 u32 i = 0; 1579 u32 i = 0;
1553 1580
1554 cx231xx_info("if_freq=%d;spectral_invert=0x%x;mode=0x%x\n", 1581 dev_dbg(dev->dev, "if_freq=%d;spectral_invert=0x%x;mode=0x%x\n",
1555 if_freq, spectral_invert, mode); 1582 if_freq, spectral_invert, mode);
1556 1583
1557 1584
1558 if (mode == TUNER_MODE_FM_RADIO) { 1585 if (mode == TUNER_MODE_FM_RADIO) {
@@ -1595,8 +1622,7 @@ void cx231xx_set_DIF_bandpass(struct cx231xx *dev, u32 if_freq,
1595 if_freq = 16000000; 1622 if_freq = 16000000;
1596 } 1623 }
1597 1624
1598 cx231xx_info("Enter IF=%zu\n", 1625 dev_dbg(dev->dev, "Enter IF=%zu\n", ARRAY_SIZE(Dif_set_array));
1599 ARRAY_SIZE(Dif_set_array));
1600 for (i = 0; i < ARRAY_SIZE(Dif_set_array); i++) { 1626 for (i = 0; i < ARRAY_SIZE(Dif_set_array); i++) {
1601 if (Dif_set_array[i].if_freq == if_freq) { 1627 if (Dif_set_array[i].if_freq == if_freq) {
1602 vid_blk_write_word(dev, 1628 vid_blk_write_word(dev,
@@ -1708,7 +1734,7 @@ int cx231xx_dif_set_standard(struct cx231xx *dev, u32 standard)
1708 u32 dif_misc_ctrl_value = 0; 1734 u32 dif_misc_ctrl_value = 0;
1709 u32 func_mode = 0; 1735 u32 func_mode = 0;
1710 1736
1711 cx231xx_info("%s: setStandard to %x\n", __func__, standard); 1737 dev_dbg(dev->dev, "%s: setStandard to %x\n", __func__, standard);
1712 1738
1713 status = vid_blk_read_word(dev, DIF_MISC_CTRL, &dif_misc_ctrl_value); 1739 status = vid_blk_read_word(dev, DIF_MISC_CTRL, &dif_misc_ctrl_value);
1714 if (standard != DIF_USE_BASEBAND) 1740 if (standard != DIF_USE_BASEBAND)
@@ -2111,8 +2137,8 @@ int cx231xx_tuner_post_channel_change(struct cx231xx *dev)
2111{ 2137{
2112 int status = 0; 2138 int status = 0;
2113 u32 dwval; 2139 u32 dwval;
2114 cx231xx_info("cx231xx_tuner_post_channel_change dev->tuner_type =0%d\n", 2140 dev_dbg(dev->dev, "%s: dev->tuner_type =0%d\n",
2115 dev->tuner_type); 2141 __func__, dev->tuner_type);
2116 /* Set the RF and IF k_agc values to 4 for PAL/NTSC and 8 for 2142 /* Set the RF and IF k_agc values to 4 for PAL/NTSC and 8 for
2117 * SECAM L/B/D standards */ 2143 * SECAM L/B/D standards */
2118 status = vid_blk_read_word(dev, DIF_AGC_IF_REF, &dwval); 2144 status = vid_blk_read_word(dev, DIF_AGC_IF_REF, &dwval);
@@ -2213,8 +2239,8 @@ int cx231xx_set_power_mode(struct cx231xx *dev, enum AV_MODE mode)
2213 if (dev->power_mode != mode) 2239 if (dev->power_mode != mode)
2214 dev->power_mode = mode; 2240 dev->power_mode = mode;
2215 else { 2241 else {
2216 cx231xx_info(" setPowerMode::mode = %d, No Change req.\n", 2242 dev_dbg(dev->dev, "%s: mode = %d, No Change req.\n",
2217 mode); 2243 __func__, mode);
2218 return 0; 2244 return 0;
2219 } 2245 }
2220 2246
@@ -2264,7 +2290,6 @@ int cx231xx_set_power_mode(struct cx231xx *dev, enum AV_MODE mode)
2264 case POLARIS_AVMODE_ANALOGT_TV: 2290 case POLARIS_AVMODE_ANALOGT_TV:
2265 2291
2266 tmp |= PWR_DEMOD_EN; 2292 tmp |= PWR_DEMOD_EN;
2267 tmp |= (I2C_DEMOD_EN);
2268 value[0] = (u8) tmp; 2293 value[0] = (u8) tmp;
2269 value[1] = (u8) (tmp >> 8); 2294 value[1] = (u8) (tmp >> 8);
2270 value[2] = (u8) (tmp >> 16); 2295 value[2] = (u8) (tmp >> 16);
@@ -2317,9 +2342,6 @@ int cx231xx_set_power_mode(struct cx231xx *dev, enum AV_MODE mode)
2317 } 2342 }
2318 2343
2319 if (dev->board.tuner_type != TUNER_ABSENT) { 2344 if (dev->board.tuner_type != TUNER_ABSENT) {
2320 /* Enable tuner */
2321 cx231xx_enable_i2c_port_3(dev, true);
2322
2323 /* reset the Tuner */ 2345 /* reset the Tuner */
2324 if (dev->board.tuner_gpio) 2346 if (dev->board.tuner_gpio)
2325 cx231xx_gpio_set(dev, dev->board.tuner_gpio); 2347 cx231xx_gpio_set(dev, dev->board.tuner_gpio);
@@ -2363,7 +2385,7 @@ int cx231xx_set_power_mode(struct cx231xx *dev, enum AV_MODE mode)
2363 } 2385 }
2364 2386
2365 tmp &= (~PWR_AV_MODE); 2387 tmp &= (~PWR_AV_MODE);
2366 tmp |= POLARIS_AVMODE_DIGITAL | I2C_DEMOD_EN; 2388 tmp |= POLARIS_AVMODE_DIGITAL;
2367 value[0] = (u8) tmp; 2389 value[0] = (u8) tmp;
2368 value[1] = (u8) (tmp >> 8); 2390 value[1] = (u8) (tmp >> 8);
2369 value[2] = (u8) (tmp >> 16); 2391 value[2] = (u8) (tmp >> 16);
@@ -2384,15 +2406,6 @@ int cx231xx_set_power_mode(struct cx231xx *dev, enum AV_MODE mode)
2384 } 2406 }
2385 2407
2386 if (dev->board.tuner_type != TUNER_ABSENT) { 2408 if (dev->board.tuner_type != TUNER_ABSENT) {
2387 /*
2388 * Enable tuner
2389 * Hauppauge Exeter seems to need to do something different!
2390 */
2391 if (dev->model == CX231XX_BOARD_HAUPPAUGE_EXETER)
2392 cx231xx_enable_i2c_port_3(dev, false);
2393 else
2394 cx231xx_enable_i2c_port_3(dev, true);
2395
2396 /* reset the Tuner */ 2409 /* reset the Tuner */
2397 if (dev->board.tuner_gpio) 2410 if (dev->board.tuner_gpio)
2398 cx231xx_gpio_set(dev, dev->board.tuner_gpio); 2411 cx231xx_gpio_set(dev, dev->board.tuner_gpio);
@@ -2466,7 +2479,7 @@ int cx231xx_start_stream(struct cx231xx *dev, u32 ep_mask)
2466 u32 tmp = 0; 2479 u32 tmp = 0;
2467 int status = 0; 2480 int status = 0;
2468 2481
2469 cx231xx_info("cx231xx_start_stream():: ep_mask = %x\n", ep_mask); 2482 dev_dbg(dev->dev, "%s: ep_mask = %x\n", __func__, ep_mask);
2470 status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, EP_MODE_SET, 2483 status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, EP_MODE_SET,
2471 value, 4); 2484 value, 4);
2472 if (status < 0) 2485 if (status < 0)
@@ -2491,7 +2504,7 @@ int cx231xx_stop_stream(struct cx231xx *dev, u32 ep_mask)
2491 u32 tmp = 0; 2504 u32 tmp = 0;
2492 int status = 0; 2505 int status = 0;
2493 2506
2494 cx231xx_info("cx231xx_stop_stream():: ep_mask = %x\n", ep_mask); 2507 dev_dbg(dev->dev, "%s: ep_mask = %x\n", __func__, ep_mask);
2495 status = 2508 status =
2496 cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, EP_MODE_SET, value, 4); 2509 cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, EP_MODE_SET, value, 4);
2497 if (status < 0) 2510 if (status < 0)
@@ -2519,61 +2532,72 @@ int cx231xx_initialize_stream_xfer(struct cx231xx *dev, u32 media_type)
2519 if (dev->udev->speed == USB_SPEED_HIGH) { 2532 if (dev->udev->speed == USB_SPEED_HIGH) {
2520 switch (media_type) { 2533 switch (media_type) {
2521 case Audio: 2534 case Audio:
2522 cx231xx_info("%s: Audio enter HANC\n", __func__); 2535 dev_dbg(dev->dev,
2536 "%s: Audio enter HANC\n", __func__);
2523 status = 2537 status =
2524 cx231xx_mode_register(dev, TS_MODE_REG, 0x9300); 2538 cx231xx_mode_register(dev, TS_MODE_REG, 0x9300);
2525 break; 2539 break;
2526 2540
2527 case Vbi: 2541 case Vbi:
2528 cx231xx_info("%s: set vanc registers\n", __func__); 2542 dev_dbg(dev->dev,
2543 "%s: set vanc registers\n", __func__);
2529 status = cx231xx_mode_register(dev, TS_MODE_REG, 0x300); 2544 status = cx231xx_mode_register(dev, TS_MODE_REG, 0x300);
2530 break; 2545 break;
2531 2546
2532 case Sliced_cc: 2547 case Sliced_cc:
2533 cx231xx_info("%s: set hanc registers\n", __func__); 2548 dev_dbg(dev->dev,
2549 "%s: set hanc registers\n", __func__);
2534 status = 2550 status =
2535 cx231xx_mode_register(dev, TS_MODE_REG, 0x1300); 2551 cx231xx_mode_register(dev, TS_MODE_REG, 0x1300);
2536 break; 2552 break;
2537 2553
2538 case Raw_Video: 2554 case Raw_Video:
2539 cx231xx_info("%s: set video registers\n", __func__); 2555 dev_dbg(dev->dev,
2556 "%s: set video registers\n", __func__);
2540 status = cx231xx_mode_register(dev, TS_MODE_REG, 0x100); 2557 status = cx231xx_mode_register(dev, TS_MODE_REG, 0x100);
2541 break; 2558 break;
2542 2559
2543 case TS1_serial_mode: 2560 case TS1_serial_mode:
2544 cx231xx_info("%s: set ts1 registers", __func__); 2561 dev_dbg(dev->dev,
2545 2562 "%s: set ts1 registers", __func__);
2546 if (dev->board.has_417) { 2563
2547 cx231xx_info(" MPEG\n"); 2564 if (dev->board.has_417) {
2548 value &= 0xFFFFFFFC; 2565 dev_dbg(dev->dev,
2549 value |= 0x3; 2566 "%s: MPEG\n", __func__);
2550 2567 value &= 0xFFFFFFFC;
2551 status = cx231xx_mode_register(dev, TS_MODE_REG, value); 2568 value |= 0x3;
2552 2569
2553 val[0] = 0x04; 2570 status = cx231xx_mode_register(dev,
2554 val[1] = 0xA3; 2571 TS_MODE_REG, value);
2555 val[2] = 0x3B; 2572
2556 val[3] = 0x00; 2573 val[0] = 0x04;
2557 status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, 2574 val[1] = 0xA3;
2558 TS1_CFG_REG, val, 4); 2575 val[2] = 0x3B;
2559 2576 val[3] = 0x00;
2560 val[0] = 0x00; 2577 status = cx231xx_write_ctrl_reg(dev,
2561 val[1] = 0x08; 2578 VRT_SET_REGISTER,
2562 val[2] = 0x00; 2579 TS1_CFG_REG, val, 4);
2563 val[3] = 0x08; 2580
2564 status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, 2581 val[0] = 0x00;
2565 TS1_LENGTH_REG, val, 4); 2582 val[1] = 0x08;
2566 2583 val[2] = 0x00;
2567 } else { 2584 val[3] = 0x08;
2568 cx231xx_info(" BDA\n"); 2585 status = cx231xx_write_ctrl_reg(dev,
2569 status = cx231xx_mode_register(dev, TS_MODE_REG, 0x101); 2586 VRT_SET_REGISTER,
2570 status = cx231xx_mode_register(dev, TS1_CFG_REG, 0x010); 2587 TS1_LENGTH_REG, val, 4);
2571 } 2588 } else {
2589 dev_dbg(dev->dev, "%s: BDA\n", __func__);
2590 status = cx231xx_mode_register(dev,
2591 TS_MODE_REG, 0x101);
2592 status = cx231xx_mode_register(dev,
2593 TS1_CFG_REG, 0x010);
2594 }
2572 break; 2595 break;
2573 2596
2574 case TS1_parallel_mode: 2597 case TS1_parallel_mode:
2575 cx231xx_info("%s: set ts1 parallel mode registers\n", 2598 dev_dbg(dev->dev,
2576 __func__); 2599 "%s: set ts1 parallel mode registers\n",
2600 __func__);
2577 status = cx231xx_mode_register(dev, TS_MODE_REG, 0x100); 2601 status = cx231xx_mode_register(dev, TS_MODE_REG, 0x100);
2578 status = cx231xx_mode_register(dev, TS1_CFG_REG, 0x400); 2602 status = cx231xx_mode_register(dev, TS1_CFG_REG, 0x400);
2579 break; 2603 break;
@@ -2926,8 +2950,9 @@ int cx231xx_gpio_i2c_read_ack(struct cx231xx *dev)
2926 (nCnt > 0)); 2950 (nCnt > 0));
2927 2951
2928 if (nCnt == 0) 2952 if (nCnt == 0)
2929 cx231xx_info("No ACK after %d msec -GPIO I2C failed!", 2953 dev_dbg(dev->dev,
2930 nInit * 10); 2954 "No ACK after %d msec -GPIO I2C failed!",
2955 nInit * 10);
2931 2956
2932 /* 2957 /*
2933 * readAck 2958 * readAck
diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c
index 791f00c6276b..ae05d591f228 100644
--- a/drivers/media/usb/cx231xx/cx231xx-cards.c
+++ b/drivers/media/usb/cx231xx/cx231xx-cards.c
@@ -20,12 +20,12 @@
20 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 20 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */ 21 */
22 22
23#include "cx231xx.h"
23#include <linux/init.h> 24#include <linux/init.h>
24#include <linux/module.h> 25#include <linux/module.h>
25#include <linux/slab.h> 26#include <linux/slab.h>
26#include <linux/delay.h> 27#include <linux/delay.h>
27#include <linux/i2c.h> 28#include <linux/i2c.h>
28#include <linux/usb.h>
29#include <media/tuner.h> 29#include <media/tuner.h>
30#include <media/tveeprom.h> 30#include <media/tveeprom.h>
31#include <media/v4l2-common.h> 31#include <media/v4l2-common.h>
@@ -35,7 +35,6 @@
35#include "xc5000.h" 35#include "xc5000.h"
36#include "tda18271.h" 36#include "tda18271.h"
37 37
38#include "cx231xx.h"
39 38
40static int tuner = -1; 39static int tuner = -1;
41module_param(tuner, int, 0444); 40module_param(tuner, int, 0444);
@@ -104,8 +103,8 @@ struct cx231xx_board cx231xx_boards[] = {
104 .ctl_pin_status_mask = 0xFFFFFFC4, 103 .ctl_pin_status_mask = 0xFFFFFFC4,
105 .agc_analog_digital_select_gpio = 0x0c, 104 .agc_analog_digital_select_gpio = 0x0c,
106 .gpio_pin_status_mask = 0x4001000, 105 .gpio_pin_status_mask = 0x4001000,
107 .tuner_i2c_master = 1, 106 .tuner_i2c_master = I2C_1_MUX_3,
108 .demod_i2c_master = 2, 107 .demod_i2c_master = I2C_2,
109 .has_dvb = 1, 108 .has_dvb = 1,
110 .demod_addr = 0x02, 109 .demod_addr = 0x02,
111 .norm = V4L2_STD_PAL, 110 .norm = V4L2_STD_PAL,
@@ -144,8 +143,8 @@ struct cx231xx_board cx231xx_boards[] = {
144 .ctl_pin_status_mask = 0xFFFFFFC4, 143 .ctl_pin_status_mask = 0xFFFFFFC4,
145 .agc_analog_digital_select_gpio = 0x0c, 144 .agc_analog_digital_select_gpio = 0x0c,
146 .gpio_pin_status_mask = 0x4001000, 145 .gpio_pin_status_mask = 0x4001000,
147 .tuner_i2c_master = 1, 146 .tuner_i2c_master = I2C_1_MUX_3,
148 .demod_i2c_master = 2, 147 .demod_i2c_master = I2C_2,
149 .has_dvb = 1, 148 .has_dvb = 1,
150 .demod_addr = 0x32, 149 .demod_addr = 0x32,
151 .norm = V4L2_STD_NTSC, 150 .norm = V4L2_STD_NTSC,
@@ -184,8 +183,8 @@ struct cx231xx_board cx231xx_boards[] = {
184 .ctl_pin_status_mask = 0xFFFFFFC4, 183 .ctl_pin_status_mask = 0xFFFFFFC4,
185 .agc_analog_digital_select_gpio = 0x1c, 184 .agc_analog_digital_select_gpio = 0x1c,
186 .gpio_pin_status_mask = 0x4001000, 185 .gpio_pin_status_mask = 0x4001000,
187 .tuner_i2c_master = 1, 186 .tuner_i2c_master = I2C_1_MUX_3,
188 .demod_i2c_master = 2, 187 .demod_i2c_master = I2C_2,
189 .has_dvb = 1, 188 .has_dvb = 1,
190 .demod_addr = 0x02, 189 .demod_addr = 0x02,
191 .norm = V4L2_STD_PAL, 190 .norm = V4L2_STD_PAL,
@@ -225,8 +224,8 @@ struct cx231xx_board cx231xx_boards[] = {
225 .ctl_pin_status_mask = 0xFFFFFFC4, 224 .ctl_pin_status_mask = 0xFFFFFFC4,
226 .agc_analog_digital_select_gpio = 0x1c, 225 .agc_analog_digital_select_gpio = 0x1c,
227 .gpio_pin_status_mask = 0x4001000, 226 .gpio_pin_status_mask = 0x4001000,
228 .tuner_i2c_master = 1, 227 .tuner_i2c_master = I2C_1_MUX_3,
229 .demod_i2c_master = 2, 228 .demod_i2c_master = I2C_2,
230 .has_dvb = 1, 229 .has_dvb = 1,
231 .demod_addr = 0x02, 230 .demod_addr = 0x02,
232 .norm = V4L2_STD_PAL, 231 .norm = V4L2_STD_PAL,
@@ -262,7 +261,6 @@ struct cx231xx_board cx231xx_boards[] = {
262 .norm = V4L2_STD_PAL, 261 .norm = V4L2_STD_PAL,
263 .no_alt_vanc = 1, 262 .no_alt_vanc = 1,
264 .external_av = 1, 263 .external_av = 1,
265 .dont_use_port_3 = 1,
266 /* Actually, it has a 417, but it isn't working correctly. 264 /* Actually, it has a 417, but it isn't working correctly.
267 * So set to 0 for now until someone can manage to get this 265 * So set to 0 for now until someone can manage to get this
268 * to work reliably. */ 266 * to work reliably. */
@@ -297,8 +295,8 @@ struct cx231xx_board cx231xx_boards[] = {
297 .ctl_pin_status_mask = 0xFFFFFFC4, 295 .ctl_pin_status_mask = 0xFFFFFFC4,
298 .agc_analog_digital_select_gpio = 0x0c, 296 .agc_analog_digital_select_gpio = 0x0c,
299 .gpio_pin_status_mask = 0x4001000, 297 .gpio_pin_status_mask = 0x4001000,
300 .tuner_i2c_master = 1, 298 .tuner_i2c_master = I2C_1_MUX_3,
301 .demod_i2c_master = 2, 299 .demod_i2c_master = I2C_2,
302 .has_dvb = 1, 300 .has_dvb = 1,
303 .demod_addr = 0x02, 301 .demod_addr = 0x02,
304 .norm = V4L2_STD_PAL, 302 .norm = V4L2_STD_PAL,
@@ -325,8 +323,8 @@ struct cx231xx_board cx231xx_boards[] = {
325 .ctl_pin_status_mask = 0xFFFFFFC4, 323 .ctl_pin_status_mask = 0xFFFFFFC4,
326 .agc_analog_digital_select_gpio = 0x0c, 324 .agc_analog_digital_select_gpio = 0x0c,
327 .gpio_pin_status_mask = 0x4001000, 325 .gpio_pin_status_mask = 0x4001000,
328 .tuner_i2c_master = 1, 326 .tuner_i2c_master = I2C_1_MUX_3,
329 .demod_i2c_master = 2, 327 .demod_i2c_master = I2C_2,
330 .has_dvb = 1, 328 .has_dvb = 1,
331 .demod_addr = 0x32, 329 .demod_addr = 0x32,
332 .norm = V4L2_STD_NTSC, 330 .norm = V4L2_STD_NTSC,
@@ -353,8 +351,8 @@ struct cx231xx_board cx231xx_boards[] = {
353 .ctl_pin_status_mask = 0xFFFFFFC4, 351 .ctl_pin_status_mask = 0xFFFFFFC4,
354 .agc_analog_digital_select_gpio = 0x0c, 352 .agc_analog_digital_select_gpio = 0x0c,
355 .gpio_pin_status_mask = 0x4001000, 353 .gpio_pin_status_mask = 0x4001000,
356 .tuner_i2c_master = 1, 354 .tuner_i2c_master = I2C_1_MUX_1,
357 .demod_i2c_master = 2, 355 .demod_i2c_master = I2C_2,
358 .has_dvb = 1, 356 .has_dvb = 1,
359 .demod_addr = 0x0e, 357 .demod_addr = 0x0e,
360 .norm = V4L2_STD_NTSC, 358 .norm = V4L2_STD_NTSC,
@@ -390,7 +388,6 @@ struct cx231xx_board cx231xx_boards[] = {
390 .norm = V4L2_STD_NTSC, 388 .norm = V4L2_STD_NTSC,
391 .no_alt_vanc = 1, 389 .no_alt_vanc = 1,
392 .external_av = 1, 390 .external_av = 1,
393 .dont_use_port_3 = 1,
394 .input = {{ 391 .input = {{
395 .type = CX231XX_VMUX_COMPOSITE1, 392 .type = CX231XX_VMUX_COMPOSITE1,
396 .vmux = CX231XX_VIN_2_1, 393 .vmux = CX231XX_VIN_2_1,
@@ -418,9 +415,9 @@ struct cx231xx_board cx231xx_boards[] = {
418 .tuner_scl_gpio = -1, 415 .tuner_scl_gpio = -1,
419 .tuner_sda_gpio = -1, 416 .tuner_sda_gpio = -1,
420 .gpio_pin_status_mask = 0x4001000, 417 .gpio_pin_status_mask = 0x4001000,
421 .tuner_i2c_master = 2, 418 .tuner_i2c_master = I2C_2,
422 .demod_i2c_master = 1, 419 .demod_i2c_master = I2C_1_MUX_3,
423 .ir_i2c_master = 2, 420 .ir_i2c_master = I2C_2,
424 .has_dvb = 1, 421 .has_dvb = 1,
425 .demod_addr = 0x10, 422 .demod_addr = 0x10,
426 .norm = V4L2_STD_PAL_M, 423 .norm = V4L2_STD_PAL_M,
@@ -456,9 +453,9 @@ struct cx231xx_board cx231xx_boards[] = {
456 .tuner_scl_gpio = -1, 453 .tuner_scl_gpio = -1,
457 .tuner_sda_gpio = -1, 454 .tuner_sda_gpio = -1,
458 .gpio_pin_status_mask = 0x4001000, 455 .gpio_pin_status_mask = 0x4001000,
459 .tuner_i2c_master = 2, 456 .tuner_i2c_master = I2C_2,
460 .demod_i2c_master = 1, 457 .demod_i2c_master = I2C_1_MUX_3,
461 .ir_i2c_master = 2, 458 .ir_i2c_master = I2C_2,
462 .has_dvb = 1, 459 .has_dvb = 1,
463 .demod_addr = 0x10, 460 .demod_addr = 0x10,
464 .norm = V4L2_STD_NTSC_M, 461 .norm = V4L2_STD_NTSC_M,
@@ -494,9 +491,9 @@ struct cx231xx_board cx231xx_boards[] = {
494 .tuner_scl_gpio = -1, 491 .tuner_scl_gpio = -1,
495 .tuner_sda_gpio = -1, 492 .tuner_sda_gpio = -1,
496 .gpio_pin_status_mask = 0x4001000, 493 .gpio_pin_status_mask = 0x4001000,
497 .tuner_i2c_master = 2, 494 .tuner_i2c_master = I2C_2,
498 .demod_i2c_master = 1, 495 .demod_i2c_master = I2C_1_MUX_3,
499 .ir_i2c_master = 2, 496 .ir_i2c_master = I2C_2,
500 .rc_map_name = RC_MAP_PIXELVIEW_002T, 497 .rc_map_name = RC_MAP_PIXELVIEW_002T,
501 .has_dvb = 1, 498 .has_dvb = 1,
502 .demod_addr = 0x10, 499 .demod_addr = 0x10,
@@ -532,7 +529,6 @@ struct cx231xx_board cx231xx_boards[] = {
532 .norm = V4L2_STD_NTSC, 529 .norm = V4L2_STD_NTSC,
533 .no_alt_vanc = 1, 530 .no_alt_vanc = 1,
534 .external_av = 1, 531 .external_av = 1,
535 .dont_use_port_3 = 1,
536 532
537 .input = {{ 533 .input = {{
538 .type = CX231XX_VMUX_COMPOSITE1, 534 .type = CX231XX_VMUX_COMPOSITE1,
@@ -587,7 +583,7 @@ struct cx231xx_board cx231xx_boards[] = {
587 .ctl_pin_status_mask = 0xFFFFFFC4, 583 .ctl_pin_status_mask = 0xFFFFFFC4,
588 .agc_analog_digital_select_gpio = 0x0c, 584 .agc_analog_digital_select_gpio = 0x0c,
589 .gpio_pin_status_mask = 0x4001000, 585 .gpio_pin_status_mask = 0x4001000,
590 .tuner_i2c_master = 1, 586 .tuner_i2c_master = I2C_1_MUX_3,
591 .norm = V4L2_STD_PAL, 587 .norm = V4L2_STD_PAL,
592 588
593 .input = {{ 589 .input = {{
@@ -622,7 +618,7 @@ struct cx231xx_board cx231xx_boards[] = {
622 .ctl_pin_status_mask = 0xFFFFFFC4, 618 .ctl_pin_status_mask = 0xFFFFFFC4,
623 .agc_analog_digital_select_gpio = 0x0c, 619 .agc_analog_digital_select_gpio = 0x0c,
624 .gpio_pin_status_mask = 0x4001000, 620 .gpio_pin_status_mask = 0x4001000,
625 .tuner_i2c_master = 1, 621 .tuner_i2c_master = I2C_1_MUX_3,
626 .norm = V4L2_STD_NTSC, 622 .norm = V4L2_STD_NTSC,
627 623
628 .input = {{ 624 .input = {{
@@ -656,7 +652,6 @@ struct cx231xx_board cx231xx_boards[] = {
656 .norm = V4L2_STD_NTSC, 652 .norm = V4L2_STD_NTSC,
657 .no_alt_vanc = 1, 653 .no_alt_vanc = 1,
658 .external_av = 1, 654 .external_av = 1,
659 .dont_use_port_3 = 1,
660 .input = {{ 655 .input = {{
661 .type = CX231XX_VMUX_COMPOSITE1, 656 .type = CX231XX_VMUX_COMPOSITE1,
662 .vmux = CX231XX_VIN_2_1, 657 .vmux = CX231XX_VIN_2_1,
@@ -677,13 +672,12 @@ struct cx231xx_board cx231xx_boards[] = {
677 .decoder = CX231XX_AVDECODER, 672 .decoder = CX231XX_AVDECODER,
678 .output_mode = OUT_MODE_VIP11, 673 .output_mode = OUT_MODE_VIP11,
679 .ctl_pin_status_mask = 0xFFFFFFC4, 674 .ctl_pin_status_mask = 0xFFFFFFC4,
680 .agc_analog_digital_select_gpio = 0x0c, 675 .agc_analog_digital_select_gpio = 0x0c,
681 /* According with PV CxPlrCAP.inf file */ 676 /* According with PV CxPlrCAP.inf file */
682 .gpio_pin_status_mask = 0x4001000, 677 .gpio_pin_status_mask = 0x4001000,
683 .norm = V4L2_STD_NTSC, 678 .norm = V4L2_STD_NTSC,
684 .no_alt_vanc = 1, 679 .no_alt_vanc = 1,
685 .external_av = 1, 680 .external_av = 1,
686 .dont_use_port_3 = 1,
687 /*.has_417 = 1, */ 681 /*.has_417 = 1, */
688 /* This board is believed to have a hardware encoding chip 682 /* This board is believed to have a hardware encoding chip
689 * supporting mpeg1/2/4, but as the 417 is apparently not 683 * supporting mpeg1/2/4, but as the 417 is apparently not
@@ -718,8 +712,8 @@ struct cx231xx_board cx231xx_boards[] = {
718 .ctl_pin_status_mask = 0xFFFFFFC4, 712 .ctl_pin_status_mask = 0xFFFFFFC4,
719 .agc_analog_digital_select_gpio = 0x0c, 713 .agc_analog_digital_select_gpio = 0x0c,
720 .gpio_pin_status_mask = 0x4001000, 714 .gpio_pin_status_mask = 0x4001000,
721 .tuner_i2c_master = 1, 715 .tuner_i2c_master = I2C_1_MUX_3,
722 .demod_i2c_master = 2, 716 .demod_i2c_master = I2C_2,
723 .has_dvb = 1, 717 .has_dvb = 1,
724 .demod_addr = 0x0e, 718 .demod_addr = 0x0e,
725 .norm = V4L2_STD_PAL, 719 .norm = V4L2_STD_PAL,
@@ -757,8 +751,8 @@ struct cx231xx_board cx231xx_boards[] = {
757 .ctl_pin_status_mask = 0xFFFFFFC4, 751 .ctl_pin_status_mask = 0xFFFFFFC4,
758 .agc_analog_digital_select_gpio = 0x0c, 752 .agc_analog_digital_select_gpio = 0x0c,
759 .gpio_pin_status_mask = 0x4001000, 753 .gpio_pin_status_mask = 0x4001000,
760 .tuner_i2c_master = 1, 754 .tuner_i2c_master = I2C_1_MUX_3,
761 .demod_i2c_master = 2, 755 .demod_i2c_master = I2C_2,
762 .has_dvb = 1, 756 .has_dvb = 1,
763 .demod_addr = 0x0e, 757 .demod_addr = 0x0e,
764 .norm = V4L2_STD_PAL, 758 .norm = V4L2_STD_PAL,
@@ -861,9 +855,9 @@ int cx231xx_tuner_callback(void *ptr, int component, int command, int arg)
861 855
862 if (dev->tuner_type == TUNER_XC5000) { 856 if (dev->tuner_type == TUNER_XC5000) {
863 if (command == XC5000_TUNER_RESET) { 857 if (command == XC5000_TUNER_RESET) {
864 cx231xx_info 858 dev_dbg(dev->dev,
865 ("Tuner CB: RESET: cmd %d : tuner type %d \n", 859 "Tuner CB: RESET: cmd %d : tuner type %d\n",
866 command, dev->tuner_type); 860 command, dev->tuner_type);
867 cx231xx_set_gpio_value(dev, dev->board.tuner_gpio->bit, 861 cx231xx_set_gpio_value(dev, dev->board.tuner_gpio->bit,
868 1); 862 1);
869 msleep(10); 863 msleep(10);
@@ -921,8 +915,8 @@ void cx231xx_pre_card_setup(struct cx231xx *dev)
921 915
922 cx231xx_set_model(dev); 916 cx231xx_set_model(dev);
923 917
924 cx231xx_info("Identified as %s (card=%d)\n", 918 dev_info(dev->dev, "Identified as %s (card=%d)\n",
925 dev->board.name, dev->model); 919 dev->board.name, dev->model);
926 920
927 /* set the direction for GPIO pins */ 921 /* set the direction for GPIO pins */
928 if (dev->board.tuner_gpio) { 922 if (dev->board.tuner_gpio) {
@@ -980,25 +974,22 @@ static void cx231xx_config_tuner(struct cx231xx *dev)
980 974
981} 975}
982 976
983static int read_eeprom(struct cx231xx *dev, u8 *eedata, int len) 977static int read_eeprom(struct cx231xx *dev, struct i2c_client *client,
978 u8 *eedata, int len)
984{ 979{
985 int ret = 0; 980 int ret = 0;
986 u8 addr = 0xa0 >> 1;
987 u8 start_offset = 0; 981 u8 start_offset = 0;
988 int len_todo = len; 982 int len_todo = len;
989 u8 *eedata_cur = eedata; 983 u8 *eedata_cur = eedata;
990 int i; 984 int i;
991 struct i2c_msg msg_write = { .addr = addr, .flags = 0, 985 struct i2c_msg msg_write = { .addr = client->addr, .flags = 0,
992 .buf = &start_offset, .len = 1 }; 986 .buf = &start_offset, .len = 1 };
993 struct i2c_msg msg_read = { .addr = addr, .flags = I2C_M_RD }; 987 struct i2c_msg msg_read = { .addr = client->addr, .flags = I2C_M_RD };
994
995 /* mutex_lock(&dev->i2c_lock); */
996 cx231xx_enable_i2c_port_3(dev, false);
997 988
998 /* start reading at offset 0 */ 989 /* start reading at offset 0 */
999 ret = i2c_transfer(&dev->i2c_bus[1].i2c_adap, &msg_write, 1); 990 ret = i2c_transfer(client->adapter, &msg_write, 1);
1000 if (ret < 0) { 991 if (ret < 0) {
1001 cx231xx_err("Can't read eeprom\n"); 992 dev_err(dev->dev, "Can't read eeprom\n");
1002 return ret; 993 return ret;
1003 } 994 }
1004 995
@@ -1006,20 +997,18 @@ static int read_eeprom(struct cx231xx *dev, u8 *eedata, int len)
1006 msg_read.len = (len_todo > 64) ? 64 : len_todo; 997 msg_read.len = (len_todo > 64) ? 64 : len_todo;
1007 msg_read.buf = eedata_cur; 998 msg_read.buf = eedata_cur;
1008 999
1009 ret = i2c_transfer(&dev->i2c_bus[1].i2c_adap, &msg_read, 1); 1000 ret = i2c_transfer(client->adapter, &msg_read, 1);
1010 if (ret < 0) { 1001 if (ret < 0) {
1011 cx231xx_err("Can't read eeprom\n"); 1002 dev_err(dev->dev, "Can't read eeprom\n");
1012 return ret; 1003 return ret;
1013 } 1004 }
1014 eedata_cur += msg_read.len; 1005 eedata_cur += msg_read.len;
1015 len_todo -= msg_read.len; 1006 len_todo -= msg_read.len;
1016 } 1007 }
1017 1008
1018 cx231xx_enable_i2c_port_3(dev, true);
1019 /* mutex_unlock(&dev->i2c_lock); */
1020
1021 for (i = 0; i + 15 < len; i += 16) 1009 for (i = 0; i + 15 < len; i += 16)
1022 cx231xx_info("i2c eeprom %02x: %*ph\n", i, 16, &eedata[i]); 1010 dev_dbg(dev->dev, "i2c eeprom %02x: %*ph\n",
1011 i, 16, &eedata[i]);
1023 1012
1024 return 0; 1013 return 0;
1025} 1014}
@@ -1036,22 +1025,26 @@ void cx231xx_card_setup(struct cx231xx *dev)
1036 /* request some modules */ 1025 /* request some modules */
1037 if (dev->board.decoder == CX231XX_AVDECODER) { 1026 if (dev->board.decoder == CX231XX_AVDECODER) {
1038 dev->sd_cx25840 = v4l2_i2c_new_subdev(&dev->v4l2_dev, 1027 dev->sd_cx25840 = v4l2_i2c_new_subdev(&dev->v4l2_dev,
1039 &dev->i2c_bus[0].i2c_adap, 1028 cx231xx_get_i2c_adap(dev, I2C_0),
1040 "cx25840", 0x88 >> 1, NULL); 1029 "cx25840", 0x88 >> 1, NULL);
1041 if (dev->sd_cx25840 == NULL) 1030 if (dev->sd_cx25840 == NULL)
1042 cx231xx_info("cx25840 subdev registration failure\n"); 1031 dev_err(dev->dev,
1032 "cx25840 subdev registration failure\n");
1043 cx25840_call(dev, core, load_fw); 1033 cx25840_call(dev, core, load_fw);
1044 1034
1045 } 1035 }
1046 1036
1047 /* Initialize the tuner */ 1037 /* Initialize the tuner */
1048 if (dev->board.tuner_type != TUNER_ABSENT) { 1038 if (dev->board.tuner_type != TUNER_ABSENT) {
1039 struct i2c_adapter *tuner_i2c = cx231xx_get_i2c_adap(dev,
1040 dev->board.tuner_i2c_master);
1049 dev->sd_tuner = v4l2_i2c_new_subdev(&dev->v4l2_dev, 1041 dev->sd_tuner = v4l2_i2c_new_subdev(&dev->v4l2_dev,
1050 &dev->i2c_bus[dev->board.tuner_i2c_master].i2c_adap, 1042 tuner_i2c,
1051 "tuner", 1043 "tuner",
1052 dev->tuner_addr, NULL); 1044 dev->tuner_addr, NULL);
1053 if (dev->sd_tuner == NULL) 1045 if (dev->sd_tuner == NULL)
1054 cx231xx_info("tuner subdev registration failure\n"); 1046 dev_err(dev->dev,
1047 "tuner subdev registration failure\n");
1055 else 1048 else
1056 cx231xx_config_tuner(dev); 1049 cx231xx_config_tuner(dev);
1057 } 1050 }
@@ -1062,9 +1055,14 @@ void cx231xx_card_setup(struct cx231xx *dev)
1062 { 1055 {
1063 struct tveeprom tvee; 1056 struct tveeprom tvee;
1064 static u8 eeprom[256]; 1057 static u8 eeprom[256];
1058 struct i2c_client client;
1059
1060 memset(&client, 0, sizeof(client));
1061 client.adapter = cx231xx_get_i2c_adap(dev, I2C_1_MUX_1);
1062 client.addr = 0xa0 >> 1;
1065 1063
1066 read_eeprom(dev, eeprom, sizeof(eeprom)); 1064 read_eeprom(dev, &client, eeprom, sizeof(eeprom));
1067 tveeprom_hauppauge_analog(&dev->i2c_bus[1].i2c_client, 1065 tveeprom_hauppauge_analog(&client,
1068 &tvee, eeprom + 0xc0); 1066 &tvee, eeprom + 0xc0);
1069 break; 1067 break;
1070 } 1068 }
@@ -1152,7 +1150,7 @@ static int cx231xx_init_dev(struct cx231xx *dev, struct usb_device *udev,
1152 /* Query cx231xx to find what pcb config it is related to */ 1150 /* Query cx231xx to find what pcb config it is related to */
1153 retval = initialize_cx231xx(dev); 1151 retval = initialize_cx231xx(dev);
1154 if (retval < 0) { 1152 if (retval < 0) {
1155 cx231xx_errdev("Failed to read PCB config\n"); 1153 dev_err(dev->dev, "Failed to read PCB config\n");
1156 return retval; 1154 return retval;
1157 } 1155 }
1158 1156
@@ -1168,7 +1166,7 @@ static int cx231xx_init_dev(struct cx231xx *dev, struct usb_device *udev,
1168 1166
1169 retval = cx231xx_config(dev); 1167 retval = cx231xx_config(dev);
1170 if (retval) { 1168 if (retval) {
1171 cx231xx_errdev("error configuring device\n"); 1169 dev_err(dev->dev, "error configuring device\n");
1172 return -ENOMEM; 1170 return -ENOMEM;
1173 } 1171 }
1174 1172
@@ -1178,8 +1176,9 @@ static int cx231xx_init_dev(struct cx231xx *dev, struct usb_device *udev,
1178 /* register i2c bus */ 1176 /* register i2c bus */
1179 retval = cx231xx_dev_init(dev); 1177 retval = cx231xx_dev_init(dev);
1180 if (retval) { 1178 if (retval) {
1181 cx231xx_errdev("%s: cx231xx_i2c_register - errCode [%d]!\n", 1179 dev_err(dev->dev,
1182 __func__, retval); 1180 "%s: cx231xx_i2c_register - errCode [%d]!\n",
1181 __func__, retval);
1183 goto err_dev_init; 1182 goto err_dev_init;
1184 } 1183 }
1185 1184
@@ -1200,8 +1199,8 @@ static int cx231xx_init_dev(struct cx231xx *dev, struct usb_device *udev,
1200 1199
1201 retval = cx231xx_config(dev); 1200 retval = cx231xx_config(dev);
1202 if (retval) { 1201 if (retval) {
1203 cx231xx_errdev("%s: cx231xx_config - errCode [%d]!\n", 1202 dev_err(dev->dev, "%s: cx231xx_config - errCode [%d]!\n",
1204 __func__, retval); 1203 __func__, retval);
1205 goto err_dev_init; 1204 goto err_dev_init;
1206 } 1205 }
1207 1206
@@ -1217,11 +1216,11 @@ static int cx231xx_init_dev(struct cx231xx *dev, struct usb_device *udev,
1217 cx231xx_add_into_devlist(dev); 1216 cx231xx_add_into_devlist(dev);
1218 1217
1219 if (dev->board.has_417) { 1218 if (dev->board.has_417) {
1220 printk(KERN_INFO "attach 417 %d\n", dev->model); 1219 dev_info(dev->dev, "attach 417 %d\n", dev->model);
1221 if (cx231xx_417_register(dev) < 0) { 1220 if (cx231xx_417_register(dev) < 0) {
1222 printk(KERN_ERR 1221 dev_err(dev->dev,
1223 "%s() Failed to register 417 on VID_B\n", 1222 "%s() Failed to register 417 on VID_B\n",
1224 __func__); 1223 __func__);
1225 } 1224 }
1226 } 1225 }
1227 1226
@@ -1285,7 +1284,8 @@ static int cx231xx_init_v4l2(struct cx231xx *dev,
1285 /* compute alternate max packet sizes for video */ 1284 /* compute alternate max packet sizes for video */
1286 idx = dev->current_pcb_config.hs_config_info[0].interface_info.video_index + 1; 1285 idx = dev->current_pcb_config.hs_config_info[0].interface_info.video_index + 1;
1287 if (idx >= dev->max_iad_interface_count) { 1286 if (idx >= dev->max_iad_interface_count) {
1288 cx231xx_errdev("Video PCB interface #%d doesn't exist\n", idx); 1287 dev_err(dev->dev,
1288 "Video PCB interface #%d doesn't exist\n", idx);
1289 return -ENODEV; 1289 return -ENODEV;
1290 } 1290 }
1291 1291
@@ -1294,28 +1294,29 @@ static int cx231xx_init_v4l2(struct cx231xx *dev,
1294 dev->video_mode.end_point_addr = uif->altsetting[0].endpoint[isoc_pipe].desc.bEndpointAddress; 1294 dev->video_mode.end_point_addr = uif->altsetting[0].endpoint[isoc_pipe].desc.bEndpointAddress;
1295 dev->video_mode.num_alt = uif->num_altsetting; 1295 dev->video_mode.num_alt = uif->num_altsetting;
1296 1296
1297 cx231xx_info("EndPoint Addr 0x%x, Alternate settings: %i\n", 1297 dev_info(dev->dev,
1298 dev->video_mode.end_point_addr, 1298 "video EndPoint Addr 0x%x, Alternate settings: %i\n",
1299 dev->video_mode.num_alt); 1299 dev->video_mode.end_point_addr,
1300 dev->video_mode.num_alt);
1300 1301
1301 dev->video_mode.alt_max_pkt_size = devm_kmalloc_array(&udev->dev, 32, dev->video_mode.num_alt, GFP_KERNEL); 1302 dev->video_mode.alt_max_pkt_size = devm_kmalloc_array(&udev->dev, 32, dev->video_mode.num_alt, GFP_KERNEL);
1302 if (dev->video_mode.alt_max_pkt_size == NULL) { 1303 if (dev->video_mode.alt_max_pkt_size == NULL)
1303 cx231xx_errdev("out of memory!\n");
1304 return -ENOMEM; 1304 return -ENOMEM;
1305 }
1306 1305
1307 for (i = 0; i < dev->video_mode.num_alt; i++) { 1306 for (i = 0; i < dev->video_mode.num_alt; i++) {
1308 u16 tmp = le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].desc.wMaxPacketSize); 1307 u16 tmp = le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].desc.wMaxPacketSize);
1309 dev->video_mode.alt_max_pkt_size[i] = (tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1); 1308 dev->video_mode.alt_max_pkt_size[i] = (tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1);
1310 cx231xx_info("Alternate setting %i, max size= %i\n", i, 1309 dev_dbg(dev->dev,
1311 dev->video_mode.alt_max_pkt_size[i]); 1310 "Alternate setting %i, max size= %i\n", i,
1311 dev->video_mode.alt_max_pkt_size[i]);
1312 } 1312 }
1313 1313
1314 /* VBI Init */ 1314 /* VBI Init */
1315 1315
1316 idx = dev->current_pcb_config.hs_config_info[0].interface_info.vanc_index + 1; 1316 idx = dev->current_pcb_config.hs_config_info[0].interface_info.vanc_index + 1;
1317 if (idx >= dev->max_iad_interface_count) { 1317 if (idx >= dev->max_iad_interface_count) {
1318 cx231xx_errdev("VBI PCB interface #%d doesn't exist\n", idx); 1318 dev_err(dev->dev,
1319 "VBI PCB interface #%d doesn't exist\n", idx);
1319 return -ENODEV; 1320 return -ENODEV;
1320 } 1321 }
1321 uif = udev->actconfig->interface[idx]; 1322 uif = udev->actconfig->interface[idx];
@@ -1325,16 +1326,15 @@ static int cx231xx_init_v4l2(struct cx231xx *dev,
1325 bEndpointAddress; 1326 bEndpointAddress;
1326 1327
1327 dev->vbi_mode.num_alt = uif->num_altsetting; 1328 dev->vbi_mode.num_alt = uif->num_altsetting;
1328 cx231xx_info("EndPoint Addr 0x%x, Alternate settings: %i\n", 1329 dev_info(dev->dev,
1329 dev->vbi_mode.end_point_addr, 1330 "VBI EndPoint Addr 0x%x, Alternate settings: %i\n",
1330 dev->vbi_mode.num_alt); 1331 dev->vbi_mode.end_point_addr,
1332 dev->vbi_mode.num_alt);
1331 1333
1332 /* compute alternate max packet sizes for vbi */ 1334 /* compute alternate max packet sizes for vbi */
1333 dev->vbi_mode.alt_max_pkt_size = devm_kmalloc_array(&udev->dev, 32, dev->vbi_mode.num_alt, GFP_KERNEL); 1335 dev->vbi_mode.alt_max_pkt_size = devm_kmalloc_array(&udev->dev, 32, dev->vbi_mode.num_alt, GFP_KERNEL);
1334 if (dev->vbi_mode.alt_max_pkt_size == NULL) { 1336 if (dev->vbi_mode.alt_max_pkt_size == NULL)
1335 cx231xx_errdev("out of memory!\n");
1336 return -ENOMEM; 1337 return -ENOMEM;
1337 }
1338 1338
1339 for (i = 0; i < dev->vbi_mode.num_alt; i++) { 1339 for (i = 0; i < dev->vbi_mode.num_alt; i++) {
1340 u16 tmp = 1340 u16 tmp =
@@ -1342,8 +1342,9 @@ static int cx231xx_init_v4l2(struct cx231xx *dev,
1342 desc.wMaxPacketSize); 1342 desc.wMaxPacketSize);
1343 dev->vbi_mode.alt_max_pkt_size[i] = 1343 dev->vbi_mode.alt_max_pkt_size[i] =
1344 (tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1); 1344 (tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1);
1345 cx231xx_info("Alternate setting %i, max size= %i\n", i, 1345 dev_dbg(dev->dev,
1346 dev->vbi_mode.alt_max_pkt_size[i]); 1346 "Alternate setting %i, max size= %i\n", i,
1347 dev->vbi_mode.alt_max_pkt_size[i]);
1347 } 1348 }
1348 1349
1349 /* Sliced CC VBI init */ 1350 /* Sliced CC VBI init */
@@ -1351,7 +1352,8 @@ static int cx231xx_init_v4l2(struct cx231xx *dev,
1351 /* compute alternate max packet sizes for sliced CC */ 1352 /* compute alternate max packet sizes for sliced CC */
1352 idx = dev->current_pcb_config.hs_config_info[0].interface_info.hanc_index + 1; 1353 idx = dev->current_pcb_config.hs_config_info[0].interface_info.hanc_index + 1;
1353 if (idx >= dev->max_iad_interface_count) { 1354 if (idx >= dev->max_iad_interface_count) {
1354 cx231xx_errdev("Sliced CC PCB interface #%d doesn't exist\n", idx); 1355 dev_err(dev->dev,
1356 "Sliced CC PCB interface #%d doesn't exist\n", idx);
1355 return -ENODEV; 1357 return -ENODEV;
1356 } 1358 }
1357 uif = udev->actconfig->interface[idx]; 1359 uif = udev->actconfig->interface[idx];
@@ -1361,23 +1363,22 @@ static int cx231xx_init_v4l2(struct cx231xx *dev,
1361 bEndpointAddress; 1363 bEndpointAddress;
1362 1364
1363 dev->sliced_cc_mode.num_alt = uif->num_altsetting; 1365 dev->sliced_cc_mode.num_alt = uif->num_altsetting;
1364 cx231xx_info("EndPoint Addr 0x%x, Alternate settings: %i\n", 1366 dev_info(dev->dev,
1365 dev->sliced_cc_mode.end_point_addr, 1367 "sliced CC EndPoint Addr 0x%x, Alternate settings: %i\n",
1366 dev->sliced_cc_mode.num_alt); 1368 dev->sliced_cc_mode.end_point_addr,
1369 dev->sliced_cc_mode.num_alt);
1367 dev->sliced_cc_mode.alt_max_pkt_size = devm_kmalloc_array(&udev->dev, 32, dev->sliced_cc_mode.num_alt, GFP_KERNEL); 1370 dev->sliced_cc_mode.alt_max_pkt_size = devm_kmalloc_array(&udev->dev, 32, dev->sliced_cc_mode.num_alt, GFP_KERNEL);
1368 1371 if (dev->sliced_cc_mode.alt_max_pkt_size == NULL)
1369 if (dev->sliced_cc_mode.alt_max_pkt_size == NULL) {
1370 cx231xx_errdev("out of memory!\n");
1371 return -ENOMEM; 1372 return -ENOMEM;
1372 }
1373 1373
1374 for (i = 0; i < dev->sliced_cc_mode.num_alt; i++) { 1374 for (i = 0; i < dev->sliced_cc_mode.num_alt; i++) {
1375 u16 tmp = le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe]. 1375 u16 tmp = le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].
1376 desc.wMaxPacketSize); 1376 desc.wMaxPacketSize);
1377 dev->sliced_cc_mode.alt_max_pkt_size[i] = 1377 dev->sliced_cc_mode.alt_max_pkt_size[i] =
1378 (tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1); 1378 (tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1);
1379 cx231xx_info("Alternate setting %i, max size= %i\n", i, 1379 dev_dbg(dev->dev,
1380 dev->sliced_cc_mode.alt_max_pkt_size[i]); 1380 "Alternate setting %i, max size= %i\n", i,
1381 dev->sliced_cc_mode.alt_max_pkt_size[i]);
1381 } 1382 }
1382 1383
1383 return 0; 1384 return 0;
@@ -1391,6 +1392,7 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
1391 const struct usb_device_id *id) 1392 const struct usb_device_id *id)
1392{ 1393{
1393 struct usb_device *udev; 1394 struct usb_device *udev;
1395 struct device *d = &interface->dev;
1394 struct usb_interface *uif; 1396 struct usb_interface *uif;
1395 struct cx231xx *dev = NULL; 1397 struct cx231xx *dev = NULL;
1396 int retval = -ENODEV; 1398 int retval = -ENODEV;
@@ -1401,6 +1403,7 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
1401 struct usb_interface_assoc_descriptor *assoc_desc; 1403 struct usb_interface_assoc_descriptor *assoc_desc;
1402 1404
1403 ifnum = interface->altsetting[0].desc.bInterfaceNumber; 1405 ifnum = interface->altsetting[0].desc.bInterfaceNumber;
1406 udev = usb_get_dev(interface_to_usbdev(interface));
1404 1407
1405 /* 1408 /*
1406 * Interface number 0 - IR interface (handled by mceusb driver) 1409 * Interface number 0 - IR interface (handled by mceusb driver)
@@ -1414,18 +1417,16 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
1414 nr = find_first_zero_bit(&cx231xx_devused, CX231XX_MAXBOARDS); 1417 nr = find_first_zero_bit(&cx231xx_devused, CX231XX_MAXBOARDS);
1415 if (nr >= CX231XX_MAXBOARDS) { 1418 if (nr >= CX231XX_MAXBOARDS) {
1416 /* No free device slots */ 1419 /* No free device slots */
1417 cx231xx_err(DRIVER_NAME ": Supports only %i devices.\n", 1420 dev_err(d,
1418 CX231XX_MAXBOARDS); 1421 "Supports only %i devices.\n",
1422 CX231XX_MAXBOARDS);
1419 return -ENOMEM; 1423 return -ENOMEM;
1420 } 1424 }
1421 } while (test_and_set_bit(nr, &cx231xx_devused)); 1425 } while (test_and_set_bit(nr, &cx231xx_devused));
1422 1426
1423 udev = usb_get_dev(interface_to_usbdev(interface));
1424
1425 /* allocate memory for our device state and initialize it */ 1427 /* allocate memory for our device state and initialize it */
1426 dev = devm_kzalloc(&udev->dev, sizeof(*dev), GFP_KERNEL); 1428 dev = devm_kzalloc(&udev->dev, sizeof(*dev), GFP_KERNEL);
1427 if (dev == NULL) { 1429 if (dev == NULL) {
1428 cx231xx_err(DRIVER_NAME ": out of memory!\n");
1429 clear_bit(nr, &cx231xx_devused); 1430 clear_bit(nr, &cx231xx_devused);
1430 return -ENOMEM; 1431 return -ENOMEM;
1431 } 1432 }
@@ -1434,6 +1435,7 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
1434 dev->devno = nr; 1435 dev->devno = nr;
1435 dev->model = id->driver_info; 1436 dev->model = id->driver_info;
1436 dev->video_mode.alt = -1; 1437 dev->video_mode.alt = -1;
1438 dev->dev = d;
1437 1439
1438 dev->interface_count++; 1440 dev->interface_count++;
1439 /* reset gpio dir and value */ 1441 /* reset gpio dir and value */
@@ -1472,14 +1474,14 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
1472 speed = "unknown"; 1474 speed = "unknown";
1473 } 1475 }
1474 1476
1475 cx231xx_info("New device %s %s @ %s Mbps " 1477 dev_info(d,
1476 "(%04x:%04x) with %d interfaces\n", 1478 "New device %s %s @ %s Mbps (%04x:%04x) with %d interfaces\n",
1477 udev->manufacturer ? udev->manufacturer : "", 1479 udev->manufacturer ? udev->manufacturer : "",
1478 udev->product ? udev->product : "", 1480 udev->product ? udev->product : "",
1479 speed, 1481 speed,
1480 le16_to_cpu(udev->descriptor.idVendor), 1482 le16_to_cpu(udev->descriptor.idVendor),
1481 le16_to_cpu(udev->descriptor.idProduct), 1483 le16_to_cpu(udev->descriptor.idProduct),
1482 dev->max_iad_interface_count); 1484 dev->max_iad_interface_count);
1483 1485
1484 /* increment interface count */ 1486 /* increment interface count */
1485 dev->interface_count++; 1487 dev->interface_count++;
@@ -1489,13 +1491,12 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
1489 1491
1490 assoc_desc = udev->actconfig->intf_assoc[0]; 1492 assoc_desc = udev->actconfig->intf_assoc[0];
1491 if (assoc_desc->bFirstInterface != ifnum) { 1493 if (assoc_desc->bFirstInterface != ifnum) {
1492 cx231xx_err(DRIVER_NAME ": Not found " 1494 dev_err(d, "Not found matching IAD interface\n");
1493 "matching IAD interface\n");
1494 retval = -ENODEV; 1495 retval = -ENODEV;
1495 goto err_if; 1496 goto err_if;
1496 } 1497 }
1497 1498
1498 cx231xx_info("registering interface %d\n", ifnum); 1499 dev_dbg(d, "registering interface %d\n", ifnum);
1499 1500
1500 /* save our data pointer in this interface device */ 1501 /* save our data pointer in this interface device */
1501 usb_set_intfdata(interface, dev); 1502 usb_set_intfdata(interface, dev);
@@ -1503,7 +1504,7 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
1503 /* Create v4l2 device */ 1504 /* Create v4l2 device */
1504 retval = v4l2_device_register(&interface->dev, &dev->v4l2_dev); 1505 retval = v4l2_device_register(&interface->dev, &dev->v4l2_dev);
1505 if (retval) { 1506 if (retval) {
1506 cx231xx_errdev("v4l2_device_register failed\n"); 1507 dev_err(d, "v4l2_device_register failed\n");
1507 goto err_v4l2; 1508 goto err_v4l2;
1508 } 1509 }
1509 1510
@@ -1520,7 +1521,8 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
1520 /* compute alternate max packet sizes for TS1 */ 1521 /* compute alternate max packet sizes for TS1 */
1521 idx = dev->current_pcb_config.hs_config_info[0].interface_info.ts1_index + 1; 1522 idx = dev->current_pcb_config.hs_config_info[0].interface_info.ts1_index + 1;
1522 if (idx >= dev->max_iad_interface_count) { 1523 if (idx >= dev->max_iad_interface_count) {
1523 cx231xx_errdev("TS1 PCB interface #%d doesn't exist\n", idx); 1524 dev_err(d, "TS1 PCB interface #%d doesn't exist\n",
1525 idx);
1524 retval = -ENODEV; 1526 retval = -ENODEV;
1525 goto err_video_alt; 1527 goto err_video_alt;
1526 } 1528 }
@@ -1531,13 +1533,13 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
1531 desc.bEndpointAddress; 1533 desc.bEndpointAddress;
1532 1534
1533 dev->ts1_mode.num_alt = uif->num_altsetting; 1535 dev->ts1_mode.num_alt = uif->num_altsetting;
1534 cx231xx_info("EndPoint Addr 0x%x, Alternate settings: %i\n", 1536 dev_info(d,
1535 dev->ts1_mode.end_point_addr, 1537 "TS EndPoint Addr 0x%x, Alternate settings: %i\n",
1536 dev->ts1_mode.num_alt); 1538 dev->ts1_mode.end_point_addr,
1539 dev->ts1_mode.num_alt);
1537 1540
1538 dev->ts1_mode.alt_max_pkt_size = devm_kmalloc_array(&udev->dev, 32, dev->ts1_mode.num_alt, GFP_KERNEL); 1541 dev->ts1_mode.alt_max_pkt_size = devm_kmalloc_array(&udev->dev, 32, dev->ts1_mode.num_alt, GFP_KERNEL);
1539 if (dev->ts1_mode.alt_max_pkt_size == NULL) { 1542 if (dev->ts1_mode.alt_max_pkt_size == NULL) {
1540 cx231xx_errdev("out of memory!\n");
1541 retval = -ENOMEM; 1543 retval = -ENOMEM;
1542 goto err_video_alt; 1544 goto err_video_alt;
1543 } 1545 }
@@ -1548,8 +1550,8 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
1548 wMaxPacketSize); 1550 wMaxPacketSize);
1549 dev->ts1_mode.alt_max_pkt_size[i] = 1551 dev->ts1_mode.alt_max_pkt_size[i] =
1550 (tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1); 1552 (tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1);
1551 cx231xx_info("Alternate setting %i, max size= %i\n", i, 1553 dev_dbg(d, "Alternate setting %i, max size= %i\n",
1552 dev->ts1_mode.alt_max_pkt_size[i]); 1554 i, dev->ts1_mode.alt_max_pkt_size[i]);
1553 } 1555 }
1554 } 1556 }
1555 1557
@@ -1613,10 +1615,9 @@ static void cx231xx_usb_disconnect(struct usb_interface *interface)
1613 wake_up_interruptible_all(&dev->open); 1615 wake_up_interruptible_all(&dev->open);
1614 1616
1615 if (dev->users) { 1617 if (dev->users) {
1616 cx231xx_warn 1618 dev_warn(dev->dev,
1617 ("device %s is open! Deregistration and memory " 1619 "device %s is open! Deregistration and memory deallocation are deferred on close.\n",
1618 "deallocation are deferred on close.\n", 1620 video_device_node_name(dev->vdev));
1619 video_device_node_name(dev->vdev));
1620 1621
1621 /* Even having users, it is safe to remove the RC i2c driver */ 1622 /* Even having users, it is safe to remove the RC i2c driver */
1622 cx231xx_ir_exit(dev); 1623 cx231xx_ir_exit(dev);
diff --git a/drivers/media/usb/cx231xx/cx231xx-core.c b/drivers/media/usb/cx231xx/cx231xx-core.c
index 180103e48036..4a3f28c4e8d3 100644
--- a/drivers/media/usb/cx231xx/cx231xx-core.c
+++ b/drivers/media/usb/cx231xx/cx231xx-core.c
@@ -20,16 +20,15 @@
20 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 20 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */ 21 */
22 22
23#include "cx231xx.h"
23#include <linux/init.h> 24#include <linux/init.h>
24#include <linux/list.h> 25#include <linux/list.h>
25#include <linux/module.h> 26#include <linux/module.h>
26#include <linux/slab.h> 27#include <linux/slab.h>
27#include <linux/usb.h>
28#include <linux/vmalloc.h> 28#include <linux/vmalloc.h>
29#include <media/v4l2-common.h> 29#include <media/v4l2-common.h>
30#include <media/tuner.h> 30#include <media/tuner.h>
31 31
32#include "cx231xx.h"
33#include "cx231xx-reg.h" 32#include "cx231xx-reg.h"
34 33
35/* #define ENABLE_DEBUG_ISOC_FRAMES */ 34/* #define ENABLE_DEBUG_ISOC_FRAMES */
@@ -99,10 +98,10 @@ int cx231xx_register_extension(struct cx231xx_ops *ops)
99 98
100 mutex_lock(&cx231xx_devlist_mutex); 99 mutex_lock(&cx231xx_devlist_mutex);
101 list_add_tail(&ops->next, &cx231xx_extension_devlist); 100 list_add_tail(&ops->next, &cx231xx_extension_devlist);
102 list_for_each_entry(dev, &cx231xx_devlist, devlist) 101 list_for_each_entry(dev, &cx231xx_devlist, devlist) {
103 ops->init(dev); 102 ops->init(dev);
104 103 dev_info(dev->dev, "%s initialized\n", ops->name);
105 printk(KERN_INFO DRIVER_NAME ": %s initialized\n", ops->name); 104 }
106 mutex_unlock(&cx231xx_devlist_mutex); 105 mutex_unlock(&cx231xx_devlist_mutex);
107 return 0; 106 return 0;
108} 107}
@@ -113,11 +112,11 @@ void cx231xx_unregister_extension(struct cx231xx_ops *ops)
113 struct cx231xx *dev = NULL; 112 struct cx231xx *dev = NULL;
114 113
115 mutex_lock(&cx231xx_devlist_mutex); 114 mutex_lock(&cx231xx_devlist_mutex);
116 list_for_each_entry(dev, &cx231xx_devlist, devlist) 115 list_for_each_entry(dev, &cx231xx_devlist, devlist) {
117 ops->fini(dev); 116 ops->fini(dev);
117 dev_info(dev->dev, "%s removed\n", ops->name);
118 }
118 119
119
120 printk(KERN_INFO DRIVER_NAME ": %s removed\n", ops->name);
121 list_del(&ops->next); 120 list_del(&ops->next);
122 mutex_unlock(&cx231xx_devlist_mutex); 121 mutex_unlock(&cx231xx_devlist_mutex);
123} 122}
@@ -227,10 +226,9 @@ int cx231xx_send_usb_command(struct cx231xx_i2c *i2c_bus,
227 226
228 /* call common vendor command request */ 227 /* call common vendor command request */
229 status = cx231xx_send_vendor_cmd(dev, &ven_req); 228 status = cx231xx_send_vendor_cmd(dev, &ven_req);
230 if (status < 0) { 229 if (status < 0 && !dev->i2c_scan_running) {
231 cx231xx_info 230 dev_err(dev->dev, "%s: failed with status -%d\n",
232 ("UsbInterface::sendCommand, failed with status -%d\n", 231 __func__, status);
233 status);
234 } 232 }
235 233
236 return status; 234 return status;
@@ -524,9 +522,9 @@ int cx231xx_set_video_alternate(struct cx231xx *dev)
524 usb_set_interface(dev->udev, usb_interface_index, 522 usb_set_interface(dev->udev, usb_interface_index,
525 dev->video_mode.alt); 523 dev->video_mode.alt);
526 if (errCode < 0) { 524 if (errCode < 0) {
527 cx231xx_errdev 525 dev_err(dev->dev,
528 ("cannot change alt number to %d (error=%i)\n", 526 "cannot change alt number to %d (error=%i)\n",
529 dev->video_mode.alt, errCode); 527 dev->video_mode.alt, errCode);
530 return errCode; 528 return errCode;
531 } 529 }
532 } 530 }
@@ -600,9 +598,9 @@ int cx231xx_set_alt_setting(struct cx231xx *dev, u8 index, u8 alt)
600 } 598 }
601 599
602 if (alt > 0 && max_pkt_size == 0) { 600 if (alt > 0 && max_pkt_size == 0) {
603 cx231xx_errdev 601 dev_err(dev->dev,
604 ("can't change interface %d alt no. to %d: Max. Pkt size = 0\n", 602 "can't change interface %d alt no. to %d: Max. Pkt size = 0\n",
605 usb_interface_index, alt); 603 usb_interface_index, alt);
606 /*To workaround error number=-71 on EP0 for videograbber, 604 /*To workaround error number=-71 on EP0 for videograbber,
607 need add following codes.*/ 605 need add following codes.*/
608 if (dev->board.no_alt_vanc) 606 if (dev->board.no_alt_vanc)
@@ -616,9 +614,9 @@ int cx231xx_set_alt_setting(struct cx231xx *dev, u8 index, u8 alt)
616 if (usb_interface_index > 0) { 614 if (usb_interface_index > 0) {
617 status = usb_set_interface(dev->udev, usb_interface_index, alt); 615 status = usb_set_interface(dev->udev, usb_interface_index, alt);
618 if (status < 0) { 616 if (status < 0) {
619 cx231xx_errdev 617 dev_err(dev->dev,
620 ("can't change interface %d alt no. to %d (err=%i)\n", 618 "can't change interface %d alt no. to %d (err=%i)\n",
621 usb_interface_index, alt, status); 619 usb_interface_index, alt, status);
622 return status; 620 return status;
623 } 621 }
624 } 622 }
@@ -767,18 +765,17 @@ int cx231xx_ep5_bulkout(struct cx231xx *dev, u8 *firmware, u16 size)
767 u32 *buffer; 765 u32 *buffer;
768 766
769 buffer = kzalloc(4096, GFP_KERNEL); 767 buffer = kzalloc(4096, GFP_KERNEL);
770 if (buffer == NULL) { 768 if (buffer == NULL)
771 cx231xx_info("out of mem\n");
772 return -ENOMEM; 769 return -ENOMEM;
773 }
774 memcpy(&buffer[0], firmware, 4096); 770 memcpy(&buffer[0], firmware, 4096);
775 771
776 ret = usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, 5), 772 ret = usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, 5),
777 buffer, 4096, &actlen, 2000); 773 buffer, 4096, &actlen, 2000);
778 774
779 if (ret) 775 if (ret)
780 cx231xx_info("bulk message failed: %d (%d/%d)", ret, 776 dev_err(dev->dev,
781 size, actlen); 777 "bulk message failed: %d (%d/%d)", ret,
778 size, actlen);
782 else { 779 else {
783 errCode = actlen != size ? -1 : 0; 780 errCode = actlen != size ? -1 : 0;
784 } 781 }
@@ -987,12 +984,8 @@ int cx231xx_init_isoc(struct cx231xx *dev, int max_packets,
987 cx231xx_uninit_isoc(dev); 984 cx231xx_uninit_isoc(dev);
988 985
989 dma_q->p_left_data = kzalloc(4096, GFP_KERNEL); 986 dma_q->p_left_data = kzalloc(4096, GFP_KERNEL);
990 if (dma_q->p_left_data == NULL) { 987 if (dma_q->p_left_data == NULL)
991 cx231xx_info("out of mem\n");
992 return -ENOMEM; 988 return -ENOMEM;
993 }
994
995
996 989
997 dev->video_mode.isoc_ctl.isoc_copy = isoc_copy; 990 dev->video_mode.isoc_ctl.isoc_copy = isoc_copy;
998 dev->video_mode.isoc_ctl.num_bufs = num_bufs; 991 dev->video_mode.isoc_ctl.num_bufs = num_bufs;
@@ -1018,14 +1011,16 @@ int cx231xx_init_isoc(struct cx231xx *dev, int max_packets,
1018 dev->video_mode.isoc_ctl.urb = 1011 dev->video_mode.isoc_ctl.urb =
1019 kzalloc(sizeof(void *) * num_bufs, GFP_KERNEL); 1012 kzalloc(sizeof(void *) * num_bufs, GFP_KERNEL);
1020 if (!dev->video_mode.isoc_ctl.urb) { 1013 if (!dev->video_mode.isoc_ctl.urb) {
1021 cx231xx_errdev("cannot alloc memory for usb buffers\n"); 1014 dev_err(dev->dev,
1015 "cannot alloc memory for usb buffers\n");
1022 return -ENOMEM; 1016 return -ENOMEM;
1023 } 1017 }
1024 1018
1025 dev->video_mode.isoc_ctl.transfer_buffer = 1019 dev->video_mode.isoc_ctl.transfer_buffer =
1026 kzalloc(sizeof(void *) * num_bufs, GFP_KERNEL); 1020 kzalloc(sizeof(void *) * num_bufs, GFP_KERNEL);
1027 if (!dev->video_mode.isoc_ctl.transfer_buffer) { 1021 if (!dev->video_mode.isoc_ctl.transfer_buffer) {
1028 cx231xx_errdev("cannot allocate memory for usbtransfer\n"); 1022 dev_err(dev->dev,
1023 "cannot allocate memory for usbtransfer\n");
1029 kfree(dev->video_mode.isoc_ctl.urb); 1024 kfree(dev->video_mode.isoc_ctl.urb);
1030 return -ENOMEM; 1025 return -ENOMEM;
1031 } 1026 }
@@ -1045,7 +1040,8 @@ int cx231xx_init_isoc(struct cx231xx *dev, int max_packets,
1045 for (i = 0; i < dev->video_mode.isoc_ctl.num_bufs; i++) { 1040 for (i = 0; i < dev->video_mode.isoc_ctl.num_bufs; i++) {
1046 urb = usb_alloc_urb(max_packets, GFP_KERNEL); 1041 urb = usb_alloc_urb(max_packets, GFP_KERNEL);
1047 if (!urb) { 1042 if (!urb) {
1048 cx231xx_err("cannot alloc isoc_ctl.urb %i\n", i); 1043 dev_err(dev->dev,
1044 "cannot alloc isoc_ctl.urb %i\n", i);
1049 cx231xx_uninit_isoc(dev); 1045 cx231xx_uninit_isoc(dev);
1050 return -ENOMEM; 1046 return -ENOMEM;
1051 } 1047 }
@@ -1055,10 +1051,10 @@ int cx231xx_init_isoc(struct cx231xx *dev, int max_packets,
1055 usb_alloc_coherent(dev->udev, sb_size, GFP_KERNEL, 1051 usb_alloc_coherent(dev->udev, sb_size, GFP_KERNEL,
1056 &urb->transfer_dma); 1052 &urb->transfer_dma);
1057 if (!dev->video_mode.isoc_ctl.transfer_buffer[i]) { 1053 if (!dev->video_mode.isoc_ctl.transfer_buffer[i]) {
1058 cx231xx_err("unable to allocate %i bytes for transfer" 1054 dev_err(dev->dev,
1059 " buffer %i%s\n", 1055 "unable to allocate %i bytes for transfer buffer %i%s\n",
1060 sb_size, i, 1056 sb_size, i,
1061 in_interrupt() ? " while in int" : ""); 1057 in_interrupt() ? " while in int" : "");
1062 cx231xx_uninit_isoc(dev); 1058 cx231xx_uninit_isoc(dev);
1063 return -ENOMEM; 1059 return -ENOMEM;
1064 } 1060 }
@@ -1090,8 +1086,9 @@ int cx231xx_init_isoc(struct cx231xx *dev, int max_packets,
1090 rc = usb_submit_urb(dev->video_mode.isoc_ctl.urb[i], 1086 rc = usb_submit_urb(dev->video_mode.isoc_ctl.urb[i],
1091 GFP_ATOMIC); 1087 GFP_ATOMIC);
1092 if (rc) { 1088 if (rc) {
1093 cx231xx_err("submit of urb %i failed (error=%i)\n", i, 1089 dev_err(dev->dev,
1094 rc); 1090 "submit of urb %i failed (error=%i)\n", i,
1091 rc);
1095 cx231xx_uninit_isoc(dev); 1092 cx231xx_uninit_isoc(dev);
1096 return rc; 1093 return rc;
1097 } 1094 }
@@ -1151,14 +1148,16 @@ int cx231xx_init_bulk(struct cx231xx *dev, int max_packets,
1151 dev->video_mode.bulk_ctl.urb = 1148 dev->video_mode.bulk_ctl.urb =
1152 kzalloc(sizeof(void *) * num_bufs, GFP_KERNEL); 1149 kzalloc(sizeof(void *) * num_bufs, GFP_KERNEL);
1153 if (!dev->video_mode.bulk_ctl.urb) { 1150 if (!dev->video_mode.bulk_ctl.urb) {
1154 cx231xx_errdev("cannot alloc memory for usb buffers\n"); 1151 dev_err(dev->dev,
1152 "cannot alloc memory for usb buffers\n");
1155 return -ENOMEM; 1153 return -ENOMEM;
1156 } 1154 }
1157 1155
1158 dev->video_mode.bulk_ctl.transfer_buffer = 1156 dev->video_mode.bulk_ctl.transfer_buffer =
1159 kzalloc(sizeof(void *) * num_bufs, GFP_KERNEL); 1157 kzalloc(sizeof(void *) * num_bufs, GFP_KERNEL);
1160 if (!dev->video_mode.bulk_ctl.transfer_buffer) { 1158 if (!dev->video_mode.bulk_ctl.transfer_buffer) {
1161 cx231xx_errdev("cannot allocate memory for usbtransfer\n"); 1159 dev_err(dev->dev,
1160 "cannot allocate memory for usbtransfer\n");
1162 kfree(dev->video_mode.bulk_ctl.urb); 1161 kfree(dev->video_mode.bulk_ctl.urb);
1163 return -ENOMEM; 1162 return -ENOMEM;
1164 } 1163 }
@@ -1178,7 +1177,8 @@ int cx231xx_init_bulk(struct cx231xx *dev, int max_packets,
1178 for (i = 0; i < dev->video_mode.bulk_ctl.num_bufs; i++) { 1177 for (i = 0; i < dev->video_mode.bulk_ctl.num_bufs; i++) {
1179 urb = usb_alloc_urb(0, GFP_KERNEL); 1178 urb = usb_alloc_urb(0, GFP_KERNEL);
1180 if (!urb) { 1179 if (!urb) {
1181 cx231xx_err("cannot alloc bulk_ctl.urb %i\n", i); 1180 dev_err(dev->dev,
1181 "cannot alloc bulk_ctl.urb %i\n", i);
1182 cx231xx_uninit_bulk(dev); 1182 cx231xx_uninit_bulk(dev);
1183 return -ENOMEM; 1183 return -ENOMEM;
1184 } 1184 }
@@ -1189,10 +1189,10 @@ int cx231xx_init_bulk(struct cx231xx *dev, int max_packets,
1189 usb_alloc_coherent(dev->udev, sb_size, GFP_KERNEL, 1189 usb_alloc_coherent(dev->udev, sb_size, GFP_KERNEL,
1190 &urb->transfer_dma); 1190 &urb->transfer_dma);
1191 if (!dev->video_mode.bulk_ctl.transfer_buffer[i]) { 1191 if (!dev->video_mode.bulk_ctl.transfer_buffer[i]) {
1192 cx231xx_err("unable to allocate %i bytes for transfer" 1192 dev_err(dev->dev,
1193 " buffer %i%s\n", 1193 "unable to allocate %i bytes for transfer buffer %i%s\n",
1194 sb_size, i, 1194 sb_size, i,
1195 in_interrupt() ? " while in int" : ""); 1195 in_interrupt() ? " while in int" : "");
1196 cx231xx_uninit_bulk(dev); 1196 cx231xx_uninit_bulk(dev);
1197 return -ENOMEM; 1197 return -ENOMEM;
1198 } 1198 }
@@ -1212,8 +1212,8 @@ int cx231xx_init_bulk(struct cx231xx *dev, int max_packets,
1212 rc = usb_submit_urb(dev->video_mode.bulk_ctl.urb[i], 1212 rc = usb_submit_urb(dev->video_mode.bulk_ctl.urb[i],
1213 GFP_ATOMIC); 1213 GFP_ATOMIC);
1214 if (rc) { 1214 if (rc) {
1215 cx231xx_err("submit of urb %i failed (error=%i)\n", i, 1215 dev_err(dev->dev,
1216 rc); 1216 "submit of urb %i failed (error=%i)\n", i, rc);
1217 cx231xx_uninit_bulk(dev); 1217 cx231xx_uninit_bulk(dev);
1218 return rc; 1218 return rc;
1219 } 1219 }
@@ -1300,6 +1300,15 @@ int cx231xx_dev_init(struct cx231xx *dev)
1300 cx231xx_i2c_register(&dev->i2c_bus[1]); 1300 cx231xx_i2c_register(&dev->i2c_bus[1]);
1301 cx231xx_i2c_register(&dev->i2c_bus[2]); 1301 cx231xx_i2c_register(&dev->i2c_bus[2]);
1302 1302
1303 cx231xx_i2c_mux_register(dev, 0);
1304 cx231xx_i2c_mux_register(dev, 1);
1305
1306 /* scan the real bus segments in the order of physical port numbers */
1307 cx231xx_do_i2c_scan(dev, I2C_0);
1308 cx231xx_do_i2c_scan(dev, I2C_1_MUX_1);
1309 cx231xx_do_i2c_scan(dev, I2C_2);
1310 cx231xx_do_i2c_scan(dev, I2C_1_MUX_3);
1311
1303 /* init hardware */ 1312 /* init hardware */
1304 /* Note : with out calling set power mode function, 1313 /* Note : with out calling set power mode function,
1305 afe can not be set up correctly */ 1314 afe can not be set up correctly */
@@ -1307,18 +1316,18 @@ int cx231xx_dev_init(struct cx231xx *dev)
1307 errCode = cx231xx_set_power_mode(dev, 1316 errCode = cx231xx_set_power_mode(dev,
1308 POLARIS_AVMODE_ENXTERNAL_AV); 1317 POLARIS_AVMODE_ENXTERNAL_AV);
1309 if (errCode < 0) { 1318 if (errCode < 0) {
1310 cx231xx_errdev 1319 dev_err(dev->dev,
1311 ("%s: Failed to set Power - errCode [%d]!\n", 1320 "%s: Failed to set Power - errCode [%d]!\n",
1312 __func__, errCode); 1321 __func__, errCode);
1313 return errCode; 1322 return errCode;
1314 } 1323 }
1315 } else { 1324 } else {
1316 errCode = cx231xx_set_power_mode(dev, 1325 errCode = cx231xx_set_power_mode(dev,
1317 POLARIS_AVMODE_ANALOGT_TV); 1326 POLARIS_AVMODE_ANALOGT_TV);
1318 if (errCode < 0) { 1327 if (errCode < 0) {
1319 cx231xx_errdev 1328 dev_err(dev->dev,
1320 ("%s: Failed to set Power - errCode [%d]!\n", 1329 "%s: Failed to set Power - errCode [%d]!\n",
1321 __func__, errCode); 1330 __func__, errCode);
1322 return errCode; 1331 return errCode;
1323 } 1332 }
1324 } 1333 }
@@ -1331,42 +1340,43 @@ int cx231xx_dev_init(struct cx231xx *dev)
1331 /* initialize Colibri block */ 1340 /* initialize Colibri block */
1332 errCode = cx231xx_afe_init_super_block(dev, 0x23c); 1341 errCode = cx231xx_afe_init_super_block(dev, 0x23c);
1333 if (errCode < 0) { 1342 if (errCode < 0) {
1334 cx231xx_errdev 1343 dev_err(dev->dev,
1335 ("%s: cx231xx_afe init super block - errCode [%d]!\n", 1344 "%s: cx231xx_afe init super block - errCode [%d]!\n",
1336 __func__, errCode); 1345 __func__, errCode);
1337 return errCode; 1346 return errCode;
1338 } 1347 }
1339 errCode = cx231xx_afe_init_channels(dev); 1348 errCode = cx231xx_afe_init_channels(dev);
1340 if (errCode < 0) { 1349 if (errCode < 0) {
1341 cx231xx_errdev 1350 dev_err(dev->dev,
1342 ("%s: cx231xx_afe init channels - errCode [%d]!\n", 1351 "%s: cx231xx_afe init channels - errCode [%d]!\n",
1343 __func__, errCode); 1352 __func__, errCode);
1344 return errCode; 1353 return errCode;
1345 } 1354 }
1346 1355
1347 /* Set DIF in By pass mode */ 1356 /* Set DIF in By pass mode */
1348 errCode = cx231xx_dif_set_standard(dev, DIF_USE_BASEBAND); 1357 errCode = cx231xx_dif_set_standard(dev, DIF_USE_BASEBAND);
1349 if (errCode < 0) { 1358 if (errCode < 0) {
1350 cx231xx_errdev 1359 dev_err(dev->dev,
1351 ("%s: cx231xx_dif set to By pass mode - errCode [%d]!\n", 1360 "%s: cx231xx_dif set to By pass mode - errCode [%d]!\n",
1352 __func__, errCode); 1361 __func__, errCode);
1353 return errCode; 1362 return errCode;
1354 } 1363 }
1355 1364
1356 /* I2S block related functions */ 1365 /* I2S block related functions */
1357 errCode = cx231xx_i2s_blk_initialize(dev); 1366 errCode = cx231xx_i2s_blk_initialize(dev);
1358 if (errCode < 0) { 1367 if (errCode < 0) {
1359 cx231xx_errdev 1368 dev_err(dev->dev,
1360 ("%s: cx231xx_i2s block initialize - errCode [%d]!\n", 1369 "%s: cx231xx_i2s block initialize - errCode [%d]!\n",
1361 __func__, errCode); 1370 __func__, errCode);
1362 return errCode; 1371 return errCode;
1363 } 1372 }
1364 1373
1365 /* init control pins */ 1374 /* init control pins */
1366 errCode = cx231xx_init_ctrl_pin_status(dev); 1375 errCode = cx231xx_init_ctrl_pin_status(dev);
1367 if (errCode < 0) { 1376 if (errCode < 0) {
1368 cx231xx_errdev("%s: cx231xx_init ctrl pins - errCode [%d]!\n", 1377 dev_err(dev->dev,
1369 __func__, errCode); 1378 "%s: cx231xx_init ctrl pins - errCode [%d]!\n",
1379 __func__, errCode);
1370 return errCode; 1380 return errCode;
1371 } 1381 }
1372 1382
@@ -1391,9 +1401,9 @@ int cx231xx_dev_init(struct cx231xx *dev)
1391 break; 1401 break;
1392 } 1402 }
1393 if (errCode < 0) { 1403 if (errCode < 0) {
1394 cx231xx_errdev 1404 dev_err(dev->dev,
1395 ("%s: cx231xx_AGC mode to Analog - errCode [%d]!\n", 1405 "%s: cx231xx_AGC mode to Analog - errCode [%d]!\n",
1396 __func__, errCode); 1406 __func__, errCode);
1397 return errCode; 1407 return errCode;
1398 } 1408 }
1399 1409
@@ -1404,9 +1414,7 @@ int cx231xx_dev_init(struct cx231xx *dev)
1404 if (dev->board.has_dvb) 1414 if (dev->board.has_dvb)
1405 cx231xx_set_alt_setting(dev, INDEX_TS1, 0); 1415 cx231xx_set_alt_setting(dev, INDEX_TS1, 0);
1406 1416
1407 /* set the I2C master port to 3 on channel 1 */ 1417 errCode = 0;
1408 errCode = cx231xx_enable_i2c_port_3(dev, true);
1409
1410 return errCode; 1418 return errCode;
1411} 1419}
1412EXPORT_SYMBOL_GPL(cx231xx_dev_init); 1420EXPORT_SYMBOL_GPL(cx231xx_dev_init);
@@ -1414,6 +1422,8 @@ EXPORT_SYMBOL_GPL(cx231xx_dev_init);
1414void cx231xx_dev_uninit(struct cx231xx *dev) 1422void cx231xx_dev_uninit(struct cx231xx *dev)
1415{ 1423{
1416 /* Un Initialize I2C bus */ 1424 /* Un Initialize I2C bus */
1425 cx231xx_i2c_mux_unregister(dev, 1);
1426 cx231xx_i2c_mux_unregister(dev, 0);
1417 cx231xx_i2c_unregister(&dev->i2c_bus[2]); 1427 cx231xx_i2c_unregister(&dev->i2c_bus[2]);
1418 cx231xx_i2c_unregister(&dev->i2c_bus[1]); 1428 cx231xx_i2c_unregister(&dev->i2c_bus[1]);
1419 cx231xx_i2c_unregister(&dev->i2c_bus[0]); 1429 cx231xx_i2c_unregister(&dev->i2c_bus[0]);
@@ -1468,9 +1478,8 @@ int cx231xx_send_gpio_cmd(struct cx231xx *dev, u32 gpio_bit, u8 *gpio_val,
1468 /* call common vendor command request */ 1478 /* call common vendor command request */
1469 status = cx231xx_send_vendor_cmd(dev, &ven_req); 1479 status = cx231xx_send_vendor_cmd(dev, &ven_req);
1470 if (status < 0) { 1480 if (status < 0) {
1471 cx231xx_info 1481 dev_err(dev->dev, "%s: failed with status -%d\n",
1472 ("UsbInterface::sendCommand, failed with status -%d\n", 1482 __func__, status);
1473 status);
1474 } 1483 }
1475 1484
1476 return status; 1485 return status;
diff --git a/drivers/media/usb/cx231xx/cx231xx-dvb.c b/drivers/media/usb/cx231xx/cx231xx-dvb.c
index 6c7b5e250eed..dd600b994e69 100644
--- a/drivers/media/usb/cx231xx/cx231xx-dvb.c
+++ b/drivers/media/usb/cx231xx/cx231xx-dvb.c
@@ -19,11 +19,10 @@
19 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 19 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */ 20 */
21 21
22#include "cx231xx.h"
22#include <linux/kernel.h> 23#include <linux/kernel.h>
23#include <linux/slab.h> 24#include <linux/slab.h>
24#include <linux/usb.h>
25 25
26#include "cx231xx.h"
27#include <media/v4l2-common.h> 26#include <media/v4l2-common.h>
28#include <media/videobuf-vmalloc.h> 27#include <media/videobuf-vmalloc.h>
29 28
@@ -46,11 +45,6 @@ MODULE_PARM_DESC(debug, "enable debug messages [dvb]");
46 45
47DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); 46DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
48 47
49#define dprintk(level, fmt, arg...) do { \
50if (debug >= level) \
51 printk(KERN_DEBUG "%s/2-dvb: " fmt, dev->name, ## arg); \
52} while (0)
53
54#define CX231XX_DVB_NUM_BUFS 5 48#define CX231XX_DVB_NUM_BUFS 5
55#define CX231XX_DVB_MAX_PACKETSIZE 564 49#define CX231XX_DVB_MAX_PACKETSIZE 564
56#define CX231XX_DVB_MAX_PACKETS 64 50#define CX231XX_DVB_MAX_PACKETS 64
@@ -197,9 +191,11 @@ static inline void print_err_status(struct cx231xx *dev, int packet, int status)
197 break; 191 break;
198 } 192 }
199 if (packet < 0) { 193 if (packet < 0) {
200 dprintk(1, "URB status %d [%s].\n", status, errmsg); 194 dev_dbg(dev->dev,
195 "URB status %d [%s].\n", status, errmsg);
201 } else { 196 } else {
202 dprintk(1, "URB packet %d, status %d [%s].\n", 197 dev_dbg(dev->dev,
198 "URB packet %d, status %d [%s].\n",
203 packet, status, errmsg); 199 packet, status, errmsg);
204 } 200 }
205} 201}
@@ -265,12 +261,8 @@ static int start_streaming(struct cx231xx_dvb *dvb)
265 struct cx231xx *dev = dvb->adapter.priv; 261 struct cx231xx *dev = dvb->adapter.priv;
266 262
267 if (dev->USE_ISO) { 263 if (dev->USE_ISO) {
268 cx231xx_info("DVB transfer mode is ISO.\n"); 264 dev_dbg(dev->dev, "DVB transfer mode is ISO.\n");
269 mutex_lock(&dev->i2c_lock);
270 cx231xx_enable_i2c_port_3(dev, false);
271 cx231xx_set_alt_setting(dev, INDEX_TS1, 4); 265 cx231xx_set_alt_setting(dev, INDEX_TS1, 4);
272 cx231xx_enable_i2c_port_3(dev, true);
273 mutex_unlock(&dev->i2c_lock);
274 rc = cx231xx_set_mode(dev, CX231XX_DIGITAL_MODE); 266 rc = cx231xx_set_mode(dev, CX231XX_DIGITAL_MODE);
275 if (rc < 0) 267 if (rc < 0)
276 return rc; 268 return rc;
@@ -280,7 +272,7 @@ static int start_streaming(struct cx231xx_dvb *dvb)
280 dev->ts1_mode.max_pkt_size, 272 dev->ts1_mode.max_pkt_size,
281 dvb_isoc_copy); 273 dvb_isoc_copy);
282 } else { 274 } else {
283 cx231xx_info("DVB transfer mode is BULK.\n"); 275 dev_dbg(dev->dev, "DVB transfer mode is BULK.\n");
284 cx231xx_set_alt_setting(dev, INDEX_TS1, 0); 276 cx231xx_set_alt_setting(dev, INDEX_TS1, 0);
285 rc = cx231xx_set_mode(dev, CX231XX_DIGITAL_MODE); 277 rc = cx231xx_set_mode(dev, CX231XX_DIGITAL_MODE);
286 if (rc < 0) 278 if (rc < 0)
@@ -378,24 +370,24 @@ static int attach_xc5000(u8 addr, struct cx231xx *dev)
378 struct xc5000_config cfg; 370 struct xc5000_config cfg;
379 371
380 memset(&cfg, 0, sizeof(cfg)); 372 memset(&cfg, 0, sizeof(cfg));
381 cfg.i2c_adap = &dev->i2c_bus[dev->board.tuner_i2c_master].i2c_adap; 373 cfg.i2c_adap = cx231xx_get_i2c_adap(dev, dev->board.tuner_i2c_master);
382 cfg.i2c_addr = addr; 374 cfg.i2c_addr = addr;
383 375
384 if (!dev->dvb->frontend) { 376 if (!dev->dvb->frontend) {
385 printk(KERN_ERR "%s/2: dvb frontend not attached. " 377 dev_err(dev->dev, "%s/2: dvb frontend not attached. "
386 "Can't attach xc5000\n", dev->name); 378 "Can't attach xc5000\n", dev->name);
387 return -EINVAL; 379 return -EINVAL;
388 } 380 }
389 381
390 fe = dvb_attach(xc5000_attach, dev->dvb->frontend, &cfg); 382 fe = dvb_attach(xc5000_attach, dev->dvb->frontend, &cfg);
391 if (!fe) { 383 if (!fe) {
392 printk(KERN_ERR "%s/2: xc5000 attach failed\n", dev->name); 384 dev_err(dev->dev, "%s/2: xc5000 attach failed\n", dev->name);
393 dvb_frontend_detach(dev->dvb->frontend); 385 dvb_frontend_detach(dev->dvb->frontend);
394 dev->dvb->frontend = NULL; 386 dev->dvb->frontend = NULL;
395 return -EINVAL; 387 return -EINVAL;
396 } 388 }
397 389
398 printk(KERN_INFO "%s/2: xc5000 attached\n", dev->name); 390 dev_info(dev->dev, "%s/2: xc5000 attached\n", dev->name);
399 391
400 return 0; 392 return 0;
401} 393}
@@ -434,16 +426,17 @@ int cx231xx_reset_analog_tuner(struct cx231xx *dev)
434 426
435 if (dops->init != NULL && !dev->xc_fw_load_done) { 427 if (dops->init != NULL && !dev->xc_fw_load_done) {
436 428
437 cx231xx_info("Reloading firmware for XC5000\n"); 429 dev_dbg(dev->dev,
430 "Reloading firmware for XC5000\n");
438 status = dops->init(dev->dvb->frontend); 431 status = dops->init(dev->dvb->frontend);
439 if (status == 0) { 432 if (status == 0) {
440 dev->xc_fw_load_done = 1; 433 dev->xc_fw_load_done = 1;
441 cx231xx_info 434 dev_dbg(dev->dev,
442 ("XC5000 firmware download completed\n"); 435 "XC5000 firmware download completed\n");
443 } else { 436 } else {
444 dev->xc_fw_load_done = 0; 437 dev->xc_fw_load_done = 0;
445 cx231xx_info 438 dev_dbg(dev->dev,
446 ("XC5000 firmware download failed !!!\n"); 439 "XC5000 firmware download failed !!!\n");
447 } 440 }
448 } 441 }
449 442
@@ -466,7 +459,7 @@ static int register_dvb(struct cx231xx_dvb *dvb,
466 result = dvb_register_adapter(&dvb->adapter, dev->name, module, device, 459 result = dvb_register_adapter(&dvb->adapter, dev->name, module, device,
467 adapter_nr); 460 adapter_nr);
468 if (result < 0) { 461 if (result < 0) {
469 printk(KERN_WARNING 462 dev_warn(dev->dev,
470 "%s: dvb_register_adapter failed (errno = %d)\n", 463 "%s: dvb_register_adapter failed (errno = %d)\n",
471 dev->name, result); 464 dev->name, result);
472 goto fail_adapter; 465 goto fail_adapter;
@@ -480,7 +473,7 @@ static int register_dvb(struct cx231xx_dvb *dvb,
480 /* register frontend */ 473 /* register frontend */
481 result = dvb_register_frontend(&dvb->adapter, dvb->frontend); 474 result = dvb_register_frontend(&dvb->adapter, dvb->frontend);
482 if (result < 0) { 475 if (result < 0) {
483 printk(KERN_WARNING 476 dev_warn(dev->dev,
484 "%s: dvb_register_frontend failed (errno = %d)\n", 477 "%s: dvb_register_frontend failed (errno = %d)\n",
485 dev->name, result); 478 dev->name, result);
486 goto fail_frontend; 479 goto fail_frontend;
@@ -498,7 +491,8 @@ static int register_dvb(struct cx231xx_dvb *dvb,
498 491
499 result = dvb_dmx_init(&dvb->demux); 492 result = dvb_dmx_init(&dvb->demux);
500 if (result < 0) { 493 if (result < 0) {
501 printk(KERN_WARNING "%s: dvb_dmx_init failed (errno = %d)\n", 494 dev_warn(dev->dev,
495 "%s: dvb_dmx_init failed (errno = %d)\n",
502 dev->name, result); 496 dev->name, result);
503 goto fail_dmx; 497 goto fail_dmx;
504 } 498 }
@@ -508,15 +502,16 @@ static int register_dvb(struct cx231xx_dvb *dvb,
508 dvb->dmxdev.capabilities = 0; 502 dvb->dmxdev.capabilities = 0;
509 result = dvb_dmxdev_init(&dvb->dmxdev, &dvb->adapter); 503 result = dvb_dmxdev_init(&dvb->dmxdev, &dvb->adapter);
510 if (result < 0) { 504 if (result < 0) {
511 printk(KERN_WARNING "%s: dvb_dmxdev_init failed (errno = %d)\n", 505 dev_warn(dev->dev,
512 dev->name, result); 506 "%s: dvb_dmxdev_init failed (errno = %d)\n",
507 dev->name, result);
513 goto fail_dmxdev; 508 goto fail_dmxdev;
514 } 509 }
515 510
516 dvb->fe_hw.source = DMX_FRONTEND_0; 511 dvb->fe_hw.source = DMX_FRONTEND_0;
517 result = dvb->demux.dmx.add_frontend(&dvb->demux.dmx, &dvb->fe_hw); 512 result = dvb->demux.dmx.add_frontend(&dvb->demux.dmx, &dvb->fe_hw);
518 if (result < 0) { 513 if (result < 0) {
519 printk(KERN_WARNING 514 dev_warn(dev->dev,
520 "%s: add_frontend failed (DMX_FRONTEND_0, errno = %d)\n", 515 "%s: add_frontend failed (DMX_FRONTEND_0, errno = %d)\n",
521 dev->name, result); 516 dev->name, result);
522 goto fail_fe_hw; 517 goto fail_fe_hw;
@@ -525,17 +520,17 @@ static int register_dvb(struct cx231xx_dvb *dvb,
525 dvb->fe_mem.source = DMX_MEMORY_FE; 520 dvb->fe_mem.source = DMX_MEMORY_FE;
526 result = dvb->demux.dmx.add_frontend(&dvb->demux.dmx, &dvb->fe_mem); 521 result = dvb->demux.dmx.add_frontend(&dvb->demux.dmx, &dvb->fe_mem);
527 if (result < 0) { 522 if (result < 0) {
528 printk(KERN_WARNING 523 dev_warn(dev->dev,
529 "%s: add_frontend failed (DMX_MEMORY_FE, errno = %d)\n", 524 "%s: add_frontend failed (DMX_MEMORY_FE, errno = %d)\n",
530 dev->name, result); 525 dev->name, result);
531 goto fail_fe_mem; 526 goto fail_fe_mem;
532 } 527 }
533 528
534 result = dvb->demux.dmx.connect_frontend(&dvb->demux.dmx, &dvb->fe_hw); 529 result = dvb->demux.dmx.connect_frontend(&dvb->demux.dmx, &dvb->fe_hw);
535 if (result < 0) { 530 if (result < 0) {
536 printk(KERN_WARNING 531 dev_warn(dev->dev,
537 "%s: connect_frontend failed (errno = %d)\n", dev->name, 532 "%s: connect_frontend failed (errno = %d)\n",
538 result); 533 dev->name, result);
539 goto fail_fe_conn; 534 goto fail_fe_conn;
540 } 535 }
541 536
@@ -583,6 +578,8 @@ static int dvb_init(struct cx231xx *dev)
583{ 578{
584 int result = 0; 579 int result = 0;
585 struct cx231xx_dvb *dvb; 580 struct cx231xx_dvb *dvb;
581 struct i2c_adapter *tuner_i2c;
582 struct i2c_adapter *demod_i2c;
586 583
587 if (!dev->board.has_dvb) { 584 if (!dev->board.has_dvb) {
588 /* This device does not support the extension */ 585 /* This device does not support the extension */
@@ -592,13 +589,16 @@ static int dvb_init(struct cx231xx *dev)
592 dvb = kzalloc(sizeof(struct cx231xx_dvb), GFP_KERNEL); 589 dvb = kzalloc(sizeof(struct cx231xx_dvb), GFP_KERNEL);
593 590
594 if (dvb == NULL) { 591 if (dvb == NULL) {
595 printk(KERN_INFO "cx231xx_dvb: memory allocation failed\n"); 592 dev_info(dev->dev,
593 "cx231xx_dvb: memory allocation failed\n");
596 return -ENOMEM; 594 return -ENOMEM;
597 } 595 }
598 dev->dvb = dvb; 596 dev->dvb = dvb;
599 dev->cx231xx_set_analog_freq = cx231xx_set_analog_freq; 597 dev->cx231xx_set_analog_freq = cx231xx_set_analog_freq;
600 dev->cx231xx_reset_analog_tuner = cx231xx_reset_analog_tuner; 598 dev->cx231xx_reset_analog_tuner = cx231xx_reset_analog_tuner;
601 599
600 tuner_i2c = cx231xx_get_i2c_adap(dev, dev->board.tuner_i2c_master);
601 demod_i2c = cx231xx_get_i2c_adap(dev, dev->board.demod_i2c_master);
602 mutex_lock(&dev->lock); 602 mutex_lock(&dev->lock);
603 cx231xx_set_mode(dev, CX231XX_DIGITAL_MODE); 603 cx231xx_set_mode(dev, CX231XX_DIGITAL_MODE);
604 cx231xx_demod_reset(dev); 604 cx231xx_demod_reset(dev);
@@ -609,11 +609,11 @@ static int dvb_init(struct cx231xx *dev)
609 609
610 dev->dvb->frontend = dvb_attach(s5h1432_attach, 610 dev->dvb->frontend = dvb_attach(s5h1432_attach,
611 &dvico_s5h1432_config, 611 &dvico_s5h1432_config,
612 &dev->i2c_bus[dev->board.demod_i2c_master].i2c_adap); 612 demod_i2c);
613 613
614 if (dev->dvb->frontend == NULL) { 614 if (dev->dvb->frontend == NULL) {
615 printk(DRIVER_NAME 615 dev_err(dev->dev,
616 ": Failed to attach s5h1432 front end\n"); 616 "Failed to attach s5h1432 front end\n");
617 result = -EINVAL; 617 result = -EINVAL;
618 goto out_free; 618 goto out_free;
619 } 619 }
@@ -622,7 +622,7 @@ static int dvb_init(struct cx231xx *dev)
622 dvb->frontend->callback = cx231xx_tuner_callback; 622 dvb->frontend->callback = cx231xx_tuner_callback;
623 623
624 if (!dvb_attach(xc5000_attach, dev->dvb->frontend, 624 if (!dvb_attach(xc5000_attach, dev->dvb->frontend,
625 &dev->i2c_bus[dev->board.tuner_i2c_master].i2c_adap, 625 tuner_i2c,
626 &cnxt_rde250_tunerconfig)) { 626 &cnxt_rde250_tunerconfig)) {
627 result = -EINVAL; 627 result = -EINVAL;
628 goto out_free; 628 goto out_free;
@@ -634,11 +634,11 @@ static int dvb_init(struct cx231xx *dev)
634 634
635 dev->dvb->frontend = dvb_attach(s5h1411_attach, 635 dev->dvb->frontend = dvb_attach(s5h1411_attach,
636 &xc5000_s5h1411_config, 636 &xc5000_s5h1411_config,
637 &dev->i2c_bus[dev->board.demod_i2c_master].i2c_adap); 637 demod_i2c);
638 638
639 if (dev->dvb->frontend == NULL) { 639 if (dev->dvb->frontend == NULL) {
640 printk(DRIVER_NAME 640 dev_err(dev->dev,
641 ": Failed to attach s5h1411 front end\n"); 641 "Failed to attach s5h1411 front end\n");
642 result = -EINVAL; 642 result = -EINVAL;
643 goto out_free; 643 goto out_free;
644 } 644 }
@@ -647,7 +647,7 @@ static int dvb_init(struct cx231xx *dev)
647 dvb->frontend->callback = cx231xx_tuner_callback; 647 dvb->frontend->callback = cx231xx_tuner_callback;
648 648
649 if (!dvb_attach(xc5000_attach, dev->dvb->frontend, 649 if (!dvb_attach(xc5000_attach, dev->dvb->frontend,
650 &dev->i2c_bus[dev->board.tuner_i2c_master].i2c_adap, 650 tuner_i2c,
651 &cnxt_rdu250_tunerconfig)) { 651 &cnxt_rdu250_tunerconfig)) {
652 result = -EINVAL; 652 result = -EINVAL;
653 goto out_free; 653 goto out_free;
@@ -657,11 +657,11 @@ static int dvb_init(struct cx231xx *dev)
657 657
658 dev->dvb->frontend = dvb_attach(s5h1432_attach, 658 dev->dvb->frontend = dvb_attach(s5h1432_attach,
659 &dvico_s5h1432_config, 659 &dvico_s5h1432_config,
660 &dev->i2c_bus[dev->board.demod_i2c_master].i2c_adap); 660 demod_i2c);
661 661
662 if (dev->dvb->frontend == NULL) { 662 if (dev->dvb->frontend == NULL) {
663 printk(DRIVER_NAME 663 dev_err(dev->dev,
664 ": Failed to attach s5h1432 front end\n"); 664 "Failed to attach s5h1432 front end\n");
665 result = -EINVAL; 665 result = -EINVAL;
666 goto out_free; 666 goto out_free;
667 } 667 }
@@ -670,7 +670,7 @@ static int dvb_init(struct cx231xx *dev)
670 dvb->frontend->callback = cx231xx_tuner_callback; 670 dvb->frontend->callback = cx231xx_tuner_callback;
671 671
672 if (!dvb_attach(tda18271_attach, dev->dvb->frontend, 672 if (!dvb_attach(tda18271_attach, dev->dvb->frontend,
673 0x60, &dev->i2c_bus[dev->board.tuner_i2c_master].i2c_adap, 673 0x60, tuner_i2c,
674 &cnxt_rde253s_tunerconfig)) { 674 &cnxt_rde253s_tunerconfig)) {
675 result = -EINVAL; 675 result = -EINVAL;
676 goto out_free; 676 goto out_free;
@@ -681,11 +681,11 @@ static int dvb_init(struct cx231xx *dev)
681 681
682 dev->dvb->frontend = dvb_attach(s5h1411_attach, 682 dev->dvb->frontend = dvb_attach(s5h1411_attach,
683 &tda18271_s5h1411_config, 683 &tda18271_s5h1411_config,
684 &dev->i2c_bus[dev->board.demod_i2c_master].i2c_adap); 684 demod_i2c);
685 685
686 if (dev->dvb->frontend == NULL) { 686 if (dev->dvb->frontend == NULL) {
687 printk(DRIVER_NAME 687 dev_err(dev->dev,
688 ": Failed to attach s5h1411 front end\n"); 688 "Failed to attach s5h1411 front end\n");
689 result = -EINVAL; 689 result = -EINVAL;
690 goto out_free; 690 goto out_free;
691 } 691 }
@@ -694,7 +694,7 @@ static int dvb_init(struct cx231xx *dev)
694 dvb->frontend->callback = cx231xx_tuner_callback; 694 dvb->frontend->callback = cx231xx_tuner_callback;
695 695
696 if (!dvb_attach(tda18271_attach, dev->dvb->frontend, 696 if (!dvb_attach(tda18271_attach, dev->dvb->frontend,
697 0x60, &dev->i2c_bus[dev->board.tuner_i2c_master].i2c_adap, 697 0x60, tuner_i2c,
698 &cnxt_rde253s_tunerconfig)) { 698 &cnxt_rde253s_tunerconfig)) {
699 result = -EINVAL; 699 result = -EINVAL;
700 goto out_free; 700 goto out_free;
@@ -702,16 +702,17 @@ static int dvb_init(struct cx231xx *dev)
702 break; 702 break;
703 case CX231XX_BOARD_HAUPPAUGE_EXETER: 703 case CX231XX_BOARD_HAUPPAUGE_EXETER:
704 704
705 printk(KERN_INFO "%s: looking for tuner / demod on i2c bus: %d\n", 705 dev_info(dev->dev,
706 __func__, i2c_adapter_id(&dev->i2c_bus[dev->board.tuner_i2c_master].i2c_adap)); 706 "%s: looking for tuner / demod on i2c bus: %d\n",
707 __func__, i2c_adapter_id(tuner_i2c));
707 708
708 dev->dvb->frontend = dvb_attach(lgdt3305_attach, 709 dev->dvb->frontend = dvb_attach(lgdt3305_attach,
709 &hcw_lgdt3305_config, 710 &hcw_lgdt3305_config,
710 &dev->i2c_bus[dev->board.tuner_i2c_master].i2c_adap); 711 tuner_i2c);
711 712
712 if (dev->dvb->frontend == NULL) { 713 if (dev->dvb->frontend == NULL) {
713 printk(DRIVER_NAME 714 dev_err(dev->dev,
714 ": Failed to attach LG3305 front end\n"); 715 "Failed to attach LG3305 front end\n");
715 result = -EINVAL; 716 result = -EINVAL;
716 goto out_free; 717 goto out_free;
717 } 718 }
@@ -720,7 +721,7 @@ static int dvb_init(struct cx231xx *dev)
720 dvb->frontend->callback = cx231xx_tuner_callback; 721 dvb->frontend->callback = cx231xx_tuner_callback;
721 722
722 dvb_attach(tda18271_attach, dev->dvb->frontend, 723 dvb_attach(tda18271_attach, dev->dvb->frontend,
723 0x60, &dev->i2c_bus[dev->board.tuner_i2c_master].i2c_adap, 724 0x60, tuner_i2c,
724 &hcw_tda18271_config); 725 &hcw_tda18271_config);
725 break; 726 break;
726 727
@@ -728,12 +729,12 @@ static int dvb_init(struct cx231xx *dev)
728 729
729 dev->dvb->frontend = dvb_attach(si2165_attach, 730 dev->dvb->frontend = dvb_attach(si2165_attach,
730 &hauppauge_930C_HD_1113xx_si2165_config, 731 &hauppauge_930C_HD_1113xx_si2165_config,
731 &dev->i2c_bus[dev->board.tuner_i2c_master].i2c_adap 732 tuner_i2c
732 ); 733 );
733 734
734 if (dev->dvb->frontend == NULL) { 735 if (dev->dvb->frontend == NULL) {
735 printk(DRIVER_NAME 736 dev_err(dev->dev,
736 ": Failed to attach SI2165 front end\n"); 737 "Failed to attach SI2165 front end\n");
737 result = -EINVAL; 738 result = -EINVAL;
738 goto out_free; 739 goto out_free;
739 } 740 }
@@ -745,7 +746,7 @@ static int dvb_init(struct cx231xx *dev)
745 746
746 dvb_attach(tda18271_attach, dev->dvb->frontend, 747 dvb_attach(tda18271_attach, dev->dvb->frontend,
747 0x60, 748 0x60,
748 &dev->i2c_bus[dev->board.tuner_i2c_master].i2c_adap, 749 tuner_i2c,
749 &hcw_tda18271_config); 750 &hcw_tda18271_config);
750 751
751 dev->cx231xx_reset_analog_tuner = NULL; 752 dev->cx231xx_reset_analog_tuner = NULL;
@@ -761,12 +762,12 @@ static int dvb_init(struct cx231xx *dev)
761 762
762 dev->dvb->frontend = dvb_attach(si2165_attach, 763 dev->dvb->frontend = dvb_attach(si2165_attach,
763 &pctv_quatro_stick_1114xx_si2165_config, 764 &pctv_quatro_stick_1114xx_si2165_config,
764 &dev->i2c_bus[dev->board.tuner_i2c_master].i2c_adap 765 tuner_i2c
765 ); 766 );
766 767
767 if (dev->dvb->frontend == NULL) { 768 if (dev->dvb->frontend == NULL) {
768 printk(DRIVER_NAME 769 dev_err(dev->dev,
769 ": Failed to attach SI2165 front end\n"); 770 "Failed to attach SI2165 front end\n");
770 result = -EINVAL; 771 result = -EINVAL;
771 goto out_free; 772 goto out_free;
772 } 773 }
@@ -786,7 +787,7 @@ static int dvb_init(struct cx231xx *dev)
786 request_module("si2157"); 787 request_module("si2157");
787 788
788 client = i2c_new_device( 789 client = i2c_new_device(
789 &dev->i2c_bus[dev->board.tuner_i2c_master].i2c_adap, 790 tuner_i2c,
790 &info); 791 &info);
791 if (client == NULL || client->dev.driver == NULL) { 792 if (client == NULL || client->dev.driver == NULL) {
792 dvb_frontend_detach(dev->dvb->frontend); 793 dvb_frontend_detach(dev->dvb->frontend);
@@ -810,16 +811,17 @@ static int dvb_init(struct cx231xx *dev)
810 case CX231XX_BOARD_PV_PLAYTV_USB_HYBRID: 811 case CX231XX_BOARD_PV_PLAYTV_USB_HYBRID:
811 case CX231XX_BOARD_KWORLD_UB430_USB_HYBRID: 812 case CX231XX_BOARD_KWORLD_UB430_USB_HYBRID:
812 813
813 printk(KERN_INFO "%s: looking for demod on i2c bus: %d\n", 814 dev_info(dev->dev,
814 __func__, i2c_adapter_id(&dev->i2c_bus[dev->board.tuner_i2c_master].i2c_adap)); 815 "%s: looking for demod on i2c bus: %d\n",
816 __func__, i2c_adapter_id(tuner_i2c));
815 817
816 dev->dvb->frontend = dvb_attach(mb86a20s_attach, 818 dev->dvb->frontend = dvb_attach(mb86a20s_attach,
817 &pv_mb86a20s_config, 819 &pv_mb86a20s_config,
818 &dev->i2c_bus[dev->board.demod_i2c_master].i2c_adap); 820 demod_i2c);
819 821
820 if (dev->dvb->frontend == NULL) { 822 if (dev->dvb->frontend == NULL) {
821 printk(DRIVER_NAME 823 dev_err(dev->dev,
822 ": Failed to attach mb86a20s demod\n"); 824 "Failed to attach mb86a20s demod\n");
823 result = -EINVAL; 825 result = -EINVAL;
824 goto out_free; 826 goto out_free;
825 } 827 }
@@ -828,30 +830,31 @@ static int dvb_init(struct cx231xx *dev)
828 dvb->frontend->callback = cx231xx_tuner_callback; 830 dvb->frontend->callback = cx231xx_tuner_callback;
829 831
830 dvb_attach(tda18271_attach, dev->dvb->frontend, 832 dvb_attach(tda18271_attach, dev->dvb->frontend,
831 0x60, &dev->i2c_bus[dev->board.tuner_i2c_master].i2c_adap, 833 0x60, tuner_i2c,
832 &pv_tda18271_config); 834 &pv_tda18271_config);
833 break; 835 break;
834 836
835 default: 837 default:
836 printk(KERN_ERR "%s/2: The frontend of your DVB/ATSC card" 838 dev_err(dev->dev,
837 " isn't supported yet\n", dev->name); 839 "%s/2: The frontend of your DVB/ATSC card isn't supported yet\n",
840 dev->name);
838 break; 841 break;
839 } 842 }
840 if (NULL == dvb->frontend) { 843 if (NULL == dvb->frontend) {
841 printk(KERN_ERR 844 dev_err(dev->dev,
842 "%s/2: frontend initialization failed\n", dev->name); 845 "%s/2: frontend initialization failed\n", dev->name);
843 result = -EINVAL; 846 result = -EINVAL;
844 goto out_free; 847 goto out_free;
845 } 848 }
846 849
847 /* register everything */ 850 /* register everything */
848 result = register_dvb(dvb, THIS_MODULE, dev, &dev->udev->dev); 851 result = register_dvb(dvb, THIS_MODULE, dev, dev->dev);
849 852
850 if (result < 0) 853 if (result < 0)
851 goto out_free; 854 goto out_free;
852 855
853 856
854 printk(KERN_INFO "Successfully loaded cx231xx-dvb\n"); 857 dev_info(dev->dev, "Successfully loaded cx231xx-dvb\n");
855 858
856ret: 859ret:
857 cx231xx_set_mode(dev, CX231XX_SUSPEND); 860 cx231xx_set_mode(dev, CX231XX_SUSPEND);
diff --git a/drivers/media/usb/cx231xx/cx231xx-i2c.c b/drivers/media/usb/cx231xx/cx231xx-i2c.c
index 7c0f797f1057..a29c345b027d 100644
--- a/drivers/media/usb/cx231xx/cx231xx-i2c.c
+++ b/drivers/media/usb/cx231xx/cx231xx-i2c.c
@@ -20,14 +20,14 @@
20 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 20 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */ 21 */
22 22
23#include "cx231xx.h"
23#include <linux/module.h> 24#include <linux/module.h>
24#include <linux/kernel.h> 25#include <linux/kernel.h>
25#include <linux/usb.h>
26#include <linux/i2c.h> 26#include <linux/i2c.h>
27#include <linux/i2c-mux.h>
27#include <media/v4l2-common.h> 28#include <media/v4l2-common.h>
28#include <media/tuner.h> 29#include <media/tuner.h>
29 30
30#include "cx231xx.h"
31 31
32/* ----------------------------------------------------------- */ 32/* ----------------------------------------------------------- */
33 33
@@ -54,10 +54,19 @@ do { \
54 } \ 54 } \
55} while (0) 55} while (0)
56 56
57static inline int get_real_i2c_port(struct cx231xx *dev, int bus_nr)
58{
59 if (bus_nr == 1)
60 return dev->port_3_switch_enabled ? I2C_1_MUX_3 : I2C_1_MUX_1;
61 return bus_nr;
62}
63
57static inline bool is_tuner(struct cx231xx *dev, struct cx231xx_i2c *bus, 64static inline bool is_tuner(struct cx231xx *dev, struct cx231xx_i2c *bus,
58 const struct i2c_msg *msg, int tuner_type) 65 const struct i2c_msg *msg, int tuner_type)
59{ 66{
60 if (bus->nr != dev->board.tuner_i2c_master) 67 int i2c_port = get_real_i2c_port(dev, bus->nr);
68
69 if (i2c_port != dev->board.tuner_i2c_master)
61 return false; 70 return false;
62 71
63 if (msg->addr != dev->board.tuner_addr) 72 if (msg->addr != dev->board.tuner_addr)
@@ -340,14 +349,15 @@ static int cx231xx_i2c_check_for_device(struct i2c_adapter *i2c_adap,
340 struct cx231xx *dev = bus->dev; 349 struct cx231xx *dev = bus->dev;
341 struct cx231xx_i2c_xfer_data req_data; 350 struct cx231xx_i2c_xfer_data req_data;
342 int status = 0; 351 int status = 0;
352 u8 buf[1];
343 353
344 /* prepare xfer_data struct */ 354 /* prepare xfer_data struct */
345 req_data.dev_addr = msg->addr; 355 req_data.dev_addr = msg->addr;
346 req_data.direction = msg->flags; 356 req_data.direction = I2C_M_RD;
347 req_data.saddr_len = 0; 357 req_data.saddr_len = 0;
348 req_data.saddr_dat = 0; 358 req_data.saddr_dat = 0;
349 req_data.buf_size = 0; 359 req_data.buf_size = 1;
350 req_data.p_buffer = NULL; 360 req_data.p_buffer = buf;
351 361
352 /* usb send command */ 362 /* usb send command */
353 status = dev->cx231xx_send_usb_command(bus, &req_data); 363 status = dev->cx231xx_send_usb_command(bus, &req_data);
@@ -455,17 +465,14 @@ static struct i2c_adapter cx231xx_adap_template = {
455 .algo = &cx231xx_algo, 465 .algo = &cx231xx_algo,
456}; 466};
457 467
458static struct i2c_client cx231xx_client_template = {
459 .name = "cx231xx internal",
460};
461
462/* ----------------------------------------------------------- */ 468/* ----------------------------------------------------------- */
463 469
464/* 470/*
465 * i2c_devs 471 * i2c_devs
466 * incomplete list of known devices 472 * incomplete list of known devices
467 */ 473 */
468static char *i2c_devs[128] = { 474static const char *i2c_devs[128] = {
475 [0x20 >> 1] = "demod",
469 [0x60 >> 1] = "colibri", 476 [0x60 >> 1] = "colibri",
470 [0x88 >> 1] = "hammerhead", 477 [0x88 >> 1] = "hammerhead",
471 [0x8e >> 1] = "CIR", 478 [0x8e >> 1] = "CIR",
@@ -480,22 +487,34 @@ static char *i2c_devs[128] = {
480 * cx231xx_do_i2c_scan() 487 * cx231xx_do_i2c_scan()
481 * check i2c address range for devices 488 * check i2c address range for devices
482 */ 489 */
483void cx231xx_do_i2c_scan(struct cx231xx *dev, struct i2c_client *c) 490void cx231xx_do_i2c_scan(struct cx231xx *dev, int i2c_port)
484{ 491{
485 unsigned char buf; 492 unsigned char buf;
486 int i, rc; 493 int i, rc;
494 struct i2c_client client;
495
496 if (!i2c_scan)
497 return;
498
499 /* Don't generate I2C errors during scan */
500 dev->i2c_scan_running = true;
501
502 memset(&client, 0, sizeof(client));
503 client.adapter = cx231xx_get_i2c_adap(dev, i2c_port);
487 504
488 cx231xx_info(": Checking for I2C devices ..\n");
489 for (i = 0; i < 128; i++) { 505 for (i = 0; i < 128; i++) {
490 c->addr = i; 506 client.addr = i;
491 rc = i2c_master_recv(c, &buf, 0); 507 rc = i2c_master_recv(&client, &buf, 0);
492 if (rc < 0) 508 if (rc < 0)
493 continue; 509 continue;
494 cx231xx_info("%s: i2c scan: found device @ 0x%x [%s]\n", 510 dev_info(dev->dev,
495 dev->name, i << 1, 511 "i2c scan: found device @ port %d addr 0x%x [%s]\n",
496 i2c_devs[i] ? i2c_devs[i] : "???"); 512 i2c_port,
513 i << 1,
514 i2c_devs[i] ? i2c_devs[i] : "???");
497 } 515 }
498 cx231xx_info(": Completed Checking for I2C devices.\n"); 516
517 dev->i2c_scan_running = false;
499} 518}
500 519
501/* 520/*
@@ -509,23 +528,17 @@ int cx231xx_i2c_register(struct cx231xx_i2c *bus)
509 BUG_ON(!dev->cx231xx_send_usb_command); 528 BUG_ON(!dev->cx231xx_send_usb_command);
510 529
511 bus->i2c_adap = cx231xx_adap_template; 530 bus->i2c_adap = cx231xx_adap_template;
512 bus->i2c_client = cx231xx_client_template; 531 bus->i2c_adap.dev.parent = dev->dev;
513 bus->i2c_adap.dev.parent = &dev->udev->dev;
514 532
515 strlcpy(bus->i2c_adap.name, bus->dev->name, sizeof(bus->i2c_adap.name)); 533 snprintf(bus->i2c_adap.name, sizeof(bus->i2c_adap.name), "%s-%d", bus->dev->name, bus->nr);
516 534
517 bus->i2c_adap.algo_data = bus; 535 bus->i2c_adap.algo_data = bus;
518 i2c_set_adapdata(&bus->i2c_adap, &dev->v4l2_dev); 536 i2c_set_adapdata(&bus->i2c_adap, &dev->v4l2_dev);
519 i2c_add_adapter(&bus->i2c_adap); 537 i2c_add_adapter(&bus->i2c_adap);
520 538
521 bus->i2c_client.adapter = &bus->i2c_adap; 539 if (0 != bus->i2c_rc)
522 540 dev_warn(dev->dev,
523 if (0 == bus->i2c_rc) { 541 "i2c bus %d register FAILED\n", bus->nr);
524 if (i2c_scan)
525 cx231xx_do_i2c_scan(dev, &bus->i2c_client);
526 } else
527 cx231xx_warn("%s: i2c bus %d register FAILED\n",
528 dev->name, bus->nr);
529 542
530 return bus->i2c_rc; 543 return bus->i2c_rc;
531} 544}
@@ -539,3 +552,62 @@ int cx231xx_i2c_unregister(struct cx231xx_i2c *bus)
539 i2c_del_adapter(&bus->i2c_adap); 552 i2c_del_adapter(&bus->i2c_adap);
540 return 0; 553 return 0;
541} 554}
555
556/*
557 * cx231xx_i2c_mux_select()
558 * switch i2c master number 1 between port1 and port3
559 */
560static int cx231xx_i2c_mux_select(struct i2c_adapter *adap,
561 void *mux_priv, u32 chan_id)
562{
563 struct cx231xx *dev = mux_priv;
564
565 return cx231xx_enable_i2c_port_3(dev, chan_id);
566}
567
568int cx231xx_i2c_mux_register(struct cx231xx *dev, int mux_no)
569{
570 struct i2c_adapter *i2c_parent = &dev->i2c_bus[1].i2c_adap;
571 /* what is the correct mux_dev? */
572 struct device *mux_dev = dev->dev;
573
574 dev->i2c_mux_adap[mux_no] = i2c_add_mux_adapter(i2c_parent,
575 mux_dev,
576 dev /* mux_priv */,
577 0,
578 mux_no /* chan_id */,
579 0 /* class */,
580 &cx231xx_i2c_mux_select,
581 NULL);
582
583 if (!dev->i2c_mux_adap[mux_no])
584 dev_warn(dev->dev,
585 "i2c mux %d register FAILED\n", mux_no);
586
587 return 0;
588}
589
590void cx231xx_i2c_mux_unregister(struct cx231xx *dev, int mux_no)
591{
592 i2c_del_mux_adapter(dev->i2c_mux_adap[mux_no]);
593 dev->i2c_mux_adap[mux_no] = NULL;
594}
595
596struct i2c_adapter *cx231xx_get_i2c_adap(struct cx231xx *dev, int i2c_port)
597{
598 switch (i2c_port) {
599 case I2C_0:
600 return &dev->i2c_bus[0].i2c_adap;
601 case I2C_1:
602 return &dev->i2c_bus[1].i2c_adap;
603 case I2C_2:
604 return &dev->i2c_bus[2].i2c_adap;
605 case I2C_1_MUX_1:
606 return dev->i2c_mux_adap[0];
607 case I2C_1_MUX_3:
608 return dev->i2c_mux_adap[1];
609 default:
610 return NULL;
611 }
612}
613EXPORT_SYMBOL_GPL(cx231xx_get_i2c_adap);
diff --git a/drivers/media/usb/cx231xx/cx231xx-input.c b/drivers/media/usb/cx231xx/cx231xx-input.c
index 05f0434919d4..15d8d1b5f05c 100644
--- a/drivers/media/usb/cx231xx/cx231xx-input.c
+++ b/drivers/media/usb/cx231xx/cx231xx-input.c
@@ -19,7 +19,6 @@
19 */ 19 */
20 20
21#include "cx231xx.h" 21#include "cx231xx.h"
22#include <linux/usb.h>
23#include <linux/slab.h> 22#include <linux/slab.h>
24#include <linux/bitrev.h> 23#include <linux/bitrev.h>
25 24
@@ -63,7 +62,7 @@ int cx231xx_ir_init(struct cx231xx *dev)
63 struct i2c_board_info info; 62 struct i2c_board_info info;
64 u8 ir_i2c_bus; 63 u8 ir_i2c_bus;
65 64
66 dev_dbg(&dev->udev->dev, "%s\n", __func__); 65 dev_dbg(dev->dev, "%s\n", __func__);
67 66
68 /* Only initialize if a rc keycode map is defined */ 67 /* Only initialize if a rc keycode map is defined */
69 if (!cx231xx_boards[dev->model].rc_map_name) 68 if (!cx231xx_boards[dev->model].rc_map_name)
@@ -98,9 +97,10 @@ int cx231xx_ir_init(struct cx231xx *dev)
98 97
99 /* Load and bind ir-kbd-i2c */ 98 /* Load and bind ir-kbd-i2c */
100 ir_i2c_bus = cx231xx_boards[dev->model].ir_i2c_master; 99 ir_i2c_bus = cx231xx_boards[dev->model].ir_i2c_master;
101 dev_dbg(&dev->udev->dev, "Trying to bind ir at bus %d, addr 0x%02x\n", 100 dev_dbg(dev->dev, "Trying to bind ir at bus %d, addr 0x%02x\n",
102 ir_i2c_bus, info.addr); 101 ir_i2c_bus, info.addr);
103 dev->ir_i2c_client = i2c_new_device(&dev->i2c_bus[ir_i2c_bus].i2c_adap, &info); 102 dev->ir_i2c_client = i2c_new_device(
103 cx231xx_get_i2c_adap(dev, ir_i2c_bus), &info);
104 104
105 return 0; 105 return 0;
106} 106}
diff --git a/drivers/media/usb/cx231xx/cx231xx-pcb-cfg.c b/drivers/media/usb/cx231xx/cx231xx-pcb-cfg.c
index 3052c4c20229..5bc74149fcb9 100644
--- a/drivers/media/usb/cx231xx/cx231xx-pcb-cfg.c
+++ b/drivers/media/usb/cx231xx/cx231xx-pcb-cfg.c
@@ -703,9 +703,9 @@ int initialize_cx231xx(struct cx231xx *dev)
703 _current_scenario_idx = INDEX_BUSPOWER_DIF_ONLY; 703 _current_scenario_idx = INDEX_BUSPOWER_DIF_ONLY;
704 break; 704 break;
705 default: 705 default:
706 cx231xx_info("bad config in buspower!!!!\n"); 706 dev_err(dev->dev,
707 cx231xx_info("config_info=%x\n", 707 "bad config in buspower!!!!\nconfig_info=%x\n",
708 (config_info & BUSPOWER_MASK)); 708 config_info & BUSPOWER_MASK);
709 return 1; 709 return 1;
710 } 710 }
711 } else { /* self-power */ 711 } else { /* self-power */
@@ -768,9 +768,9 @@ int initialize_cx231xx(struct cx231xx *dev)
768 _current_scenario_idx = INDEX_SELFPOWER_COMPRESSOR; 768 _current_scenario_idx = INDEX_SELFPOWER_COMPRESSOR;
769 break; 769 break;
770 default: 770 default:
771 cx231xx_info("bad senario!!!!!\n"); 771 dev_err(dev->dev,
772 cx231xx_info("config_info=%x\n", 772 "bad senario!!!!!\nconfig_info=%x\n",
773 (config_info & SELFPOWER_MASK)); 773 config_info & SELFPOWER_MASK);
774 return -ENODEV; 774 return -ENODEV;
775 } 775 }
776 } 776 }
@@ -781,18 +781,29 @@ int initialize_cx231xx(struct cx231xx *dev)
781 sizeof(struct pcb_config)); 781 sizeof(struct pcb_config));
782 782
783 if (pcb_debug) { 783 if (pcb_debug) {
784 cx231xx_info("SC(0x00) register = 0x%x\n", config_info); 784 dev_info(dev->dev,
785 cx231xx_info("scenario %d\n", 785 "SC(0x00) register = 0x%x\n", config_info);
786 (dev->current_pcb_config.index) + 1); 786 dev_info(dev->dev,
787 cx231xx_info("type=%x\n", dev->current_pcb_config.type); 787 "scenario %d\n",
788 cx231xx_info("mode=%x\n", dev->current_pcb_config.mode); 788 (dev->current_pcb_config.index) + 1);
789 cx231xx_info("speed=%x\n", dev->current_pcb_config.speed); 789 dev_info(dev->dev,
790 cx231xx_info("ts1_source=%x\n", 790 "type=%x\n",
791 dev->current_pcb_config.ts1_source); 791 dev->current_pcb_config.type);
792 cx231xx_info("ts2_source=%x\n", 792 dev_info(dev->dev,
793 dev->current_pcb_config.ts2_source); 793 "mode=%x\n",
794 cx231xx_info("analog_source=%x\n", 794 dev->current_pcb_config.mode);
795 dev->current_pcb_config.analog_source); 795 dev_info(dev->dev,
796 "speed=%x\n",
797 dev->current_pcb_config.speed);
798 dev_info(dev->dev,
799 "ts1_source=%x\n",
800 dev->current_pcb_config.ts1_source);
801 dev_info(dev->dev,
802 "ts2_source=%x\n",
803 dev->current_pcb_config.ts2_source);
804 dev_info(dev->dev,
805 "analog_source=%x\n",
806 dev->current_pcb_config.analog_source);
796 } 807 }
797 808
798 return 0; 809 return 0;
diff --git a/drivers/media/usb/cx231xx/cx231xx-vbi.c b/drivers/media/usb/cx231xx/cx231xx-vbi.c
index c02794274f51..80261ac40208 100644
--- a/drivers/media/usb/cx231xx/cx231xx-vbi.c
+++ b/drivers/media/usb/cx231xx/cx231xx-vbi.c
@@ -19,12 +19,12 @@
19 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 19 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */ 20 */
21 21
22#include "cx231xx.h"
22#include <linux/init.h> 23#include <linux/init.h>
23#include <linux/list.h> 24#include <linux/list.h>
24#include <linux/module.h> 25#include <linux/module.h>
25#include <linux/kernel.h> 26#include <linux/kernel.h>
26#include <linux/bitmap.h> 27#include <linux/bitmap.h>
27#include <linux/usb.h>
28#include <linux/i2c.h> 28#include <linux/i2c.h>
29#include <linux/mm.h> 29#include <linux/mm.h>
30#include <linux/mutex.h> 30#include <linux/mutex.h>
@@ -35,7 +35,6 @@
35#include <media/msp3400.h> 35#include <media/msp3400.h>
36#include <media/tuner.h> 36#include <media/tuner.h>
37 37
38#include "cx231xx.h"
39#include "cx231xx-vbi.h" 38#include "cx231xx-vbi.h"
40 39
41static inline void print_err_status(struct cx231xx *dev, int packet, int status) 40static inline void print_err_status(struct cx231xx *dev, int packet, int status)
@@ -69,11 +68,12 @@ static inline void print_err_status(struct cx231xx *dev, int packet, int status)
69 break; 68 break;
70 } 69 }
71 if (packet < 0) { 70 if (packet < 0) {
72 cx231xx_err("URB status %d [%s].\n", status, 71 dev_err(dev->dev,
73 errmsg); 72 "URB status %d [%s].\n", status, errmsg);
74 } else { 73 } else {
75 cx231xx_err("URB packet %d, status %d [%s].\n", 74 dev_err(dev->dev,
76 packet, status, errmsg); 75 "URB packet %d, status %d [%s].\n",
76 packet, status, errmsg);
77 } 77 }
78} 78}
79 79
@@ -316,8 +316,8 @@ static void cx231xx_irq_vbi_callback(struct urb *urb)
316 case -ESHUTDOWN: 316 case -ESHUTDOWN:
317 return; 317 return;
318 default: /* error */ 318 default: /* error */
319 cx231xx_err("urb completition error %d.\n", 319 dev_err(dev->dev,
320 urb->status); 320 "urb completition error %d.\n", urb->status);
321 break; 321 break;
322 } 322 }
323 323
@@ -331,8 +331,8 @@ static void cx231xx_irq_vbi_callback(struct urb *urb)
331 331
332 urb->status = usb_submit_urb(urb, GFP_ATOMIC); 332 urb->status = usb_submit_urb(urb, GFP_ATOMIC);
333 if (urb->status) { 333 if (urb->status) {
334 cx231xx_err("urb resubmit failed (error=%i)\n", 334 dev_err(dev->dev, "urb resubmit failed (error=%i)\n",
335 urb->status); 335 urb->status);
336 } 336 }
337} 337}
338 338
@@ -344,7 +344,7 @@ void cx231xx_uninit_vbi_isoc(struct cx231xx *dev)
344 struct urb *urb; 344 struct urb *urb;
345 int i; 345 int i;
346 346
347 cx231xx_info("called cx231xx_uninit_vbi_isoc\n"); 347 dev_dbg(dev->dev, "called cx231xx_uninit_vbi_isoc\n");
348 348
349 dev->vbi_mode.bulk_ctl.nfields = -1; 349 dev->vbi_mode.bulk_ctl.nfields = -1;
350 for (i = 0; i < dev->vbi_mode.bulk_ctl.num_bufs; i++) { 350 for (i = 0; i < dev->vbi_mode.bulk_ctl.num_bufs; i++) {
@@ -393,7 +393,7 @@ int cx231xx_init_vbi_isoc(struct cx231xx *dev, int max_packets,
393 struct urb *urb; 393 struct urb *urb;
394 int rc; 394 int rc;
395 395
396 cx231xx_info("called cx231xx_vbi_isoc\n"); 396 dev_dbg(dev->dev, "called cx231xx_vbi_isoc\n");
397 397
398 /* De-allocates all pending stuff */ 398 /* De-allocates all pending stuff */
399 cx231xx_uninit_vbi_isoc(dev); 399 cx231xx_uninit_vbi_isoc(dev);
@@ -419,14 +419,16 @@ int cx231xx_init_vbi_isoc(struct cx231xx *dev, int max_packets,
419 dev->vbi_mode.bulk_ctl.urb = kzalloc(sizeof(void *) * num_bufs, 419 dev->vbi_mode.bulk_ctl.urb = kzalloc(sizeof(void *) * num_bufs,
420 GFP_KERNEL); 420 GFP_KERNEL);
421 if (!dev->vbi_mode.bulk_ctl.urb) { 421 if (!dev->vbi_mode.bulk_ctl.urb) {
422 cx231xx_errdev("cannot alloc memory for usb buffers\n"); 422 dev_err(dev->dev,
423 "cannot alloc memory for usb buffers\n");
423 return -ENOMEM; 424 return -ENOMEM;
424 } 425 }
425 426
426 dev->vbi_mode.bulk_ctl.transfer_buffer = 427 dev->vbi_mode.bulk_ctl.transfer_buffer =
427 kzalloc(sizeof(void *) * num_bufs, GFP_KERNEL); 428 kzalloc(sizeof(void *) * num_bufs, GFP_KERNEL);
428 if (!dev->vbi_mode.bulk_ctl.transfer_buffer) { 429 if (!dev->vbi_mode.bulk_ctl.transfer_buffer) {
429 cx231xx_errdev("cannot allocate memory for usbtransfer\n"); 430 dev_err(dev->dev,
431 "cannot allocate memory for usbtransfer\n");
430 kfree(dev->vbi_mode.bulk_ctl.urb); 432 kfree(dev->vbi_mode.bulk_ctl.urb);
431 return -ENOMEM; 433 return -ENOMEM;
432 } 434 }
@@ -441,7 +443,8 @@ int cx231xx_init_vbi_isoc(struct cx231xx *dev, int max_packets,
441 443
442 urb = usb_alloc_urb(0, GFP_KERNEL); 444 urb = usb_alloc_urb(0, GFP_KERNEL);
443 if (!urb) { 445 if (!urb) {
444 cx231xx_err("cannot alloc bulk_ctl.urb %i\n", i); 446 dev_err(dev->dev,
447 "cannot alloc bulk_ctl.urb %i\n", i);
445 cx231xx_uninit_vbi_isoc(dev); 448 cx231xx_uninit_vbi_isoc(dev);
446 return -ENOMEM; 449 return -ENOMEM;
447 } 450 }
@@ -451,9 +454,10 @@ int cx231xx_init_vbi_isoc(struct cx231xx *dev, int max_packets,
451 dev->vbi_mode.bulk_ctl.transfer_buffer[i] = 454 dev->vbi_mode.bulk_ctl.transfer_buffer[i] =
452 kzalloc(sb_size, GFP_KERNEL); 455 kzalloc(sb_size, GFP_KERNEL);
453 if (!dev->vbi_mode.bulk_ctl.transfer_buffer[i]) { 456 if (!dev->vbi_mode.bulk_ctl.transfer_buffer[i]) {
454 cx231xx_err("unable to allocate %i bytes for transfer" 457 dev_err(dev->dev,
455 " buffer %i%s\n", sb_size, i, 458 "unable to allocate %i bytes for transfer buffer %i%s\n",
456 in_interrupt() ? " while in int" : ""); 459 sb_size, i,
460 in_interrupt() ? " while in int" : "");
457 cx231xx_uninit_vbi_isoc(dev); 461 cx231xx_uninit_vbi_isoc(dev);
458 return -ENOMEM; 462 return -ENOMEM;
459 } 463 }
@@ -470,8 +474,8 @@ int cx231xx_init_vbi_isoc(struct cx231xx *dev, int max_packets,
470 for (i = 0; i < dev->vbi_mode.bulk_ctl.num_bufs; i++) { 474 for (i = 0; i < dev->vbi_mode.bulk_ctl.num_bufs; i++) {
471 rc = usb_submit_urb(dev->vbi_mode.bulk_ctl.urb[i], GFP_ATOMIC); 475 rc = usb_submit_urb(dev->vbi_mode.bulk_ctl.urb[i], GFP_ATOMIC);
472 if (rc) { 476 if (rc) {
473 cx231xx_err("submit of urb %i failed (error=%i)\n", i, 477 dev_err(dev->dev,
474 rc); 478 "submit of urb %i failed (error=%i)\n", i, rc);
475 cx231xx_uninit_vbi_isoc(dev); 479 cx231xx_uninit_vbi_isoc(dev);
476 return rc; 480 return rc;
477 } 481 }
@@ -522,7 +526,7 @@ static inline void vbi_buffer_filled(struct cx231xx *dev,
522 struct cx231xx_buffer *buf) 526 struct cx231xx_buffer *buf)
523{ 527{
524 /* Advice that buffer was filled */ 528 /* Advice that buffer was filled */
525 /* cx231xx_info("[%p/%d] wakeup\n", buf, buf->vb.i); */ 529 /* dev_dbg(dev->dev, "[%p/%d] wakeup\n", buf, buf->vb.i); */
526 530
527 buf->vb.state = VIDEOBUF_DONE; 531 buf->vb.state = VIDEOBUF_DONE;
528 buf->vb.field_count++; 532 buf->vb.field_count++;
@@ -614,7 +618,7 @@ static inline void get_next_vbi_buf(struct cx231xx_dmaqueue *dma_q,
614 char *outp; 618 char *outp;
615 619
616 if (list_empty(&dma_q->active)) { 620 if (list_empty(&dma_q->active)) {
617 cx231xx_err("No active queue to serve\n"); 621 dev_err(dev->dev, "No active queue to serve\n");
618 dev->vbi_mode.bulk_ctl.buf = NULL; 622 dev->vbi_mode.bulk_ctl.buf = NULL;
619 *buf = NULL; 623 *buf = NULL;
620 return; 624 return;
diff --git a/drivers/media/usb/cx231xx/cx231xx-video.c b/drivers/media/usb/cx231xx/cx231xx-video.c
index 3b3ada6562ca..53ca12c1ff69 100644
--- a/drivers/media/usb/cx231xx/cx231xx-video.c
+++ b/drivers/media/usb/cx231xx/cx231xx-video.c
@@ -22,12 +22,12 @@
22 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 22 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 */ 23 */
24 24
25#include "cx231xx.h"
25#include <linux/init.h> 26#include <linux/init.h>
26#include <linux/list.h> 27#include <linux/list.h>
27#include <linux/module.h> 28#include <linux/module.h>
28#include <linux/kernel.h> 29#include <linux/kernel.h>
29#include <linux/bitmap.h> 30#include <linux/bitmap.h>
30#include <linux/usb.h>
31#include <linux/i2c.h> 31#include <linux/i2c.h>
32#include <linux/mm.h> 32#include <linux/mm.h>
33#include <linux/mutex.h> 33#include <linux/mutex.h>
@@ -41,10 +41,9 @@
41 41
42#include "dvb_frontend.h" 42#include "dvb_frontend.h"
43 43
44#include "cx231xx.h"
45#include "cx231xx-vbi.h" 44#include "cx231xx-vbi.h"
46 45
47#define CX231XX_VERSION "0.0.2" 46#define CX231XX_VERSION "0.0.3"
48 47
49#define DRIVER_AUTHOR "Srinivasa Deevi <srinivasa.deevi@conexant.com>" 48#define DRIVER_AUTHOR "Srinivasa Deevi <srinivasa.deevi@conexant.com>"
50#define DRIVER_DESC "Conexant cx231xx based USB video device driver" 49#define DRIVER_DESC "Conexant cx231xx based USB video device driver"
@@ -737,8 +736,9 @@ buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb,
737 if (!dev->video_mode.bulk_ctl.num_bufs) 736 if (!dev->video_mode.bulk_ctl.num_bufs)
738 urb_init = 1; 737 urb_init = 1;
739 } 738 }
740 /*cx231xx_info("urb_init=%d dev->video_mode.max_pkt_size=%d\n", 739 dev_dbg(dev->dev,
741 urb_init, dev->video_mode.max_pkt_size);*/ 740 "urb_init=%d dev->video_mode.max_pkt_size=%d\n",
741 urb_init, dev->video_mode.max_pkt_size);
742 if (urb_init) { 742 if (urb_init) {
743 dev->mode_tv = 0; 743 dev->mode_tv = 0;
744 if (dev->USE_ISO) 744 if (dev->USE_ISO)
@@ -809,7 +809,7 @@ void video_mux(struct cx231xx *dev, int index)
809 809
810 cx231xx_set_audio_input(dev, dev->ctl_ainput); 810 cx231xx_set_audio_input(dev, dev->ctl_ainput);
811 811
812 cx231xx_info("video_mux : %d\n", index); 812 dev_dbg(dev->dev, "video_mux : %d\n", index);
813 813
814 /* do mode control overrides if required */ 814 /* do mode control overrides if required */
815 cx231xx_do_mode_ctrl_overrides(dev); 815 cx231xx_do_mode_ctrl_overrides(dev);
@@ -861,7 +861,7 @@ static void res_free(struct cx231xx_fh *fh)
861static int check_dev(struct cx231xx *dev) 861static int check_dev(struct cx231xx *dev)
862{ 862{
863 if (dev->state & DEV_DISCONNECTED) { 863 if (dev->state & DEV_DISCONNECTED) {
864 cx231xx_errdev("v4l2 ioctl: device not present\n"); 864 dev_err(dev->dev, "v4l2 ioctl: device not present\n");
865 return -ENODEV; 865 return -ENODEV;
866 } 866 }
867 return 0; 867 return 0;
@@ -953,12 +953,13 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
953 return -EINVAL; 953 return -EINVAL;
954 954
955 if (videobuf_queue_is_busy(&fh->vb_vidq)) { 955 if (videobuf_queue_is_busy(&fh->vb_vidq)) {
956 cx231xx_errdev("%s queue busy\n", __func__); 956 dev_err(dev->dev, "%s: queue busy\n", __func__);
957 return -EBUSY; 957 return -EBUSY;
958 } 958 }
959 959
960 if (dev->stream_on && !fh->stream_on) { 960 if (dev->stream_on && !fh->stream_on) {
961 cx231xx_errdev("%s device in use by another fh\n", __func__); 961 dev_err(dev->dev,
962 "%s: device in use by another fh\n", __func__);
962 return -EBUSY; 963 return -EBUSY;
963 } 964 }
964 965
@@ -967,7 +968,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
967 dev->height = f->fmt.pix.height; 968 dev->height = f->fmt.pix.height;
968 dev->format = fmt; 969 dev->format = fmt;
969 970
970 v4l2_fill_mbus_format(&mbus_fmt, &f->fmt.pix, V4L2_MBUS_FMT_FIXED); 971 v4l2_fill_mbus_format(&mbus_fmt, &f->fmt.pix, MEDIA_BUS_FMT_FIXED);
971 call_all(dev, video, s_mbus_fmt, &mbus_fmt); 972 call_all(dev, video, s_mbus_fmt, &mbus_fmt);
972 v4l2_fill_pix_format(&f->fmt.pix, &mbus_fmt); 973 v4l2_fill_pix_format(&f->fmt.pix, &mbus_fmt);
973 974
@@ -1012,7 +1013,7 @@ static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id norm)
1012 resolution (since a standard change effects things like the number 1013 resolution (since a standard change effects things like the number
1013 of lines in VACT, etc) */ 1014 of lines in VACT, etc) */
1014 memset(&mbus_fmt, 0, sizeof(mbus_fmt)); 1015 memset(&mbus_fmt, 0, sizeof(mbus_fmt));
1015 mbus_fmt.code = V4L2_MBUS_FMT_FIXED; 1016 mbus_fmt.code = MEDIA_BUS_FMT_FIXED;
1016 mbus_fmt.width = dev->width; 1017 mbus_fmt.width = dev->width;
1017 mbus_fmt.height = dev->height; 1018 mbus_fmt.height = dev->height;
1018 call_all(dev, video, s_mbus_fmt, &mbus_fmt); 1019 call_all(dev, video, s_mbus_fmt, &mbus_fmt);
@@ -1176,9 +1177,9 @@ int cx231xx_s_frequency(struct file *file, void *priv,
1176 int rc; 1177 int rc;
1177 u32 if_frequency = 5400000; 1178 u32 if_frequency = 5400000;
1178 1179
1179 cx231xx_info("Enter vidioc_s_frequency()f->frequency=%d;f->type=%d\n", 1180 dev_dbg(dev->dev,
1180 f->frequency, f->type); 1181 "Enter vidioc_s_frequency()f->frequency=%d;f->type=%d\n",
1181 /*cx231xx_info("f->type: 1-radio 2-analogTV 3-digitalTV\n");*/ 1182 f->frequency, f->type);
1182 1183
1183 rc = check_dev(dev); 1184 rc = check_dev(dev);
1184 if (rc < 0) 1185 if (rc < 0)
@@ -1213,13 +1214,14 @@ int cx231xx_s_frequency(struct file *file, void *priv,
1213 else if (dev->norm & V4L2_STD_SECAM_LC) 1214 else if (dev->norm & V4L2_STD_SECAM_LC)
1214 if_frequency = 1250000; /*1.25MHz */ 1215 if_frequency = 1250000; /*1.25MHz */
1215 1216
1216 cx231xx_info("if_frequency is set to %d\n", if_frequency); 1217 dev_dbg(dev->dev,
1218 "if_frequency is set to %d\n", if_frequency);
1217 cx231xx_set_Colibri_For_LowIF(dev, if_frequency, 1, 1); 1219 cx231xx_set_Colibri_For_LowIF(dev, if_frequency, 1, 1);
1218 1220
1219 update_HH_register_after_set_DIF(dev); 1221 update_HH_register_after_set_DIF(dev);
1220 } 1222 }
1221 1223
1222 cx231xx_info("Set New FREQUENCY to %d\n", f->frequency); 1224 dev_dbg(dev->dev, "Set New FREQUENCY to %d\n", f->frequency);
1223 1225
1224 return rc; 1226 return rc;
1225} 1227}
@@ -1523,7 +1525,8 @@ static int vidioc_s_fmt_vbi_cap(struct file *file, void *priv,
1523 struct cx231xx *dev = fh->dev; 1525 struct cx231xx *dev = fh->dev;
1524 1526
1525 if (dev->vbi_stream_on && !fh->stream_on) { 1527 if (dev->vbi_stream_on && !fh->stream_on) {
1526 cx231xx_errdev("%s device in use by another fh\n", __func__); 1528 dev_err(dev->dev,
1529 "%s device in use by another fh\n", __func__);
1527 return -EBUSY; 1530 return -EBUSY;
1528 } 1531 }
1529 return vidioc_try_fmt_vbi_cap(file, priv, f); 1532 return vidioc_try_fmt_vbi_cap(file, priv, f);
@@ -1642,17 +1645,15 @@ static int cx231xx_v4l2_open(struct file *filp)
1642#if 0 1645#if 0
1643 errCode = cx231xx_set_mode(dev, CX231XX_ANALOG_MODE); 1646 errCode = cx231xx_set_mode(dev, CX231XX_ANALOG_MODE);
1644 if (errCode < 0) { 1647 if (errCode < 0) {
1645 cx231xx_errdev 1648 dev_err(dev->dev,
1646 ("Device locked on digital mode. Can't open analog\n"); 1649 "Device locked on digital mode. Can't open analog\n");
1647 return -EBUSY; 1650 return -EBUSY;
1648 } 1651 }
1649#endif 1652#endif
1650 1653
1651 fh = kzalloc(sizeof(struct cx231xx_fh), GFP_KERNEL); 1654 fh = kzalloc(sizeof(struct cx231xx_fh), GFP_KERNEL);
1652 if (!fh) { 1655 if (!fh)
1653 cx231xx_errdev("cx231xx-video.c: Out of memory?!\n");
1654 return -ENOMEM; 1656 return -ENOMEM;
1655 }
1656 if (mutex_lock_interruptible(&dev->lock)) { 1657 if (mutex_lock_interruptible(&dev->lock)) {
1657 kfree(fh); 1658 kfree(fh);
1658 return -ERESTARTSYS; 1659 return -ERESTARTSYS;
@@ -1736,8 +1737,8 @@ void cx231xx_release_analog_resources(struct cx231xx *dev)
1736 dev->radio_dev = NULL; 1737 dev->radio_dev = NULL;
1737 } 1738 }
1738 if (dev->vbi_dev) { 1739 if (dev->vbi_dev) {
1739 cx231xx_info("V4L2 device %s deregistered\n", 1740 dev_info(dev->dev, "V4L2 device %s deregistered\n",
1740 video_device_node_name(dev->vbi_dev)); 1741 video_device_node_name(dev->vbi_dev));
1741 if (video_is_registered(dev->vbi_dev)) 1742 if (video_is_registered(dev->vbi_dev))
1742 video_unregister_device(dev->vbi_dev); 1743 video_unregister_device(dev->vbi_dev);
1743 else 1744 else
@@ -1745,8 +1746,8 @@ void cx231xx_release_analog_resources(struct cx231xx *dev)
1745 dev->vbi_dev = NULL; 1746 dev->vbi_dev = NULL;
1746 } 1747 }
1747 if (dev->vdev) { 1748 if (dev->vdev) {
1748 cx231xx_info("V4L2 device %s deregistered\n", 1749 dev_info(dev->dev, "V4L2 device %s deregistered\n",
1749 video_device_node_name(dev->vdev)); 1750 video_device_node_name(dev->vdev));
1750 1751
1751 if (dev->board.has_417) 1752 if (dev->board.has_417)
1752 cx231xx_417_unregister(dev); 1753 cx231xx_417_unregister(dev);
@@ -2080,8 +2081,7 @@ int cx231xx_register_analog_devices(struct cx231xx *dev)
2080{ 2081{
2081 int ret; 2082 int ret;
2082 2083
2083 cx231xx_info("%s: v4l2 driver version %s\n", 2084 dev_info(dev->dev, "v4l2 driver version %s\n", CX231XX_VERSION);
2084 dev->name, CX231XX_VERSION);
2085 2085
2086 /* set default norm */ 2086 /* set default norm */
2087 dev->norm = V4L2_STD_PAL; 2087 dev->norm = V4L2_STD_PAL;
@@ -2119,7 +2119,7 @@ int cx231xx_register_analog_devices(struct cx231xx *dev)
2119 /* allocate and fill video video_device struct */ 2119 /* allocate and fill video video_device struct */
2120 dev->vdev = cx231xx_vdev_init(dev, &cx231xx_video_template, "video"); 2120 dev->vdev = cx231xx_vdev_init(dev, &cx231xx_video_template, "video");
2121 if (!dev->vdev) { 2121 if (!dev->vdev) {
2122 cx231xx_errdev("cannot allocate video_device.\n"); 2122 dev_err(dev->dev, "cannot allocate video_device.\n");
2123 return -ENODEV; 2123 return -ENODEV;
2124 } 2124 }
2125 2125
@@ -2128,13 +2128,14 @@ int cx231xx_register_analog_devices(struct cx231xx *dev)
2128 ret = video_register_device(dev->vdev, VFL_TYPE_GRABBER, 2128 ret = video_register_device(dev->vdev, VFL_TYPE_GRABBER,
2129 video_nr[dev->devno]); 2129 video_nr[dev->devno]);
2130 if (ret) { 2130 if (ret) {
2131 cx231xx_errdev("unable to register video device (error=%i).\n", 2131 dev_err(dev->dev,
2132 ret); 2132 "unable to register video device (error=%i).\n",
2133 ret);
2133 return ret; 2134 return ret;
2134 } 2135 }
2135 2136
2136 cx231xx_info("%s/0: registered device %s [v4l2]\n", 2137 dev_info(dev->dev, "Registered video device %s [v4l2]\n",
2137 dev->name, video_device_node_name(dev->vdev)); 2138 video_device_node_name(dev->vdev));
2138 2139
2139 /* Initialize VBI template */ 2140 /* Initialize VBI template */
2140 cx231xx_vbi_template = cx231xx_video_template; 2141 cx231xx_vbi_template = cx231xx_video_template;
@@ -2144,7 +2145,7 @@ int cx231xx_register_analog_devices(struct cx231xx *dev)
2144 dev->vbi_dev = cx231xx_vdev_init(dev, &cx231xx_vbi_template, "vbi"); 2145 dev->vbi_dev = cx231xx_vdev_init(dev, &cx231xx_vbi_template, "vbi");
2145 2146
2146 if (!dev->vbi_dev) { 2147 if (!dev->vbi_dev) {
2147 cx231xx_errdev("cannot allocate video_device.\n"); 2148 dev_err(dev->dev, "cannot allocate video_device.\n");
2148 return -ENODEV; 2149 return -ENODEV;
2149 } 2150 }
2150 dev->vbi_dev->ctrl_handler = &dev->ctrl_handler; 2151 dev->vbi_dev->ctrl_handler = &dev->ctrl_handler;
@@ -2152,34 +2153,32 @@ int cx231xx_register_analog_devices(struct cx231xx *dev)
2152 ret = video_register_device(dev->vbi_dev, VFL_TYPE_VBI, 2153 ret = video_register_device(dev->vbi_dev, VFL_TYPE_VBI,
2153 vbi_nr[dev->devno]); 2154 vbi_nr[dev->devno]);
2154 if (ret < 0) { 2155 if (ret < 0) {
2155 cx231xx_errdev("unable to register vbi device\n"); 2156 dev_err(dev->dev, "unable to register vbi device\n");
2156 return ret; 2157 return ret;
2157 } 2158 }
2158 2159
2159 cx231xx_info("%s/0: registered device %s\n", 2160 dev_info(dev->dev, "Registered VBI device %s\n",
2160 dev->name, video_device_node_name(dev->vbi_dev)); 2161 video_device_node_name(dev->vbi_dev));
2161 2162
2162 if (cx231xx_boards[dev->model].radio.type == CX231XX_RADIO) { 2163 if (cx231xx_boards[dev->model].radio.type == CX231XX_RADIO) {
2163 dev->radio_dev = cx231xx_vdev_init(dev, &cx231xx_radio_template, 2164 dev->radio_dev = cx231xx_vdev_init(dev, &cx231xx_radio_template,
2164 "radio"); 2165 "radio");
2165 if (!dev->radio_dev) { 2166 if (!dev->radio_dev) {
2166 cx231xx_errdev("cannot allocate video_device.\n"); 2167 dev_err(dev->dev,
2168 "cannot allocate video_device.\n");
2167 return -ENODEV; 2169 return -ENODEV;
2168 } 2170 }
2169 dev->radio_dev->ctrl_handler = &dev->radio_ctrl_handler; 2171 dev->radio_dev->ctrl_handler = &dev->radio_ctrl_handler;
2170 ret = video_register_device(dev->radio_dev, VFL_TYPE_RADIO, 2172 ret = video_register_device(dev->radio_dev, VFL_TYPE_RADIO,
2171 radio_nr[dev->devno]); 2173 radio_nr[dev->devno]);
2172 if (ret < 0) { 2174 if (ret < 0) {
2173 cx231xx_errdev("can't register radio device\n"); 2175 dev_err(dev->dev,
2176 "can't register radio device\n");
2174 return ret; 2177 return ret;
2175 } 2178 }
2176 cx231xx_info("Registered radio device as %s\n", 2179 dev_info(dev->dev, "Registered radio device as %s\n",
2177 video_device_node_name(dev->radio_dev)); 2180 video_device_node_name(dev->radio_dev));
2178 } 2181 }
2179 2182
2180 cx231xx_info("V4L2 device registered as %s and %s\n",
2181 video_device_node_name(dev->vdev),
2182 video_device_node_name(dev->vbi_dev));
2183
2184 return 0; 2183 return 0;
2185} 2184}
diff --git a/drivers/media/usb/cx231xx/cx231xx.h b/drivers/media/usb/cx231xx/cx231xx.h
index aeb1bf42b88d..f9e262eb0db9 100644
--- a/drivers/media/usb/cx231xx/cx231xx.h
+++ b/drivers/media/usb/cx231xx/cx231xx.h
@@ -28,6 +28,7 @@
28#include <linux/i2c.h> 28#include <linux/i2c.h>
29#include <linux/workqueue.h> 29#include <linux/workqueue.h>
30#include <linux/mutex.h> 30#include <linux/mutex.h>
31#include <linux/usb.h>
31 32
32#include <media/cx2341x.h> 33#include <media/cx2341x.h>
33 34
@@ -322,10 +323,11 @@ enum cx231xx_decoder {
322}; 323};
323 324
324enum CX231XX_I2C_MASTER_PORT { 325enum CX231XX_I2C_MASTER_PORT {
325 I2C_0 = 0, 326 I2C_0 = 0, /* master 0 - internal connection */
326 I2C_1 = 1, 327 I2C_1 = 1, /* master 1 - used with mux */
327 I2C_2 = 2, 328 I2C_2 = 2, /* master 2 */
328 I2C_3 = 3 329 I2C_1_MUX_1 = 3, /* master 1 - port 1 (I2C_DEMOD_EN = 0) */
330 I2C_1_MUX_3 = 4 /* master 1 - port 3 (I2C_DEMOD_EN = 1) */
329}; 331};
330 332
331struct cx231xx_board { 333struct cx231xx_board {
@@ -367,7 +369,6 @@ struct cx231xx_board {
367 unsigned int valid:1; 369 unsigned int valid:1;
368 unsigned int no_alt_vanc:1; 370 unsigned int no_alt_vanc:1;
369 unsigned int external_av:1; 371 unsigned int external_av:1;
370 unsigned int dont_use_port_3:1;
371 372
372 unsigned char xclk, i2c_speed; 373 unsigned char xclk, i2c_speed;
373 374
@@ -472,7 +473,6 @@ struct cx231xx_i2c {
472 473
473 /* i2c i/o */ 474 /* i2c i/o */
474 struct i2c_adapter i2c_adap; 475 struct i2c_adapter i2c_adap;
475 struct i2c_client i2c_client;
476 u32 i2c_rc; 476 u32 i2c_rc;
477 477
478 /* different settings for each bus */ 478 /* different settings for each bus */
@@ -597,6 +597,7 @@ struct cx231xx {
597 char name[30]; /* name (including minor) of the device */ 597 char name[30]; /* name (including minor) of the device */
598 int model; /* index in the device_data struct */ 598 int model; /* index in the device_data struct */
599 int devno; /* marks the number of this device */ 599 int devno; /* marks the number of this device */
600 struct device *dev; /* pointer to USB interface's dev */
600 601
601 struct cx231xx_board board; 602 struct cx231xx_board board;
602 603
@@ -609,6 +610,8 @@ struct cx231xx {
609 unsigned int has_audio_class:1; 610 unsigned int has_audio_class:1;
610 unsigned int has_alsa_audio:1; 611 unsigned int has_alsa_audio:1;
611 612
613 unsigned int i2c_scan_running:1; /* true only during i2c_scan */
614
612 struct cx231xx_fmt *format; 615 struct cx231xx_fmt *format;
613 616
614 struct v4l2_device v4l2_dev; 617 struct v4l2_device v4l2_dev;
@@ -628,7 +631,10 @@ struct cx231xx {
628 631
629 /* I2C adapters: Master 1 & 2 (External) & Master 3 (Internal only) */ 632 /* I2C adapters: Master 1 & 2 (External) & Master 3 (Internal only) */
630 struct cx231xx_i2c i2c_bus[3]; 633 struct cx231xx_i2c i2c_bus[3];
634 struct i2c_adapter *i2c_mux_adap[2];
635
631 unsigned int xc_fw_load_done:1; 636 unsigned int xc_fw_load_done:1;
637 unsigned int port_3_switch_enabled:1;
632 /* locks */ 638 /* locks */
633 struct mutex gpio_i2c_lock; 639 struct mutex gpio_i2c_lock;
634 struct mutex i2c_lock; 640 struct mutex i2c_lock;
@@ -751,9 +757,12 @@ int cx231xx_set_analog_freq(struct cx231xx *dev, u32 freq);
751int cx231xx_reset_analog_tuner(struct cx231xx *dev); 757int cx231xx_reset_analog_tuner(struct cx231xx *dev);
752 758
753/* Provided by cx231xx-i2c.c */ 759/* Provided by cx231xx-i2c.c */
754void cx231xx_do_i2c_scan(struct cx231xx *dev, struct i2c_client *c); 760void cx231xx_do_i2c_scan(struct cx231xx *dev, int i2c_port);
755int cx231xx_i2c_register(struct cx231xx_i2c *bus); 761int cx231xx_i2c_register(struct cx231xx_i2c *bus);
756int cx231xx_i2c_unregister(struct cx231xx_i2c *bus); 762int cx231xx_i2c_unregister(struct cx231xx_i2c *bus);
763int cx231xx_i2c_mux_register(struct cx231xx *dev, int mux_no);
764void cx231xx_i2c_mux_unregister(struct cx231xx *dev, int mux_no);
765struct i2c_adapter *cx231xx_get_i2c_adap(struct cx231xx *dev, int i2c_port);
757 766
758/* Internal block control functions */ 767/* Internal block control functions */
759int cx231xx_read_i2c_master(struct cx231xx *dev, u8 dev_addr, u16 saddr, 768int cx231xx_read_i2c_master(struct cx231xx *dev, u8 dev_addr, u16 saddr,
@@ -802,7 +811,6 @@ void cx231xx_Setup_AFE_for_LowIF(struct cx231xx *dev);
802void reset_s5h1432_demod(struct cx231xx *dev); 811void reset_s5h1432_demod(struct cx231xx *dev);
803void cx231xx_dump_HH_reg(struct cx231xx *dev); 812void cx231xx_dump_HH_reg(struct cx231xx *dev);
804void update_HH_register_after_set_DIF(struct cx231xx *dev); 813void update_HH_register_after_set_DIF(struct cx231xx *dev);
805void cx231xx_dump_SC_reg(struct cx231xx *dev);
806 814
807 815
808 816
@@ -976,23 +984,6 @@ void cx231xx_ir_exit(struct cx231xx *dev);
976#define cx231xx_ir_exit(dev) (0) 984#define cx231xx_ir_exit(dev) (0)
977#endif 985#endif
978 986
979
980/* printk macros */
981
982#define cx231xx_err(fmt, arg...) do {\
983 printk(KERN_ERR fmt , ##arg); } while (0)
984
985#define cx231xx_errdev(fmt, arg...) do {\
986 printk(KERN_ERR "%s: "fmt,\
987 dev->name , ##arg); } while (0)
988
989#define cx231xx_info(fmt, arg...) do {\
990 printk(KERN_INFO "%s: "fmt,\
991 dev->name , ##arg); } while (0)
992#define cx231xx_warn(fmt, arg...) do {\
993 printk(KERN_WARNING "%s: "fmt,\
994 dev->name , ##arg); } while (0)
995
996static inline unsigned int norm_maxw(struct cx231xx *dev) 987static inline unsigned int norm_maxw(struct cx231xx *dev)
997{ 988{
998 if (dev->board.max_range_640_480) 989 if (dev->board.max_range_640_480)
diff --git a/drivers/media/usb/dvb-usb-v2/Kconfig b/drivers/media/usb/dvb-usb-v2/Kconfig
index 5b34323ad207..0982e734fab5 100644
--- a/drivers/media/usb/dvb-usb-v2/Kconfig
+++ b/drivers/media/usb/dvb-usb-v2/Kconfig
@@ -145,6 +145,9 @@ config DVB_USB_DVBSKY
145 tristate "DVBSky USB support" 145 tristate "DVBSky USB support"
146 depends on DVB_USB_V2 146 depends on DVB_USB_V2
147 select DVB_M88DS3103 if MEDIA_SUBDRV_AUTOSELECT 147 select DVB_M88DS3103 if MEDIA_SUBDRV_AUTOSELECT
148 select DVB_SI2168 if MEDIA_SUBDRV_AUTOSELECT
148 select MEDIA_TUNER_M88TS2022 if MEDIA_SUBDRV_AUTOSELECT 149 select MEDIA_TUNER_M88TS2022 if MEDIA_SUBDRV_AUTOSELECT
150 select MEDIA_TUNER_SI2157 if MEDIA_SUBDRV_AUTOSELECT
151 select DVB_SP2 if MEDIA_SUBDRV_AUTOSELECT
149 help 152 help
150 Say Y here to support the USB receivers from DVBSky. 153 Say Y here to support the USB receivers from DVBSky.
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
index 1896ab218b11..80a29f5377ea 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.c
+++ b/drivers/media/usb/dvb-usb-v2/af9035.c
@@ -1171,6 +1171,7 @@ static int it930x_frontend_attach(struct dvb_usb_adapter *adap)
1171 1171
1172 dev_dbg(&d->udev->dev, "adap->id=%d\n", adap->id); 1172 dev_dbg(&d->udev->dev, "adap->id=%d\n", adap->id);
1173 1173
1174 memset(&si2168_config, 0, sizeof(si2168_config));
1174 si2168_config.i2c_adapter = &adapter; 1175 si2168_config.i2c_adapter = &adapter;
1175 si2168_config.fe = &adap->fe[0]; 1176 si2168_config.fe = &adap->fe[0];
1176 si2168_config.ts_mode = SI2168_TS_SERIAL; 1177 si2168_config.ts_mode = SI2168_TS_SERIAL;
diff --git a/drivers/media/usb/dvb-usb-v2/dvbsky.c b/drivers/media/usb/dvb-usb-v2/dvbsky.c
index 34688c89df11..9b5add4499e3 100644
--- a/drivers/media/usb/dvb-usb-v2/dvbsky.c
+++ b/drivers/media/usb/dvb-usb-v2/dvbsky.c
@@ -21,10 +21,17 @@
21#include "dvb_usb.h" 21#include "dvb_usb.h"
22#include "m88ds3103.h" 22#include "m88ds3103.h"
23#include "m88ts2022.h" 23#include "m88ts2022.h"
24#include "sp2.h"
25#include "si2168.h"
26#include "si2157.h"
24 27
25#define DVBSKY_MSG_DELAY 0/*2000*/ 28#define DVBSKY_MSG_DELAY 0/*2000*/
26#define DVBSKY_BUF_LEN 64 29#define DVBSKY_BUF_LEN 64
27 30
31static int dvb_usb_dvbsky_disable_rc;
32module_param_named(disable_rc, dvb_usb_dvbsky_disable_rc, int, 0644);
33MODULE_PARM_DESC(disable_rc, "Disable inbuilt IR receiver.");
34
28DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); 35DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
29 36
30struct dvbsky_state { 37struct dvbsky_state {
@@ -32,7 +39,9 @@ struct dvbsky_state {
32 u8 ibuf[DVBSKY_BUF_LEN]; 39 u8 ibuf[DVBSKY_BUF_LEN];
33 u8 obuf[DVBSKY_BUF_LEN]; 40 u8 obuf[DVBSKY_BUF_LEN];
34 u8 last_lock; 41 u8 last_lock;
42 struct i2c_client *i2c_client_demod;
35 struct i2c_client *i2c_client_tuner; 43 struct i2c_client *i2c_client_tuner;
44 struct i2c_client *i2c_client_ci;
36 45
37 /* fe hook functions*/ 46 /* fe hook functions*/
38 int (*fe_set_voltage)(struct dvb_frontend *fe, 47 int (*fe_set_voltage)(struct dvb_frontend *fe,
@@ -96,8 +105,7 @@ static int dvbsky_gpio_ctrl(struct dvb_usb_device *d, u8 gport, u8 value)
96 obuf[2] = value; 105 obuf[2] = value;
97 ret = dvbsky_usb_generic_rw(d, obuf, 3, ibuf, 1); 106 ret = dvbsky_usb_generic_rw(d, obuf, 3, ibuf, 1);
98 if (ret) 107 if (ret)
99 dev_err(&d->udev->dev, "%s: %s() failed=%d\n", 108 dev_err(&d->udev->dev, "failed=%d\n", ret);
100 KBUILD_MODNAME, __func__, ret);
101 return ret; 109 return ret;
102} 110}
103 111
@@ -114,7 +122,7 @@ static int dvbsky_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
114 122
115 if (num > 2) { 123 if (num > 2) {
116 dev_err(&d->udev->dev, 124 dev_err(&d->udev->dev,
117 "dvbsky_usb: too many i2c messages[%d] than 2.", num); 125 "too many i2c messages[%d], max 2.", num);
118 ret = -EOPNOTSUPP; 126 ret = -EOPNOTSUPP;
119 goto i2c_error; 127 goto i2c_error;
120 } 128 }
@@ -122,7 +130,7 @@ static int dvbsky_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
122 if (num == 1) { 130 if (num == 1) {
123 if (msg[0].len > 60) { 131 if (msg[0].len > 60) {
124 dev_err(&d->udev->dev, 132 dev_err(&d->udev->dev,
125 "dvbsky_usb: too many i2c bytes[%d] than 60.", 133 "too many i2c bytes[%d], max 60.",
126 msg[0].len); 134 msg[0].len);
127 ret = -EOPNOTSUPP; 135 ret = -EOPNOTSUPP;
128 goto i2c_error; 136 goto i2c_error;
@@ -136,8 +144,7 @@ static int dvbsky_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
136 ret = dvbsky_usb_generic_rw(d, obuf, 4, 144 ret = dvbsky_usb_generic_rw(d, obuf, 4,
137 ibuf, msg[0].len + 1); 145 ibuf, msg[0].len + 1);
138 if (ret) 146 if (ret)
139 dev_err(&d->udev->dev, "%s: %s() failed=%d\n", 147 dev_err(&d->udev->dev, "failed=%d\n", ret);
140 KBUILD_MODNAME, __func__, ret);
141 if (!ret) 148 if (!ret)
142 memcpy(msg[0].buf, &ibuf[1], msg[0].len); 149 memcpy(msg[0].buf, &ibuf[1], msg[0].len);
143 } else { 150 } else {
@@ -149,13 +156,12 @@ static int dvbsky_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
149 ret = dvbsky_usb_generic_rw(d, obuf, 156 ret = dvbsky_usb_generic_rw(d, obuf,
150 msg[0].len + 3, ibuf, 1); 157 msg[0].len + 3, ibuf, 1);
151 if (ret) 158 if (ret)
152 dev_err(&d->udev->dev, "%s: %s() failed=%d\n", 159 dev_err(&d->udev->dev, "failed=%d\n", ret);
153 KBUILD_MODNAME, __func__, ret);
154 } 160 }
155 } else { 161 } else {
156 if ((msg[0].len > 60) || (msg[1].len > 60)) { 162 if ((msg[0].len > 60) || (msg[1].len > 60)) {
157 dev_err(&d->udev->dev, 163 dev_err(&d->udev->dev,
158 "dvbsky_usb: too many i2c bytes[w-%d][r-%d] than 60.", 164 "too many i2c bytes[w-%d][r-%d], max 60.",
159 msg[0].len, msg[1].len); 165 msg[0].len, msg[1].len);
160 ret = -EOPNOTSUPP; 166 ret = -EOPNOTSUPP;
161 goto i2c_error; 167 goto i2c_error;
@@ -169,8 +175,7 @@ static int dvbsky_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
169 ret = dvbsky_usb_generic_rw(d, obuf, 175 ret = dvbsky_usb_generic_rw(d, obuf,
170 msg[0].len + 4, ibuf, msg[1].len + 1); 176 msg[0].len + 4, ibuf, msg[1].len + 1);
171 if (ret) 177 if (ret)
172 dev_err(&d->udev->dev, "%s: %s() failed=%d\n", 178 dev_err(&d->udev->dev, "failed=%d\n", ret);
173 KBUILD_MODNAME, __func__, ret);
174 179
175 if (!ret) 180 if (!ret)
176 memcpy(msg[1].buf, &ibuf[1], msg[1].len); 181 memcpy(msg[1].buf, &ibuf[1], msg[1].len);
@@ -201,8 +206,7 @@ static int dvbsky_rc_query(struct dvb_usb_device *d)
201 obuf[0] = 0x10; 206 obuf[0] = 0x10;
202 ret = dvbsky_usb_generic_rw(d, obuf, 1, ibuf, 2); 207 ret = dvbsky_usb_generic_rw(d, obuf, 1, ibuf, 2);
203 if (ret) 208 if (ret)
204 dev_err(&d->udev->dev, "%s: %s() failed=%d\n", 209 dev_err(&d->udev->dev, "failed=%d\n", ret);
205 KBUILD_MODNAME, __func__, ret);
206 if (ret == 0) 210 if (ret == 0)
207 code = (ibuf[0] << 8) | ibuf[1]; 211 code = (ibuf[0] << 8) | ibuf[1];
208 if (code != 0xffff) { 212 if (code != 0xffff) {
@@ -218,6 +222,11 @@ static int dvbsky_rc_query(struct dvb_usb_device *d)
218 222
219static int dvbsky_get_rc_config(struct dvb_usb_device *d, struct dvb_usb_rc *rc) 223static int dvbsky_get_rc_config(struct dvb_usb_device *d, struct dvb_usb_rc *rc)
220{ 224{
225 if (dvb_usb_dvbsky_disable_rc) {
226 rc->map_name = NULL;
227 return 0;
228 }
229
221 rc->allowed_protos = RC_BIT_RC5; 230 rc->allowed_protos = RC_BIT_RC5;
222 rc->query = dvbsky_rc_query; 231 rc->query = dvbsky_rc_query;
223 rc->interval = 300; 232 rc->interval = 300;
@@ -265,8 +274,6 @@ static int dvbsky_read_mac_addr(struct dvb_usb_adapter *adap, u8 mac[6])
265 if (i2c_transfer(&d->i2c_adap, msg, 2) == 2) 274 if (i2c_transfer(&d->i2c_adap, msg, 2) == 2)
266 memcpy(mac, ibuf, 6); 275 memcpy(mac, ibuf, 6);
267 276
268 dev_info(&d->udev->dev, "dvbsky_usb MAC address=%pM\n", mac);
269
270 return 0; 277 return 0;
271} 278}
272 279
@@ -362,6 +369,300 @@ fail_attach:
362 return ret; 369 return ret;
363} 370}
364 371
372static int dvbsky_usb_ci_set_voltage(struct dvb_frontend *fe,
373 fe_sec_voltage_t voltage)
374{
375 struct dvb_usb_device *d = fe_to_d(fe);
376 struct dvbsky_state *state = d_to_priv(d);
377 u8 value;
378
379 if (voltage == SEC_VOLTAGE_OFF)
380 value = 0;
381 else
382 value = 1;
383 dvbsky_gpio_ctrl(d, 0x00, value);
384
385 return state->fe_set_voltage(fe, voltage);
386}
387
388static int dvbsky_ci_ctrl(void *priv, u8 read, int addr,
389 u8 data, int *mem)
390{
391 struct dvb_usb_device *d = priv;
392 int ret = 0;
393 u8 command[4], respond[2], command_size, respond_size;
394
395 command[1] = (u8)((addr >> 8) & 0xff); /*high part of address*/
396 command[2] = (u8)(addr & 0xff); /*low part of address*/
397 if (read) {
398 command[0] = 0x71;
399 command_size = 3;
400 respond_size = 2;
401 } else {
402 command[0] = 0x70;
403 command[3] = data;
404 command_size = 4;
405 respond_size = 1;
406 }
407 ret = dvbsky_usb_generic_rw(d, command, command_size,
408 respond, respond_size);
409 if (ret)
410 goto err;
411 if (read)
412 *mem = respond[1];
413 return ret;
414err:
415 dev_err(&d->udev->dev, "ci control failed=%d\n", ret);
416 return ret;
417}
418
419static const struct m88ds3103_config dvbsky_s960c_m88ds3103_config = {
420 .i2c_addr = 0x68,
421 .clock = 27000000,
422 .i2c_wr_max = 33,
423 .clock_out = 0,
424 .ts_mode = M88DS3103_TS_CI,
425 .ts_clk = 10000,
426 .ts_clk_pol = 1,
427 .agc = 0x99,
428 .lnb_hv_pol = 0,
429 .lnb_en_pol = 1,
430};
431
432static int dvbsky_s960c_attach(struct dvb_usb_adapter *adap)
433{
434 struct dvbsky_state *state = adap_to_priv(adap);
435 struct dvb_usb_device *d = adap_to_d(adap);
436 int ret = 0;
437 /* demod I2C adapter */
438 struct i2c_adapter *i2c_adapter;
439 struct i2c_client *client_tuner, *client_ci;
440 struct i2c_board_info info;
441 struct sp2_config sp2_config;
442 struct m88ts2022_config m88ts2022_config = {
443 .clock = 27000000,
444 };
445 memset(&info, 0, sizeof(struct i2c_board_info));
446
447 /* attach demod */
448 adap->fe[0] = dvb_attach(m88ds3103_attach,
449 &dvbsky_s960c_m88ds3103_config,
450 &d->i2c_adap,
451 &i2c_adapter);
452 if (!adap->fe[0]) {
453 dev_err(&d->udev->dev, "dvbsky_s960ci_attach fail.\n");
454 ret = -ENODEV;
455 goto fail_attach;
456 }
457
458 /* attach tuner */
459 m88ts2022_config.fe = adap->fe[0];
460 strlcpy(info.type, "m88ts2022", I2C_NAME_SIZE);
461 info.addr = 0x60;
462 info.platform_data = &m88ts2022_config;
463 request_module("m88ts2022");
464 client_tuner = i2c_new_device(i2c_adapter, &info);
465 if (client_tuner == NULL || client_tuner->dev.driver == NULL) {
466 ret = -ENODEV;
467 goto fail_tuner_device;
468 }
469
470 if (!try_module_get(client_tuner->dev.driver->owner)) {
471 ret = -ENODEV;
472 goto fail_tuner_module;
473 }
474
475 /* attach ci controller */
476 memset(&sp2_config, 0, sizeof(sp2_config));
477 sp2_config.dvb_adap = &adap->dvb_adap;
478 sp2_config.priv = d;
479 sp2_config.ci_control = dvbsky_ci_ctrl;
480 memset(&info, 0, sizeof(struct i2c_board_info));
481 strlcpy(info.type, "sp2", I2C_NAME_SIZE);
482 info.addr = 0x40;
483 info.platform_data = &sp2_config;
484 request_module("sp2");
485 client_ci = i2c_new_device(&d->i2c_adap, &info);
486 if (client_ci == NULL || client_ci->dev.driver == NULL) {
487 ret = -ENODEV;
488 goto fail_ci_device;
489 }
490
491 if (!try_module_get(client_ci->dev.driver->owner)) {
492 ret = -ENODEV;
493 goto fail_ci_module;
494 }
495
496 /* delegate signal strength measurement to tuner */
497 adap->fe[0]->ops.read_signal_strength =
498 adap->fe[0]->ops.tuner_ops.get_rf_strength;
499
500 /* hook fe: need to resync the slave fifo when signal locks. */
501 state->fe_read_status = adap->fe[0]->ops.read_status;
502 adap->fe[0]->ops.read_status = dvbsky_usb_read_status;
503
504 /* hook fe: LNB off/on is control by Cypress usb chip. */
505 state->fe_set_voltage = adap->fe[0]->ops.set_voltage;
506 adap->fe[0]->ops.set_voltage = dvbsky_usb_ci_set_voltage;
507
508 state->i2c_client_tuner = client_tuner;
509 state->i2c_client_ci = client_ci;
510 return ret;
511fail_ci_module:
512 i2c_unregister_device(client_ci);
513fail_ci_device:
514 module_put(client_tuner->dev.driver->owner);
515fail_tuner_module:
516 i2c_unregister_device(client_tuner);
517fail_tuner_device:
518 dvb_frontend_detach(adap->fe[0]);
519fail_attach:
520 return ret;
521}
522
523static int dvbsky_t680c_attach(struct dvb_usb_adapter *adap)
524{
525 struct dvbsky_state *state = adap_to_priv(adap);
526 struct dvb_usb_device *d = adap_to_d(adap);
527 int ret = 0;
528 struct i2c_adapter *i2c_adapter;
529 struct i2c_client *client_demod, *client_tuner, *client_ci;
530 struct i2c_board_info info;
531 struct si2168_config si2168_config;
532 struct si2157_config si2157_config;
533 struct sp2_config sp2_config;
534
535 /* attach demod */
536 memset(&si2168_config, 0, sizeof(si2168_config));
537 si2168_config.i2c_adapter = &i2c_adapter;
538 si2168_config.fe = &adap->fe[0];
539 si2168_config.ts_mode = SI2168_TS_PARALLEL;
540 memset(&info, 0, sizeof(struct i2c_board_info));
541 strlcpy(info.type, "si2168", I2C_NAME_SIZE);
542 info.addr = 0x64;
543 info.platform_data = &si2168_config;
544
545 request_module(info.type);
546 client_demod = i2c_new_device(&d->i2c_adap, &info);
547 if (client_demod == NULL ||
548 client_demod->dev.driver == NULL)
549 goto fail_demod_device;
550 if (!try_module_get(client_demod->dev.driver->owner))
551 goto fail_demod_module;
552
553 /* attach tuner */
554 memset(&si2157_config, 0, sizeof(si2157_config));
555 si2157_config.fe = adap->fe[0];
556 memset(&info, 0, sizeof(struct i2c_board_info));
557 strlcpy(info.type, "si2157", I2C_NAME_SIZE);
558 info.addr = 0x60;
559 info.platform_data = &si2157_config;
560
561 request_module(info.type);
562 client_tuner = i2c_new_device(i2c_adapter, &info);
563 if (client_tuner == NULL ||
564 client_tuner->dev.driver == NULL)
565 goto fail_tuner_device;
566 if (!try_module_get(client_tuner->dev.driver->owner))
567 goto fail_tuner_module;
568
569 /* attach ci controller */
570 memset(&sp2_config, 0, sizeof(sp2_config));
571 sp2_config.dvb_adap = &adap->dvb_adap;
572 sp2_config.priv = d;
573 sp2_config.ci_control = dvbsky_ci_ctrl;
574 memset(&info, 0, sizeof(struct i2c_board_info));
575 strlcpy(info.type, "sp2", I2C_NAME_SIZE);
576 info.addr = 0x40;
577 info.platform_data = &sp2_config;
578
579 request_module(info.type);
580 client_ci = i2c_new_device(&d->i2c_adap, &info);
581
582 if (client_ci == NULL || client_ci->dev.driver == NULL)
583 goto fail_ci_device;
584
585 if (!try_module_get(client_ci->dev.driver->owner))
586 goto fail_ci_module;
587
588 state->i2c_client_demod = client_demod;
589 state->i2c_client_tuner = client_tuner;
590 state->i2c_client_ci = client_ci;
591 return ret;
592fail_ci_module:
593 i2c_unregister_device(client_ci);
594fail_ci_device:
595 module_put(client_tuner->dev.driver->owner);
596fail_tuner_module:
597 i2c_unregister_device(client_tuner);
598fail_tuner_device:
599 module_put(client_demod->dev.driver->owner);
600fail_demod_module:
601 i2c_unregister_device(client_demod);
602fail_demod_device:
603 ret = -ENODEV;
604 return ret;
605}
606
607static int dvbsky_t330_attach(struct dvb_usb_adapter *adap)
608{
609 struct dvbsky_state *state = adap_to_priv(adap);
610 struct dvb_usb_device *d = adap_to_d(adap);
611 int ret = 0;
612 struct i2c_adapter *i2c_adapter;
613 struct i2c_client *client_demod, *client_tuner;
614 struct i2c_board_info info;
615 struct si2168_config si2168_config;
616 struct si2157_config si2157_config;
617
618 /* attach demod */
619 memset(&si2168_config, 0, sizeof(si2168_config));
620 si2168_config.i2c_adapter = &i2c_adapter;
621 si2168_config.fe = &adap->fe[0];
622 si2168_config.ts_mode = SI2168_TS_PARALLEL | 0x40;
623 memset(&info, 0, sizeof(struct i2c_board_info));
624 strlcpy(info.type, "si2168", I2C_NAME_SIZE);
625 info.addr = 0x64;
626 info.platform_data = &si2168_config;
627
628 request_module(info.type);
629 client_demod = i2c_new_device(&d->i2c_adap, &info);
630 if (client_demod == NULL ||
631 client_demod->dev.driver == NULL)
632 goto fail_demod_device;
633 if (!try_module_get(client_demod->dev.driver->owner))
634 goto fail_demod_module;
635
636 /* attach tuner */
637 memset(&si2157_config, 0, sizeof(si2157_config));
638 si2157_config.fe = adap->fe[0];
639 memset(&info, 0, sizeof(struct i2c_board_info));
640 strlcpy(info.type, "si2157", I2C_NAME_SIZE);
641 info.addr = 0x60;
642 info.platform_data = &si2157_config;
643
644 request_module(info.type);
645 client_tuner = i2c_new_device(i2c_adapter, &info);
646 if (client_tuner == NULL ||
647 client_tuner->dev.driver == NULL)
648 goto fail_tuner_device;
649 if (!try_module_get(client_tuner->dev.driver->owner))
650 goto fail_tuner_module;
651
652 state->i2c_client_demod = client_demod;
653 state->i2c_client_tuner = client_tuner;
654 return ret;
655fail_tuner_module:
656 i2c_unregister_device(client_tuner);
657fail_tuner_device:
658 module_put(client_demod->dev.driver->owner);
659fail_demod_module:
660 i2c_unregister_device(client_demod);
661fail_demod_device:
662 ret = -ENODEV;
663 return ret;
664}
665
365static int dvbsky_identify_state(struct dvb_usb_device *d, const char **name) 666static int dvbsky_identify_state(struct dvb_usb_device *d, const char **name)
366{ 667{
367 dvbsky_gpio_ctrl(d, 0x04, 1); 668 dvbsky_gpio_ctrl(d, 0x04, 1);
@@ -404,6 +705,18 @@ static void dvbsky_exit(struct dvb_usb_device *d)
404 module_put(client->dev.driver->owner); 705 module_put(client->dev.driver->owner);
405 i2c_unregister_device(client); 706 i2c_unregister_device(client);
406 } 707 }
708 client = state->i2c_client_demod;
709 /* remove I2C demod */
710 if (client) {
711 module_put(client->dev.driver->owner);
712 i2c_unregister_device(client);
713 }
714 client = state->i2c_client_ci;
715 /* remove I2C ci */
716 if (client) {
717 module_put(client->dev.driver->owner);
718 i2c_unregister_device(client);
719 }
407} 720}
408 721
409/* DVB USB Driver stuff */ 722/* DVB USB Driver stuff */
@@ -434,9 +747,104 @@ static struct dvb_usb_device_properties dvbsky_s960_props = {
434 } 747 }
435}; 748};
436 749
750static struct dvb_usb_device_properties dvbsky_s960c_props = {
751 .driver_name = KBUILD_MODNAME,
752 .owner = THIS_MODULE,
753 .adapter_nr = adapter_nr,
754 .size_of_priv = sizeof(struct dvbsky_state),
755
756 .generic_bulk_ctrl_endpoint = 0x01,
757 .generic_bulk_ctrl_endpoint_response = 0x81,
758 .generic_bulk_ctrl_delay = DVBSKY_MSG_DELAY,
759
760 .i2c_algo = &dvbsky_i2c_algo,
761 .frontend_attach = dvbsky_s960c_attach,
762 .init = dvbsky_init,
763 .get_rc_config = dvbsky_get_rc_config,
764 .streaming_ctrl = dvbsky_streaming_ctrl,
765 .identify_state = dvbsky_identify_state,
766 .exit = dvbsky_exit,
767 .read_mac_address = dvbsky_read_mac_addr,
768
769 .num_adapters = 1,
770 .adapter = {
771 {
772 .stream = DVB_USB_STREAM_BULK(0x82, 8, 4096),
773 }
774 }
775};
776
777static struct dvb_usb_device_properties dvbsky_t680c_props = {
778 .driver_name = KBUILD_MODNAME,
779 .owner = THIS_MODULE,
780 .adapter_nr = adapter_nr,
781 .size_of_priv = sizeof(struct dvbsky_state),
782
783 .generic_bulk_ctrl_endpoint = 0x01,
784 .generic_bulk_ctrl_endpoint_response = 0x81,
785 .generic_bulk_ctrl_delay = DVBSKY_MSG_DELAY,
786
787 .i2c_algo = &dvbsky_i2c_algo,
788 .frontend_attach = dvbsky_t680c_attach,
789 .init = dvbsky_init,
790 .get_rc_config = dvbsky_get_rc_config,
791 .streaming_ctrl = dvbsky_streaming_ctrl,
792 .identify_state = dvbsky_identify_state,
793 .exit = dvbsky_exit,
794 .read_mac_address = dvbsky_read_mac_addr,
795
796 .num_adapters = 1,
797 .adapter = {
798 {
799 .stream = DVB_USB_STREAM_BULK(0x82, 8, 4096),
800 }
801 }
802};
803
804static struct dvb_usb_device_properties dvbsky_t330_props = {
805 .driver_name = KBUILD_MODNAME,
806 .owner = THIS_MODULE,
807 .adapter_nr = adapter_nr,
808 .size_of_priv = sizeof(struct dvbsky_state),
809
810 .generic_bulk_ctrl_endpoint = 0x01,
811 .generic_bulk_ctrl_endpoint_response = 0x81,
812 .generic_bulk_ctrl_delay = DVBSKY_MSG_DELAY,
813
814 .i2c_algo = &dvbsky_i2c_algo,
815 .frontend_attach = dvbsky_t330_attach,
816 .init = dvbsky_init,
817 .get_rc_config = dvbsky_get_rc_config,
818 .streaming_ctrl = dvbsky_streaming_ctrl,
819 .identify_state = dvbsky_identify_state,
820 .exit = dvbsky_exit,
821 .read_mac_address = dvbsky_read_mac_addr,
822
823 .num_adapters = 1,
824 .adapter = {
825 {
826 .stream = DVB_USB_STREAM_BULK(0x82, 8, 4096),
827 }
828 }
829};
830
437static const struct usb_device_id dvbsky_id_table[] = { 831static const struct usb_device_id dvbsky_id_table[] = {
438 { DVB_USB_DEVICE(0x0572, 0x6831, 832 { DVB_USB_DEVICE(0x0572, 0x6831,
439 &dvbsky_s960_props, "DVBSky S960/S860", RC_MAP_DVBSKY) }, 833 &dvbsky_s960_props, "DVBSky S960/S860", RC_MAP_DVBSKY) },
834 { DVB_USB_DEVICE(0x0572, 0x960c,
835 &dvbsky_s960c_props, "DVBSky S960CI", RC_MAP_DVBSKY) },
836 { DVB_USB_DEVICE(0x0572, 0x680c,
837 &dvbsky_t680c_props, "DVBSky T680CI", RC_MAP_DVBSKY) },
838 { DVB_USB_DEVICE(0x0572, 0x0320,
839 &dvbsky_t330_props, "DVBSky T330", RC_MAP_DVBSKY) },
840 { DVB_USB_DEVICE(USB_VID_TECHNOTREND,
841 USB_PID_TECHNOTREND_TVSTICK_CT2_4400,
842 &dvbsky_t330_props, "TechnoTrend TVStick CT2-4400",
843 RC_MAP_TT_1500) },
844 { DVB_USB_DEVICE(USB_VID_TECHNOTREND,
845 USB_PID_TECHNOTREND_CONNECT_CT2_4650_CI,
846 &dvbsky_t680c_props, "TechnoTrend TT-connect CT2-4650 CI",
847 RC_MAP_TT_1500) },
440 { } 848 { }
441}; 849};
442MODULE_DEVICE_TABLE(usb, dvbsky_id_table); 850MODULE_DEVICE_TABLE(usb, dvbsky_id_table);
diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.c b/drivers/media/usb/dvb-usb-v2/lmedm04.c
index 9f2c5459b73a..994de53a574b 100644
--- a/drivers/media/usb/dvb-usb-v2/lmedm04.c
+++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c
@@ -817,20 +817,22 @@ static const char *lme_firmware_switch(struct dvb_usb_device *d, int cold)
817 case 0x1122: 817 case 0x1122:
818 switch (st->dvb_usb_lme2510_firmware) { 818 switch (st->dvb_usb_lme2510_firmware) {
819 default: 819 default:
820 st->dvb_usb_lme2510_firmware = TUNER_S0194;
821 case TUNER_S0194: 820 case TUNER_S0194:
822 fw_lme = fw_s0194; 821 fw_lme = fw_s0194;
823 ret = request_firmware(&fw, fw_lme, &udev->dev); 822 ret = request_firmware(&fw, fw_lme, &udev->dev);
824 if (ret == 0) { 823 if (ret == 0) {
824 st->dvb_usb_lme2510_firmware = TUNER_S0194;
825 cold = 0; 825 cold = 0;
826 break; 826 break;
827 } 827 }
828 st->dvb_usb_lme2510_firmware = TUNER_LG; 828 /* fall through */
829 case TUNER_LG: 829 case TUNER_LG:
830 fw_lme = fw_lg; 830 fw_lme = fw_lg;
831 ret = request_firmware(&fw, fw_lme, &udev->dev); 831 ret = request_firmware(&fw, fw_lme, &udev->dev);
832 if (ret == 0) 832 if (ret == 0) {
833 st->dvb_usb_lme2510_firmware = TUNER_LG;
833 break; 834 break;
835 }
834 st->dvb_usb_lme2510_firmware = TUNER_DEFAULT; 836 st->dvb_usb_lme2510_firmware = TUNER_DEFAULT;
835 break; 837 break;
836 } 838 }
@@ -838,26 +840,30 @@ static const char *lme_firmware_switch(struct dvb_usb_device *d, int cold)
838 case 0x1120: 840 case 0x1120:
839 switch (st->dvb_usb_lme2510_firmware) { 841 switch (st->dvb_usb_lme2510_firmware) {
840 default: 842 default:
841 st->dvb_usb_lme2510_firmware = TUNER_S7395;
842 case TUNER_S7395: 843 case TUNER_S7395:
843 fw_lme = fw_c_s7395; 844 fw_lme = fw_c_s7395;
844 ret = request_firmware(&fw, fw_lme, &udev->dev); 845 ret = request_firmware(&fw, fw_lme, &udev->dev);
845 if (ret == 0) { 846 if (ret == 0) {
847 st->dvb_usb_lme2510_firmware = TUNER_S7395;
846 cold = 0; 848 cold = 0;
847 break; 849 break;
848 } 850 }
849 st->dvb_usb_lme2510_firmware = TUNER_LG; 851 /* fall through */
850 case TUNER_LG: 852 case TUNER_LG:
851 fw_lme = fw_c_lg; 853 fw_lme = fw_c_lg;
852 ret = request_firmware(&fw, fw_lme, &udev->dev); 854 ret = request_firmware(&fw, fw_lme, &udev->dev);
853 if (ret == 0) 855 if (ret == 0) {
856 st->dvb_usb_lme2510_firmware = TUNER_LG;
854 break; 857 break;
855 st->dvb_usb_lme2510_firmware = TUNER_S0194; 858 }
859 /* fall through */
856 case TUNER_S0194: 860 case TUNER_S0194:
857 fw_lme = fw_c_s0194; 861 fw_lme = fw_c_s0194;
858 ret = request_firmware(&fw, fw_lme, &udev->dev); 862 ret = request_firmware(&fw, fw_lme, &udev->dev);
859 if (ret == 0) 863 if (ret == 0) {
864 st->dvb_usb_lme2510_firmware = TUNER_S0194;
860 break; 865 break;
866 }
861 st->dvb_usb_lme2510_firmware = TUNER_DEFAULT; 867 st->dvb_usb_lme2510_firmware = TUNER_DEFAULT;
862 cold = 0; 868 cold = 0;
863 break; 869 break;
diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
index 27b1e0397e71..896a225ee011 100644
--- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
+++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
@@ -24,6 +24,9 @@
24 24
25#include "rtl2830.h" 25#include "rtl2830.h"
26#include "rtl2832.h" 26#include "rtl2832.h"
27#include "rtl2832_sdr.h"
28#include "mn88472.h"
29#include "mn88473.h"
27 30
28#include "qt1010.h" 31#include "qt1010.h"
29#include "mt2060.h" 32#include "mt2060.h"
@@ -35,25 +38,6 @@
35#include "tua9001.h" 38#include "tua9001.h"
36#include "r820t.h" 39#include "r820t.h"
37 40
38/*
39 * RTL2832_SDR module is in staging. That logic is added in order to avoid any
40 * hard dependency to drivers/staging/ directory as we want compile mainline
41 * driver even whole staging directory is missing.
42 */
43#include <media/v4l2-subdev.h>
44
45#if IS_ENABLED(CONFIG_DVB_RTL2832_SDR)
46struct dvb_frontend *rtl2832_sdr_attach(struct dvb_frontend *fe,
47 struct i2c_adapter *i2c, const struct rtl2832_config *cfg,
48 struct v4l2_subdev *sd);
49#else
50static inline struct dvb_frontend *rtl2832_sdr_attach(struct dvb_frontend *fe,
51 struct i2c_adapter *i2c, const struct rtl2832_config *cfg,
52 struct v4l2_subdev *sd)
53{
54 return NULL;
55}
56#endif
57 41
58#ifdef CONFIG_MEDIA_ATTACH 42#ifdef CONFIG_MEDIA_ATTACH
59#define dvb_attach_sdr(FUNCTION, ARGS...) ({ \ 43#define dvb_attach_sdr(FUNCTION, ARGS...) ({ \
@@ -420,6 +404,8 @@ static int rtl2832u_read_config(struct dvb_usb_device *d)
420 struct rtl28xxu_req req_tda18272 = {0x00c0, CMD_I2C_RD, 2, buf}; 404 struct rtl28xxu_req req_tda18272 = {0x00c0, CMD_I2C_RD, 2, buf};
421 struct rtl28xxu_req req_r820t = {0x0034, CMD_I2C_RD, 1, buf}; 405 struct rtl28xxu_req req_r820t = {0x0034, CMD_I2C_RD, 1, buf};
422 struct rtl28xxu_req req_r828d = {0x0074, CMD_I2C_RD, 1, buf}; 406 struct rtl28xxu_req req_r828d = {0x0074, CMD_I2C_RD, 1, buf};
407 struct rtl28xxu_req req_mn88472 = {0xff38, CMD_I2C_RD, 1, buf};
408 struct rtl28xxu_req req_mn88473 = {0xff38, CMD_I2C_RD, 1, buf};
423 409
424 dev_dbg(&d->udev->dev, "%s:\n", __func__); 410 dev_dbg(&d->udev->dev, "%s:\n", __func__);
425 411
@@ -449,7 +435,7 @@ static int rtl2832u_read_config(struct dvb_usb_device *d)
449 if (ret == 0 && buf[0] == 0xa1) { 435 if (ret == 0 && buf[0] == 0xa1) {
450 priv->tuner = TUNER_RTL2832_FC0012; 436 priv->tuner = TUNER_RTL2832_FC0012;
451 priv->tuner_name = "FC0012"; 437 priv->tuner_name = "FC0012";
452 goto found; 438 goto tuner_found;
453 } 439 }
454 440
455 /* check FC0013 ID register; reg=00 val=a3 */ 441 /* check FC0013 ID register; reg=00 val=a3 */
@@ -457,7 +443,7 @@ static int rtl2832u_read_config(struct dvb_usb_device *d)
457 if (ret == 0 && buf[0] == 0xa3) { 443 if (ret == 0 && buf[0] == 0xa3) {
458 priv->tuner = TUNER_RTL2832_FC0013; 444 priv->tuner = TUNER_RTL2832_FC0013;
459 priv->tuner_name = "FC0013"; 445 priv->tuner_name = "FC0013";
460 goto found; 446 goto tuner_found;
461 } 447 }
462 448
463 /* check MT2266 ID register; reg=00 val=85 */ 449 /* check MT2266 ID register; reg=00 val=85 */
@@ -465,7 +451,7 @@ static int rtl2832u_read_config(struct dvb_usb_device *d)
465 if (ret == 0 && buf[0] == 0x85) { 451 if (ret == 0 && buf[0] == 0x85) {
466 priv->tuner = TUNER_RTL2832_MT2266; 452 priv->tuner = TUNER_RTL2832_MT2266;
467 priv->tuner_name = "MT2266"; 453 priv->tuner_name = "MT2266";
468 goto found; 454 goto tuner_found;
469 } 455 }
470 456
471 /* check FC2580 ID register; reg=01 val=56 */ 457 /* check FC2580 ID register; reg=01 val=56 */
@@ -473,7 +459,7 @@ static int rtl2832u_read_config(struct dvb_usb_device *d)
473 if (ret == 0 && buf[0] == 0x56) { 459 if (ret == 0 && buf[0] == 0x56) {
474 priv->tuner = TUNER_RTL2832_FC2580; 460 priv->tuner = TUNER_RTL2832_FC2580;
475 priv->tuner_name = "FC2580"; 461 priv->tuner_name = "FC2580";
476 goto found; 462 goto tuner_found;
477 } 463 }
478 464
479 /* check MT2063 ID register; reg=00 val=9e || 9c */ 465 /* check MT2063 ID register; reg=00 val=9e || 9c */
@@ -481,7 +467,7 @@ static int rtl2832u_read_config(struct dvb_usb_device *d)
481 if (ret == 0 && (buf[0] == 0x9e || buf[0] == 0x9c)) { 467 if (ret == 0 && (buf[0] == 0x9e || buf[0] == 0x9c)) {
482 priv->tuner = TUNER_RTL2832_MT2063; 468 priv->tuner = TUNER_RTL2832_MT2063;
483 priv->tuner_name = "MT2063"; 469 priv->tuner_name = "MT2063";
484 goto found; 470 goto tuner_found;
485 } 471 }
486 472
487 /* check MAX3543 ID register; reg=00 val=38 */ 473 /* check MAX3543 ID register; reg=00 val=38 */
@@ -489,7 +475,7 @@ static int rtl2832u_read_config(struct dvb_usb_device *d)
489 if (ret == 0 && buf[0] == 0x38) { 475 if (ret == 0 && buf[0] == 0x38) {
490 priv->tuner = TUNER_RTL2832_MAX3543; 476 priv->tuner = TUNER_RTL2832_MAX3543;
491 priv->tuner_name = "MAX3543"; 477 priv->tuner_name = "MAX3543";
492 goto found; 478 goto tuner_found;
493 } 479 }
494 480
495 /* check TUA9001 ID register; reg=7e val=2328 */ 481 /* check TUA9001 ID register; reg=7e val=2328 */
@@ -497,7 +483,7 @@ static int rtl2832u_read_config(struct dvb_usb_device *d)
497 if (ret == 0 && buf[0] == 0x23 && buf[1] == 0x28) { 483 if (ret == 0 && buf[0] == 0x23 && buf[1] == 0x28) {
498 priv->tuner = TUNER_RTL2832_TUA9001; 484 priv->tuner = TUNER_RTL2832_TUA9001;
499 priv->tuner_name = "TUA9001"; 485 priv->tuner_name = "TUA9001";
500 goto found; 486 goto tuner_found;
501 } 487 }
502 488
503 /* check MXL5007R ID register; reg=d9 val=14 */ 489 /* check MXL5007R ID register; reg=d9 val=14 */
@@ -505,7 +491,7 @@ static int rtl2832u_read_config(struct dvb_usb_device *d)
505 if (ret == 0 && buf[0] == 0x14) { 491 if (ret == 0 && buf[0] == 0x14) {
506 priv->tuner = TUNER_RTL2832_MXL5007T; 492 priv->tuner = TUNER_RTL2832_MXL5007T;
507 priv->tuner_name = "MXL5007T"; 493 priv->tuner_name = "MXL5007T";
508 goto found; 494 goto tuner_found;
509 } 495 }
510 496
511 /* check E4000 ID register; reg=02 val=40 */ 497 /* check E4000 ID register; reg=02 val=40 */
@@ -513,7 +499,7 @@ static int rtl2832u_read_config(struct dvb_usb_device *d)
513 if (ret == 0 && buf[0] == 0x40) { 499 if (ret == 0 && buf[0] == 0x40) {
514 priv->tuner = TUNER_RTL2832_E4000; 500 priv->tuner = TUNER_RTL2832_E4000;
515 priv->tuner_name = "E4000"; 501 priv->tuner_name = "E4000";
516 goto found; 502 goto tuner_found;
517 } 503 }
518 504
519 /* check TDA18272 ID register; reg=00 val=c760 */ 505 /* check TDA18272 ID register; reg=00 val=c760 */
@@ -521,7 +507,7 @@ static int rtl2832u_read_config(struct dvb_usb_device *d)
521 if (ret == 0 && (buf[0] == 0xc7 || buf[1] == 0x60)) { 507 if (ret == 0 && (buf[0] == 0xc7 || buf[1] == 0x60)) {
522 priv->tuner = TUNER_RTL2832_TDA18272; 508 priv->tuner = TUNER_RTL2832_TDA18272;
523 priv->tuner_name = "TDA18272"; 509 priv->tuner_name = "TDA18272";
524 goto found; 510 goto tuner_found;
525 } 511 }
526 512
527 /* check R820T ID register; reg=00 val=69 */ 513 /* check R820T ID register; reg=00 val=69 */
@@ -529,7 +515,7 @@ static int rtl2832u_read_config(struct dvb_usb_device *d)
529 if (ret == 0 && buf[0] == 0x69) { 515 if (ret == 0 && buf[0] == 0x69) {
530 priv->tuner = TUNER_RTL2832_R820T; 516 priv->tuner = TUNER_RTL2832_R820T;
531 priv->tuner_name = "R820T"; 517 priv->tuner_name = "R820T";
532 goto found; 518 goto tuner_found;
533 } 519 }
534 520
535 /* check R828D ID register; reg=00 val=69 */ 521 /* check R828D ID register; reg=00 val=69 */
@@ -537,13 +523,44 @@ static int rtl2832u_read_config(struct dvb_usb_device *d)
537 if (ret == 0 && buf[0] == 0x69) { 523 if (ret == 0 && buf[0] == 0x69) {
538 priv->tuner = TUNER_RTL2832_R828D; 524 priv->tuner = TUNER_RTL2832_R828D;
539 priv->tuner_name = "R828D"; 525 priv->tuner_name = "R828D";
540 goto found; 526 goto tuner_found;
541 } 527 }
542 528
543 529tuner_found:
544found:
545 dev_dbg(&d->udev->dev, "%s: tuner=%s\n", __func__, priv->tuner_name); 530 dev_dbg(&d->udev->dev, "%s: tuner=%s\n", __func__, priv->tuner_name);
546 531
532 /* probe slave demod */
533 if (priv->tuner == TUNER_RTL2832_R828D) {
534 /* power on MN88472 demod on GPIO0 */
535 ret = rtl28xx_wr_reg_mask(d, SYS_GPIO_OUT_VAL, 0x01, 0x01);
536 if (ret)
537 goto err;
538
539 ret = rtl28xx_wr_reg_mask(d, SYS_GPIO_DIR, 0x00, 0x01);
540 if (ret)
541 goto err;
542
543 ret = rtl28xx_wr_reg_mask(d, SYS_GPIO_OUT_EN, 0x01, 0x01);
544 if (ret)
545 goto err;
546
547 /* check MN88472 answers */
548 ret = rtl28xxu_ctrl_msg(d, &req_mn88472);
549 if (ret == 0 && buf[0] == 0x02) {
550 dev_dbg(&d->udev->dev, "%s: MN88472 found\n", __func__);
551 priv->slave_demod = SLAVE_DEMOD_MN88472;
552 goto demod_found;
553 }
554
555 ret = rtl28xxu_ctrl_msg(d, &req_mn88473);
556 if (ret == 0 && buf[0] == 0x03) {
557 dev_dbg(&d->udev->dev, "%s: MN88473 found\n", __func__);
558 priv->slave_demod = SLAVE_DEMOD_MN88473;
559 goto demod_found;
560 }
561 }
562
563demod_found:
547 /* close demod I2C gate */ 564 /* close demod I2C gate */
548 ret = rtl28xxu_ctrl_msg(d, &req_gate_close); 565 ret = rtl28xxu_ctrl_msg(d, &req_gate_close);
549 if (ret < 0) 566 if (ret < 0)
@@ -818,7 +835,66 @@ static int rtl2832u_frontend_attach(struct dvb_usb_adapter *adap)
818 /* set fe callback */ 835 /* set fe callback */
819 adap->fe[0]->callback = rtl2832u_frontend_callback; 836 adap->fe[0]->callback = rtl2832u_frontend_callback;
820 837
838 if (priv->slave_demod) {
839 struct i2c_board_info info = {};
840 struct i2c_client *client;
841
842 /*
843 * We continue on reduced mode, without DVB-T2/C, using master
844 * demod, when slave demod fails.
845 */
846 ret = 0;
847
848 /* attach slave demodulator */
849 if (priv->slave_demod == SLAVE_DEMOD_MN88472) {
850 struct mn88472_config mn88472_config = {};
851
852 mn88472_config.fe = &adap->fe[1];
853 mn88472_config.i2c_wr_max = 22,
854 strlcpy(info.type, "mn88472", I2C_NAME_SIZE);
855 info.addr = 0x18;
856 info.platform_data = &mn88472_config;
857 request_module(info.type);
858 client = i2c_new_device(priv->demod_i2c_adapter, &info);
859 if (client == NULL || client->dev.driver == NULL) {
860 priv->slave_demod = SLAVE_DEMOD_NONE;
861 goto err_slave_demod_failed;
862 }
863
864 if (!try_module_get(client->dev.driver->owner)) {
865 i2c_unregister_device(client);
866 priv->slave_demod = SLAVE_DEMOD_NONE;
867 goto err_slave_demod_failed;
868 }
869
870 priv->i2c_client_slave_demod = client;
871 } else {
872 struct mn88473_config mn88473_config = {};
873
874 mn88473_config.fe = &adap->fe[1];
875 mn88473_config.i2c_wr_max = 22,
876 strlcpy(info.type, "mn88473", I2C_NAME_SIZE);
877 info.addr = 0x18;
878 info.platform_data = &mn88473_config;
879 request_module(info.type);
880 client = i2c_new_device(priv->demod_i2c_adapter, &info);
881 if (client == NULL || client->dev.driver == NULL) {
882 priv->slave_demod = SLAVE_DEMOD_NONE;
883 goto err_slave_demod_failed;
884 }
885
886 if (!try_module_get(client->dev.driver->owner)) {
887 i2c_unregister_device(client);
888 priv->slave_demod = SLAVE_DEMOD_NONE;
889 goto err_slave_demod_failed;
890 }
891
892 priv->i2c_client_slave_demod = client;
893 }
894 }
895
821 return 0; 896 return 0;
897err_slave_demod_failed:
822err: 898err:
823 dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret); 899 dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
824 return ret; 900 return ret;
@@ -984,7 +1060,7 @@ static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap)
984 break; 1060 break;
985 } 1061 }
986 1062
987 priv->client = client; 1063 priv->i2c_client_tuner = client;
988 sd = i2c_get_clientdata(client); 1064 sd = i2c_get_clientdata(client);
989 i2c_set_adapdata(i2c_adap_internal, d); 1065 i2c_set_adapdata(i2c_adap_internal, d);
990 1066
@@ -1024,32 +1100,30 @@ static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap)
1024 &rtl28xxu_rtl2832_r820t_config, NULL); 1100 &rtl28xxu_rtl2832_r820t_config, NULL);
1025 break; 1101 break;
1026 case TUNER_RTL2832_R828D: 1102 case TUNER_RTL2832_R828D:
1027 /* power off mn88472 demod on GPIO0 */ 1103 fe = dvb_attach(r820t_attach, adap->fe[0],
1028 ret = rtl28xx_wr_reg_mask(d, SYS_GPIO_OUT_VAL, 0x00, 0x01); 1104 priv->demod_i2c_adapter,
1029 if (ret)
1030 goto err;
1031
1032 ret = rtl28xx_wr_reg_mask(d, SYS_GPIO_DIR, 0x00, 0x01);
1033 if (ret)
1034 goto err;
1035
1036 ret = rtl28xx_wr_reg_mask(d, SYS_GPIO_OUT_EN, 0x01, 0x01);
1037 if (ret)
1038 goto err;
1039
1040 fe = dvb_attach(r820t_attach, adap->fe[0], &d->i2c_adap,
1041 &rtl2832u_r828d_config); 1105 &rtl2832u_r828d_config);
1042
1043 /* Use tuner to get the signal strength */
1044 adap->fe[0]->ops.read_signal_strength = 1106 adap->fe[0]->ops.read_signal_strength =
1045 adap->fe[0]->ops.tuner_ops.get_rf_strength; 1107 adap->fe[0]->ops.tuner_ops.get_rf_strength;
1108
1109 if (adap->fe[1]) {
1110 fe = dvb_attach(r820t_attach, adap->fe[1],
1111 priv->demod_i2c_adapter,
1112 &rtl2832u_r828d_config);
1113 adap->fe[1]->ops.read_signal_strength =
1114 adap->fe[1]->ops.tuner_ops.get_rf_strength;
1115 }
1116
1117 /* attach SDR */
1118 dvb_attach_sdr(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap,
1119 &rtl28xxu_rtl2832_r820t_config, NULL);
1046 break; 1120 break;
1047 default: 1121 default:
1048 dev_err(&d->udev->dev, "%s: unknown tuner=%d\n", KBUILD_MODNAME, 1122 dev_err(&d->udev->dev, "%s: unknown tuner=%d\n", KBUILD_MODNAME,
1049 priv->tuner); 1123 priv->tuner);
1050 } 1124 }
1051 1125
1052 if (fe == NULL && priv->client == NULL) { 1126 if (fe == NULL && priv->i2c_client_tuner == NULL) {
1053 ret = -ENODEV; 1127 ret = -ENODEV;
1054 goto err; 1128 goto err;
1055 } 1129 }
@@ -1097,11 +1171,19 @@ err:
1097static void rtl28xxu_exit(struct dvb_usb_device *d) 1171static void rtl28xxu_exit(struct dvb_usb_device *d)
1098{ 1172{
1099 struct rtl28xxu_priv *priv = d->priv; 1173 struct rtl28xxu_priv *priv = d->priv;
1100 struct i2c_client *client = priv->client; 1174 struct i2c_client *client;
1101 1175
1102 dev_dbg(&d->udev->dev, "%s:\n", __func__); 1176 dev_dbg(&d->udev->dev, "%s:\n", __func__);
1103 1177
1104 /* remove I2C tuner */ 1178 /* remove I2C tuner */
1179 client = priv->i2c_client_tuner;
1180 if (client) {
1181 module_put(client->dev.driver->owner);
1182 i2c_unregister_device(client);
1183 }
1184
1185 /* remove I2C slave demod */
1186 client = priv->i2c_client_slave_demod;
1105 if (client) { 1187 if (client) {
1106 module_put(client->dev.driver->owner); 1188 module_put(client->dev.driver->owner);
1107 i2c_unregister_device(client); 1189 i2c_unregister_device(client);
@@ -1201,13 +1283,6 @@ static int rtl2832u_power_ctrl(struct dvb_usb_device *d, int onoff)
1201 if (ret) 1283 if (ret)
1202 goto err; 1284 goto err;
1203 1285
1204 mdelay(5);
1205
1206 /* enable ADC */
1207 ret = rtl28xx_wr_reg_mask(d, SYS_DEMOD_CTL, 0x48, 0x48);
1208 if (ret)
1209 goto err;
1210
1211 /* streaming EP: clear stall & reset */ 1286 /* streaming EP: clear stall & reset */
1212 ret = rtl28xx_wr_regs(d, USB_EPA_CTL, "\x00\x00", 2); 1287 ret = rtl28xx_wr_regs(d, USB_EPA_CTL, "\x00\x00", 2);
1213 if (ret) 1288 if (ret)
@@ -1222,11 +1297,6 @@ static int rtl2832u_power_ctrl(struct dvb_usb_device *d, int onoff)
1222 if (ret) 1297 if (ret)
1223 goto err; 1298 goto err;
1224 1299
1225 /* disable ADC */
1226 ret = rtl28xx_wr_reg_mask(d, SYS_DEMOD_CTL, 0x00, 0x48);
1227 if (ret)
1228 goto err;
1229
1230 /* disable PLL */ 1300 /* disable PLL */
1231 ret = rtl28xx_wr_reg_mask(d, SYS_DEMOD_CTL, 0x00, 0x80); 1301 ret = rtl28xx_wr_reg_mask(d, SYS_DEMOD_CTL, 0x00, 0x80);
1232 if (ret) 1302 if (ret)
@@ -1244,6 +1314,38 @@ err:
1244 return ret; 1314 return ret;
1245} 1315}
1246 1316
1317static int rtl2832u_frontend_ctrl(struct dvb_frontend *fe, int onoff)
1318{
1319 struct dvb_usb_device *d = fe_to_d(fe);
1320 struct dvb_usb_adapter *adap = fe_to_adap(fe);
1321 int ret;
1322 u8 val;
1323
1324 dev_dbg(&d->udev->dev, "%s: fe=%d onoff=%d\n", __func__, fe->id, onoff);
1325
1326 /* control internal demod ADC */
1327 if (fe->id == 0 && onoff)
1328 val = 0x48; /* enable ADC */
1329 else
1330 val = 0x00; /* disable ADC */
1331
1332 ret = rtl28xx_wr_reg_mask(d, SYS_DEMOD_CTL, val, 0x48);
1333 if (ret)
1334 goto err;
1335
1336 /* bypass slave demod TS through master demod */
1337 if (fe->id == 1 && onoff) {
1338 ret = rtl2832_enable_external_ts_if(adap->fe[0]);
1339 if (ret)
1340 goto err;
1341 }
1342
1343 return 0;
1344err:
1345 dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
1346 return ret;
1347}
1348
1247#if IS_ENABLED(CONFIG_RC_CORE) 1349#if IS_ENABLED(CONFIG_RC_CORE)
1248static int rtl2831u_rc_query(struct dvb_usb_device *d) 1350static int rtl2831u_rc_query(struct dvb_usb_device *d)
1249{ 1351{
@@ -1467,6 +1569,7 @@ static const struct dvb_usb_device_properties rtl2832u_props = {
1467 .size_of_priv = sizeof(struct rtl28xxu_priv), 1569 .size_of_priv = sizeof(struct rtl28xxu_priv),
1468 1570
1469 .power_ctrl = rtl2832u_power_ctrl, 1571 .power_ctrl = rtl2832u_power_ctrl,
1572 .frontend_ctrl = rtl2832u_frontend_ctrl,
1470 .i2c_algo = &rtl28xxu_i2c_algo, 1573 .i2c_algo = &rtl28xxu_i2c_algo,
1471 .read_config = rtl2832u_read_config, 1574 .read_config = rtl2832u_read_config,
1472 .frontend_attach = rtl2832u_frontend_attach, 1575 .frontend_attach = rtl2832u_frontend_attach,
diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.h b/drivers/media/usb/dvb-usb-v2/rtl28xxu.h
index a26cab10f382..3e3ea9d64a38 100644
--- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.h
+++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.h
@@ -57,7 +57,12 @@ struct rtl28xxu_priv {
57 u8 page; /* integrated demod active register page */ 57 u8 page; /* integrated demod active register page */
58 struct i2c_adapter *demod_i2c_adapter; 58 struct i2c_adapter *demod_i2c_adapter;
59 bool rc_active; 59 bool rc_active;
60 struct i2c_client *client; 60 struct i2c_client *i2c_client_tuner;
61 struct i2c_client *i2c_client_slave_demod;
62 #define SLAVE_DEMOD_NONE 0
63 #define SLAVE_DEMOD_MN88472 1
64 #define SLAVE_DEMOD_MN88473 2
65 unsigned int slave_demod:2;
61}; 66};
62 67
63enum rtl28xxu_chip_id { 68enum rtl28xxu_chip_id {
diff --git a/drivers/media/usb/dvb-usb/Kconfig b/drivers/media/usb/dvb-usb/Kconfig
index 41d3eb922a00..3364200db093 100644
--- a/drivers/media/usb/dvb-usb/Kconfig
+++ b/drivers/media/usb/dvb-usb/Kconfig
@@ -130,7 +130,6 @@ config DVB_USB_CXUSB
130 130
131 Medion MD95700 hybrid USB2.0 device. 131 Medion MD95700 hybrid USB2.0 device.
132 DViCO FusionHDTV (Bluebird) USB2.0 devices 132 DViCO FusionHDTV (Bluebird) USB2.0 devices
133 TechnoTrend TVStick CT2-4400 and CT2-4650 CI devices
134 133
135config DVB_USB_M920X 134config DVB_USB_M920X
136 tristate "Uli m920x DVB-T USB2.0 support" 135 tristate "Uli m920x DVB-T USB2.0 support"
diff --git a/drivers/media/usb/dvb-usb/af9005.c b/drivers/media/usb/dvb-usb/af9005.c
index 3f4361e48a32..efa782ed6e2d 100644
--- a/drivers/media/usb/dvb-usb/af9005.c
+++ b/drivers/media/usb/dvb-usb/af9005.c
@@ -1081,9 +1081,12 @@ static int __init af9005_usb_module_init(void)
1081 err("usb_register failed. (%d)", result); 1081 err("usb_register failed. (%d)", result);
1082 return result; 1082 return result;
1083 } 1083 }
1084#if IS_MODULE(CONFIG_DVB_USB_AF9005) || defined(CONFIG_DVB_USB_AF9005_REMOTE)
1085 /* FIXME: convert to todays kernel IR infrastructure */
1084 rc_decode = symbol_request(af9005_rc_decode); 1086 rc_decode = symbol_request(af9005_rc_decode);
1085 rc_keys = symbol_request(rc_map_af9005_table); 1087 rc_keys = symbol_request(rc_map_af9005_table);
1086 rc_keys_size = symbol_request(rc_map_af9005_table_size); 1088 rc_keys_size = symbol_request(rc_map_af9005_table_size);
1089#endif
1087 if (rc_decode == NULL || rc_keys == NULL || rc_keys_size == NULL) { 1090 if (rc_decode == NULL || rc_keys == NULL || rc_keys_size == NULL) {
1088 err("af9005_rc_decode function not found, disabling remote"); 1091 err("af9005_rc_decode function not found, disabling remote");
1089 af9005_properties.rc.legacy.rc_query = NULL; 1092 af9005_properties.rc.legacy.rc_query = NULL;
diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
index 356abb369c20..0f345b1f9014 100644
--- a/drivers/media/usb/dvb-usb/cxusb.c
+++ b/drivers/media/usb/dvb-usb/cxusb.c
@@ -44,7 +44,6 @@
44#include "atbm8830.h" 44#include "atbm8830.h"
45#include "si2168.h" 45#include "si2168.h"
46#include "si2157.h" 46#include "si2157.h"
47#include "sp2.h"
48 47
49/* Max transfer size done by I2C transfer functions */ 48/* Max transfer size done by I2C transfer functions */
50#define MAX_XFER_SIZE 80 49#define MAX_XFER_SIZE 80
@@ -147,22 +146,6 @@ static int cxusb_d680_dmb_gpio_tuner(struct dvb_usb_device *d,
147 } 146 }
148} 147}
149 148
150static int cxusb_tt_ct2_4400_gpio_tuner(struct dvb_usb_device *d, int onoff)
151{
152 u8 o[2], i;
153 int rc;
154
155 o[0] = 0x83;
156 o[1] = onoff;
157 rc = cxusb_ctrl_msg(d, CMD_GPIO_WRITE, o, 2, &i, 1);
158
159 if (rc) {
160 deb_info("gpio_write failed.\n");
161 return -EIO;
162 }
163 return 0;
164}
165
166/* I2C */ 149/* I2C */
167static int cxusb_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], 150static int cxusb_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
168 int num) 151 int num)
@@ -524,30 +507,6 @@ static int cxusb_d680_dmb_rc_query(struct dvb_usb_device *d, u32 *event,
524 return 0; 507 return 0;
525} 508}
526 509
527static int cxusb_tt_ct2_4400_rc_query(struct dvb_usb_device *d)
528{
529 u8 i[2];
530 int ret;
531 u32 cmd, keycode;
532 u8 rc5_cmd, rc5_addr, rc5_toggle;
533
534 ret = cxusb_ctrl_msg(d, 0x10, NULL, 0, i, 2);
535 if (ret)
536 return ret;
537
538 cmd = (i[0] << 8) | i[1];
539
540 if (cmd != 0xffff) {
541 rc5_cmd = cmd & 0x3F; /* bits 1-6 for command */
542 rc5_addr = (cmd & 0x07C0) >> 6; /* bits 7-11 for address */
543 rc5_toggle = (cmd & 0x0800) >> 11; /* bit 12 for toggle */
544 keycode = (rc5_addr << 8) | rc5_cmd;
545 rc_keydown(d->rc_dev, RC_BIT_RC5, keycode, rc5_toggle);
546 }
547
548 return 0;
549}
550
551static struct rc_map_table rc_map_dvico_mce_table[] = { 510static struct rc_map_table rc_map_dvico_mce_table[] = {
552 { 0xfe02, KEY_TV }, 511 { 0xfe02, KEY_TV },
553 { 0xfe0e, KEY_MP3 }, 512 { 0xfe0e, KEY_MP3 },
@@ -673,70 +632,6 @@ static struct rc_map_table rc_map_d680_dmb_table[] = {
673 { 0x0025, KEY_POWER }, 632 { 0x0025, KEY_POWER },
674}; 633};
675 634
676static int cxusb_tt_ct2_4400_read_mac_address(struct dvb_usb_device *d, u8 mac[6])
677{
678 u8 wbuf[2];
679 u8 rbuf[6];
680 int ret;
681 struct i2c_msg msg[] = {
682 {
683 .addr = 0x51,
684 .flags = 0,
685 .buf = wbuf,
686 .len = 2,
687 }, {
688 .addr = 0x51,
689 .flags = I2C_M_RD,
690 .buf = rbuf,
691 .len = 6,
692 }
693 };
694
695 wbuf[0] = 0x1e;
696 wbuf[1] = 0x00;
697 ret = cxusb_i2c_xfer(&d->i2c_adap, msg, 2);
698
699 if (ret == 2) {
700 memcpy(mac, rbuf, 6);
701 return 0;
702 } else {
703 if (ret < 0)
704 return ret;
705 return -EIO;
706 }
707}
708
709static int cxusb_tt_ct2_4650_ci_ctrl(void *priv, u8 read, int addr,
710 u8 data, int *mem)
711{
712 struct dvb_usb_device *d = priv;
713 u8 wbuf[3];
714 u8 rbuf[2];
715 int ret;
716
717 wbuf[0] = (addr >> 8) & 0xff;
718 wbuf[1] = addr & 0xff;
719
720 if (read) {
721 ret = cxusb_ctrl_msg(d, CMD_SP2_CI_READ, wbuf, 2, rbuf, 2);
722 } else {
723 wbuf[2] = data;
724 ret = cxusb_ctrl_msg(d, CMD_SP2_CI_WRITE, wbuf, 3, rbuf, 1);
725 }
726
727 if (ret)
728 goto err;
729
730 if (read)
731 *mem = rbuf[1];
732
733 return 0;
734err:
735 deb_info("%s: ci usb write returned %d\n", __func__, ret);
736 return ret;
737
738}
739
740static int cxusb_dee1601_demod_init(struct dvb_frontend* fe) 635static int cxusb_dee1601_demod_init(struct dvb_frontend* fe)
741{ 636{
742 static u8 clock_config [] = { CLOCK_CTL, 0x38, 0x28 }; 637 static u8 clock_config [] = { CLOCK_CTL, 0x38, 0x28 };
@@ -1408,36 +1303,34 @@ static int cxusb_mygica_d689_frontend_attach(struct dvb_usb_adapter *adap)
1408 return 0; 1303 return 0;
1409} 1304}
1410 1305
1411static int cxusb_tt_ct2_4400_attach(struct dvb_usb_adapter *adap) 1306static int cxusb_mygica_t230_frontend_attach(struct dvb_usb_adapter *adap)
1412{ 1307{
1413 struct dvb_usb_device *d = adap->dev; 1308 struct dvb_usb_device *d = adap->dev;
1414 struct cxusb_state *st = d->priv; 1309 struct cxusb_state *st = d->priv;
1415 struct i2c_adapter *adapter; 1310 struct i2c_adapter *adapter;
1416 struct i2c_client *client_demod; 1311 struct i2c_client *client_demod;
1417 struct i2c_client *client_tuner; 1312 struct i2c_client *client_tuner;
1418 struct i2c_client *client_ci;
1419 struct i2c_board_info info; 1313 struct i2c_board_info info;
1420 struct si2168_config si2168_config; 1314 struct si2168_config si2168_config;
1421 struct si2157_config si2157_config; 1315 struct si2157_config si2157_config;
1422 struct sp2_config sp2_config;
1423 u8 o[2], i;
1424 1316
1425 /* reset the tuner */ 1317 /* Select required USB configuration */
1426 if (cxusb_tt_ct2_4400_gpio_tuner(d, 0) < 0) { 1318 if (usb_set_interface(d->udev, 0, 0) < 0)
1427 err("clear tuner gpio failed"); 1319 err("set interface failed");
1428 return -EIO; 1320
1429 } 1321 /* Unblock all USB pipes */
1430 msleep(100); 1322 usb_clear_halt(d->udev,
1431 if (cxusb_tt_ct2_4400_gpio_tuner(d, 1) < 0) { 1323 usb_sndbulkpipe(d->udev, d->props.generic_bulk_ctrl_endpoint));
1432 err("set tuner gpio failed"); 1324 usb_clear_halt(d->udev,
1433 return -EIO; 1325 usb_rcvbulkpipe(d->udev, d->props.generic_bulk_ctrl_endpoint));
1434 } 1326 usb_clear_halt(d->udev,
1435 msleep(100); 1327 usb_rcvbulkpipe(d->udev, d->props.adapter[0].fe[0].stream.endpoint));
1436 1328
1437 /* attach frontend */ 1329 /* attach frontend */
1438 si2168_config.i2c_adapter = &adapter; 1330 si2168_config.i2c_adapter = &adapter;
1439 si2168_config.fe = &adap->fe_adap[0].fe; 1331 si2168_config.fe = &adap->fe_adap[0].fe;
1440 si2168_config.ts_mode = SI2168_TS_PARALLEL; 1332 si2168_config.ts_mode = SI2168_TS_PARALLEL;
1333 si2168_config.ts_clock_inv = 1;
1441 memset(&info, 0, sizeof(struct i2c_board_info)); 1334 memset(&info, 0, sizeof(struct i2c_board_info));
1442 strlcpy(info.type, "si2168", I2C_NAME_SIZE); 1335 strlcpy(info.type, "si2168", I2C_NAME_SIZE);
1443 info.addr = 0x64; 1336 info.addr = 0x64;
@@ -1477,48 +1370,6 @@ static int cxusb_tt_ct2_4400_attach(struct dvb_usb_adapter *adap)
1477 1370
1478 st->i2c_client_tuner = client_tuner; 1371 st->i2c_client_tuner = client_tuner;
1479 1372
1480 /* initialize CI */
1481 if (d->udev->descriptor.idProduct ==
1482 USB_PID_TECHNOTREND_CONNECT_CT2_4650_CI) {
1483
1484 memcpy(o, "\xc0\x01", 2);
1485 cxusb_ctrl_msg(d, CMD_GPIO_WRITE, o, 2, &i, 1);
1486 msleep(100);
1487
1488 memcpy(o, "\xc0\x00", 2);
1489 cxusb_ctrl_msg(d, CMD_GPIO_WRITE, o, 2, &i, 1);
1490 msleep(100);
1491
1492 memset(&sp2_config, 0, sizeof(sp2_config));
1493 sp2_config.dvb_adap = &adap->dvb_adap;
1494 sp2_config.priv = d;
1495 sp2_config.ci_control = cxusb_tt_ct2_4650_ci_ctrl;
1496 memset(&info, 0, sizeof(struct i2c_board_info));
1497 strlcpy(info.type, "sp2", I2C_NAME_SIZE);
1498 info.addr = 0x40;
1499 info.platform_data = &sp2_config;
1500 request_module(info.type);
1501 client_ci = i2c_new_device(&d->i2c_adap, &info);
1502 if (client_ci == NULL || client_ci->dev.driver == NULL) {
1503 module_put(client_tuner->dev.driver->owner);
1504 i2c_unregister_device(client_tuner);
1505 module_put(client_demod->dev.driver->owner);
1506 i2c_unregister_device(client_demod);
1507 return -ENODEV;
1508 }
1509 if (!try_module_get(client_ci->dev.driver->owner)) {
1510 i2c_unregister_device(client_ci);
1511 module_put(client_tuner->dev.driver->owner);
1512 i2c_unregister_device(client_tuner);
1513 module_put(client_demod->dev.driver->owner);
1514 i2c_unregister_device(client_demod);
1515 return -ENODEV;
1516 }
1517
1518 st->i2c_client_ci = client_ci;
1519
1520 }
1521
1522 return 0; 1373 return 0;
1523} 1374}
1524 1375
@@ -1603,7 +1454,7 @@ static struct dvb_usb_device_properties cxusb_bluebird_nano2_needsfirmware_prope
1603static struct dvb_usb_device_properties cxusb_aver_a868r_properties; 1454static struct dvb_usb_device_properties cxusb_aver_a868r_properties;
1604static struct dvb_usb_device_properties cxusb_d680_dmb_properties; 1455static struct dvb_usb_device_properties cxusb_d680_dmb_properties;
1605static struct dvb_usb_device_properties cxusb_mygica_d689_properties; 1456static struct dvb_usb_device_properties cxusb_mygica_d689_properties;
1606static struct dvb_usb_device_properties cxusb_tt_ct2_4400_properties; 1457static struct dvb_usb_device_properties cxusb_mygica_t230_properties;
1607 1458
1608static int cxusb_probe(struct usb_interface *intf, 1459static int cxusb_probe(struct usb_interface *intf,
1609 const struct usb_device_id *id) 1460 const struct usb_device_id *id)
@@ -1634,7 +1485,7 @@ static int cxusb_probe(struct usb_interface *intf,
1634 THIS_MODULE, NULL, adapter_nr) || 1485 THIS_MODULE, NULL, adapter_nr) ||
1635 0 == dvb_usb_device_init(intf, &cxusb_mygica_d689_properties, 1486 0 == dvb_usb_device_init(intf, &cxusb_mygica_d689_properties,
1636 THIS_MODULE, NULL, adapter_nr) || 1487 THIS_MODULE, NULL, adapter_nr) ||
1637 0 == dvb_usb_device_init(intf, &cxusb_tt_ct2_4400_properties, 1488 0 == dvb_usb_device_init(intf, &cxusb_mygica_t230_properties,
1638 THIS_MODULE, NULL, adapter_nr) || 1489 THIS_MODULE, NULL, adapter_nr) ||
1639 0) 1490 0)
1640 return 0; 1491 return 0;
@@ -1648,13 +1499,6 @@ static void cxusb_disconnect(struct usb_interface *intf)
1648 struct cxusb_state *st = d->priv; 1499 struct cxusb_state *st = d->priv;
1649 struct i2c_client *client; 1500 struct i2c_client *client;
1650 1501
1651 /* remove I2C client for CI */
1652 client = st->i2c_client_ci;
1653 if (client) {
1654 module_put(client->dev.driver->owner);
1655 i2c_unregister_device(client);
1656 }
1657
1658 /* remove I2C client for tuner */ 1502 /* remove I2C client for tuner */
1659 client = st->i2c_client_tuner; 1503 client = st->i2c_client_tuner;
1660 if (client) { 1504 if (client) {
@@ -1693,8 +1537,7 @@ static struct usb_device_id cxusb_table [] = {
1693 { USB_DEVICE(USB_VID_DVICO, USB_PID_DVICO_BLUEBIRD_DUAL_4_REV_2) }, 1537 { USB_DEVICE(USB_VID_DVICO, USB_PID_DVICO_BLUEBIRD_DUAL_4_REV_2) },
1694 { USB_DEVICE(USB_VID_CONEXANT, USB_PID_CONEXANT_D680_DMB) }, 1538 { USB_DEVICE(USB_VID_CONEXANT, USB_PID_CONEXANT_D680_DMB) },
1695 { USB_DEVICE(USB_VID_CONEXANT, USB_PID_MYGICA_D689) }, 1539 { USB_DEVICE(USB_VID_CONEXANT, USB_PID_MYGICA_D689) },
1696 { USB_DEVICE(USB_VID_TECHNOTREND, USB_PID_TECHNOTREND_TVSTICK_CT2_4400) }, 1540 { USB_DEVICE(USB_VID_CONEXANT, USB_PID_MYGICA_T230) },
1697 { USB_DEVICE(USB_VID_TECHNOTREND, USB_PID_TECHNOTREND_CONNECT_CT2_4650_CI) },
1698 {} /* Terminating entry */ 1541 {} /* Terminating entry */
1699}; 1542};
1700MODULE_DEVICE_TABLE (usb, cxusb_table); 1543MODULE_DEVICE_TABLE (usb, cxusb_table);
@@ -2341,7 +2184,7 @@ static struct dvb_usb_device_properties cxusb_mygica_d689_properties = {
2341 } 2184 }
2342}; 2185};
2343 2186
2344static struct dvb_usb_device_properties cxusb_tt_ct2_4400_properties = { 2187static struct dvb_usb_device_properties cxusb_mygica_t230_properties = {
2345 .caps = DVB_USB_IS_AN_I2C_ADAPTER, 2188 .caps = DVB_USB_IS_AN_I2C_ADAPTER,
2346 2189
2347 .usb_ctrl = CYPRESS_FX2, 2190 .usb_ctrl = CYPRESS_FX2,
@@ -2349,25 +2192,21 @@ static struct dvb_usb_device_properties cxusb_tt_ct2_4400_properties = {
2349 .size_of_priv = sizeof(struct cxusb_state), 2192 .size_of_priv = sizeof(struct cxusb_state),
2350 2193
2351 .num_adapters = 1, 2194 .num_adapters = 1,
2352 .read_mac_address = cxusb_tt_ct2_4400_read_mac_address,
2353
2354 .adapter = { 2195 .adapter = {
2355 { 2196 {
2356 .num_frontends = 1, 2197 .num_frontends = 1,
2357 .fe = {{ 2198 .fe = {{
2358 .streaming_ctrl = cxusb_streaming_ctrl, 2199 .streaming_ctrl = cxusb_streaming_ctrl,
2359 /* both frontend and tuner attached in the 2200 .frontend_attach = cxusb_mygica_t230_frontend_attach,
2360 same function */
2361 .frontend_attach = cxusb_tt_ct2_4400_attach,
2362 2201
2363 /* parameter for the MPEG2-data transfer */ 2202 /* parameter for the MPEG2-data transfer */
2364 .stream = { 2203 .stream = {
2365 .type = USB_BULK, 2204 .type = USB_BULK,
2366 .count = 8, 2205 .count = 5,
2367 .endpoint = 0x82, 2206 .endpoint = 0x02,
2368 .u = { 2207 .u = {
2369 .bulk = { 2208 .bulk = {
2370 .buffersize = 4096, 2209 .buffersize = 8192,
2371 } 2210 }
2372 } 2211 }
2373 }, 2212 },
@@ -2375,28 +2214,25 @@ static struct dvb_usb_device_properties cxusb_tt_ct2_4400_properties = {
2375 }, 2214 },
2376 }, 2215 },
2377 2216
2378 .i2c_algo = &cxusb_i2c_algo, 2217 .power_ctrl = cxusb_d680_dmb_power_ctrl,
2218
2219 .i2c_algo = &cxusb_i2c_algo,
2220
2379 .generic_bulk_ctrl_endpoint = 0x01, 2221 .generic_bulk_ctrl_endpoint = 0x01,
2380 .generic_bulk_ctrl_endpoint_response = 0x81,
2381 2222
2382 .rc.core = { 2223 .rc.legacy = {
2383 .rc_codes = RC_MAP_TT_1500, 2224 .rc_interval = 100,
2384 .allowed_protos = RC_BIT_RC5, 2225 .rc_map_table = rc_map_d680_dmb_table,
2385 .rc_query = cxusb_tt_ct2_4400_rc_query, 2226 .rc_map_size = ARRAY_SIZE(rc_map_d680_dmb_table),
2386 .rc_interval = 150, 2227 .rc_query = cxusb_d680_dmb_rc_query,
2387 }, 2228 },
2388 2229
2389 .num_device_descs = 2, 2230 .num_device_descs = 1,
2390 .devices = { 2231 .devices = {
2391 { 2232 {
2392 "TechnoTrend TVStick CT2-4400", 2233 "Mygica T230 DVB-T/T2/C",
2393 { NULL },
2394 { &cxusb_table[20], NULL },
2395 },
2396 {
2397 "TechnoTrend TT-connect CT2-4650 CI",
2398 { NULL }, 2234 { NULL },
2399 { &cxusb_table[21], NULL }, 2235 { &cxusb_table[22], NULL },
2400 }, 2236 },
2401 } 2237 }
2402}; 2238};
diff --git a/drivers/media/usb/dvb-usb/cxusb.h b/drivers/media/usb/dvb-usb/cxusb.h
index 29f3e2ea2476..527ff7905e15 100644
--- a/drivers/media/usb/dvb-usb/cxusb.h
+++ b/drivers/media/usb/dvb-usb/cxusb.h
@@ -28,14 +28,10 @@
28#define CMD_ANALOG 0x50 28#define CMD_ANALOG 0x50
29#define CMD_DIGITAL 0x51 29#define CMD_DIGITAL 0x51
30 30
31#define CMD_SP2_CI_WRITE 0x70
32#define CMD_SP2_CI_READ 0x71
33
34struct cxusb_state { 31struct cxusb_state {
35 u8 gpio_write_state[3]; 32 u8 gpio_write_state[3];
36 struct i2c_client *i2c_client_demod; 33 struct i2c_client *i2c_client_demod;
37 struct i2c_client *i2c_client_tuner; 34 struct i2c_client *i2c_client_tuner;
38 struct i2c_client *i2c_client_ci;
39}; 35};
40 36
41#endif 37#endif
diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c
index 6b0b8b6b9e2a..5801ae7f672a 100644
--- a/drivers/media/usb/dvb-usb/technisat-usb2.c
+++ b/drivers/media/usb/dvb-usb/technisat-usb2.c
@@ -449,6 +449,8 @@ static int technisat_usb2_read_mac_address(struct dvb_usb_device *d,
449 return 0; 449 return 0;
450} 450}
451 451
452static struct stv090x_config technisat_usb2_stv090x_config;
453
452/* frontend attach */ 454/* frontend attach */
453static int technisat_usb2_set_voltage(struct dvb_frontend *fe, 455static int technisat_usb2_set_voltage(struct dvb_frontend *fe,
454 fe_sec_voltage_t voltage) 456 fe_sec_voltage_t voltage)
@@ -472,7 +474,8 @@ static int technisat_usb2_set_voltage(struct dvb_frontend *fe,
472 } 474 }
473 475
474 for (i = 0; i < 3; i++) 476 for (i = 0; i < 3; i++)
475 if (stv090x_set_gpio(fe, i+2, 0, gpio[i], 0) != 0) 477 if (technisat_usb2_stv090x_config.set_gpio(fe, i+2, 0,
478 gpio[i], 0) != 0)
476 return -EREMOTEIO; 479 return -EREMOTEIO;
477 return 0; 480 return 0;
478} 481}
diff --git a/drivers/media/usb/em28xx/em28xx-audio.c b/drivers/media/usb/em28xx/em28xx-audio.c
index 957c7ae30efe..44ae1e0661e6 100644
--- a/drivers/media/usb/em28xx/em28xx-audio.c
+++ b/drivers/media/usb/em28xx/em28xx-audio.c
@@ -56,7 +56,7 @@ MODULE_PARM_DESC(debug, "activates debug info");
56#define dprintk(fmt, arg...) do { \ 56#define dprintk(fmt, arg...) do { \
57 if (debug) \ 57 if (debug) \
58 printk(KERN_INFO "em28xx-audio %s: " fmt, \ 58 printk(KERN_INFO "em28xx-audio %s: " fmt, \
59 __func__, ##arg); \ 59 __func__, ##arg); \
60 } while (0) 60 } while (0)
61 61
62static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; 62static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
@@ -232,7 +232,6 @@ static struct snd_pcm_hardware snd_em28xx_hw_capture = {
232 .channels_max = 2, 232 .channels_max = 2,
233 .buffer_bytes_max = 62720 * 8, /* just about the value in usbaudio.c */ 233 .buffer_bytes_max = 62720 * 8, /* just about the value in usbaudio.c */
234 234
235
236 /* 235 /*
237 * The period is 12.288 bytes. Allow a 10% of variation along its 236 * The period is 12.288 bytes. Allow a 10% of variation along its
238 * value, in order to avoid overruns/underruns due to some clock 237 * value, in order to avoid overruns/underruns due to some clock
@@ -361,7 +360,7 @@ static int snd_em28xx_hw_capture_params(struct snd_pcm_substream *substream,
361 dprintk("Setting capture parameters\n"); 360 dprintk("Setting capture parameters\n");
362 361
363 ret = snd_pcm_alloc_vmalloc_buffer(substream, 362 ret = snd_pcm_alloc_vmalloc_buffer(substream,
364 params_buffer_bytes(hw_params)); 363 params_buffer_bytes(hw_params));
365 if (ret < 0) 364 if (ret < 0)
366 return ret; 365 return ret;
367#if 0 366#if 0
@@ -478,7 +477,7 @@ static struct page *snd_pcm_get_vmalloc_page(struct snd_pcm_substream *subs,
478 * AC97 volume control support 477 * AC97 volume control support
479 */ 478 */
480static int em28xx_vol_info(struct snd_kcontrol *kcontrol, 479static int em28xx_vol_info(struct snd_kcontrol *kcontrol,
481 struct snd_ctl_elem_info *info) 480 struct snd_ctl_elem_info *info)
482{ 481{
483 struct em28xx *dev = snd_kcontrol_chip(kcontrol); 482 struct em28xx *dev = snd_kcontrol_chip(kcontrol);
484 483
@@ -494,7 +493,7 @@ static int em28xx_vol_info(struct snd_kcontrol *kcontrol,
494} 493}
495 494
496static int em28xx_vol_put(struct snd_kcontrol *kcontrol, 495static int em28xx_vol_put(struct snd_kcontrol *kcontrol,
497 struct snd_ctl_elem_value *value) 496 struct snd_ctl_elem_value *value)
498{ 497{
499 struct em28xx *dev = snd_kcontrol_chip(kcontrol); 498 struct em28xx *dev = snd_kcontrol_chip(kcontrol);
500 struct snd_pcm_substream *substream = dev->adev.capture_pcm_substream; 499 struct snd_pcm_substream *substream = dev->adev.capture_pcm_substream;
@@ -534,7 +533,7 @@ err:
534} 533}
535 534
536static int em28xx_vol_get(struct snd_kcontrol *kcontrol, 535static int em28xx_vol_get(struct snd_kcontrol *kcontrol,
537 struct snd_ctl_elem_value *value) 536 struct snd_ctl_elem_value *value)
538{ 537{
539 struct em28xx *dev = snd_kcontrol_chip(kcontrol); 538 struct em28xx *dev = snd_kcontrol_chip(kcontrol);
540 struct snd_pcm_substream *substream = dev->adev.capture_pcm_substream; 539 struct snd_pcm_substream *substream = dev->adev.capture_pcm_substream;
@@ -655,7 +654,7 @@ static int em28xx_cvol_new(struct snd_card *card, struct em28xx *dev,
655 struct snd_kcontrol *kctl; 654 struct snd_kcontrol *kctl;
656 struct snd_kcontrol_new tmp; 655 struct snd_kcontrol_new tmp;
657 656
658 memset (&tmp, 0, sizeof(tmp)); 657 memset(&tmp, 0, sizeof(tmp));
659 tmp.iface = SNDRV_CTL_ELEM_IFACE_MIXER, 658 tmp.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
660 tmp.private_value = id, 659 tmp.private_value = id,
661 tmp.name = ctl_name, 660 tmp.name = ctl_name,
@@ -672,7 +671,7 @@ static int em28xx_cvol_new(struct snd_card *card, struct em28xx *dev,
672 dprintk("Added control %s for ac97 volume control 0x%04x\n", 671 dprintk("Added control %s for ac97 volume control 0x%04x\n",
673 ctl_name, id); 672 ctl_name, id);
674 673
675 memset (&tmp, 0, sizeof(tmp)); 674 memset(&tmp, 0, sizeof(tmp));
676 tmp.iface = SNDRV_CTL_ELEM_IFACE_MIXER, 675 tmp.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
677 tmp.private_value = id, 676 tmp.private_value = id,
678 tmp.name = ctl_name, 677 tmp.name = ctl_name,
@@ -731,7 +730,7 @@ static void em28xx_audio_free_urb(struct em28xx *dev)
731 730
732/* high bandwidth multiplier, as encoded in highspeed endpoint descriptors */ 731/* high bandwidth multiplier, as encoded in highspeed endpoint descriptors */
733static int em28xx_audio_ep_packet_size(struct usb_device *udev, 732static int em28xx_audio_ep_packet_size(struct usb_device *udev,
734 struct usb_endpoint_descriptor *e) 733 struct usb_endpoint_descriptor *e)
735{ 734{
736 int size = le16_to_cpu(e->wMaxPacketSize); 735 int size = le16_to_cpu(e->wMaxPacketSize);
737 736
@@ -781,7 +780,7 @@ static int em28xx_audio_urb_init(struct em28xx *dev)
781 interval = 1 << (ep->bInterval - 1); 780 interval = 1 << (ep->bInterval - 1);
782 781
783 em28xx_info("Endpoint 0x%02x %s on intf %d alt %d interval = %d, size %d\n", 782 em28xx_info("Endpoint 0x%02x %s on intf %d alt %d interval = %d, size %d\n",
784 EM28XX_EP_AUDIO, usb_speed_string(dev->udev->speed), 783 EM28XX_EP_AUDIO, usb_speed_string(dev->udev->speed),
785 dev->ifnum, alt, 784 dev->ifnum, alt,
786 interval, 785 interval,
787 ep_size); 786 ep_size);
diff --git a/drivers/media/usb/em28xx/em28xx-camera.c b/drivers/media/usb/em28xx/em28xx-camera.c
index 6d2ea9afd57b..7be661f73930 100644
--- a/drivers/media/usb/em28xx/em28xx-camera.c
+++ b/drivers/media/usb/em28xx/em28xx-camera.c
@@ -27,7 +27,6 @@
27 27
28#include "em28xx.h" 28#include "em28xx.h"
29 29
30
31/* Possible i2c addresses of Micron sensors */ 30/* Possible i2c addresses of Micron sensors */
32static unsigned short micron_sensor_addrs[] = { 31static unsigned short micron_sensor_addrs[] = {
33 0xb8 >> 1, /* MT9V111, MT9V403 */ 32 0xb8 >> 1, /* MT9V111, MT9V403 */
@@ -43,7 +42,6 @@ static unsigned short omnivision_sensor_addrs[] = {
43 I2C_CLIENT_END 42 I2C_CLIENT_END
44}; 43};
45 44
46
47static struct soc_camera_link camlink = { 45static struct soc_camera_link camlink = {
48 .bus_id = 0, 46 .bus_id = 0,
49 .flags = 0, 47 .flags = 0,
@@ -51,7 +49,6 @@ static struct soc_camera_link camlink = {
51 .unbalanced_power = true, 49 .unbalanced_power = true,
52}; 50};
53 51
54
55/* FIXME: Should be replaced by a proper mt9m111 driver */ 52/* FIXME: Should be replaced by a proper mt9m111 driver */
56static int em28xx_initialize_mt9m111(struct em28xx *dev) 53static int em28xx_initialize_mt9m111(struct em28xx *dev)
57{ 54{
@@ -70,7 +67,6 @@ static int em28xx_initialize_mt9m111(struct em28xx *dev)
70 return 0; 67 return 0;
71} 68}
72 69
73
74/* FIXME: Should be replaced by a proper mt9m001 driver */ 70/* FIXME: Should be replaced by a proper mt9m001 driver */
75static int em28xx_initialize_mt9m001(struct em28xx *dev) 71static int em28xx_initialize_mt9m001(struct em28xx *dev)
76{ 72{
@@ -98,7 +94,6 @@ static int em28xx_initialize_mt9m001(struct em28xx *dev)
98 return 0; 94 return 0;
99} 95}
100 96
101
102/* 97/*
103 * Probes Micron sensors with 8 bit address and 16 bit register width 98 * Probes Micron sensors with 8 bit address and 16 bit register width
104 */ 99 */
@@ -430,7 +425,7 @@ int em28xx_init_camera(struct em28xx *dev)
430 break; 425 break;
431 } 426 }
432 427
433 fmt.code = V4L2_MBUS_FMT_YUYV8_2X8; 428 fmt.code = MEDIA_BUS_FMT_YUYV8_2X8;
434 fmt.width = 640; 429 fmt.width = 640;
435 fmt.height = 480; 430 fmt.height = 480;
436 v4l2_subdev_call(subdev, video, s_mbus_fmt, &fmt); 431 v4l2_subdev_call(subdev, video, s_mbus_fmt, &fmt);
diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
index 71fa51e7984e..d9704e66b8c9 100644
--- a/drivers/media/usb/em28xx/em28xx-cards.c
+++ b/drivers/media/usb/em28xx/em28xx-cards.c
@@ -64,7 +64,6 @@ module_param(usb_xfer_mode, int, 0444);
64MODULE_PARM_DESC(usb_xfer_mode, 64MODULE_PARM_DESC(usb_xfer_mode,
65 "USB transfer mode for frame data (-1 = auto, 0 = prefer isoc, 1 = prefer bulk)"); 65 "USB transfer mode for frame data (-1 = auto, 0 = prefer isoc, 1 = prefer bulk)");
66 66
67
68/* Bitmask marking allocated devices from 0 to EM28XX_MAXBOARDS - 1 */ 67/* Bitmask marking allocated devices from 0 to EM28XX_MAXBOARDS - 1 */
69static DECLARE_BITMAP(em28xx_devused, EM28XX_MAXBOARDS); 68static DECLARE_BITMAP(em28xx_devused, EM28XX_MAXBOARDS);
70 69
@@ -190,8 +189,8 @@ static struct em28xx_reg_seq kworld_a340_digital[] = {
190}; 189};
191 190
192static struct em28xx_reg_seq kworld_ub435q_v3_digital[] = { 191static struct em28xx_reg_seq kworld_ub435q_v3_digital[] = {
193 {EM2874_R80_GPIO_P0_CTRL, 0xff, 0xff, 100}, 192 {EM2874_R80_GPIO_P0_CTRL, 0xff, 0xff, 100},
194 {EM2874_R80_GPIO_P0_CTRL, 0xfe, 0xff, 100}, 193 {EM2874_R80_GPIO_P0_CTRL, 0xfe, 0xff, 100},
195 {EM2874_R80_GPIO_P0_CTRL, 0xbe, 0xff, 100}, 194 {EM2874_R80_GPIO_P0_CTRL, 0xbe, 0xff, 100},
196 {EM2874_R80_GPIO_P0_CTRL, 0xfe, 0xff, 100}, 195 {EM2874_R80_GPIO_P0_CTRL, 0xfe, 0xff, 100},
197 { -1, -1, -1, -1}, 196 { -1, -1, -1, -1},
@@ -301,7 +300,6 @@ static struct em28xx_reg_seq dikom_dk300_digital[] = {
301 { -1, -1, -1, -1}, 300 { -1, -1, -1, -1},
302}; 301};
303 302
304
305/* Reset for the most [digital] boards */ 303/* Reset for the most [digital] boards */
306static struct em28xx_reg_seq leadership_digital[] = { 304static struct em28xx_reg_seq leadership_digital[] = {
307 {EM2874_R80_GPIO_P0_CTRL, 0x70, 0xff, 10}, 305 {EM2874_R80_GPIO_P0_CTRL, 0x70, 0xff, 10},
@@ -479,6 +477,20 @@ static struct em28xx_reg_seq pctv_292e[] = {
479 {-1, -1, -1, -1}, 477 {-1, -1, -1, -1},
480}; 478};
481 479
480static struct em28xx_reg_seq terratec_t2_stick_hd[] = {
481 {EM2874_R80_GPIO_P0_CTRL, 0xff, 0xff, 0},
482 {0x0d, 0xff, 0xff, 600},
483 {EM2874_R80_GPIO_P0_CTRL, 0xfc, 0xff, 10},
484 {EM2874_R80_GPIO_P0_CTRL, 0xbc, 0xff, 100},
485 {EM2874_R80_GPIO_P0_CTRL, 0xfc, 0xff, 100},
486 {EM2874_R80_GPIO_P0_CTRL, 0x00, 0xff, 300},
487 {EM2874_R80_GPIO_P0_CTRL, 0xf8, 0xff, 100},
488 {EM2874_R80_GPIO_P0_CTRL, 0xfc, 0xff, 300},
489 {0x0d, 0x42, 0xff, 1000},
490 {EM2874_R5F_TS_ENABLE, 0x85, 0xff, 0},
491 {-1, -1, -1, -1},
492};
493
482/* 494/*
483 * Button definitions 495 * Button definitions
484 */ 496 */
@@ -548,7 +560,6 @@ static struct em28xx_led pctv_80e_leds[] = {
548 {-1, 0, 0, 0}, 560 {-1, 0, 0, 0},
549}; 561};
550 562
551
552/* 563/*
553 * Board definitions 564 * Board definitions
554 */ 565 */
@@ -1514,7 +1525,7 @@ struct em28xx_board em28xx_boards[] = {
1514 .type = EM28XX_VMUX_TELEVISION, 1525 .type = EM28XX_VMUX_TELEVISION,
1515 .vmux = SAA7115_COMPOSITE2, 1526 .vmux = SAA7115_COMPOSITE2,
1516 .amux = EM28XX_AMUX_VIDEO, 1527 .amux = EM28XX_AMUX_VIDEO,
1517 .aout = EM28XX_AOUT_MONO | /* I2S */ 1528 .aout = EM28XX_AOUT_MONO | /* I2S */
1518 EM28XX_AOUT_MASTER, /* Line out pin */ 1529 EM28XX_AOUT_MASTER, /* Line out pin */
1519 }, { 1530 }, {
1520 .type = EM28XX_VMUX_COMPOSITE1, 1531 .type = EM28XX_VMUX_COMPOSITE1,
@@ -1536,7 +1547,7 @@ struct em28xx_board em28xx_boards[] = {
1536 .type = EM28XX_VMUX_TELEVISION, 1547 .type = EM28XX_VMUX_TELEVISION,
1537 .vmux = SAA7115_COMPOSITE2, 1548 .vmux = SAA7115_COMPOSITE2,
1538 .amux = EM28XX_AMUX_VIDEO, 1549 .amux = EM28XX_AMUX_VIDEO,
1539 .aout = EM28XX_AOUT_MONO | /* I2S */ 1550 .aout = EM28XX_AOUT_MONO | /* I2S */
1540 EM28XX_AOUT_MASTER, /* Line out pin */ 1551 EM28XX_AOUT_MASTER, /* Line out pin */
1541 }, { 1552 }, {
1542 .type = EM28XX_VMUX_COMPOSITE1, 1553 .type = EM28XX_VMUX_COMPOSITE1,
@@ -2243,6 +2254,31 @@ struct em28xx_board em28xx_boards[] = {
2243 .has_dvb = 1, 2254 .has_dvb = 1,
2244 .ir_codes = RC_MAP_PINNACLE_PCTV_HD, 2255 .ir_codes = RC_MAP_PINNACLE_PCTV_HD,
2245 }, 2256 },
2257 [EM2861_BOARD_LEADTEK_VC100] = {
2258 .name = "Leadtek VC100",
2259 .tuner_type = TUNER_ABSENT, /* Capture only device */
2260 .decoder = EM28XX_TVP5150,
2261 .input = { {
2262 .type = EM28XX_VMUX_COMPOSITE1,
2263 .vmux = TVP5150_COMPOSITE1,
2264 .amux = EM28XX_AMUX_LINE_IN,
2265 }, {
2266 .type = EM28XX_VMUX_SVIDEO,
2267 .vmux = TVP5150_SVIDEO,
2268 .amux = EM28XX_AMUX_LINE_IN,
2269 } },
2270 },
2271 /* eb1a:8179 Terratec Cinergy T2 Stick HD.
2272 * Empia EM28178, Silicon Labs Si2168, Silicon Labs Si2146 */
2273 [EM28178_BOARD_TERRATEC_T2_STICK_HD] = {
2274 .name = "Terratec Cinergy T2 Stick HD",
2275 .def_i2c_bus = 1,
2276 .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE | EM28XX_I2C_FREQ_400_KHZ,
2277 .tuner_type = TUNER_ABSENT,
2278 .tuner_gpio = terratec_t2_stick_hd,
2279 .has_dvb = 1,
2280 .ir_codes = RC_MAP_TERRATEC_SLIM_2,
2281 },
2246}; 2282};
2247EXPORT_SYMBOL_GPL(em28xx_boards); 2283EXPORT_SYMBOL_GPL(em28xx_boards);
2248 2284
@@ -2424,6 +2460,10 @@ struct usb_device_id em28xx_id_table[] = {
2424 .driver_info = EM28178_BOARD_PCTV_461E }, 2460 .driver_info = EM28178_BOARD_PCTV_461E },
2425 { USB_DEVICE(0x2013, 0x025f), 2461 { USB_DEVICE(0x2013, 0x025f),
2426 .driver_info = EM28178_BOARD_PCTV_292E }, 2462 .driver_info = EM28178_BOARD_PCTV_292E },
2463 { USB_DEVICE(0x0413, 0x6f07),
2464 .driver_info = EM2861_BOARD_LEADTEK_VC100 },
2465 { USB_DEVICE(0xeb1a, 0x8179),
2466 .driver_info = EM28178_BOARD_TERRATEC_T2_STICK_HD },
2427 { }, 2467 { },
2428}; 2468};
2429MODULE_DEVICE_TABLE(usb, em28xx_id_table); 2469MODULE_DEVICE_TABLE(usb, em28xx_id_table);
@@ -2453,6 +2493,7 @@ static struct em28xx_hash_table em28xx_i2c_hash[] = {
2453 {0x4ba50080, EM2861_BOARD_GADMEI_UTV330PLUS, TUNER_TNF_5335MF}, 2493 {0x4ba50080, EM2861_BOARD_GADMEI_UTV330PLUS, TUNER_TNF_5335MF},
2454 {0x6b800080, EM2874_BOARD_LEADERSHIP_ISDBT, TUNER_ABSENT}, 2494 {0x6b800080, EM2874_BOARD_LEADERSHIP_ISDBT, TUNER_ABSENT},
2455}; 2495};
2496
2456/* NOTE: introduce a separate hash table for devices with 16 bit eeproms */ 2497/* NOTE: introduce a separate hash table for devices with 16 bit eeproms */
2457 2498
2458int em28xx_tuner_callback(void *ptr, int component, int command, int arg) 2499int em28xx_tuner_callback(void *ptr, int component, int command, int arg)
@@ -2695,7 +2736,7 @@ static int em28xx_hint_board(struct em28xx *dev)
2695 " insmod option:\n"); 2736 " insmod option:\n");
2696 for (i = 0; i < em28xx_bcount; i++) { 2737 for (i = 0; i < em28xx_bcount; i++) {
2697 em28xx_errdev(" card=%d -> %s\n", 2738 em28xx_errdev(" card=%d -> %s\n",
2698 i, em28xx_boards[i].name); 2739 i, em28xx_boards[i].name);
2699 } 2740 }
2700 return -1; 2741 return -1;
2701} 2742}
@@ -3051,6 +3092,7 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev,
3051 if (le16_to_cpu(dev->udev->descriptor.idVendor) 3092 if (le16_to_cpu(dev->udev->descriptor.idVendor)
3052 == 0xeb1a) { 3093 == 0xeb1a) {
3053 __le16 idProd = dev->udev->descriptor.idProduct; 3094 __le16 idProd = dev->udev->descriptor.idProduct;
3095
3054 if (le16_to_cpu(idProd) == 0x2710) 3096 if (le16_to_cpu(idProd) == 0x2710)
3055 chip_name = "em2710"; 3097 chip_name = "em2710";
3056 else if (le16_to_cpu(idProd) == 0x2820) 3098 else if (le16_to_cpu(idProd) == 0x2820)
@@ -3139,7 +3181,7 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev,
3139 retval = em28xx_i2c_register(dev, 0, EM28XX_I2C_ALGO_EM28XX); 3181 retval = em28xx_i2c_register(dev, 0, EM28XX_I2C_ALGO_EM28XX);
3140 if (retval < 0) { 3182 if (retval < 0) {
3141 em28xx_errdev("%s: em28xx_i2c_register bus 0 - error [%d]!\n", 3183 em28xx_errdev("%s: em28xx_i2c_register bus 0 - error [%d]!\n",
3142 __func__, retval); 3184 __func__, retval);
3143 return retval; 3185 return retval;
3144 } 3186 }
3145 3187
@@ -3147,13 +3189,13 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev,
3147 if (dev->def_i2c_bus) { 3189 if (dev->def_i2c_bus) {
3148 if (dev->is_em25xx) 3190 if (dev->is_em25xx)
3149 retval = em28xx_i2c_register(dev, 1, 3191 retval = em28xx_i2c_register(dev, 1,
3150 EM28XX_I2C_ALGO_EM25XX_BUS_B); 3192 EM28XX_I2C_ALGO_EM25XX_BUS_B);
3151 else 3193 else
3152 retval = em28xx_i2c_register(dev, 1, 3194 retval = em28xx_i2c_register(dev, 1,
3153 EM28XX_I2C_ALGO_EM28XX); 3195 EM28XX_I2C_ALGO_EM28XX);
3154 if (retval < 0) { 3196 if (retval < 0) {
3155 em28xx_errdev("%s: em28xx_i2c_register bus 1 - error [%d]!\n", 3197 em28xx_errdev("%s: em28xx_i2c_register bus 1 - error [%d]!\n",
3156 __func__, retval); 3198 __func__, retval);
3157 3199
3158 em28xx_i2c_unregister(dev, 0); 3200 em28xx_i2c_unregister(dev, 0);
3159 3201
@@ -3193,7 +3235,7 @@ static int em28xx_usb_probe(struct usb_interface *interface,
3193 if (nr >= EM28XX_MAXBOARDS) { 3235 if (nr >= EM28XX_MAXBOARDS) {
3194 /* No free device slots */ 3236 /* No free device slots */
3195 printk(DRIVER_NAME ": Supports only %i em28xx boards.\n", 3237 printk(DRIVER_NAME ": Supports only %i em28xx boards.\n",
3196 EM28XX_MAXBOARDS); 3238 EM28XX_MAXBOARDS);
3197 retval = -ENOMEM; 3239 retval = -ENOMEM;
3198 goto err_no_slot; 3240 goto err_no_slot;
3199 } 3241 }
@@ -3377,6 +3419,7 @@ static int em28xx_usb_probe(struct usb_interface *interface,
3377 /* Checks if audio is provided by a USB Audio Class interface */ 3419 /* Checks if audio is provided by a USB Audio Class interface */
3378 for (i = 0; i < udev->config->desc.bNumInterfaces; i++) { 3420 for (i = 0; i < udev->config->desc.bNumInterfaces; i++) {
3379 struct usb_interface *uif = udev->config->interface[i]; 3421 struct usb_interface *uif = udev->config->interface[i];
3422
3380 if (uif->altsetting[0].desc.bInterfaceClass == USB_CLASS_AUDIO) { 3423 if (uif->altsetting[0].desc.bInterfaceClass == USB_CLASS_AUDIO) {
3381 if (has_vendor_audio) 3424 if (has_vendor_audio)
3382 em28xx_err("em28xx: device seems to have vendor AND usb audio class interfaces !\n" 3425 em28xx_err("em28xx: device seems to have vendor AND usb audio class interfaces !\n"
@@ -3487,7 +3530,7 @@ static void em28xx_usb_disconnect(struct usb_interface *interface)
3487} 3530}
3488 3531
3489static int em28xx_usb_suspend(struct usb_interface *interface, 3532static int em28xx_usb_suspend(struct usb_interface *interface,
3490 pm_message_t message) 3533 pm_message_t message)
3491{ 3534{
3492 struct em28xx *dev; 3535 struct em28xx *dev;
3493 3536
diff --git a/drivers/media/usb/em28xx/em28xx-core.c b/drivers/media/usb/em28xx/em28xx-core.c
index 901cf2b952d7..86461a708abe 100644
--- a/drivers/media/usb/em28xx/em28xx-core.c
+++ b/drivers/media/usb/em28xx/em28xx-core.c
@@ -75,7 +75,7 @@ MODULE_PARM_DESC(reg_debug, "enable debug messages [URB reg]");
75 * reads data from the usb device specifying bRequest 75 * reads data from the usb device specifying bRequest
76 */ 76 */
77int em28xx_read_reg_req_len(struct em28xx *dev, u8 req, u16 reg, 77int em28xx_read_reg_req_len(struct em28xx *dev, u8 req, u16 reg,
78 char *buf, int len) 78 char *buf, int len)
79{ 79{
80 int ret; 80 int ret;
81 int pipe = usb_rcvctrlpipe(dev->udev, 0); 81 int pipe = usb_rcvctrlpipe(dev->udev, 0);
@@ -151,7 +151,7 @@ EXPORT_SYMBOL_GPL(em28xx_read_reg);
151 * sends data to the usb device, specifying bRequest 151 * sends data to the usb device, specifying bRequest
152 */ 152 */
153int em28xx_write_regs_req(struct em28xx *dev, u8 req, u16 reg, char *buf, 153int em28xx_write_regs_req(struct em28xx *dev, u8 req, u16 reg, char *buf,
154 int len) 154 int len)
155{ 155{
156 int ret; 156 int ret;
157 int pipe = usb_sndctrlpipe(dev->udev, 0); 157 int pipe = usb_sndctrlpipe(dev->udev, 0);
@@ -213,7 +213,7 @@ EXPORT_SYMBOL_GPL(em28xx_write_reg);
213 * the actual value 213 * the actual value
214 */ 214 */
215int em28xx_write_reg_bits(struct em28xx *dev, u16 reg, u8 val, 215int em28xx_write_reg_bits(struct em28xx *dev, u16 reg, u8 val,
216 u8 bitmask) 216 u8 bitmask)
217{ 217{
218 int oldval; 218 int oldval;
219 u8 newval; 219 u8 newval;
@@ -222,7 +222,7 @@ int em28xx_write_reg_bits(struct em28xx *dev, u16 reg, u8 val,
222 if (oldval < 0) 222 if (oldval < 0)
223 return oldval; 223 return oldval;
224 224
225 newval = (((u8) oldval) & ~bitmask) | (val & bitmask); 225 newval = (((u8)oldval) & ~bitmask) | (val & bitmask);
226 226
227 return em28xx_write_regs(dev, reg, &newval, 1); 227 return em28xx_write_regs(dev, reg, &newval, 1);
228} 228}
@@ -314,7 +314,7 @@ int em28xx_write_ac97(struct em28xx *dev, u8 reg, u16 val)
314 if (ret < 0) 314 if (ret < 0)
315 return ret; 315 return ret;
316 316
317 ret = em28xx_write_regs(dev, EM28XX_R40_AC97LSB, (u8 *) &value, 2); 317 ret = em28xx_write_regs(dev, EM28XX_R40_AC97LSB, (u8 *)&value, 2);
318 if (ret < 0) 318 if (ret < 0)
319 return ret; 319 return ret;
320 320
@@ -361,7 +361,7 @@ static int set_ac97_input(struct em28xx *dev)
361 361
362 if (ret < 0) 362 if (ret < 0)
363 em28xx_warn("couldn't setup AC97 register %d\n", 363 em28xx_warn("couldn't setup AC97 register %d\n",
364 inputs[i].reg); 364 inputs[i].reg);
365 } 365 }
366 return 0; 366 return 0;
367} 367}
@@ -445,7 +445,7 @@ int em28xx_audio_analog_set(struct em28xx *dev)
445 ret = em28xx_write_ac97(dev, outputs[i].reg, 0x8000); 445 ret = em28xx_write_ac97(dev, outputs[i].reg, 0x8000);
446 if (ret < 0) 446 if (ret < 0)
447 em28xx_warn("couldn't setup AC97 register %d\n", 447 em28xx_warn("couldn't setup AC97 register %d\n",
448 outputs[i].reg); 448 outputs[i].reg);
449 } 449 }
450 } 450 }
451 451
@@ -483,7 +483,7 @@ int em28xx_audio_analog_set(struct em28xx *dev)
483 vol); 483 vol);
484 if (ret < 0) 484 if (ret < 0)
485 em28xx_warn("couldn't setup AC97 register %d\n", 485 em28xx_warn("couldn't setup AC97 register %d\n",
486 outputs[i].reg); 486 outputs[i].reg);
487 } 487 }
488 488
489 if (dev->ctl_aoutput & EM28XX_AOUT_PCM_IN) { 489 if (dev->ctl_aoutput & EM28XX_AOUT_PCM_IN) {
@@ -531,7 +531,7 @@ int em28xx_audio_setup(struct em28xx *dev)
531 } else if ((cfg & EM28XX_CHIPCFG_AUDIOMASK) != EM28XX_CHIPCFG_AC97) { 531 } else if ((cfg & EM28XX_CHIPCFG_AUDIOMASK) != EM28XX_CHIPCFG_AC97) {
532 dev->int_audio_type = EM28XX_INT_AUDIO_I2S; 532 dev->int_audio_type = EM28XX_INT_AUDIO_I2S;
533 if (dev->chip_id < CHIP_ID_EM2860 && 533 if (dev->chip_id < CHIP_ID_EM2860 &&
534 (cfg & EM28XX_CHIPCFG_AUDIOMASK) == 534 (cfg & EM28XX_CHIPCFG_AUDIOMASK) ==
535 EM2820_CHIPCFG_I2S_1_SAMPRATE) 535 EM2820_CHIPCFG_I2S_1_SAMPRATE)
536 i2s_samplerates = 1; 536 i2s_samplerates = 1;
537 else if (dev->chip_id >= CHIP_ID_EM2860 && 537 else if (dev->chip_id >= CHIP_ID_EM2860 &&
@@ -541,7 +541,7 @@ int em28xx_audio_setup(struct em28xx *dev)
541 else 541 else
542 i2s_samplerates = 3; 542 i2s_samplerates = 3;
543 em28xx_info("I2S Audio (%d sample rate(s))\n", 543 em28xx_info("I2S Audio (%d sample rate(s))\n",
544 i2s_samplerates); 544 i2s_samplerates);
545 /* Skip the code that does AC97 vendor detection */ 545 /* Skip the code that does AC97 vendor detection */
546 dev->audio_mode.ac97 = EM28XX_NO_AC97; 546 dev->audio_mode.ac97 = EM28XX_NO_AC97;
547 goto init_audio; 547 goto init_audio;
@@ -614,8 +614,9 @@ const struct em28xx_led *em28xx_find_led(struct em28xx *dev,
614{ 614{
615 if (dev->board.leds) { 615 if (dev->board.leds) {
616 u8 k = 0; 616 u8 k = 0;
617
617 while (dev->board.leds[k].role >= 0 && 618 while (dev->board.leds[k].role >= 0 &&
618 dev->board.leds[k].role < EM28XX_NUM_LED_ROLES) { 619 dev->board.leds[k].role < EM28XX_NUM_LED_ROLES) {
619 if (dev->board.leds[k].role == role) 620 if (dev->board.leds[k].role == role)
620 return &dev->board.leds[k]; 621 return &dev->board.leds[k];
621 k++; 622 k++;
@@ -658,10 +659,12 @@ int em28xx_capture_start(struct em28xx *dev, int start)
658 659
659 if (dev->mode == EM28XX_ANALOG_MODE) 660 if (dev->mode == EM28XX_ANALOG_MODE)
660 rc = em28xx_write_reg(dev, 661 rc = em28xx_write_reg(dev,
661 EM28XX_R12_VINENABLE, 0x67); 662 EM28XX_R12_VINENABLE,
663 0x67);
662 else 664 else
663 rc = em28xx_write_reg(dev, 665 rc = em28xx_write_reg(dev,
664 EM28XX_R12_VINENABLE, 0x37); 666 EM28XX_R12_VINENABLE,
667 0x37);
665 if (rc < 0) 668 if (rc < 0)
666 return rc; 669 return rc;
667 670
@@ -815,9 +818,9 @@ void em28xx_uninit_usb_xfer(struct em28xx *dev, enum em28xx_mode mode)
815 818
816 if (usb_bufs->transfer_buffer[i]) { 819 if (usb_bufs->transfer_buffer[i]) {
817 usb_free_coherent(dev->udev, 820 usb_free_coherent(dev->udev,
818 urb->transfer_buffer_length, 821 urb->transfer_buffer_length,
819 usb_bufs->transfer_buffer[i], 822 usb_bufs->transfer_buffer[i],
820 urb->transfer_dma); 823 urb->transfer_dma);
821 } 824 }
822 usb_free_urb(urb); 825 usb_free_urb(urb);
823 usb_bufs->urb[i] = NULL; 826 usb_bufs->urb[i] = NULL;
@@ -889,7 +892,7 @@ int em28xx_alloc_urbs(struct em28xx *dev, enum em28xx_mode mode, int xfer_bulk,
889 if ((xfer_bulk && !dev->analog_ep_bulk) || 892 if ((xfer_bulk && !dev->analog_ep_bulk) ||
890 (!xfer_bulk && !dev->analog_ep_isoc)) { 893 (!xfer_bulk && !dev->analog_ep_isoc)) {
891 em28xx_errdev("no endpoint for analog mode and transfer type %d\n", 894 em28xx_errdev("no endpoint for analog mode and transfer type %d\n",
892 xfer_bulk > 0); 895 xfer_bulk > 0);
893 return -EINVAL; 896 return -EINVAL;
894 } 897 }
895 usb_bufs = &dev->usb_ctl.analog_bufs; 898 usb_bufs = &dev->usb_ctl.analog_bufs;
@@ -988,9 +991,9 @@ EXPORT_SYMBOL_GPL(em28xx_alloc_urbs);
988 * Allocate URBs and start IRQ 991 * Allocate URBs and start IRQ
989 */ 992 */
990int em28xx_init_usb_xfer(struct em28xx *dev, enum em28xx_mode mode, 993int em28xx_init_usb_xfer(struct em28xx *dev, enum em28xx_mode mode,
991 int xfer_bulk, int num_bufs, int max_pkt_size, 994 int xfer_bulk, int num_bufs, int max_pkt_size,
992 int packet_multiplier, 995 int packet_multiplier,
993 int (*urb_data_copy) (struct em28xx *dev, struct urb *urb)) 996 int (*urb_data_copy)(struct em28xx *dev, struct urb *urb))
994{ 997{
995 struct em28xx_dmaqueue *dma_q = &dev->vidq; 998 struct em28xx_dmaqueue *dma_q = &dev->vidq;
996 struct em28xx_dmaqueue *vbi_dma_q = &dev->vbiq; 999 struct em28xx_dmaqueue *vbi_dma_q = &dev->vbiq;
diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c
index 9682c52d67d1..9877b699c6bc 100644
--- a/drivers/media/usb/em28xx/em28xx-dvb.c
+++ b/drivers/media/usb/em28xx/em28xx-dvb.c
@@ -63,7 +63,6 @@ MODULE_LICENSE("GPL");
63MODULE_DESCRIPTION(DRIVER_DESC " - digital TV interface"); 63MODULE_DESCRIPTION(DRIVER_DESC " - digital TV interface");
64MODULE_VERSION(EM28XX_VERSION); 64MODULE_VERSION(EM28XX_VERSION);
65 65
66
67static unsigned int debug; 66static unsigned int debug;
68module_param(debug, int, 0644); 67module_param(debug, int, 0644);
69MODULE_PARM_DESC(debug, "enable debug messages [dvb]"); 68MODULE_PARM_DESC(debug, "enable debug messages [dvb]");
@@ -71,7 +70,7 @@ MODULE_PARM_DESC(debug, "enable debug messages [dvb]");
71DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); 70DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
72 71
73#define dprintk(level, fmt, arg...) do { \ 72#define dprintk(level, fmt, arg...) do { \
74if (debug >= level) \ 73if (debug >= level) \
75 printk(KERN_DEBUG "%s/2-dvb: " fmt, dev->name, ## arg); \ 74 printk(KERN_DEBUG "%s/2-dvb: " fmt, dev->name, ## arg); \
76} while (0) 75} while (0)
77 76
@@ -99,9 +98,8 @@ struct em28xx_dvb {
99 struct i2c_client *i2c_client_tuner; 98 struct i2c_client *i2c_client_tuner;
100}; 99};
101 100
102
103static inline void print_err_status(struct em28xx *dev, 101static inline void print_err_status(struct em28xx *dev,
104 int packet, int status) 102 int packet, int status)
105{ 103{
106 char *errmsg = "Unknown"; 104 char *errmsg = "Unknown";
107 105
@@ -169,7 +167,7 @@ static inline int em28xx_dvb_urb_data_copy(struct em28xx *dev, struct urb *urb)
169 if (!urb->actual_length) 167 if (!urb->actual_length)
170 continue; 168 continue;
171 dvb_dmx_swfilter(&dev->dvb->demux, urb->transfer_buffer, 169 dvb_dmx_swfilter(&dev->dvb->demux, urb->transfer_buffer,
172 urb->actual_length); 170 urb->actual_length);
173 } else { 171 } else {
174 if (urb->iso_frame_desc[i].status < 0) { 172 if (urb->iso_frame_desc[i].status < 0) {
175 print_err_status(dev, i, 173 print_err_status(dev, i,
@@ -280,7 +278,6 @@ static int em28xx_stop_feed(struct dvb_demux_feed *feed)
280} 278}
281 279
282 280
283
284/* ------------------------------------------------------------------ */ 281/* ------------------------------------------------------------------ */
285static int em28xx_dvb_bus_ctrl(struct dvb_frontend *fe, int acquire) 282static int em28xx_dvb_bus_ctrl(struct dvb_frontend *fe, int acquire)
286{ 283{
@@ -740,7 +737,7 @@ static int em28xx_pctv_290e_set_lna(struct dvb_frontend *fe)
740 return ret; 737 return ret;
741#else 738#else
742 dev_warn(&dev->udev->dev, "%s: LNA control is disabled (lna=%u)\n", 739 dev_warn(&dev->udev->dev, "%s: LNA control is disabled (lna=%u)\n",
743 KBUILD_MODNAME, c->lna); 740 KBUILD_MODNAME, c->lna);
744 return 0; 741 return 0;
745#endif 742#endif
746} 743}
@@ -830,6 +827,7 @@ static struct zl10353_config em28xx_zl10353_no_i2c_gate_dev = {
830 .no_tuner = 1, 827 .no_tuner = 1,
831 .parallel_ts = 1, 828 .parallel_ts = 1,
832}; 829};
830
833static struct qt1010_config em28xx_qt1010_config = { 831static struct qt1010_config em28xx_qt1010_config = {
834 .i2c_address = 0x62 832 .i2c_address = 0x62
835}; 833};
@@ -861,7 +859,6 @@ static const struct m88ds3103_config pctv_461e_m88ds3103_config = {
861 .agc = 0x99, 859 .agc = 0x99,
862}; 860};
863 861
864
865static struct tda18271_std_map drx_j_std_map = { 862static struct tda18271_std_map drx_j_std_map = {
866 .atsc_6 = { .if_freq = 5000, .agc_mode = 3, .std = 0, .if_lvl = 1, 863 .atsc_6 = { .if_freq = 5000, .agc_mode = 3, .std = 0, .if_lvl = 1,
867 .rfagc_top = 0x37, }, 864 .rfagc_top = 0x37, },
@@ -948,7 +945,7 @@ static int em28xx_register_dvb(struct em28xx_dvb *dvb, struct module *module,
948 result = dvb_register_frontend(&dvb->adapter, dvb->fe[1]); 945 result = dvb_register_frontend(&dvb->adapter, dvb->fe[1]);
949 if (result < 0) { 946 if (result < 0) {
950 printk(KERN_WARNING "%s: 2nd dvb_register_frontend failed (errno = %d)\n", 947 printk(KERN_WARNING "%s: 2nd dvb_register_frontend failed (errno = %d)\n",
951 dev->name, result); 948 dev->name, result);
952 goto fail_frontend1; 949 goto fail_frontend1;
953 } 950 }
954 } 951 }
@@ -1047,7 +1044,7 @@ static void em28xx_unregister_dvb(struct em28xx_dvb *dvb)
1047 1044
1048static int em28xx_dvb_init(struct em28xx *dev) 1045static int em28xx_dvb_init(struct em28xx *dev)
1049{ 1046{
1050 int result = 0, mfe_shared = 0; 1047 int result = 0;
1051 struct em28xx_dvb *dvb; 1048 struct em28xx_dvb *dvb;
1052 1049
1053 if (dev->is_audio_only) { 1050 if (dev->is_audio_only) {
@@ -1182,7 +1179,8 @@ static int em28xx_dvb_init(struct em28xx *dev)
1182 &dev->i2c_adap[dev->def_i2c_bus]); 1179 &dev->i2c_adap[dev->def_i2c_bus]);
1183 if (dvb->fe[0] != NULL) { 1180 if (dvb->fe[0] != NULL) {
1184 if (!dvb_attach(simple_tuner_attach, dvb->fe[0], 1181 if (!dvb_attach(simple_tuner_attach, dvb->fe[0],
1185 &dev->i2c_adap[dev->def_i2c_bus], 0x61, TUNER_THOMSON_DTT761X)) { 1182 &dev->i2c_adap[dev->def_i2c_bus],
1183 0x61, TUNER_THOMSON_DTT761X)) {
1186 result = -EINVAL; 1184 result = -EINVAL;
1187 goto out_free; 1185 goto out_free;
1188 } 1186 }
@@ -1204,7 +1202,8 @@ static int em28xx_dvb_init(struct em28xx *dev)
1204 &dev->i2c_adap[dev->def_i2c_bus], 0x48); 1202 &dev->i2c_adap[dev->def_i2c_bus], 0x48);
1205 if (dvb->fe[0]) { 1203 if (dvb->fe[0]) {
1206 if (!dvb_attach(simple_tuner_attach, dvb->fe[0], 1204 if (!dvb_attach(simple_tuner_attach, dvb->fe[0],
1207 &dev->i2c_adap[dev->def_i2c_bus], 0x60, TUNER_PHILIPS_CU1216L)) { 1205 &dev->i2c_adap[dev->def_i2c_bus],
1206 0x60, TUNER_PHILIPS_CU1216L)) {
1208 result = -EINVAL; 1207 result = -EINVAL;
1209 goto out_free; 1208 goto out_free;
1210 } 1209 }
@@ -1219,7 +1218,7 @@ static int em28xx_dvb_init(struct em28xx *dev)
1219 goto out_free; 1218 goto out_free;
1220 } 1219 }
1221 if (!dvb_attach(tda18271_attach, dvb->fe[0], 0x60, 1220 if (!dvb_attach(tda18271_attach, dvb->fe[0], 0x60,
1222 &dev->i2c_adap[dev->def_i2c_bus], 1221 &dev->i2c_adap[dev->def_i2c_bus],
1223 &kworld_a340_config)) { 1222 &kworld_a340_config)) {
1224 dvb_frontend_detach(dvb->fe[0]); 1223 dvb_frontend_detach(dvb->fe[0]);
1225 result = -EINVAL; 1224 result = -EINVAL;
@@ -1250,10 +1249,10 @@ static int em28xx_dvb_init(struct em28xx *dev)
1250#ifdef CONFIG_GPIOLIB 1249#ifdef CONFIG_GPIOLIB
1251 /* enable LNA for DVB-T, DVB-T2 and DVB-C */ 1250 /* enable LNA for DVB-T, DVB-T2 and DVB-C */
1252 result = gpio_request_one(dvb->lna_gpio, 1251 result = gpio_request_one(dvb->lna_gpio,
1253 GPIOF_OUT_INIT_LOW, NULL); 1252 GPIOF_OUT_INIT_LOW, NULL);
1254 if (result) 1253 if (result)
1255 em28xx_errdev("gpio request failed %d\n", 1254 em28xx_errdev("gpio request failed %d\n",
1256 result); 1255 result);
1257 else 1256 else
1258 gpio_free(dvb->lna_gpio); 1257 gpio_free(dvb->lna_gpio);
1259 1258
@@ -1266,6 +1265,7 @@ static int em28xx_dvb_init(struct em28xx *dev)
1266 case EM2884_BOARD_HAUPPAUGE_WINTV_HVR_930C: 1265 case EM2884_BOARD_HAUPPAUGE_WINTV_HVR_930C:
1267 { 1266 {
1268 struct xc5000_config cfg; 1267 struct xc5000_config cfg;
1268
1269 hauppauge_hvr930c_init(dev); 1269 hauppauge_hvr930c_init(dev);
1270 1270
1271 dvb->fe[0] = dvb_attach(drxk_attach, 1271 dvb->fe[0] = dvb_attach(drxk_attach,
@@ -1339,7 +1339,7 @@ static int em28xx_dvb_init(struct em28xx *dev)
1339 /* attach SEC */ 1339 /* attach SEC */
1340 if (dvb->fe[0]) 1340 if (dvb->fe[0])
1341 dvb_attach(a8293_attach, dvb->fe[0], &dev->i2c_adap[dev->def_i2c_bus], 1341 dvb_attach(a8293_attach, dvb->fe[0], &dev->i2c_adap[dev->def_i2c_bus],
1342 &em28xx_a8293_config); 1342 &em28xx_a8293_config);
1343 break; 1343 break;
1344 case EM2874_BOARD_DELOCK_61959: 1344 case EM2874_BOARD_DELOCK_61959:
1345 case EM2874_BOARD_MAXMEDIA_UB425_TC: 1345 case EM2874_BOARD_MAXMEDIA_UB425_TC:
@@ -1553,6 +1553,7 @@ static int em28xx_dvb_init(struct em28xx *dev)
1553 struct si2157_config si2157_config; 1553 struct si2157_config si2157_config;
1554 1554
1555 /* attach demod */ 1555 /* attach demod */
1556 memset(&si2168_config, 0, sizeof(si2168_config));
1556 si2168_config.i2c_adapter = &adapter; 1557 si2168_config.i2c_adapter = &adapter;
1557 si2168_config.fe = &dvb->fe[0]; 1558 si2168_config.fe = &dvb->fe[0];
1558 si2168_config.ts_mode = SI2168_TS_PARALLEL; 1559 si2168_config.ts_mode = SI2168_TS_PARALLEL;
@@ -1603,6 +1604,65 @@ static int em28xx_dvb_init(struct em28xx *dev)
1603 dvb->fe[0]->ops.set_lna = em28xx_pctv_292e_set_lna; 1604 dvb->fe[0]->ops.set_lna = em28xx_pctv_292e_set_lna;
1604 } 1605 }
1605 break; 1606 break;
1607 case EM28178_BOARD_TERRATEC_T2_STICK_HD:
1608 {
1609 struct i2c_adapter *adapter;
1610 struct i2c_client *client;
1611 struct i2c_board_info info;
1612 struct si2168_config si2168_config;
1613 struct si2157_config si2157_config;
1614
1615 /* attach demod */
1616 memset(&si2168_config, 0, sizeof(si2168_config));
1617 si2168_config.i2c_adapter = &adapter;
1618 si2168_config.fe = &dvb->fe[0];
1619 si2168_config.ts_mode = SI2168_TS_PARALLEL;
1620 memset(&info, 0, sizeof(struct i2c_board_info));
1621 strlcpy(info.type, "si2168", I2C_NAME_SIZE);
1622 info.addr = 0x64;
1623 info.platform_data = &si2168_config;
1624 request_module(info.type);
1625 client = i2c_new_device(&dev->i2c_adap[dev->def_i2c_bus], &info);
1626 if (client == NULL || client->dev.driver == NULL) {
1627 result = -ENODEV;
1628 goto out_free;
1629 }
1630
1631 if (!try_module_get(client->dev.driver->owner)) {
1632 i2c_unregister_device(client);
1633 result = -ENODEV;
1634 goto out_free;
1635 }
1636
1637 dvb->i2c_client_demod = client;
1638
1639 /* attach tuner */
1640 memset(&si2157_config, 0, sizeof(si2157_config));
1641 si2157_config.fe = dvb->fe[0];
1642 memset(&info, 0, sizeof(struct i2c_board_info));
1643 strlcpy(info.type, "si2146", I2C_NAME_SIZE);
1644 info.addr = 0x60;
1645 info.platform_data = &si2157_config;
1646 request_module("si2157");
1647 client = i2c_new_device(adapter, &info);
1648 if (client == NULL || client->dev.driver == NULL) {
1649 module_put(dvb->i2c_client_demod->dev.driver->owner);
1650 i2c_unregister_device(dvb->i2c_client_demod);
1651 result = -ENODEV;
1652 goto out_free;
1653 }
1654
1655 if (!try_module_get(client->dev.driver->owner)) {
1656 i2c_unregister_device(client);
1657 module_put(dvb->i2c_client_demod->dev.driver->owner);
1658 i2c_unregister_device(dvb->i2c_client_demod);
1659 result = -ENODEV;
1660 goto out_free;
1661 }
1662
1663 dvb->i2c_client_tuner = client;
1664 }
1665 break;
1606 default: 1666 default:
1607 em28xx_errdev("/2: The frontend of your DVB/ATSC card" 1667 em28xx_errdev("/2: The frontend of your DVB/ATSC card"
1608 " isn't supported yet\n"); 1668 " isn't supported yet\n");
@@ -1624,9 +1684,6 @@ static int em28xx_dvb_init(struct em28xx *dev)
1624 if (result < 0) 1684 if (result < 0)
1625 goto out_free; 1685 goto out_free;
1626 1686
1627 /* MFE lock */
1628 dvb->adapter.mfe_shared = mfe_shared;
1629
1630 em28xx_info("DVB extension successfully initialized\n"); 1687 em28xx_info("DVB extension successfully initialized\n");
1631 1688
1632 kref_get(&dev->ref); 1689 kref_get(&dev->ref);
diff --git a/drivers/media/usb/em28xx/em28xx-i2c.c b/drivers/media/usb/em28xx/em28xx-i2c.c
index 1048c1a23fb6..a19b5c8b56ff 100644
--- a/drivers/media/usb/em28xx/em28xx-i2c.c
+++ b/drivers/media/usb/em28xx/em28xx-i2c.c
@@ -593,6 +593,7 @@ static inline unsigned long em28xx_hash_mem(char *buf, int length, int bits)
593 unsigned long l = 0; 593 unsigned long l = 0;
594 int len = 0; 594 int len = 0;
595 unsigned char c; 595 unsigned char c;
596
596 do { 597 do {
597 if (len == length) { 598 if (len == length) {
598 c = (char)len; 599 c = (char)len;
@@ -877,6 +878,7 @@ static struct i2c_client em28xx_client_template = {
877 * incomplete list of known devices 878 * incomplete list of known devices
878 */ 879 */
879static char *i2c_devs[128] = { 880static char *i2c_devs[128] = {
881 [0x1c >> 1] = "lgdt330x",
880 [0x3e >> 1] = "remote IR sensor", 882 [0x3e >> 1] = "remote IR sensor",
881 [0x4a >> 1] = "saa7113h", 883 [0x4a >> 1] = "saa7113h",
882 [0x52 >> 1] = "drxk", 884 [0x52 >> 1] = "drxk",
@@ -949,7 +951,7 @@ int em28xx_i2c_register(struct em28xx *dev, unsigned bus,
949 retval = i2c_add_adapter(&dev->i2c_adap[bus]); 951 retval = i2c_add_adapter(&dev->i2c_adap[bus]);
950 if (retval < 0) { 952 if (retval < 0) {
951 em28xx_errdev("%s: i2c_add_adapter failed! retval [%d]\n", 953 em28xx_errdev("%s: i2c_add_adapter failed! retval [%d]\n",
952 __func__, retval); 954 __func__, retval);
953 return retval; 955 return retval;
954 } 956 }
955 957
@@ -961,7 +963,7 @@ int em28xx_i2c_register(struct em28xx *dev, unsigned bus,
961 retval = em28xx_i2c_eeprom(dev, bus, &dev->eedata, &dev->eedata_len); 963 retval = em28xx_i2c_eeprom(dev, bus, &dev->eedata, &dev->eedata_len);
962 if ((retval < 0) && (retval != -ENODEV)) { 964 if ((retval < 0) && (retval != -ENODEV)) {
963 em28xx_errdev("%s: em28xx_i2_eeprom failed! retval [%d]\n", 965 em28xx_errdev("%s: em28xx_i2_eeprom failed! retval [%d]\n",
964 __func__, retval); 966 __func__, retval);
965 967
966 return retval; 968 return retval;
967 } 969 }
diff --git a/drivers/media/usb/em28xx/em28xx-input.c b/drivers/media/usb/em28xx/em28xx-input.c
index 23f8f6afa2e0..d8dc03aadfbd 100644
--- a/drivers/media/usb/em28xx/em28xx-input.c
+++ b/drivers/media/usb/em28xx/em28xx-input.c
@@ -459,7 +459,7 @@ static int em28xx_ir_change_protocol(struct rc_dev *rc_dev, u64 *rc_type)
459 return em2874_ir_change_protocol(rc_dev, rc_type); 459 return em2874_ir_change_protocol(rc_dev, rc_type);
460 default: 460 default:
461 printk("Unrecognized em28xx chip id 0x%02x: IR not supported\n", 461 printk("Unrecognized em28xx chip id 0x%02x: IR not supported\n",
462 dev->chip_id); 462 dev->chip_id);
463 return -EINVAL; 463 return -EINVAL;
464 } 464 }
465} 465}
@@ -505,7 +505,7 @@ static void em28xx_query_buttons(struct work_struct *work)
505 /* Check states of the buttons and act */ 505 /* Check states of the buttons and act */
506 j = 0; 506 j = 0;
507 while (dev->board.buttons[j].role >= 0 && 507 while (dev->board.buttons[j].role >= 0 &&
508 dev->board.buttons[j].role < EM28XX_NUM_BUTTON_ROLES) { 508 dev->board.buttons[j].role < EM28XX_NUM_BUTTON_ROLES) {
509 struct em28xx_button *button = &dev->board.buttons[j]; 509 struct em28xx_button *button = &dev->board.buttons[j];
510 /* Check if button uses the current address */ 510 /* Check if button uses the current address */
511 if (button->reg_r != dev->button_polling_addresses[i]) { 511 if (button->reg_r != dev->button_polling_addresses[i]) {
@@ -607,7 +607,7 @@ static void em28xx_init_buttons(struct em28xx *dev)
607 607
608 dev->button_polling_interval = EM28XX_BUTTONS_DEBOUNCED_QUERY_INTERVAL; 608 dev->button_polling_interval = EM28XX_BUTTONS_DEBOUNCED_QUERY_INTERVAL;
609 while (dev->board.buttons[i].role >= 0 && 609 while (dev->board.buttons[i].role >= 0 &&
610 dev->board.buttons[i].role < EM28XX_NUM_BUTTON_ROLES) { 610 dev->board.buttons[i].role < EM28XX_NUM_BUTTON_ROLES) {
611 struct em28xx_button *button = &dev->board.buttons[i]; 611 struct em28xx_button *button = &dev->board.buttons[i];
612 /* Check if polling address is already on the list */ 612 /* Check if polling address is already on the list */
613 addr_new = true; 613 addr_new = true;
@@ -653,11 +653,11 @@ next_button:
653 /* Start polling */ 653 /* Start polling */
654 if (dev->num_button_polling_addresses) { 654 if (dev->num_button_polling_addresses) {
655 memset(dev->button_polling_last_values, 0, 655 memset(dev->button_polling_last_values, 0,
656 EM28XX_NUM_BUTTON_ADDRESSES_MAX); 656 EM28XX_NUM_BUTTON_ADDRESSES_MAX);
657 INIT_DELAYED_WORK(&dev->buttons_query_work, 657 INIT_DELAYED_WORK(&dev->buttons_query_work,
658 em28xx_query_buttons); 658 em28xx_query_buttons);
659 schedule_delayed_work(&dev->buttons_query_work, 659 schedule_delayed_work(&dev->buttons_query_work,
660 msecs_to_jiffies(dev->button_polling_interval)); 660 msecs_to_jiffies(dev->button_polling_interval));
661 } 661 }
662} 662}
663 663
@@ -841,8 +841,7 @@ static int em28xx_ir_fini(struct em28xx *dev)
841 if (!ir) 841 if (!ir)
842 goto ref_put; 842 goto ref_put;
843 843
844 if (ir->rc) 844 rc_unregister_device(ir->rc);
845 rc_unregister_device(ir->rc);
846 845
847 kfree(ir->i2c_client); 846 kfree(ir->i2c_client);
848 847
@@ -887,7 +886,7 @@ static int em28xx_ir_resume(struct em28xx *dev)
887 schedule_delayed_work(&ir->work, msecs_to_jiffies(ir->polling)); 886 schedule_delayed_work(&ir->work, msecs_to_jiffies(ir->polling));
888 if (dev->num_button_polling_addresses) 887 if (dev->num_button_polling_addresses)
889 schedule_delayed_work(&dev->buttons_query_work, 888 schedule_delayed_work(&dev->buttons_query_work,
890 msecs_to_jiffies(dev->button_polling_interval)); 889 msecs_to_jiffies(dev->button_polling_interval));
891 return 0; 890 return 0;
892} 891}
893 892
diff --git a/drivers/media/usb/em28xx/em28xx-reg.h b/drivers/media/usb/em28xx/em28xx-reg.h
index 311fb349dafa..13cbb7f3ea10 100644
--- a/drivers/media/usb/em28xx/em28xx-reg.h
+++ b/drivers/media/usb/em28xx/em28xx-reg.h
@@ -49,7 +49,6 @@
49#define EM28XX_CHIPCFG2_TS_PACKETSIZE_564 0x02 49#define EM28XX_CHIPCFG2_TS_PACKETSIZE_564 0x02
50#define EM28XX_CHIPCFG2_TS_PACKETSIZE_752 0x03 50#define EM28XX_CHIPCFG2_TS_PACKETSIZE_752 0x03
51 51
52
53/* GPIO/GPO registers */ 52/* GPIO/GPO registers */
54#define EM2880_R04_GPO 0x04 /* em2880-em2883 only */ 53#define EM2880_R04_GPO 0x04 /* em2880-em2883 only */
55#define EM2820_R08_GPIO_CTRL 0x08 /* em2820-em2873/83 only */ 54#define EM2820_R08_GPIO_CTRL 0x08 /* em2820-em2873/83 only */
@@ -68,7 +67,6 @@
68#define EM28XX_I2C_FREQ_400_KHZ 0x01 67#define EM28XX_I2C_FREQ_400_KHZ 0x01
69#define EM28XX_I2C_FREQ_100_KHZ 0x00 68#define EM28XX_I2C_FREQ_100_KHZ 0x00
70 69
71
72#define EM28XX_R0A_CHIPID 0x0a 70#define EM28XX_R0A_CHIPID 0x0a
73#define EM28XX_R0C_USBSUSP 0x0c 71#define EM28XX_R0C_USBSUSP 0x0c
74#define EM28XX_R0C_USBSUSP_SNAPSHOT 0x20 /* 1=button pressed, needs reset */ 72#define EM28XX_R0C_USBSUSP_SNAPSHOT 0x20 /* 1=button pressed, needs reset */
@@ -157,7 +155,6 @@
157#define EM28XX_OUTFMT_YUV422_Y1UY0V 0x15 155#define EM28XX_OUTFMT_YUV422_Y1UY0V 0x15
158#define EM28XX_OUTFMT_YUV411 0x18 156#define EM28XX_OUTFMT_YUV411 0x18
159 157
160
161#define EM28XX_R28_XMIN 0x28 158#define EM28XX_R28_XMIN 0x28
162#define EM28XX_R29_XMAX 0x29 159#define EM28XX_R29_XMAX 0x29
163#define EM28XX_R2A_YMIN 0x2a 160#define EM28XX_R2A_YMIN 0x2a
diff --git a/drivers/media/usb/em28xx/em28xx-v4l.h b/drivers/media/usb/em28xx/em28xx-v4l.h
index 432862c20bbf..8dfcb56bf4b3 100644
--- a/drivers/media/usb/em28xx/em28xx-v4l.h
+++ b/drivers/media/usb/em28xx/em28xx-v4l.h
@@ -14,7 +14,6 @@
14 GNU General Public License for more details. 14 GNU General Public License for more details.
15 */ 15 */
16 16
17
18int em28xx_start_analog_streaming(struct vb2_queue *vq, unsigned int count); 17int em28xx_start_analog_streaming(struct vb2_queue *vq, unsigned int count);
19void em28xx_stop_vbi_streaming(struct vb2_queue *vq); 18void em28xx_stop_vbi_streaming(struct vb2_queue *vq);
20extern struct vb2_ops em28xx_vbi_qops; 19extern struct vb2_ops em28xx_vbi_qops;
diff --git a/drivers/media/usb/em28xx/em28xx-vbi.c b/drivers/media/usb/em28xx/em28xx-vbi.c
index 34ee1e03a732..744e7ed743e1 100644
--- a/drivers/media/usb/em28xx/em28xx-vbi.c
+++ b/drivers/media/usb/em28xx/em28xx-vbi.c
@@ -92,7 +92,6 @@ vbi_buffer_queue(struct vb2_buffer *vb)
92 spin_unlock_irqrestore(&dev->slock, flags); 92 spin_unlock_irqrestore(&dev->slock, flags);
93} 93}
94 94
95
96struct vb2_ops em28xx_vbi_qops = { 95struct vb2_ops em28xx_vbi_qops = {
97 .queue_setup = vbi_queue_setup, 96 .queue_setup = vbi_queue_setup,
98 .buf_prepare = vbi_buffer_prepare, 97 .buf_prepare = vbi_buffer_prepare,
diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c
index 03d5ece0319c..cf7f58b76292 100644
--- a/drivers/media/usb/em28xx/em28xx-video.c
+++ b/drivers/media/usb/em28xx/em28xx-video.c
@@ -81,7 +81,6 @@ MODULE_DESCRIPTION(DRIVER_DESC " - v4l2 interface");
81MODULE_LICENSE("GPL"); 81MODULE_LICENSE("GPL");
82MODULE_VERSION(EM28XX_VERSION); 82MODULE_VERSION(EM28XX_VERSION);
83 83
84
85#define EM25XX_FRMDATAHDR_BYTE1 0x02 84#define EM25XX_FRMDATAHDR_BYTE1 0x02
86#define EM25XX_FRMDATAHDR_BYTE2_STILL_IMAGE 0x20 85#define EM25XX_FRMDATAHDR_BYTE2_STILL_IMAGE 0x20
87#define EM25XX_FRMDATAHDR_BYTE2_FRAME_END 0x02 86#define EM25XX_FRMDATAHDR_BYTE2_FRAME_END 0x02
@@ -90,7 +89,6 @@ MODULE_VERSION(EM28XX_VERSION);
90 EM25XX_FRMDATAHDR_BYTE2_FRAME_END | \ 89 EM25XX_FRMDATAHDR_BYTE2_FRAME_END | \
91 EM25XX_FRMDATAHDR_BYTE2_FRAME_ID) 90 EM25XX_FRMDATAHDR_BYTE2_FRAME_ID)
92 91
93
94static unsigned int video_nr[] = {[0 ... (EM28XX_MAXBOARDS - 1)] = -1U }; 92static unsigned int video_nr[] = {[0 ... (EM28XX_MAXBOARDS - 1)] = -1U };
95static unsigned int vbi_nr[] = {[0 ... (EM28XX_MAXBOARDS - 1)] = -1U }; 93static unsigned int vbi_nr[] = {[0 ... (EM28XX_MAXBOARDS - 1)] = -1U };
96static unsigned int radio_nr[] = {[0 ... (EM28XX_MAXBOARDS - 1)] = -1U }; 94static unsigned int radio_nr[] = {[0 ... (EM28XX_MAXBOARDS - 1)] = -1U };
@@ -194,9 +192,10 @@ static int em28xx_vbi_supported(struct em28xx *dev)
194static void em28xx_wake_i2c(struct em28xx *dev) 192static void em28xx_wake_i2c(struct em28xx *dev)
195{ 193{
196 struct v4l2_device *v4l2_dev = &dev->v4l2->v4l2_dev; 194 struct v4l2_device *v4l2_dev = &dev->v4l2->v4l2_dev;
195
197 v4l2_device_call_all(v4l2_dev, 0, core, reset, 0); 196 v4l2_device_call_all(v4l2_dev, 0, core, reset, 0);
198 v4l2_device_call_all(v4l2_dev, 0, video, s_routing, 197 v4l2_device_call_all(v4l2_dev, 0, video, s_routing,
199 INPUT(dev->ctl_input)->vmux, 0, 0); 198 INPUT(dev->ctl_input)->vmux, 0, 0);
200 v4l2_device_call_all(v4l2_dev, 0, video, s_stream, 0); 199 v4l2_device_call_all(v4l2_dev, 0, video, s_stream, 0);
201} 200}
202 201
@@ -275,7 +274,7 @@ static int em28xx_accumulator_set(struct em28xx *dev, u8 xmin, u8 xmax,
275} 274}
276 275
277static void em28xx_capture_area_set(struct em28xx *dev, u8 hstart, u8 vstart, 276static void em28xx_capture_area_set(struct em28xx *dev, u8 hstart, u8 vstart,
278 u16 width, u16 height) 277 u16 width, u16 height)
279{ 278{
280 u8 cwidth = width >> 2; 279 u8 cwidth = width >> 2;
281 u8 cheight = height >> 2; 280 u8 cheight = height >> 2;
@@ -283,7 +282,7 @@ static void em28xx_capture_area_set(struct em28xx *dev, u8 hstart, u8 vstart,
283 /* NOTE: size limit: 2047x1023 = 2MPix */ 282 /* NOTE: size limit: 2047x1023 = 2MPix */
284 283
285 em28xx_videodbg("capture area set to (%d,%d): %dx%d\n", 284 em28xx_videodbg("capture area set to (%d,%d): %dx%d\n",
286 hstart, vstart, 285 hstart, vstart,
287 ((overflow & 2) << 9 | cwidth << 2), 286 ((overflow & 2) << 9 | cwidth << 2),
288 ((overflow & 1) << 10 | cheight << 2)); 287 ((overflow & 1) << 10 | cheight << 2));
289 288
@@ -406,13 +405,13 @@ set_alt:
406 dev->packet_multiplier = EM28XX_BULK_PACKET_MULTIPLIER; 405 dev->packet_multiplier = EM28XX_BULK_PACKET_MULTIPLIER;
407 } else { /* isoc */ 406 } else { /* isoc */
408 em28xx_videodbg("minimum isoc packet size: %u (alt=%d)\n", 407 em28xx_videodbg("minimum isoc packet size: %u (alt=%d)\n",
409 min_pkt_size, dev->alt); 408 min_pkt_size, dev->alt);
410 dev->max_pkt_size = 409 dev->max_pkt_size =
411 dev->alt_max_pkt_size_isoc[dev->alt]; 410 dev->alt_max_pkt_size_isoc[dev->alt];
412 dev->packet_multiplier = EM28XX_NUM_ISOC_PACKETS; 411 dev->packet_multiplier = EM28XX_NUM_ISOC_PACKETS;
413 } 412 }
414 em28xx_videodbg("setting alternate %d with wMaxPacketSize=%u\n", 413 em28xx_videodbg("setting alternate %d with wMaxPacketSize=%u\n",
415 dev->alt, dev->max_pkt_size); 414 dev->alt, dev->max_pkt_size);
416 errCode = usb_set_interface(dev->udev, dev->ifnum, dev->alt); 415 errCode = usb_set_interface(dev->udev, dev->ifnum, dev->alt);
417 if (errCode < 0) { 416 if (errCode < 0) {
418 em28xx_errdev("cannot change alternate number to %d (error=%i)\n", 417 em28xx_errdev("cannot change alternate number to %d (error=%i)\n",
@@ -482,7 +481,7 @@ static void em28xx_copy_video(struct em28xx *dev,
482 481
483 if ((char *)startwrite + lencopy > (char *)buf->vb_buf + buf->length) { 482 if ((char *)startwrite + lencopy > (char *)buf->vb_buf + buf->length) {
484 em28xx_isocdbg("Overflow of %zu bytes past buffer end (1)\n", 483 em28xx_isocdbg("Overflow of %zu bytes past buffer end (1)\n",
485 ((char *)startwrite + lencopy) - 484 ((char *)startwrite + lencopy) -
486 ((char *)buf->vb_buf + buf->length)); 485 ((char *)buf->vb_buf + buf->length));
487 remain = (char *)buf->vb_buf + buf->length - 486 remain = (char *)buf->vb_buf + buf->length -
488 (char *)startwrite; 487 (char *)startwrite;
@@ -548,7 +547,7 @@ static void em28xx_copy_vbi(struct em28xx *dev,
548} 547}
549 548
550static inline void print_err_status(struct em28xx *dev, 549static inline void print_err_status(struct em28xx *dev,
551 int packet, int status) 550 int packet, int status)
552{ 551{
553 char *errmsg = "Unknown"; 552 char *errmsg = "Unknown";
554 553
@@ -831,7 +830,6 @@ static inline int em28xx_urb_data_copy(struct em28xx *dev, struct urb *urb)
831 return 1; 830 return 1;
832} 831}
833 832
834
835static int get_ressource(enum v4l2_buf_type f_type) 833static int get_ressource(enum v4l2_buf_type f_type)
836{ 834{
837 switch (f_type) { 835 switch (f_type) {
@@ -1003,6 +1001,7 @@ static void em28xx_stop_streaming(struct vb2_queue *vq)
1003 } 1001 }
1004 while (!list_empty(&vidq->active)) { 1002 while (!list_empty(&vidq->active)) {
1005 struct em28xx_buffer *buf; 1003 struct em28xx_buffer *buf;
1004
1006 buf = list_entry(vidq->active.next, struct em28xx_buffer, list); 1005 buf = list_entry(vidq->active.next, struct em28xx_buffer, list);
1007 list_del(&buf->list); 1006 list_del(&buf->list);
1008 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR); 1007 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
@@ -1033,6 +1032,7 @@ void em28xx_stop_vbi_streaming(struct vb2_queue *vq)
1033 } 1032 }
1034 while (!list_empty(&vbiq->active)) { 1033 while (!list_empty(&vbiq->active)) {
1035 struct em28xx_buffer *buf; 1034 struct em28xx_buffer *buf;
1035
1036 buf = list_entry(vbiq->active.next, struct em28xx_buffer, list); 1036 buf = list_entry(vbiq->active.next, struct em28xx_buffer, list);
1037 list_del(&buf->list); 1037 list_del(&buf->list);
1038 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR); 1038 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
@@ -1109,6 +1109,7 @@ static int em28xx_vb2_setup(struct em28xx *dev)
1109static void video_mux(struct em28xx *dev, int index) 1109static void video_mux(struct em28xx *dev, int index)
1110{ 1110{
1111 struct v4l2_device *v4l2_dev = &dev->v4l2->v4l2_dev; 1111 struct v4l2_device *v4l2_dev = &dev->v4l2->v4l2_dev;
1112
1112 dev->ctl_input = index; 1113 dev->ctl_input = index;
1113 dev->ctl_ainput = INPUT(index)->amux; 1114 dev->ctl_ainput = INPUT(index)->amux;
1114 dev->ctl_aoutput = INPUT(index)->aout; 1115 dev->ctl_aoutput = INPUT(index)->aout;
@@ -1117,21 +1118,22 @@ static void video_mux(struct em28xx *dev, int index)
1117 dev->ctl_aoutput = EM28XX_AOUT_MASTER; 1118 dev->ctl_aoutput = EM28XX_AOUT_MASTER;
1118 1119
1119 v4l2_device_call_all(v4l2_dev, 0, video, s_routing, 1120 v4l2_device_call_all(v4l2_dev, 0, video, s_routing,
1120 INPUT(index)->vmux, 0, 0); 1121 INPUT(index)->vmux, 0, 0);
1121 1122
1122 if (dev->board.has_msp34xx) { 1123 if (dev->board.has_msp34xx) {
1123 if (dev->i2s_speed) { 1124 if (dev->i2s_speed) {
1124 v4l2_device_call_all(v4l2_dev, 0, audio, 1125 v4l2_device_call_all(v4l2_dev, 0, audio,
1125 s_i2s_clock_freq, dev->i2s_speed); 1126 s_i2s_clock_freq, dev->i2s_speed);
1126 } 1127 }
1127 /* Note: this is msp3400 specific */ 1128 /* Note: this is msp3400 specific */
1128 v4l2_device_call_all(v4l2_dev, 0, audio, s_routing, 1129 v4l2_device_call_all(v4l2_dev, 0, audio, s_routing,
1129 dev->ctl_ainput, MSP_OUTPUT(MSP_SC_IN_DSP_SCART1), 0); 1130 dev->ctl_ainput,
1131 MSP_OUTPUT(MSP_SC_IN_DSP_SCART1), 0);
1130 } 1132 }
1131 1133
1132 if (dev->board.adecoder != EM28XX_NOADECODER) { 1134 if (dev->board.adecoder != EM28XX_NOADECODER) {
1133 v4l2_device_call_all(v4l2_dev, 0, audio, s_routing, 1135 v4l2_device_call_all(v4l2_dev, 0, audio, s_routing,
1134 dev->ctl_ainput, dev->ctl_aoutput, 0); 1136 dev->ctl_ainput, dev->ctl_aoutput, 0);
1135 } 1137 }
1136 1138
1137 em28xx_audio_analog_set(dev); 1139 em28xx_audio_analog_set(dev);
@@ -1203,7 +1205,7 @@ static const struct v4l2_ctrl_ops em28xx_ctrl_ops = {
1203}; 1205};
1204 1206
1205static void size_to_scale(struct em28xx *dev, 1207static void size_to_scale(struct em28xx *dev,
1206 unsigned int width, unsigned int height, 1208 unsigned int width, unsigned int height,
1207 unsigned int *hscale, unsigned int *vscale) 1209 unsigned int *hscale, unsigned int *vscale)
1208{ 1210{
1209 unsigned int maxw = norm_maxw(dev); 1211 unsigned int maxw = norm_maxw(dev);
@@ -1234,7 +1236,7 @@ static void scale_to_size(struct em28xx *dev,
1234 ------------------------------------------------------------------*/ 1236 ------------------------------------------------------------------*/
1235 1237
1236static int vidioc_g_fmt_vid_cap(struct file *file, void *priv, 1238static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
1237 struct v4l2_format *f) 1239 struct v4l2_format *f)
1238{ 1240{
1239 struct em28xx *dev = video_drvdata(file); 1241 struct em28xx *dev = video_drvdata(file);
1240 struct em28xx_v4l2 *v4l2 = dev->v4l2; 1242 struct em28xx_v4l2 *v4l2 = dev->v4l2;
@@ -1267,7 +1269,7 @@ static struct em28xx_fmt *format_by_fourcc(unsigned int fourcc)
1267} 1269}
1268 1270
1269static int vidioc_try_fmt_vid_cap(struct file *file, void *priv, 1271static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
1270 struct v4l2_format *f) 1272 struct v4l2_format *f)
1271{ 1273{
1272 struct em28xx *dev = video_drvdata(file); 1274 struct em28xx *dev = video_drvdata(file);
1273 struct em28xx_v4l2 *v4l2 = dev->v4l2; 1275 struct em28xx_v4l2 *v4l2 = dev->v4l2;
@@ -1338,7 +1340,7 @@ static int em28xx_set_video_format(struct em28xx *dev, unsigned int fourcc,
1338 1340
1339 /* set new image size */ 1341 /* set new image size */
1340 size_to_scale(dev, v4l2->width, v4l2->height, 1342 size_to_scale(dev, v4l2->width, v4l2->height,
1341 &v4l2->hscale, &v4l2->vscale); 1343 &v4l2->hscale, &v4l2->vscale);
1342 1344
1343 em28xx_resolution_set(dev); 1345 em28xx_resolution_set(dev);
1344 1346
@@ -1346,7 +1348,7 @@ static int em28xx_set_video_format(struct em28xx *dev, unsigned int fourcc,
1346} 1348}
1347 1349
1348static int vidioc_s_fmt_vid_cap(struct file *file, void *priv, 1350static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
1349 struct v4l2_format *f) 1351 struct v4l2_format *f)
1350{ 1352{
1351 struct em28xx *dev = video_drvdata(file); 1353 struct em28xx *dev = video_drvdata(file);
1352 struct em28xx_v4l2 *v4l2 = dev->v4l2; 1354 struct em28xx_v4l2 *v4l2 = dev->v4l2;
@@ -1401,7 +1403,7 @@ static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id norm)
1401 v4l2->width = f.fmt.pix.width; 1403 v4l2->width = f.fmt.pix.width;
1402 v4l2->height = f.fmt.pix.height; 1404 v4l2->height = f.fmt.pix.height;
1403 size_to_scale(dev, v4l2->width, v4l2->height, 1405 size_to_scale(dev, v4l2->width, v4l2->height,
1404 &v4l2->hscale, &v4l2->vscale); 1406 &v4l2->hscale, &v4l2->vscale);
1405 1407
1406 em28xx_resolution_set(dev); 1408 em28xx_resolution_set(dev);
1407 v4l2_device_call_all(&v4l2->v4l2_dev, 0, video, s_std, v4l2->norm); 1409 v4l2_device_call_all(&v4l2->v4l2_dev, 0, video, s_std, v4l2->norm);
@@ -1422,7 +1424,7 @@ static int vidioc_g_parm(struct file *file, void *priv,
1422 video, g_parm, p); 1424 video, g_parm, p);
1423 else 1425 else
1424 v4l2_video_std_frame_period(v4l2->norm, 1426 v4l2_video_std_frame_period(v4l2->norm,
1425 &p->parm.capture.timeperframe); 1427 &p->parm.capture.timeperframe);
1426 1428
1427 return rc; 1429 return rc;
1428} 1430}
@@ -1450,7 +1452,7 @@ static const char *iname[] = {
1450}; 1452};
1451 1453
1452static int vidioc_enum_input(struct file *file, void *priv, 1454static int vidioc_enum_input(struct file *file, void *priv,
1453 struct v4l2_input *i) 1455 struct v4l2_input *i)
1454{ 1456{
1455 struct em28xx *dev = video_drvdata(file); 1457 struct em28xx *dev = video_drvdata(file);
1456 unsigned int n; 1458 unsigned int n;
@@ -1467,7 +1469,7 @@ static int vidioc_enum_input(struct file *file, void *priv,
1467 strcpy(i->name, iname[INPUT(n)->type]); 1469 strcpy(i->name, iname[INPUT(n)->type]);
1468 1470
1469 if ((EM28XX_VMUX_TELEVISION == INPUT(n)->type) || 1471 if ((EM28XX_VMUX_TELEVISION == INPUT(n)->type) ||
1470 (EM28XX_VMUX_CABLE == INPUT(n)->type)) 1472 (EM28XX_VMUX_CABLE == INPUT(n)->type))
1471 i->type = V4L2_INPUT_TYPE_TUNER; 1473 i->type = V4L2_INPUT_TYPE_TUNER;
1472 1474
1473 i->std = dev->v4l2->vdev->tvnorms; 1475 i->std = dev->v4l2->vdev->tvnorms;
@@ -1558,7 +1560,7 @@ static int vidioc_s_audio(struct file *file, void *priv, const struct v4l2_audio
1558} 1560}
1559 1561
1560static int vidioc_g_tuner(struct file *file, void *priv, 1562static int vidioc_g_tuner(struct file *file, void *priv,
1561 struct v4l2_tuner *t) 1563 struct v4l2_tuner *t)
1562{ 1564{
1563 struct em28xx *dev = video_drvdata(file); 1565 struct em28xx *dev = video_drvdata(file);
1564 1566
@@ -1572,7 +1574,7 @@ static int vidioc_g_tuner(struct file *file, void *priv,
1572} 1574}
1573 1575
1574static int vidioc_s_tuner(struct file *file, void *priv, 1576static int vidioc_s_tuner(struct file *file, void *priv,
1575 const struct v4l2_tuner *t) 1577 const struct v4l2_tuner *t)
1576{ 1578{
1577 struct em28xx *dev = video_drvdata(file); 1579 struct em28xx *dev = video_drvdata(file);
1578 1580
@@ -1584,7 +1586,7 @@ static int vidioc_s_tuner(struct file *file, void *priv,
1584} 1586}
1585 1587
1586static int vidioc_g_frequency(struct file *file, void *priv, 1588static int vidioc_g_frequency(struct file *file, void *priv,
1587 struct v4l2_frequency *f) 1589 struct v4l2_frequency *f)
1588{ 1590{
1589 struct em28xx *dev = video_drvdata(file); 1591 struct em28xx *dev = video_drvdata(file);
1590 struct em28xx_v4l2 *v4l2 = dev->v4l2; 1592 struct em28xx_v4l2 *v4l2 = dev->v4l2;
@@ -1597,7 +1599,7 @@ static int vidioc_g_frequency(struct file *file, void *priv,
1597} 1599}
1598 1600
1599static int vidioc_s_frequency(struct file *file, void *priv, 1601static int vidioc_s_frequency(struct file *file, void *priv,
1600 const struct v4l2_frequency *f) 1602 const struct v4l2_frequency *f)
1601{ 1603{
1602 struct v4l2_frequency new_freq = *f; 1604 struct v4l2_frequency new_freq = *f;
1603 struct em28xx *dev = video_drvdata(file); 1605 struct em28xx *dev = video_drvdata(file);
@@ -1615,7 +1617,7 @@ static int vidioc_s_frequency(struct file *file, void *priv,
1615 1617
1616#ifdef CONFIG_VIDEO_ADV_DEBUG 1618#ifdef CONFIG_VIDEO_ADV_DEBUG
1617static int vidioc_g_chip_info(struct file *file, void *priv, 1619static int vidioc_g_chip_info(struct file *file, void *priv,
1618 struct v4l2_dbg_chip_info *chip) 1620 struct v4l2_dbg_chip_info *chip)
1619{ 1621{
1620 struct em28xx *dev = video_drvdata(file); 1622 struct em28xx *dev = video_drvdata(file);
1621 1623
@@ -1670,6 +1672,7 @@ static int vidioc_g_register(struct file *file, void *priv,
1670 reg->val = ret; 1672 reg->val = ret;
1671 } else { 1673 } else {
1672 __le16 val = 0; 1674 __le16 val = 0;
1675
1673 ret = dev->em28xx_read_reg_req_len(dev, USB_REQ_GET_STATUS, 1676 ret = dev->em28xx_read_reg_req_len(dev, USB_REQ_GET_STATUS,
1674 reg->reg, (char *)&val, 2); 1677 reg->reg, (char *)&val, 2);
1675 if (ret < 0) 1678 if (ret < 0)
@@ -1700,9 +1703,8 @@ static int vidioc_s_register(struct file *file, void *priv,
1700} 1703}
1701#endif 1704#endif
1702 1705
1703
1704static int vidioc_querycap(struct file *file, void *priv, 1706static int vidioc_querycap(struct file *file, void *priv,
1705 struct v4l2_capability *cap) 1707 struct v4l2_capability *cap)
1706{ 1708{
1707 struct video_device *vdev = video_devdata(file); 1709 struct video_device *vdev = video_devdata(file);
1708 struct em28xx *dev = video_drvdata(file); 1710 struct em28xx *dev = video_drvdata(file);
@@ -1736,7 +1738,7 @@ static int vidioc_querycap(struct file *file, void *priv,
1736} 1738}
1737 1739
1738static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv, 1740static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
1739 struct v4l2_fmtdesc *f) 1741 struct v4l2_fmtdesc *f)
1740{ 1742{
1741 if (unlikely(f->index >= ARRAY_SIZE(format))) 1743 if (unlikely(f->index >= ARRAY_SIZE(format)))
1742 return -EINVAL; 1744 return -EINVAL;
@@ -2177,9 +2179,10 @@ static unsigned short msp3400_addrs[] = {
2177 2179
2178/******************************** usb interface ******************************/ 2180/******************************** usb interface ******************************/
2179 2181
2180static struct video_device *em28xx_vdev_init(struct em28xx *dev, 2182static struct video_device
2181 const struct video_device *template, 2183*em28xx_vdev_init(struct em28xx *dev,
2182 const char *type_name) 2184 const struct video_device *template,
2185 const char *type_name)
2183{ 2186{
2184 struct video_device *vfd; 2187 struct video_device *vfd;
2185 2188
@@ -2344,21 +2347,24 @@ static int em28xx_v4l2_init(struct em28xx *dev)
2344 2347
2345 if (dev->board.radio.type) 2348 if (dev->board.radio.type)
2346 v4l2_i2c_new_subdev(&v4l2->v4l2_dev, 2349 v4l2_i2c_new_subdev(&v4l2->v4l2_dev,
2347 &dev->i2c_adap[dev->def_i2c_bus], 2350 &dev->i2c_adap[dev->def_i2c_bus],
2348 "tuner", dev->board.radio_addr, NULL); 2351 "tuner", dev->board.radio_addr,
2352 NULL);
2349 2353
2350 if (has_demod) 2354 if (has_demod)
2351 v4l2_i2c_new_subdev(&v4l2->v4l2_dev, 2355 v4l2_i2c_new_subdev(&v4l2->v4l2_dev,
2352 &dev->i2c_adap[dev->def_i2c_bus], "tuner", 2356 &dev->i2c_adap[dev->def_i2c_bus],
2353 0, v4l2_i2c_tuner_addrs(ADDRS_DEMOD)); 2357 "tuner", 0,
2358 v4l2_i2c_tuner_addrs(ADDRS_DEMOD));
2354 if (tuner_addr == 0) { 2359 if (tuner_addr == 0) {
2355 enum v4l2_i2c_tuner_type type = 2360 enum v4l2_i2c_tuner_type type =
2356 has_demod ? ADDRS_TV_WITH_DEMOD : ADDRS_TV; 2361 has_demod ? ADDRS_TV_WITH_DEMOD : ADDRS_TV;
2357 struct v4l2_subdev *sd; 2362 struct v4l2_subdev *sd;
2358 2363
2359 sd = v4l2_i2c_new_subdev(&v4l2->v4l2_dev, 2364 sd = v4l2_i2c_new_subdev(&v4l2->v4l2_dev,
2360 &dev->i2c_adap[dev->def_i2c_bus], "tuner", 2365 &dev->i2c_adap[dev->def_i2c_bus],
2361 0, v4l2_i2c_tuner_addrs(type)); 2366 "tuner", 0,
2367 v4l2_i2c_tuner_addrs(type));
2362 2368
2363 if (sd) 2369 if (sd)
2364 tuner_addr = v4l2_i2c_subdev_addr(sd); 2370 tuner_addr = v4l2_i2c_subdev_addr(sd);
@@ -2378,20 +2384,20 @@ static int em28xx_v4l2_init(struct em28xx *dev)
2378 ret = em28xx_audio_setup(dev); 2384 ret = em28xx_audio_setup(dev);
2379 if (ret < 0) { 2385 if (ret < 0) {
2380 em28xx_errdev("%s: Error while setting audio - error [%d]!\n", 2386 em28xx_errdev("%s: Error while setting audio - error [%d]!\n",
2381 __func__, ret); 2387 __func__, ret);
2382 goto unregister_dev; 2388 goto unregister_dev;
2383 } 2389 }
2384 if (dev->audio_mode.ac97 != EM28XX_NO_AC97) { 2390 if (dev->audio_mode.ac97 != EM28XX_NO_AC97) {
2385 v4l2_ctrl_new_std(hdl, &em28xx_ctrl_ops, 2391 v4l2_ctrl_new_std(hdl, &em28xx_ctrl_ops,
2386 V4L2_CID_AUDIO_MUTE, 0, 1, 1, 1); 2392 V4L2_CID_AUDIO_MUTE, 0, 1, 1, 1);
2387 v4l2_ctrl_new_std(hdl, &em28xx_ctrl_ops, 2393 v4l2_ctrl_new_std(hdl, &em28xx_ctrl_ops,
2388 V4L2_CID_AUDIO_VOLUME, 0, 0x1f, 1, 0x1f); 2394 V4L2_CID_AUDIO_VOLUME, 0, 0x1f, 1, 0x1f);
2389 } else { 2395 } else {
2390 /* install the em28xx notify callback */ 2396 /* install the em28xx notify callback */
2391 v4l2_ctrl_notify(v4l2_ctrl_find(hdl, V4L2_CID_AUDIO_MUTE), 2397 v4l2_ctrl_notify(v4l2_ctrl_find(hdl, V4L2_CID_AUDIO_MUTE),
2392 em28xx_ctrl_notify, dev); 2398 em28xx_ctrl_notify, dev);
2393 v4l2_ctrl_notify(v4l2_ctrl_find(hdl, V4L2_CID_AUDIO_VOLUME), 2399 v4l2_ctrl_notify(v4l2_ctrl_find(hdl, V4L2_CID_AUDIO_VOLUME),
2394 em28xx_ctrl_notify, dev); 2400 em28xx_ctrl_notify, dev);
2395 } 2401 }
2396 2402
2397 /* wake i2c devices */ 2403 /* wake i2c devices */
@@ -2518,7 +2524,7 @@ static int em28xx_v4l2_init(struct em28xx *dev)
2518 2524
2519 /* register v4l2 video video_device */ 2525 /* register v4l2 video video_device */
2520 ret = video_register_device(v4l2->vdev, VFL_TYPE_GRABBER, 2526 ret = video_register_device(v4l2->vdev, VFL_TYPE_GRABBER,
2521 video_nr[dev->devno]); 2527 video_nr[dev->devno]);
2522 if (ret) { 2528 if (ret) {
2523 em28xx_errdev("unable to register video device (error=%i).\n", 2529 em28xx_errdev("unable to register video device (error=%i).\n",
2524 ret); 2530 ret);
diff --git a/drivers/media/usb/em28xx/em28xx.h b/drivers/media/usb/em28xx/em28xx.h
index a21a7463b557..9c7075344109 100644
--- a/drivers/media/usb/em28xx/em28xx.h
+++ b/drivers/media/usb/em28xx/em28xx.h
@@ -141,6 +141,8 @@
141#define EM28178_BOARD_PCTV_461E 92 141#define EM28178_BOARD_PCTV_461E 92
142#define EM2874_BOARD_KWORLD_UB435Q_V3 93 142#define EM2874_BOARD_KWORLD_UB435Q_V3 93
143#define EM28178_BOARD_PCTV_292E 94 143#define EM28178_BOARD_PCTV_292E 94
144#define EM2861_BOARD_LEADTEK_VC100 95
145#define EM28178_BOARD_TERRATEC_T2_STICK_HD 96
144 146
145/* Limits minimum and default number of buffers */ 147/* Limits minimum and default number of buffers */
146#define EM28XX_MIN_BUF 4 148#define EM28XX_MIN_BUF 4
@@ -215,7 +217,6 @@ enum em28xx_mode {
215 EM28XX_DIGITAL_MODE, 217 EM28XX_DIGITAL_MODE,
216}; 218};
217 219
218
219struct em28xx; 220struct em28xx;
220 221
221struct em28xx_usb_bufs { 222struct em28xx_usb_bufs {
@@ -243,11 +244,11 @@ struct em28xx_usb_ctl {
243 struct em28xx_usb_bufs digital_bufs; 244 struct em28xx_usb_bufs digital_bufs;
244 245
245 /* Stores already requested buffers */ 246 /* Stores already requested buffers */
246 struct em28xx_buffer *vid_buf; 247 struct em28xx_buffer *vid_buf;
247 struct em28xx_buffer *vbi_buf; 248 struct em28xx_buffer *vbi_buf;
248 249
249 /* copy data from URB */ 250 /* copy data from URB */
250 int (*urb_data_copy) (struct em28xx *dev, struct urb *urb); 251 int (*urb_data_copy)(struct em28xx *dev, struct urb *urb);
251 252
252}; 253};
253 254
@@ -695,14 +696,14 @@ struct em28xx {
695 char urb_buf[URB_MAX_CTRL_SIZE]; /* urb control msg buffer */ 696 char urb_buf[URB_MAX_CTRL_SIZE]; /* urb control msg buffer */
696 697
697 /* helper funcs that call usb_control_msg */ 698 /* helper funcs that call usb_control_msg */
698 int (*em28xx_write_regs) (struct em28xx *dev, u16 reg, 699 int (*em28xx_write_regs)(struct em28xx *dev, u16 reg,
699 char *buf, int len); 700 char *buf, int len);
700 int (*em28xx_read_reg) (struct em28xx *dev, u16 reg); 701 int (*em28xx_read_reg)(struct em28xx *dev, u16 reg);
701 int (*em28xx_read_reg_req_len) (struct em28xx *dev, u8 req, u16 reg, 702 int (*em28xx_read_reg_req_len)(struct em28xx *dev, u8 req, u16 reg,
702 char *buf, int len); 703 char *buf, int len);
703 int (*em28xx_write_regs_req) (struct em28xx *dev, u8 req, u16 reg, 704 int (*em28xx_write_regs_req)(struct em28xx *dev, u8 req, u16 reg,
704 char *buf, int len); 705 char *buf, int len);
705 int (*em28xx_read_reg_req) (struct em28xx *dev, u8 req, u16 reg); 706 int (*em28xx_read_reg_req)(struct em28xx *dev, u8 req, u16 reg);
706 707
707 enum em28xx_mode mode; 708 enum em28xx_mode mode;
708 709
@@ -745,7 +746,7 @@ int em28xx_write_regs_req(struct em28xx *dev, u8 req, u16 reg, char *buf,
745int em28xx_write_regs(struct em28xx *dev, u16 reg, char *buf, int len); 746int em28xx_write_regs(struct em28xx *dev, u16 reg, char *buf, int len);
746int em28xx_write_reg(struct em28xx *dev, u16 reg, u8 val); 747int em28xx_write_reg(struct em28xx *dev, u16 reg, u8 val);
747int em28xx_write_reg_bits(struct em28xx *dev, u16 reg, u8 val, 748int em28xx_write_reg_bits(struct em28xx *dev, u16 reg, u8 val,
748 u8 bitmask); 749 u8 bitmask);
749int em28xx_toggle_reg_bits(struct em28xx *dev, u16 reg, u8 bitmask); 750int em28xx_toggle_reg_bits(struct em28xx *dev, u16 reg, u8 bitmask);
750 751
751int em28xx_read_ac97(struct em28xx *dev, u8 reg); 752int em28xx_read_ac97(struct em28xx *dev, u8 reg);
diff --git a/drivers/media/usb/go7007/go7007-v4l2.c b/drivers/media/usb/go7007/go7007-v4l2.c
index ec799b4d88be..d6bf982efa42 100644
--- a/drivers/media/usb/go7007/go7007-v4l2.c
+++ b/drivers/media/usb/go7007/go7007-v4l2.c
@@ -252,7 +252,7 @@ static int set_capture_size(struct go7007 *go, struct v4l2_format *fmt, int try)
252 if (go->board_info->sensor_flags & GO7007_SENSOR_SCALING) { 252 if (go->board_info->sensor_flags & GO7007_SENSOR_SCALING) {
253 struct v4l2_mbus_framefmt mbus_fmt; 253 struct v4l2_mbus_framefmt mbus_fmt;
254 254
255 mbus_fmt.code = V4L2_MBUS_FMT_FIXED; 255 mbus_fmt.code = MEDIA_BUS_FMT_FIXED;
256 mbus_fmt.width = fmt ? fmt->fmt.pix.width : width; 256 mbus_fmt.width = fmt ? fmt->fmt.pix.width : width;
257 mbus_fmt.height = height; 257 mbus_fmt.height = height;
258 go->encoder_h_halve = 0; 258 go->encoder_h_halve = 0;
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-context.c b/drivers/media/usb/pvrusb2/pvrusb2-context.c
index 7c19ff72e6b3..c8761c71c9d2 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-context.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-context.c
@@ -80,7 +80,7 @@ static void pvr2_context_set_notify(struct pvr2_context *mp, int fl)
80static void pvr2_context_destroy(struct pvr2_context *mp) 80static void pvr2_context_destroy(struct pvr2_context *mp)
81{ 81{
82 pvr2_trace(PVR2_TRACE_CTXT,"pvr2_context %p (destroy)",mp); 82 pvr2_trace(PVR2_TRACE_CTXT,"pvr2_context %p (destroy)",mp);
83 if (mp->hdw) pvr2_hdw_destroy(mp->hdw); 83 pvr2_hdw_destroy(mp->hdw);
84 pvr2_context_set_notify(mp, 0); 84 pvr2_context_set_notify(mp, 0);
85 mutex_lock(&pvr2_context_mutex); 85 mutex_lock(&pvr2_context_mutex);
86 if (mp->exist_next) { 86 if (mp->exist_next) {
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
index 9623b6218214..2fd9b5e0e2a9 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
@@ -2966,7 +2966,7 @@ static void pvr2_subdev_update(struct pvr2_hdw *hdw)
2966 memset(&fmt, 0, sizeof(fmt)); 2966 memset(&fmt, 0, sizeof(fmt));
2967 fmt.width = hdw->res_hor_val; 2967 fmt.width = hdw->res_hor_val;
2968 fmt.height = hdw->res_ver_val; 2968 fmt.height = hdw->res_ver_val;
2969 fmt.code = V4L2_MBUS_FMT_FIXED; 2969 fmt.code = MEDIA_BUS_FMT_FIXED;
2970 pvr2_trace(PVR2_TRACE_CHIPS, "subdev v4l2 set_size(%dx%d)", 2970 pvr2_trace(PVR2_TRACE_CHIPS, "subdev v4l2 set_size(%dx%d)",
2971 fmt.width, fmt.height); 2971 fmt.width, fmt.height);
2972 v4l2_device_call_all(&hdw->v4l2_dev, 0, video, s_mbus_fmt, &fmt); 2972 v4l2_device_call_all(&hdw->v4l2_dev, 0, video, s_mbus_fmt, &fmt);
diff --git a/drivers/media/usb/s2255/s2255drv.c b/drivers/media/usb/s2255/s2255drv.c
index ccc00099b261..de55e96fed15 100644
--- a/drivers/media/usb/s2255/s2255drv.c
+++ b/drivers/media/usb/s2255/s2255drv.c
@@ -558,27 +558,30 @@ static void s2255_fwchunk_complete(struct urb *urb)
558 558
559} 559}
560 560
561static int s2255_got_frame(struct s2255_vc *vc, int jpgsize) 561static void s2255_got_frame(struct s2255_vc *vc, int jpgsize)
562{ 562{
563 struct s2255_buffer *buf; 563 struct s2255_buffer *buf;
564 struct s2255_dev *dev = to_s2255_dev(vc->vdev.v4l2_dev); 564 struct s2255_dev *dev = to_s2255_dev(vc->vdev.v4l2_dev);
565 unsigned long flags = 0; 565 unsigned long flags = 0;
566 int rc = 0; 566
567 spin_lock_irqsave(&vc->qlock, flags); 567 spin_lock_irqsave(&vc->qlock, flags);
568 if (list_empty(&vc->buf_list)) { 568 if (list_empty(&vc->buf_list)) {
569 dprintk(dev, 1, "No active queue to serve\n"); 569 dprintk(dev, 1, "No active queue to serve\n");
570 rc = -1; 570 spin_unlock_irqrestore(&vc->qlock, flags);
571 goto unlock; 571 return;
572 } 572 }
573 buf = list_entry(vc->buf_list.next, 573 buf = list_entry(vc->buf_list.next,
574 struct s2255_buffer, list); 574 struct s2255_buffer, list);
575 list_del(&buf->list); 575 list_del(&buf->list);
576 v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp); 576 v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
577 buf->vb.v4l2_buf.field = vc->field;
578 buf->vb.v4l2_buf.sequence = vc->frame_count;
579 spin_unlock_irqrestore(&vc->qlock, flags);
580
577 s2255_fillbuff(vc, buf, jpgsize); 581 s2255_fillbuff(vc, buf, jpgsize);
582 /* tell v4l buffer was filled */
583 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE);
578 dprintk(dev, 2, "%s: [buf] [%p]\n", __func__, buf); 584 dprintk(dev, 2, "%s: [buf] [%p]\n", __func__, buf);
579unlock:
580 spin_unlock_irqrestore(&vc->qlock, flags);
581 return rc;
582} 585}
583 586
584static const struct s2255_fmt *format_by_fourcc(int fourcc) 587static const struct s2255_fmt *format_by_fourcc(int fourcc)
@@ -632,7 +635,7 @@ static void s2255_fillbuff(struct s2255_vc *vc,
632 break; 635 break;
633 case V4L2_PIX_FMT_JPEG: 636 case V4L2_PIX_FMT_JPEG:
634 case V4L2_PIX_FMT_MJPEG: 637 case V4L2_PIX_FMT_MJPEG:
635 buf->vb.v4l2_buf.length = jpgsize; 638 vb2_set_plane_payload(&buf->vb, 0, jpgsize);
636 memcpy(vbuf, tmpbuf, jpgsize); 639 memcpy(vbuf, tmpbuf, jpgsize);
637 break; 640 break;
638 case V4L2_PIX_FMT_YUV422P: 641 case V4L2_PIX_FMT_YUV422P:
@@ -649,11 +652,6 @@ static void s2255_fillbuff(struct s2255_vc *vc,
649 } 652 }
650 dprintk(dev, 2, "s2255fill at : Buffer 0x%08lx size= %d\n", 653 dprintk(dev, 2, "s2255fill at : Buffer 0x%08lx size= %d\n",
651 (unsigned long)vbuf, pos); 654 (unsigned long)vbuf, pos);
652 /* tell v4l buffer was filled */
653 buf->vb.v4l2_buf.field = vc->field;
654 buf->vb.v4l2_buf.sequence = vc->frame_count;
655 v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
656 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE);
657} 655}
658 656
659 657
@@ -1976,8 +1974,7 @@ static int s2255_release_sys_buffers(struct s2255_vc *vc)
1976{ 1974{
1977 unsigned long i; 1975 unsigned long i;
1978 for (i = 0; i < SYS_FRAMES; i++) { 1976 for (i = 0; i < SYS_FRAMES; i++) {
1979 if (vc->buffer.frame[i].lpvbits) 1977 vfree(vc->buffer.frame[i].lpvbits);
1980 vfree(vc->buffer.frame[i].lpvbits);
1981 vc->buffer.frame[i].lpvbits = NULL; 1978 vc->buffer.frame[i].lpvbits = NULL;
1982 } 1979 }
1983 return 0; 1980 return 0;
diff --git a/drivers/media/usb/stk1160/stk1160-v4l.c b/drivers/media/usb/stk1160/stk1160-v4l.c
index 233054311a62..a47629108c1b 100644
--- a/drivers/media/usb/stk1160/stk1160-v4l.c
+++ b/drivers/media/usb/stk1160/stk1160-v4l.c
@@ -475,7 +475,7 @@ static int vidioc_s_register(struct file *file, void *priv,
475 struct stk1160 *dev = video_drvdata(file); 475 struct stk1160 *dev = video_drvdata(file);
476 476
477 /* Match host */ 477 /* Match host */
478 return stk1160_write_reg(dev, reg->reg, cpu_to_le16(reg->val)); 478 return stk1160_write_reg(dev, reg->reg, reg->val);
479} 479}
480#endif 480#endif
481 481
diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c
index 9bfa041e3316..693d5f409138 100644
--- a/drivers/media/usb/usbvision/usbvision-video.c
+++ b/drivers/media/usb/usbvision/usbvision-video.c
@@ -509,11 +509,12 @@ static int vidioc_querycap(struct file *file, void *priv,
509 usbvision_device_data[usbvision->dev_model].model_string, 509 usbvision_device_data[usbvision->dev_model].model_string,
510 sizeof(vc->card)); 510 sizeof(vc->card));
511 usb_make_path(usbvision->dev, vc->bus_info, sizeof(vc->bus_info)); 511 usb_make_path(usbvision->dev, vc->bus_info, sizeof(vc->bus_info));
512 vc->capabilities = V4L2_CAP_VIDEO_CAPTURE | 512 vc->device_caps = V4L2_CAP_VIDEO_CAPTURE |
513 V4L2_CAP_AUDIO | 513 V4L2_CAP_AUDIO |
514 V4L2_CAP_READWRITE | 514 V4L2_CAP_READWRITE |
515 V4L2_CAP_STREAMING | 515 V4L2_CAP_STREAMING |
516 (usbvision->have_tuner ? V4L2_CAP_TUNER : 0); 516 (usbvision->have_tuner ? V4L2_CAP_TUNER : 0);
517 vc->capabilities = vc->device_caps | V4L2_CAP_DEVICE_CAPS;
517 return 0; 518 return 0;
518} 519}
519 520
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index 7c8322d4fc63..6a4b0b8cd270 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -331,6 +331,7 @@ static int uvc_parse_format(struct uvc_device *dev,
331 struct uvc_format_desc *fmtdesc; 331 struct uvc_format_desc *fmtdesc;
332 struct uvc_frame *frame; 332 struct uvc_frame *frame;
333 const unsigned char *start = buffer; 333 const unsigned char *start = buffer;
334 unsigned int width_multiplier = 1;
334 unsigned int interval; 335 unsigned int interval;
335 unsigned int i, n; 336 unsigned int i, n;
336 __u8 ftype; 337 __u8 ftype;
@@ -366,6 +367,20 @@ static int uvc_parse_format(struct uvc_device *dev,
366 } 367 }
367 368
368 format->bpp = buffer[21]; 369 format->bpp = buffer[21];
370
371 /* Some devices report a format that doesn't match what they
372 * really send.
373 */
374 if (dev->quirks & UVC_QUIRK_FORCE_Y8) {
375 if (format->fcc == V4L2_PIX_FMT_YUYV) {
376 strlcpy(format->name, "Greyscale 8-bit (Y8 )",
377 sizeof(format->name));
378 format->fcc = V4L2_PIX_FMT_GREY;
379 format->bpp = 8;
380 width_multiplier = 2;
381 }
382 }
383
369 if (buffer[2] == UVC_VS_FORMAT_UNCOMPRESSED) { 384 if (buffer[2] == UVC_VS_FORMAT_UNCOMPRESSED) {
370 ftype = UVC_VS_FRAME_UNCOMPRESSED; 385 ftype = UVC_VS_FRAME_UNCOMPRESSED;
371 } else { 386 } else {
@@ -474,7 +489,8 @@ static int uvc_parse_format(struct uvc_device *dev,
474 489
475 frame->bFrameIndex = buffer[3]; 490 frame->bFrameIndex = buffer[3];
476 frame->bmCapabilities = buffer[4]; 491 frame->bmCapabilities = buffer[4];
477 frame->wWidth = get_unaligned_le16(&buffer[5]); 492 frame->wWidth = get_unaligned_le16(&buffer[5])
493 * width_multiplier;
478 frame->wHeight = get_unaligned_le16(&buffer[7]); 494 frame->wHeight = get_unaligned_le16(&buffer[7]);
479 frame->dwMinBitRate = get_unaligned_le32(&buffer[9]); 495 frame->dwMinBitRate = get_unaligned_le32(&buffer[9]);
480 frame->dwMaxBitRate = get_unaligned_le32(&buffer[13]); 496 frame->dwMaxBitRate = get_unaligned_le32(&buffer[13]);
@@ -1623,12 +1639,12 @@ static void uvc_delete(struct uvc_device *dev)
1623{ 1639{
1624 struct list_head *p, *n; 1640 struct list_head *p, *n;
1625 1641
1626 usb_put_intf(dev->intf);
1627 usb_put_dev(dev->udev);
1628
1629 uvc_status_cleanup(dev); 1642 uvc_status_cleanup(dev);
1630 uvc_ctrl_cleanup_device(dev); 1643 uvc_ctrl_cleanup_device(dev);
1631 1644
1645 usb_put_intf(dev->intf);
1646 usb_put_dev(dev->udev);
1647
1632 if (dev->vdev.dev) 1648 if (dev->vdev.dev)
1633 v4l2_device_unregister(&dev->vdev); 1649 v4l2_device_unregister(&dev->vdev);
1634#ifdef CONFIG_MEDIA_CONTROLLER 1650#ifdef CONFIG_MEDIA_CONTROLLER
@@ -1718,6 +1734,11 @@ static int uvc_register_video(struct uvc_device *dev,
1718 struct video_device *vdev; 1734 struct video_device *vdev;
1719 int ret; 1735 int ret;
1720 1736
1737 /* Initialize the video buffers queue. */
1738 ret = uvc_queue_init(&stream->queue, stream->type, !uvc_no_drop_param);
1739 if (ret)
1740 return ret;
1741
1721 /* Initialize the streaming interface with default streaming 1742 /* Initialize the streaming interface with default streaming
1722 * parameters. 1743 * parameters.
1723 */ 1744 */
@@ -1744,6 +1765,7 @@ static int uvc_register_video(struct uvc_device *dev,
1744 */ 1765 */
1745 vdev->v4l2_dev = &dev->vdev; 1766 vdev->v4l2_dev = &dev->vdev;
1746 vdev->fops = &uvc_fops; 1767 vdev->fops = &uvc_fops;
1768 vdev->ioctl_ops = &uvc_ioctl_ops;
1747 vdev->release = uvc_release; 1769 vdev->release = uvc_release;
1748 vdev->prio = &stream->chain->prio; 1770 vdev->prio = &stream->chain->prio;
1749 if (stream->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) 1771 if (stream->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
@@ -1991,14 +2013,13 @@ static int __uvc_resume(struct usb_interface *intf, int reset)
1991{ 2013{
1992 struct uvc_device *dev = usb_get_intfdata(intf); 2014 struct uvc_device *dev = usb_get_intfdata(intf);
1993 struct uvc_streaming *stream; 2015 struct uvc_streaming *stream;
2016 int ret = 0;
1994 2017
1995 uvc_trace(UVC_TRACE_SUSPEND, "Resuming interface %u\n", 2018 uvc_trace(UVC_TRACE_SUSPEND, "Resuming interface %u\n",
1996 intf->cur_altsetting->desc.bInterfaceNumber); 2019 intf->cur_altsetting->desc.bInterfaceNumber);
1997 2020
1998 if (intf->cur_altsetting->desc.bInterfaceSubClass == 2021 if (intf->cur_altsetting->desc.bInterfaceSubClass ==
1999 UVC_SC_VIDEOCONTROL) { 2022 UVC_SC_VIDEOCONTROL) {
2000 int ret = 0;
2001
2002 if (reset) { 2023 if (reset) {
2003 ret = uvc_ctrl_restore_values(dev); 2024 ret = uvc_ctrl_restore_values(dev);
2004 if (ret < 0) 2025 if (ret < 0)
@@ -2014,8 +2035,13 @@ static int __uvc_resume(struct usb_interface *intf, int reset)
2014 } 2035 }
2015 2036
2016 list_for_each_entry(stream, &dev->streams, list) { 2037 list_for_each_entry(stream, &dev->streams, list) {
2017 if (stream->intf == intf) 2038 if (stream->intf == intf) {
2018 return uvc_video_resume(stream, reset); 2039 ret = uvc_video_resume(stream, reset);
2040 if (ret < 0)
2041 uvc_queue_streamoff(&stream->queue,
2042 stream->queue.queue.type);
2043 return ret;
2044 }
2019 } 2045 }
2020 2046
2021 uvc_trace(UVC_TRACE_SUSPEND, "Resume: video streaming USB interface " 2047 uvc_trace(UVC_TRACE_SUSPEND, "Resume: video streaming USB interface "
@@ -2504,6 +2530,15 @@ static struct usb_device_id uvc_ids[] = {
2504 .bInterfaceProtocol = 0, 2530 .bInterfaceProtocol = 0,
2505 .driver_info = UVC_QUIRK_PROBE_MINMAX 2531 .driver_info = UVC_QUIRK_PROBE_MINMAX
2506 | UVC_QUIRK_IGNORE_SELECTOR_UNIT }, 2532 | UVC_QUIRK_IGNORE_SELECTOR_UNIT },
2533 /* Oculus VR Positional Tracker DK2 */
2534 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
2535 | USB_DEVICE_ID_MATCH_INT_INFO,
2536 .idVendor = 0x2833,
2537 .idProduct = 0x0201,
2538 .bInterfaceClass = USB_CLASS_VIDEO,
2539 .bInterfaceSubClass = 1,
2540 .bInterfaceProtocol = 0,
2541 .driver_info = UVC_QUIRK_FORCE_Y8 },
2507 /* Generic USB Video Class */ 2542 /* Generic USB Video Class */
2508 { USB_INTERFACE_INFO(USB_CLASS_VIDEO, 1, 0) }, 2543 { USB_INTERFACE_INFO(USB_CLASS_VIDEO, 1, 0) },
2509 {} 2544 {}
diff --git a/drivers/media/usb/uvc/uvc_queue.c b/drivers/media/usb/uvc/uvc_queue.c
index 6e92d2080255..cc960723b926 100644
--- a/drivers/media/usb/uvc/uvc_queue.c
+++ b/drivers/media/usb/uvc/uvc_queue.c
@@ -36,6 +36,34 @@
36 * the driver. 36 * the driver.
37 */ 37 */
38 38
39static inline struct uvc_streaming *
40uvc_queue_to_stream(struct uvc_video_queue *queue)
41{
42 return container_of(queue, struct uvc_streaming, queue);
43}
44
45/*
46 * Return all queued buffers to videobuf2 in the requested state.
47 *
48 * This function must be called with the queue spinlock held.
49 */
50static void uvc_queue_return_buffers(struct uvc_video_queue *queue,
51 enum uvc_buffer_state state)
52{
53 enum vb2_buffer_state vb2_state = state == UVC_BUF_STATE_ERROR
54 ? VB2_BUF_STATE_ERROR
55 : VB2_BUF_STATE_QUEUED;
56
57 while (!list_empty(&queue->irqqueue)) {
58 struct uvc_buffer *buf = list_first_entry(&queue->irqqueue,
59 struct uvc_buffer,
60 queue);
61 list_del(&buf->queue);
62 buf->state = state;
63 vb2_buffer_done(&buf->buf, vb2_state);
64 }
65}
66
39/* ----------------------------------------------------------------------------- 67/* -----------------------------------------------------------------------------
40 * videobuf2 queue operations 68 * videobuf2 queue operations
41 */ 69 */
@@ -45,8 +73,7 @@ static int uvc_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
45 unsigned int sizes[], void *alloc_ctxs[]) 73 unsigned int sizes[], void *alloc_ctxs[])
46{ 74{
47 struct uvc_video_queue *queue = vb2_get_drv_priv(vq); 75 struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
48 struct uvc_streaming *stream = 76 struct uvc_streaming *stream = uvc_queue_to_stream(queue);
49 container_of(queue, struct uvc_streaming, queue);
50 77
51 /* Make sure the image size is large enough. */ 78 /* Make sure the image size is large enough. */
52 if (fmt && fmt->fmt.pix.sizeimage < stream->ctrl.dwMaxVideoFrameSize) 79 if (fmt && fmt->fmt.pix.sizeimage < stream->ctrl.dwMaxVideoFrameSize)
@@ -109,8 +136,7 @@ static void uvc_buffer_queue(struct vb2_buffer *vb)
109static void uvc_buffer_finish(struct vb2_buffer *vb) 136static void uvc_buffer_finish(struct vb2_buffer *vb)
110{ 137{
111 struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue); 138 struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
112 struct uvc_streaming *stream = 139 struct uvc_streaming *stream = uvc_queue_to_stream(queue);
113 container_of(queue, struct uvc_streaming, queue);
114 struct uvc_buffer *buf = container_of(vb, struct uvc_buffer, buf); 140 struct uvc_buffer *buf = container_of(vb, struct uvc_buffer, buf);
115 141
116 if (vb->state == VB2_BUF_STATE_DONE) 142 if (vb->state == VB2_BUF_STATE_DONE)
@@ -131,6 +157,39 @@ static void uvc_wait_finish(struct vb2_queue *vq)
131 mutex_lock(&queue->mutex); 157 mutex_lock(&queue->mutex);
132} 158}
133 159
160static int uvc_start_streaming(struct vb2_queue *vq, unsigned int count)
161{
162 struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
163 struct uvc_streaming *stream = uvc_queue_to_stream(queue);
164 unsigned long flags;
165 int ret;
166
167 queue->buf_used = 0;
168
169 ret = uvc_video_enable(stream, 1);
170 if (ret == 0)
171 return 0;
172
173 spin_lock_irqsave(&queue->irqlock, flags);
174 uvc_queue_return_buffers(queue, UVC_BUF_STATE_QUEUED);
175 spin_unlock_irqrestore(&queue->irqlock, flags);
176
177 return ret;
178}
179
180static void uvc_stop_streaming(struct vb2_queue *vq)
181{
182 struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
183 struct uvc_streaming *stream = uvc_queue_to_stream(queue);
184 unsigned long flags;
185
186 uvc_video_enable(stream, 0);
187
188 spin_lock_irqsave(&queue->irqlock, flags);
189 uvc_queue_return_buffers(queue, UVC_BUF_STATE_ERROR);
190 spin_unlock_irqrestore(&queue->irqlock, flags);
191}
192
134static struct vb2_ops uvc_queue_qops = { 193static struct vb2_ops uvc_queue_qops = {
135 .queue_setup = uvc_queue_setup, 194 .queue_setup = uvc_queue_setup,
136 .buf_prepare = uvc_buffer_prepare, 195 .buf_prepare = uvc_buffer_prepare,
@@ -138,6 +197,8 @@ static struct vb2_ops uvc_queue_qops = {
138 .buf_finish = uvc_buffer_finish, 197 .buf_finish = uvc_buffer_finish,
139 .wait_prepare = uvc_wait_prepare, 198 .wait_prepare = uvc_wait_prepare,
140 .wait_finish = uvc_wait_finish, 199 .wait_finish = uvc_wait_finish,
200 .start_streaming = uvc_start_streaming,
201 .stop_streaming = uvc_stop_streaming,
141}; 202};
142 203
143int uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type, 204int uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type,
@@ -165,12 +226,19 @@ int uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type,
165 return 0; 226 return 0;
166} 227}
167 228
229void uvc_queue_release(struct uvc_video_queue *queue)
230{
231 mutex_lock(&queue->mutex);
232 vb2_queue_release(&queue->queue);
233 mutex_unlock(&queue->mutex);
234}
235
168/* ----------------------------------------------------------------------------- 236/* -----------------------------------------------------------------------------
169 * V4L2 queue operations 237 * V4L2 queue operations
170 */ 238 */
171 239
172int uvc_alloc_buffers(struct uvc_video_queue *queue, 240int uvc_request_buffers(struct uvc_video_queue *queue,
173 struct v4l2_requestbuffers *rb) 241 struct v4l2_requestbuffers *rb)
174{ 242{
175 int ret; 243 int ret;
176 244
@@ -181,13 +249,6 @@ int uvc_alloc_buffers(struct uvc_video_queue *queue,
181 return ret ? ret : rb->count; 249 return ret ? ret : rb->count;
182} 250}
183 251
184void uvc_free_buffers(struct uvc_video_queue *queue)
185{
186 mutex_lock(&queue->mutex);
187 vb2_queue_release(&queue->queue);
188 mutex_unlock(&queue->mutex);
189}
190
191int uvc_query_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf) 252int uvc_query_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf)
192{ 253{
193 int ret; 254 int ret;
@@ -234,6 +295,28 @@ int uvc_dequeue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf,
234 return ret; 295 return ret;
235} 296}
236 297
298int uvc_queue_streamon(struct uvc_video_queue *queue, enum v4l2_buf_type type)
299{
300 int ret;
301
302 mutex_lock(&queue->mutex);
303 ret = vb2_streamon(&queue->queue, type);
304 mutex_unlock(&queue->mutex);
305
306 return ret;
307}
308
309int uvc_queue_streamoff(struct uvc_video_queue *queue, enum v4l2_buf_type type)
310{
311 int ret;
312
313 mutex_lock(&queue->mutex);
314 ret = vb2_streamoff(&queue->queue, type);
315 mutex_unlock(&queue->mutex);
316
317 return ret;
318}
319
237int uvc_queue_mmap(struct uvc_video_queue *queue, struct vm_area_struct *vma) 320int uvc_queue_mmap(struct uvc_video_queue *queue, struct vm_area_struct *vma)
238{ 321{
239 int ret; 322 int ret;
@@ -289,49 +372,6 @@ int uvc_queue_allocated(struct uvc_video_queue *queue)
289} 372}
290 373
291/* 374/*
292 * Enable or disable the video buffers queue.
293 *
294 * The queue must be enabled before starting video acquisition and must be
295 * disabled after stopping it. This ensures that the video buffers queue
296 * state can be properly initialized before buffers are accessed from the
297 * interrupt handler.
298 *
299 * Enabling the video queue returns -EBUSY if the queue is already enabled.
300 *
301 * Disabling the video queue cancels the queue and removes all buffers from
302 * the main queue.
303 *
304 * This function can't be called from interrupt context. Use
305 * uvc_queue_cancel() instead.
306 */
307int uvc_queue_enable(struct uvc_video_queue *queue, int enable)
308{
309 unsigned long flags;
310 int ret;
311
312 mutex_lock(&queue->mutex);
313 if (enable) {
314 ret = vb2_streamon(&queue->queue, queue->queue.type);
315 if (ret < 0)
316 goto done;
317
318 queue->buf_used = 0;
319 } else {
320 ret = vb2_streamoff(&queue->queue, queue->queue.type);
321 if (ret < 0)
322 goto done;
323
324 spin_lock_irqsave(&queue->irqlock, flags);
325 INIT_LIST_HEAD(&queue->irqqueue);
326 spin_unlock_irqrestore(&queue->irqlock, flags);
327 }
328
329done:
330 mutex_unlock(&queue->mutex);
331 return ret;
332}
333
334/*
335 * Cancel the video buffers queue. 375 * Cancel the video buffers queue.
336 * 376 *
337 * Cancelling the queue marks all buffers on the irq queue as erroneous, 377 * Cancelling the queue marks all buffers on the irq queue as erroneous,
@@ -345,17 +385,10 @@ done:
345 */ 385 */
346void uvc_queue_cancel(struct uvc_video_queue *queue, int disconnect) 386void uvc_queue_cancel(struct uvc_video_queue *queue, int disconnect)
347{ 387{
348 struct uvc_buffer *buf;
349 unsigned long flags; 388 unsigned long flags;
350 389
351 spin_lock_irqsave(&queue->irqlock, flags); 390 spin_lock_irqsave(&queue->irqlock, flags);
352 while (!list_empty(&queue->irqqueue)) { 391 uvc_queue_return_buffers(queue, UVC_BUF_STATE_ERROR);
353 buf = list_first_entry(&queue->irqqueue, struct uvc_buffer,
354 queue);
355 list_del(&buf->queue);
356 buf->state = UVC_BUF_STATE_ERROR;
357 vb2_buffer_done(&buf->buf, VB2_BUF_STATE_ERROR);
358 }
359 /* This must be protected by the irqlock spinlock to avoid race 392 /* This must be protected by the irqlock spinlock to avoid race
360 * conditions between uvc_buffer_queue and the disconnection event that 393 * conditions between uvc_buffer_queue and the disconnection event that
361 * could result in an interruptible wait in uvc_dequeue_buffer. Do not 394 * could result in an interruptible wait in uvc_dequeue_buffer. Do not
diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
index 378ae02e593b..9c5cbcf16529 100644
--- a/drivers/media/usb/uvc/uvc_v4l2.c
+++ b/drivers/media/usb/uvc/uvc_v4l2.c
@@ -530,10 +530,8 @@ static int uvc_v4l2_release(struct file *file)
530 uvc_trace(UVC_TRACE_CALLS, "uvc_v4l2_release\n"); 530 uvc_trace(UVC_TRACE_CALLS, "uvc_v4l2_release\n");
531 531
532 /* Only free resources if this is a privileged handle. */ 532 /* Only free resources if this is a privileged handle. */
533 if (uvc_has_privileges(handle)) { 533 if (uvc_has_privileges(handle))
534 uvc_video_enable(stream, 0); 534 uvc_queue_release(&stream->queue);
535 uvc_free_buffers(&stream->queue);
536 }
537 535
538 /* Release the file handle. */ 536 /* Release the file handle. */
539 uvc_dismiss_privileges(handle); 537 uvc_dismiss_privileges(handle);
@@ -551,553 +549,628 @@ static int uvc_v4l2_release(struct file *file)
551 return 0; 549 return 0;
552} 550}
553 551
554static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg) 552static int uvc_ioctl_querycap(struct file *file, void *fh,
553 struct v4l2_capability *cap)
555{ 554{
556 struct video_device *vdev = video_devdata(file); 555 struct video_device *vdev = video_devdata(file);
557 struct uvc_fh *handle = file->private_data; 556 struct uvc_fh *handle = file->private_data;
558 struct uvc_video_chain *chain = handle->chain; 557 struct uvc_video_chain *chain = handle->chain;
559 struct uvc_streaming *stream = handle->stream; 558 struct uvc_streaming *stream = handle->stream;
560 long ret = 0;
561 559
562 switch (cmd) { 560 strlcpy(cap->driver, "uvcvideo", sizeof(cap->driver));
563 /* Query capabilities */ 561 strlcpy(cap->card, vdev->name, sizeof(cap->card));
564 case VIDIOC_QUERYCAP: 562 usb_make_path(stream->dev->udev, cap->bus_info, sizeof(cap->bus_info));
565 { 563 cap->capabilities = V4L2_CAP_DEVICE_CAPS | V4L2_CAP_STREAMING
566 struct v4l2_capability *cap = arg; 564 | chain->caps;
567 565 if (stream->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
568 memset(cap, 0, sizeof *cap); 566 cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
569 strlcpy(cap->driver, "uvcvideo", sizeof cap->driver); 567 else
570 strlcpy(cap->card, vdev->name, sizeof cap->card); 568 cap->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
571 usb_make_path(stream->dev->udev,
572 cap->bus_info, sizeof(cap->bus_info));
573 cap->version = LINUX_VERSION_CODE;
574 cap->capabilities = V4L2_CAP_DEVICE_CAPS | V4L2_CAP_STREAMING
575 | chain->caps;
576 if (stream->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
577 cap->device_caps = V4L2_CAP_VIDEO_CAPTURE
578 | V4L2_CAP_STREAMING;
579 else
580 cap->device_caps = V4L2_CAP_VIDEO_OUTPUT
581 | V4L2_CAP_STREAMING;
582 break;
583 }
584 569
585 /* Priority */ 570 return 0;
586 case VIDIOC_G_PRIORITY: 571}
587 *(u32 *)arg = v4l2_prio_max(vdev->prio);
588 break;
589 572
590 case VIDIOC_S_PRIORITY: 573static int uvc_ioctl_enum_fmt(struct uvc_streaming *stream,
591 ret = v4l2_prio_check(vdev->prio, handle->vfh.prio); 574 struct v4l2_fmtdesc *fmt)
592 if (ret < 0) 575{
593 return ret; 576 struct uvc_format *format;
577 enum v4l2_buf_type type = fmt->type;
578 __u32 index = fmt->index;
594 579
595 return v4l2_prio_change(vdev->prio, &handle->vfh.prio, 580 if (fmt->type != stream->type || fmt->index >= stream->nformats)
596 *(u32 *)arg); 581 return -EINVAL;
597 582
598 /* Get, Set & Query control */ 583 memset(fmt, 0, sizeof(*fmt));
599 case VIDIOC_QUERYCTRL: 584 fmt->index = index;
600 return uvc_query_v4l2_ctrl(chain, arg); 585 fmt->type = type;
586
587 format = &stream->format[fmt->index];
588 fmt->flags = 0;
589 if (format->flags & UVC_FMT_FLAG_COMPRESSED)
590 fmt->flags |= V4L2_FMT_FLAG_COMPRESSED;
591 strlcpy(fmt->description, format->name, sizeof(fmt->description));
592 fmt->description[sizeof(fmt->description) - 1] = 0;
593 fmt->pixelformat = format->fcc;
594 return 0;
595}
601 596
602 case VIDIOC_G_CTRL: 597static int uvc_ioctl_enum_fmt_vid_cap(struct file *file, void *fh,
603 { 598 struct v4l2_fmtdesc *fmt)
604 struct v4l2_control *ctrl = arg; 599{
605 struct v4l2_ext_control xctrl; 600 struct uvc_fh *handle = fh;
601 struct uvc_streaming *stream = handle->stream;
606 602
607 memset(&xctrl, 0, sizeof xctrl); 603 return uvc_ioctl_enum_fmt(stream, fmt);
608 xctrl.id = ctrl->id; 604}
609 605
610 ret = uvc_ctrl_begin(chain); 606static int uvc_ioctl_enum_fmt_vid_out(struct file *file, void *fh,
611 if (ret < 0) 607 struct v4l2_fmtdesc *fmt)
612 return ret; 608{
609 struct uvc_fh *handle = fh;
610 struct uvc_streaming *stream = handle->stream;
613 611
614 ret = uvc_ctrl_get(chain, &xctrl); 612 return uvc_ioctl_enum_fmt(stream, fmt);
615 uvc_ctrl_rollback(handle); 613}
616 if (ret >= 0)
617 ctrl->value = xctrl.value;
618 break;
619 }
620 614
621 case VIDIOC_S_CTRL: 615static int uvc_ioctl_g_fmt_vid_cap(struct file *file, void *fh,
622 { 616 struct v4l2_format *fmt)
623 struct v4l2_control *ctrl = arg; 617{
624 struct v4l2_ext_control xctrl; 618 struct uvc_fh *handle = fh;
619 struct uvc_streaming *stream = handle->stream;
625 620
626 ret = v4l2_prio_check(vdev->prio, handle->vfh.prio); 621 return uvc_v4l2_get_format(stream, fmt);
627 if (ret < 0) 622}
628 return ret;
629 623
630 memset(&xctrl, 0, sizeof xctrl); 624static int uvc_ioctl_g_fmt_vid_out(struct file *file, void *fh,
631 xctrl.id = ctrl->id; 625 struct v4l2_format *fmt)
632 xctrl.value = ctrl->value; 626{
627 struct uvc_fh *handle = fh;
628 struct uvc_streaming *stream = handle->stream;
633 629
634 ret = uvc_ctrl_begin(chain); 630 return uvc_v4l2_get_format(stream, fmt);
635 if (ret < 0) 631}
636 return ret;
637 632
638 ret = uvc_ctrl_set(chain, &xctrl); 633static int uvc_ioctl_s_fmt_vid_cap(struct file *file, void *fh,
639 if (ret < 0) { 634 struct v4l2_format *fmt)
640 uvc_ctrl_rollback(handle); 635{
641 return ret; 636 struct uvc_fh *handle = fh;
642 } 637 struct uvc_streaming *stream = handle->stream;
643 ret = uvc_ctrl_commit(handle, &xctrl, 1); 638 int ret;
644 if (ret == 0)
645 ctrl->value = xctrl.value;
646 break;
647 }
648 639
649 case VIDIOC_QUERYMENU: 640 ret = uvc_acquire_privileges(handle);
650 return uvc_query_v4l2_menu(chain, arg); 641 if (ret < 0)
642 return ret;
651 643
652 case VIDIOC_G_EXT_CTRLS: 644 return uvc_v4l2_set_format(stream, fmt);
653 { 645}
654 struct v4l2_ext_controls *ctrls = arg;
655 struct v4l2_ext_control *ctrl = ctrls->controls;
656 unsigned int i;
657 646
658 ret = uvc_ctrl_begin(chain); 647static int uvc_ioctl_s_fmt_vid_out(struct file *file, void *fh,
659 if (ret < 0) 648 struct v4l2_format *fmt)
660 return ret; 649{
650 struct uvc_fh *handle = fh;
651 struct uvc_streaming *stream = handle->stream;
652 int ret;
661 653
662 for (i = 0; i < ctrls->count; ++ctrl, ++i) { 654 ret = uvc_acquire_privileges(handle);
663 ret = uvc_ctrl_get(chain, ctrl); 655 if (ret < 0)
664 if (ret < 0) { 656 return ret;
665 uvc_ctrl_rollback(handle);
666 ctrls->error_idx = i;
667 return ret;
668 }
669 }
670 ctrls->error_idx = 0;
671 ret = uvc_ctrl_rollback(handle);
672 break;
673 }
674 657
675 case VIDIOC_S_EXT_CTRLS: 658 return uvc_v4l2_set_format(stream, fmt);
676 ret = v4l2_prio_check(vdev->prio, handle->vfh.prio); 659}
677 if (ret < 0)
678 return ret;
679 /* Fall through */
680 case VIDIOC_TRY_EXT_CTRLS:
681 {
682 struct v4l2_ext_controls *ctrls = arg;
683 struct v4l2_ext_control *ctrl = ctrls->controls;
684 unsigned int i;
685
686 ret = uvc_ctrl_begin(chain);
687 if (ret < 0)
688 return ret;
689 660
690 for (i = 0; i < ctrls->count; ++ctrl, ++i) { 661static int uvc_ioctl_try_fmt_vid_cap(struct file *file, void *fh,
691 ret = uvc_ctrl_set(chain, ctrl); 662 struct v4l2_format *fmt)
692 if (ret < 0) { 663{
693 uvc_ctrl_rollback(handle); 664 struct uvc_fh *handle = fh;
694 ctrls->error_idx = cmd == VIDIOC_S_EXT_CTRLS 665 struct uvc_streaming *stream = handle->stream;
695 ? ctrls->count : i; 666 struct uvc_streaming_control probe;
696 return ret;
697 }
698 }
699 667
700 ctrls->error_idx = 0; 668 return uvc_v4l2_try_format(stream, fmt, &probe, NULL, NULL);
669}
701 670
702 if (cmd == VIDIOC_S_EXT_CTRLS) 671static int uvc_ioctl_try_fmt_vid_out(struct file *file, void *fh,
703 ret = uvc_ctrl_commit(handle, 672 struct v4l2_format *fmt)
704 ctrls->controls, ctrls->count); 673{
705 else 674 struct uvc_fh *handle = fh;
706 ret = uvc_ctrl_rollback(handle); 675 struct uvc_streaming *stream = handle->stream;
707 break; 676 struct uvc_streaming_control probe;
708 }
709 677
710 /* Get, Set & Enum input */ 678 return uvc_v4l2_try_format(stream, fmt, &probe, NULL, NULL);
711 case VIDIOC_ENUMINPUT: 679}
712 {
713 const struct uvc_entity *selector = chain->selector;
714 struct v4l2_input *input = arg;
715 struct uvc_entity *iterm = NULL;
716 u32 index = input->index;
717 int pin = 0;
718
719 if (selector == NULL ||
720 (chain->dev->quirks & UVC_QUIRK_IGNORE_SELECTOR_UNIT)) {
721 if (index != 0)
722 return -EINVAL;
723 list_for_each_entry(iterm, &chain->entities, chain) {
724 if (UVC_ENTITY_IS_ITERM(iterm))
725 break;
726 }
727 pin = iterm->id;
728 } else if (index < selector->bNrInPins) {
729 pin = selector->baSourceID[index];
730 list_for_each_entry(iterm, &chain->entities, chain) {
731 if (!UVC_ENTITY_IS_ITERM(iterm))
732 continue;
733 if (iterm->id == pin)
734 break;
735 }
736 }
737 680
738 if (iterm == NULL || iterm->id != pin) 681static int uvc_ioctl_reqbufs(struct file *file, void *fh,
739 return -EINVAL; 682 struct v4l2_requestbuffers *rb)
683{
684 struct uvc_fh *handle = fh;
685 struct uvc_streaming *stream = handle->stream;
686 int ret;
740 687
741 memset(input, 0, sizeof *input); 688 ret = uvc_acquire_privileges(handle);
742 input->index = index; 689 if (ret < 0)
743 strlcpy(input->name, iterm->name, sizeof input->name); 690 return ret;
744 if (UVC_ENTITY_TYPE(iterm) == UVC_ITT_CAMERA)
745 input->type = V4L2_INPUT_TYPE_CAMERA;
746 break;
747 }
748 691
749 case VIDIOC_G_INPUT: 692 mutex_lock(&stream->mutex);
750 { 693 ret = uvc_request_buffers(&stream->queue, rb);
751 u8 input; 694 mutex_unlock(&stream->mutex);
695 if (ret < 0)
696 return ret;
752 697
753 if (chain->selector == NULL || 698 if (ret == 0)
754 (chain->dev->quirks & UVC_QUIRK_IGNORE_SELECTOR_UNIT)) { 699 uvc_dismiss_privileges(handle);
755 *(int *)arg = 0;
756 break;
757 }
758 700
759 ret = uvc_query_ctrl(chain->dev, UVC_GET_CUR, 701 return 0;
760 chain->selector->id, chain->dev->intfnum, 702}
761 UVC_SU_INPUT_SELECT_CONTROL, &input, 1);
762 if (ret < 0)
763 return ret;
764 703
765 *(int *)arg = input - 1; 704static int uvc_ioctl_querybuf(struct file *file, void *fh,
766 break; 705 struct v4l2_buffer *buf)
767 } 706{
707 struct uvc_fh *handle = fh;
708 struct uvc_streaming *stream = handle->stream;
768 709
769 case VIDIOC_S_INPUT: 710 if (!uvc_has_privileges(handle))
770 { 711 return -EBUSY;
771 u32 input = *(u32 *)arg + 1;
772 712
773 ret = v4l2_prio_check(vdev->prio, handle->vfh.prio); 713 return uvc_query_buffer(&stream->queue, buf);
774 if (ret < 0) 714}
775 return ret;
776 715
777 if ((ret = uvc_acquire_privileges(handle)) < 0) 716static int uvc_ioctl_qbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
778 return ret; 717{
718 struct uvc_fh *handle = fh;
719 struct uvc_streaming *stream = handle->stream;
779 720
780 if (chain->selector == NULL || 721 if (!uvc_has_privileges(handle))
781 (chain->dev->quirks & UVC_QUIRK_IGNORE_SELECTOR_UNIT)) { 722 return -EBUSY;
782 if (input != 1)
783 return -EINVAL;
784 break;
785 }
786 723
787 if (input == 0 || input > chain->selector->bNrInPins) 724 return uvc_queue_buffer(&stream->queue, buf);
788 return -EINVAL; 725}
789 726
790 return uvc_query_ctrl(chain->dev, UVC_SET_CUR, 727static int uvc_ioctl_dqbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
791 chain->selector->id, chain->dev->intfnum, 728{
792 UVC_SU_INPUT_SELECT_CONTROL, &input, 1); 729 struct uvc_fh *handle = fh;
793 } 730 struct uvc_streaming *stream = handle->stream;
794 731
795 /* Try, Get, Set & Enum format */ 732 if (!uvc_has_privileges(handle))
796 case VIDIOC_ENUM_FMT: 733 return -EBUSY;
797 {
798 struct v4l2_fmtdesc *fmt = arg;
799 struct uvc_format *format;
800 enum v4l2_buf_type type = fmt->type;
801 __u32 index = fmt->index;
802 734
803 if (fmt->type != stream->type || 735 return uvc_dequeue_buffer(&stream->queue, buf,
804 fmt->index >= stream->nformats) 736 file->f_flags & O_NONBLOCK);
805 return -EINVAL; 737}
806 738
807 memset(fmt, 0, sizeof(*fmt)); 739static int uvc_ioctl_create_bufs(struct file *file, void *fh,
808 fmt->index = index; 740 struct v4l2_create_buffers *cb)
809 fmt->type = type; 741{
810 742 struct uvc_fh *handle = fh;
811 format = &stream->format[fmt->index]; 743 struct uvc_streaming *stream = handle->stream;
812 fmt->flags = 0; 744 int ret;
813 if (format->flags & UVC_FMT_FLAG_COMPRESSED)
814 fmt->flags |= V4L2_FMT_FLAG_COMPRESSED;
815 strlcpy(fmt->description, format->name,
816 sizeof fmt->description);
817 fmt->description[sizeof fmt->description - 1] = 0;
818 fmt->pixelformat = format->fcc;
819 break;
820 }
821 745
822 case VIDIOC_TRY_FMT: 746 ret = uvc_acquire_privileges(handle);
823 { 747 if (ret < 0)
824 struct uvc_streaming_control probe; 748 return ret;
825 749
826 return uvc_v4l2_try_format(stream, arg, &probe, NULL, NULL); 750 return uvc_create_buffers(&stream->queue, cb);
827 } 751}
828 752
829 case VIDIOC_S_FMT: 753static int uvc_ioctl_streamon(struct file *file, void *fh,
830 ret = v4l2_prio_check(vdev->prio, handle->vfh.prio); 754 enum v4l2_buf_type type)
831 if (ret < 0) 755{
832 return ret; 756 struct uvc_fh *handle = fh;
757 struct uvc_streaming *stream = handle->stream;
758 int ret;
833 759
834 if ((ret = uvc_acquire_privileges(handle)) < 0) 760 if (!uvc_has_privileges(handle))
835 return ret; 761 return -EBUSY;
836 762
837 return uvc_v4l2_set_format(stream, arg); 763 mutex_lock(&stream->mutex);
764 ret = uvc_queue_streamon(&stream->queue, type);
765 mutex_unlock(&stream->mutex);
838 766
839 case VIDIOC_G_FMT: 767 return ret;
840 return uvc_v4l2_get_format(stream, arg); 768}
841 769
842 /* Frame size enumeration */ 770static int uvc_ioctl_streamoff(struct file *file, void *fh,
843 case VIDIOC_ENUM_FRAMESIZES: 771 enum v4l2_buf_type type)
844 { 772{
845 struct v4l2_frmsizeenum *fsize = arg; 773 struct uvc_fh *handle = fh;
846 struct uvc_format *format = NULL; 774 struct uvc_streaming *stream = handle->stream;
847 struct uvc_frame *frame;
848 int i;
849 775
850 /* Look for the given pixel format */ 776 if (!uvc_has_privileges(handle))
851 for (i = 0; i < stream->nformats; i++) { 777 return -EBUSY;
852 if (stream->format[i].fcc ==
853 fsize->pixel_format) {
854 format = &stream->format[i];
855 break;
856 }
857 }
858 if (format == NULL)
859 return -EINVAL;
860 778
861 if (fsize->index >= format->nframes) 779 mutex_lock(&stream->mutex);
862 return -EINVAL; 780 uvc_queue_streamoff(&stream->queue, type);
781 mutex_unlock(&stream->mutex);
863 782
864 frame = &format->frame[fsize->index]; 783 return 0;
865 fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE; 784}
866 fsize->discrete.width = frame->wWidth;
867 fsize->discrete.height = frame->wHeight;
868 break;
869 }
870 785
871 /* Frame interval enumeration */ 786static int uvc_ioctl_enum_input(struct file *file, void *fh,
872 case VIDIOC_ENUM_FRAMEINTERVALS: 787 struct v4l2_input *input)
873 { 788{
874 struct v4l2_frmivalenum *fival = arg; 789 struct uvc_fh *handle = fh;
875 struct uvc_format *format = NULL; 790 struct uvc_video_chain *chain = handle->chain;
876 struct uvc_frame *frame = NULL; 791 const struct uvc_entity *selector = chain->selector;
877 int i; 792 struct uvc_entity *iterm = NULL;
878 793 u32 index = input->index;
879 /* Look for the given pixel format and frame size */ 794 int pin = 0;
880 for (i = 0; i < stream->nformats; i++) { 795
881 if (stream->format[i].fcc == 796 if (selector == NULL ||
882 fival->pixel_format) { 797 (chain->dev->quirks & UVC_QUIRK_IGNORE_SELECTOR_UNIT)) {
883 format = &stream->format[i]; 798 if (index != 0)
884 break;
885 }
886 }
887 if (format == NULL)
888 return -EINVAL; 799 return -EINVAL;
889 800 list_for_each_entry(iterm, &chain->entities, chain) {
890 for (i = 0; i < format->nframes; i++) { 801 if (UVC_ENTITY_IS_ITERM(iterm))
891 if (format->frame[i].wWidth == fival->width &&
892 format->frame[i].wHeight == fival->height) {
893 frame = &format->frame[i];
894 break; 802 break;
895 }
896 } 803 }
897 if (frame == NULL) 804 pin = iterm->id;
898 return -EINVAL; 805 } else if (index < selector->bNrInPins) {
899 806 pin = selector->baSourceID[index];
900 if (frame->bFrameIntervalType) { 807 list_for_each_entry(iterm, &chain->entities, chain) {
901 if (fival->index >= frame->bFrameIntervalType) 808 if (!UVC_ENTITY_IS_ITERM(iterm))
902 return -EINVAL; 809 continue;
903 810 if (iterm->id == pin)
904 fival->type = V4L2_FRMIVAL_TYPE_DISCRETE; 811 break;
905 fival->discrete.numerator =
906 frame->dwFrameInterval[fival->index];
907 fival->discrete.denominator = 10000000;
908 uvc_simplify_fraction(&fival->discrete.numerator,
909 &fival->discrete.denominator, 8, 333);
910 } else {
911 fival->type = V4L2_FRMIVAL_TYPE_STEPWISE;
912 fival->stepwise.min.numerator =
913 frame->dwFrameInterval[0];
914 fival->stepwise.min.denominator = 10000000;
915 fival->stepwise.max.numerator =
916 frame->dwFrameInterval[1];
917 fival->stepwise.max.denominator = 10000000;
918 fival->stepwise.step.numerator =
919 frame->dwFrameInterval[2];
920 fival->stepwise.step.denominator = 10000000;
921 uvc_simplify_fraction(&fival->stepwise.min.numerator,
922 &fival->stepwise.min.denominator, 8, 333);
923 uvc_simplify_fraction(&fival->stepwise.max.numerator,
924 &fival->stepwise.max.denominator, 8, 333);
925 uvc_simplify_fraction(&fival->stepwise.step.numerator,
926 &fival->stepwise.step.denominator, 8, 333);
927 } 812 }
928 break;
929 } 813 }
930 814
931 /* Get & Set streaming parameters */ 815 if (iterm == NULL || iterm->id != pin)
932 case VIDIOC_G_PARM: 816 return -EINVAL;
933 return uvc_v4l2_get_streamparm(stream, arg);
934 817
935 case VIDIOC_S_PARM: 818 memset(input, 0, sizeof(*input));
936 ret = v4l2_prio_check(vdev->prio, handle->vfh.prio); 819 input->index = index;
937 if (ret < 0) 820 strlcpy(input->name, iterm->name, sizeof(input->name));
938 return ret; 821 if (UVC_ENTITY_TYPE(iterm) == UVC_ITT_CAMERA)
822 input->type = V4L2_INPUT_TYPE_CAMERA;
939 823
940 if ((ret = uvc_acquire_privileges(handle)) < 0) 824 return 0;
941 return ret; 825}
942 826
943 return uvc_v4l2_set_streamparm(stream, arg); 827static int uvc_ioctl_g_input(struct file *file, void *fh, unsigned int *input)
828{
829 struct uvc_fh *handle = fh;
830 struct uvc_video_chain *chain = handle->chain;
831 int ret;
832 u8 i;
944 833
945 /* Cropping and scaling */ 834 if (chain->selector == NULL ||
946 case VIDIOC_CROPCAP: 835 (chain->dev->quirks & UVC_QUIRK_IGNORE_SELECTOR_UNIT)) {
947 { 836 *input = 0;
948 struct v4l2_cropcap *ccap = arg; 837 return 0;
838 }
949 839
950 if (ccap->type != stream->type) 840 ret = uvc_query_ctrl(chain->dev, UVC_GET_CUR, chain->selector->id,
951 return -EINVAL; 841 chain->dev->intfnum, UVC_SU_INPUT_SELECT_CONTROL,
842 &i, 1);
843 if (ret < 0)
844 return ret;
952 845
953 ccap->bounds.left = 0; 846 *input = i - 1;
954 ccap->bounds.top = 0; 847 return 0;
848}
955 849
956 mutex_lock(&stream->mutex); 850static int uvc_ioctl_s_input(struct file *file, void *fh, unsigned int input)
957 ccap->bounds.width = stream->cur_frame->wWidth; 851{
958 ccap->bounds.height = stream->cur_frame->wHeight; 852 struct uvc_fh *handle = fh;
959 mutex_unlock(&stream->mutex); 853 struct uvc_video_chain *chain = handle->chain;
854 int ret;
855 u32 i;
960 856
961 ccap->defrect = ccap->bounds; 857 ret = uvc_acquire_privileges(handle);
858 if (ret < 0)
859 return ret;
962 860
963 ccap->pixelaspect.numerator = 1; 861 if (chain->selector == NULL ||
964 ccap->pixelaspect.denominator = 1; 862 (chain->dev->quirks & UVC_QUIRK_IGNORE_SELECTOR_UNIT)) {
965 break; 863 if (input)
864 return -EINVAL;
865 return 0;
966 } 866 }
967 867
968 case VIDIOC_G_CROP: 868 if (input >= chain->selector->bNrInPins)
969 case VIDIOC_S_CROP: 869 return -EINVAL;
970 return -ENOTTY;
971 870
972 /* Buffers & streaming */ 871 i = input + 1;
973 case VIDIOC_REQBUFS: 872 return uvc_query_ctrl(chain->dev, UVC_SET_CUR, chain->selector->id,
974 ret = v4l2_prio_check(vdev->prio, handle->vfh.prio); 873 chain->dev->intfnum, UVC_SU_INPUT_SELECT_CONTROL,
975 if (ret < 0) 874 &i, 1);
976 return ret; 875}
977 876
978 if ((ret = uvc_acquire_privileges(handle)) < 0) 877static int uvc_ioctl_queryctrl(struct file *file, void *fh,
979 return ret; 878 struct v4l2_queryctrl *qc)
879{
880 struct uvc_fh *handle = fh;
881 struct uvc_video_chain *chain = handle->chain;
980 882
981 mutex_lock(&stream->mutex); 883 return uvc_query_v4l2_ctrl(chain, qc);
982 ret = uvc_alloc_buffers(&stream->queue, arg); 884}
983 mutex_unlock(&stream->mutex);
984 if (ret < 0)
985 return ret;
986 885
987 if (ret == 0) 886static int uvc_ioctl_g_ctrl(struct file *file, void *fh,
988 uvc_dismiss_privileges(handle); 887 struct v4l2_control *ctrl)
888{
889 struct uvc_fh *handle = fh;
890 struct uvc_video_chain *chain = handle->chain;
891 struct v4l2_ext_control xctrl;
892 int ret;
989 893
990 ret = 0; 894 memset(&xctrl, 0, sizeof(xctrl));
991 break; 895 xctrl.id = ctrl->id;
896
897 ret = uvc_ctrl_begin(chain);
898 if (ret < 0)
899 return ret;
992 900
993 case VIDIOC_QUERYBUF: 901 ret = uvc_ctrl_get(chain, &xctrl);
994 { 902 uvc_ctrl_rollback(handle);
995 struct v4l2_buffer *buf = arg; 903 if (ret < 0)
904 return ret;
996 905
997 if (!uvc_has_privileges(handle)) 906 ctrl->value = xctrl.value;
998 return -EBUSY; 907 return 0;
908}
999 909
1000 return uvc_query_buffer(&stream->queue, buf); 910static int uvc_ioctl_s_ctrl(struct file *file, void *fh,
1001 } 911 struct v4l2_control *ctrl)
912{
913 struct uvc_fh *handle = fh;
914 struct uvc_video_chain *chain = handle->chain;
915 struct v4l2_ext_control xctrl;
916 int ret;
1002 917
1003 case VIDIOC_CREATE_BUFS: 918 memset(&xctrl, 0, sizeof(xctrl));
1004 { 919 xctrl.id = ctrl->id;
1005 struct v4l2_create_buffers *cb = arg; 920 xctrl.value = ctrl->value;
1006 921
1007 ret = uvc_acquire_privileges(handle); 922 ret = uvc_ctrl_begin(chain);
1008 if (ret < 0) 923 if (ret < 0)
1009 return ret; 924 return ret;
1010 925
1011 return uvc_create_buffers(&stream->queue, cb); 926 ret = uvc_ctrl_set(chain, &xctrl);
927 if (ret < 0) {
928 uvc_ctrl_rollback(handle);
929 return ret;
1012 } 930 }
1013 931
1014 case VIDIOC_QBUF: 932 ret = uvc_ctrl_commit(handle, &xctrl, 1);
1015 if (!uvc_has_privileges(handle)) 933 if (ret < 0)
1016 return -EBUSY; 934 return ret;
1017 935
1018 return uvc_queue_buffer(&stream->queue, arg); 936 ctrl->value = xctrl.value;
937 return 0;
938}
939
940static int uvc_ioctl_g_ext_ctrls(struct file *file, void *fh,
941 struct v4l2_ext_controls *ctrls)
942{
943 struct uvc_fh *handle = fh;
944 struct uvc_video_chain *chain = handle->chain;
945 struct v4l2_ext_control *ctrl = ctrls->controls;
946 unsigned int i;
947 int ret;
1019 948
1020 case VIDIOC_DQBUF: 949 ret = uvc_ctrl_begin(chain);
1021 if (!uvc_has_privileges(handle)) 950 if (ret < 0)
1022 return -EBUSY; 951 return ret;
1023 952
1024 return uvc_dequeue_buffer(&stream->queue, arg, 953 for (i = 0; i < ctrls->count; ++ctrl, ++i) {
1025 file->f_flags & O_NONBLOCK); 954 ret = uvc_ctrl_get(chain, ctrl);
955 if (ret < 0) {
956 uvc_ctrl_rollback(handle);
957 ctrls->error_idx = i;
958 return ret;
959 }
960 }
1026 961
1027 case VIDIOC_STREAMON: 962 ctrls->error_idx = 0;
1028 {
1029 int *type = arg;
1030 963
1031 if (*type != stream->type) 964 return uvc_ctrl_rollback(handle);
1032 return -EINVAL; 965}
1033 966
1034 ret = v4l2_prio_check(vdev->prio, handle->vfh.prio); 967static int uvc_ioctl_s_try_ext_ctrls(struct uvc_fh *handle,
1035 if (ret < 0) 968 struct v4l2_ext_controls *ctrls,
1036 return ret; 969 bool commit)
970{
971 struct v4l2_ext_control *ctrl = ctrls->controls;
972 struct uvc_video_chain *chain = handle->chain;
973 unsigned int i;
974 int ret;
1037 975
1038 if (!uvc_has_privileges(handle)) 976 ret = uvc_ctrl_begin(chain);
1039 return -EBUSY; 977 if (ret < 0)
978 return ret;
1040 979
1041 mutex_lock(&stream->mutex); 980 for (i = 0; i < ctrls->count; ++ctrl, ++i) {
1042 ret = uvc_video_enable(stream, 1); 981 ret = uvc_ctrl_set(chain, ctrl);
1043 mutex_unlock(&stream->mutex); 982 if (ret < 0) {
1044 if (ret < 0) 983 uvc_ctrl_rollback(handle);
984 ctrls->error_idx = commit ? ctrls->count : i;
1045 return ret; 985 return ret;
1046 break; 986 }
1047 } 987 }
1048 988
1049 case VIDIOC_STREAMOFF: 989 ctrls->error_idx = 0;
1050 {
1051 int *type = arg;
1052 990
1053 if (*type != stream->type) 991 if (commit)
1054 return -EINVAL; 992 return uvc_ctrl_commit(handle, ctrls->controls, ctrls->count);
993 else
994 return uvc_ctrl_rollback(handle);
995}
1055 996
1056 ret = v4l2_prio_check(vdev->prio, handle->vfh.prio); 997static int uvc_ioctl_s_ext_ctrls(struct file *file, void *fh,
1057 if (ret < 0) 998 struct v4l2_ext_controls *ctrls)
1058 return ret; 999{
1000 struct uvc_fh *handle = fh;
1001
1002 return uvc_ioctl_s_try_ext_ctrls(handle, ctrls, true);
1003}
1059 1004
1060 if (!uvc_has_privileges(handle)) 1005static int uvc_ioctl_try_ext_ctrls(struct file *file, void *fh,
1061 return -EBUSY; 1006 struct v4l2_ext_controls *ctrls)
1007{
1008 struct uvc_fh *handle = fh;
1062 1009
1063 return uvc_video_enable(stream, 0); 1010 return uvc_ioctl_s_try_ext_ctrls(handle, ctrls, false);
1011}
1012
1013static int uvc_ioctl_querymenu(struct file *file, void *fh,
1014 struct v4l2_querymenu *qm)
1015{
1016 struct uvc_fh *handle = fh;
1017 struct uvc_video_chain *chain = handle->chain;
1018
1019 return uvc_query_v4l2_menu(chain, qm);
1020}
1021
1022static int uvc_ioctl_cropcap(struct file *file, void *fh,
1023 struct v4l2_cropcap *ccap)
1024{
1025 struct uvc_fh *handle = fh;
1026 struct uvc_streaming *stream = handle->stream;
1027
1028 if (ccap->type != stream->type)
1029 return -EINVAL;
1030
1031 ccap->bounds.left = 0;
1032 ccap->bounds.top = 0;
1033 mutex_lock(&stream->mutex);
1034 ccap->bounds.width = stream->cur_frame->wWidth;
1035 ccap->bounds.height = stream->cur_frame->wHeight;
1036 mutex_unlock(&stream->mutex);
1037
1038 ccap->defrect = ccap->bounds;
1039
1040 ccap->pixelaspect.numerator = 1;
1041 ccap->pixelaspect.denominator = 1;
1042 return 0;
1043}
1044
1045static int uvc_ioctl_g_parm(struct file *file, void *fh,
1046 struct v4l2_streamparm *parm)
1047{
1048 struct uvc_fh *handle = fh;
1049 struct uvc_streaming *stream = handle->stream;
1050
1051 return uvc_v4l2_get_streamparm(stream, parm);
1052}
1053
1054static int uvc_ioctl_s_parm(struct file *file, void *fh,
1055 struct v4l2_streamparm *parm)
1056{
1057 struct uvc_fh *handle = fh;
1058 struct uvc_streaming *stream = handle->stream;
1059 int ret;
1060
1061 ret = uvc_acquire_privileges(handle);
1062 if (ret < 0)
1063 return ret;
1064
1065 return uvc_v4l2_set_streamparm(stream, parm);
1066}
1067
1068static int uvc_ioctl_enum_framesizes(struct file *file, void *fh,
1069 struct v4l2_frmsizeenum *fsize)
1070{
1071 struct uvc_fh *handle = fh;
1072 struct uvc_streaming *stream = handle->stream;
1073 struct uvc_format *format = NULL;
1074 struct uvc_frame *frame;
1075 int i;
1076
1077 /* Look for the given pixel format */
1078 for (i = 0; i < stream->nformats; i++) {
1079 if (stream->format[i].fcc == fsize->pixel_format) {
1080 format = &stream->format[i];
1081 break;
1082 }
1064 } 1083 }
1084 if (format == NULL)
1085 return -EINVAL;
1065 1086
1066 case VIDIOC_SUBSCRIBE_EVENT: 1087 if (fsize->index >= format->nframes)
1067 { 1088 return -EINVAL;
1068 struct v4l2_event_subscription *sub = arg;
1069 1089
1070 switch (sub->type) { 1090 frame = &format->frame[fsize->index];
1071 case V4L2_EVENT_CTRL: 1091 fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
1072 return v4l2_event_subscribe(&handle->vfh, sub, 0, 1092 fsize->discrete.width = frame->wWidth;
1073 &uvc_ctrl_sub_ev_ops); 1093 fsize->discrete.height = frame->wHeight;
1074 default: 1094 return 0;
1075 return -EINVAL; 1095}
1096
1097static int uvc_ioctl_enum_frameintervals(struct file *file, void *fh,
1098 struct v4l2_frmivalenum *fival)
1099{
1100 struct uvc_fh *handle = fh;
1101 struct uvc_streaming *stream = handle->stream;
1102 struct uvc_format *format = NULL;
1103 struct uvc_frame *frame = NULL;
1104 int i;
1105
1106 /* Look for the given pixel format and frame size */
1107 for (i = 0; i < stream->nformats; i++) {
1108 if (stream->format[i].fcc == fival->pixel_format) {
1109 format = &stream->format[i];
1110 break;
1076 } 1111 }
1077 } 1112 }
1113 if (format == NULL)
1114 return -EINVAL;
1078 1115
1079 case VIDIOC_UNSUBSCRIBE_EVENT: 1116 for (i = 0; i < format->nframes; i++) {
1080 return v4l2_event_unsubscribe(&handle->vfh, arg); 1117 if (format->frame[i].wWidth == fival->width &&
1118 format->frame[i].wHeight == fival->height) {
1119 frame = &format->frame[i];
1120 break;
1121 }
1122 }
1123 if (frame == NULL)
1124 return -EINVAL;
1081 1125
1082 case VIDIOC_DQEVENT: 1126 if (frame->bFrameIntervalType) {
1083 return v4l2_event_dequeue(&handle->vfh, arg, 1127 if (fival->index >= frame->bFrameIntervalType)
1084 file->f_flags & O_NONBLOCK); 1128 return -EINVAL;
1085 1129
1086 /* Analog video standards make no sense for digital cameras. */ 1130 fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
1087 case VIDIOC_ENUMSTD: 1131 fival->discrete.numerator =
1088 case VIDIOC_QUERYSTD: 1132 frame->dwFrameInterval[fival->index];
1089 case VIDIOC_G_STD: 1133 fival->discrete.denominator = 10000000;
1090 case VIDIOC_S_STD: 1134 uvc_simplify_fraction(&fival->discrete.numerator,
1135 &fival->discrete.denominator, 8, 333);
1136 } else {
1137 fival->type = V4L2_FRMIVAL_TYPE_STEPWISE;
1138 fival->stepwise.min.numerator = frame->dwFrameInterval[0];
1139 fival->stepwise.min.denominator = 10000000;
1140 fival->stepwise.max.numerator = frame->dwFrameInterval[1];
1141 fival->stepwise.max.denominator = 10000000;
1142 fival->stepwise.step.numerator = frame->dwFrameInterval[2];
1143 fival->stepwise.step.denominator = 10000000;
1144 uvc_simplify_fraction(&fival->stepwise.min.numerator,
1145 &fival->stepwise.min.denominator, 8, 333);
1146 uvc_simplify_fraction(&fival->stepwise.max.numerator,
1147 &fival->stepwise.max.denominator, 8, 333);
1148 uvc_simplify_fraction(&fival->stepwise.step.numerator,
1149 &fival->stepwise.step.denominator, 8, 333);
1150 }
1091 1151
1092 case VIDIOC_OVERLAY: 1152 return 0;
1153}
1093 1154
1094 case VIDIOC_ENUMAUDIO: 1155static int uvc_ioctl_subscribe_event(struct v4l2_fh *fh,
1095 case VIDIOC_ENUMAUDOUT: 1156 const struct v4l2_event_subscription *sub)
1157{
1158 switch (sub->type) {
1159 case V4L2_EVENT_CTRL:
1160 return v4l2_event_subscribe(fh, sub, 0, &uvc_ctrl_sub_ev_ops);
1161 default:
1162 return -EINVAL;
1163 }
1164}
1096 1165
1097 case VIDIOC_ENUMOUTPUT: 1166static long uvc_ioctl_default(struct file *file, void *fh, bool valid_prio,
1098 uvc_trace(UVC_TRACE_IOCTL, "Unsupported ioctl 0x%08x\n", cmd); 1167 unsigned int cmd, void *arg)
1099 return -ENOTTY; 1168{
1169 struct uvc_fh *handle = fh;
1170 struct uvc_video_chain *chain = handle->chain;
1100 1171
1172 switch (cmd) {
1173 /* Dynamic controls. */
1101 case UVCIOC_CTRL_MAP: 1174 case UVCIOC_CTRL_MAP:
1102 return uvc_ioctl_ctrl_map(chain, arg); 1175 return uvc_ioctl_ctrl_map(chain, arg);
1103 1176
@@ -1105,23 +1178,8 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
1105 return uvc_xu_ctrl_query(chain, arg); 1178 return uvc_xu_ctrl_query(chain, arg);
1106 1179
1107 default: 1180 default:
1108 uvc_trace(UVC_TRACE_IOCTL, "Unknown ioctl 0x%08x\n", cmd);
1109 return -ENOTTY; 1181 return -ENOTTY;
1110 } 1182 }
1111
1112 return ret;
1113}
1114
1115static long uvc_v4l2_ioctl(struct file *file,
1116 unsigned int cmd, unsigned long arg)
1117{
1118 if (uvc_trace_param & UVC_TRACE_IOCTL) {
1119 uvc_printk(KERN_DEBUG, "uvc_v4l2_ioctl(");
1120 v4l_printk_ioctl(NULL, cmd);
1121 printk(")\n");
1122 }
1123
1124 return video_usercopy(file, cmd, arg, uvc_v4l2_do_ioctl);
1125} 1183}
1126 1184
1127#ifdef CONFIG_COMPAT 1185#ifdef CONFIG_COMPAT
@@ -1304,7 +1362,7 @@ static long uvc_v4l2_compat_ioctl32(struct file *file,
1304 1362
1305 old_fs = get_fs(); 1363 old_fs = get_fs();
1306 set_fs(KERNEL_DS); 1364 set_fs(KERNEL_DS);
1307 ret = uvc_v4l2_ioctl(file, cmd, (unsigned long)&karg); 1365 ret = video_ioctl2(file, cmd, (unsigned long)&karg);
1308 set_fs(old_fs); 1366 set_fs(old_fs);
1309 1367
1310 if (ret < 0) 1368 if (ret < 0)
@@ -1365,11 +1423,48 @@ static unsigned long uvc_v4l2_get_unmapped_area(struct file *file,
1365} 1423}
1366#endif 1424#endif
1367 1425
1426const struct v4l2_ioctl_ops uvc_ioctl_ops = {
1427 .vidioc_querycap = uvc_ioctl_querycap,
1428 .vidioc_enum_fmt_vid_cap = uvc_ioctl_enum_fmt_vid_cap,
1429 .vidioc_enum_fmt_vid_out = uvc_ioctl_enum_fmt_vid_out,
1430 .vidioc_g_fmt_vid_cap = uvc_ioctl_g_fmt_vid_cap,
1431 .vidioc_g_fmt_vid_out = uvc_ioctl_g_fmt_vid_out,
1432 .vidioc_s_fmt_vid_cap = uvc_ioctl_s_fmt_vid_cap,
1433 .vidioc_s_fmt_vid_out = uvc_ioctl_s_fmt_vid_out,
1434 .vidioc_try_fmt_vid_cap = uvc_ioctl_try_fmt_vid_cap,
1435 .vidioc_try_fmt_vid_out = uvc_ioctl_try_fmt_vid_out,
1436 .vidioc_reqbufs = uvc_ioctl_reqbufs,
1437 .vidioc_querybuf = uvc_ioctl_querybuf,
1438 .vidioc_qbuf = uvc_ioctl_qbuf,
1439 .vidioc_dqbuf = uvc_ioctl_dqbuf,
1440 .vidioc_create_bufs = uvc_ioctl_create_bufs,
1441 .vidioc_streamon = uvc_ioctl_streamon,
1442 .vidioc_streamoff = uvc_ioctl_streamoff,
1443 .vidioc_enum_input = uvc_ioctl_enum_input,
1444 .vidioc_g_input = uvc_ioctl_g_input,
1445 .vidioc_s_input = uvc_ioctl_s_input,
1446 .vidioc_queryctrl = uvc_ioctl_queryctrl,
1447 .vidioc_g_ctrl = uvc_ioctl_g_ctrl,
1448 .vidioc_s_ctrl = uvc_ioctl_s_ctrl,
1449 .vidioc_g_ext_ctrls = uvc_ioctl_g_ext_ctrls,
1450 .vidioc_s_ext_ctrls = uvc_ioctl_s_ext_ctrls,
1451 .vidioc_try_ext_ctrls = uvc_ioctl_try_ext_ctrls,
1452 .vidioc_querymenu = uvc_ioctl_querymenu,
1453 .vidioc_cropcap = uvc_ioctl_cropcap,
1454 .vidioc_g_parm = uvc_ioctl_g_parm,
1455 .vidioc_s_parm = uvc_ioctl_s_parm,
1456 .vidioc_enum_framesizes = uvc_ioctl_enum_framesizes,
1457 .vidioc_enum_frameintervals = uvc_ioctl_enum_frameintervals,
1458 .vidioc_subscribe_event = uvc_ioctl_subscribe_event,
1459 .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
1460 .vidioc_default = uvc_ioctl_default,
1461};
1462
1368const struct v4l2_file_operations uvc_fops = { 1463const struct v4l2_file_operations uvc_fops = {
1369 .owner = THIS_MODULE, 1464 .owner = THIS_MODULE,
1370 .open = uvc_v4l2_open, 1465 .open = uvc_v4l2_open,
1371 .release = uvc_v4l2_release, 1466 .release = uvc_v4l2_release,
1372 .unlocked_ioctl = uvc_v4l2_ioctl, 1467 .unlocked_ioctl = video_ioctl2,
1373#ifdef CONFIG_COMPAT 1468#ifdef CONFIG_COMPAT
1374 .compat_ioctl32 = uvc_v4l2_compat_ioctl32, 1469 .compat_ioctl32 = uvc_v4l2_compat_ioctl32,
1375#endif 1470#endif
diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
index df81b9c4faf1..9637e8b86949 100644
--- a/drivers/media/usb/uvc/uvc_video.c
+++ b/drivers/media/usb/uvc/uvc_video.c
@@ -1021,6 +1021,7 @@ static int uvc_video_decode_start(struct uvc_streaming *stream,
1021 1021
1022 uvc_video_get_ts(&ts); 1022 uvc_video_get_ts(&ts);
1023 1023
1024 buf->buf.v4l2_buf.field = V4L2_FIELD_NONE;
1024 buf->buf.v4l2_buf.sequence = stream->sequence; 1025 buf->buf.v4l2_buf.sequence = stream->sequence;
1025 buf->buf.v4l2_buf.timestamp.tv_sec = ts.tv_sec; 1026 buf->buf.v4l2_buf.timestamp.tv_sec = ts.tv_sec;
1026 buf->buf.v4l2_buf.timestamp.tv_usec = 1027 buf->buf.v4l2_buf.timestamp.tv_usec =
@@ -1734,19 +1735,13 @@ int uvc_video_resume(struct uvc_streaming *stream, int reset)
1734 uvc_video_clock_reset(stream); 1735 uvc_video_clock_reset(stream);
1735 1736
1736 ret = uvc_commit_video(stream, &stream->ctrl); 1737 ret = uvc_commit_video(stream, &stream->ctrl);
1737 if (ret < 0) { 1738 if (ret < 0)
1738 uvc_queue_enable(&stream->queue, 0);
1739 return ret; 1739 return ret;
1740 }
1741 1740
1742 if (!uvc_queue_streaming(&stream->queue)) 1741 if (!uvc_queue_streaming(&stream->queue))
1743 return 0; 1742 return 0;
1744 1743
1745 ret = uvc_init_video(stream, GFP_NOIO); 1744 return uvc_init_video(stream, GFP_NOIO);
1746 if (ret < 0)
1747 uvc_queue_enable(&stream->queue, 0);
1748
1749 return ret;
1750} 1745}
1751 1746
1752/* ------------------------------------------------------------------------ 1747/* ------------------------------------------------------------------------
@@ -1778,11 +1773,6 @@ int uvc_video_init(struct uvc_streaming *stream)
1778 1773
1779 atomic_set(&stream->active, 0); 1774 atomic_set(&stream->active, 0);
1780 1775
1781 /* Initialize the video buffers queue. */
1782 ret = uvc_queue_init(&stream->queue, stream->type, !uvc_no_drop_param);
1783 if (ret)
1784 return ret;
1785
1786 /* Alternate setting 0 should be the default, yet the XBox Live Vision 1776 /* Alternate setting 0 should be the default, yet the XBox Live Vision
1787 * Cam (and possibly other devices) crash or otherwise misbehave if 1777 * Cam (and possibly other devices) crash or otherwise misbehave if
1788 * they don't receive a SET_INTERFACE request before any other video 1778 * they don't receive a SET_INTERFACE request before any other video
@@ -1889,7 +1879,6 @@ int uvc_video_enable(struct uvc_streaming *stream, int enable)
1889 usb_clear_halt(stream->dev->udev, pipe); 1879 usb_clear_halt(stream->dev->udev, pipe);
1890 } 1880 }
1891 1881
1892 uvc_queue_enable(&stream->queue, 0);
1893 uvc_video_clock_cleanup(stream); 1882 uvc_video_clock_cleanup(stream);
1894 return 0; 1883 return 0;
1895 } 1884 }
@@ -1898,10 +1887,6 @@ int uvc_video_enable(struct uvc_streaming *stream, int enable)
1898 if (ret < 0) 1887 if (ret < 0)
1899 return ret; 1888 return ret;
1900 1889
1901 ret = uvc_queue_enable(&stream->queue, 1);
1902 if (ret < 0)
1903 goto error_queue;
1904
1905 /* Commit the streaming parameters. */ 1890 /* Commit the streaming parameters. */
1906 ret = uvc_commit_video(stream, &stream->ctrl); 1891 ret = uvc_commit_video(stream, &stream->ctrl);
1907 if (ret < 0) 1892 if (ret < 0)
@@ -1916,8 +1901,6 @@ int uvc_video_enable(struct uvc_streaming *stream, int enable)
1916error_video: 1901error_video:
1917 usb_set_interface(stream->dev->udev, stream->intfnum, 0); 1902 usb_set_interface(stream->dev->udev, stream->intfnum, 0);
1918error_commit: 1903error_commit:
1919 uvc_queue_enable(&stream->queue, 0);
1920error_queue:
1921 uvc_video_clock_cleanup(stream); 1904 uvc_video_clock_cleanup(stream);
1922 1905
1923 return ret; 1906 return ret;
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index 864ada740360..f0a04b532ede 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -148,6 +148,7 @@
148#define UVC_QUIRK_PROBE_DEF 0x00000100 148#define UVC_QUIRK_PROBE_DEF 0x00000100
149#define UVC_QUIRK_RESTRICT_FRAME_RATE 0x00000200 149#define UVC_QUIRK_RESTRICT_FRAME_RATE 0x00000200
150#define UVC_QUIRK_RESTORE_CTRLS_ON_INIT 0x00000400 150#define UVC_QUIRK_RESTORE_CTRLS_ON_INIT 0x00000400
151#define UVC_QUIRK_FORCE_Y8 0x00000800
151 152
152/* Format flags */ 153/* Format flags */
153#define UVC_FMT_FLAG_COMPRESSED 0x00000001 154#define UVC_FMT_FLAG_COMPRESSED 0x00000001
@@ -579,7 +580,6 @@ struct uvc_driver {
579#define UVC_TRACE_FORMAT (1 << 3) 580#define UVC_TRACE_FORMAT (1 << 3)
580#define UVC_TRACE_CAPTURE (1 << 4) 581#define UVC_TRACE_CAPTURE (1 << 4)
581#define UVC_TRACE_CALLS (1 << 5) 582#define UVC_TRACE_CALLS (1 << 5)
582#define UVC_TRACE_IOCTL (1 << 6)
583#define UVC_TRACE_FRAME (1 << 7) 583#define UVC_TRACE_FRAME (1 << 7)
584#define UVC_TRACE_SUSPEND (1 << 8) 584#define UVC_TRACE_SUSPEND (1 << 8)
585#define UVC_TRACE_STATUS (1 << 9) 585#define UVC_TRACE_STATUS (1 << 9)
@@ -623,9 +623,9 @@ extern struct uvc_entity *uvc_entity_by_id(struct uvc_device *dev, int id);
623/* Video buffers queue management. */ 623/* Video buffers queue management. */
624extern int uvc_queue_init(struct uvc_video_queue *queue, 624extern int uvc_queue_init(struct uvc_video_queue *queue,
625 enum v4l2_buf_type type, int drop_corrupted); 625 enum v4l2_buf_type type, int drop_corrupted);
626extern int uvc_alloc_buffers(struct uvc_video_queue *queue, 626extern void uvc_queue_release(struct uvc_video_queue *queue);
627extern int uvc_request_buffers(struct uvc_video_queue *queue,
627 struct v4l2_requestbuffers *rb); 628 struct v4l2_requestbuffers *rb);
628extern void uvc_free_buffers(struct uvc_video_queue *queue);
629extern int uvc_query_buffer(struct uvc_video_queue *queue, 629extern int uvc_query_buffer(struct uvc_video_queue *queue,
630 struct v4l2_buffer *v4l2_buf); 630 struct v4l2_buffer *v4l2_buf);
631extern int uvc_create_buffers(struct uvc_video_queue *queue, 631extern int uvc_create_buffers(struct uvc_video_queue *queue,
@@ -634,7 +634,10 @@ extern int uvc_queue_buffer(struct uvc_video_queue *queue,
634 struct v4l2_buffer *v4l2_buf); 634 struct v4l2_buffer *v4l2_buf);
635extern int uvc_dequeue_buffer(struct uvc_video_queue *queue, 635extern int uvc_dequeue_buffer(struct uvc_video_queue *queue,
636 struct v4l2_buffer *v4l2_buf, int nonblocking); 636 struct v4l2_buffer *v4l2_buf, int nonblocking);
637extern int uvc_queue_enable(struct uvc_video_queue *queue, int enable); 637extern int uvc_queue_streamon(struct uvc_video_queue *queue,
638 enum v4l2_buf_type type);
639extern int uvc_queue_streamoff(struct uvc_video_queue *queue,
640 enum v4l2_buf_type type);
638extern void uvc_queue_cancel(struct uvc_video_queue *queue, int disconnect); 641extern void uvc_queue_cancel(struct uvc_video_queue *queue, int disconnect);
639extern struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue, 642extern struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue,
640 struct uvc_buffer *buf); 643 struct uvc_buffer *buf);
@@ -653,6 +656,7 @@ static inline int uvc_queue_streaming(struct uvc_video_queue *queue)
653} 656}
654 657
655/* V4L2 interface */ 658/* V4L2 interface */
659extern const struct v4l2_ioctl_ops uvc_ioctl_ops;
656extern const struct v4l2_file_operations uvc_fops; 660extern const struct v4l2_file_operations uvc_fops;
657 661
658/* Media controller */ 662/* Media controller */
diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c
index 2e9d81f4c1a5..5b808500e7e7 100644
--- a/drivers/media/v4l2-core/v4l2-common.c
+++ b/drivers/media/v4l2-core/v4l2-common.c
@@ -80,36 +80,6 @@ MODULE_LICENSE("GPL");
80 80
81/* Helper functions for control handling */ 81/* Helper functions for control handling */
82 82
83/* Check for correctness of the ctrl's value based on the data from
84 struct v4l2_queryctrl and the available menu items. Note that
85 menu_items may be NULL, in that case it is ignored. */
86int v4l2_ctrl_check(struct v4l2_ext_control *ctrl, struct v4l2_queryctrl *qctrl,
87 const char * const *menu_items)
88{
89 if (qctrl->flags & V4L2_CTRL_FLAG_DISABLED)
90 return -EINVAL;
91 if (qctrl->flags & V4L2_CTRL_FLAG_GRABBED)
92 return -EBUSY;
93 if (qctrl->type == V4L2_CTRL_TYPE_STRING)
94 return 0;
95 if (qctrl->type == V4L2_CTRL_TYPE_BUTTON ||
96 qctrl->type == V4L2_CTRL_TYPE_INTEGER64 ||
97 qctrl->type == V4L2_CTRL_TYPE_CTRL_CLASS)
98 return 0;
99 if (ctrl->value < qctrl->minimum || ctrl->value > qctrl->maximum)
100 return -ERANGE;
101 if (qctrl->type == V4L2_CTRL_TYPE_MENU && menu_items != NULL) {
102 if (menu_items[ctrl->value] == NULL ||
103 menu_items[ctrl->value][0] == '\0')
104 return -EINVAL;
105 }
106 if (qctrl->type == V4L2_CTRL_TYPE_BITMASK &&
107 (ctrl->value & ~qctrl->maximum))
108 return -ERANGE;
109 return 0;
110}
111EXPORT_SYMBOL(v4l2_ctrl_check);
112
113/* Fill in a struct v4l2_queryctrl */ 83/* Fill in a struct v4l2_queryctrl */
114int v4l2_ctrl_query_fill(struct v4l2_queryctrl *qctrl, s32 _min, s32 _max, s32 _step, s32 _def) 84int v4l2_ctrl_query_fill(struct v4l2_queryctrl *qctrl, s32 _min, s32 _max, s32 _step, s32 _def)
115{ 85{
@@ -135,101 +105,6 @@ int v4l2_ctrl_query_fill(struct v4l2_queryctrl *qctrl, s32 _min, s32 _max, s32 _
135} 105}
136EXPORT_SYMBOL(v4l2_ctrl_query_fill); 106EXPORT_SYMBOL(v4l2_ctrl_query_fill);
137 107
138/* Fill in a struct v4l2_querymenu based on the struct v4l2_queryctrl and
139 the menu. The qctrl pointer may be NULL, in which case it is ignored.
140 If menu_items is NULL, then the menu items are retrieved using
141 v4l2_ctrl_get_menu. */
142int v4l2_ctrl_query_menu(struct v4l2_querymenu *qmenu, struct v4l2_queryctrl *qctrl,
143 const char * const *menu_items)
144{
145 int i;
146
147 qmenu->reserved = 0;
148 if (menu_items == NULL)
149 menu_items = v4l2_ctrl_get_menu(qmenu->id);
150 if (menu_items == NULL ||
151 (qctrl && (qmenu->index < qctrl->minimum || qmenu->index > qctrl->maximum)))
152 return -EINVAL;
153 for (i = 0; i < qmenu->index && menu_items[i]; i++) ;
154 if (menu_items[i] == NULL || menu_items[i][0] == '\0')
155 return -EINVAL;
156 strlcpy(qmenu->name, menu_items[qmenu->index], sizeof(qmenu->name));
157 return 0;
158}
159EXPORT_SYMBOL(v4l2_ctrl_query_menu);
160
161/* Fill in a struct v4l2_querymenu based on the specified array of valid
162 menu items (terminated by V4L2_CTRL_MENU_IDS_END).
163 Use this if there are 'holes' in the list of valid menu items. */
164int v4l2_ctrl_query_menu_valid_items(struct v4l2_querymenu *qmenu, const u32 *ids)
165{
166 const char * const *menu_items = v4l2_ctrl_get_menu(qmenu->id);
167
168 qmenu->reserved = 0;
169 if (menu_items == NULL || ids == NULL)
170 return -EINVAL;
171 while (*ids != V4L2_CTRL_MENU_IDS_END) {
172 if (*ids++ == qmenu->index) {
173 strlcpy(qmenu->name, menu_items[qmenu->index],
174 sizeof(qmenu->name));
175 return 0;
176 }
177 }
178 return -EINVAL;
179}
180EXPORT_SYMBOL(v4l2_ctrl_query_menu_valid_items);
181
182/* ctrl_classes points to an array of u32 pointers, the last element is
183 a NULL pointer. Each u32 array is a 0-terminated array of control IDs.
184 Each array must be sorted low to high and belong to the same control
185 class. The array of u32 pointers must also be sorted, from low class IDs
186 to high class IDs.
187
188 This function returns the first ID that follows after the given ID.
189 When no more controls are available 0 is returned. */
190u32 v4l2_ctrl_next(const u32 * const * ctrl_classes, u32 id)
191{
192 u32 ctrl_class = V4L2_CTRL_ID2CLASS(id);
193 const u32 *pctrl;
194
195 if (ctrl_classes == NULL)
196 return 0;
197
198 /* if no query is desired, then check if the ID is part of ctrl_classes */
199 if ((id & V4L2_CTRL_FLAG_NEXT_CTRL) == 0) {
200 /* find class */
201 while (*ctrl_classes && V4L2_CTRL_ID2CLASS(**ctrl_classes) != ctrl_class)
202 ctrl_classes++;
203 if (*ctrl_classes == NULL)
204 return 0;
205 pctrl = *ctrl_classes;
206 /* find control ID */
207 while (*pctrl && *pctrl != id) pctrl++;
208 return *pctrl ? id : 0;
209 }
210 id &= V4L2_CTRL_ID_MASK;
211 id++; /* select next control */
212 /* find first class that matches (or is greater than) the class of
213 the ID */
214 while (*ctrl_classes && V4L2_CTRL_ID2CLASS(**ctrl_classes) < ctrl_class)
215 ctrl_classes++;
216 /* no more classes */
217 if (*ctrl_classes == NULL)
218 return 0;
219 pctrl = *ctrl_classes;
220 /* find first ctrl within the class that is >= ID */
221 while (*pctrl && *pctrl < id) pctrl++;
222 if (*pctrl)
223 return *pctrl;
224 /* we are at the end of the controls of the current class. */
225 /* continue with next class if available */
226 ctrl_classes++;
227 if (*ctrl_classes == NULL)
228 return 0;
229 return **ctrl_classes;
230}
231EXPORT_SYMBOL(v4l2_ctrl_next);
232
233/* I2C Helper functions */ 108/* I2C Helper functions */
234 109
235#if IS_ENABLED(CONFIG_I2C) 110#if IS_ENABLED(CONFIG_I2C)
diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
index e502a5fb2994..af635430524e 100644
--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
@@ -222,6 +222,9 @@ static int get_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_
222 222
223static int __put_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up) 223static int __put_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
224{ 224{
225 if (put_user(kp->type, &up->type))
226 return -EFAULT;
227
225 switch (kp->type) { 228 switch (kp->type) {
226 case V4L2_BUF_TYPE_VIDEO_CAPTURE: 229 case V4L2_BUF_TYPE_VIDEO_CAPTURE:
227 case V4L2_BUF_TYPE_VIDEO_OUTPUT: 230 case V4L2_BUF_TYPE_VIDEO_OUTPUT:
@@ -248,8 +251,7 @@ static int __put_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __us
248 251
249static int put_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up) 252static int put_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
250{ 253{
251 if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_format32)) || 254 if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_format32)))
252 put_user(kp->type, &up->type))
253 return -EFAULT; 255 return -EFAULT;
254 return __put_v4l2_format32(kp, up); 256 return __put_v4l2_format32(kp, up);
255} 257}
@@ -257,8 +259,8 @@ static int put_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user
257static int put_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_buffers32 __user *up) 259static int put_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_buffers32 __user *up)
258{ 260{
259 if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_create_buffers32)) || 261 if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_create_buffers32)) ||
260 copy_to_user(up, kp, offsetof(struct v4l2_create_buffers32, format.fmt))) 262 copy_to_user(up, kp, offsetof(struct v4l2_create_buffers32, format)))
261 return -EFAULT; 263 return -EFAULT;
262 return __put_v4l2_format32(&kp->format, &up->format); 264 return __put_v4l2_format32(&kp->format, &up->format);
263} 265}
264 266
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index 86012140923f..45c5b4710601 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -1658,10 +1658,8 @@ static int check_range(enum v4l2_ctrl_type type,
1658} 1658}
1659 1659
1660/* Validate a new control */ 1660/* Validate a new control */
1661static int validate_new(const struct v4l2_ctrl *ctrl, 1661static int validate_new(const struct v4l2_ctrl *ctrl, union v4l2_ctrl_ptr p_new)
1662 struct v4l2_ext_control *c)
1663{ 1662{
1664 union v4l2_ctrl_ptr ptr;
1665 unsigned idx; 1663 unsigned idx;
1666 int err = 0; 1664 int err = 0;
1667 1665
@@ -1674,19 +1672,14 @@ static int validate_new(const struct v4l2_ctrl *ctrl,
1674 case V4L2_CTRL_TYPE_BOOLEAN: 1672 case V4L2_CTRL_TYPE_BOOLEAN:
1675 case V4L2_CTRL_TYPE_BUTTON: 1673 case V4L2_CTRL_TYPE_BUTTON:
1676 case V4L2_CTRL_TYPE_CTRL_CLASS: 1674 case V4L2_CTRL_TYPE_CTRL_CLASS:
1677 ptr.p_s32 = &c->value;
1678 return ctrl->type_ops->validate(ctrl, 0, ptr);
1679
1680 case V4L2_CTRL_TYPE_INTEGER64: 1675 case V4L2_CTRL_TYPE_INTEGER64:
1681 ptr.p_s64 = &c->value64; 1676 return ctrl->type_ops->validate(ctrl, 0, p_new);
1682 return ctrl->type_ops->validate(ctrl, 0, ptr);
1683 default: 1677 default:
1684 break; 1678 break;
1685 } 1679 }
1686 } 1680 }
1687 ptr.p = c->ptr; 1681 for (idx = 0; !err && idx < ctrl->elems; idx++)
1688 for (idx = 0; !err && idx < c->size / ctrl->elem_size; idx++) 1682 err = ctrl->type_ops->validate(ctrl, idx, p_new);
1689 err = ctrl->type_ops->validate(ctrl, idx, ptr);
1690 return err; 1683 return err;
1691} 1684}
1692 1685
@@ -3012,6 +3005,7 @@ static int validate_ctrls(struct v4l2_ext_controls *cs,
3012 cs->error_idx = cs->count; 3005 cs->error_idx = cs->count;
3013 for (i = 0; i < cs->count; i++) { 3006 for (i = 0; i < cs->count; i++) {
3014 struct v4l2_ctrl *ctrl = helpers[i].ctrl; 3007 struct v4l2_ctrl *ctrl = helpers[i].ctrl;
3008 union v4l2_ctrl_ptr p_new;
3015 3009
3016 cs->error_idx = i; 3010 cs->error_idx = i;
3017 3011
@@ -3025,7 +3019,17 @@ static int validate_ctrls(struct v4l2_ext_controls *cs,
3025 best-effort to avoid that. */ 3019 best-effort to avoid that. */
3026 if (set && (ctrl->flags & V4L2_CTRL_FLAG_GRABBED)) 3020 if (set && (ctrl->flags & V4L2_CTRL_FLAG_GRABBED))
3027 return -EBUSY; 3021 return -EBUSY;
3028 ret = validate_new(ctrl, &cs->controls[i]); 3022 /*
3023 * Skip validation for now if the payload needs to be copied
3024 * from userspace into kernelspace. We'll validate those later.
3025 */
3026 if (ctrl->is_ptr)
3027 continue;
3028 if (ctrl->type == V4L2_CTRL_TYPE_INTEGER64)
3029 p_new.p_s64 = &cs->controls[i].value64;
3030 else
3031 p_new.p_s32 = &cs->controls[i].value;
3032 ret = validate_new(ctrl, p_new);
3029 if (ret) 3033 if (ret)
3030 return ret; 3034 return ret;
3031 } 3035 }
@@ -3120,7 +3124,11 @@ static int try_set_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl,
3120 /* Copy the new caller-supplied control values. 3124 /* Copy the new caller-supplied control values.
3121 user_to_new() sets 'is_new' to 1. */ 3125 user_to_new() sets 'is_new' to 1. */
3122 do { 3126 do {
3123 ret = user_to_new(cs->controls + idx, helpers[idx].ctrl); 3127 struct v4l2_ctrl *ctrl = helpers[idx].ctrl;
3128
3129 ret = user_to_new(cs->controls + idx, ctrl);
3130 if (!ret && ctrl->is_ptr)
3131 ret = validate_new(ctrl, ctrl->p_new);
3124 idx = helpers[idx].next; 3132 idx = helpers[idx].next;
3125 } while (!ret && idx); 3133 } while (!ret && idx);
3126 3134
@@ -3170,10 +3178,10 @@ int v4l2_subdev_s_ext_ctrls(struct v4l2_subdev *sd, struct v4l2_ext_controls *cs
3170EXPORT_SYMBOL(v4l2_subdev_s_ext_ctrls); 3178EXPORT_SYMBOL(v4l2_subdev_s_ext_ctrls);
3171 3179
3172/* Helper function for VIDIOC_S_CTRL compatibility */ 3180/* Helper function for VIDIOC_S_CTRL compatibility */
3173static int set_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, 3181static int set_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, u32 ch_flags)
3174 struct v4l2_ext_control *c, u32 ch_flags)
3175{ 3182{
3176 struct v4l2_ctrl *master = ctrl->cluster[0]; 3183 struct v4l2_ctrl *master = ctrl->cluster[0];
3184 int ret;
3177 int i; 3185 int i;
3178 3186
3179 /* Reset the 'is_new' flags of the cluster */ 3187 /* Reset the 'is_new' flags of the cluster */
@@ -3181,8 +3189,9 @@ static int set_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl,
3181 if (master->cluster[i]) 3189 if (master->cluster[i])
3182 master->cluster[i]->is_new = 0; 3190 master->cluster[i]->is_new = 0;
3183 3191
3184 if (c) 3192 ret = validate_new(ctrl, ctrl->p_new);
3185 user_to_new(c, ctrl); 3193 if (ret)
3194 return ret;
3186 3195
3187 /* For autoclusters with volatiles that are switched from auto to 3196 /* For autoclusters with volatiles that are switched from auto to
3188 manual mode we have to update the current volatile values since 3197 manual mode we have to update the current volatile values since
@@ -3199,15 +3208,14 @@ static int set_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl,
3199static int set_ctrl_lock(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, 3208static int set_ctrl_lock(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl,
3200 struct v4l2_ext_control *c) 3209 struct v4l2_ext_control *c)
3201{ 3210{
3202 int ret = validate_new(ctrl, c); 3211 int ret;
3203 3212
3204 if (!ret) { 3213 v4l2_ctrl_lock(ctrl);
3205 v4l2_ctrl_lock(ctrl); 3214 user_to_new(c, ctrl);
3206 ret = set_ctrl(fh, ctrl, c, 0); 3215 ret = set_ctrl(fh, ctrl, 0);
3207 if (!ret) 3216 if (!ret)
3208 cur_to_user(c, ctrl); 3217 cur_to_user(c, ctrl);
3209 v4l2_ctrl_unlock(ctrl); 3218 v4l2_ctrl_unlock(ctrl);
3210 }
3211 return ret; 3219 return ret;
3212} 3220}
3213 3221
@@ -3215,7 +3223,7 @@ int v4l2_s_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl,
3215 struct v4l2_control *control) 3223 struct v4l2_control *control)
3216{ 3224{
3217 struct v4l2_ctrl *ctrl = v4l2_ctrl_find(hdl, control->id); 3225 struct v4l2_ctrl *ctrl = v4l2_ctrl_find(hdl, control->id);
3218 struct v4l2_ext_control c; 3226 struct v4l2_ext_control c = { control->id };
3219 int ret; 3227 int ret;
3220 3228
3221 if (ctrl == NULL || !ctrl->is_int) 3229 if (ctrl == NULL || !ctrl->is_int)
@@ -3244,7 +3252,7 @@ int __v4l2_ctrl_s_ctrl(struct v4l2_ctrl *ctrl, s32 val)
3244 /* It's a driver bug if this happens. */ 3252 /* It's a driver bug if this happens. */
3245 WARN_ON(!ctrl->is_int); 3253 WARN_ON(!ctrl->is_int);
3246 ctrl->val = val; 3254 ctrl->val = val;
3247 return set_ctrl(NULL, ctrl, NULL, 0); 3255 return set_ctrl(NULL, ctrl, 0);
3248} 3256}
3249EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl); 3257EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl);
3250 3258
@@ -3255,7 +3263,7 @@ int __v4l2_ctrl_s_ctrl_int64(struct v4l2_ctrl *ctrl, s64 val)
3255 /* It's a driver bug if this happens. */ 3263 /* It's a driver bug if this happens. */
3256 WARN_ON(ctrl->is_ptr || ctrl->type != V4L2_CTRL_TYPE_INTEGER64); 3264 WARN_ON(ctrl->is_ptr || ctrl->type != V4L2_CTRL_TYPE_INTEGER64);
3257 *ctrl->p_new.p_s64 = val; 3265 *ctrl->p_new.p_s64 = val;
3258 return set_ctrl(NULL, ctrl, NULL, 0); 3266 return set_ctrl(NULL, ctrl, 0);
3259} 3267}
3260EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl_int64); 3268EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl_int64);
3261 3269
@@ -3266,7 +3274,7 @@ int __v4l2_ctrl_s_ctrl_string(struct v4l2_ctrl *ctrl, const char *s)
3266 /* It's a driver bug if this happens. */ 3274 /* It's a driver bug if this happens. */
3267 WARN_ON(ctrl->type != V4L2_CTRL_TYPE_STRING); 3275 WARN_ON(ctrl->type != V4L2_CTRL_TYPE_STRING);
3268 strlcpy(ctrl->p_new.p_char, s, ctrl->maximum + 1); 3276 strlcpy(ctrl->p_new.p_char, s, ctrl->maximum + 1);
3269 return set_ctrl(NULL, ctrl, NULL, 0); 3277 return set_ctrl(NULL, ctrl, 0);
3270} 3278}
3271EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl_string); 3279EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl_string);
3272 3280
@@ -3289,8 +3297,8 @@ EXPORT_SYMBOL(v4l2_ctrl_notify);
3289int __v4l2_ctrl_modify_range(struct v4l2_ctrl *ctrl, 3297int __v4l2_ctrl_modify_range(struct v4l2_ctrl *ctrl,
3290 s64 min, s64 max, u64 step, s64 def) 3298 s64 min, s64 max, u64 step, s64 def)
3291{ 3299{
3300 bool changed;
3292 int ret; 3301 int ret;
3293 struct v4l2_ext_control c;
3294 3302
3295 lockdep_assert_held(ctrl->handler->lock); 3303 lockdep_assert_held(ctrl->handler->lock);
3296 3304
@@ -3317,11 +3325,20 @@ int __v4l2_ctrl_modify_range(struct v4l2_ctrl *ctrl,
3317 ctrl->maximum = max; 3325 ctrl->maximum = max;
3318 ctrl->step = step; 3326 ctrl->step = step;
3319 ctrl->default_value = def; 3327 ctrl->default_value = def;
3320 c.value = *ctrl->p_cur.p_s32; 3328 cur_to_new(ctrl);
3321 if (validate_new(ctrl, &c)) 3329 if (validate_new(ctrl, ctrl->p_new)) {
3322 c.value = def; 3330 if (ctrl->type == V4L2_CTRL_TYPE_INTEGER64)
3323 if (c.value != *ctrl->p_cur.p_s32) 3331 *ctrl->p_new.p_s64 = def;
3324 ret = set_ctrl(NULL, ctrl, &c, V4L2_EVENT_CTRL_CH_RANGE); 3332 else
3333 *ctrl->p_new.p_s32 = def;
3334 }
3335
3336 if (ctrl->type == V4L2_CTRL_TYPE_INTEGER64)
3337 changed = *ctrl->p_new.p_s64 != *ctrl->p_cur.p_s64;
3338 else
3339 changed = *ctrl->p_new.p_s32 != *ctrl->p_cur.p_s32;
3340 if (changed)
3341 ret = set_ctrl(NULL, ctrl, V4L2_EVENT_CTRL_CH_RANGE);
3325 else 3342 else
3326 send_event(NULL, ctrl, V4L2_EVENT_CTRL_CH_RANGE); 3343 send_event(NULL, ctrl, V4L2_EVENT_CTRL_CH_RANGE);
3327 return ret; 3344 return ret;
diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
index 33617c365acc..9aa530a8bea9 100644
--- a/drivers/media/v4l2-core/v4l2-dev.c
+++ b/drivers/media/v4l2-core/v4l2-dev.c
@@ -194,7 +194,7 @@ static void v4l2_device_release(struct device *cd)
194 mutex_unlock(&videodev_lock); 194 mutex_unlock(&videodev_lock);
195 195
196#if defined(CONFIG_MEDIA_CONTROLLER) 196#if defined(CONFIG_MEDIA_CONTROLLER)
197 if (v4l2_dev && v4l2_dev->mdev && 197 if (v4l2_dev->mdev &&
198 vdev->vfl_type != VFL_TYPE_SUBDEV) 198 vdev->vfl_type != VFL_TYPE_SUBDEV)
199 media_device_unregister_entity(&vdev->entity); 199 media_device_unregister_entity(&vdev->entity);
200#endif 200#endif
@@ -207,7 +207,7 @@ static void v4l2_device_release(struct device *cd)
207 * TODO: In the long run all drivers that use v4l2_device should use the 207 * TODO: In the long run all drivers that use v4l2_device should use the
208 * v4l2_device release callback. This check will then be unnecessary. 208 * v4l2_device release callback. This check will then be unnecessary.
209 */ 209 */
210 if (v4l2_dev && v4l2_dev->release == NULL) 210 if (v4l2_dev->release == NULL)
211 v4l2_dev = NULL; 211 v4l2_dev = NULL;
212 212
213 /* Release video_device and perform other 213 /* Release video_device and perform other
@@ -360,27 +360,22 @@ static long v4l2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
360 * hack but it will have to do for those drivers that are not 360 * hack but it will have to do for those drivers that are not
361 * yet converted to use unlocked_ioctl. 361 * yet converted to use unlocked_ioctl.
362 * 362 *
363 * There are two options: if the driver implements struct 363 * All drivers implement struct v4l2_device, so we use the
364 * v4l2_device, then the lock defined there is used to 364 * lock defined there to serialize the ioctls.
365 * serialize the ioctls. Otherwise the v4l2 core lock defined
366 * below is used. This lock is really bad since it serializes
367 * completely independent devices.
368 * 365 *
369 * Both variants suffer from the same problem: if the driver 366 * However, if the driver sleeps, then it blocks all ioctls
370 * sleeps, then it blocks all ioctls since the lock is still 367 * since the lock is still held. This is very common for
371 * held. This is very common for VIDIOC_DQBUF since that 368 * VIDIOC_DQBUF since that normally waits for a frame to arrive.
372 * normally waits for a frame to arrive. As a result any other 369 * As a result any other ioctl calls will proceed very, very
373 * ioctl calls will proceed very, very slowly since each call 370 * slowly since each call will have to wait for the VIDIOC_QBUF
374 * will have to wait for the VIDIOC_QBUF to finish. Things that 371 * to finish. Things that should take 0.01s may now take 10-20
375 * should take 0.01s may now take 10-20 seconds. 372 * seconds.
376 * 373 *
377 * The workaround is to *not* take the lock for VIDIOC_DQBUF. 374 * The workaround is to *not* take the lock for VIDIOC_DQBUF.
378 * This actually works OK for videobuf-based drivers, since 375 * This actually works OK for videobuf-based drivers, since
379 * videobuf will take its own internal lock. 376 * videobuf will take its own internal lock.
380 */ 377 */
381 static DEFINE_MUTEX(v4l2_ioctl_mutex); 378 struct mutex *m = &vdev->v4l2_dev->ioctl_lock;
382 struct mutex *m = vdev->v4l2_dev ?
383 &vdev->v4l2_dev->ioctl_lock : &v4l2_ioctl_mutex;
384 379
385 if (cmd != VIDIOC_DQBUF && mutex_lock_interruptible(m)) 380 if (cmd != VIDIOC_DQBUF && mutex_lock_interruptible(m))
386 return -ERESTARTSYS; 381 return -ERESTARTSYS;
@@ -938,12 +933,11 @@ int __video_register_device(struct video_device *vdev, int type, int nr,
938 name_base, nr, video_device_node_name(vdev)); 933 name_base, nr, video_device_node_name(vdev));
939 934
940 /* Increase v4l2_device refcount */ 935 /* Increase v4l2_device refcount */
941 if (vdev->v4l2_dev) 936 v4l2_device_get(vdev->v4l2_dev);
942 v4l2_device_get(vdev->v4l2_dev);
943 937
944#if defined(CONFIG_MEDIA_CONTROLLER) 938#if defined(CONFIG_MEDIA_CONTROLLER)
945 /* Part 5: Register the entity. */ 939 /* Part 5: Register the entity. */
946 if (vdev->v4l2_dev && vdev->v4l2_dev->mdev && 940 if (vdev->v4l2_dev->mdev &&
947 vdev->vfl_type != VFL_TYPE_SUBDEV) { 941 vdev->vfl_type != VFL_TYPE_SUBDEV) {
948 vdev->entity.type = MEDIA_ENT_T_DEVNODE_V4L; 942 vdev->entity.type = MEDIA_ENT_T_DEVNODE_V4L;
949 vdev->entity.name = vdev->name; 943 vdev->entity.name = vdev->name;
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 9ccb19a435ef..75658717961f 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -257,7 +257,7 @@ static void v4l_print_format(const void *arg, bool write_only)
257 pr_cont(", width=%u, height=%u, " 257 pr_cont(", width=%u, height=%u, "
258 "pixelformat=%c%c%c%c, field=%s, " 258 "pixelformat=%c%c%c%c, field=%s, "
259 "bytesperline=%u, sizeimage=%u, colorspace=%d, " 259 "bytesperline=%u, sizeimage=%u, colorspace=%d, "
260 "flags %u\n", 260 "flags %x, ycbcr_enc=%u, quantization=%u\n",
261 pix->width, pix->height, 261 pix->width, pix->height,
262 (pix->pixelformat & 0xff), 262 (pix->pixelformat & 0xff),
263 (pix->pixelformat >> 8) & 0xff, 263 (pix->pixelformat >> 8) & 0xff,
@@ -265,21 +265,24 @@ static void v4l_print_format(const void *arg, bool write_only)
265 (pix->pixelformat >> 24) & 0xff, 265 (pix->pixelformat >> 24) & 0xff,
266 prt_names(pix->field, v4l2_field_names), 266 prt_names(pix->field, v4l2_field_names),
267 pix->bytesperline, pix->sizeimage, 267 pix->bytesperline, pix->sizeimage,
268 pix->colorspace, pix->flags); 268 pix->colorspace, pix->flags, pix->ycbcr_enc,
269 pix->quantization);
269 break; 270 break;
270 case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: 271 case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
271 case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: 272 case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
272 mp = &p->fmt.pix_mp; 273 mp = &p->fmt.pix_mp;
273 pr_cont(", width=%u, height=%u, " 274 pr_cont(", width=%u, height=%u, "
274 "format=%c%c%c%c, field=%s, " 275 "format=%c%c%c%c, field=%s, "
275 "colorspace=%d, num_planes=%u\n", 276 "colorspace=%d, num_planes=%u, flags=%x, "
277 "ycbcr_enc=%u, quantization=%u\n",
276 mp->width, mp->height, 278 mp->width, mp->height,
277 (mp->pixelformat & 0xff), 279 (mp->pixelformat & 0xff),
278 (mp->pixelformat >> 8) & 0xff, 280 (mp->pixelformat >> 8) & 0xff,
279 (mp->pixelformat >> 16) & 0xff, 281 (mp->pixelformat >> 16) & 0xff,
280 (mp->pixelformat >> 24) & 0xff, 282 (mp->pixelformat >> 24) & 0xff,
281 prt_names(mp->field, v4l2_field_names), 283 prt_names(mp->field, v4l2_field_names),
282 mp->colorspace, mp->num_planes); 284 mp->colorspace, mp->num_planes, mp->flags,
285 mp->ycbcr_enc, mp->quantization);
283 for (i = 0; i < mp->num_planes; i++) 286 for (i = 0; i < mp->num_planes; i++)
284 printk(KERN_DEBUG "plane %u: bytesperline=%u sizeimage=%u\n", i, 287 printk(KERN_DEBUG "plane %u: bytesperline=%u sizeimage=%u\n", i,
285 mp->plane_fmt[i].bytesperline, 288 mp->plane_fmt[i].bytesperline,
@@ -1040,7 +1043,7 @@ static int v4l_g_priority(const struct v4l2_ioctl_ops *ops,
1040 if (ops->vidioc_g_priority) 1043 if (ops->vidioc_g_priority)
1041 return ops->vidioc_g_priority(file, fh, arg); 1044 return ops->vidioc_g_priority(file, fh, arg);
1042 vfd = video_devdata(file); 1045 vfd = video_devdata(file);
1043 *p = v4l2_prio_max(&vfd->v4l2_dev->prio); 1046 *p = v4l2_prio_max(vfd->prio);
1044 return 0; 1047 return 0;
1045} 1048}
1046 1049
@@ -1055,7 +1058,7 @@ static int v4l_s_priority(const struct v4l2_ioctl_ops *ops,
1055 return ops->vidioc_s_priority(file, fh, *p); 1058 return ops->vidioc_s_priority(file, fh, *p);
1056 vfd = video_devdata(file); 1059 vfd = video_devdata(file);
1057 vfh = file->private_data; 1060 vfh = file->private_data;
1058 return v4l2_prio_change(&vfd->v4l2_dev->prio, &vfh->prio, *p); 1061 return v4l2_prio_change(vfd->prio, &vfh->prio, *p);
1059} 1062}
1060 1063
1061static int v4l_enuminput(const struct v4l2_ioctl_ops *ops, 1064static int v4l_enuminput(const struct v4l2_ioctl_ops *ops,
diff --git a/drivers/media/v4l2-core/videobuf-core.c b/drivers/media/v4l2-core/videobuf-core.c
index b91a266d0b7e..926836d1813a 100644
--- a/drivers/media/v4l2-core/videobuf-core.c
+++ b/drivers/media/v4l2-core/videobuf-core.c
@@ -51,6 +51,8 @@ MODULE_LICENSE("GPL");
51 51
52#define CALL(q, f, arg...) \ 52#define CALL(q, f, arg...) \
53 ((q->int_ops->f) ? q->int_ops->f(arg) : 0) 53 ((q->int_ops->f) ? q->int_ops->f(arg) : 0)
54#define CALLPTR(q, f, arg...) \
55 ((q->int_ops->f) ? q->int_ops->f(arg) : NULL)
54 56
55struct videobuf_buffer *videobuf_alloc_vb(struct videobuf_queue *q) 57struct videobuf_buffer *videobuf_alloc_vb(struct videobuf_queue *q)
56{ 58{
@@ -831,7 +833,7 @@ static int __videobuf_copy_to_user(struct videobuf_queue *q,
831 char __user *data, size_t count, 833 char __user *data, size_t count,
832 int nonblocking) 834 int nonblocking)
833{ 835{
834 void *vaddr = CALL(q, vaddr, buf); 836 void *vaddr = CALLPTR(q, vaddr, buf);
835 837
836 /* copy to userspace */ 838 /* copy to userspace */
837 if (count > buf->size - q->read_off) 839 if (count > buf->size - q->read_off)
@@ -848,7 +850,7 @@ static int __videobuf_copy_stream(struct videobuf_queue *q,
848 char __user *data, size_t count, size_t pos, 850 char __user *data, size_t count, size_t pos,
849 int vbihack, int nonblocking) 851 int vbihack, int nonblocking)
850{ 852{
851 unsigned int *fc = CALL(q, vaddr, buf); 853 unsigned int *fc = CALLPTR(q, vaddr, buf);
852 854
853 if (vbihack) { 855 if (vbihack) {
854 /* dirty, undocumented hack -- pass the frame counter 856 /* dirty, undocumented hack -- pass the frame counter
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index f2e43de3dd87..d09a8916e940 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -189,6 +189,8 @@ static void __vb2_queue_cancel(struct vb2_queue *q);
189static int __vb2_buf_mem_alloc(struct vb2_buffer *vb) 189static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
190{ 190{
191 struct vb2_queue *q = vb->vb2_queue; 191 struct vb2_queue *q = vb->vb2_queue;
192 enum dma_data_direction dma_dir =
193 V4L2_TYPE_IS_OUTPUT(q->type) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
192 void *mem_priv; 194 void *mem_priv;
193 int plane; 195 int plane;
194 196
@@ -200,7 +202,7 @@ static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
200 unsigned long size = PAGE_ALIGN(q->plane_sizes[plane]); 202 unsigned long size = PAGE_ALIGN(q->plane_sizes[plane]);
201 203
202 mem_priv = call_ptr_memop(vb, alloc, q->alloc_ctx[plane], 204 mem_priv = call_ptr_memop(vb, alloc, q->alloc_ctx[plane],
203 size, q->gfp_flags); 205 size, dma_dir, q->gfp_flags);
204 if (IS_ERR_OR_NULL(mem_priv)) 206 if (IS_ERR_OR_NULL(mem_priv))
205 goto free; 207 goto free;
206 208
@@ -1358,7 +1360,8 @@ static int __qbuf_userptr(struct vb2_buffer *vb, const struct v4l2_buffer *b)
1358 void *mem_priv; 1360 void *mem_priv;
1359 unsigned int plane; 1361 unsigned int plane;
1360 int ret; 1362 int ret;
1361 int write = !V4L2_TYPE_IS_OUTPUT(q->type); 1363 enum dma_data_direction dma_dir =
1364 V4L2_TYPE_IS_OUTPUT(q->type) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
1362 bool reacquired = vb->planes[0].mem_priv == NULL; 1365 bool reacquired = vb->planes[0].mem_priv == NULL;
1363 1366
1364 memset(planes, 0, sizeof(planes[0]) * vb->num_planes); 1367 memset(planes, 0, sizeof(planes[0]) * vb->num_planes);
@@ -1400,7 +1403,7 @@ static int __qbuf_userptr(struct vb2_buffer *vb, const struct v4l2_buffer *b)
1400 /* Acquire each plane's memory */ 1403 /* Acquire each plane's memory */
1401 mem_priv = call_ptr_memop(vb, get_userptr, q->alloc_ctx[plane], 1404 mem_priv = call_ptr_memop(vb, get_userptr, q->alloc_ctx[plane],
1402 planes[plane].m.userptr, 1405 planes[plane].m.userptr,
1403 planes[plane].length, write); 1406 planes[plane].length, dma_dir);
1404 if (IS_ERR_OR_NULL(mem_priv)) { 1407 if (IS_ERR_OR_NULL(mem_priv)) {
1405 dprintk(1, "failed acquiring userspace " 1408 dprintk(1, "failed acquiring userspace "
1406 "memory for plane %d\n", plane); 1409 "memory for plane %d\n", plane);
@@ -1461,7 +1464,8 @@ static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b)
1461 void *mem_priv; 1464 void *mem_priv;
1462 unsigned int plane; 1465 unsigned int plane;
1463 int ret; 1466 int ret;
1464 int write = !V4L2_TYPE_IS_OUTPUT(q->type); 1467 enum dma_data_direction dma_dir =
1468 V4L2_TYPE_IS_OUTPUT(q->type) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
1465 bool reacquired = vb->planes[0].mem_priv == NULL; 1469 bool reacquired = vb->planes[0].mem_priv == NULL;
1466 1470
1467 memset(planes, 0, sizeof(planes[0]) * vb->num_planes); 1471 memset(planes, 0, sizeof(planes[0]) * vb->num_planes);
@@ -1509,7 +1513,7 @@ static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b)
1509 1513
1510 /* Acquire each plane's memory */ 1514 /* Acquire each plane's memory */
1511 mem_priv = call_ptr_memop(vb, attach_dmabuf, q->alloc_ctx[plane], 1515 mem_priv = call_ptr_memop(vb, attach_dmabuf, q->alloc_ctx[plane],
1512 dbuf, planes[plane].length, write); 1516 dbuf, planes[plane].length, dma_dir);
1513 if (IS_ERR(mem_priv)) { 1517 if (IS_ERR(mem_priv)) {
1514 dprintk(1, "failed to attach dmabuf\n"); 1518 dprintk(1, "failed to attach dmabuf\n");
1515 ret = PTR_ERR(mem_priv); 1519 ret = PTR_ERR(mem_priv);
@@ -3385,14 +3389,14 @@ int _vb2_fop_release(struct file *file, struct mutex *lock)
3385{ 3389{
3386 struct video_device *vdev = video_devdata(file); 3390 struct video_device *vdev = video_devdata(file);
3387 3391
3392 if (lock)
3393 mutex_lock(lock);
3388 if (file->private_data == vdev->queue->owner) { 3394 if (file->private_data == vdev->queue->owner) {
3389 if (lock)
3390 mutex_lock(lock);
3391 vb2_queue_release(vdev->queue); 3395 vb2_queue_release(vdev->queue);
3392 vdev->queue->owner = NULL; 3396 vdev->queue->owner = NULL;
3393 if (lock)
3394 mutex_unlock(lock);
3395 } 3397 }
3398 if (lock)
3399 mutex_unlock(lock);
3396 return v4l2_fh_release(file); 3400 return v4l2_fh_release(file);
3397} 3401}
3398EXPORT_SYMBOL_GPL(_vb2_fop_release); 3402EXPORT_SYMBOL_GPL(_vb2_fop_release);
@@ -3455,27 +3459,16 @@ unsigned int vb2_fop_poll(struct file *file, poll_table *wait)
3455 struct video_device *vdev = video_devdata(file); 3459 struct video_device *vdev = video_devdata(file);
3456 struct vb2_queue *q = vdev->queue; 3460 struct vb2_queue *q = vdev->queue;
3457 struct mutex *lock = q->lock ? q->lock : vdev->lock; 3461 struct mutex *lock = q->lock ? q->lock : vdev->lock;
3458 unsigned long req_events = poll_requested_events(wait);
3459 unsigned res; 3462 unsigned res;
3460 void *fileio; 3463 void *fileio;
3461 bool must_lock = false;
3462
3463 /* Try to be smart: only lock if polling might start fileio,
3464 otherwise locking will only introduce unwanted delays. */
3465 if (q->num_buffers == 0 && !vb2_fileio_is_active(q)) {
3466 if (!V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_READ) &&
3467 (req_events & (POLLIN | POLLRDNORM)))
3468 must_lock = true;
3469 else if (V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_WRITE) &&
3470 (req_events & (POLLOUT | POLLWRNORM)))
3471 must_lock = true;
3472 }
3473 3464
3474 /* If locking is needed, but this helper doesn't know how, then you 3465 /*
3475 shouldn't be using this helper but you should write your own. */ 3466 * If this helper doesn't know how to lock, then you shouldn't be using
3476 WARN_ON(must_lock && !lock); 3467 * it but you should write your own.
3468 */
3469 WARN_ON(!lock);
3477 3470
3478 if (must_lock && lock && mutex_lock_interruptible(lock)) 3471 if (lock && mutex_lock_interruptible(lock))
3479 return POLLERR; 3472 return POLLERR;
3480 3473
3481 fileio = q->fileio; 3474 fileio = q->fileio;
@@ -3483,9 +3476,9 @@ unsigned int vb2_fop_poll(struct file *file, poll_table *wait)
3483 res = vb2_poll(vdev->queue, file, wait); 3476 res = vb2_poll(vdev->queue, file, wait);
3484 3477
3485 /* If fileio was started, then we have a new queue owner. */ 3478 /* If fileio was started, then we have a new queue owner. */
3486 if (must_lock && !fileio && q->fileio) 3479 if (!fileio && q->fileio)
3487 q->owner = file->private_data; 3480 q->owner = file->private_data;
3488 if (must_lock && lock) 3481 if (lock)
3489 mutex_unlock(lock); 3482 mutex_unlock(lock);
3490 return res; 3483 return res;
3491} 3484}
diff --git a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c
index 4a02ade14b4f..b481d20c8372 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-contig.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-contig.c
@@ -155,7 +155,8 @@ static void vb2_dc_put(void *buf_priv)
155 kfree(buf); 155 kfree(buf);
156} 156}
157 157
158static void *vb2_dc_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_flags) 158static void *vb2_dc_alloc(void *alloc_ctx, unsigned long size,
159 enum dma_data_direction dma_dir, gfp_t gfp_flags)
159{ 160{
160 struct vb2_dc_conf *conf = alloc_ctx; 161 struct vb2_dc_conf *conf = alloc_ctx;
161 struct device *dev = conf->dev; 162 struct device *dev = conf->dev;
@@ -176,6 +177,7 @@ static void *vb2_dc_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_flags)
176 /* Prevent the device from being released while the buffer is used */ 177 /* Prevent the device from being released while the buffer is used */
177 buf->dev = get_device(dev); 178 buf->dev = get_device(dev);
178 buf->size = size; 179 buf->size = size;
180 buf->dma_dir = dma_dir;
179 181
180 buf->handler.refcount = &buf->refcount; 182 buf->handler.refcount = &buf->refcount;
181 buf->handler.put = vb2_dc_put; 183 buf->handler.put = vb2_dc_put;
@@ -229,7 +231,7 @@ static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
229 231
230struct vb2_dc_attachment { 232struct vb2_dc_attachment {
231 struct sg_table sgt; 233 struct sg_table sgt;
232 enum dma_data_direction dir; 234 enum dma_data_direction dma_dir;
233}; 235};
234 236
235static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev, 237static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
@@ -264,7 +266,7 @@ static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
264 wr = sg_next(wr); 266 wr = sg_next(wr);
265 } 267 }
266 268
267 attach->dir = DMA_NONE; 269 attach->dma_dir = DMA_NONE;
268 dbuf_attach->priv = attach; 270 dbuf_attach->priv = attach;
269 271
270 return 0; 272 return 0;
@@ -282,16 +284,16 @@ static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
282 sgt = &attach->sgt; 284 sgt = &attach->sgt;
283 285
284 /* release the scatterlist cache */ 286 /* release the scatterlist cache */
285 if (attach->dir != DMA_NONE) 287 if (attach->dma_dir != DMA_NONE)
286 dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, 288 dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
287 attach->dir); 289 attach->dma_dir);
288 sg_free_table(sgt); 290 sg_free_table(sgt);
289 kfree(attach); 291 kfree(attach);
290 db_attach->priv = NULL; 292 db_attach->priv = NULL;
291} 293}
292 294
293static struct sg_table *vb2_dc_dmabuf_ops_map( 295static struct sg_table *vb2_dc_dmabuf_ops_map(
294 struct dma_buf_attachment *db_attach, enum dma_data_direction dir) 296 struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
295{ 297{
296 struct vb2_dc_attachment *attach = db_attach->priv; 298 struct vb2_dc_attachment *attach = db_attach->priv;
297 /* stealing dmabuf mutex to serialize map/unmap operations */ 299 /* stealing dmabuf mutex to serialize map/unmap operations */
@@ -303,27 +305,27 @@ static struct sg_table *vb2_dc_dmabuf_ops_map(
303 305
304 sgt = &attach->sgt; 306 sgt = &attach->sgt;
305 /* return previously mapped sg table */ 307 /* return previously mapped sg table */
306 if (attach->dir == dir) { 308 if (attach->dma_dir == dma_dir) {
307 mutex_unlock(lock); 309 mutex_unlock(lock);
308 return sgt; 310 return sgt;
309 } 311 }
310 312
311 /* release any previous cache */ 313 /* release any previous cache */
312 if (attach->dir != DMA_NONE) { 314 if (attach->dma_dir != DMA_NONE) {
313 dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, 315 dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
314 attach->dir); 316 attach->dma_dir);
315 attach->dir = DMA_NONE; 317 attach->dma_dir = DMA_NONE;
316 } 318 }
317 319
318 /* mapping to the client with new direction */ 320 /* mapping to the client with new direction */
319 ret = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dir); 321 ret = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dma_dir);
320 if (ret <= 0) { 322 if (ret <= 0) {
321 pr_err("failed to map scatterlist\n"); 323 pr_err("failed to map scatterlist\n");
322 mutex_unlock(lock); 324 mutex_unlock(lock);
323 return ERR_PTR(-EIO); 325 return ERR_PTR(-EIO);
324 } 326 }
325 327
326 attach->dir = dir; 328 attach->dma_dir = dma_dir;
327 329
328 mutex_unlock(lock); 330 mutex_unlock(lock);
329 331
@@ -331,7 +333,7 @@ static struct sg_table *vb2_dc_dmabuf_ops_map(
331} 333}
332 334
333static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach, 335static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
334 struct sg_table *sgt, enum dma_data_direction dir) 336 struct sg_table *sgt, enum dma_data_direction dma_dir)
335{ 337{
336 /* nothing to be done here */ 338 /* nothing to be done here */
337} 339}
@@ -460,7 +462,8 @@ static int vb2_dc_get_user_pfn(unsigned long start, int n_pages,
460} 462}
461 463
462static int vb2_dc_get_user_pages(unsigned long start, struct page **pages, 464static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
463 int n_pages, struct vm_area_struct *vma, int write) 465 int n_pages, struct vm_area_struct *vma,
466 enum dma_data_direction dma_dir)
464{ 467{
465 if (vma_is_io(vma)) { 468 if (vma_is_io(vma)) {
466 unsigned int i; 469 unsigned int i;
@@ -482,7 +485,7 @@ static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
482 int n; 485 int n;
483 486
484 n = get_user_pages(current, current->mm, start & PAGE_MASK, 487 n = get_user_pages(current, current->mm, start & PAGE_MASK,
485 n_pages, write, 1, pages, NULL); 488 n_pages, dma_dir == DMA_FROM_DEVICE, 1, pages, NULL);
486 /* negative error means that no page was pinned */ 489 /* negative error means that no page was pinned */
487 n = max(n, 0); 490 n = max(n, 0);
488 if (n != n_pages) { 491 if (n != n_pages) {
@@ -508,7 +511,15 @@ static void vb2_dc_put_userptr(void *buf_priv)
508 struct sg_table *sgt = buf->dma_sgt; 511 struct sg_table *sgt = buf->dma_sgt;
509 512
510 if (sgt) { 513 if (sgt) {
511 dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir); 514 DEFINE_DMA_ATTRS(attrs);
515
516 dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
517 /*
518 * No need to sync to CPU, it's already synced to the CPU
519 * since the finish() memop will have been called before this.
520 */
521 dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
522 buf->dma_dir, &attrs);
512 if (!vma_is_io(buf->vma)) 523 if (!vma_is_io(buf->vma))
513 vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page); 524 vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page);
514 525
@@ -551,7 +562,7 @@ static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn
551#endif 562#endif
552 563
553static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr, 564static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
554 unsigned long size, int write) 565 unsigned long size, enum dma_data_direction dma_dir)
555{ 566{
556 struct vb2_dc_conf *conf = alloc_ctx; 567 struct vb2_dc_conf *conf = alloc_ctx;
557 struct vb2_dc_buf *buf; 568 struct vb2_dc_buf *buf;
@@ -565,6 +576,9 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
565 struct sg_table *sgt; 576 struct sg_table *sgt;
566 unsigned long contig_size; 577 unsigned long contig_size;
567 unsigned long dma_align = dma_get_cache_alignment(); 578 unsigned long dma_align = dma_get_cache_alignment();
579 DEFINE_DMA_ATTRS(attrs);
580
581 dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
568 582
569 /* Only cache aligned DMA transfers are reliable */ 583 /* Only cache aligned DMA transfers are reliable */
570 if (!IS_ALIGNED(vaddr | size, dma_align)) { 584 if (!IS_ALIGNED(vaddr | size, dma_align)) {
@@ -582,7 +596,7 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
582 return ERR_PTR(-ENOMEM); 596 return ERR_PTR(-ENOMEM);
583 597
584 buf->dev = conf->dev; 598 buf->dev = conf->dev;
585 buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE; 599 buf->dma_dir = dma_dir;
586 600
587 start = vaddr & PAGE_MASK; 601 start = vaddr & PAGE_MASK;
588 offset = vaddr & ~PAGE_MASK; 602 offset = vaddr & ~PAGE_MASK;
@@ -618,7 +632,8 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
618 } 632 }
619 633
620 /* extract page list from userspace mapping */ 634 /* extract page list from userspace mapping */
621 ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, write); 635 ret = vb2_dc_get_user_pages(start, pages, n_pages, vma,
636 dma_dir == DMA_FROM_DEVICE);
622 if (ret) { 637 if (ret) {
623 unsigned long pfn; 638 unsigned long pfn;
624 if (vb2_dc_get_user_pfn(start, n_pages, vma, &pfn) == 0) { 639 if (vb2_dc_get_user_pfn(start, n_pages, vma, &pfn) == 0) {
@@ -650,8 +665,12 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
650 kfree(pages); 665 kfree(pages);
651 pages = NULL; 666 pages = NULL;
652 667
653 sgt->nents = dma_map_sg(buf->dev, sgt->sgl, sgt->orig_nents, 668 /*
654 buf->dma_dir); 669 * No need to sync to the device, this will happen later when the
670 * prepare() memop is called.
671 */
672 sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
673 buf->dma_dir, &attrs);
655 if (sgt->nents <= 0) { 674 if (sgt->nents <= 0) {
656 pr_err("failed to map scatterlist\n"); 675 pr_err("failed to map scatterlist\n");
657 ret = -EIO; 676 ret = -EIO;
@@ -673,7 +692,8 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
673 return buf; 692 return buf;
674 693
675fail_map_sg: 694fail_map_sg:
676 dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir); 695 dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
696 buf->dma_dir, &attrs);
677 697
678fail_sgt_init: 698fail_sgt_init:
679 if (!vma_is_io(buf->vma)) 699 if (!vma_is_io(buf->vma))
@@ -782,7 +802,7 @@ static void vb2_dc_detach_dmabuf(void *mem_priv)
782} 802}
783 803
784static void *vb2_dc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf, 804static void *vb2_dc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
785 unsigned long size, int write) 805 unsigned long size, enum dma_data_direction dma_dir)
786{ 806{
787 struct vb2_dc_conf *conf = alloc_ctx; 807 struct vb2_dc_conf *conf = alloc_ctx;
788 struct vb2_dc_buf *buf; 808 struct vb2_dc_buf *buf;
@@ -804,7 +824,7 @@ static void *vb2_dc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
804 return dba; 824 return dba;
805 } 825 }
806 826
807 buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE; 827 buf->dma_dir = dma_dir;
808 buf->size = size; 828 buf->size = size;
809 buf->db_attach = dba; 829 buf->db_attach = dba;
810 830
@@ -850,7 +870,8 @@ EXPORT_SYMBOL_GPL(vb2_dma_contig_init_ctx);
850 870
851void vb2_dma_contig_cleanup_ctx(void *alloc_ctx) 871void vb2_dma_contig_cleanup_ctx(void *alloc_ctx)
852{ 872{
853 kfree(alloc_ctx); 873 if (!IS_ERR_OR_NULL(alloc_ctx))
874 kfree(alloc_ctx);
854} 875}
855EXPORT_SYMBOL_GPL(vb2_dma_contig_cleanup_ctx); 876EXPORT_SYMBOL_GPL(vb2_dma_contig_cleanup_ctx);
856 877
diff --git a/drivers/media/v4l2-core/videobuf2-dma-sg.c b/drivers/media/v4l2-core/videobuf2-dma-sg.c
index 9b163a440f89..b1838abb6d00 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-sg.c
@@ -30,17 +30,30 @@ module_param(debug, int, 0644);
30 printk(KERN_DEBUG "vb2-dma-sg: " fmt, ## arg); \ 30 printk(KERN_DEBUG "vb2-dma-sg: " fmt, ## arg); \
31 } while (0) 31 } while (0)
32 32
33struct vb2_dma_sg_conf {
34 struct device *dev;
35};
36
33struct vb2_dma_sg_buf { 37struct vb2_dma_sg_buf {
38 struct device *dev;
34 void *vaddr; 39 void *vaddr;
35 struct page **pages; 40 struct page **pages;
36 int write;
37 int offset; 41 int offset;
42 enum dma_data_direction dma_dir;
38 struct sg_table sg_table; 43 struct sg_table sg_table;
44 /*
45 * This will point to sg_table when used with the MMAP or USERPTR
46 * memory model, and to the dma_buf sglist when used with the
47 * DMABUF memory model.
48 */
49 struct sg_table *dma_sgt;
39 size_t size; 50 size_t size;
40 unsigned int num_pages; 51 unsigned int num_pages;
41 atomic_t refcount; 52 atomic_t refcount;
42 struct vb2_vmarea_handler handler; 53 struct vb2_vmarea_handler handler;
43 struct vm_area_struct *vma; 54 struct vm_area_struct *vma;
55
56 struct dma_buf_attachment *db_attach;
44}; 57};
45 58
46static void vb2_dma_sg_put(void *buf_priv); 59static void vb2_dma_sg_put(void *buf_priv);
@@ -86,22 +99,31 @@ static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf,
86 return 0; 99 return 0;
87} 100}
88 101
89static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_flags) 102static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size,
103 enum dma_data_direction dma_dir, gfp_t gfp_flags)
90{ 104{
105 struct vb2_dma_sg_conf *conf = alloc_ctx;
91 struct vb2_dma_sg_buf *buf; 106 struct vb2_dma_sg_buf *buf;
107 struct sg_table *sgt;
92 int ret; 108 int ret;
93 int num_pages; 109 int num_pages;
110 DEFINE_DMA_ATTRS(attrs);
94 111
112 dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
113
114 if (WARN_ON(alloc_ctx == NULL))
115 return NULL;
95 buf = kzalloc(sizeof *buf, GFP_KERNEL); 116 buf = kzalloc(sizeof *buf, GFP_KERNEL);
96 if (!buf) 117 if (!buf)
97 return NULL; 118 return NULL;
98 119
99 buf->vaddr = NULL; 120 buf->vaddr = NULL;
100 buf->write = 0; 121 buf->dma_dir = dma_dir;
101 buf->offset = 0; 122 buf->offset = 0;
102 buf->size = size; 123 buf->size = size;
103 /* size is already page aligned */ 124 /* size is already page aligned */
104 buf->num_pages = size >> PAGE_SHIFT; 125 buf->num_pages = size >> PAGE_SHIFT;
126 buf->dma_sgt = &buf->sg_table;
105 127
106 buf->pages = kzalloc(buf->num_pages * sizeof(struct page *), 128 buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
107 GFP_KERNEL); 129 GFP_KERNEL);
@@ -112,11 +134,23 @@ static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_fla
112 if (ret) 134 if (ret)
113 goto fail_pages_alloc; 135 goto fail_pages_alloc;
114 136
115 ret = sg_alloc_table_from_pages(&buf->sg_table, buf->pages, 137 ret = sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
116 buf->num_pages, 0, size, GFP_KERNEL); 138 buf->num_pages, 0, size, GFP_KERNEL);
117 if (ret) 139 if (ret)
118 goto fail_table_alloc; 140 goto fail_table_alloc;
119 141
142 /* Prevent the device from being released while the buffer is used */
143 buf->dev = get_device(conf->dev);
144
145 sgt = &buf->sg_table;
146 /*
147 * No need to sync to the device, this will happen later when the
148 * prepare() memop is called.
149 */
150 if (dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->nents,
151 buf->dma_dir, &attrs) == 0)
152 goto fail_map;
153
120 buf->handler.refcount = &buf->refcount; 154 buf->handler.refcount = &buf->refcount;
121 buf->handler.put = vb2_dma_sg_put; 155 buf->handler.put = vb2_dma_sg_put;
122 buf->handler.arg = buf; 156 buf->handler.arg = buf;
@@ -127,6 +161,9 @@ static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_fla
127 __func__, buf->num_pages); 161 __func__, buf->num_pages);
128 return buf; 162 return buf;
129 163
164fail_map:
165 put_device(buf->dev);
166 sg_free_table(buf->dma_sgt);
130fail_table_alloc: 167fail_table_alloc:
131 num_pages = buf->num_pages; 168 num_pages = buf->num_pages;
132 while (num_pages--) 169 while (num_pages--)
@@ -141,42 +178,81 @@ fail_pages_array_alloc:
141static void vb2_dma_sg_put(void *buf_priv) 178static void vb2_dma_sg_put(void *buf_priv)
142{ 179{
143 struct vb2_dma_sg_buf *buf = buf_priv; 180 struct vb2_dma_sg_buf *buf = buf_priv;
181 struct sg_table *sgt = &buf->sg_table;
144 int i = buf->num_pages; 182 int i = buf->num_pages;
145 183
146 if (atomic_dec_and_test(&buf->refcount)) { 184 if (atomic_dec_and_test(&buf->refcount)) {
185 DEFINE_DMA_ATTRS(attrs);
186
187 dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
147 dprintk(1, "%s: Freeing buffer of %d pages\n", __func__, 188 dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
148 buf->num_pages); 189 buf->num_pages);
190 dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->nents,
191 buf->dma_dir, &attrs);
149 if (buf->vaddr) 192 if (buf->vaddr)
150 vm_unmap_ram(buf->vaddr, buf->num_pages); 193 vm_unmap_ram(buf->vaddr, buf->num_pages);
151 sg_free_table(&buf->sg_table); 194 sg_free_table(buf->dma_sgt);
152 while (--i >= 0) 195 while (--i >= 0)
153 __free_page(buf->pages[i]); 196 __free_page(buf->pages[i]);
154 kfree(buf->pages); 197 kfree(buf->pages);
198 put_device(buf->dev);
155 kfree(buf); 199 kfree(buf);
156 } 200 }
157} 201}
158 202
203static void vb2_dma_sg_prepare(void *buf_priv)
204{
205 struct vb2_dma_sg_buf *buf = buf_priv;
206 struct sg_table *sgt = buf->dma_sgt;
207
208 /* DMABUF exporter will flush the cache for us */
209 if (buf->db_attach)
210 return;
211
212 dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
213}
214
215static void vb2_dma_sg_finish(void *buf_priv)
216{
217 struct vb2_dma_sg_buf *buf = buf_priv;
218 struct sg_table *sgt = buf->dma_sgt;
219
220 /* DMABUF exporter will flush the cache for us */
221 if (buf->db_attach)
222 return;
223
224 dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
225}
226
159static inline int vma_is_io(struct vm_area_struct *vma) 227static inline int vma_is_io(struct vm_area_struct *vma)
160{ 228{
161 return !!(vma->vm_flags & (VM_IO | VM_PFNMAP)); 229 return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
162} 230}
163 231
164static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr, 232static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
165 unsigned long size, int write) 233 unsigned long size,
234 enum dma_data_direction dma_dir)
166{ 235{
236 struct vb2_dma_sg_conf *conf = alloc_ctx;
167 struct vb2_dma_sg_buf *buf; 237 struct vb2_dma_sg_buf *buf;
168 unsigned long first, last; 238 unsigned long first, last;
169 int num_pages_from_user; 239 int num_pages_from_user;
170 struct vm_area_struct *vma; 240 struct vm_area_struct *vma;
241 struct sg_table *sgt;
242 DEFINE_DMA_ATTRS(attrs);
243
244 dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
171 245
172 buf = kzalloc(sizeof *buf, GFP_KERNEL); 246 buf = kzalloc(sizeof *buf, GFP_KERNEL);
173 if (!buf) 247 if (!buf)
174 return NULL; 248 return NULL;
175 249
176 buf->vaddr = NULL; 250 buf->vaddr = NULL;
177 buf->write = write; 251 buf->dev = conf->dev;
252 buf->dma_dir = dma_dir;
178 buf->offset = vaddr & ~PAGE_MASK; 253 buf->offset = vaddr & ~PAGE_MASK;
179 buf->size = size; 254 buf->size = size;
255 buf->dma_sgt = &buf->sg_table;
180 256
181 first = (vaddr & PAGE_MASK) >> PAGE_SHIFT; 257 first = (vaddr & PAGE_MASK) >> PAGE_SHIFT;
182 last = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT; 258 last = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
@@ -221,7 +297,7 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
221 num_pages_from_user = get_user_pages(current, current->mm, 297 num_pages_from_user = get_user_pages(current, current->mm,
222 vaddr & PAGE_MASK, 298 vaddr & PAGE_MASK,
223 buf->num_pages, 299 buf->num_pages,
224 write, 300 buf->dma_dir == DMA_FROM_DEVICE,
225 1, /* force */ 301 1, /* force */
226 buf->pages, 302 buf->pages,
227 NULL); 303 NULL);
@@ -229,12 +305,22 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
229 if (num_pages_from_user != buf->num_pages) 305 if (num_pages_from_user != buf->num_pages)
230 goto userptr_fail_get_user_pages; 306 goto userptr_fail_get_user_pages;
231 307
232 if (sg_alloc_table_from_pages(&buf->sg_table, buf->pages, 308 if (sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
233 buf->num_pages, buf->offset, size, 0)) 309 buf->num_pages, buf->offset, size, 0))
234 goto userptr_fail_alloc_table_from_pages; 310 goto userptr_fail_alloc_table_from_pages;
235 311
312 sgt = &buf->sg_table;
313 /*
314 * No need to sync to the device, this will happen later when the
315 * prepare() memop is called.
316 */
317 if (dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->nents,
318 buf->dma_dir, &attrs) == 0)
319 goto userptr_fail_map;
236 return buf; 320 return buf;
237 321
322userptr_fail_map:
323 sg_free_table(&buf->sg_table);
238userptr_fail_alloc_table_from_pages: 324userptr_fail_alloc_table_from_pages:
239userptr_fail_get_user_pages: 325userptr_fail_get_user_pages:
240 dprintk(1, "get_user_pages requested/got: %d/%d]\n", 326 dprintk(1, "get_user_pages requested/got: %d/%d]\n",
@@ -257,15 +343,20 @@ userptr_fail_alloc_pages:
257static void vb2_dma_sg_put_userptr(void *buf_priv) 343static void vb2_dma_sg_put_userptr(void *buf_priv)
258{ 344{
259 struct vb2_dma_sg_buf *buf = buf_priv; 345 struct vb2_dma_sg_buf *buf = buf_priv;
346 struct sg_table *sgt = &buf->sg_table;
260 int i = buf->num_pages; 347 int i = buf->num_pages;
348 DEFINE_DMA_ATTRS(attrs);
349
350 dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
261 351
262 dprintk(1, "%s: Releasing userspace buffer of %d pages\n", 352 dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
263 __func__, buf->num_pages); 353 __func__, buf->num_pages);
354 dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir, &attrs);
264 if (buf->vaddr) 355 if (buf->vaddr)
265 vm_unmap_ram(buf->vaddr, buf->num_pages); 356 vm_unmap_ram(buf->vaddr, buf->num_pages);
266 sg_free_table(&buf->sg_table); 357 sg_free_table(buf->dma_sgt);
267 while (--i >= 0) { 358 while (--i >= 0) {
268 if (buf->write) 359 if (buf->dma_dir == DMA_FROM_DEVICE)
269 set_page_dirty_lock(buf->pages[i]); 360 set_page_dirty_lock(buf->pages[i]);
270 if (!vma_is_io(buf->vma)) 361 if (!vma_is_io(buf->vma))
271 put_page(buf->pages[i]); 362 put_page(buf->pages[i]);
@@ -281,14 +372,16 @@ static void *vb2_dma_sg_vaddr(void *buf_priv)
281 372
282 BUG_ON(!buf); 373 BUG_ON(!buf);
283 374
284 if (!buf->vaddr) 375 if (!buf->vaddr) {
285 buf->vaddr = vm_map_ram(buf->pages, 376 if (buf->db_attach)
286 buf->num_pages, 377 buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);
287 -1, 378 else
288 PAGE_KERNEL); 379 buf->vaddr = vm_map_ram(buf->pages,
380 buf->num_pages, -1, PAGE_KERNEL);
381 }
289 382
290 /* add offset in case userptr is not page-aligned */ 383 /* add offset in case userptr is not page-aligned */
291 return buf->vaddr + buf->offset; 384 return buf->vaddr ? buf->vaddr + buf->offset : NULL;
292} 385}
293 386
294static unsigned int vb2_dma_sg_num_users(void *buf_priv) 387static unsigned int vb2_dma_sg_num_users(void *buf_priv)
@@ -335,11 +428,279 @@ static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
335 return 0; 428 return 0;
336} 429}
337 430
431/*********************************************/
432/* DMABUF ops for exporters */
433/*********************************************/
434
435struct vb2_dma_sg_attachment {
436 struct sg_table sgt;
437 enum dma_data_direction dma_dir;
438};
439
440static int vb2_dma_sg_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
441 struct dma_buf_attachment *dbuf_attach)
442{
443 struct vb2_dma_sg_attachment *attach;
444 unsigned int i;
445 struct scatterlist *rd, *wr;
446 struct sg_table *sgt;
447 struct vb2_dma_sg_buf *buf = dbuf->priv;
448 int ret;
449
450 attach = kzalloc(sizeof(*attach), GFP_KERNEL);
451 if (!attach)
452 return -ENOMEM;
453
454 sgt = &attach->sgt;
455 /* Copy the buf->base_sgt scatter list to the attachment, as we can't
456 * map the same scatter list to multiple attachments at the same time.
457 */
458 ret = sg_alloc_table(sgt, buf->dma_sgt->orig_nents, GFP_KERNEL);
459 if (ret) {
460 kfree(attach);
461 return -ENOMEM;
462 }
463
464 rd = buf->dma_sgt->sgl;
465 wr = sgt->sgl;
466 for (i = 0; i < sgt->orig_nents; ++i) {
467 sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
468 rd = sg_next(rd);
469 wr = sg_next(wr);
470 }
471
472 attach->dma_dir = DMA_NONE;
473 dbuf_attach->priv = attach;
474
475 return 0;
476}
477
478static void vb2_dma_sg_dmabuf_ops_detach(struct dma_buf *dbuf,
479 struct dma_buf_attachment *db_attach)
480{
481 struct vb2_dma_sg_attachment *attach = db_attach->priv;
482 struct sg_table *sgt;
483
484 if (!attach)
485 return;
486
487 sgt = &attach->sgt;
488
489 /* release the scatterlist cache */
490 if (attach->dma_dir != DMA_NONE)
491 dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
492 attach->dma_dir);
493 sg_free_table(sgt);
494 kfree(attach);
495 db_attach->priv = NULL;
496}
497
498static struct sg_table *vb2_dma_sg_dmabuf_ops_map(
499 struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
500{
501 struct vb2_dma_sg_attachment *attach = db_attach->priv;
502 /* stealing dmabuf mutex to serialize map/unmap operations */
503 struct mutex *lock = &db_attach->dmabuf->lock;
504 struct sg_table *sgt;
505 int ret;
506
507 mutex_lock(lock);
508
509 sgt = &attach->sgt;
510 /* return previously mapped sg table */
511 if (attach->dma_dir == dma_dir) {
512 mutex_unlock(lock);
513 return sgt;
514 }
515
516 /* release any previous cache */
517 if (attach->dma_dir != DMA_NONE) {
518 dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
519 attach->dma_dir);
520 attach->dma_dir = DMA_NONE;
521 }
522
523 /* mapping to the client with new direction */
524 ret = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dma_dir);
525 if (ret <= 0) {
526 pr_err("failed to map scatterlist\n");
527 mutex_unlock(lock);
528 return ERR_PTR(-EIO);
529 }
530
531 attach->dma_dir = dma_dir;
532
533 mutex_unlock(lock);
534
535 return sgt;
536}
537
538static void vb2_dma_sg_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
539 struct sg_table *sgt, enum dma_data_direction dma_dir)
540{
541 /* nothing to be done here */
542}
543
544static void vb2_dma_sg_dmabuf_ops_release(struct dma_buf *dbuf)
545{
546 /* drop reference obtained in vb2_dma_sg_get_dmabuf */
547 vb2_dma_sg_put(dbuf->priv);
548}
549
550static void *vb2_dma_sg_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
551{
552 struct vb2_dma_sg_buf *buf = dbuf->priv;
553
554 return buf->vaddr ? buf->vaddr + pgnum * PAGE_SIZE : NULL;
555}
556
557static void *vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf)
558{
559 struct vb2_dma_sg_buf *buf = dbuf->priv;
560
561 return vb2_dma_sg_vaddr(buf);
562}
563
564static int vb2_dma_sg_dmabuf_ops_mmap(struct dma_buf *dbuf,
565 struct vm_area_struct *vma)
566{
567 return vb2_dma_sg_mmap(dbuf->priv, vma);
568}
569
570static struct dma_buf_ops vb2_dma_sg_dmabuf_ops = {
571 .attach = vb2_dma_sg_dmabuf_ops_attach,
572 .detach = vb2_dma_sg_dmabuf_ops_detach,
573 .map_dma_buf = vb2_dma_sg_dmabuf_ops_map,
574 .unmap_dma_buf = vb2_dma_sg_dmabuf_ops_unmap,
575 .kmap = vb2_dma_sg_dmabuf_ops_kmap,
576 .kmap_atomic = vb2_dma_sg_dmabuf_ops_kmap,
577 .vmap = vb2_dma_sg_dmabuf_ops_vmap,
578 .mmap = vb2_dma_sg_dmabuf_ops_mmap,
579 .release = vb2_dma_sg_dmabuf_ops_release,
580};
581
582static struct dma_buf *vb2_dma_sg_get_dmabuf(void *buf_priv, unsigned long flags)
583{
584 struct vb2_dma_sg_buf *buf = buf_priv;
585 struct dma_buf *dbuf;
586
587 if (WARN_ON(!buf->dma_sgt))
588 return NULL;
589
590 dbuf = dma_buf_export(buf, &vb2_dma_sg_dmabuf_ops, buf->size, flags, NULL);
591 if (IS_ERR(dbuf))
592 return NULL;
593
594 /* dmabuf keeps reference to vb2 buffer */
595 atomic_inc(&buf->refcount);
596
597 return dbuf;
598}
599
600/*********************************************/
601/* callbacks for DMABUF buffers */
602/*********************************************/
603
604static int vb2_dma_sg_map_dmabuf(void *mem_priv)
605{
606 struct vb2_dma_sg_buf *buf = mem_priv;
607 struct sg_table *sgt;
608
609 if (WARN_ON(!buf->db_attach)) {
610 pr_err("trying to pin a non attached buffer\n");
611 return -EINVAL;
612 }
613
614 if (WARN_ON(buf->dma_sgt)) {
615 pr_err("dmabuf buffer is already pinned\n");
616 return 0;
617 }
618
619 /* get the associated scatterlist for this buffer */
620 sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
621 if (IS_ERR(sgt)) {
622 pr_err("Error getting dmabuf scatterlist\n");
623 return -EINVAL;
624 }
625
626 buf->dma_sgt = sgt;
627 buf->vaddr = NULL;
628
629 return 0;
630}
631
632static void vb2_dma_sg_unmap_dmabuf(void *mem_priv)
633{
634 struct vb2_dma_sg_buf *buf = mem_priv;
635 struct sg_table *sgt = buf->dma_sgt;
636
637 if (WARN_ON(!buf->db_attach)) {
638 pr_err("trying to unpin a not attached buffer\n");
639 return;
640 }
641
642 if (WARN_ON(!sgt)) {
643 pr_err("dmabuf buffer is already unpinned\n");
644 return;
645 }
646
647 if (buf->vaddr) {
648 dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
649 buf->vaddr = NULL;
650 }
651 dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
652
653 buf->dma_sgt = NULL;
654}
655
656static void vb2_dma_sg_detach_dmabuf(void *mem_priv)
657{
658 struct vb2_dma_sg_buf *buf = mem_priv;
659
660 /* if vb2 works correctly you should never detach mapped buffer */
661 if (WARN_ON(buf->dma_sgt))
662 vb2_dma_sg_unmap_dmabuf(buf);
663
664 /* detach this attachment */
665 dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
666 kfree(buf);
667}
668
669static void *vb2_dma_sg_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
670 unsigned long size, enum dma_data_direction dma_dir)
671{
672 struct vb2_dma_sg_conf *conf = alloc_ctx;
673 struct vb2_dma_sg_buf *buf;
674 struct dma_buf_attachment *dba;
675
676 if (dbuf->size < size)
677 return ERR_PTR(-EFAULT);
678
679 buf = kzalloc(sizeof(*buf), GFP_KERNEL);
680 if (!buf)
681 return ERR_PTR(-ENOMEM);
682
683 buf->dev = conf->dev;
684 /* create attachment for the dmabuf with the user device */
685 dba = dma_buf_attach(dbuf, buf->dev);
686 if (IS_ERR(dba)) {
687 pr_err("failed to attach dmabuf\n");
688 kfree(buf);
689 return dba;
690 }
691
692 buf->dma_dir = dma_dir;
693 buf->size = size;
694 buf->db_attach = dba;
695
696 return buf;
697}
698
338static void *vb2_dma_sg_cookie(void *buf_priv) 699static void *vb2_dma_sg_cookie(void *buf_priv)
339{ 700{
340 struct vb2_dma_sg_buf *buf = buf_priv; 701 struct vb2_dma_sg_buf *buf = buf_priv;
341 702
342 return &buf->sg_table; 703 return buf->dma_sgt;
343} 704}
344 705
345const struct vb2_mem_ops vb2_dma_sg_memops = { 706const struct vb2_mem_ops vb2_dma_sg_memops = {
@@ -347,13 +708,41 @@ const struct vb2_mem_ops vb2_dma_sg_memops = {
347 .put = vb2_dma_sg_put, 708 .put = vb2_dma_sg_put,
348 .get_userptr = vb2_dma_sg_get_userptr, 709 .get_userptr = vb2_dma_sg_get_userptr,
349 .put_userptr = vb2_dma_sg_put_userptr, 710 .put_userptr = vb2_dma_sg_put_userptr,
711 .prepare = vb2_dma_sg_prepare,
712 .finish = vb2_dma_sg_finish,
350 .vaddr = vb2_dma_sg_vaddr, 713 .vaddr = vb2_dma_sg_vaddr,
351 .mmap = vb2_dma_sg_mmap, 714 .mmap = vb2_dma_sg_mmap,
352 .num_users = vb2_dma_sg_num_users, 715 .num_users = vb2_dma_sg_num_users,
716 .get_dmabuf = vb2_dma_sg_get_dmabuf,
717 .map_dmabuf = vb2_dma_sg_map_dmabuf,
718 .unmap_dmabuf = vb2_dma_sg_unmap_dmabuf,
719 .attach_dmabuf = vb2_dma_sg_attach_dmabuf,
720 .detach_dmabuf = vb2_dma_sg_detach_dmabuf,
353 .cookie = vb2_dma_sg_cookie, 721 .cookie = vb2_dma_sg_cookie,
354}; 722};
355EXPORT_SYMBOL_GPL(vb2_dma_sg_memops); 723EXPORT_SYMBOL_GPL(vb2_dma_sg_memops);
356 724
725void *vb2_dma_sg_init_ctx(struct device *dev)
726{
727 struct vb2_dma_sg_conf *conf;
728
729 conf = kzalloc(sizeof(*conf), GFP_KERNEL);
730 if (!conf)
731 return ERR_PTR(-ENOMEM);
732
733 conf->dev = dev;
734
735 return conf;
736}
737EXPORT_SYMBOL_GPL(vb2_dma_sg_init_ctx);
738
739void vb2_dma_sg_cleanup_ctx(void *alloc_ctx)
740{
741 if (!IS_ERR_OR_NULL(alloc_ctx))
742 kfree(alloc_ctx);
743}
744EXPORT_SYMBOL_GPL(vb2_dma_sg_cleanup_ctx);
745
357MODULE_DESCRIPTION("dma scatter/gather memory handling routines for videobuf2"); 746MODULE_DESCRIPTION("dma scatter/gather memory handling routines for videobuf2");
358MODULE_AUTHOR("Andrzej Pietrasiewicz"); 747MODULE_AUTHOR("Andrzej Pietrasiewicz");
359MODULE_LICENSE("GPL"); 748MODULE_LICENSE("GPL");
diff --git a/drivers/media/v4l2-core/videobuf2-vmalloc.c b/drivers/media/v4l2-core/videobuf2-vmalloc.c
index 313d9771b2bc..fba944e50227 100644
--- a/drivers/media/v4l2-core/videobuf2-vmalloc.c
+++ b/drivers/media/v4l2-core/videobuf2-vmalloc.c
@@ -25,7 +25,7 @@ struct vb2_vmalloc_buf {
25 void *vaddr; 25 void *vaddr;
26 struct page **pages; 26 struct page **pages;
27 struct vm_area_struct *vma; 27 struct vm_area_struct *vma;
28 int write; 28 enum dma_data_direction dma_dir;
29 unsigned long size; 29 unsigned long size;
30 unsigned int n_pages; 30 unsigned int n_pages;
31 atomic_t refcount; 31 atomic_t refcount;
@@ -35,7 +35,8 @@ struct vb2_vmalloc_buf {
35 35
36static void vb2_vmalloc_put(void *buf_priv); 36static void vb2_vmalloc_put(void *buf_priv);
37 37
38static void *vb2_vmalloc_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_flags) 38static void *vb2_vmalloc_alloc(void *alloc_ctx, unsigned long size,
39 enum dma_data_direction dma_dir, gfp_t gfp_flags)
39{ 40{
40 struct vb2_vmalloc_buf *buf; 41 struct vb2_vmalloc_buf *buf;
41 42
@@ -45,6 +46,7 @@ static void *vb2_vmalloc_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_fl
45 46
46 buf->size = size; 47 buf->size = size;
47 buf->vaddr = vmalloc_user(buf->size); 48 buf->vaddr = vmalloc_user(buf->size);
49 buf->dma_dir = dma_dir;
48 buf->handler.refcount = &buf->refcount; 50 buf->handler.refcount = &buf->refcount;
49 buf->handler.put = vb2_vmalloc_put; 51 buf->handler.put = vb2_vmalloc_put;
50 buf->handler.arg = buf; 52 buf->handler.arg = buf;
@@ -70,7 +72,8 @@ static void vb2_vmalloc_put(void *buf_priv)
70} 72}
71 73
72static void *vb2_vmalloc_get_userptr(void *alloc_ctx, unsigned long vaddr, 74static void *vb2_vmalloc_get_userptr(void *alloc_ctx, unsigned long vaddr,
73 unsigned long size, int write) 75 unsigned long size,
76 enum dma_data_direction dma_dir)
74{ 77{
75 struct vb2_vmalloc_buf *buf; 78 struct vb2_vmalloc_buf *buf;
76 unsigned long first, last; 79 unsigned long first, last;
@@ -82,7 +85,7 @@ static void *vb2_vmalloc_get_userptr(void *alloc_ctx, unsigned long vaddr,
82 if (!buf) 85 if (!buf)
83 return NULL; 86 return NULL;
84 87
85 buf->write = write; 88 buf->dma_dir = dma_dir;
86 offset = vaddr & ~PAGE_MASK; 89 offset = vaddr & ~PAGE_MASK;
87 buf->size = size; 90 buf->size = size;
88 91
@@ -107,7 +110,8 @@ static void *vb2_vmalloc_get_userptr(void *alloc_ctx, unsigned long vaddr,
107 /* current->mm->mmap_sem is taken by videobuf2 core */ 110 /* current->mm->mmap_sem is taken by videobuf2 core */
108 n_pages = get_user_pages(current, current->mm, 111 n_pages = get_user_pages(current, current->mm,
109 vaddr & PAGE_MASK, buf->n_pages, 112 vaddr & PAGE_MASK, buf->n_pages,
110 write, 1, /* force */ 113 dma_dir == DMA_FROM_DEVICE,
114 1, /* force */
111 buf->pages, NULL); 115 buf->pages, NULL);
112 if (n_pages != buf->n_pages) 116 if (n_pages != buf->n_pages)
113 goto fail_get_user_pages; 117 goto fail_get_user_pages;
@@ -144,14 +148,13 @@ static void vb2_vmalloc_put_userptr(void *buf_priv)
144 if (vaddr) 148 if (vaddr)
145 vm_unmap_ram((void *)vaddr, buf->n_pages); 149 vm_unmap_ram((void *)vaddr, buf->n_pages);
146 for (i = 0; i < buf->n_pages; ++i) { 150 for (i = 0; i < buf->n_pages; ++i) {
147 if (buf->write) 151 if (buf->dma_dir == DMA_FROM_DEVICE)
148 set_page_dirty_lock(buf->pages[i]); 152 set_page_dirty_lock(buf->pages[i]);
149 put_page(buf->pages[i]); 153 put_page(buf->pages[i]);
150 } 154 }
151 kfree(buf->pages); 155 kfree(buf->pages);
152 } else { 156 } else {
153 if (buf->vma) 157 vb2_put_vma(buf->vma);
154 vb2_put_vma(buf->vma);
155 iounmap(buf->vaddr); 158 iounmap(buf->vaddr);
156 } 159 }
157 kfree(buf); 160 kfree(buf);
@@ -209,6 +212,176 @@ static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma)
209} 212}
210 213
211/*********************************************/ 214/*********************************************/
215/* DMABUF ops for exporters */
216/*********************************************/
217
218struct vb2_vmalloc_attachment {
219 struct sg_table sgt;
220 enum dma_data_direction dma_dir;
221};
222
223static int vb2_vmalloc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
224 struct dma_buf_attachment *dbuf_attach)
225{
226 struct vb2_vmalloc_attachment *attach;
227 struct vb2_vmalloc_buf *buf = dbuf->priv;
228 int num_pages = PAGE_ALIGN(buf->size) / PAGE_SIZE;
229 struct sg_table *sgt;
230 struct scatterlist *sg;
231 void *vaddr = buf->vaddr;
232 int ret;
233 int i;
234
235 attach = kzalloc(sizeof(*attach), GFP_KERNEL);
236 if (!attach)
237 return -ENOMEM;
238
239 sgt = &attach->sgt;
240 ret = sg_alloc_table(sgt, num_pages, GFP_KERNEL);
241 if (ret) {
242 kfree(attach);
243 return ret;
244 }
245 for_each_sg(sgt->sgl, sg, sgt->nents, i) {
246 struct page *page = vmalloc_to_page(vaddr);
247
248 if (!page) {
249 sg_free_table(sgt);
250 kfree(attach);
251 return -ENOMEM;
252 }
253 sg_set_page(sg, page, PAGE_SIZE, 0);
254 vaddr += PAGE_SIZE;
255 }
256
257 attach->dma_dir = DMA_NONE;
258 dbuf_attach->priv = attach;
259 return 0;
260}
261
262static void vb2_vmalloc_dmabuf_ops_detach(struct dma_buf *dbuf,
263 struct dma_buf_attachment *db_attach)
264{
265 struct vb2_vmalloc_attachment *attach = db_attach->priv;
266 struct sg_table *sgt;
267
268 if (!attach)
269 return;
270
271 sgt = &attach->sgt;
272
273 /* release the scatterlist cache */
274 if (attach->dma_dir != DMA_NONE)
275 dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
276 attach->dma_dir);
277 sg_free_table(sgt);
278 kfree(attach);
279 db_attach->priv = NULL;
280}
281
282static struct sg_table *vb2_vmalloc_dmabuf_ops_map(
283 struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
284{
285 struct vb2_vmalloc_attachment *attach = db_attach->priv;
286 /* stealing dmabuf mutex to serialize map/unmap operations */
287 struct mutex *lock = &db_attach->dmabuf->lock;
288 struct sg_table *sgt;
289 int ret;
290
291 mutex_lock(lock);
292
293 sgt = &attach->sgt;
294 /* return previously mapped sg table */
295 if (attach->dma_dir == dma_dir) {
296 mutex_unlock(lock);
297 return sgt;
298 }
299
300 /* release any previous cache */
301 if (attach->dma_dir != DMA_NONE) {
302 dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
303 attach->dma_dir);
304 attach->dma_dir = DMA_NONE;
305 }
306
307 /* mapping to the client with new direction */
308 ret = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dma_dir);
309 if (ret <= 0) {
310 pr_err("failed to map scatterlist\n");
311 mutex_unlock(lock);
312 return ERR_PTR(-EIO);
313 }
314
315 attach->dma_dir = dma_dir;
316
317 mutex_unlock(lock);
318
319 return sgt;
320}
321
322static void vb2_vmalloc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
323 struct sg_table *sgt, enum dma_data_direction dma_dir)
324{
325 /* nothing to be done here */
326}
327
328static void vb2_vmalloc_dmabuf_ops_release(struct dma_buf *dbuf)
329{
330 /* drop reference obtained in vb2_vmalloc_get_dmabuf */
331 vb2_vmalloc_put(dbuf->priv);
332}
333
334static void *vb2_vmalloc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
335{
336 struct vb2_vmalloc_buf *buf = dbuf->priv;
337
338 return buf->vaddr + pgnum * PAGE_SIZE;
339}
340
341static void *vb2_vmalloc_dmabuf_ops_vmap(struct dma_buf *dbuf)
342{
343 struct vb2_vmalloc_buf *buf = dbuf->priv;
344
345 return buf->vaddr;
346}
347
348static int vb2_vmalloc_dmabuf_ops_mmap(struct dma_buf *dbuf,
349 struct vm_area_struct *vma)
350{
351 return vb2_vmalloc_mmap(dbuf->priv, vma);
352}
353
354static struct dma_buf_ops vb2_vmalloc_dmabuf_ops = {
355 .attach = vb2_vmalloc_dmabuf_ops_attach,
356 .detach = vb2_vmalloc_dmabuf_ops_detach,
357 .map_dma_buf = vb2_vmalloc_dmabuf_ops_map,
358 .unmap_dma_buf = vb2_vmalloc_dmabuf_ops_unmap,
359 .kmap = vb2_vmalloc_dmabuf_ops_kmap,
360 .kmap_atomic = vb2_vmalloc_dmabuf_ops_kmap,
361 .vmap = vb2_vmalloc_dmabuf_ops_vmap,
362 .mmap = vb2_vmalloc_dmabuf_ops_mmap,
363 .release = vb2_vmalloc_dmabuf_ops_release,
364};
365
366static struct dma_buf *vb2_vmalloc_get_dmabuf(void *buf_priv, unsigned long flags)
367{
368 struct vb2_vmalloc_buf *buf = buf_priv;
369 struct dma_buf *dbuf;
370
371 if (WARN_ON(!buf->vaddr))
372 return NULL;
373
374 dbuf = dma_buf_export(buf, &vb2_vmalloc_dmabuf_ops, buf->size, flags, NULL);
375 if (IS_ERR(dbuf))
376 return NULL;
377
378 /* dmabuf keeps reference to vb2 buffer */
379 atomic_inc(&buf->refcount);
380
381 return dbuf;
382}
383
384/*********************************************/
212/* callbacks for DMABUF buffers */ 385/* callbacks for DMABUF buffers */
213/*********************************************/ 386/*********************************************/
214 387
@@ -240,7 +413,7 @@ static void vb2_vmalloc_detach_dmabuf(void *mem_priv)
240} 413}
241 414
242static void *vb2_vmalloc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf, 415static void *vb2_vmalloc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
243 unsigned long size, int write) 416 unsigned long size, enum dma_data_direction dma_dir)
244{ 417{
245 struct vb2_vmalloc_buf *buf; 418 struct vb2_vmalloc_buf *buf;
246 419
@@ -252,7 +425,7 @@ static void *vb2_vmalloc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
252 return ERR_PTR(-ENOMEM); 425 return ERR_PTR(-ENOMEM);
253 426
254 buf->dbuf = dbuf; 427 buf->dbuf = dbuf;
255 buf->write = write; 428 buf->dma_dir = dma_dir;
256 buf->size = size; 429 buf->size = size;
257 430
258 return buf; 431 return buf;
@@ -264,6 +437,7 @@ const struct vb2_mem_ops vb2_vmalloc_memops = {
264 .put = vb2_vmalloc_put, 437 .put = vb2_vmalloc_put,
265 .get_userptr = vb2_vmalloc_get_userptr, 438 .get_userptr = vb2_vmalloc_get_userptr,
266 .put_userptr = vb2_vmalloc_put_userptr, 439 .put_userptr = vb2_vmalloc_put_userptr,
440 .get_dmabuf = vb2_vmalloc_get_dmabuf,
267 .map_dmabuf = vb2_vmalloc_map_dmabuf, 441 .map_dmabuf = vb2_vmalloc_map_dmabuf,
268 .unmap_dmabuf = vb2_vmalloc_unmap_dmabuf, 442 .unmap_dmabuf = vb2_vmalloc_unmap_dmabuf,
269 .attach_dmabuf = vb2_vmalloc_attach_dmabuf, 443 .attach_dmabuf = vb2_vmalloc_attach_dmabuf,
diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig
index 6d91c27fd4c8..191383d8c94d 100644
--- a/drivers/memory/Kconfig
+++ b/drivers/memory/Kconfig
@@ -41,6 +41,14 @@ config TI_EMIF
41 parameters and other settings during frequency, voltage and 41 parameters and other settings during frequency, voltage and
42 temperature changes 42 temperature changes
43 43
44config OMAP_GPMC
45 bool
46 help
47 This driver is for the General Purpose Memory Controller (GPMC)
48 present on Texas Instruments SoCs (e.g. OMAP2+). GPMC allows
49 interfacing to a variety of asynchronous as well as synchronous
50 memory drives like NOR, NAND, OneNAND, SRAM.
51
44config MVEBU_DEVBUS 52config MVEBU_DEVBUS
45 bool "Marvell EBU Device Bus Controller" 53 bool "Marvell EBU Device Bus Controller"
46 default y 54 default y
@@ -61,16 +69,6 @@ config TEGRA20_MC
61 analysis, especially for IOMMU/GART(Graphics Address 69 analysis, especially for IOMMU/GART(Graphics Address
62 Relocation Table) module. 70 Relocation Table) module.
63 71
64config TEGRA30_MC
65 bool "Tegra30 Memory Controller(MC) driver"
66 default y
67 depends on ARCH_TEGRA_3x_SOC
68 help
69 This driver is for the Memory Controller(MC) module available
70 in Tegra30 SoCs, mainly for a address translation fault
71 analysis, especially for IOMMU/SMMU(System Memory Management
72 Unit) module.
73
74config FSL_CORENET_CF 72config FSL_CORENET_CF
75 tristate "Freescale CoreNet Error Reporting" 73 tristate "Freescale CoreNet Error Reporting"
76 depends on FSL_SOC_BOOKE 74 depends on FSL_SOC_BOOKE
@@ -85,4 +83,6 @@ config FSL_IFC
85 bool 83 bool
86 depends on FSL_SOC 84 depends on FSL_SOC
87 85
86source "drivers/memory/tegra/Kconfig"
87
88endif 88endif
diff --git a/drivers/memory/Makefile b/drivers/memory/Makefile
index c32d31981be3..6b6548124473 100644
--- a/drivers/memory/Makefile
+++ b/drivers/memory/Makefile
@@ -8,8 +8,10 @@ endif
8obj-$(CONFIG_ATMEL_SDRAMC) += atmel-sdramc.o 8obj-$(CONFIG_ATMEL_SDRAMC) += atmel-sdramc.o
9obj-$(CONFIG_TI_AEMIF) += ti-aemif.o 9obj-$(CONFIG_TI_AEMIF) += ti-aemif.o
10obj-$(CONFIG_TI_EMIF) += emif.o 10obj-$(CONFIG_TI_EMIF) += emif.o
11obj-$(CONFIG_OMAP_GPMC) += omap-gpmc.o
11obj-$(CONFIG_FSL_CORENET_CF) += fsl-corenet-cf.o 12obj-$(CONFIG_FSL_CORENET_CF) += fsl-corenet-cf.o
12obj-$(CONFIG_FSL_IFC) += fsl_ifc.o 13obj-$(CONFIG_FSL_IFC) += fsl_ifc.o
13obj-$(CONFIG_MVEBU_DEVBUS) += mvebu-devbus.o 14obj-$(CONFIG_MVEBU_DEVBUS) += mvebu-devbus.o
14obj-$(CONFIG_TEGRA20_MC) += tegra20-mc.o 15obj-$(CONFIG_TEGRA20_MC) += tegra20-mc.o
15obj-$(CONFIG_TEGRA30_MC) += tegra30-mc.o 16
17obj-$(CONFIG_TEGRA_MC) += tegra/
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
new file mode 100644
index 000000000000..ffc5e60c0664
--- /dev/null
+++ b/drivers/memory/omap-gpmc.c
@@ -0,0 +1,2092 @@
1/*
2 * GPMC support functions
3 *
4 * Copyright (C) 2005-2006 Nokia Corporation
5 *
6 * Author: Juha Yrjola
7 *
8 * Copyright (C) 2009 Texas Instruments
9 * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 */
15#undef DEBUG
16
17#include <linux/irq.h>
18#include <linux/kernel.h>
19#include <linux/init.h>
20#include <linux/err.h>
21#include <linux/clk.h>
22#include <linux/ioport.h>
23#include <linux/spinlock.h>
24#include <linux/io.h>
25#include <linux/module.h>
26#include <linux/interrupt.h>
27#include <linux/platform_device.h>
28#include <linux/of.h>
29#include <linux/of_address.h>
30#include <linux/of_mtd.h>
31#include <linux/of_device.h>
32#include <linux/omap-gpmc.h>
33#include <linux/mtd/nand.h>
34#include <linux/pm_runtime.h>
35
36#include <linux/platform_data/mtd-nand-omap2.h>
37#include <linux/platform_data/mtd-onenand-omap2.h>
38
39#include <asm/mach-types.h>
40
41#define DEVICE_NAME "omap-gpmc"
42
43/* GPMC register offsets */
44#define GPMC_REVISION 0x00
45#define GPMC_SYSCONFIG 0x10
46#define GPMC_SYSSTATUS 0x14
47#define GPMC_IRQSTATUS 0x18
48#define GPMC_IRQENABLE 0x1c
49#define GPMC_TIMEOUT_CONTROL 0x40
50#define GPMC_ERR_ADDRESS 0x44
51#define GPMC_ERR_TYPE 0x48
52#define GPMC_CONFIG 0x50
53#define GPMC_STATUS 0x54
54#define GPMC_PREFETCH_CONFIG1 0x1e0
55#define GPMC_PREFETCH_CONFIG2 0x1e4
56#define GPMC_PREFETCH_CONTROL 0x1ec
57#define GPMC_PREFETCH_STATUS 0x1f0
58#define GPMC_ECC_CONFIG 0x1f4
59#define GPMC_ECC_CONTROL 0x1f8
60#define GPMC_ECC_SIZE_CONFIG 0x1fc
61#define GPMC_ECC1_RESULT 0x200
62#define GPMC_ECC_BCH_RESULT_0 0x240 /* not available on OMAP2 */
63#define GPMC_ECC_BCH_RESULT_1 0x244 /* not available on OMAP2 */
64#define GPMC_ECC_BCH_RESULT_2 0x248 /* not available on OMAP2 */
65#define GPMC_ECC_BCH_RESULT_3 0x24c /* not available on OMAP2 */
66#define GPMC_ECC_BCH_RESULT_4 0x300 /* not available on OMAP2 */
67#define GPMC_ECC_BCH_RESULT_5 0x304 /* not available on OMAP2 */
68#define GPMC_ECC_BCH_RESULT_6 0x308 /* not available on OMAP2 */
69
70/* GPMC ECC control settings */
71#define GPMC_ECC_CTRL_ECCCLEAR 0x100
72#define GPMC_ECC_CTRL_ECCDISABLE 0x000
73#define GPMC_ECC_CTRL_ECCREG1 0x001
74#define GPMC_ECC_CTRL_ECCREG2 0x002
75#define GPMC_ECC_CTRL_ECCREG3 0x003
76#define GPMC_ECC_CTRL_ECCREG4 0x004
77#define GPMC_ECC_CTRL_ECCREG5 0x005
78#define GPMC_ECC_CTRL_ECCREG6 0x006
79#define GPMC_ECC_CTRL_ECCREG7 0x007
80#define GPMC_ECC_CTRL_ECCREG8 0x008
81#define GPMC_ECC_CTRL_ECCREG9 0x009
82
83#define GPMC_CONFIG_LIMITEDADDRESS BIT(1)
84
85#define GPMC_CONFIG2_CSEXTRADELAY BIT(7)
86#define GPMC_CONFIG3_ADVEXTRADELAY BIT(7)
87#define GPMC_CONFIG4_OEEXTRADELAY BIT(7)
88#define GPMC_CONFIG4_WEEXTRADELAY BIT(23)
89#define GPMC_CONFIG6_CYCLE2CYCLEDIFFCSEN BIT(6)
90#define GPMC_CONFIG6_CYCLE2CYCLESAMECSEN BIT(7)
91
92#define GPMC_CS0_OFFSET 0x60
93#define GPMC_CS_SIZE 0x30
94#define GPMC_BCH_SIZE 0x10
95
96#define GPMC_MEM_END 0x3FFFFFFF
97
98#define GPMC_CHUNK_SHIFT 24 /* 16 MB */
99#define GPMC_SECTION_SHIFT 28 /* 128 MB */
100
101#define CS_NUM_SHIFT 24
102#define ENABLE_PREFETCH (0x1 << 7)
103#define DMA_MPU_MODE 2
104
105#define GPMC_REVISION_MAJOR(l) ((l >> 4) & 0xf)
106#define GPMC_REVISION_MINOR(l) (l & 0xf)
107
108#define GPMC_HAS_WR_ACCESS 0x1
109#define GPMC_HAS_WR_DATA_MUX_BUS 0x2
110#define GPMC_HAS_MUX_AAD 0x4
111
112#define GPMC_NR_WAITPINS 4
113
114#define GPMC_CS_CONFIG1 0x00
115#define GPMC_CS_CONFIG2 0x04
116#define GPMC_CS_CONFIG3 0x08
117#define GPMC_CS_CONFIG4 0x0c
118#define GPMC_CS_CONFIG5 0x10
119#define GPMC_CS_CONFIG6 0x14
120#define GPMC_CS_CONFIG7 0x18
121#define GPMC_CS_NAND_COMMAND 0x1c
122#define GPMC_CS_NAND_ADDRESS 0x20
123#define GPMC_CS_NAND_DATA 0x24
124
125/* Control Commands */
126#define GPMC_CONFIG_RDY_BSY 0x00000001
127#define GPMC_CONFIG_DEV_SIZE 0x00000002
128#define GPMC_CONFIG_DEV_TYPE 0x00000003
129#define GPMC_SET_IRQ_STATUS 0x00000004
130
131#define GPMC_CONFIG1_WRAPBURST_SUPP (1 << 31)
132#define GPMC_CONFIG1_READMULTIPLE_SUPP (1 << 30)
133#define GPMC_CONFIG1_READTYPE_ASYNC (0 << 29)
134#define GPMC_CONFIG1_READTYPE_SYNC (1 << 29)
135#define GPMC_CONFIG1_WRITEMULTIPLE_SUPP (1 << 28)
136#define GPMC_CONFIG1_WRITETYPE_ASYNC (0 << 27)
137#define GPMC_CONFIG1_WRITETYPE_SYNC (1 << 27)
138#define GPMC_CONFIG1_CLKACTIVATIONTIME(val) ((val & 3) << 25)
139#define GPMC_CONFIG1_PAGE_LEN(val) ((val & 3) << 23)
140#define GPMC_CONFIG1_WAIT_READ_MON (1 << 22)
141#define GPMC_CONFIG1_WAIT_WRITE_MON (1 << 21)
142#define GPMC_CONFIG1_WAIT_MON_IIME(val) ((val & 3) << 18)
143#define GPMC_CONFIG1_WAIT_PIN_SEL(val) ((val & 3) << 16)
144#define GPMC_CONFIG1_DEVICESIZE(val) ((val & 3) << 12)
145#define GPMC_CONFIG1_DEVICESIZE_16 GPMC_CONFIG1_DEVICESIZE(1)
146#define GPMC_CONFIG1_DEVICETYPE(val) ((val & 3) << 10)
147#define GPMC_CONFIG1_DEVICETYPE_NOR GPMC_CONFIG1_DEVICETYPE(0)
148#define GPMC_CONFIG1_MUXTYPE(val) ((val & 3) << 8)
149#define GPMC_CONFIG1_TIME_PARA_GRAN (1 << 4)
150#define GPMC_CONFIG1_FCLK_DIV(val) (val & 3)
151#define GPMC_CONFIG1_FCLK_DIV2 (GPMC_CONFIG1_FCLK_DIV(1))
152#define GPMC_CONFIG1_FCLK_DIV3 (GPMC_CONFIG1_FCLK_DIV(2))
153#define GPMC_CONFIG1_FCLK_DIV4 (GPMC_CONFIG1_FCLK_DIV(3))
154#define GPMC_CONFIG7_CSVALID (1 << 6)
155
156#define GPMC_DEVICETYPE_NOR 0
157#define GPMC_DEVICETYPE_NAND 2
158#define GPMC_CONFIG_WRITEPROTECT 0x00000010
159#define WR_RD_PIN_MONITORING 0x00600000
160
161#define GPMC_ENABLE_IRQ 0x0000000d
162
163/* ECC commands */
164#define GPMC_ECC_READ 0 /* Reset Hardware ECC for read */
165#define GPMC_ECC_WRITE 1 /* Reset Hardware ECC for write */
166#define GPMC_ECC_READSYN 2 /* Reset before syndrom is read back */
167
168/* XXX: Only NAND irq has been considered,currently these are the only ones used
169 */
170#define GPMC_NR_IRQ 2
171
172struct gpmc_cs_data {
173 const char *name;
174
175#define GPMC_CS_RESERVED (1 << 0)
176 u32 flags;
177
178 struct resource mem;
179};
180
181struct gpmc_client_irq {
182 unsigned irq;
183 u32 bitmask;
184};
185
186/* Structure to save gpmc cs context */
187struct gpmc_cs_config {
188 u32 config1;
189 u32 config2;
190 u32 config3;
191 u32 config4;
192 u32 config5;
193 u32 config6;
194 u32 config7;
195 int is_valid;
196};
197
198/*
199 * Structure to save/restore gpmc context
200 * to support core off on OMAP3
201 */
202struct omap3_gpmc_regs {
203 u32 sysconfig;
204 u32 irqenable;
205 u32 timeout_ctrl;
206 u32 config;
207 u32 prefetch_config1;
208 u32 prefetch_config2;
209 u32 prefetch_control;
210 struct gpmc_cs_config cs_context[GPMC_CS_NUM];
211};
212
213static struct gpmc_client_irq gpmc_client_irq[GPMC_NR_IRQ];
214static struct irq_chip gpmc_irq_chip;
215static int gpmc_irq_start;
216
217static struct resource gpmc_mem_root;
218static struct gpmc_cs_data gpmc_cs[GPMC_CS_NUM];
219static DEFINE_SPINLOCK(gpmc_mem_lock);
220/* Define chip-selects as reserved by default until probe completes */
221static unsigned int gpmc_cs_num = GPMC_CS_NUM;
222static unsigned int gpmc_nr_waitpins;
223static struct device *gpmc_dev;
224static int gpmc_irq;
225static resource_size_t phys_base, mem_size;
226static unsigned gpmc_capability;
227static void __iomem *gpmc_base;
228
229static struct clk *gpmc_l3_clk;
230
231static irqreturn_t gpmc_handle_irq(int irq, void *dev);
232
233static void gpmc_write_reg(int idx, u32 val)
234{
235 writel_relaxed(val, gpmc_base + idx);
236}
237
238static u32 gpmc_read_reg(int idx)
239{
240 return readl_relaxed(gpmc_base + idx);
241}
242
243void gpmc_cs_write_reg(int cs, int idx, u32 val)
244{
245 void __iomem *reg_addr;
246
247 reg_addr = gpmc_base + GPMC_CS0_OFFSET + (cs * GPMC_CS_SIZE) + idx;
248 writel_relaxed(val, reg_addr);
249}
250
251static u32 gpmc_cs_read_reg(int cs, int idx)
252{
253 void __iomem *reg_addr;
254
255 reg_addr = gpmc_base + GPMC_CS0_OFFSET + (cs * GPMC_CS_SIZE) + idx;
256 return readl_relaxed(reg_addr);
257}
258
259/* TODO: Add support for gpmc_fck to clock framework and use it */
260static unsigned long gpmc_get_fclk_period(void)
261{
262 unsigned long rate = clk_get_rate(gpmc_l3_clk);
263
264 rate /= 1000;
265 rate = 1000000000 / rate; /* In picoseconds */
266
267 return rate;
268}
269
270static unsigned int gpmc_ns_to_ticks(unsigned int time_ns)
271{
272 unsigned long tick_ps;
273
274 /* Calculate in picosecs to yield more exact results */
275 tick_ps = gpmc_get_fclk_period();
276
277 return (time_ns * 1000 + tick_ps - 1) / tick_ps;
278}
279
280static unsigned int gpmc_ps_to_ticks(unsigned int time_ps)
281{
282 unsigned long tick_ps;
283
284 /* Calculate in picosecs to yield more exact results */
285 tick_ps = gpmc_get_fclk_period();
286
287 return (time_ps + tick_ps - 1) / tick_ps;
288}
289
290unsigned int gpmc_ticks_to_ns(unsigned int ticks)
291{
292 return ticks * gpmc_get_fclk_period() / 1000;
293}
294
295static unsigned int gpmc_ticks_to_ps(unsigned int ticks)
296{
297 return ticks * gpmc_get_fclk_period();
298}
299
300static unsigned int gpmc_round_ps_to_ticks(unsigned int time_ps)
301{
302 unsigned long ticks = gpmc_ps_to_ticks(time_ps);
303
304 return ticks * gpmc_get_fclk_period();
305}
306
307static inline void gpmc_cs_modify_reg(int cs, int reg, u32 mask, bool value)
308{
309 u32 l;
310
311 l = gpmc_cs_read_reg(cs, reg);
312 if (value)
313 l |= mask;
314 else
315 l &= ~mask;
316 gpmc_cs_write_reg(cs, reg, l);
317}
318
319static void gpmc_cs_bool_timings(int cs, const struct gpmc_bool_timings *p)
320{
321 gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG1,
322 GPMC_CONFIG1_TIME_PARA_GRAN,
323 p->time_para_granularity);
324 gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG2,
325 GPMC_CONFIG2_CSEXTRADELAY, p->cs_extra_delay);
326 gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG3,
327 GPMC_CONFIG3_ADVEXTRADELAY, p->adv_extra_delay);
328 gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG4,
329 GPMC_CONFIG4_OEEXTRADELAY, p->oe_extra_delay);
330 gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG4,
331 GPMC_CONFIG4_OEEXTRADELAY, p->we_extra_delay);
332 gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG6,
333 GPMC_CONFIG6_CYCLE2CYCLESAMECSEN,
334 p->cycle2cyclesamecsen);
335 gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG6,
336 GPMC_CONFIG6_CYCLE2CYCLEDIFFCSEN,
337 p->cycle2cyclediffcsen);
338}
339
340#ifdef DEBUG
341static int get_gpmc_timing_reg(int cs, int reg, int st_bit, int end_bit,
342 bool raw, bool noval, int shift,
343 const char *name)
344{
345 u32 l;
346 int nr_bits, max_value, mask;
347
348 l = gpmc_cs_read_reg(cs, reg);
349 nr_bits = end_bit - st_bit + 1;
350 max_value = (1 << nr_bits) - 1;
351 mask = max_value << st_bit;
352 l = (l & mask) >> st_bit;
353 if (shift)
354 l = (shift << l);
355 if (noval && (l == 0))
356 return 0;
357 if (!raw) {
358 unsigned int time_ns_min, time_ns, time_ns_max;
359
360 time_ns_min = gpmc_ticks_to_ns(l ? l - 1 : 0);
361 time_ns = gpmc_ticks_to_ns(l);
362 time_ns_max = gpmc_ticks_to_ns(l + 1 > max_value ?
363 max_value : l + 1);
364 pr_info("gpmc,%s = <%u> (%u - %u ns, %i ticks)\n",
365 name, time_ns, time_ns_min, time_ns_max, l);
366 } else {
367 pr_info("gpmc,%s = <%u>\n", name, l);
368 }
369
370 return l;
371}
372
373#define GPMC_PRINT_CONFIG(cs, config) \
374 pr_info("cs%i %s: 0x%08x\n", cs, #config, \
375 gpmc_cs_read_reg(cs, config))
376#define GPMC_GET_RAW(reg, st, end, field) \
377 get_gpmc_timing_reg(cs, (reg), (st), (end), 1, 0, 0, field)
378#define GPMC_GET_RAW_BOOL(reg, st, end, field) \
379 get_gpmc_timing_reg(cs, (reg), (st), (end), 1, 1, 0, field)
380#define GPMC_GET_RAW_SHIFT(reg, st, end, shift, field) \
381 get_gpmc_timing_reg(cs, (reg), (st), (end), 1, 1, (shift), field)
382#define GPMC_GET_TICKS(reg, st, end, field) \
383 get_gpmc_timing_reg(cs, (reg), (st), (end), 0, 0, 0, field)
384
385static void gpmc_show_regs(int cs, const char *desc)
386{
387 pr_info("gpmc cs%i %s:\n", cs, desc);
388 GPMC_PRINT_CONFIG(cs, GPMC_CS_CONFIG1);
389 GPMC_PRINT_CONFIG(cs, GPMC_CS_CONFIG2);
390 GPMC_PRINT_CONFIG(cs, GPMC_CS_CONFIG3);
391 GPMC_PRINT_CONFIG(cs, GPMC_CS_CONFIG4);
392 GPMC_PRINT_CONFIG(cs, GPMC_CS_CONFIG5);
393 GPMC_PRINT_CONFIG(cs, GPMC_CS_CONFIG6);
394}
395
396/*
397 * Note that gpmc,wait-pin handing wrongly assumes bit 8 is available,
398 * see commit c9fb809.
399 */
400static void gpmc_cs_show_timings(int cs, const char *desc)
401{
402 gpmc_show_regs(cs, desc);
403
404 pr_info("gpmc cs%i access configuration:\n", cs);
405 GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 4, 4, "time-para-granularity");
406 GPMC_GET_RAW(GPMC_CS_CONFIG1, 8, 9, "mux-add-data");
407 GPMC_GET_RAW(GPMC_CS_CONFIG1, 12, 13, "device-width");
408 GPMC_GET_RAW(GPMC_CS_CONFIG1, 16, 17, "wait-pin");
409 GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 21, 21, "wait-on-write");
410 GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 22, 22, "wait-on-read");
411 GPMC_GET_RAW_SHIFT(GPMC_CS_CONFIG1, 23, 24, 4, "burst-length");
412 GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 27, 27, "sync-write");
413 GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 28, 28, "burst-write");
414 GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 29, 29, "gpmc,sync-read");
415 GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 30, 30, "burst-read");
416 GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 31, 31, "burst-wrap");
417
418 GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG2, 7, 7, "cs-extra-delay");
419
420 GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG3, 7, 7, "adv-extra-delay");
421
422 GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG4, 23, 23, "we-extra-delay");
423 GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG4, 7, 7, "oe-extra-delay");
424
425 GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG6, 7, 7, "cycle2cycle-samecsen");
426 GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG6, 6, 6, "cycle2cycle-diffcsen");
427
428 pr_info("gpmc cs%i timings configuration:\n", cs);
429 GPMC_GET_TICKS(GPMC_CS_CONFIG2, 0, 3, "cs-on-ns");
430 GPMC_GET_TICKS(GPMC_CS_CONFIG2, 8, 12, "cs-rd-off-ns");
431 GPMC_GET_TICKS(GPMC_CS_CONFIG2, 16, 20, "cs-wr-off-ns");
432
433 GPMC_GET_TICKS(GPMC_CS_CONFIG3, 0, 3, "adv-on-ns");
434 GPMC_GET_TICKS(GPMC_CS_CONFIG3, 8, 12, "adv-rd-off-ns");
435 GPMC_GET_TICKS(GPMC_CS_CONFIG3, 16, 20, "adv-wr-off-ns");
436
437 GPMC_GET_TICKS(GPMC_CS_CONFIG4, 0, 3, "oe-on-ns");
438 GPMC_GET_TICKS(GPMC_CS_CONFIG4, 8, 12, "oe-off-ns");
439 GPMC_GET_TICKS(GPMC_CS_CONFIG4, 16, 19, "we-on-ns");
440 GPMC_GET_TICKS(GPMC_CS_CONFIG4, 24, 28, "we-off-ns");
441
442 GPMC_GET_TICKS(GPMC_CS_CONFIG5, 0, 4, "rd-cycle-ns");
443 GPMC_GET_TICKS(GPMC_CS_CONFIG5, 8, 12, "wr-cycle-ns");
444 GPMC_GET_TICKS(GPMC_CS_CONFIG5, 16, 20, "access-ns");
445
446 GPMC_GET_TICKS(GPMC_CS_CONFIG5, 24, 27, "page-burst-access-ns");
447
448 GPMC_GET_TICKS(GPMC_CS_CONFIG6, 0, 3, "bus-turnaround-ns");
449 GPMC_GET_TICKS(GPMC_CS_CONFIG6, 8, 11, "cycle2cycle-delay-ns");
450
451 GPMC_GET_TICKS(GPMC_CS_CONFIG1, 18, 19, "wait-monitoring-ns");
452 GPMC_GET_TICKS(GPMC_CS_CONFIG1, 25, 26, "clk-activation-ns");
453
454 GPMC_GET_TICKS(GPMC_CS_CONFIG6, 16, 19, "wr-data-mux-bus-ns");
455 GPMC_GET_TICKS(GPMC_CS_CONFIG6, 24, 28, "wr-access-ns");
456}
457#else
458static inline void gpmc_cs_show_timings(int cs, const char *desc)
459{
460}
461#endif
462
463static int set_gpmc_timing_reg(int cs, int reg, int st_bit, int end_bit,
464 int time, const char *name)
465{
466 u32 l;
467 int ticks, mask, nr_bits;
468
469 if (time == 0)
470 ticks = 0;
471 else
472 ticks = gpmc_ns_to_ticks(time);
473 nr_bits = end_bit - st_bit + 1;
474 mask = (1 << nr_bits) - 1;
475
476 if (ticks > mask) {
477 pr_err("%s: GPMC error! CS%d: %s: %d ns, %d ticks > %d\n",
478 __func__, cs, name, time, ticks, mask);
479
480 return -1;
481 }
482
483 l = gpmc_cs_read_reg(cs, reg);
484#ifdef DEBUG
485 printk(KERN_INFO
486 "GPMC CS%d: %-10s: %3d ticks, %3lu ns (was %3i ticks) %3d ns\n",
487 cs, name, ticks, gpmc_get_fclk_period() * ticks / 1000,
488 (l >> st_bit) & mask, time);
489#endif
490 l &= ~(mask << st_bit);
491 l |= ticks << st_bit;
492 gpmc_cs_write_reg(cs, reg, l);
493
494 return 0;
495}
496
497#define GPMC_SET_ONE(reg, st, end, field) \
498 if (set_gpmc_timing_reg(cs, (reg), (st), (end), \
499 t->field, #field) < 0) \
500 return -1
501
502int gpmc_calc_divider(unsigned int sync_clk)
503{
504 int div;
505 u32 l;
506
507 l = sync_clk + (gpmc_get_fclk_period() - 1);
508 div = l / gpmc_get_fclk_period();
509 if (div > 4)
510 return -1;
511 if (div <= 0)
512 div = 1;
513
514 return div;
515}
516
517int gpmc_cs_set_timings(int cs, const struct gpmc_timings *t)
518{
519 int div;
520 u32 l;
521
522 gpmc_cs_show_timings(cs, "before gpmc_cs_set_timings");
523 div = gpmc_calc_divider(t->sync_clk);
524 if (div < 0)
525 return div;
526
527 GPMC_SET_ONE(GPMC_CS_CONFIG2, 0, 3, cs_on);
528 GPMC_SET_ONE(GPMC_CS_CONFIG2, 8, 12, cs_rd_off);
529 GPMC_SET_ONE(GPMC_CS_CONFIG2, 16, 20, cs_wr_off);
530
531 GPMC_SET_ONE(GPMC_CS_CONFIG3, 0, 3, adv_on);
532 GPMC_SET_ONE(GPMC_CS_CONFIG3, 8, 12, adv_rd_off);
533 GPMC_SET_ONE(GPMC_CS_CONFIG3, 16, 20, adv_wr_off);
534
535 GPMC_SET_ONE(GPMC_CS_CONFIG4, 0, 3, oe_on);
536 GPMC_SET_ONE(GPMC_CS_CONFIG4, 8, 12, oe_off);
537 GPMC_SET_ONE(GPMC_CS_CONFIG4, 16, 19, we_on);
538 GPMC_SET_ONE(GPMC_CS_CONFIG4, 24, 28, we_off);
539
540 GPMC_SET_ONE(GPMC_CS_CONFIG5, 0, 4, rd_cycle);
541 GPMC_SET_ONE(GPMC_CS_CONFIG5, 8, 12, wr_cycle);
542 GPMC_SET_ONE(GPMC_CS_CONFIG5, 16, 20, access);
543
544 GPMC_SET_ONE(GPMC_CS_CONFIG5, 24, 27, page_burst_access);
545
546 GPMC_SET_ONE(GPMC_CS_CONFIG6, 0, 3, bus_turnaround);
547 GPMC_SET_ONE(GPMC_CS_CONFIG6, 8, 11, cycle2cycle_delay);
548
549 GPMC_SET_ONE(GPMC_CS_CONFIG1, 18, 19, wait_monitoring);
550 GPMC_SET_ONE(GPMC_CS_CONFIG1, 25, 26, clk_activation);
551
552 if (gpmc_capability & GPMC_HAS_WR_DATA_MUX_BUS)
553 GPMC_SET_ONE(GPMC_CS_CONFIG6, 16, 19, wr_data_mux_bus);
554 if (gpmc_capability & GPMC_HAS_WR_ACCESS)
555 GPMC_SET_ONE(GPMC_CS_CONFIG6, 24, 28, wr_access);
556
557 /* caller is expected to have initialized CONFIG1 to cover
558 * at least sync vs async
559 */
560 l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1);
561 if (l & (GPMC_CONFIG1_READTYPE_SYNC | GPMC_CONFIG1_WRITETYPE_SYNC)) {
562#ifdef DEBUG
563 printk(KERN_INFO "GPMC CS%d CLK period is %lu ns (div %d)\n",
564 cs, (div * gpmc_get_fclk_period()) / 1000, div);
565#endif
566 l &= ~0x03;
567 l |= (div - 1);
568 gpmc_cs_write_reg(cs, GPMC_CS_CONFIG1, l);
569 }
570
571 gpmc_cs_bool_timings(cs, &t->bool_timings);
572 gpmc_cs_show_timings(cs, "after gpmc_cs_set_timings");
573
574 return 0;
575}
576
577static int gpmc_cs_set_memconf(int cs, u32 base, u32 size)
578{
579 u32 l;
580 u32 mask;
581
582 /*
583 * Ensure that base address is aligned on a
584 * boundary equal to or greater than size.
585 */
586 if (base & (size - 1))
587 return -EINVAL;
588
589 mask = (1 << GPMC_SECTION_SHIFT) - size;
590 l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
591 l &= ~0x3f;
592 l = (base >> GPMC_CHUNK_SHIFT) & 0x3f;
593 l &= ~(0x0f << 8);
594 l |= ((mask >> GPMC_CHUNK_SHIFT) & 0x0f) << 8;
595 l |= GPMC_CONFIG7_CSVALID;
596 gpmc_cs_write_reg(cs, GPMC_CS_CONFIG7, l);
597
598 return 0;
599}
600
601static void gpmc_cs_enable_mem(int cs)
602{
603 u32 l;
604
605 l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
606 l |= GPMC_CONFIG7_CSVALID;
607 gpmc_cs_write_reg(cs, GPMC_CS_CONFIG7, l);
608}
609
610static void gpmc_cs_disable_mem(int cs)
611{
612 u32 l;
613
614 l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
615 l &= ~GPMC_CONFIG7_CSVALID;
616 gpmc_cs_write_reg(cs, GPMC_CS_CONFIG7, l);
617}
618
619static void gpmc_cs_get_memconf(int cs, u32 *base, u32 *size)
620{
621 u32 l;
622 u32 mask;
623
624 l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
625 *base = (l & 0x3f) << GPMC_CHUNK_SHIFT;
626 mask = (l >> 8) & 0x0f;
627 *size = (1 << GPMC_SECTION_SHIFT) - (mask << GPMC_CHUNK_SHIFT);
628}
629
630static int gpmc_cs_mem_enabled(int cs)
631{
632 u32 l;
633
634 l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
635 return l & GPMC_CONFIG7_CSVALID;
636}
637
638static void gpmc_cs_set_reserved(int cs, int reserved)
639{
640 struct gpmc_cs_data *gpmc = &gpmc_cs[cs];
641
642 gpmc->flags |= GPMC_CS_RESERVED;
643}
644
645static bool gpmc_cs_reserved(int cs)
646{
647 struct gpmc_cs_data *gpmc = &gpmc_cs[cs];
648
649 return gpmc->flags & GPMC_CS_RESERVED;
650}
651
652static void gpmc_cs_set_name(int cs, const char *name)
653{
654 struct gpmc_cs_data *gpmc = &gpmc_cs[cs];
655
656 gpmc->name = name;
657}
658
659const char *gpmc_cs_get_name(int cs)
660{
661 struct gpmc_cs_data *gpmc = &gpmc_cs[cs];
662
663 return gpmc->name;
664}
665
666static unsigned long gpmc_mem_align(unsigned long size)
667{
668 int order;
669
670 size = (size - 1) >> (GPMC_CHUNK_SHIFT - 1);
671 order = GPMC_CHUNK_SHIFT - 1;
672 do {
673 size >>= 1;
674 order++;
675 } while (size);
676 size = 1 << order;
677 return size;
678}
679
680static int gpmc_cs_insert_mem(int cs, unsigned long base, unsigned long size)
681{
682 struct gpmc_cs_data *gpmc = &gpmc_cs[cs];
683 struct resource *res = &gpmc->mem;
684 int r;
685
686 size = gpmc_mem_align(size);
687 spin_lock(&gpmc_mem_lock);
688 res->start = base;
689 res->end = base + size - 1;
690 r = request_resource(&gpmc_mem_root, res);
691 spin_unlock(&gpmc_mem_lock);
692
693 return r;
694}
695
696static int gpmc_cs_delete_mem(int cs)
697{
698 struct gpmc_cs_data *gpmc = &gpmc_cs[cs];
699 struct resource *res = &gpmc->mem;
700 int r;
701
702 spin_lock(&gpmc_mem_lock);
703 r = release_resource(res);
704 res->start = 0;
705 res->end = 0;
706 spin_unlock(&gpmc_mem_lock);
707
708 return r;
709}
710
711/**
712 * gpmc_cs_remap - remaps a chip-select physical base address
713 * @cs: chip-select to remap
714 * @base: physical base address to re-map chip-select to
715 *
716 * Re-maps a chip-select to a new physical base address specified by
717 * "base". Returns 0 on success and appropriate negative error code
718 * on failure.
719 */
720static int gpmc_cs_remap(int cs, u32 base)
721{
722 int ret;
723 u32 old_base, size;
724
725 if (cs > gpmc_cs_num) {
726 pr_err("%s: requested chip-select is disabled\n", __func__);
727 return -ENODEV;
728 }
729
730 /*
731 * Make sure we ignore any device offsets from the GPMC partition
732 * allocated for the chip select and that the new base confirms
733 * to the GPMC 16MB minimum granularity.
734 */
735 base &= ~(SZ_16M - 1);
736
737 gpmc_cs_get_memconf(cs, &old_base, &size);
738 if (base == old_base)
739 return 0;
740
741 ret = gpmc_cs_delete_mem(cs);
742 if (ret < 0)
743 return ret;
744
745 ret = gpmc_cs_insert_mem(cs, base, size);
746 if (ret < 0)
747 return ret;
748
749 ret = gpmc_cs_set_memconf(cs, base, size);
750
751 return ret;
752}
753
754int gpmc_cs_request(int cs, unsigned long size, unsigned long *base)
755{
756 struct gpmc_cs_data *gpmc = &gpmc_cs[cs];
757 struct resource *res = &gpmc->mem;
758 int r = -1;
759
760 if (cs > gpmc_cs_num) {
761 pr_err("%s: requested chip-select is disabled\n", __func__);
762 return -ENODEV;
763 }
764 size = gpmc_mem_align(size);
765 if (size > (1 << GPMC_SECTION_SHIFT))
766 return -ENOMEM;
767
768 spin_lock(&gpmc_mem_lock);
769 if (gpmc_cs_reserved(cs)) {
770 r = -EBUSY;
771 goto out;
772 }
773 if (gpmc_cs_mem_enabled(cs))
774 r = adjust_resource(res, res->start & ~(size - 1), size);
775 if (r < 0)
776 r = allocate_resource(&gpmc_mem_root, res, size, 0, ~0,
777 size, NULL, NULL);
778 if (r < 0)
779 goto out;
780
781 /* Disable CS while changing base address and size mask */
782 gpmc_cs_disable_mem(cs);
783
784 r = gpmc_cs_set_memconf(cs, res->start, resource_size(res));
785 if (r < 0) {
786 release_resource(res);
787 goto out;
788 }
789
790 /* Enable CS */
791 gpmc_cs_enable_mem(cs);
792 *base = res->start;
793 gpmc_cs_set_reserved(cs, 1);
794out:
795 spin_unlock(&gpmc_mem_lock);
796 return r;
797}
798EXPORT_SYMBOL(gpmc_cs_request);
799
800void gpmc_cs_free(int cs)
801{
802 struct gpmc_cs_data *gpmc = &gpmc_cs[cs];
803 struct resource *res = &gpmc->mem;
804
805 spin_lock(&gpmc_mem_lock);
806 if (cs >= gpmc_cs_num || cs < 0 || !gpmc_cs_reserved(cs)) {
807 printk(KERN_ERR "Trying to free non-reserved GPMC CS%d\n", cs);
808 BUG();
809 spin_unlock(&gpmc_mem_lock);
810 return;
811 }
812 gpmc_cs_disable_mem(cs);
813 if (res->flags)
814 release_resource(res);
815 gpmc_cs_set_reserved(cs, 0);
816 spin_unlock(&gpmc_mem_lock);
817}
818EXPORT_SYMBOL(gpmc_cs_free);
819
820/**
821 * gpmc_configure - write request to configure gpmc
822 * @cmd: command type
823 * @wval: value to write
824 * @return status of the operation
825 */
826int gpmc_configure(int cmd, int wval)
827{
828 u32 regval;
829
830 switch (cmd) {
831 case GPMC_ENABLE_IRQ:
832 gpmc_write_reg(GPMC_IRQENABLE, wval);
833 break;
834
835 case GPMC_SET_IRQ_STATUS:
836 gpmc_write_reg(GPMC_IRQSTATUS, wval);
837 break;
838
839 case GPMC_CONFIG_WP:
840 regval = gpmc_read_reg(GPMC_CONFIG);
841 if (wval)
842 regval &= ~GPMC_CONFIG_WRITEPROTECT; /* WP is ON */
843 else
844 regval |= GPMC_CONFIG_WRITEPROTECT; /* WP is OFF */
845 gpmc_write_reg(GPMC_CONFIG, regval);
846 break;
847
848 default:
849 pr_err("%s: command not supported\n", __func__);
850 return -EINVAL;
851 }
852
853 return 0;
854}
855EXPORT_SYMBOL(gpmc_configure);
856
857void gpmc_update_nand_reg(struct gpmc_nand_regs *reg, int cs)
858{
859 int i;
860
861 reg->gpmc_status = gpmc_base + GPMC_STATUS;
862 reg->gpmc_nand_command = gpmc_base + GPMC_CS0_OFFSET +
863 GPMC_CS_NAND_COMMAND + GPMC_CS_SIZE * cs;
864 reg->gpmc_nand_address = gpmc_base + GPMC_CS0_OFFSET +
865 GPMC_CS_NAND_ADDRESS + GPMC_CS_SIZE * cs;
866 reg->gpmc_nand_data = gpmc_base + GPMC_CS0_OFFSET +
867 GPMC_CS_NAND_DATA + GPMC_CS_SIZE * cs;
868 reg->gpmc_prefetch_config1 = gpmc_base + GPMC_PREFETCH_CONFIG1;
869 reg->gpmc_prefetch_config2 = gpmc_base + GPMC_PREFETCH_CONFIG2;
870 reg->gpmc_prefetch_control = gpmc_base + GPMC_PREFETCH_CONTROL;
871 reg->gpmc_prefetch_status = gpmc_base + GPMC_PREFETCH_STATUS;
872 reg->gpmc_ecc_config = gpmc_base + GPMC_ECC_CONFIG;
873 reg->gpmc_ecc_control = gpmc_base + GPMC_ECC_CONTROL;
874 reg->gpmc_ecc_size_config = gpmc_base + GPMC_ECC_SIZE_CONFIG;
875 reg->gpmc_ecc1_result = gpmc_base + GPMC_ECC1_RESULT;
876
877 for (i = 0; i < GPMC_BCH_NUM_REMAINDER; i++) {
878 reg->gpmc_bch_result0[i] = gpmc_base + GPMC_ECC_BCH_RESULT_0 +
879 GPMC_BCH_SIZE * i;
880 reg->gpmc_bch_result1[i] = gpmc_base + GPMC_ECC_BCH_RESULT_1 +
881 GPMC_BCH_SIZE * i;
882 reg->gpmc_bch_result2[i] = gpmc_base + GPMC_ECC_BCH_RESULT_2 +
883 GPMC_BCH_SIZE * i;
884 reg->gpmc_bch_result3[i] = gpmc_base + GPMC_ECC_BCH_RESULT_3 +
885 GPMC_BCH_SIZE * i;
886 reg->gpmc_bch_result4[i] = gpmc_base + GPMC_ECC_BCH_RESULT_4 +
887 i * GPMC_BCH_SIZE;
888 reg->gpmc_bch_result5[i] = gpmc_base + GPMC_ECC_BCH_RESULT_5 +
889 i * GPMC_BCH_SIZE;
890 reg->gpmc_bch_result6[i] = gpmc_base + GPMC_ECC_BCH_RESULT_6 +
891 i * GPMC_BCH_SIZE;
892 }
893}
894
895int gpmc_get_client_irq(unsigned irq_config)
896{
897 int i;
898
899 if (hweight32(irq_config) > 1)
900 return 0;
901
902 for (i = 0; i < GPMC_NR_IRQ; i++)
903 if (gpmc_client_irq[i].bitmask & irq_config)
904 return gpmc_client_irq[i].irq;
905
906 return 0;
907}
908
909static int gpmc_irq_endis(unsigned irq, bool endis)
910{
911 int i;
912 u32 regval;
913
914 for (i = 0; i < GPMC_NR_IRQ; i++)
915 if (irq == gpmc_client_irq[i].irq) {
916 regval = gpmc_read_reg(GPMC_IRQENABLE);
917 if (endis)
918 regval |= gpmc_client_irq[i].bitmask;
919 else
920 regval &= ~gpmc_client_irq[i].bitmask;
921 gpmc_write_reg(GPMC_IRQENABLE, regval);
922 break;
923 }
924
925 return 0;
926}
927
/* irq_chip .irq_disable callback: clear the client's IRQENABLE bit */
static void gpmc_irq_disable(struct irq_data *p)
{
	gpmc_irq_endis(p->irq, false);
}
932
/* irq_chip .irq_enable callback: set the client's IRQENABLE bit */
static void gpmc_irq_enable(struct irq_data *p)
{
	gpmc_irq_endis(p->irq, true);
}
937
/* Stub irq_chip callbacks: ack/mask/unmask/shutdown need no hardware action */
static void gpmc_irq_noop(struct irq_data *data) { }

static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; }
941
942static int gpmc_setup_irq(void)
943{
944 int i;
945 u32 regval;
946
947 if (!gpmc_irq)
948 return -EINVAL;
949
950 gpmc_irq_start = irq_alloc_descs(-1, 0, GPMC_NR_IRQ, 0);
951 if (gpmc_irq_start < 0) {
952 pr_err("irq_alloc_descs failed\n");
953 return gpmc_irq_start;
954 }
955
956 gpmc_irq_chip.name = "gpmc";
957 gpmc_irq_chip.irq_startup = gpmc_irq_noop_ret;
958 gpmc_irq_chip.irq_enable = gpmc_irq_enable;
959 gpmc_irq_chip.irq_disable = gpmc_irq_disable;
960 gpmc_irq_chip.irq_shutdown = gpmc_irq_noop;
961 gpmc_irq_chip.irq_ack = gpmc_irq_noop;
962 gpmc_irq_chip.irq_mask = gpmc_irq_noop;
963 gpmc_irq_chip.irq_unmask = gpmc_irq_noop;
964
965 gpmc_client_irq[0].bitmask = GPMC_IRQ_FIFOEVENTENABLE;
966 gpmc_client_irq[1].bitmask = GPMC_IRQ_COUNT_EVENT;
967
968 for (i = 0; i < GPMC_NR_IRQ; i++) {
969 gpmc_client_irq[i].irq = gpmc_irq_start + i;
970 irq_set_chip_and_handler(gpmc_client_irq[i].irq,
971 &gpmc_irq_chip, handle_simple_irq);
972 set_irq_flags(gpmc_client_irq[i].irq,
973 IRQF_VALID | IRQF_NOAUTOEN);
974 }
975
976 /* Disable interrupts */
977 gpmc_write_reg(GPMC_IRQENABLE, 0);
978
979 /* clear interrupts */
980 regval = gpmc_read_reg(GPMC_IRQSTATUS);
981 gpmc_write_reg(GPMC_IRQSTATUS, regval);
982
983 return request_irq(gpmc_irq, gpmc_handle_irq, 0, "gpmc", NULL);
984}
985
986static int gpmc_free_irq(void)
987{
988 int i;
989
990 if (gpmc_irq)
991 free_irq(gpmc_irq, NULL);
992
993 for (i = 0; i < GPMC_NR_IRQ; i++) {
994 irq_set_handler(gpmc_client_irq[i].irq, NULL);
995 irq_set_chip(gpmc_client_irq[i].irq, &no_irq_chip);
996 irq_modify_status(gpmc_client_irq[i].irq, 0, 0);
997 }
998
999 irq_free_descs(gpmc_irq_start, GPMC_NR_IRQ);
1000
1001 return 0;
1002}
1003
1004static void gpmc_mem_exit(void)
1005{
1006 int cs;
1007
1008 for (cs = 0; cs < gpmc_cs_num; cs++) {
1009 if (!gpmc_cs_mem_enabled(cs))
1010 continue;
1011 gpmc_cs_delete_mem(cs);
1012 }
1013
1014}
1015
1016static void gpmc_mem_init(void)
1017{
1018 int cs;
1019
1020 /*
1021 * The first 1MB of GPMC address space is typically mapped to
1022 * the internal ROM. Never allocate the first page, to
1023 * facilitate bug detection; even if we didn't boot from ROM.
1024 */
1025 gpmc_mem_root.start = SZ_1M;
1026 gpmc_mem_root.end = GPMC_MEM_END;
1027
1028 /* Reserve all regions that has been set up by bootloader */
1029 for (cs = 0; cs < gpmc_cs_num; cs++) {
1030 u32 base, size;
1031
1032 if (!gpmc_cs_mem_enabled(cs))
1033 continue;
1034 gpmc_cs_get_memconf(cs, &base, &size);
1035 if (gpmc_cs_insert_mem(cs, base, size)) {
1036 pr_warn("%s: disabling cs %d mapped at 0x%x-0x%x\n",
1037 __func__, cs, base, base + size);
1038 gpmc_cs_disable_mem(cs);
1039 }
1040 }
1041}
1042
1043static u32 gpmc_round_ps_to_sync_clk(u32 time_ps, u32 sync_clk)
1044{
1045 u32 temp;
1046 int div;
1047
1048 div = gpmc_calc_divider(sync_clk);
1049 temp = gpmc_ps_to_ticks(time_ps);
1050 temp = (temp + div - 1) / div;
1051 return gpmc_ticks_to_ps(temp * div);
1052}
1053
/* XXX: can the cycles be avoided ? */
/*
 * gpmc_calc_sync_read_timings - derive synchronous-read timings (in ps)
 * @gpmc_t: timings under construction; clk_activation, adv_on and
 *          sync_clk are read here and must already be set by the
 *          common-timing pass
 * @dev_t: raw device timing requirements
 * @mux: true when address/data are multiplexed on the bus
 *
 * Each edge (adv_rd_off, oe_on, access, oe_off, cs_rd_off, rd_cycle) is
 * the maximum of the device constraints feeding it, rounded to GPMC
 * ticks. Always returns 0.
 */
static int gpmc_calc_sync_read_timings(struct gpmc_timings *gpmc_t,
				       struct gpmc_device_timings *dev_t,
				       bool mux)
{
	u32 temp;

	/* adv_rd_off */
	temp = dev_t->t_avdp_r;
	/* XXX: mux check required ? */
	if (mux) {
		/* XXX: t_avdp not to be required for sync, only added for tusb
		 * this indirectly necessitates requirement of t_avdp_r and
		 * t_avdp_w instead of having a single t_avdp
		 */
		temp = max_t(u32, temp, gpmc_t->clk_activation + dev_t->t_avdh);
		temp = max_t(u32, gpmc_t->adv_on + gpmc_ticks_to_ps(1), temp);
	}
	gpmc_t->adv_rd_off = gpmc_round_ps_to_ticks(temp);

	/* oe_on */
	temp = dev_t->t_oeasu; /* XXX: remove this ? */
	if (mux) {
		temp = max_t(u32, temp, gpmc_t->clk_activation + dev_t->t_ach);
		temp = max_t(u32, temp, gpmc_t->adv_rd_off +
				gpmc_ticks_to_ps(dev_t->cyc_aavdh_oe));
	}
	gpmc_t->oe_on = gpmc_round_ps_to_ticks(temp);

	/* access */
	/* XXX: any scope for improvement ?, by combining oe_on
	 * and clk_activation, need to check whether
	 * access = clk_activation + round to sync clk ?
	 */
	temp = max_t(u32, dev_t->t_iaa,	dev_t->cyc_iaa * gpmc_t->sync_clk);
	temp += gpmc_t->clk_activation;
	if (dev_t->cyc_oe)
		temp = max_t(u32, temp, gpmc_t->oe_on +
				gpmc_ticks_to_ps(dev_t->cyc_oe));
	gpmc_t->access = gpmc_round_ps_to_ticks(temp);

	/* OE and CS are deasserted one tick after data is valid */
	gpmc_t->oe_off = gpmc_t->access + gpmc_ticks_to_ps(1);
	gpmc_t->cs_rd_off = gpmc_t->oe_off;

	/* rd_cycle */
	temp = max_t(u32, dev_t->t_cez_r, dev_t->t_oez);
	temp = gpmc_round_ps_to_sync_clk(temp, gpmc_t->sync_clk) +
							gpmc_t->access;
	/* XXX: barter t_ce_rdyz with t_cez_r ? */
	if (dev_t->t_ce_rdyz)
		temp = max_t(u32, temp,	gpmc_t->cs_rd_off + dev_t->t_ce_rdyz);
	gpmc_t->rd_cycle = gpmc_round_ps_to_ticks(temp);

	return 0;
}
1109
/*
 * gpmc_calc_sync_write_timings - derive synchronous-write timings (in ps)
 * @gpmc_t: timings under construction; clk_activation, adv_on, access
 *          and sync_clk are read here and must already be set
 * @dev_t: raw device timing requirements
 * @mux: true when address/data are multiplexed on the bus
 *
 * Computes adv_wr_off, wr_data_mux_bus, we_on, wr_access, we_off,
 * cs_wr_off and wr_cycle as the rounded maxima of the relevant device
 * constraints. Always returns 0.
 */
static int gpmc_calc_sync_write_timings(struct gpmc_timings *gpmc_t,
					struct gpmc_device_timings *dev_t,
					bool mux)
{
	u32 temp;

	/* adv_wr_off */
	temp = dev_t->t_avdp_w;
	if (mux) {
		temp = max_t(u32, temp,
			gpmc_t->clk_activation + dev_t->t_avdh);
		temp = max_t(u32, gpmc_t->adv_on + gpmc_ticks_to_ps(1), temp);
	}
	gpmc_t->adv_wr_off = gpmc_round_ps_to_ticks(temp);

	/* wr_data_mux_bus */
	temp = max_t(u32, dev_t->t_weasu,
			gpmc_t->clk_activation + dev_t->t_rdyo);
	/* XXX: shouldn't mux be kept as a whole for wr_data_mux_bus ?,
	 * and in that case remember to handle we_on properly
	 */
	if (mux) {
		temp = max_t(u32, temp,
			gpmc_t->adv_wr_off + dev_t->t_aavdh);
		temp = max_t(u32, temp, gpmc_t->adv_wr_off +
				gpmc_ticks_to_ps(dev_t->cyc_aavdh_we));
	}
	gpmc_t->wr_data_mux_bus = gpmc_round_ps_to_ticks(temp);

	/* we_on */
	if (gpmc_capability & GPMC_HAS_WR_DATA_MUX_BUS)
		gpmc_t->we_on = gpmc_round_ps_to_ticks(dev_t->t_weasu);
	else
		gpmc_t->we_on = gpmc_t->wr_data_mux_bus;

	/* wr_access */
	/* XXX: gpmc_capability check reqd ? , even if not, will not harm */
	gpmc_t->wr_access = gpmc_t->access;

	/* we_off */
	temp = gpmc_t->we_on + dev_t->t_wpl;
	temp = max_t(u32, temp,
			gpmc_t->wr_access + gpmc_ticks_to_ps(1));
	temp = max_t(u32, temp,
		gpmc_t->we_on + gpmc_ticks_to_ps(dev_t->cyc_wpl));
	gpmc_t->we_off = gpmc_round_ps_to_ticks(temp);

	/* CS is held t_wph past WE deassertion */
	gpmc_t->cs_wr_off = gpmc_round_ps_to_ticks(gpmc_t->we_off +
							dev_t->t_wph);

	/* wr_cycle */
	temp = gpmc_round_ps_to_sync_clk(dev_t->t_cez_w, gpmc_t->sync_clk);
	temp += gpmc_t->wr_access;
	/* XXX: barter t_ce_rdyz with t_cez_w ? */
	if (dev_t->t_ce_rdyz)
		temp = max_t(u32, temp,
				 gpmc_t->cs_wr_off + dev_t->t_ce_rdyz);
	gpmc_t->wr_cycle = gpmc_round_ps_to_ticks(temp);

	return 0;
}
1171
/*
 * gpmc_calc_async_read_timings - derive asynchronous-read timings (in ps)
 * @gpmc_t: timings under construction; cs_on and adv_on are read here
 *          and must already be set by the common-timing pass
 * @dev_t: raw device timing requirements
 * @mux: true when address/data are multiplexed on the bus
 *
 * Same structure as the sync variant but without any clock-relative
 * terms. Always returns 0.
 */
static int gpmc_calc_async_read_timings(struct gpmc_timings *gpmc_t,
					struct gpmc_device_timings *dev_t,
					bool mux)
{
	u32 temp;

	/* adv_rd_off */
	temp = dev_t->t_avdp_r;
	if (mux)
		temp = max_t(u32, gpmc_t->adv_on + gpmc_ticks_to_ps(1), temp);
	gpmc_t->adv_rd_off = gpmc_round_ps_to_ticks(temp);

	/* oe_on */
	temp = dev_t->t_oeasu;
	if (mux)
		temp = max_t(u32, temp,
			gpmc_t->adv_rd_off + dev_t->t_aavdh);
	gpmc_t->oe_on = gpmc_round_ps_to_ticks(temp);

	/* access */
	temp = max_t(u32, dev_t->t_iaa, /* XXX: remove t_iaa in async ? */
				gpmc_t->oe_on + dev_t->t_oe);
	temp = max_t(u32, temp,
				gpmc_t->cs_on + dev_t->t_ce);
	temp = max_t(u32, temp,
				gpmc_t->adv_on + dev_t->t_aa);
	gpmc_t->access = gpmc_round_ps_to_ticks(temp);

	/* OE and CS are deasserted one tick after data is valid */
	gpmc_t->oe_off = gpmc_t->access + gpmc_ticks_to_ps(1);
	gpmc_t->cs_rd_off = gpmc_t->oe_off;

	/* rd_cycle */
	temp = max_t(u32, dev_t->t_rd_cycle,
			gpmc_t->cs_rd_off + dev_t->t_cez_r);
	temp = max_t(u32, temp, gpmc_t->oe_off + dev_t->t_oez);
	gpmc_t->rd_cycle = gpmc_round_ps_to_ticks(temp);

	return 0;
}
1211
/*
 * gpmc_calc_async_write_timings - derive asynchronous-write timings (in ps)
 * @gpmc_t: timings under construction; adv_on is read here and must
 *          already be set by the common-timing pass
 * @dev_t: raw device timing requirements
 * @mux: true when address/data are multiplexed on the bus
 *
 * Computes adv_wr_off, wr_data_mux_bus, we_on, we_off, cs_wr_off and
 * wr_cycle from the device constraints. Always returns 0.
 */
static int gpmc_calc_async_write_timings(struct gpmc_timings *gpmc_t,
					 struct gpmc_device_timings *dev_t,
					 bool mux)
{
	u32 temp;

	/* adv_wr_off */
	temp = dev_t->t_avdp_w;
	if (mux)
		temp = max_t(u32, gpmc_t->adv_on + gpmc_ticks_to_ps(1), temp);
	gpmc_t->adv_wr_off = gpmc_round_ps_to_ticks(temp);

	/* wr_data_mux_bus */
	temp = dev_t->t_weasu;
	if (mux) {
		temp = max_t(u32, temp,	gpmc_t->adv_wr_off + dev_t->t_aavdh);
		temp = max_t(u32, temp, gpmc_t->adv_wr_off +
				gpmc_ticks_to_ps(dev_t->cyc_aavdh_we));
	}
	gpmc_t->wr_data_mux_bus = gpmc_round_ps_to_ticks(temp);

	/* we_on */
	if (gpmc_capability & GPMC_HAS_WR_DATA_MUX_BUS)
		gpmc_t->we_on = gpmc_round_ps_to_ticks(dev_t->t_weasu);
	else
		gpmc_t->we_on = gpmc_t->wr_data_mux_bus;

	/* we_off: WE held for the device's write-pulse width */
	temp = gpmc_t->we_on + dev_t->t_wpl;
	gpmc_t->we_off = gpmc_round_ps_to_ticks(temp);

	/* CS is held t_wph past WE deassertion */
	gpmc_t->cs_wr_off = gpmc_round_ps_to_ticks(gpmc_t->we_off +
							dev_t->t_wph);

	/* wr_cycle */
	temp = max_t(u32, dev_t->t_wr_cycle,
				gpmc_t->cs_wr_off + dev_t->t_cez_w);
	gpmc_t->wr_cycle = gpmc_round_ps_to_ticks(temp);

	return 0;
}
1253
/*
 * gpmc_calc_sync_common_timings - timings shared by sync read and write
 * @gpmc_t: timings under construction; sync_clk, page_burst_access and
 *          clk_activation are filled in here
 * @dev_t: raw device timing requirements
 *
 * The extra-delay flags are only honoured when the GPMC clock divider
 * is 1 (the early return below). Always returns 0.
 */
static int gpmc_calc_sync_common_timings(struct gpmc_timings *gpmc_t,
					struct gpmc_device_timings *dev_t)
{
	u32 temp;

	gpmc_t->sync_clk = gpmc_calc_divider(dev_t->clk) *
						gpmc_get_fclk_period();

	gpmc_t->page_burst_access = gpmc_round_ps_to_sync_clk(
					dev_t->t_bacc,
					gpmc_t->sync_clk);

	temp = max_t(u32, dev_t->t_ces, dev_t->t_avds);
	gpmc_t->clk_activation = gpmc_round_ps_to_ticks(temp);

	if (gpmc_calc_divider(gpmc_t->sync_clk) != 1)
		return 0;

	/* divider == 1: half-cycle extra delays requested by the device */
	if (dev_t->ce_xdelay)
		gpmc_t->bool_timings.cs_extra_delay = true;
	if (dev_t->avd_xdelay)
		gpmc_t->bool_timings.adv_extra_delay = true;
	if (dev_t->oe_xdelay)
		gpmc_t->bool_timings.oe_extra_delay = true;
	if (dev_t->we_xdelay)
		gpmc_t->bool_timings.we_extra_delay = true;

	return 0;
}
1283
1284static int gpmc_calc_common_timings(struct gpmc_timings *gpmc_t,
1285 struct gpmc_device_timings *dev_t,
1286 bool sync)
1287{
1288 u32 temp;
1289
1290 /* cs_on */
1291 gpmc_t->cs_on = gpmc_round_ps_to_ticks(dev_t->t_ceasu);
1292
1293 /* adv_on */
1294 temp = dev_t->t_avdasu;
1295 if (dev_t->t_ce_avd)
1296 temp = max_t(u32, temp,
1297 gpmc_t->cs_on + dev_t->t_ce_avd);
1298 gpmc_t->adv_on = gpmc_round_ps_to_ticks(temp);
1299
1300 if (sync)
1301 gpmc_calc_sync_common_timings(gpmc_t, dev_t);
1302
1303 return 0;
1304}
1305
1306/* TODO: remove this function once all peripherals are confirmed to
1307 * work with generic timing. Simultaneously gpmc_cs_set_timings()
1308 * has to be modified to handle timings in ps instead of ns
1309*/
1310static void gpmc_convert_ps_to_ns(struct gpmc_timings *t)
1311{
1312 t->cs_on /= 1000;
1313 t->cs_rd_off /= 1000;
1314 t->cs_wr_off /= 1000;
1315 t->adv_on /= 1000;
1316 t->adv_rd_off /= 1000;
1317 t->adv_wr_off /= 1000;
1318 t->we_on /= 1000;
1319 t->we_off /= 1000;
1320 t->oe_on /= 1000;
1321 t->oe_off /= 1000;
1322 t->page_burst_access /= 1000;
1323 t->access /= 1000;
1324 t->rd_cycle /= 1000;
1325 t->wr_cycle /= 1000;
1326 t->bus_turnaround /= 1000;
1327 t->cycle2cycle_delay /= 1000;
1328 t->wait_monitoring /= 1000;
1329 t->clk_activation /= 1000;
1330 t->wr_access /= 1000;
1331 t->wr_data_mux_bus /= 1000;
1332}
1333
1334int gpmc_calc_timings(struct gpmc_timings *gpmc_t,
1335 struct gpmc_settings *gpmc_s,
1336 struct gpmc_device_timings *dev_t)
1337{
1338 bool mux = false, sync = false;
1339
1340 if (gpmc_s) {
1341 mux = gpmc_s->mux_add_data ? true : false;
1342 sync = (gpmc_s->sync_read || gpmc_s->sync_write);
1343 }
1344
1345 memset(gpmc_t, 0, sizeof(*gpmc_t));
1346
1347 gpmc_calc_common_timings(gpmc_t, dev_t, sync);
1348
1349 if (gpmc_s && gpmc_s->sync_read)
1350 gpmc_calc_sync_read_timings(gpmc_t, dev_t, mux);
1351 else
1352 gpmc_calc_async_read_timings(gpmc_t, dev_t, mux);
1353
1354 if (gpmc_s && gpmc_s->sync_write)
1355 gpmc_calc_sync_write_timings(gpmc_t, dev_t, mux);
1356 else
1357 gpmc_calc_async_write_timings(gpmc_t, dev_t, mux);
1358
1359 /* TODO: remove, see function definition */
1360 gpmc_convert_ps_to_ns(gpmc_t);
1361
1362 return 0;
1363}
1364
1365/**
1366 * gpmc_cs_program_settings - programs non-timing related settings
1367 * @cs: GPMC chip-select to program
1368 * @p: pointer to GPMC settings structure
1369 *
1370 * Programs non-timing related settings for a GPMC chip-select, such as
1371 * bus-width, burst configuration, etc. Function should be called once
1372 * for each chip-select that is being used and must be called before
1373 * calling gpmc_cs_set_timings() as timing parameters in the CONFIG1
1374 * register will be initialised to zero by this function. Returns 0 on
1375 * success and appropriate negative error code on failure.
1376 */
1377int gpmc_cs_program_settings(int cs, struct gpmc_settings *p)
1378{
1379 u32 config1;
1380
1381 if ((!p->device_width) || (p->device_width > GPMC_DEVWIDTH_16BIT)) {
1382 pr_err("%s: invalid width %d!", __func__, p->device_width);
1383 return -EINVAL;
1384 }
1385
1386 /* Address-data multiplexing not supported for NAND devices */
1387 if (p->device_nand && p->mux_add_data) {
1388 pr_err("%s: invalid configuration!\n", __func__);
1389 return -EINVAL;
1390 }
1391
1392 if ((p->mux_add_data > GPMC_MUX_AD) ||
1393 ((p->mux_add_data == GPMC_MUX_AAD) &&
1394 !(gpmc_capability & GPMC_HAS_MUX_AAD))) {
1395 pr_err("%s: invalid multiplex configuration!\n", __func__);
1396 return -EINVAL;
1397 }
1398
1399 /* Page/burst mode supports lengths of 4, 8 and 16 bytes */
1400 if (p->burst_read || p->burst_write) {
1401 switch (p->burst_len) {
1402 case GPMC_BURST_4:
1403 case GPMC_BURST_8:
1404 case GPMC_BURST_16:
1405 break;
1406 default:
1407 pr_err("%s: invalid page/burst-length (%d)\n",
1408 __func__, p->burst_len);
1409 return -EINVAL;
1410 }
1411 }
1412
1413 if (p->wait_pin > gpmc_nr_waitpins) {
1414 pr_err("%s: invalid wait-pin (%d)\n", __func__, p->wait_pin);
1415 return -EINVAL;
1416 }
1417
1418 config1 = GPMC_CONFIG1_DEVICESIZE((p->device_width - 1));
1419
1420 if (p->sync_read)
1421 config1 |= GPMC_CONFIG1_READTYPE_SYNC;
1422 if (p->sync_write)
1423 config1 |= GPMC_CONFIG1_WRITETYPE_SYNC;
1424 if (p->wait_on_read)
1425 config1 |= GPMC_CONFIG1_WAIT_READ_MON;
1426 if (p->wait_on_write)
1427 config1 |= GPMC_CONFIG1_WAIT_WRITE_MON;
1428 if (p->wait_on_read || p->wait_on_write)
1429 config1 |= GPMC_CONFIG1_WAIT_PIN_SEL(p->wait_pin);
1430 if (p->device_nand)
1431 config1 |= GPMC_CONFIG1_DEVICETYPE(GPMC_DEVICETYPE_NAND);
1432 if (p->mux_add_data)
1433 config1 |= GPMC_CONFIG1_MUXTYPE(p->mux_add_data);
1434 if (p->burst_read)
1435 config1 |= GPMC_CONFIG1_READMULTIPLE_SUPP;
1436 if (p->burst_write)
1437 config1 |= GPMC_CONFIG1_WRITEMULTIPLE_SUPP;
1438 if (p->burst_read || p->burst_write) {
1439 config1 |= GPMC_CONFIG1_PAGE_LEN(p->burst_len >> 3);
1440 config1 |= p->burst_wrap ? GPMC_CONFIG1_WRAPBURST_SUPP : 0;
1441 }
1442
1443 gpmc_cs_write_reg(cs, GPMC_CS_CONFIG1, config1);
1444
1445 return 0;
1446}
1447
1448#ifdef CONFIG_OF
/* Device-tree match table for all supported GPMC controller variants */
static const struct of_device_id gpmc_dt_ids[] = {
	{ .compatible = "ti,omap2420-gpmc" },
	{ .compatible = "ti,omap2430-gpmc" },
	{ .compatible = "ti,omap3430-gpmc" },	/* omap3430 & omap3630 */
	{ .compatible = "ti,omap4430-gpmc" },	/* omap4430 & omap4460 & omap543x */
	{ .compatible = "ti,am3352-gpmc" },	/* am335x devices */
	{ }
};
MODULE_DEVICE_TABLE(of, gpmc_dt_ids);
1458
1459/**
1460 * gpmc_read_settings_dt - read gpmc settings from device-tree
1461 * @np: pointer to device-tree node for a gpmc child device
1462 * @p: pointer to gpmc settings structure
1463 *
1464 * Reads the GPMC settings for a GPMC child device from device-tree and
1465 * stores them in the GPMC settings structure passed. The GPMC settings
1466 * structure is initialised to zero by this function and so any
1467 * previously stored settings will be cleared.
1468 */
1469void gpmc_read_settings_dt(struct device_node *np, struct gpmc_settings *p)
1470{
1471 memset(p, 0, sizeof(struct gpmc_settings));
1472
1473 p->sync_read = of_property_read_bool(np, "gpmc,sync-read");
1474 p->sync_write = of_property_read_bool(np, "gpmc,sync-write");
1475 of_property_read_u32(np, "gpmc,device-width", &p->device_width);
1476 of_property_read_u32(np, "gpmc,mux-add-data", &p->mux_add_data);
1477
1478 if (!of_property_read_u32(np, "gpmc,burst-length", &p->burst_len)) {
1479 p->burst_wrap = of_property_read_bool(np, "gpmc,burst-wrap");
1480 p->burst_read = of_property_read_bool(np, "gpmc,burst-read");
1481 p->burst_write = of_property_read_bool(np, "gpmc,burst-write");
1482 if (!p->burst_read && !p->burst_write)
1483 pr_warn("%s: page/burst-length set but not used!\n",
1484 __func__);
1485 }
1486
1487 if (!of_property_read_u32(np, "gpmc,wait-pin", &p->wait_pin)) {
1488 p->wait_on_read = of_property_read_bool(np,
1489 "gpmc,wait-on-read");
1490 p->wait_on_write = of_property_read_bool(np,
1491 "gpmc,wait-on-write");
1492 if (!p->wait_on_read && !p->wait_on_write)
1493 pr_debug("%s: rd/wr wait monitoring not enabled!\n",
1494 __func__);
1495 }
1496}
1497
/*
 * gpmc_read_timings_dt - read GPMC timings from device-tree
 * @np: device-tree node of a gpmc child device
 * @gpmc_t: timings structure, zeroed here and filled from the
 *          "gpmc,*-ns" / "gpmc,*-ps" properties; properties that are
 *          absent leave the corresponding field at zero
 */
static void __maybe_unused gpmc_read_timings_dt(struct device_node *np,
						struct gpmc_timings *gpmc_t)
{
	struct gpmc_bool_timings *p;

	if (!np || !gpmc_t)
		return;

	memset(gpmc_t, 0, sizeof(*gpmc_t));

	/* minimum clock period for syncronous mode */
	of_property_read_u32(np, "gpmc,sync-clk-ps", &gpmc_t->sync_clk);

	/* chip select timtings */
	of_property_read_u32(np, "gpmc,cs-on-ns", &gpmc_t->cs_on);
	of_property_read_u32(np, "gpmc,cs-rd-off-ns", &gpmc_t->cs_rd_off);
	of_property_read_u32(np, "gpmc,cs-wr-off-ns", &gpmc_t->cs_wr_off);

	/* ADV signal timings */
	of_property_read_u32(np, "gpmc,adv-on-ns", &gpmc_t->adv_on);
	of_property_read_u32(np, "gpmc,adv-rd-off-ns", &gpmc_t->adv_rd_off);
	of_property_read_u32(np, "gpmc,adv-wr-off-ns", &gpmc_t->adv_wr_off);

	/* WE signal timings */
	of_property_read_u32(np, "gpmc,we-on-ns", &gpmc_t->we_on);
	of_property_read_u32(np, "gpmc,we-off-ns", &gpmc_t->we_off);

	/* OE signal timings */
	of_property_read_u32(np, "gpmc,oe-on-ns", &gpmc_t->oe_on);
	of_property_read_u32(np, "gpmc,oe-off-ns", &gpmc_t->oe_off);

	/* access and cycle timings */
	of_property_read_u32(np, "gpmc,page-burst-access-ns",
			     &gpmc_t->page_burst_access);
	of_property_read_u32(np, "gpmc,access-ns", &gpmc_t->access);
	of_property_read_u32(np, "gpmc,rd-cycle-ns", &gpmc_t->rd_cycle);
	of_property_read_u32(np, "gpmc,wr-cycle-ns", &gpmc_t->wr_cycle);
	of_property_read_u32(np, "gpmc,bus-turnaround-ns",
			     &gpmc_t->bus_turnaround);
	of_property_read_u32(np, "gpmc,cycle2cycle-delay-ns",
			     &gpmc_t->cycle2cycle_delay);
	of_property_read_u32(np, "gpmc,wait-monitoring-ns",
			     &gpmc_t->wait_monitoring);
	of_property_read_u32(np, "gpmc,clk-activation-ns",
			     &gpmc_t->clk_activation);

	/* only applicable to OMAP3+ */
	of_property_read_u32(np, "gpmc,wr-access-ns", &gpmc_t->wr_access);
	of_property_read_u32(np, "gpmc,wr-data-mux-bus-ns",
			     &gpmc_t->wr_data_mux_bus);

	/* bool timing parameters */
	p = &gpmc_t->bool_timings;

	p->cycle2cyclediffcsen =
		of_property_read_bool(np, "gpmc,cycle2cycle-diffcsen");
	p->cycle2cyclesamecsen =
		of_property_read_bool(np, "gpmc,cycle2cycle-samecsen");
	p->we_extra_delay = of_property_read_bool(np, "gpmc,we-extra-delay");
	p->oe_extra_delay = of_property_read_bool(np, "gpmc,oe-extra-delay");
	p->adv_extra_delay = of_property_read_bool(np, "gpmc,adv-extra-delay");
	p->cs_extra_delay = of_property_read_bool(np, "gpmc,cs-extra-delay");
	p->time_para_granularity =
		of_property_read_bool(np, "gpmc,time-para-granularity");
}
1563
1564#if IS_ENABLED(CONFIG_MTD_NAND)
1565
/* DT "ti,nand-xfer-type" strings, indexed by the NAND_OMAP_* enum value */
static const char * const nand_xfer_types[] = {
	[NAND_OMAP_PREFETCH_POLLED]	= "prefetch-polled",
	[NAND_OMAP_POLLED]		= "polled",
	[NAND_OMAP_PREFETCH_DMA]	= "prefetch-dma",
	[NAND_OMAP_PREFETCH_IRQ]	= "prefetch-irq",
};
1572
1573static int gpmc_probe_nand_child(struct platform_device *pdev,
1574 struct device_node *child)
1575{
1576 u32 val;
1577 const char *s;
1578 struct gpmc_timings gpmc_t;
1579 struct omap_nand_platform_data *gpmc_nand_data;
1580
1581 if (of_property_read_u32(child, "reg", &val) < 0) {
1582 dev_err(&pdev->dev, "%s has no 'reg' property\n",
1583 child->full_name);
1584 return -ENODEV;
1585 }
1586
1587 gpmc_nand_data = devm_kzalloc(&pdev->dev, sizeof(*gpmc_nand_data),
1588 GFP_KERNEL);
1589 if (!gpmc_nand_data)
1590 return -ENOMEM;
1591
1592 gpmc_nand_data->cs = val;
1593 gpmc_nand_data->of_node = child;
1594
1595 /* Detect availability of ELM module */
1596 gpmc_nand_data->elm_of_node = of_parse_phandle(child, "ti,elm-id", 0);
1597 if (gpmc_nand_data->elm_of_node == NULL)
1598 gpmc_nand_data->elm_of_node =
1599 of_parse_phandle(child, "elm_id", 0);
1600
1601 /* select ecc-scheme for NAND */
1602 if (of_property_read_string(child, "ti,nand-ecc-opt", &s)) {
1603 pr_err("%s: ti,nand-ecc-opt not found\n", __func__);
1604 return -ENODEV;
1605 }
1606
1607 if (!strcmp(s, "sw"))
1608 gpmc_nand_data->ecc_opt = OMAP_ECC_HAM1_CODE_SW;
1609 else if (!strcmp(s, "ham1") ||
1610 !strcmp(s, "hw") || !strcmp(s, "hw-romcode"))
1611 gpmc_nand_data->ecc_opt =
1612 OMAP_ECC_HAM1_CODE_HW;
1613 else if (!strcmp(s, "bch4"))
1614 if (gpmc_nand_data->elm_of_node)
1615 gpmc_nand_data->ecc_opt =
1616 OMAP_ECC_BCH4_CODE_HW;
1617 else
1618 gpmc_nand_data->ecc_opt =
1619 OMAP_ECC_BCH4_CODE_HW_DETECTION_SW;
1620 else if (!strcmp(s, "bch8"))
1621 if (gpmc_nand_data->elm_of_node)
1622 gpmc_nand_data->ecc_opt =
1623 OMAP_ECC_BCH8_CODE_HW;
1624 else
1625 gpmc_nand_data->ecc_opt =
1626 OMAP_ECC_BCH8_CODE_HW_DETECTION_SW;
1627 else if (!strcmp(s, "bch16"))
1628 if (gpmc_nand_data->elm_of_node)
1629 gpmc_nand_data->ecc_opt =
1630 OMAP_ECC_BCH16_CODE_HW;
1631 else
1632 pr_err("%s: BCH16 requires ELM support\n", __func__);
1633 else
1634 pr_err("%s: ti,nand-ecc-opt invalid value\n", __func__);
1635
1636 /* select data transfer mode for NAND controller */
1637 if (!of_property_read_string(child, "ti,nand-xfer-type", &s))
1638 for (val = 0; val < ARRAY_SIZE(nand_xfer_types); val++)
1639 if (!strcasecmp(s, nand_xfer_types[val])) {
1640 gpmc_nand_data->xfer_type = val;
1641 break;
1642 }
1643
1644 gpmc_nand_data->flash_bbt = of_get_nand_on_flash_bbt(child);
1645
1646 val = of_get_nand_bus_width(child);
1647 if (val == 16)
1648 gpmc_nand_data->devsize = NAND_BUSWIDTH_16;
1649
1650 gpmc_read_timings_dt(child, &gpmc_t);
1651 gpmc_nand_init(gpmc_nand_data, &gpmc_t);
1652
1653 return 0;
1654}
1655#else
/* NAND support compiled out: probing a "nand" child is a successful no-op */
static int gpmc_probe_nand_child(struct platform_device *pdev,
				 struct device_node *child)
{
	return 0;
}
1661#endif
1662
1663#if IS_ENABLED(CONFIG_MTD_ONENAND)
1664static int gpmc_probe_onenand_child(struct platform_device *pdev,
1665 struct device_node *child)
1666{
1667 u32 val;
1668 struct omap_onenand_platform_data *gpmc_onenand_data;
1669
1670 if (of_property_read_u32(child, "reg", &val) < 0) {
1671 dev_err(&pdev->dev, "%s has no 'reg' property\n",
1672 child->full_name);
1673 return -ENODEV;
1674 }
1675
1676 gpmc_onenand_data = devm_kzalloc(&pdev->dev, sizeof(*gpmc_onenand_data),
1677 GFP_KERNEL);
1678 if (!gpmc_onenand_data)
1679 return -ENOMEM;
1680
1681 gpmc_onenand_data->cs = val;
1682 gpmc_onenand_data->of_node = child;
1683 gpmc_onenand_data->dma_channel = -1;
1684
1685 if (!of_property_read_u32(child, "dma-channel", &val))
1686 gpmc_onenand_data->dma_channel = val;
1687
1688 gpmc_onenand_init(gpmc_onenand_data);
1689
1690 return 0;
1691}
1692#else
/* OneNAND support compiled out: probing an "onenand" child is a no-op */
static int gpmc_probe_onenand_child(struct platform_device *pdev,
				    struct device_node *child)
{
	return 0;
}
1698#endif
1699
1700/**
1701 * gpmc_probe_generic_child - configures the gpmc for a child device
1702 * @pdev: pointer to gpmc platform device
1703 * @child: pointer to device-tree node for child device
1704 *
1705 * Allocates and configures a GPMC chip-select for a child device.
1706 * Returns 0 on success and appropriate negative error code on failure.
1707 */
1708static int gpmc_probe_generic_child(struct platform_device *pdev,
1709 struct device_node *child)
1710{
1711 struct gpmc_settings gpmc_s;
1712 struct gpmc_timings gpmc_t;
1713 struct resource res;
1714 unsigned long base;
1715 const char *name;
1716 int ret, cs;
1717 u32 val;
1718
1719 if (of_property_read_u32(child, "reg", &cs) < 0) {
1720 dev_err(&pdev->dev, "%s has no 'reg' property\n",
1721 child->full_name);
1722 return -ENODEV;
1723 }
1724
1725 if (of_address_to_resource(child, 0, &res) < 0) {
1726 dev_err(&pdev->dev, "%s has malformed 'reg' property\n",
1727 child->full_name);
1728 return -ENODEV;
1729 }
1730
1731 /*
1732 * Check if we have multiple instances of the same device
1733 * on a single chip select. If so, use the already initialized
1734 * timings.
1735 */
1736 name = gpmc_cs_get_name(cs);
1737 if (name && child->name && of_node_cmp(child->name, name) == 0)
1738 goto no_timings;
1739
1740 ret = gpmc_cs_request(cs, resource_size(&res), &base);
1741 if (ret < 0) {
1742 dev_err(&pdev->dev, "cannot request GPMC CS %d\n", cs);
1743 return ret;
1744 }
1745 gpmc_cs_set_name(cs, child->name);
1746
1747 gpmc_read_settings_dt(child, &gpmc_s);
1748 gpmc_read_timings_dt(child, &gpmc_t);
1749
1750 /*
1751 * For some GPMC devices we still need to rely on the bootloader
1752 * timings because the devices can be connected via FPGA.
1753 * REVISIT: Add timing support from slls644g.pdf.
1754 */
1755 if (!gpmc_t.cs_rd_off) {
1756 WARN(1, "enable GPMC debug to configure .dts timings for CS%i\n",
1757 cs);
1758 gpmc_cs_show_timings(cs,
1759 "please add GPMC bootloader timings to .dts");
1760 goto no_timings;
1761 }
1762
1763 /* CS must be disabled while making changes to gpmc configuration */
1764 gpmc_cs_disable_mem(cs);
1765
1766 /*
1767 * FIXME: gpmc_cs_request() will map the CS to an arbitary
1768 * location in the gpmc address space. When booting with
1769 * device-tree we want the NOR flash to be mapped to the
1770 * location specified in the device-tree blob. So remap the
1771 * CS to this location. Once DT migration is complete should
1772 * just make gpmc_cs_request() map a specific address.
1773 */
1774 ret = gpmc_cs_remap(cs, res.start);
1775 if (ret < 0) {
1776 dev_err(&pdev->dev, "cannot remap GPMC CS %d to %pa\n",
1777 cs, &res.start);
1778 goto err;
1779 }
1780
1781 ret = of_property_read_u32(child, "bank-width", &gpmc_s.device_width);
1782 if (ret < 0)
1783 goto err;
1784
1785 ret = gpmc_cs_program_settings(cs, &gpmc_s);
1786 if (ret < 0)
1787 goto err;
1788
1789 ret = gpmc_cs_set_timings(cs, &gpmc_t);
1790 if (ret) {
1791 dev_err(&pdev->dev, "failed to set gpmc timings for: %s\n",
1792 child->name);
1793 goto err;
1794 }
1795
1796 /* Clear limited address i.e. enable A26-A11 */
1797 val = gpmc_read_reg(GPMC_CONFIG);
1798 val &= ~GPMC_CONFIG_LIMITEDADDRESS;
1799 gpmc_write_reg(GPMC_CONFIG, val);
1800
1801 /* Enable CS region */
1802 gpmc_cs_enable_mem(cs);
1803
1804no_timings:
1805 if (of_platform_device_create(child, NULL, &pdev->dev))
1806 return 0;
1807
1808 dev_err(&pdev->dev, "failed to create gpmc child %s\n", child->name);
1809 ret = -ENODEV;
1810
1811err:
1812 gpmc_cs_free(cs);
1813
1814 return ret;
1815}
1816
1817static int gpmc_probe_dt(struct platform_device *pdev)
1818{
1819 int ret;
1820 struct device_node *child;
1821 const struct of_device_id *of_id =
1822 of_match_device(gpmc_dt_ids, &pdev->dev);
1823
1824 if (!of_id)
1825 return 0;
1826
1827 ret = of_property_read_u32(pdev->dev.of_node, "gpmc,num-cs",
1828 &gpmc_cs_num);
1829 if (ret < 0) {
1830 pr_err("%s: number of chip-selects not defined\n", __func__);
1831 return ret;
1832 } else if (gpmc_cs_num < 1) {
1833 pr_err("%s: all chip-selects are disabled\n", __func__);
1834 return -EINVAL;
1835 } else if (gpmc_cs_num > GPMC_CS_NUM) {
1836 pr_err("%s: number of supported chip-selects cannot be > %d\n",
1837 __func__, GPMC_CS_NUM);
1838 return -EINVAL;
1839 }
1840
1841 ret = of_property_read_u32(pdev->dev.of_node, "gpmc,num-waitpins",
1842 &gpmc_nr_waitpins);
1843 if (ret < 0) {
1844 pr_err("%s: number of wait pins not found!\n", __func__);
1845 return ret;
1846 }
1847
1848 for_each_available_child_of_node(pdev->dev.of_node, child) {
1849
1850 if (!child->name)
1851 continue;
1852
1853 if (of_node_cmp(child->name, "nand") == 0)
1854 ret = gpmc_probe_nand_child(pdev, child);
1855 else if (of_node_cmp(child->name, "onenand") == 0)
1856 ret = gpmc_probe_onenand_child(pdev, child);
1857 else if (of_node_cmp(child->name, "ethernet") == 0 ||
1858 of_node_cmp(child->name, "nor") == 0 ||
1859 of_node_cmp(child->name, "uart") == 0)
1860 ret = gpmc_probe_generic_child(pdev, child);
1861
1862 if (WARN(ret < 0, "%s: probing gpmc child %s failed\n",
1863 __func__, child->full_name))
1864 of_node_put(child);
1865 }
1866
1867 return 0;
1868}
1869#else
/* !CONFIG_OF stub: no device-tree children to probe, report success. */
static int gpmc_probe_dt(struct platform_device *pdev)
{
	return 0;
}
1874#endif
1875
/*
 * gpmc_probe - platform probe for the GPMC controller.
 *
 * Maps the register space, fetches the functional clock ("fck"),
 * enables runtime PM, derives capability flags from the IP revision,
 * initializes the chip-select memory map and IRQ demux, and finally
 * parses any device-tree children.  Returns 0 or a negative errno.
 */
static int gpmc_probe(struct platform_device *pdev)
{
	int rc;
	u32 l;
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL)
		return -ENOENT;

	phys_base = res->start;
	mem_size = resource_size(res);

	gpmc_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(gpmc_base))
		return PTR_ERR(gpmc_base);

	/* The interrupt is optional: warn but continue without it. */
	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (res == NULL)
		dev_warn(&pdev->dev, "Failed to get resource: irq\n");
	else
		gpmc_irq = res->start;

	gpmc_l3_clk = devm_clk_get(&pdev->dev, "fck");
	if (IS_ERR(gpmc_l3_clk)) {
		dev_err(&pdev->dev, "Failed to get GPMC fck\n");
		gpmc_irq = 0;
		return PTR_ERR(gpmc_l3_clk);
	}

	if (!clk_get_rate(gpmc_l3_clk)) {
		dev_err(&pdev->dev, "Invalid GPMC fck clock rate\n");
		return -EINVAL;
	}

	/* Keep the module powered while we program its registers. */
	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	gpmc_dev = &pdev->dev;

	l = gpmc_read_reg(GPMC_REVISION);

	/*
	 * FIXME: Once device-tree migration is complete the below flags
	 * should be populated based upon the device-tree compatible
	 * string. For now just use the IP revision. OMAP3+ devices have
	 * the wr_access and wr_data_mux_bus register fields. OMAP4+
	 * devices support the addr-addr-data multiplex protocol.
	 *
	 * GPMC IP revisions:
	 * - OMAP24xx             = 2.0
	 * - OMAP3xxx             = 5.0
	 * - OMAP44xx/54xx/AM335x = 6.0
	 */
	if (GPMC_REVISION_MAJOR(l) > 0x4)
		gpmc_capability = GPMC_HAS_WR_ACCESS | GPMC_HAS_WR_DATA_MUX_BUS;
	if (GPMC_REVISION_MAJOR(l) > 0x5)
		gpmc_capability |= GPMC_HAS_MUX_AAD;
	dev_info(gpmc_dev, "GPMC revision %d.%d\n", GPMC_REVISION_MAJOR(l),
		 GPMC_REVISION_MINOR(l));

	gpmc_mem_init();

	/* IRQ demux setup failure is non-fatal; the bus still works. */
	if (gpmc_setup_irq() < 0)
		dev_warn(gpmc_dev, "gpmc_setup_irq failed\n");

	/* Without DT, assume the full set of chip-selects and wait pins. */
	if (!pdev->dev.of_node) {
		gpmc_cs_num = GPMC_CS_NUM;
		gpmc_nr_waitpins = GPMC_NR_WAITPINS;
	}

	rc = gpmc_probe_dt(pdev);
	if (rc < 0) {
		/* balance the pm_runtime_get_sync() above */
		pm_runtime_put_sync(&pdev->dev);
		dev_err(gpmc_dev, "failed to probe DT parameters\n");
		return rc;
	}

	return 0;
}
1956
/*
 * gpmc_remove - undo gpmc_probe: tear down the IRQ demux and the
 * chip-select memory map, then drop the runtime-PM usage reference
 * taken at probe time and disable runtime PM.
 */
static int gpmc_remove(struct platform_device *pdev)
{
	gpmc_free_irq();
	gpmc_mem_exit();
	/* balance pm_runtime_get_sync()/pm_runtime_enable() from probe */
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	gpmc_dev = NULL;
	return 0;
}
1966
1967#ifdef CONFIG_PM_SLEEP
/* Save the GPMC register context, then allow the module to power down. */
static int gpmc_suspend(struct device *dev)
{
	omap3_gpmc_save_context();
	pm_runtime_put_sync(dev);
	return 0;
}
1974
/* Power the module back up first, then restore the saved registers. */
static int gpmc_resume(struct device *dev)
{
	pm_runtime_get_sync(dev);
	omap3_gpmc_restore_context();
	return 0;
}
1981#endif
1982
/* System sleep ops only (no runtime PM callbacks needed here). */
static SIMPLE_DEV_PM_OPS(gpmc_pm_ops, gpmc_suspend, gpmc_resume);

static struct platform_driver gpmc_driver = {
	.probe		= gpmc_probe,
	.remove		= gpmc_remove,
	.driver		= {
		.name	= DEVICE_NAME,
		.owner	= THIS_MODULE,
		.of_match_table = of_match_ptr(gpmc_dt_ids),
		.pm	= &gpmc_pm_ops,
	},
};
1995
/* Register the GPMC driver early (see postcore_initcall below). */
static __init int gpmc_init(void)
{
	return platform_driver_register(&gpmc_driver);
}
2000
/* Module-exit counterpart of gpmc_init(). */
static __exit void gpmc_exit(void)
{
	platform_driver_unregister(&gpmc_driver);
}

/* postcore: the GPMC bus must exist before devices behind it probe. */
postcore_initcall(gpmc_init);
module_exit(gpmc_exit);
2009
2010static irqreturn_t gpmc_handle_irq(int irq, void *dev)
2011{
2012 int i;
2013 u32 regval;
2014
2015 regval = gpmc_read_reg(GPMC_IRQSTATUS);
2016
2017 if (!regval)
2018 return IRQ_NONE;
2019
2020 for (i = 0; i < GPMC_NR_IRQ; i++)
2021 if (regval & gpmc_client_irq[i].bitmask)
2022 generic_handle_irq(gpmc_client_irq[i].irq);
2023
2024 gpmc_write_reg(GPMC_IRQSTATUS, regval);
2025
2026 return IRQ_HANDLED;
2027}
2028
2029static struct omap3_gpmc_regs gpmc_context;
2030
2031void omap3_gpmc_save_context(void)
2032{
2033 int i;
2034
2035 gpmc_context.sysconfig = gpmc_read_reg(GPMC_SYSCONFIG);
2036 gpmc_context.irqenable = gpmc_read_reg(GPMC_IRQENABLE);
2037 gpmc_context.timeout_ctrl = gpmc_read_reg(GPMC_TIMEOUT_CONTROL);
2038 gpmc_context.config = gpmc_read_reg(GPMC_CONFIG);
2039 gpmc_context.prefetch_config1 = gpmc_read_reg(GPMC_PREFETCH_CONFIG1);
2040 gpmc_context.prefetch_config2 = gpmc_read_reg(GPMC_PREFETCH_CONFIG2);
2041 gpmc_context.prefetch_control = gpmc_read_reg(GPMC_PREFETCH_CONTROL);
2042 for (i = 0; i < gpmc_cs_num; i++) {
2043 gpmc_context.cs_context[i].is_valid = gpmc_cs_mem_enabled(i);
2044 if (gpmc_context.cs_context[i].is_valid) {
2045 gpmc_context.cs_context[i].config1 =
2046 gpmc_cs_read_reg(i, GPMC_CS_CONFIG1);
2047 gpmc_context.cs_context[i].config2 =
2048 gpmc_cs_read_reg(i, GPMC_CS_CONFIG2);
2049 gpmc_context.cs_context[i].config3 =
2050 gpmc_cs_read_reg(i, GPMC_CS_CONFIG3);
2051 gpmc_context.cs_context[i].config4 =
2052 gpmc_cs_read_reg(i, GPMC_CS_CONFIG4);
2053 gpmc_context.cs_context[i].config5 =
2054 gpmc_cs_read_reg(i, GPMC_CS_CONFIG5);
2055 gpmc_context.cs_context[i].config6 =
2056 gpmc_cs_read_reg(i, GPMC_CS_CONFIG6);
2057 gpmc_context.cs_context[i].config7 =
2058 gpmc_cs_read_reg(i, GPMC_CS_CONFIG7);
2059 }
2060 }
2061}
2062
2063void omap3_gpmc_restore_context(void)
2064{
2065 int i;
2066
2067 gpmc_write_reg(GPMC_SYSCONFIG, gpmc_context.sysconfig);
2068 gpmc_write_reg(GPMC_IRQENABLE, gpmc_context.irqenable);
2069 gpmc_write_reg(GPMC_TIMEOUT_CONTROL, gpmc_context.timeout_ctrl);
2070 gpmc_write_reg(GPMC_CONFIG, gpmc_context.config);
2071 gpmc_write_reg(GPMC_PREFETCH_CONFIG1, gpmc_context.prefetch_config1);
2072 gpmc_write_reg(GPMC_PREFETCH_CONFIG2, gpmc_context.prefetch_config2);
2073 gpmc_write_reg(GPMC_PREFETCH_CONTROL, gpmc_context.prefetch_control);
2074 for (i = 0; i < gpmc_cs_num; i++) {
2075 if (gpmc_context.cs_context[i].is_valid) {
2076 gpmc_cs_write_reg(i, GPMC_CS_CONFIG1,
2077 gpmc_context.cs_context[i].config1);
2078 gpmc_cs_write_reg(i, GPMC_CS_CONFIG2,
2079 gpmc_context.cs_context[i].config2);
2080 gpmc_cs_write_reg(i, GPMC_CS_CONFIG3,
2081 gpmc_context.cs_context[i].config3);
2082 gpmc_cs_write_reg(i, GPMC_CS_CONFIG4,
2083 gpmc_context.cs_context[i].config4);
2084 gpmc_cs_write_reg(i, GPMC_CS_CONFIG5,
2085 gpmc_context.cs_context[i].config5);
2086 gpmc_cs_write_reg(i, GPMC_CS_CONFIG6,
2087 gpmc_context.cs_context[i].config6);
2088 gpmc_cs_write_reg(i, GPMC_CS_CONFIG7,
2089 gpmc_context.cs_context[i].config7);
2090 }
2091 }
2092}
diff --git a/drivers/memory/tegra/Kconfig b/drivers/memory/tegra/Kconfig
new file mode 100644
index 000000000000..571087621827
--- /dev/null
+++ b/drivers/memory/tegra/Kconfig
@@ -0,0 +1,7 @@
1config TEGRA_MC
2 bool "NVIDIA Tegra Memory Controller support"
3 default y
4 depends on ARCH_TEGRA
5 help
6 This driver supports the Memory Controller (MC) hardware found on
7 NVIDIA Tegra SoCs.
diff --git a/drivers/memory/tegra/Makefile b/drivers/memory/tegra/Makefile
new file mode 100644
index 000000000000..0d9f497b786c
--- /dev/null
+++ b/drivers/memory/tegra/Makefile
@@ -0,0 +1,7 @@
1tegra-mc-y := mc.o
2
3tegra-mc-$(CONFIG_ARCH_TEGRA_3x_SOC) += tegra30.o
4tegra-mc-$(CONFIG_ARCH_TEGRA_114_SOC) += tegra114.o
5tegra-mc-$(CONFIG_ARCH_TEGRA_124_SOC) += tegra124.o
6
7obj-$(CONFIG_TEGRA_MC) += tegra-mc.o
diff --git a/drivers/memory/tegra/mc.c b/drivers/memory/tegra/mc.c
new file mode 100644
index 000000000000..fe3c44e7e1d1
--- /dev/null
+++ b/drivers/memory/tegra/mc.c
@@ -0,0 +1,301 @@
1/*
2 * Copyright (C) 2014 NVIDIA CORPORATION. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#include <linux/clk.h>
10#include <linux/interrupt.h>
11#include <linux/kernel.h>
12#include <linux/module.h>
13#include <linux/of.h>
14#include <linux/platform_device.h>
15#include <linux/slab.h>
16
17#include "mc.h"
18
19#define MC_INTSTATUS 0x000
20#define MC_INT_DECERR_MTS (1 << 16)
21#define MC_INT_SECERR_SEC (1 << 13)
22#define MC_INT_DECERR_VPR (1 << 12)
23#define MC_INT_INVALID_APB_ASID_UPDATE (1 << 11)
24#define MC_INT_INVALID_SMMU_PAGE (1 << 10)
25#define MC_INT_ARBITRATION_EMEM (1 << 9)
26#define MC_INT_SECURITY_VIOLATION (1 << 8)
27#define MC_INT_DECERR_EMEM (1 << 6)
28
29#define MC_INTMASK 0x004
30
31#define MC_ERR_STATUS 0x08
32#define MC_ERR_STATUS_TYPE_SHIFT 28
33#define MC_ERR_STATUS_TYPE_INVALID_SMMU_PAGE (6 << MC_ERR_STATUS_TYPE_SHIFT)
34#define MC_ERR_STATUS_TYPE_MASK (0x7 << MC_ERR_STATUS_TYPE_SHIFT)
35#define MC_ERR_STATUS_READABLE (1 << 27)
36#define MC_ERR_STATUS_WRITABLE (1 << 26)
37#define MC_ERR_STATUS_NONSECURE (1 << 25)
38#define MC_ERR_STATUS_ADR_HI_SHIFT 20
39#define MC_ERR_STATUS_ADR_HI_MASK 0x3
40#define MC_ERR_STATUS_SECURITY (1 << 17)
41#define MC_ERR_STATUS_RW (1 << 16)
42#define MC_ERR_STATUS_CLIENT_MASK 0x7f
43
44#define MC_ERR_ADR 0x0c
45
46#define MC_EMEM_ARB_CFG 0x90
47#define MC_EMEM_ARB_CFG_CYCLES_PER_UPDATE(x) (((x) & 0x1ff) << 0)
48#define MC_EMEM_ARB_CFG_CYCLES_PER_UPDATE_MASK 0x1ff
49#define MC_EMEM_ARB_MISC0 0xd8
50
/*
 * OF match table: each supported SoC generation carries its own
 * tegra_mc_soc descriptor; entries are compiled in only when the
 * corresponding SoC config option is enabled.
 */
static const struct of_device_id tegra_mc_of_match[] = {
#ifdef CONFIG_ARCH_TEGRA_3x_SOC
	{ .compatible = "nvidia,tegra30-mc", .data = &tegra30_mc_soc },
#endif
#ifdef CONFIG_ARCH_TEGRA_114_SOC
	{ .compatible = "nvidia,tegra114-mc", .data = &tegra114_mc_soc },
#endif
#ifdef CONFIG_ARCH_TEGRA_124_SOC
	{ .compatible = "nvidia,tegra124-mc", .data = &tegra124_mc_soc },
#endif
	{ }
};
MODULE_DEVICE_TABLE(of, tegra_mc_of_match);
64
65static int tegra_mc_setup_latency_allowance(struct tegra_mc *mc)
66{
67 unsigned long long tick;
68 unsigned int i;
69 u32 value;
70
71 /* compute the number of MC clock cycles per tick */
72 tick = mc->tick * clk_get_rate(mc->clk);
73 do_div(tick, NSEC_PER_SEC);
74
75 value = readl(mc->regs + MC_EMEM_ARB_CFG);
76 value &= ~MC_EMEM_ARB_CFG_CYCLES_PER_UPDATE_MASK;
77 value |= MC_EMEM_ARB_CFG_CYCLES_PER_UPDATE(tick);
78 writel(value, mc->regs + MC_EMEM_ARB_CFG);
79
80 /* write latency allowance defaults */
81 for (i = 0; i < mc->soc->num_clients; i++) {
82 const struct tegra_mc_la *la = &mc->soc->clients[i].la;
83 u32 value;
84
85 value = readl(mc->regs + la->reg);
86 value &= ~(la->mask << la->shift);
87 value |= (la->def & la->mask) << la->shift;
88 writel(value, mc->regs + la->reg);
89 }
90
91 return 0;
92}
93
/* Human-readable names for MC_INTSTATUS bits (index == bit number). */
static const char *const status_names[32] = {
	[ 1] = "External interrupt",
	[ 6] = "EMEM address decode error",
	[ 8] = "Security violation",
	[ 9] = "EMEM arbitration error",
	[10] = "Page fault",
	[11] = "Invalid APB ASID update",
	[12] = "VPR violation",
	[13] = "Secure carveout violation",
	[16] = "MTS carveout violation",
};
105
/* Names for the MC_ERR_STATUS type field (index == 3-bit type code). */
static const char *const error_names[8] = {
	[2] = "EMEM decode error",
	[3] = "TrustZone violation",
	[4] = "Carveout violation",
	[6] = "SMMU translation error",
};
112
113static irqreturn_t tegra_mc_irq(int irq, void *data)
114{
115 struct tegra_mc *mc = data;
116 unsigned long status, mask;
117 unsigned int bit;
118
119 /* mask all interrupts to avoid flooding */
120 status = mc_readl(mc, MC_INTSTATUS);
121 mask = mc_readl(mc, MC_INTMASK);
122
123 for_each_set_bit(bit, &status, 32) {
124 const char *error = status_names[bit] ?: "unknown";
125 const char *client = "unknown", *desc;
126 const char *direction, *secure;
127 phys_addr_t addr = 0;
128 unsigned int i;
129 char perm[7];
130 u8 id, type;
131 u32 value;
132
133 value = mc_readl(mc, MC_ERR_STATUS);
134
135#ifdef CONFIG_PHYS_ADDR_T_64BIT
136 if (mc->soc->num_address_bits > 32) {
137 addr = ((value >> MC_ERR_STATUS_ADR_HI_SHIFT) &
138 MC_ERR_STATUS_ADR_HI_MASK);
139 addr <<= 32;
140 }
141#endif
142
143 if (value & MC_ERR_STATUS_RW)
144 direction = "write";
145 else
146 direction = "read";
147
148 if (value & MC_ERR_STATUS_SECURITY)
149 secure = "secure ";
150 else
151 secure = "";
152
153 id = value & MC_ERR_STATUS_CLIENT_MASK;
154
155 for (i = 0; i < mc->soc->num_clients; i++) {
156 if (mc->soc->clients[i].id == id) {
157 client = mc->soc->clients[i].name;
158 break;
159 }
160 }
161
162 type = (value & MC_ERR_STATUS_TYPE_MASK) >>
163 MC_ERR_STATUS_TYPE_SHIFT;
164 desc = error_names[type];
165
166 switch (value & MC_ERR_STATUS_TYPE_MASK) {
167 case MC_ERR_STATUS_TYPE_INVALID_SMMU_PAGE:
168 perm[0] = ' ';
169 perm[1] = '[';
170
171 if (value & MC_ERR_STATUS_READABLE)
172 perm[2] = 'R';
173 else
174 perm[2] = '-';
175
176 if (value & MC_ERR_STATUS_WRITABLE)
177 perm[3] = 'W';
178 else
179 perm[3] = '-';
180
181 if (value & MC_ERR_STATUS_NONSECURE)
182 perm[4] = '-';
183 else
184 perm[4] = 'S';
185
186 perm[5] = ']';
187 perm[6] = '\0';
188 break;
189
190 default:
191 perm[0] = '\0';
192 break;
193 }
194
195 value = mc_readl(mc, MC_ERR_ADR);
196 addr |= value;
197
198 dev_err_ratelimited(mc->dev, "%s: %s%s @%pa: %s (%s%s)\n",
199 client, secure, direction, &addr, error,
200 desc, perm);
201 }
202
203 /* clear interrupts */
204 mc_writel(mc, status, MC_INTSTATUS);
205
206 return IRQ_HANDLED;
207}
208
/*
 * tegra_mc_probe - bring up the memory controller.
 *
 * Looks up the per-SoC descriptor, maps the register window, grabs the
 * "mc" clock, programs latency-allowance defaults, optionally probes
 * the SMMU, and installs the shared error interrupt before unmasking
 * the error sources of interest.  All resources are devm-managed, so
 * error paths simply return.
 */
static int tegra_mc_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	struct resource *res;
	struct tegra_mc *mc;
	u32 value;
	int err;

	match = of_match_node(tegra_mc_of_match, pdev->dev.of_node);
	if (!match)
		return -ENODEV;

	mc = devm_kzalloc(&pdev->dev, sizeof(*mc), GFP_KERNEL);
	if (!mc)
		return -ENOMEM;

	platform_set_drvdata(pdev, mc);
	mc->soc = match->data;
	mc->dev = &pdev->dev;

	/* length of MC tick in nanoseconds */
	mc->tick = 30;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mc->regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(mc->regs))
		return PTR_ERR(mc->regs);

	mc->clk = devm_clk_get(&pdev->dev, "mc");
	if (IS_ERR(mc->clk)) {
		dev_err(&pdev->dev, "failed to get MC clock: %ld\n",
			PTR_ERR(mc->clk));
		return PTR_ERR(mc->clk);
	}

	err = tegra_mc_setup_latency_allowance(mc);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to setup latency allowance: %d\n",
			err);
		return err;
	}

	/* the SMMU is only instantiated when IOMMU support is built in */
	if (IS_ENABLED(CONFIG_TEGRA_IOMMU_SMMU)) {
		mc->smmu = tegra_smmu_probe(&pdev->dev, mc->soc->smmu, mc);
		if (IS_ERR(mc->smmu)) {
			dev_err(&pdev->dev, "failed to probe SMMU: %ld\n",
				PTR_ERR(mc->smmu));
			return PTR_ERR(mc->smmu);
		}
	}

	mc->irq = platform_get_irq(pdev, 0);
	if (mc->irq < 0) {
		dev_err(&pdev->dev, "interrupt not specified\n");
		return mc->irq;
	}

	err = devm_request_irq(&pdev->dev, mc->irq, tegra_mc_irq, IRQF_SHARED,
			       dev_name(&pdev->dev), mc);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to request IRQ#%u: %d\n", mc->irq,
			err);
		return err;
	}

	/* unmask the error interrupts we want reported */
	value = MC_INT_DECERR_MTS | MC_INT_SECERR_SEC | MC_INT_DECERR_VPR |
		MC_INT_INVALID_APB_ASID_UPDATE | MC_INT_INVALID_SMMU_PAGE |
		MC_INT_ARBITRATION_EMEM | MC_INT_SECURITY_VIOLATION |
		MC_INT_DECERR_EMEM;
	mc_writel(mc, value, MC_INTMASK);

	return 0;
}
282
/*
 * The MC must come up before its client devices: deferred probing is
 * prevented and unbinding via sysfs is suppressed accordingly.
 */
static struct platform_driver tegra_mc_driver = {
	.driver = {
		.name = "tegra-mc",
		.of_match_table = tegra_mc_of_match,
		.suppress_bind_attrs = true,
	},
	.prevent_deferred_probe = true,
	.probe = tegra_mc_probe,
};
292
293static int tegra_mc_init(void)
294{
295 return platform_driver_register(&tegra_mc_driver);
296}
297arch_initcall(tegra_mc_init);
298
299MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
300MODULE_DESCRIPTION("NVIDIA Tegra Memory Controller driver");
301MODULE_LICENSE("GPL v2");
diff --git a/drivers/memory/tegra/mc.h b/drivers/memory/tegra/mc.h
new file mode 100644
index 000000000000..d5d21147fc77
--- /dev/null
+++ b/drivers/memory/tegra/mc.h
@@ -0,0 +1,40 @@
1/*
2 * Copyright (C) 2014 NVIDIA CORPORATION. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#ifndef MEMORY_TEGRA_MC_H
10#define MEMORY_TEGRA_MC_H
11
12#include <linux/io.h>
13#include <linux/types.h>
14
15#include <soc/tegra/mc.h>
16
/* Read the 32-bit MC register at byte @offset. */
static inline u32 mc_readl(struct tegra_mc *mc, unsigned long offset)
{
	return readl(mc->regs + offset);
}
21
/* Write @value to the 32-bit MC register at byte @offset. */
static inline void mc_writel(struct tegra_mc *mc, u32 value,
			     unsigned long offset)
{
	writel(value, mc->regs + offset);
}
27
28#ifdef CONFIG_ARCH_TEGRA_3x_SOC
29extern const struct tegra_mc_soc tegra30_mc_soc;
30#endif
31
32#ifdef CONFIG_ARCH_TEGRA_114_SOC
33extern const struct tegra_mc_soc tegra114_mc_soc;
34#endif
35
36#ifdef CONFIG_ARCH_TEGRA_124_SOC
37extern const struct tegra_mc_soc tegra124_mc_soc;
38#endif
39
40#endif /* MEMORY_TEGRA_MC_H */
diff --git a/drivers/memory/tegra/tegra114.c b/drivers/memory/tegra/tegra114.c
new file mode 100644
index 000000000000..511e9a25c151
--- /dev/null
+++ b/drivers/memory/tegra/tegra114.c
@@ -0,0 +1,948 @@
1/*
2 * Copyright (C) 2014 NVIDIA CORPORATION. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#include <linux/of.h>
10#include <linux/mm.h>
11
12#include <asm/cacheflush.h>
13
14#include <dt-bindings/memory/tegra114-mc.h>
15
16#include "mc.h"
17
18static const struct tegra_mc_client tegra114_mc_clients[] = {
19 {
20 .id = 0x00,
21 .name = "ptcr",
22 .swgroup = TEGRA_SWGROUP_PTC,
23 }, {
24 .id = 0x01,
25 .name = "display0a",
26 .swgroup = TEGRA_SWGROUP_DC,
27 .smmu = {
28 .reg = 0x228,
29 .bit = 1,
30 },
31 .la = {
32 .reg = 0x2e8,
33 .shift = 0,
34 .mask = 0xff,
35 .def = 0x4e,
36 },
37 }, {
38 .id = 0x02,
39 .name = "display0ab",
40 .swgroup = TEGRA_SWGROUP_DCB,
41 .smmu = {
42 .reg = 0x228,
43 .bit = 2,
44 },
45 .la = {
46 .reg = 0x2f4,
47 .shift = 0,
48 .mask = 0xff,
49 .def = 0x4e,
50 },
51 }, {
52 .id = 0x03,
53 .name = "display0b",
54 .swgroup = TEGRA_SWGROUP_DC,
55 .smmu = {
56 .reg = 0x228,
57 .bit = 3,
58 },
59 .la = {
60 .reg = 0x2e8,
61 .shift = 16,
62 .mask = 0xff,
63 .def = 0x4e,
64 },
65 }, {
66 .id = 0x04,
67 .name = "display0bb",
68 .swgroup = TEGRA_SWGROUP_DCB,
69 .smmu = {
70 .reg = 0x228,
71 .bit = 4,
72 },
73 .la = {
74 .reg = 0x2f4,
75 .shift = 16,
76 .mask = 0xff,
77 .def = 0x4e,
78 },
79 }, {
80 .id = 0x05,
81 .name = "display0c",
82 .swgroup = TEGRA_SWGROUP_DC,
83 .smmu = {
84 .reg = 0x228,
85 .bit = 5,
86 },
87 .la = {
88 .reg = 0x2ec,
89 .shift = 0,
90 .mask = 0xff,
91 .def = 0x4e,
92 },
93 }, {
94 .id = 0x06,
95 .name = "display0cb",
96 .swgroup = TEGRA_SWGROUP_DCB,
97 .smmu = {
98 .reg = 0x228,
99 .bit = 6,
100 },
101 .la = {
102 .reg = 0x2f8,
103 .shift = 0,
104 .mask = 0xff,
105 .def = 0x4e,
106 },
107 }, {
108 .id = 0x09,
109 .name = "eppup",
110 .swgroup = TEGRA_SWGROUP_EPP,
111 .smmu = {
112 .reg = 0x228,
113 .bit = 9,
114 },
115 .la = {
116 .reg = 0x300,
117 .shift = 0,
118 .mask = 0xff,
119 .def = 0x33,
120 },
121 }, {
122 .id = 0x0a,
123 .name = "g2pr",
124 .swgroup = TEGRA_SWGROUP_G2,
125 .smmu = {
126 .reg = 0x228,
127 .bit = 10,
128 },
129 .la = {
130 .reg = 0x308,
131 .shift = 0,
132 .mask = 0xff,
133 .def = 0x09,
134 },
135 }, {
136 .id = 0x0b,
137 .name = "g2sr",
138 .swgroup = TEGRA_SWGROUP_G2,
139 .smmu = {
140 .reg = 0x228,
141 .bit = 11,
142 },
143 .la = {
144 .reg = 0x308,
145 .shift = 16,
146 .mask = 0xff,
147 .def = 0x09,
148 },
149 }, {
150 .id = 0x0f,
151 .name = "avpcarm7r",
152 .swgroup = TEGRA_SWGROUP_AVPC,
153 .smmu = {
154 .reg = 0x228,
155 .bit = 15,
156 },
157 .la = {
158 .reg = 0x2e4,
159 .shift = 0,
160 .mask = 0xff,
161 .def = 0x04,
162 },
163 }, {
164 .id = 0x10,
165 .name = "displayhc",
166 .swgroup = TEGRA_SWGROUP_DC,
167 .smmu = {
168 .reg = 0x228,
169 .bit = 16,
170 },
171 .la = {
172 .reg = 0x2f0,
173 .shift = 0,
174 .mask = 0xff,
175 .def = 0x68,
176 },
177 }, {
178 .id = 0x11,
179 .name = "displayhcb",
180 .swgroup = TEGRA_SWGROUP_DCB,
181 .smmu = {
182 .reg = 0x228,
183 .bit = 17,
184 },
185 .la = {
186 .reg = 0x2fc,
187 .shift = 0,
188 .mask = 0xff,
189 .def = 0x68,
190 },
191 }, {
192 .id = 0x12,
193 .name = "fdcdrd",
194 .swgroup = TEGRA_SWGROUP_NV,
195 .smmu = {
196 .reg = 0x228,
197 .bit = 18,
198 },
199 .la = {
200 .reg = 0x334,
201 .shift = 0,
202 .mask = 0xff,
203 .def = 0x0c,
204 },
205 }, {
206 .id = 0x13,
207 .name = "fdcdrd2",
208 .swgroup = TEGRA_SWGROUP_NV,
209 .smmu = {
210 .reg = 0x228,
211 .bit = 19,
212 },
213 .la = {
214 .reg = 0x33c,
215 .shift = 0,
216 .mask = 0xff,
217 .def = 0x0c,
218 },
219 }, {
220 .id = 0x14,
221 .name = "g2dr",
222 .swgroup = TEGRA_SWGROUP_G2,
223 .smmu = {
224 .reg = 0x228,
225 .bit = 20,
226 },
227 .la = {
228 .reg = 0x30c,
229 .shift = 0,
230 .mask = 0xff,
231 .def = 0x0a,
232 },
233 }, {
234 .id = 0x15,
235 .name = "hdar",
236 .swgroup = TEGRA_SWGROUP_HDA,
237 .smmu = {
238 .reg = 0x228,
239 .bit = 21,
240 },
241 .la = {
242 .reg = 0x318,
243 .shift = 0,
244 .mask = 0xff,
245 .def = 0xff,
246 },
247 }, {
248 .id = 0x16,
249 .name = "host1xdmar",
250 .swgroup = TEGRA_SWGROUP_HC,
251 .smmu = {
252 .reg = 0x228,
253 .bit = 22,
254 },
255 .la = {
256 .reg = 0x310,
257 .shift = 0,
258 .mask = 0xff,
259 .def = 0x10,
260 },
261 }, {
262 .id = 0x17,
263 .name = "host1xr",
264 .swgroup = TEGRA_SWGROUP_HC,
265 .smmu = {
266 .reg = 0x228,
267 .bit = 23,
268 },
269 .la = {
270 .reg = 0x310,
271 .shift = 16,
272 .mask = 0xff,
273 .def = 0xa5,
274 },
275 }, {
276 .id = 0x18,
277 .name = "idxsrd",
278 .swgroup = TEGRA_SWGROUP_NV,
279 .smmu = {
280 .reg = 0x228,
281 .bit = 24,
282 },
283 .la = {
284 .reg = 0x334,
285 .shift = 16,
286 .mask = 0xff,
287 .def = 0x0b,
288 },
289 }, {
290 .id = 0x1c,
291 .name = "msencsrd",
292 .swgroup = TEGRA_SWGROUP_MSENC,
293 .smmu = {
294 .reg = 0x228,
295 .bit = 28,
296 },
297 .la = {
298 .reg = 0x328,
299 .shift = 0,
300 .mask = 0xff,
301 .def = 0x80,
302 },
303 }, {
304 .id = 0x1d,
305 .name = "ppcsahbdmar",
306 .swgroup = TEGRA_SWGROUP_PPCS,
307 .smmu = {
308 .reg = 0x228,
309 .bit = 29,
310 },
311 .la = {
312 .reg = 0x344,
313 .shift = 0,
314 .mask = 0xff,
315 .def = 0x50,
316 },
317 }, {
318 .id = 0x1e,
319 .name = "ppcsahbslvr",
320 .swgroup = TEGRA_SWGROUP_PPCS,
321 .smmu = {
322 .reg = 0x228,
323 .bit = 30,
324 },
325 .la = {
326 .reg = 0x344,
327 .shift = 16,
328 .mask = 0xff,
329 .def = 0xe8,
330 },
331 }, {
332 .id = 0x20,
333 .name = "texl2srd",
334 .swgroup = TEGRA_SWGROUP_NV,
335 .smmu = {
336 .reg = 0x22c,
337 .bit = 0,
338 },
339 .la = {
340 .reg = 0x338,
341 .shift = 0,
342 .mask = 0xff,
343 .def = 0x0c,
344 },
345 }, {
346 .id = 0x22,
347 .name = "vdebsevr",
348 .swgroup = TEGRA_SWGROUP_VDE,
349 .smmu = {
350 .reg = 0x22c,
351 .bit = 2,
352 },
353 .la = {
354 .reg = 0x354,
355 .shift = 0,
356 .mask = 0xff,
357 .def = 0xff,
358 },
359 }, {
360 .id = 0x23,
361 .name = "vdember",
362 .swgroup = TEGRA_SWGROUP_VDE,
363 .smmu = {
364 .reg = 0x22c,
365 .bit = 3,
366 },
367 .la = {
368 .reg = 0x354,
369 .shift = 16,
370 .mask = 0xff,
371 .def = 0xff,
372 },
373 }, {
374 .id = 0x24,
375 .name = "vdemcer",
376 .swgroup = TEGRA_SWGROUP_VDE,
377 .smmu = {
378 .reg = 0x22c,
379 .bit = 4,
380 },
381 .la = {
382 .reg = 0x358,
383 .shift = 0,
384 .mask = 0xff,
385 .def = 0xb8,
386 },
387 }, {
388 .id = 0x25,
389 .name = "vdetper",
390 .swgroup = TEGRA_SWGROUP_VDE,
391 .smmu = {
392 .reg = 0x22c,
393 .bit = 5,
394 },
395 .la = {
396 .reg = 0x358,
397 .shift = 16,
398 .mask = 0xff,
399 .def = 0xee,
400 },
401 }, {
402 .id = 0x26,
403 .name = "mpcorelpr",
404 .swgroup = TEGRA_SWGROUP_MPCORELP,
405 .la = {
406 .reg = 0x324,
407 .shift = 0,
408 .mask = 0xff,
409 .def = 0x04,
410 },
411 }, {
412 .id = 0x27,
413 .name = "mpcorer",
414 .swgroup = TEGRA_SWGROUP_MPCORE,
415 .la = {
416 .reg = 0x320,
417 .shift = 0,
418 .mask = 0xff,
419 .def = 0x04,
420 },
421 }, {
422 .id = 0x28,
423 .name = "eppu",
424 .swgroup = TEGRA_SWGROUP_EPP,
425 .smmu = {
426 .reg = 0x22c,
427 .bit = 8,
428 },
429 .la = {
430 .reg = 0x300,
431 .shift = 16,
432 .mask = 0xff,
433 .def = 0x33,
434 },
435 }, {
436 .id = 0x29,
437 .name = "eppv",
438 .swgroup = TEGRA_SWGROUP_EPP,
439 .smmu = {
440 .reg = 0x22c,
441 .bit = 9,
442 },
443 .la = {
444 .reg = 0x304,
445 .shift = 0,
446 .mask = 0xff,
447 .def = 0x6c,
448 },
449 }, {
450 .id = 0x2a,
451 .name = "eppy",
452 .swgroup = TEGRA_SWGROUP_EPP,
453 .smmu = {
454 .reg = 0x22c,
455 .bit = 10,
456 },
457 .la = {
458 .reg = 0x304,
459 .shift = 16,
460 .mask = 0xff,
461 .def = 0x6c,
462 },
463 }, {
464 .id = 0x2b,
465 .name = "msencswr",
466 .swgroup = TEGRA_SWGROUP_MSENC,
467 .smmu = {
468 .reg = 0x22c,
469 .bit = 11,
470 },
471 .la = {
472 .reg = 0x328,
473 .shift = 16,
474 .mask = 0xff,
475 .def = 0x80,
476 },
477 }, {
478 .id = 0x2c,
479 .name = "viwsb",
480 .swgroup = TEGRA_SWGROUP_VI,
481 .smmu = {
482 .reg = 0x22c,
483 .bit = 12,
484 },
485 .la = {
486 .reg = 0x364,
487 .shift = 0,
488 .mask = 0xff,
489 .def = 0x47,
490 },
491 }, {
492 .id = 0x2d,
493 .name = "viwu",
494 .swgroup = TEGRA_SWGROUP_VI,
495 .smmu = {
496 .reg = 0x22c,
497 .bit = 13,
498 },
499 .la = {
500 .reg = 0x368,
501 .shift = 0,
502 .mask = 0xff,
503 .def = 0xff,
504 },
505 }, {
506 .id = 0x2e,
507 .name = "viwv",
508 .swgroup = TEGRA_SWGROUP_VI,
509 .smmu = {
510 .reg = 0x22c,
511 .bit = 14,
512 },
513 .la = {
514 .reg = 0x368,
515 .shift = 16,
516 .mask = 0xff,
517 .def = 0xff,
518 },
519 }, {
520 .id = 0x2f,
521 .name = "viwy",
522 .swgroup = TEGRA_SWGROUP_VI,
523 .smmu = {
524 .reg = 0x22c,
525 .bit = 15,
526 },
527 .la = {
528 .reg = 0x36c,
529 .shift = 0,
530 .mask = 0xff,
531 .def = 0x47,
532 },
533 }, {
534 .id = 0x30,
535 .name = "g2dw",
536 .swgroup = TEGRA_SWGROUP_G2,
537 .smmu = {
538 .reg = 0x22c,
539 .bit = 16,
540 },
541 .la = {
542 .reg = 0x30c,
543 .shift = 16,
544 .mask = 0xff,
545 .def = 0x9,
546 },
547 }, {
548 .id = 0x32,
549 .name = "avpcarm7w",
550 .swgroup = TEGRA_SWGROUP_AVPC,
551 .smmu = {
552 .reg = 0x22c,
553 .bit = 18,
554 },
555 .la = {
556 .reg = 0x2e4,
557 .shift = 16,
558 .mask = 0xff,
559 .def = 0x0e,
560 },
561 }, {
562 .id = 0x33,
563 .name = "fdcdwr",
564 .swgroup = TEGRA_SWGROUP_NV,
565 .smmu = {
566 .reg = 0x22c,
567 .bit = 19,
568 },
569 .la = {
570 .reg = 0x338,
571 .shift = 16,
572 .mask = 0xff,
573 .def = 0x10,
574 },
575 }, {
576 .id = 0x34,
577 .name = "fdcwr2",
578 .swgroup = TEGRA_SWGROUP_NV,
579 .smmu = {
580 .reg = 0x22c,
581 .bit = 20,
582 },
583 .la = {
584 .reg = 0x340,
585 .shift = 0,
586 .mask = 0xff,
587 .def = 0x10,
588 },
589 }, {
590 .id = 0x35,
591 .name = "hdaw",
592 .swgroup = TEGRA_SWGROUP_HDA,
593 .smmu = {
594 .reg = 0x22c,
595 .bit = 21,
596 },
597 .la = {
598 .reg = 0x318,
599 .shift = 16,
600 .mask = 0xff,
601 .def = 0xff,
602 },
603 }, {
604 .id = 0x36,
605 .name = "host1xw",
606 .swgroup = TEGRA_SWGROUP_HC,
607 .smmu = {
608 .reg = 0x22c,
609 .bit = 22,
610 },
611 .la = {
612 .reg = 0x314,
613 .shift = 0,
614 .mask = 0xff,
615 .def = 0x25,
616 },
617 }, {
618 .id = 0x37,
619 .name = "ispw",
620 .swgroup = TEGRA_SWGROUP_ISP,
621 .smmu = {
622 .reg = 0x22c,
623 .bit = 23,
624 },
625 .la = {
626 .reg = 0x31c,
627 .shift = 0,
628 .mask = 0xff,
629 .def = 0xff,
630 },
631 }, {
632 .id = 0x38,
633 .name = "mpcorelpw",
634 .swgroup = TEGRA_SWGROUP_MPCORELP,
635 .la = {
636 .reg = 0x324,
637 .shift = 16,
638 .mask = 0xff,
639 .def = 0x80,
640 },
641 }, {
642 .id = 0x39,
643 .name = "mpcorew",
644 .swgroup = TEGRA_SWGROUP_MPCORE,
645 .la = {
646 .reg = 0x320,
647 .shift = 16,
648 .mask = 0xff,
649 .def = 0x0e,
650 },
651 }, {
652 .id = 0x3b,
653 .name = "ppcsahbdmaw",
654 .swgroup = TEGRA_SWGROUP_PPCS,
655 .smmu = {
656 .reg = 0x22c,
657 .bit = 27,
658 },
659 .la = {
660 .reg = 0x348,
661 .shift = 0,
662 .mask = 0xff,
663 .def = 0xa5,
664 },
665 }, {
666 .id = 0x3c,
667 .name = "ppcsahbslvw",
668 .swgroup = TEGRA_SWGROUP_PPCS,
669 .smmu = {
670 .reg = 0x22c,
671 .bit = 28,
672 },
673 .la = {
674 .reg = 0x348,
675 .shift = 16,
676 .mask = 0xff,
677 .def = 0xe8,
678 },
679 }, {
680 .id = 0x3e,
681 .name = "vdebsevw",
682 .swgroup = TEGRA_SWGROUP_VDE,
683 .smmu = {
684 .reg = 0x22c,
685 .bit = 30,
686 },
687 .la = {
688 .reg = 0x35c,
689 .shift = 0,
690 .mask = 0xff,
691 .def = 0xff,
692 },
693 }, {
694 .id = 0x3f,
695 .name = "vdedbgw",
696 .swgroup = TEGRA_SWGROUP_VDE,
697 .smmu = {
698 .reg = 0x22c,
699 .bit = 31,
700 },
701 .la = {
702 .reg = 0x35c,
703 .shift = 16,
704 .mask = 0xff,
705 .def = 0xff,
706 },
707 }, {
708 .id = 0x40,
709 .name = "vdembew",
710 .swgroup = TEGRA_SWGROUP_VDE,
711 .smmu = {
712 .reg = 0x230,
713 .bit = 0,
714 },
715 .la = {
716 .reg = 0x360,
717 .shift = 0,
718 .mask = 0xff,
719 .def = 0x89,
720 },
721 }, {
722 .id = 0x41,
723 .name = "vdetpmw",
724 .swgroup = TEGRA_SWGROUP_VDE,
725 .smmu = {
726 .reg = 0x230,
727 .bit = 1,
728 },
729 .la = {
730 .reg = 0x360,
731 .shift = 16,
732 .mask = 0xff,
733 .def = 0x59,
734 },
735 }, {
736 .id = 0x4a,
737 .name = "xusb_hostr",
738 .swgroup = TEGRA_SWGROUP_XUSB_HOST,
739 .smmu = {
740 .reg = 0x230,
741 .bit = 10,
742 },
743 .la = {
744 .reg = 0x37c,
745 .shift = 0,
746 .mask = 0xff,
747 .def = 0xa5,
748 },
749 }, {
750 .id = 0x4b,
751 .name = "xusb_hostw",
752 .swgroup = TEGRA_SWGROUP_XUSB_HOST,
753 .smmu = {
754 .reg = 0x230,
755 .bit = 11,
756 },
757 .la = {
758 .reg = 0x37c,
759 .shift = 16,
760 .mask = 0xff,
761 .def = 0xa5,
762 },
763 }, {
764 .id = 0x4c,
765 .name = "xusb_devr",
766 .swgroup = TEGRA_SWGROUP_XUSB_DEV,
767 .smmu = {
768 .reg = 0x230,
769 .bit = 12,
770 },
771 .la = {
772 .reg = 0x380,
773 .shift = 0,
774 .mask = 0xff,
775 .def = 0xa5,
776 },
777 }, {
778 .id = 0x4d,
779 .name = "xusb_devw",
780 .swgroup = TEGRA_SWGROUP_XUSB_DEV,
781 .smmu = {
782 .reg = 0x230,
783 .bit = 13,
784 },
785 .la = {
786 .reg = 0x380,
787 .shift = 16,
788 .mask = 0xff,
789 .def = 0xa5,
790 },
791 }, {
792 .id = 0x4e,
793 .name = "fdcdwr3",
794 .swgroup = TEGRA_SWGROUP_NV,
795 .smmu = {
796 .reg = 0x230,
797 .bit = 14,
798 },
799 .la = {
800 .reg = 0x388,
801 .shift = 0,
802 .mask = 0xff,
803 .def = 0x10,
804 },
805 }, {
806 .id = 0x4f,
807 .name = "fdcdrd3",
808 .swgroup = TEGRA_SWGROUP_NV,
809 .smmu = {
810 .reg = 0x230,
811 .bit = 15,
812 },
813 .la = {
814 .reg = 0x384,
815 .shift = 0,
816 .mask = 0xff,
817 .def = 0x0c,
818 },
819 }, {
820 .id = 0x50,
821 .name = "fdcwr4",
822 .swgroup = TEGRA_SWGROUP_NV,
823 .smmu = {
824 .reg = 0x230,
825 .bit = 16,
826 },
827 .la = {
828 .reg = 0x388,
829 .shift = 16,
830 .mask = 0xff,
831 .def = 0x10,
832 },
833 }, {
834 .id = 0x51,
835 .name = "fdcrd4",
836 .swgroup = TEGRA_SWGROUP_NV,
837 .smmu = {
838 .reg = 0x230,
839 .bit = 17,
840 },
841 .la = {
842 .reg = 0x384,
843 .shift = 16,
844 .mask = 0xff,
845 .def = 0x0c,
846 },
847 }, {
848 .id = 0x52,
849 .name = "emucifr",
850 .swgroup = TEGRA_SWGROUP_EMUCIF,
851 .la = {
852 .reg = 0x38c,
853 .shift = 0,
854 .mask = 0xff,
855 .def = 0x04,
856 },
857 }, {
858 .id = 0x53,
859 .name = "emucifw",
860 .swgroup = TEGRA_SWGROUP_EMUCIF,
861 .la = {
862 .reg = 0x38c,
863 .shift = 16,
864 .mask = 0xff,
865 .def = 0x0e,
866 },
867 }, {
868 .id = 0x54,
869 .name = "tsecsrd",
870 .swgroup = TEGRA_SWGROUP_TSEC,
871 .smmu = {
872 .reg = 0x230,
873 .bit = 20,
874 },
875 .la = {
876 .reg = 0x390,
877 .shift = 0,
878 .mask = 0xff,
879 .def = 0x50,
880 },
881 }, {
882 .id = 0x55,
883 .name = "tsecswr",
884 .swgroup = TEGRA_SWGROUP_TSEC,
885 .smmu = {
886 .reg = 0x230,
887 .bit = 21,
888 },
889 .la = {
890 .reg = 0x390,
891 .shift = 16,
892 .mask = 0xff,
893 .def = 0x50,
894 },
895 },
896};
897
898static const struct tegra_smmu_swgroup tegra114_swgroups[] = {
899 { .swgroup = TEGRA_SWGROUP_DC, .reg = 0x240 },
900 { .swgroup = TEGRA_SWGROUP_DCB, .reg = 0x244 },
901 { .swgroup = TEGRA_SWGROUP_EPP, .reg = 0x248 },
902 { .swgroup = TEGRA_SWGROUP_G2, .reg = 0x24c },
903 { .swgroup = TEGRA_SWGROUP_AVPC, .reg = 0x23c },
904 { .swgroup = TEGRA_SWGROUP_NV, .reg = 0x268 },
905 { .swgroup = TEGRA_SWGROUP_HDA, .reg = 0x254 },
906 { .swgroup = TEGRA_SWGROUP_HC, .reg = 0x250 },
907 { .swgroup = TEGRA_SWGROUP_MSENC, .reg = 0x264 },
908 { .swgroup = TEGRA_SWGROUP_PPCS, .reg = 0x270 },
909 { .swgroup = TEGRA_SWGROUP_VDE, .reg = 0x27c },
910 { .swgroup = TEGRA_SWGROUP_VI, .reg = 0x280 },
911 { .swgroup = TEGRA_SWGROUP_ISP, .reg = 0x258 },
912 { .swgroup = TEGRA_SWGROUP_XUSB_HOST, .reg = 0x288 },
913 { .swgroup = TEGRA_SWGROUP_XUSB_DEV, .reg = 0x28c },
914 { .swgroup = TEGRA_SWGROUP_TSEC, .reg = 0x294 },
915};
916
917static void tegra114_flush_dcache(struct page *page, unsigned long offset,
918 size_t size)
919{
920 phys_addr_t phys = page_to_phys(page) + offset;
921 void *virt = page_address(page) + offset;
922
923 __cpuc_flush_dcache_area(virt, size);
924 outer_flush_range(phys, phys + size);
925}
926
927static const struct tegra_smmu_ops tegra114_smmu_ops = {
928 .flush_dcache = tegra114_flush_dcache,
929};
930
931static const struct tegra_smmu_soc tegra114_smmu_soc = {
932 .clients = tegra114_mc_clients,
933 .num_clients = ARRAY_SIZE(tegra114_mc_clients),
934 .swgroups = tegra114_swgroups,
935 .num_swgroups = ARRAY_SIZE(tegra114_swgroups),
936 .supports_round_robin_arbitration = false,
937 .supports_request_limit = false,
938 .num_asids = 4,
939 .ops = &tegra114_smmu_ops,
940};
941
942const struct tegra_mc_soc tegra114_mc_soc = {
943 .clients = tegra114_mc_clients,
944 .num_clients = ARRAY_SIZE(tegra114_mc_clients),
945 .num_address_bits = 32,
946 .atom_size = 32,
947 .smmu = &tegra114_smmu_soc,
948};
diff --git a/drivers/memory/tegra/tegra124.c b/drivers/memory/tegra/tegra124.c
new file mode 100644
index 000000000000..278d40b854c1
--- /dev/null
+++ b/drivers/memory/tegra/tegra124.c
@@ -0,0 +1,995 @@
1/*
2 * Copyright (C) 2014 NVIDIA CORPORATION. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#include <linux/of.h>
10#include <linux/mm.h>
11
12#include <asm/cacheflush.h>
13
14#include <dt-bindings/memory/tegra124-mc.h>
15
16#include "mc.h"
17
18static const struct tegra_mc_client tegra124_mc_clients[] = {
19 {
20 .id = 0x00,
21 .name = "ptcr",
22 .swgroup = TEGRA_SWGROUP_PTC,
23 }, {
24 .id = 0x01,
25 .name = "display0a",
26 .swgroup = TEGRA_SWGROUP_DC,
27 .smmu = {
28 .reg = 0x228,
29 .bit = 1,
30 },
31 .la = {
32 .reg = 0x2e8,
33 .shift = 0,
34 .mask = 0xff,
35 .def = 0xc2,
36 },
37 }, {
38 .id = 0x02,
39 .name = "display0ab",
40 .swgroup = TEGRA_SWGROUP_DCB,
41 .smmu = {
42 .reg = 0x228,
43 .bit = 2,
44 },
45 .la = {
46 .reg = 0x2f4,
47 .shift = 0,
48 .mask = 0xff,
49 .def = 0xc6,
50 },
51 }, {
52 .id = 0x03,
53 .name = "display0b",
54 .swgroup = TEGRA_SWGROUP_DC,
55 .smmu = {
56 .reg = 0x228,
57 .bit = 3,
58 },
59 .la = {
60 .reg = 0x2e8,
61 .shift = 16,
62 .mask = 0xff,
63 .def = 0x50,
64 },
65 }, {
66 .id = 0x04,
67 .name = "display0bb",
68 .swgroup = TEGRA_SWGROUP_DCB,
69 .smmu = {
70 .reg = 0x228,
71 .bit = 4,
72 },
73 .la = {
74 .reg = 0x2f4,
75 .shift = 16,
76 .mask = 0xff,
77 .def = 0x50,
78 },
79 }, {
80 .id = 0x05,
81 .name = "display0c",
82 .swgroup = TEGRA_SWGROUP_DC,
83 .smmu = {
84 .reg = 0x228,
85 .bit = 5,
86 },
87 .la = {
88 .reg = 0x2ec,
89 .shift = 0,
90 .mask = 0xff,
91 .def = 0x50,
92 },
93 }, {
94 .id = 0x06,
95 .name = "display0cb",
96 .swgroup = TEGRA_SWGROUP_DCB,
97 .smmu = {
98 .reg = 0x228,
99 .bit = 6,
100 },
101 .la = {
102 .reg = 0x2f8,
103 .shift = 0,
104 .mask = 0xff,
105 .def = 0x50,
106 },
107 }, {
108 .id = 0x0e,
109 .name = "afir",
110 .swgroup = TEGRA_SWGROUP_AFI,
111 .smmu = {
112 .reg = 0x228,
113 .bit = 14,
114 },
115 .la = {
116 .reg = 0x2e0,
117 .shift = 0,
118 .mask = 0xff,
119 .def = 0x13,
120 },
121 }, {
122 .id = 0x0f,
123 .name = "avpcarm7r",
124 .swgroup = TEGRA_SWGROUP_AVPC,
125 .smmu = {
126 .reg = 0x228,
127 .bit = 15,
128 },
129 .la = {
130 .reg = 0x2e4,
131 .shift = 0,
132 .mask = 0xff,
133 .def = 0x04,
134 },
135 }, {
136 .id = 0x10,
137 .name = "displayhc",
138 .swgroup = TEGRA_SWGROUP_DC,
139 .smmu = {
140 .reg = 0x228,
141 .bit = 16,
142 },
143 .la = {
144 .reg = 0x2f0,
145 .shift = 0,
146 .mask = 0xff,
147 .def = 0x50,
148 },
149 }, {
150 .id = 0x11,
151 .name = "displayhcb",
152 .swgroup = TEGRA_SWGROUP_DCB,
153 .smmu = {
154 .reg = 0x228,
155 .bit = 17,
156 },
157 .la = {
158 .reg = 0x2fc,
159 .shift = 0,
160 .mask = 0xff,
161 .def = 0x50,
162 },
163 }, {
164 .id = 0x15,
165 .name = "hdar",
166 .swgroup = TEGRA_SWGROUP_HDA,
167 .smmu = {
168 .reg = 0x228,
169 .bit = 21,
170 },
171 .la = {
172 .reg = 0x318,
173 .shift = 0,
174 .mask = 0xff,
175 .def = 0x24,
176 },
177 }, {
178 .id = 0x16,
179 .name = "host1xdmar",
180 .swgroup = TEGRA_SWGROUP_HC,
181 .smmu = {
182 .reg = 0x228,
183 .bit = 22,
184 },
185 .la = {
186 .reg = 0x310,
187 .shift = 0,
188 .mask = 0xff,
189 .def = 0x1e,
190 },
191 }, {
192 .id = 0x17,
193 .name = "host1xr",
194 .swgroup = TEGRA_SWGROUP_HC,
195 .smmu = {
196 .reg = 0x228,
197 .bit = 23,
198 },
199 .la = {
200 .reg = 0x310,
201 .shift = 16,
202 .mask = 0xff,
203 .def = 0x50,
204 },
205 }, {
206 .id = 0x1c,
207 .name = "msencsrd",
208 .swgroup = TEGRA_SWGROUP_MSENC,
209 .smmu = {
210 .reg = 0x228,
211 .bit = 28,
212 },
213 .la = {
214 .reg = 0x328,
215 .shift = 0,
216 .mask = 0xff,
217 .def = 0x23,
218 },
219 }, {
220 .id = 0x1d,
221 .name = "ppcsahbdmar",
222 .swgroup = TEGRA_SWGROUP_PPCS,
223 .smmu = {
224 .reg = 0x228,
225 .bit = 29,
226 },
227 .la = {
228 .reg = 0x344,
229 .shift = 0,
230 .mask = 0xff,
231 .def = 0x49,
232 },
233 }, {
234 .id = 0x1e,
235 .name = "ppcsahbslvr",
236 .swgroup = TEGRA_SWGROUP_PPCS,
237 .smmu = {
238 .reg = 0x228,
239 .bit = 30,
240 },
241 .la = {
242 .reg = 0x344,
243 .shift = 16,
244 .mask = 0xff,
245 .def = 0x1a,
246 },
247 }, {
248 .id = 0x1f,
249 .name = "satar",
250 .swgroup = TEGRA_SWGROUP_SATA,
251 .smmu = {
252 .reg = 0x228,
253 .bit = 31,
254 },
255 .la = {
256 .reg = 0x350,
257 .shift = 0,
258 .mask = 0xff,
259 .def = 0x65,
260 },
261 }, {
262 .id = 0x22,
263 .name = "vdebsevr",
264 .swgroup = TEGRA_SWGROUP_VDE,
265 .smmu = {
266 .reg = 0x22c,
267 .bit = 2,
268 },
269 .la = {
270 .reg = 0x354,
271 .shift = 0,
272 .mask = 0xff,
273 .def = 0x4f,
274 },
275 }, {
276 .id = 0x23,
277 .name = "vdember",
278 .swgroup = TEGRA_SWGROUP_VDE,
279 .smmu = {
280 .reg = 0x22c,
281 .bit = 3,
282 },
283 .la = {
284 .reg = 0x354,
285 .shift = 16,
286 .mask = 0xff,
287 .def = 0x3d,
288 },
289 }, {
290 .id = 0x24,
291 .name = "vdemcer",
292 .swgroup = TEGRA_SWGROUP_VDE,
293 .smmu = {
294 .reg = 0x22c,
295 .bit = 4,
296 },
297 .la = {
298 .reg = 0x358,
299 .shift = 0,
300 .mask = 0xff,
301 .def = 0x66,
302 },
303 }, {
304 .id = 0x25,
305 .name = "vdetper",
306 .swgroup = TEGRA_SWGROUP_VDE,
307 .smmu = {
308 .reg = 0x22c,
309 .bit = 5,
310 },
311 .la = {
312 .reg = 0x358,
313 .shift = 16,
314 .mask = 0xff,
315 .def = 0xa5,
316 },
317 }, {
318 .id = 0x26,
319 .name = "mpcorelpr",
320 .swgroup = TEGRA_SWGROUP_MPCORELP,
321 .la = {
322 .reg = 0x324,
323 .shift = 0,
324 .mask = 0xff,
325 .def = 0x04,
326 },
327 }, {
328 .id = 0x27,
329 .name = "mpcorer",
330 .swgroup = TEGRA_SWGROUP_MPCORE,
331 .la = {
332 .reg = 0x320,
333 .shift = 0,
334 .mask = 0xff,
335 .def = 0x04,
336 },
337 }, {
338 .id = 0x2b,
339 .name = "msencswr",
340 .swgroup = TEGRA_SWGROUP_MSENC,
341 .smmu = {
342 .reg = 0x22c,
343 .bit = 11,
344 },
345 .la = {
346 .reg = 0x328,
347 .shift = 16,
348 .mask = 0xff,
349 .def = 0x80,
350 },
351 }, {
352 .id = 0x31,
353 .name = "afiw",
354 .swgroup = TEGRA_SWGROUP_AFI,
355 .smmu = {
356 .reg = 0x22c,
357 .bit = 17,
358 },
359 .la = {
360 .reg = 0x2e0,
361 .shift = 16,
362 .mask = 0xff,
363 .def = 0x80,
364 },
365 }, {
366 .id = 0x32,
367 .name = "avpcarm7w",
368 .swgroup = TEGRA_SWGROUP_AVPC,
369 .smmu = {
370 .reg = 0x22c,
371 .bit = 18,
372 },
373 .la = {
374 .reg = 0x2e4,
375 .shift = 16,
376 .mask = 0xff,
377 .def = 0x80,
378 },
379 }, {
380 .id = 0x35,
381 .name = "hdaw",
382 .swgroup = TEGRA_SWGROUP_HDA,
383 .smmu = {
384 .reg = 0x22c,
385 .bit = 21,
386 },
387 .la = {
388 .reg = 0x318,
389 .shift = 16,
390 .mask = 0xff,
391 .def = 0x80,
392 },
393 }, {
394 .id = 0x36,
395 .name = "host1xw",
396 .swgroup = TEGRA_SWGROUP_HC,
397 .smmu = {
398 .reg = 0x22c,
399 .bit = 22,
400 },
401 .la = {
402 .reg = 0x314,
403 .shift = 0,
404 .mask = 0xff,
405 .def = 0x80,
406 },
407 }, {
408 .id = 0x38,
409 .name = "mpcorelpw",
410 .swgroup = TEGRA_SWGROUP_MPCORELP,
411 .la = {
412 .reg = 0x324,
413 .shift = 16,
414 .mask = 0xff,
415 .def = 0x80,
416 },
417 }, {
418 .id = 0x39,
419 .name = "mpcorew",
420 .swgroup = TEGRA_SWGROUP_MPCORE,
421 .la = {
422 .reg = 0x320,
423 .shift = 16,
424 .mask = 0xff,
425 .def = 0x80,
426 },
427 }, {
428 .id = 0x3b,
429 .name = "ppcsahbdmaw",
430 .swgroup = TEGRA_SWGROUP_PPCS,
431 .smmu = {
432 .reg = 0x22c,
433 .bit = 27,
434 },
435 .la = {
436 .reg = 0x348,
437 .shift = 0,
438 .mask = 0xff,
439 .def = 0x80,
440 },
441 }, {
442 .id = 0x3c,
443 .name = "ppcsahbslvw",
444 .swgroup = TEGRA_SWGROUP_PPCS,
445 .smmu = {
446 .reg = 0x22c,
447 .bit = 28,
448 },
449 .la = {
450 .reg = 0x348,
451 .shift = 16,
452 .mask = 0xff,
453 .def = 0x80,
454 },
455 }, {
456 .id = 0x3d,
457 .name = "sataw",
458 .swgroup = TEGRA_SWGROUP_SATA,
459 .smmu = {
460 .reg = 0x22c,
461 .bit = 29,
462 },
463 .la = {
464 .reg = 0x350,
465 .shift = 16,
466 .mask = 0xff,
467 .def = 0x65,
468 },
469 }, {
470 .id = 0x3e,
471 .name = "vdebsevw",
472 .swgroup = TEGRA_SWGROUP_VDE,
473 .smmu = {
474 .reg = 0x22c,
475 .bit = 30,
476 },
477 .la = {
478 .reg = 0x35c,
479 .shift = 0,
480 .mask = 0xff,
481 .def = 0x80,
482 },
483 }, {
484 .id = 0x3f,
485 .name = "vdedbgw",
486 .swgroup = TEGRA_SWGROUP_VDE,
487 .smmu = {
488 .reg = 0x22c,
489 .bit = 31,
490 },
491 .la = {
492 .reg = 0x35c,
493 .shift = 16,
494 .mask = 0xff,
495 .def = 0x80,
496 },
497 }, {
498 .id = 0x40,
499 .name = "vdembew",
500 .swgroup = TEGRA_SWGROUP_VDE,
501 .smmu = {
502 .reg = 0x230,
503 .bit = 0,
504 },
505 .la = {
506 .reg = 0x360,
507 .shift = 0,
508 .mask = 0xff,
509 .def = 0x80,
510 },
511 }, {
512 .id = 0x41,
513 .name = "vdetpmw",
514 .swgroup = TEGRA_SWGROUP_VDE,
515 .smmu = {
516 .reg = 0x230,
517 .bit = 1,
518 },
519 .la = {
520 .reg = 0x360,
521 .shift = 16,
522 .mask = 0xff,
523 .def = 0x80,
524 },
525 }, {
526 .id = 0x44,
527 .name = "ispra",
528 .swgroup = TEGRA_SWGROUP_ISP2,
529 .smmu = {
530 .reg = 0x230,
531 .bit = 4,
532 },
533 .la = {
534 .reg = 0x370,
535 .shift = 0,
536 .mask = 0xff,
537 .def = 0x18,
538 },
539 }, {
540 .id = 0x46,
541 .name = "ispwa",
542 .swgroup = TEGRA_SWGROUP_ISP2,
543 .smmu = {
544 .reg = 0x230,
545 .bit = 6,
546 },
547 .la = {
548 .reg = 0x374,
549 .shift = 0,
550 .mask = 0xff,
551 .def = 0x80,
552 },
553 }, {
554 .id = 0x47,
555 .name = "ispwb",
556 .swgroup = TEGRA_SWGROUP_ISP2,
557 .smmu = {
558 .reg = 0x230,
559 .bit = 7,
560 },
561 .la = {
562 .reg = 0x374,
563 .shift = 16,
564 .mask = 0xff,
565 .def = 0x80,
566 },
567 }, {
568 .id = 0x4a,
569 .name = "xusb_hostr",
570 .swgroup = TEGRA_SWGROUP_XUSB_HOST,
571 .smmu = {
572 .reg = 0x230,
573 .bit = 10,
574 },
575 .la = {
576 .reg = 0x37c,
577 .shift = 0,
578 .mask = 0xff,
579 .def = 0x39,
580 },
581 }, {
582 .id = 0x4b,
583 .name = "xusb_hostw",
584 .swgroup = TEGRA_SWGROUP_XUSB_HOST,
585 .smmu = {
586 .reg = 0x230,
587 .bit = 11,
588 },
589 .la = {
590 .reg = 0x37c,
591 .shift = 16,
592 .mask = 0xff,
593 .def = 0x80,
594 },
595 }, {
596 .id = 0x4c,
597 .name = "xusb_devr",
598 .swgroup = TEGRA_SWGROUP_XUSB_DEV,
599 .smmu = {
600 .reg = 0x230,
601 .bit = 12,
602 },
603 .la = {
604 .reg = 0x380,
605 .shift = 0,
606 .mask = 0xff,
607 .def = 0x39,
608 },
609 }, {
610 .id = 0x4d,
611 .name = "xusb_devw",
612 .swgroup = TEGRA_SWGROUP_XUSB_DEV,
613 .smmu = {
614 .reg = 0x230,
615 .bit = 13,
616 },
617 .la = {
618 .reg = 0x380,
619 .shift = 16,
620 .mask = 0xff,
621 .def = 0x80,
622 },
623 }, {
624 .id = 0x4e,
625 .name = "isprab",
626 .swgroup = TEGRA_SWGROUP_ISP2B,
627 .smmu = {
628 .reg = 0x230,
629 .bit = 14,
630 },
631 .la = {
632 .reg = 0x384,
633 .shift = 0,
634 .mask = 0xff,
635 .def = 0x18,
636 },
637 }, {
638 .id = 0x50,
639 .name = "ispwab",
640 .swgroup = TEGRA_SWGROUP_ISP2B,
641 .smmu = {
642 .reg = 0x230,
643 .bit = 16,
644 },
645 .la = {
646 .reg = 0x388,
647 .shift = 0,
648 .mask = 0xff,
649 .def = 0x80,
650 },
651 }, {
652 .id = 0x51,
653 .name = "ispwbb",
654 .swgroup = TEGRA_SWGROUP_ISP2B,
655 .smmu = {
656 .reg = 0x230,
657 .bit = 17,
658 },
659 .la = {
660 .reg = 0x388,
661 .shift = 16,
662 .mask = 0xff,
663 .def = 0x80,
664 },
665 }, {
666 .id = 0x54,
667 .name = "tsecsrd",
668 .swgroup = TEGRA_SWGROUP_TSEC,
669 .smmu = {
670 .reg = 0x230,
671 .bit = 20,
672 },
673 .la = {
674 .reg = 0x390,
675 .shift = 0,
676 .mask = 0xff,
677 .def = 0x9b,
678 },
679 }, {
680 .id = 0x55,
681 .name = "tsecswr",
682 .swgroup = TEGRA_SWGROUP_TSEC,
683 .smmu = {
684 .reg = 0x230,
685 .bit = 21,
686 },
687 .la = {
688 .reg = 0x390,
689 .shift = 16,
690 .mask = 0xff,
691 .def = 0x80,
692 },
693 }, {
694 .id = 0x56,
695 .name = "a9avpscr",
696 .swgroup = TEGRA_SWGROUP_A9AVP,
697 .smmu = {
698 .reg = 0x230,
699 .bit = 22,
700 },
701 .la = {
702 .reg = 0x3a4,
703 .shift = 0,
704 .mask = 0xff,
705 .def = 0x04,
706 },
707 }, {
708 .id = 0x57,
709 .name = "a9avpscw",
710 .swgroup = TEGRA_SWGROUP_A9AVP,
711 .smmu = {
712 .reg = 0x230,
713 .bit = 23,
714 },
715 .la = {
716 .reg = 0x3a4,
717 .shift = 16,
718 .mask = 0xff,
719 .def = 0x80,
720 },
721 }, {
722 .id = 0x58,
723 .name = "gpusrd",
724 .swgroup = TEGRA_SWGROUP_GPU,
725 .smmu = {
726 /* read-only */
727 .reg = 0x230,
728 .bit = 24,
729 },
730 .la = {
731 .reg = 0x3c8,
732 .shift = 0,
733 .mask = 0xff,
734 .def = 0x1a,
735 },
736 }, {
737 .id = 0x59,
738 .name = "gpuswr",
739 .swgroup = TEGRA_SWGROUP_GPU,
740 .smmu = {
741 /* read-only */
742 .reg = 0x230,
743 .bit = 25,
744 },
745 .la = {
746 .reg = 0x3c8,
747 .shift = 16,
748 .mask = 0xff,
749 .def = 0x80,
750 },
751 }, {
752 .id = 0x5a,
753 .name = "displayt",
754 .swgroup = TEGRA_SWGROUP_DC,
755 .smmu = {
756 .reg = 0x230,
757 .bit = 26,
758 },
759 .la = {
760 .reg = 0x2f0,
761 .shift = 16,
762 .mask = 0xff,
763 .def = 0x50,
764 },
765 }, {
766 .id = 0x60,
767 .name = "sdmmcra",
768 .swgroup = TEGRA_SWGROUP_SDMMC1A,
769 .smmu = {
770 .reg = 0x234,
771 .bit = 0,
772 },
773 .la = {
774 .reg = 0x3b8,
775 .shift = 0,
776 .mask = 0xff,
777 .def = 0x49,
778 },
779 }, {
780 .id = 0x61,
781 .name = "sdmmcraa",
782 .swgroup = TEGRA_SWGROUP_SDMMC2A,
783 .smmu = {
784 .reg = 0x234,
785 .bit = 1,
786 },
787 .la = {
788 .reg = 0x3bc,
789 .shift = 0,
790 .mask = 0xff,
791 .def = 0x49,
792 },
793 }, {
794 .id = 0x62,
795 .name = "sdmmcr",
796 .swgroup = TEGRA_SWGROUP_SDMMC3A,
797 .smmu = {
798 .reg = 0x234,
799 .bit = 2,
800 },
801 .la = {
802 .reg = 0x3c0,
803 .shift = 0,
804 .mask = 0xff,
805 .def = 0x49,
806 },
807 }, {
808 .id = 0x63,
809 .swgroup = TEGRA_SWGROUP_SDMMC4A,
810 .name = "sdmmcrab",
811 .smmu = {
812 .reg = 0x234,
813 .bit = 3,
814 },
815 .la = {
816 .reg = 0x3c4,
817 .shift = 0,
818 .mask = 0xff,
819 .def = 0x49,
820 },
821 }, {
822 .id = 0x64,
823 .name = "sdmmcwa",
824 .swgroup = TEGRA_SWGROUP_SDMMC1A,
825 .smmu = {
826 .reg = 0x234,
827 .bit = 4,
828 },
829 .la = {
830 .reg = 0x3b8,
831 .shift = 16,
832 .mask = 0xff,
833 .def = 0x80,
834 },
835 }, {
836 .id = 0x65,
837 .name = "sdmmcwaa",
838 .swgroup = TEGRA_SWGROUP_SDMMC2A,
839 .smmu = {
840 .reg = 0x234,
841 .bit = 5,
842 },
843 .la = {
844 .reg = 0x3bc,
845 .shift = 16,
846 .mask = 0xff,
847 .def = 0x80,
848 },
849 }, {
850 .id = 0x66,
851 .name = "sdmmcw",
852 .swgroup = TEGRA_SWGROUP_SDMMC3A,
853 .smmu = {
854 .reg = 0x234,
855 .bit = 6,
856 },
857 .la = {
858 .reg = 0x3c0,
859 .shift = 16,
860 .mask = 0xff,
861 .def = 0x80,
862 },
863 }, {
864 .id = 0x67,
865 .name = "sdmmcwab",
866 .swgroup = TEGRA_SWGROUP_SDMMC4A,
867 .smmu = {
868 .reg = 0x234,
869 .bit = 7,
870 },
871 .la = {
872 .reg = 0x3c4,
873 .shift = 16,
874 .mask = 0xff,
875 .def = 0x80,
876 },
877 }, {
878 .id = 0x6c,
879 .name = "vicsrd",
880 .swgroup = TEGRA_SWGROUP_VIC,
881 .smmu = {
882 .reg = 0x234,
883 .bit = 12,
884 },
885 .la = {
886 .reg = 0x394,
887 .shift = 0,
888 .mask = 0xff,
889 .def = 0x1a,
890 },
891 }, {
892 .id = 0x6d,
893 .name = "vicswr",
894 .swgroup = TEGRA_SWGROUP_VIC,
895 .smmu = {
896 .reg = 0x234,
897 .bit = 13,
898 },
899 .la = {
900 .reg = 0x394,
901 .shift = 16,
902 .mask = 0xff,
903 .def = 0x80,
904 },
905 }, {
906 .id = 0x72,
907 .name = "viw",
908 .swgroup = TEGRA_SWGROUP_VI,
909 .smmu = {
910 .reg = 0x234,
911 .bit = 18,
912 },
913 .la = {
914 .reg = 0x398,
915 .shift = 0,
916 .mask = 0xff,
917 .def = 0x80,
918 },
919 }, {
920 .id = 0x73,
921 .name = "displayd",
922 .swgroup = TEGRA_SWGROUP_DC,
923 .smmu = {
924 .reg = 0x234,
925 .bit = 19,
926 },
927 .la = {
928 .reg = 0x3c8,
929 .shift = 0,
930 .mask = 0xff,
931 .def = 0x50,
932 },
933 },
934};
935
936static const struct tegra_smmu_swgroup tegra124_swgroups[] = {
937 { .swgroup = TEGRA_SWGROUP_DC, .reg = 0x240 },
938 { .swgroup = TEGRA_SWGROUP_DCB, .reg = 0x244 },
939 { .swgroup = TEGRA_SWGROUP_AFI, .reg = 0x238 },
940 { .swgroup = TEGRA_SWGROUP_AVPC, .reg = 0x23c },
941 { .swgroup = TEGRA_SWGROUP_HDA, .reg = 0x254 },
942 { .swgroup = TEGRA_SWGROUP_HC, .reg = 0x250 },
943 { .swgroup = TEGRA_SWGROUP_MSENC, .reg = 0x264 },
944 { .swgroup = TEGRA_SWGROUP_PPCS, .reg = 0x270 },
945 { .swgroup = TEGRA_SWGROUP_SATA, .reg = 0x274 },
946 { .swgroup = TEGRA_SWGROUP_VDE, .reg = 0x27c },
947 { .swgroup = TEGRA_SWGROUP_ISP2, .reg = 0x258 },
948 { .swgroup = TEGRA_SWGROUP_XUSB_HOST, .reg = 0x288 },
949 { .swgroup = TEGRA_SWGROUP_XUSB_DEV, .reg = 0x28c },
950 { .swgroup = TEGRA_SWGROUP_ISP2B, .reg = 0xaa4 },
951 { .swgroup = TEGRA_SWGROUP_TSEC, .reg = 0x294 },
952 { .swgroup = TEGRA_SWGROUP_A9AVP, .reg = 0x290 },
953 { .swgroup = TEGRA_SWGROUP_GPU, .reg = 0xaac },
954 { .swgroup = TEGRA_SWGROUP_SDMMC1A, .reg = 0xa94 },
955 { .swgroup = TEGRA_SWGROUP_SDMMC2A, .reg = 0xa98 },
956 { .swgroup = TEGRA_SWGROUP_SDMMC3A, .reg = 0xa9c },
957 { .swgroup = TEGRA_SWGROUP_SDMMC4A, .reg = 0xaa0 },
958 { .swgroup = TEGRA_SWGROUP_VIC, .reg = 0x284 },
959 { .swgroup = TEGRA_SWGROUP_VI, .reg = 0x280 },
960};
961
962#ifdef CONFIG_ARCH_TEGRA_124_SOC
963static void tegra124_flush_dcache(struct page *page, unsigned long offset,
964 size_t size)
965{
966 phys_addr_t phys = page_to_phys(page) + offset;
967 void *virt = page_address(page) + offset;
968
969 __cpuc_flush_dcache_area(virt, size);
970 outer_flush_range(phys, phys + size);
971}
972
973static const struct tegra_smmu_ops tegra124_smmu_ops = {
974 .flush_dcache = tegra124_flush_dcache,
975};
976
977static const struct tegra_smmu_soc tegra124_smmu_soc = {
978 .clients = tegra124_mc_clients,
979 .num_clients = ARRAY_SIZE(tegra124_mc_clients),
980 .swgroups = tegra124_swgroups,
981 .num_swgroups = ARRAY_SIZE(tegra124_swgroups),
982 .supports_round_robin_arbitration = true,
983 .supports_request_limit = true,
984 .num_asids = 128,
985 .ops = &tegra124_smmu_ops,
986};
987
988const struct tegra_mc_soc tegra124_mc_soc = {
989 .clients = tegra124_mc_clients,
990 .num_clients = ARRAY_SIZE(tegra124_mc_clients),
991 .num_address_bits = 34,
992 .atom_size = 32,
993 .smmu = &tegra124_smmu_soc,
994};
995#endif /* CONFIG_ARCH_TEGRA_124_SOC */
diff --git a/drivers/memory/tegra/tegra30.c b/drivers/memory/tegra/tegra30.c
new file mode 100644
index 000000000000..71fe9376fe53
--- /dev/null
+++ b/drivers/memory/tegra/tegra30.c
@@ -0,0 +1,970 @@
1/*
2 * Copyright (C) 2014 NVIDIA CORPORATION. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#include <linux/of.h>
10#include <linux/mm.h>
11
12#include <asm/cacheflush.h>
13
14#include <dt-bindings/memory/tegra30-mc.h>
15
16#include "mc.h"
17
18static const struct tegra_mc_client tegra30_mc_clients[] = {
19 {
20 .id = 0x00,
21 .name = "ptcr",
22 .swgroup = TEGRA_SWGROUP_PTC,
23 }, {
24 .id = 0x01,
25 .name = "display0a",
26 .swgroup = TEGRA_SWGROUP_DC,
27 .smmu = {
28 .reg = 0x228,
29 .bit = 1,
30 },
31 .la = {
32 .reg = 0x2e8,
33 .shift = 0,
34 .mask = 0xff,
35 .def = 0x4e,
36 },
37 }, {
38 .id = 0x02,
39 .name = "display0ab",
40 .swgroup = TEGRA_SWGROUP_DCB,
41 .smmu = {
42 .reg = 0x228,
43 .bit = 2,
44 },
45 .la = {
46 .reg = 0x2f4,
47 .shift = 0,
48 .mask = 0xff,
49 .def = 0x4e,
50 },
51 }, {
52 .id = 0x03,
53 .name = "display0b",
54 .swgroup = TEGRA_SWGROUP_DC,
55 .smmu = {
56 .reg = 0x228,
57 .bit = 3,
58 },
59 .la = {
60 .reg = 0x2e8,
61 .shift = 16,
62 .mask = 0xff,
63 .def = 0x4e,
64 },
65 }, {
66 .id = 0x04,
67 .name = "display0bb",
68 .swgroup = TEGRA_SWGROUP_DCB,
69 .smmu = {
70 .reg = 0x228,
71 .bit = 4,
72 },
73 .la = {
74 .reg = 0x2f4,
75 .shift = 16,
76 .mask = 0xff,
77 .def = 0x4e,
78 },
79 }, {
80 .id = 0x05,
81 .name = "display0c",
82 .swgroup = TEGRA_SWGROUP_DC,
83 .smmu = {
84 .reg = 0x228,
85 .bit = 5,
86 },
87 .la = {
88 .reg = 0x2ec,
89 .shift = 0,
90 .mask = 0xff,
91 .def = 0x4e,
92 },
93 }, {
94 .id = 0x06,
95 .name = "display0cb",
96 .swgroup = TEGRA_SWGROUP_DCB,
97 .smmu = {
98 .reg = 0x228,
99 .bit = 6,
100 },
101 .la = {
102 .reg = 0x2f8,
103 .shift = 0,
104 .mask = 0xff,
105 .def = 0x4e,
106 },
107 }, {
108 .id = 0x07,
109 .name = "display1b",
110 .swgroup = TEGRA_SWGROUP_DC,
111 .smmu = {
112 .reg = 0x228,
113 .bit = 7,
114 },
115 .la = {
116 .reg = 0x2ec,
117 .shift = 16,
118 .mask = 0xff,
119 .def = 0x4e,
120 },
121 }, {
122 .id = 0x08,
123 .name = "display1bb",
124 .swgroup = TEGRA_SWGROUP_DCB,
125 .smmu = {
126 .reg = 0x228,
127 .bit = 8,
128 },
129 .la = {
130 .reg = 0x2f8,
131 .shift = 16,
132 .mask = 0xff,
133 .def = 0x4e,
134 },
135 }, {
136 .id = 0x09,
137 .name = "eppup",
138 .swgroup = TEGRA_SWGROUP_EPP,
139 .smmu = {
140 .reg = 0x228,
141 .bit = 9,
142 },
143 .la = {
144 .reg = 0x300,
145 .shift = 0,
146 .mask = 0xff,
147 .def = 0x17,
148 },
149 }, {
150 .id = 0x0a,
151 .name = "g2pr",
152 .swgroup = TEGRA_SWGROUP_G2,
153 .smmu = {
154 .reg = 0x228,
155 .bit = 10,
156 },
157 .la = {
158 .reg = 0x308,
159 .shift = 0,
160 .mask = 0xff,
161 .def = 0x09,
162 },
163 }, {
164 .id = 0x0b,
165 .name = "g2sr",
166 .swgroup = TEGRA_SWGROUP_G2,
167 .smmu = {
168 .reg = 0x228,
169 .bit = 11,
170 },
171 .la = {
172 .reg = 0x308,
173 .shift = 16,
174 .mask = 0xff,
175 .def = 0x09,
176 },
177 }, {
178 .id = 0x0c,
179 .name = "mpeunifbr",
180 .swgroup = TEGRA_SWGROUP_MPE,
181 .smmu = {
182 .reg = 0x228,
183 .bit = 12,
184 },
185 .la = {
186 .reg = 0x328,
187 .shift = 0,
188 .mask = 0xff,
189 .def = 0x50,
190 },
191 }, {
192 .id = 0x0d,
193 .name = "viruv",
194 .swgroup = TEGRA_SWGROUP_VI,
195 .smmu = {
196 .reg = 0x228,
197 .bit = 13,
198 },
199 .la = {
200 .reg = 0x364,
201 .shift = 0,
202 .mask = 0xff,
203 .def = 0x2c,
204 },
205 }, {
206 .id = 0x0e,
207 .name = "afir",
208 .swgroup = TEGRA_SWGROUP_AFI,
209 .smmu = {
210 .reg = 0x228,
211 .bit = 14,
212 },
213 .la = {
214 .reg = 0x2e0,
215 .shift = 0,
216 .mask = 0xff,
217 .def = 0x10,
218 },
219 }, {
220 .id = 0x0f,
221 .name = "avpcarm7r",
222 .swgroup = TEGRA_SWGROUP_AVPC,
223 .smmu = {
224 .reg = 0x228,
225 .bit = 15,
226 },
227 .la = {
228 .reg = 0x2e4,
229 .shift = 0,
230 .mask = 0xff,
231 .def = 0x04,
232 },
233 }, {
234 .id = 0x10,
235 .name = "displayhc",
236 .swgroup = TEGRA_SWGROUP_DC,
237 .smmu = {
238 .reg = 0x228,
239 .bit = 16,
240 },
241 .la = {
242 .reg = 0x2f0,
243 .shift = 0,
244 .mask = 0xff,
245 .def = 0xff,
246 },
247 }, {
248 .id = 0x11,
249 .name = "displayhcb",
250 .swgroup = TEGRA_SWGROUP_DCB,
251 .smmu = {
252 .reg = 0x228,
253 .bit = 17,
254 },
255 .la = {
256 .reg = 0x2fc,
257 .shift = 0,
258 .mask = 0xff,
259 .def = 0xff,
260 },
261 }, {
262 .id = 0x12,
263 .name = "fdcdrd",
264 .swgroup = TEGRA_SWGROUP_NV,
265 .smmu = {
266 .reg = 0x228,
267 .bit = 18,
268 },
269 .la = {
270 .reg = 0x334,
271 .shift = 0,
272 .mask = 0xff,
273 .def = 0x0a,
274 },
275 }, {
276 .id = 0x13,
277 .name = "fdcdrd2",
278 .swgroup = TEGRA_SWGROUP_NV2,
279 .smmu = {
280 .reg = 0x228,
281 .bit = 19,
282 },
283 .la = {
284 .reg = 0x33c,
285 .shift = 0,
286 .mask = 0xff,
287 .def = 0x0a,
288 },
289 }, {
290 .id = 0x14,
291 .name = "g2dr",
292 .swgroup = TEGRA_SWGROUP_G2,
293 .smmu = {
294 .reg = 0x228,
295 .bit = 20,
296 },
297 .la = {
298 .reg = 0x30c,
299 .shift = 0,
300 .mask = 0xff,
301 .def = 0x0a,
302 },
303 }, {
304 .id = 0x15,
305 .name = "hdar",
306 .swgroup = TEGRA_SWGROUP_HDA,
307 .smmu = {
308 .reg = 0x228,
309 .bit = 21,
310 },
311 .la = {
312 .reg = 0x318,
313 .shift = 0,
314 .mask = 0xff,
315 .def = 0xff,
316 },
317 }, {
318 .id = 0x16,
319 .name = "host1xdmar",
320 .swgroup = TEGRA_SWGROUP_HC,
321 .smmu = {
322 .reg = 0x228,
323 .bit = 22,
324 },
325 .la = {
326 .reg = 0x310,
327 .shift = 0,
328 .mask = 0xff,
329 .def = 0x05,
330 },
331 }, {
332 .id = 0x17,
333 .name = "host1xr",
334 .swgroup = TEGRA_SWGROUP_HC,
335 .smmu = {
336 .reg = 0x228,
337 .bit = 23,
338 },
339 .la = {
340 .reg = 0x310,
341 .shift = 16,
342 .mask = 0xff,
343 .def = 0x50,
344 },
345 }, {
346 .id = 0x18,
347 .name = "idxsrd",
348 .swgroup = TEGRA_SWGROUP_NV,
349 .smmu = {
350 .reg = 0x228,
351 .bit = 24,
352 },
353 .la = {
354 .reg = 0x334,
355 .shift = 16,
356 .mask = 0xff,
357 .def = 0x13,
358 },
359 }, {
360 .id = 0x19,
361 .name = "idxsrd2",
362 .swgroup = TEGRA_SWGROUP_NV2,
363 .smmu = {
364 .reg = 0x228,
365 .bit = 25,
366 },
367 .la = {
368 .reg = 0x33c,
369 .shift = 16,
370 .mask = 0xff,
371 .def = 0x13,
372 },
373 }, {
374 .id = 0x1a,
375 .name = "mpe_ipred",
376 .swgroup = TEGRA_SWGROUP_MPE,
377 .smmu = {
378 .reg = 0x228,
379 .bit = 26,
380 },
381 .la = {
382 .reg = 0x328,
383 .shift = 16,
384 .mask = 0xff,
385 .def = 0x80,
386 },
387 }, {
388 .id = 0x1b,
389 .name = "mpeamemrd",
390 .swgroup = TEGRA_SWGROUP_MPE,
391 .smmu = {
392 .reg = 0x228,
393 .bit = 27,
394 },
395 .la = {
396 .reg = 0x32c,
397 .shift = 0,
398 .mask = 0xff,
399 .def = 0x42,
400 },
401 }, {
402 .id = 0x1c,
403 .name = "mpecsrd",
404 .swgroup = TEGRA_SWGROUP_MPE,
405 .smmu = {
406 .reg = 0x228,
407 .bit = 28,
408 },
409 .la = {
410 .reg = 0x32c,
411 .shift = 16,
412 .mask = 0xff,
413 .def = 0xff,
414 },
415 }, {
416 .id = 0x1d,
417 .name = "ppcsahbdmar",
418 .swgroup = TEGRA_SWGROUP_PPCS,
419 .smmu = {
420 .reg = 0x228,
421 .bit = 29,
422 },
423 .la = {
424 .reg = 0x344,
425 .shift = 0,
426 .mask = 0xff,
427 .def = 0x10,
428 },
429 }, {
430 .id = 0x1e,
431 .name = "ppcsahbslvr",
432 .swgroup = TEGRA_SWGROUP_PPCS,
433 .smmu = {
434 .reg = 0x228,
435 .bit = 30,
436 },
437 .la = {
438 .reg = 0x344,
439 .shift = 16,
440 .mask = 0xff,
441 .def = 0x12,
442 },
443 }, {
444 .id = 0x1f,
445 .name = "satar",
446 .swgroup = TEGRA_SWGROUP_SATA,
447 .smmu = {
448 .reg = 0x228,
449 .bit = 31,
450 },
451 .la = {
452 .reg = 0x350,
453 .shift = 0,
454 .mask = 0xff,
455 .def = 0x33,
456 },
457 }, {
458 .id = 0x20,
459 .name = "texsrd",
460 .swgroup = TEGRA_SWGROUP_NV,
461 .smmu = {
462 .reg = 0x22c,
463 .bit = 0,
464 },
465 .la = {
466 .reg = 0x338,
467 .shift = 0,
468 .mask = 0xff,
469 .def = 0x13,
470 },
471 }, {
472 .id = 0x21,
473 .name = "texsrd2",
474 .swgroup = TEGRA_SWGROUP_NV2,
475 .smmu = {
476 .reg = 0x22c,
477 .bit = 1,
478 },
479 .la = {
480 .reg = 0x340,
481 .shift = 0,
482 .mask = 0xff,
483 .def = 0x13,
484 },
485 }, {
486 .id = 0x22,
487 .name = "vdebsevr",
488 .swgroup = TEGRA_SWGROUP_VDE,
489 .smmu = {
490 .reg = 0x22c,
491 .bit = 2,
492 },
493 .la = {
494 .reg = 0x354,
495 .shift = 0,
496 .mask = 0xff,
497 .def = 0xff,
498 },
499 }, {
500 .id = 0x23,
501 .name = "vdember",
502 .swgroup = TEGRA_SWGROUP_VDE,
503 .smmu = {
504 .reg = 0x22c,
505 .bit = 3,
506 },
507 .la = {
508 .reg = 0x354,
509 .shift = 16,
510 .mask = 0xff,
511 .def = 0xd0,
512 },
513 }, {
514 .id = 0x24,
515 .name = "vdemcer",
516 .swgroup = TEGRA_SWGROUP_VDE,
517 .smmu = {
518 .reg = 0x22c,
519 .bit = 4,
520 },
521 .la = {
522 .reg = 0x358,
523 .shift = 0,
524 .mask = 0xff,
525 .def = 0x2a,
526 },
527 }, {
528 .id = 0x25,
529 .name = "vdetper",
530 .swgroup = TEGRA_SWGROUP_VDE,
531 .smmu = {
532 .reg = 0x22c,
533 .bit = 5,
534 },
535 .la = {
536 .reg = 0x358,
537 .shift = 16,
538 .mask = 0xff,
539 .def = 0x74,
540 },
541 }, {
542 .id = 0x26,
543 .name = "mpcorelpr",
544 .swgroup = TEGRA_SWGROUP_MPCORELP,
545 .la = {
546 .reg = 0x324,
547 .shift = 0,
548 .mask = 0xff,
549 .def = 0x04,
550 },
551 }, {
552 .id = 0x27,
553 .name = "mpcorer",
554 .swgroup = TEGRA_SWGROUP_MPCORE,
555 .la = {
556 .reg = 0x320,
557 .shift = 0,
558 .mask = 0xff,
559 .def = 0x04,
560 },
561 }, {
562 .id = 0x28,
563 .name = "eppu",
564 .swgroup = TEGRA_SWGROUP_EPP,
565 .smmu = {
566 .reg = 0x22c,
567 .bit = 8,
568 },
569 .la = {
570 .reg = 0x300,
571 .shift = 16,
572 .mask = 0xff,
573 .def = 0x6c,
574 },
575 }, {
576 .id = 0x29,
577 .name = "eppv",
578 .swgroup = TEGRA_SWGROUP_EPP,
579 .smmu = {
580 .reg = 0x22c,
581 .bit = 9,
582 },
583 .la = {
584 .reg = 0x304,
585 .shift = 0,
586 .mask = 0xff,
587 .def = 0x6c,
588 },
589 }, {
590 .id = 0x2a,
591 .name = "eppy",
592 .swgroup = TEGRA_SWGROUP_EPP,
593 .smmu = {
594 .reg = 0x22c,
595 .bit = 10,
596 },
597 .la = {
598 .reg = 0x304,
599 .shift = 16,
600 .mask = 0xff,
601 .def = 0x6c,
602 },
603 }, {
604 .id = 0x2b,
605 .name = "mpeunifbw",
606 .swgroup = TEGRA_SWGROUP_MPE,
607 .smmu = {
608 .reg = 0x22c,
609 .bit = 11,
610 },
611 .la = {
612 .reg = 0x330,
613 .shift = 0,
614 .mask = 0xff,
615 .def = 0x13,
616 },
617 }, {
618 .id = 0x2c,
619 .name = "viwsb",
620 .swgroup = TEGRA_SWGROUP_VI,
621 .smmu = {
622 .reg = 0x22c,
623 .bit = 12,
624 },
625 .la = {
626 .reg = 0x364,
627 .shift = 16,
628 .mask = 0xff,
629 .def = 0x12,
630 },
631 }, {
632 .id = 0x2d,
633 .name = "viwu",
634 .swgroup = TEGRA_SWGROUP_VI,
635 .smmu = {
636 .reg = 0x22c,
637 .bit = 13,
638 },
639 .la = {
640 .reg = 0x368,
641 .shift = 0,
642 .mask = 0xff,
643 .def = 0xb2,
644 },
645 }, {
646 .id = 0x2e,
647 .name = "viwv",
648 .swgroup = TEGRA_SWGROUP_VI,
649 .smmu = {
650 .reg = 0x22c,
651 .bit = 14,
652 },
653 .la = {
654 .reg = 0x368,
655 .shift = 16,
656 .mask = 0xff,
657 .def = 0xb2,
658 },
659 }, {
660 .id = 0x2f,
661 .name = "viwy",
662 .swgroup = TEGRA_SWGROUP_VI,
663 .smmu = {
664 .reg = 0x22c,
665 .bit = 15,
666 },
667 .la = {
668 .reg = 0x36c,
669 .shift = 0,
670 .mask = 0xff,
671 .def = 0x12,
672 },
673 }, {
674 .id = 0x30,
675 .name = "g2dw",
676 .swgroup = TEGRA_SWGROUP_G2,
677 .smmu = {
678 .reg = 0x22c,
679 .bit = 16,
680 },
681 .la = {
682 .reg = 0x30c,
683 .shift = 16,
684 .mask = 0xff,
685 .def = 0x9,
686 },
687 }, {
688 .id = 0x31,
689 .name = "afiw",
690 .swgroup = TEGRA_SWGROUP_AFI,
691 .smmu = {
692 .reg = 0x22c,
693 .bit = 17,
694 },
695 .la = {
696 .reg = 0x2e0,
697 .shift = 16,
698 .mask = 0xff,
699 .def = 0x0c,
700 },
701 }, {
702 .id = 0x32,
703 .name = "avpcarm7w",
704 .swgroup = TEGRA_SWGROUP_AVPC,
705 .smmu = {
706 .reg = 0x22c,
707 .bit = 18,
708 },
709 .la = {
710 .reg = 0x2e4,
711 .shift = 16,
712 .mask = 0xff,
713 .def = 0x0e,
714 },
715 }, {
716 .id = 0x33,
717 .name = "fdcdwr",
718 .swgroup = TEGRA_SWGROUP_NV,
719 .smmu = {
720 .reg = 0x22c,
721 .bit = 19,
722 },
723 .la = {
724 .reg = 0x338,
725 .shift = 16,
726 .mask = 0xff,
727 .def = 0x0a,
728 },
729 }, {
730 .id = 0x34,
731 .name = "fdcwr2",
732 .swgroup = TEGRA_SWGROUP_NV2,
733 .smmu = {
734 .reg = 0x22c,
735 .bit = 20,
736 },
737 .la = {
738 .reg = 0x340,
739 .shift = 16,
740 .mask = 0xff,
741 .def = 0x0a,
742 },
743 }, {
744 .id = 0x35,
745 .name = "hdaw",
746 .swgroup = TEGRA_SWGROUP_HDA,
747 .smmu = {
748 .reg = 0x22c,
749 .bit = 21,
750 },
751 .la = {
752 .reg = 0x318,
753 .shift = 16,
754 .mask = 0xff,
755 .def = 0xff,
756 },
757 }, {
758 .id = 0x36,
759 .name = "host1xw",
760 .swgroup = TEGRA_SWGROUP_HC,
761 .smmu = {
762 .reg = 0x22c,
763 .bit = 22,
764 },
765 .la = {
766 .reg = 0x314,
767 .shift = 0,
768 .mask = 0xff,
769 .def = 0x10,
770 },
771 }, {
772 .id = 0x37,
773 .name = "ispw",
774 .swgroup = TEGRA_SWGROUP_ISP,
775 .smmu = {
776 .reg = 0x22c,
777 .bit = 23,
778 },
779 .la = {
780 .reg = 0x31c,
781 .shift = 0,
782 .mask = 0xff,
783 .def = 0xff,
784 },
785 }, {
786 .id = 0x38,
787 .name = "mpcorelpw",
788 .swgroup = TEGRA_SWGROUP_MPCORELP,
789 .la = {
790 .reg = 0x324,
791 .shift = 16,
792 .mask = 0xff,
793 .def = 0x0e,
794 },
795 }, {
796 .id = 0x39,
797 .name = "mpcorew",
798 .swgroup = TEGRA_SWGROUP_MPCORE,
799 .la = {
800 .reg = 0x320,
801 .shift = 16,
802 .mask = 0xff,
803 .def = 0x0e,
804 },
805 }, {
806 .id = 0x3a,
807 .name = "mpecswr",
808 .swgroup = TEGRA_SWGROUP_MPE,
809 .smmu = {
810 .reg = 0x22c,
811 .bit = 26,
812 },
813 .la = {
814 .reg = 0x330,
815 .shift = 16,
816 .mask = 0xff,
817 .def = 0xff,
818 },
819 }, {
820 .id = 0x3b,
821 .name = "ppcsahbdmaw",
822 .swgroup = TEGRA_SWGROUP_PPCS,
823 .smmu = {
824 .reg = 0x22c,
825 .bit = 27,
826 },
827 .la = {
828 .reg = 0x348,
829 .shift = 0,
830 .mask = 0xff,
831 .def = 0x10,
832 },
833 }, {
834 .id = 0x3c,
835 .name = "ppcsahbslvw",
836 .swgroup = TEGRA_SWGROUP_PPCS,
837 .smmu = {
838 .reg = 0x22c,
839 .bit = 28,
840 },
841 .la = {
842 .reg = 0x348,
843 .shift = 16,
844 .mask = 0xff,
845 .def = 0x06,
846 },
847 }, {
848 .id = 0x3d,
849 .name = "sataw",
850 .swgroup = TEGRA_SWGROUP_SATA,
851 .smmu = {
852 .reg = 0x22c,
853 .bit = 29,
854 },
855 .la = {
856 .reg = 0x350,
857 .shift = 16,
858 .mask = 0xff,
859 .def = 0x33,
860 },
861 }, {
862 .id = 0x3e,
863 .name = "vdebsevw",
864 .swgroup = TEGRA_SWGROUP_VDE,
865 .smmu = {
866 .reg = 0x22c,
867 .bit = 30,
868 },
869 .la = {
870 .reg = 0x35c,
871 .shift = 0,
872 .mask = 0xff,
873 .def = 0xff,
874 },
875 }, {
876 .id = 0x3f,
877 .name = "vdedbgw",
878 .swgroup = TEGRA_SWGROUP_VDE,
879 .smmu = {
880 .reg = 0x22c,
881 .bit = 31,
882 },
883 .la = {
884 .reg = 0x35c,
885 .shift = 16,
886 .mask = 0xff,
887 .def = 0xff,
888 },
889 }, {
890 .id = 0x40,
891 .name = "vdembew",
892 .swgroup = TEGRA_SWGROUP_VDE,
893 .smmu = {
894 .reg = 0x230,
895 .bit = 0,
896 },
897 .la = {
898 .reg = 0x360,
899 .shift = 0,
900 .mask = 0xff,
901 .def = 0x42,
902 },
903 }, {
904 .id = 0x41,
905 .name = "vdetpmw",
906 .swgroup = TEGRA_SWGROUP_VDE,
907 .smmu = {
908 .reg = 0x230,
909 .bit = 1,
910 },
911 .la = {
912 .reg = 0x360,
913 .shift = 16,
914 .mask = 0xff,
915 .def = 0x2a,
916 },
917 },
918};
919
920static const struct tegra_smmu_swgroup tegra30_swgroups[] = {
921 { .swgroup = TEGRA_SWGROUP_DC, .reg = 0x240 },
922 { .swgroup = TEGRA_SWGROUP_DCB, .reg = 0x244 },
923 { .swgroup = TEGRA_SWGROUP_EPP, .reg = 0x248 },
924 { .swgroup = TEGRA_SWGROUP_G2, .reg = 0x24c },
925 { .swgroup = TEGRA_SWGROUP_MPE, .reg = 0x264 },
926 { .swgroup = TEGRA_SWGROUP_VI, .reg = 0x280 },
927 { .swgroup = TEGRA_SWGROUP_AFI, .reg = 0x238 },
928 { .swgroup = TEGRA_SWGROUP_AVPC, .reg = 0x23c },
929 { .swgroup = TEGRA_SWGROUP_NV, .reg = 0x268 },
930 { .swgroup = TEGRA_SWGROUP_NV2, .reg = 0x26c },
931 { .swgroup = TEGRA_SWGROUP_HDA, .reg = 0x254 },
932 { .swgroup = TEGRA_SWGROUP_HC, .reg = 0x250 },
933 { .swgroup = TEGRA_SWGROUP_PPCS, .reg = 0x270 },
934 { .swgroup = TEGRA_SWGROUP_SATA, .reg = 0x278 },
935 { .swgroup = TEGRA_SWGROUP_VDE, .reg = 0x27c },
936 { .swgroup = TEGRA_SWGROUP_ISP, .reg = 0x258 },
937};
938
939static void tegra30_flush_dcache(struct page *page, unsigned long offset,
940 size_t size)
941{
942 phys_addr_t phys = page_to_phys(page) + offset;
943 void *virt = page_address(page) + offset;
944
945 __cpuc_flush_dcache_area(virt, size);
946 outer_flush_range(phys, phys + size);
947}
948
/* Cache-maintenance callbacks handed to the common Tegra SMMU code. */
949static const struct tegra_smmu_ops tegra30_smmu_ops = {
950	.flush_dcache = tegra30_flush_dcache,
951};
952
/*
 * SMMU description for Tegra30: client/swgroup tables defined above,
 * plus SoC capability flags consumed by the shared SMMU driver.
 */
953static const struct tegra_smmu_soc tegra30_smmu_soc = {
954	.clients = tegra30_mc_clients,
955	.num_clients = ARRAY_SIZE(tegra30_mc_clients),
956	.swgroups = tegra30_swgroups,
957	.num_swgroups = ARRAY_SIZE(tegra30_swgroups),
958	.supports_round_robin_arbitration = false,
959	.supports_request_limit = false,
	/* Tegra30's SMMU provides 4 address space IDs. */
960	.num_asids = 4,
961	.ops = &tegra30_smmu_ops,
962};
963
/*
 * Memory-controller description exported (non-static) to the generic
 * tegra-mc driver core.
 */
964const struct tegra_mc_soc tegra30_mc_soc = {
965	.clients = tegra30_mc_clients,
966	.num_clients = ARRAY_SIZE(tegra30_mc_clients),
967	.num_address_bits = 32,
	/* Minimum DRAM access granularity, in bytes. */
968	.atom_size = 16,
969	.smmu = &tegra30_smmu_soc,
970};
diff --git a/drivers/memory/tegra30-mc.c b/drivers/memory/tegra30-mc.c
deleted file mode 100644
index ef7934535fd1..000000000000
--- a/drivers/memory/tegra30-mc.c
+++ /dev/null
@@ -1,378 +0,0 @@
1/*
2 * Tegra30 Memory Controller
3 *
4 * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 */
19
20#include <linux/err.h>
21#include <linux/kernel.h>
22#include <linux/module.h>
23#include <linux/ratelimit.h>
24#include <linux/platform_device.h>
25#include <linux/interrupt.h>
26#include <linux/io.h>
27
28#define DRV_NAME "tegra30-mc"
29
30#define MC_INTSTATUS 0x0
31#define MC_INTMASK 0x4
32
33#define MC_INT_ERR_SHIFT 6
34#define MC_INT_ERR_MASK (0x1f << MC_INT_ERR_SHIFT)
35#define MC_INT_DECERR_EMEM BIT(MC_INT_ERR_SHIFT)
36#define MC_INT_SECURITY_VIOLATION BIT(MC_INT_ERR_SHIFT + 2)
37#define MC_INT_ARBITRATION_EMEM BIT(MC_INT_ERR_SHIFT + 3)
38#define MC_INT_INVALID_SMMU_PAGE BIT(MC_INT_ERR_SHIFT + 4)
39
40#define MC_ERR_STATUS 0x8
41#define MC_ERR_ADR 0xc
42
43#define MC_ERR_TYPE_SHIFT 28
44#define MC_ERR_TYPE_MASK (7 << MC_ERR_TYPE_SHIFT)
45#define MC_ERR_TYPE_DECERR_EMEM 2
46#define MC_ERR_TYPE_SECURITY_TRUSTZONE 3
47#define MC_ERR_TYPE_SECURITY_CARVEOUT 4
48#define MC_ERR_TYPE_INVALID_SMMU_PAGE 6
49
50#define MC_ERR_INVALID_SMMU_PAGE_SHIFT 25
51#define MC_ERR_INVALID_SMMU_PAGE_MASK (7 << MC_ERR_INVALID_SMMU_PAGE_SHIFT)
52#define MC_ERR_RW_SHIFT 16
53#define MC_ERR_RW BIT(MC_ERR_RW_SHIFT)
54#define MC_ERR_SECURITY BIT(MC_ERR_RW_SHIFT + 1)
55
56#define SECURITY_VIOLATION_TYPE BIT(30) /* 0=TRUSTZONE, 1=CARVEOUT */
57
58#define MC_EMEM_ARB_CFG 0x90
59#define MC_EMEM_ARB_OUTSTANDING_REQ 0x94
60#define MC_EMEM_ARB_TIMING_RCD 0x98
61#define MC_EMEM_ARB_TIMING_RP 0x9c
62#define MC_EMEM_ARB_TIMING_RC 0xa0
63#define MC_EMEM_ARB_TIMING_RAS 0xa4
64#define MC_EMEM_ARB_TIMING_FAW 0xa8
65#define MC_EMEM_ARB_TIMING_RRD 0xac
66#define MC_EMEM_ARB_TIMING_RAP2PRE 0xb0
67#define MC_EMEM_ARB_TIMING_WAP2PRE 0xb4
68#define MC_EMEM_ARB_TIMING_R2R 0xb8
69#define MC_EMEM_ARB_TIMING_W2W 0xbc
70#define MC_EMEM_ARB_TIMING_R2W 0xc0
71#define MC_EMEM_ARB_TIMING_W2R 0xc4
72
73#define MC_EMEM_ARB_DA_TURNS 0xd0
74#define MC_EMEM_ARB_DA_COVERS 0xd4
75#define MC_EMEM_ARB_MISC0 0xd8
76#define MC_EMEM_ARB_MISC1 0xdc
77
78#define MC_EMEM_ARB_RING3_THROTTLE 0xe4
79#define MC_EMEM_ARB_OVERRIDE 0xe8
80
81#define MC_TIMING_CONTROL 0xfc
82
83#define MC_CLIENT_ID_MASK 0x7f
84
85#define NUM_MC_REG_BANKS 4
86
87struct tegra30_mc {
88 void __iomem *regs[NUM_MC_REG_BANKS];
89 struct device *dev;
90 u32 ctx[0];
91};
92
93static inline u32 mc_readl(struct tegra30_mc *mc, u32 offs)
94{
95 u32 val = 0;
96
97 if (offs < 0x10)
98 val = readl(mc->regs[0] + offs);
99 else if (offs < 0x1f0)
100 val = readl(mc->regs[1] + offs - 0x3c);
101 else if (offs < 0x228)
102 val = readl(mc->regs[2] + offs - 0x200);
103 else if (offs < 0x400)
104 val = readl(mc->regs[3] + offs - 0x284);
105
106 return val;
107}
108
109static inline void mc_writel(struct tegra30_mc *mc, u32 val, u32 offs)
110{
111 if (offs < 0x10)
112 writel(val, mc->regs[0] + offs);
113 else if (offs < 0x1f0)
114 writel(val, mc->regs[1] + offs - 0x3c);
115 else if (offs < 0x228)
116 writel(val, mc->regs[2] + offs - 0x200);
117 else if (offs < 0x400)
118 writel(val, mc->regs[3] + offs - 0x284);
119}
120
/*
 * Memory-controller client names, indexed by the client ID extracted
 * from MC_ERR_STATUS with MC_CLIENT_ID_MASK.  The order is the hardware
 * ID order and must not change.
 */
static const char * const tegra30_mc_client[] = {
	"csr_ptcr",
	/* display read clients */
	"cbr_display0a", "cbr_display0ab", "cbr_display0b", "cbr_display0bb",
	"cbr_display0c", "cbr_display0cb", "cbr_display1b", "cbr_display1bb",
	/* other read clients */
	"cbr_eppup", "cbr_g2pr", "cbr_g2sr", "cbr_mpeunifbr", "cbr_viruv",
	"csr_afir", "csr_avpcarm7r", "csr_displayhc", "csr_displayhcb",
	"csr_fdcdrd", "csr_fdcdrd2", "csr_g2dr", "csr_hdar", "csr_host1xdmar",
	"csr_host1xr", "csr_idxsrd", "csr_idxsrd2", "csr_mpe_ipred",
	"csr_mpeamemrd", "csr_mpecsrd", "csr_ppcsahbdmar", "csr_ppcsahbslvr",
	"csr_satar", "csr_texsrd", "csr_texsrd2", "csr_vdebsevr",
	"csr_vdember", "csr_vdemcer", "csr_vdetper", "csr_mpcorelpr",
	"csr_mpcorer",
	/* write clients */
	"cbw_eppu", "cbw_eppv", "cbw_eppy", "cbw_mpeunifbw", "cbw_viwsb",
	"cbw_viwu", "cbw_viwv", "cbw_viwy", "ccw_g2dw",
	"csw_afiw", "csw_avpcarm7w", "csw_fdcdwr", "csw_fdcdwr2", "csw_hdaw",
	"csw_host1xw", "csw_ispw", "csw_mpcorelpw", "csw_mpcorew",
	"csw_mpecswr", "csw_ppcsahbdmaw", "csw_ppcsahbslvw", "csw_sataw",
	"csw_vdebsevw", "csw_vdedbgw", "csw_vdembew", "csw_vdetpmw",
};
189
190static void tegra30_mc_decode(struct tegra30_mc *mc, int n)
191{
192 u32 err, addr;
193 const char * const mc_int_err[] = {
194 "MC_DECERR",
195 "Unknown",
196 "MC_SECURITY_ERR",
197 "MC_ARBITRATION_EMEM",
198 "MC_SMMU_ERR",
199 };
200 const char * const err_type[] = {
201 "Unknown",
202 "Unknown",
203 "DECERR_EMEM",
204 "SECURITY_TRUSTZONE",
205 "SECURITY_CARVEOUT",
206 "Unknown",
207 "INVALID_SMMU_PAGE",
208 "Unknown",
209 };
210 char attr[6];
211 int cid, perm, type, idx;
212 const char *client = "Unknown";
213
214 idx = n - MC_INT_ERR_SHIFT;
215 if ((idx < 0) || (idx >= ARRAY_SIZE(mc_int_err)) || (idx == 1)) {
216 dev_err_ratelimited(mc->dev, "Unknown interrupt status %08lx\n",
217 BIT(n));
218 return;
219 }
220
221 err = mc_readl(mc, MC_ERR_STATUS);
222
223 type = (err & MC_ERR_TYPE_MASK) >> MC_ERR_TYPE_SHIFT;
224 perm = (err & MC_ERR_INVALID_SMMU_PAGE_MASK) >>
225 MC_ERR_INVALID_SMMU_PAGE_SHIFT;
226 if (type == MC_ERR_TYPE_INVALID_SMMU_PAGE)
227 sprintf(attr, "%c-%c-%c",
228 (perm & BIT(2)) ? 'R' : '-',
229 (perm & BIT(1)) ? 'W' : '-',
230 (perm & BIT(0)) ? 'S' : '-');
231 else
232 attr[0] = '\0';
233
234 cid = err & MC_CLIENT_ID_MASK;
235 if (cid < ARRAY_SIZE(tegra30_mc_client))
236 client = tegra30_mc_client[cid];
237
238 addr = mc_readl(mc, MC_ERR_ADR);
239
240 dev_err_ratelimited(mc->dev, "%s (0x%08x): 0x%08x %s (%s %s %s %s)\n",
241 mc_int_err[idx], err, addr, client,
242 (err & MC_ERR_SECURITY) ? "secure" : "non-secure",
243 (err & MC_ERR_RW) ? "write" : "read",
244 err_type[type], attr);
245}
246
247static const u32 tegra30_mc_ctx[] = {
248 MC_EMEM_ARB_CFG,
249 MC_EMEM_ARB_OUTSTANDING_REQ,
250 MC_EMEM_ARB_TIMING_RCD,
251 MC_EMEM_ARB_TIMING_RP,
252 MC_EMEM_ARB_TIMING_RC,
253 MC_EMEM_ARB_TIMING_RAS,
254 MC_EMEM_ARB_TIMING_FAW,
255 MC_EMEM_ARB_TIMING_RRD,
256 MC_EMEM_ARB_TIMING_RAP2PRE,
257 MC_EMEM_ARB_TIMING_WAP2PRE,
258 MC_EMEM_ARB_TIMING_R2R,
259 MC_EMEM_ARB_TIMING_W2W,
260 MC_EMEM_ARB_TIMING_R2W,
261 MC_EMEM_ARB_TIMING_W2R,
262 MC_EMEM_ARB_DA_TURNS,
263 MC_EMEM_ARB_DA_COVERS,
264 MC_EMEM_ARB_MISC0,
265 MC_EMEM_ARB_MISC1,
266 MC_EMEM_ARB_RING3_THROTTLE,
267 MC_EMEM_ARB_OVERRIDE,
268 MC_INTMASK,
269};
270
#ifdef CONFIG_PM
/* Snapshot the arbiter/interrupt registers before the MC loses state. */
static int tegra30_mc_suspend(struct device *dev)
{
	struct tegra30_mc *mc = dev_get_drvdata(dev);
	int i;

	for (i = 0; i < ARRAY_SIZE(tegra30_mc_ctx); i++)
		mc->ctx[i] = mc_readl(mc, tegra30_mc_ctx[i]);

	return 0;
}

/* Restore the snapshot taken by tegra30_mc_suspend(). */
static int tegra30_mc_resume(struct device *dev)
{
	struct tegra30_mc *mc = dev_get_drvdata(dev);
	int i;

	for (i = 0; i < ARRAY_SIZE(tegra30_mc_ctx); i++)
		mc_writel(mc, mc->ctx[i], tegra30_mc_ctx[i]);

	/* Latch the restored timing registers into the active set. */
	mc_writel(mc, 1, MC_TIMING_CONTROL);
	/* Read-back to ensure that write reached */
	mc_readl(mc, MC_TIMING_CONTROL);

	return 0;
}
#endif
296
/*
 * Use the same handlers for system sleep and runtime PM
 * (UNIVERSAL_DEV_PM_OPS wires both to suspend/resume above).
 */
297static UNIVERSAL_DEV_PM_OPS(tegra30_mc_pm,
298			    tegra30_mc_suspend,
299			    tegra30_mc_resume, NULL);
300
/* Device-tree compatible string this driver binds against. */
301static const struct of_device_id tegra30_mc_of_match[] = {
302	{ .compatible = "nvidia,tegra30-mc", },
303	{},
304};
305
306static irqreturn_t tegra30_mc_isr(int irq, void *data)
307{
308 u32 stat, mask, bit;
309 struct tegra30_mc *mc = data;
310
311 stat = mc_readl(mc, MC_INTSTATUS);
312 mask = mc_readl(mc, MC_INTMASK);
313 mask &= stat;
314 if (!mask)
315 return IRQ_NONE;
316 while ((bit = ffs(mask)) != 0) {
317 tegra30_mc_decode(mc, bit - 1);
318 mask &= ~BIT(bit - 1);
319 }
320
321 mc_writel(mc, stat, MC_INTSTATUS);
322 return IRQ_HANDLED;
323}
324
325static int tegra30_mc_probe(struct platform_device *pdev)
326{
327 struct resource *irq;
328 struct tegra30_mc *mc;
329 size_t bytes;
330 int err, i;
331 u32 intmask;
332
333 bytes = sizeof(*mc) + sizeof(u32) * ARRAY_SIZE(tegra30_mc_ctx);
334 mc = devm_kzalloc(&pdev->dev, bytes, GFP_KERNEL);
335 if (!mc)
336 return -ENOMEM;
337 mc->dev = &pdev->dev;
338
339 for (i = 0; i < ARRAY_SIZE(mc->regs); i++) {
340 struct resource *res;
341
342 res = platform_get_resource(pdev, IORESOURCE_MEM, i);
343 mc->regs[i] = devm_ioremap_resource(&pdev->dev, res);
344 if (IS_ERR(mc->regs[i]))
345 return PTR_ERR(mc->regs[i]);
346 }
347
348 irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
349 if (!irq)
350 return -ENODEV;
351 err = devm_request_irq(&pdev->dev, irq->start, tegra30_mc_isr,
352 IRQF_SHARED, dev_name(&pdev->dev), mc);
353 if (err)
354 return -ENODEV;
355
356 platform_set_drvdata(pdev, mc);
357
358 intmask = MC_INT_INVALID_SMMU_PAGE |
359 MC_INT_DECERR_EMEM | MC_INT_SECURITY_VIOLATION;
360 mc_writel(mc, intmask, MC_INTMASK);
361 return 0;
362}
363
/* Platform driver glue; binding is driven by the DT match table. */
364static struct platform_driver tegra30_mc_driver = {
365	.probe = tegra30_mc_probe,
366	.driver = {
367		.name = DRV_NAME,
368		.owner = THIS_MODULE,
369		.of_match_table = tegra30_mc_of_match,
370		.pm = &tegra30_mc_pm,
371	},
372};
373module_platform_driver(tegra30_mc_driver);
374
375MODULE_AUTHOR("Hiroshi DOYU <hdoyu@nvidia.com>");
376MODULE_DESCRIPTION("Tegra30 MC driver");
377MODULE_LICENSE("GPL v2");
378MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 0707fa2c701b..5bdaae15a742 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -1994,6 +1994,7 @@ static struct scsi_host_template mptsas_driver_template = {
1994 .cmd_per_lun = 7, 1994 .cmd_per_lun = 7,
1995 .use_clustering = ENABLE_CLUSTERING, 1995 .use_clustering = ENABLE_CLUSTERING,
1996 .shost_attrs = mptscsih_host_attrs, 1996 .shost_attrs = mptscsih_host_attrs,
1997 .use_blk_tags = 1,
1997}; 1998};
1998 1999
1999static int mptsas_get_linkerrors(struct sas_phy *phy) 2000static int mptsas_get_linkerrors(struct sas_phy *phy)
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index e7dcb2583369..6c9fc11efb87 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -2311,26 +2311,21 @@ mptscsih_slave_destroy(struct scsi_device *sdev)
2311 * mptscsih_change_queue_depth - This function will set a devices queue depth 2311 * mptscsih_change_queue_depth - This function will set a devices queue depth
2312 * @sdev: per scsi_device pointer 2312 * @sdev: per scsi_device pointer
2313 * @qdepth: requested queue depth 2313 * @qdepth: requested queue depth
2314 * @reason: calling context
2315 * 2314 *
2316 * Adding support for new 'change_queue_depth' api. 2315 * Adding support for new 'change_queue_depth' api.
2317*/ 2316*/
2318int 2317int
2319mptscsih_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason) 2318mptscsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
2320{ 2319{
2321 MPT_SCSI_HOST *hd = shost_priv(sdev->host); 2320 MPT_SCSI_HOST *hd = shost_priv(sdev->host);
2322 VirtTarget *vtarget; 2321 VirtTarget *vtarget;
2323 struct scsi_target *starget; 2322 struct scsi_target *starget;
2324 int max_depth; 2323 int max_depth;
2325 int tagged;
2326 MPT_ADAPTER *ioc = hd->ioc; 2324 MPT_ADAPTER *ioc = hd->ioc;
2327 2325
2328 starget = scsi_target(sdev); 2326 starget = scsi_target(sdev);
2329 vtarget = starget->hostdata; 2327 vtarget = starget->hostdata;
2330 2328
2331 if (reason != SCSI_QDEPTH_DEFAULT)
2332 return -EOPNOTSUPP;
2333
2334 if (ioc->bus_type == SPI) { 2329 if (ioc->bus_type == SPI) {
2335 if (!(vtarget->tflags & MPT_TARGET_FLAGS_Q_YES)) 2330 if (!(vtarget->tflags & MPT_TARGET_FLAGS_Q_YES))
2336 max_depth = 1; 2331 max_depth = 1;
@@ -2347,13 +2342,8 @@ mptscsih_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
2347 2342
2348 if (qdepth > max_depth) 2343 if (qdepth > max_depth)
2349 qdepth = max_depth; 2344 qdepth = max_depth;
2350 if (qdepth == 1)
2351 tagged = 0;
2352 else
2353 tagged = MSG_SIMPLE_TAG;
2354 2345
2355 scsi_adjust_queue_depth(sdev, tagged, qdepth); 2346 return scsi_change_queue_depth(sdev, qdepth);
2356 return sdev->queue_depth;
2357} 2347}
2358 2348
2359/* 2349/*
@@ -2397,12 +2387,10 @@ mptscsih_slave_configure(struct scsi_device *sdev)
2397 ioc->name, vtarget->negoFlags, vtarget->maxOffset, 2387 ioc->name, vtarget->negoFlags, vtarget->maxOffset,
2398 vtarget->minSyncFactor)); 2388 vtarget->minSyncFactor));
2399 2389
2400 mptscsih_change_queue_depth(sdev, MPT_SCSI_CMD_PER_DEV_HIGH, 2390 mptscsih_change_queue_depth(sdev, MPT_SCSI_CMD_PER_DEV_HIGH);
2401 SCSI_QDEPTH_DEFAULT);
2402 dsprintk(ioc, printk(MYIOC_s_DEBUG_FMT 2391 dsprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2403 "tagged %d, simple %d, ordered %d\n", 2392 "tagged %d, simple %d\n",
2404 ioc->name,sdev->tagged_supported, sdev->simple_tags, 2393 ioc->name,sdev->tagged_supported, sdev->simple_tags));
2405 sdev->ordered_tags));
2406 2394
2407 blk_queue_dma_alignment (sdev->request_queue, 512 - 1); 2395 blk_queue_dma_alignment (sdev->request_queue, 512 - 1);
2408 2396
diff --git a/drivers/message/fusion/mptscsih.h b/drivers/message/fusion/mptscsih.h
index e1b1a198a62a..2baeefd9be7a 100644
--- a/drivers/message/fusion/mptscsih.h
+++ b/drivers/message/fusion/mptscsih.h
@@ -128,8 +128,7 @@ extern int mptscsih_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_F
128extern int mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *r); 128extern int mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *r);
129extern int mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply); 129extern int mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply);
130extern int mptscsih_ioc_reset(MPT_ADAPTER *ioc, int post_reset); 130extern int mptscsih_ioc_reset(MPT_ADAPTER *ioc, int post_reset);
131extern int mptscsih_change_queue_depth(struct scsi_device *sdev, int qdepth, 131extern int mptscsih_change_queue_depth(struct scsi_device *sdev, int qdepth);
132 int reason);
133extern u8 mptscsih_raid_id_to_num(MPT_ADAPTER *ioc, u8 channel, u8 id); 132extern u8 mptscsih_raid_id_to_num(MPT_ADAPTER *ioc, u8 channel, u8 id);
134extern int mptscsih_is_phys_disk(MPT_ADAPTER *ioc, u8 channel, u8 id); 133extern int mptscsih_is_phys_disk(MPT_ADAPTER *ioc, u8 channel, u8 id);
135extern struct device_attribute *mptscsih_host_attrs[]; 134extern struct device_attribute *mptscsih_host_attrs[];
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 1456ea70bbc7..2e6b7311fabc 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -59,6 +59,17 @@ config MFD_AAT2870_CORE
59 additional drivers must be enabled in order to use the 59 additional drivers must be enabled in order to use the
60 functionality of the device. 60 functionality of the device.
61 61
62config MFD_ATMEL_HLCDC
63 tristate "Atmel HLCDC (High-end LCD Controller)"
64 select MFD_CORE
65 select REGMAP_MMIO
66 depends on OF
67 help
68 If you say yes here you get support for the HLCDC block.
69 This driver provides common support for accessing the device,
70 additional drivers must be enabled in order to use the
71 functionality of the device.
72
62config MFD_BCM590XX 73config MFD_BCM590XX
63 tristate "Broadcom BCM590xx PMUs" 74 tristate "Broadcom BCM590xx PMUs"
64 select MFD_CORE 75 select MFD_CORE
@@ -74,7 +85,8 @@ config MFD_AXP20X
74 select REGMAP_IRQ 85 select REGMAP_IRQ
75 depends on I2C=y 86 depends on I2C=y
76 help 87 help
77 If you say Y here you get support for the X-Powers AXP202 and AXP209. 88 If you say Y here you get support for the X-Powers AXP202, AXP209 and
89 AXP288 power management IC (PMIC).
78 This driver include only the core APIs. You have to select individual 90 This driver include only the core APIs. You have to select individual
79 components like regulators or the PEK (Power Enable Key) under the 91 components like regulators or the PEK (Power Enable Key) under the
80 corresponding menus. 92 corresponding menus.
@@ -183,6 +195,16 @@ config MFD_DA9063
183 Additional drivers must be enabled in order to use the functionality 195 Additional drivers must be enabled in order to use the functionality
184 of the device. 196 of the device.
185 197
198config MFD_DLN2
199 tristate "Diolan DLN2 support"
200 select MFD_CORE
201 depends on USB
202 help
203 This adds support for Diolan USB-I2C/SPI/GPIO Master Adapter
204 DLN-2. Additional drivers such as I2C_DLN2, GPIO_DLN2,
205 etc. must be enabled in order to use the functionality of
206 the device.
207
186config MFD_MC13XXX 208config MFD_MC13XXX
187 tristate 209 tristate
188 depends on (SPI_MASTER || I2C) 210 depends on (SPI_MASTER || I2C)
@@ -655,7 +677,6 @@ config MFD_SEC_CORE
655 select MFD_CORE 677 select MFD_CORE
656 select REGMAP_I2C 678 select REGMAP_I2C
657 select REGMAP_IRQ 679 select REGMAP_IRQ
658 select REGULATOR
659 help 680 help
660 Support for the Samsung Electronics MFD series. 681 Support for the Samsung Electronics MFD series.
661 This driver provides common support for accessing the device, 682 This driver provides common support for accessing the device,
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 8bd54b1253af..53467e211381 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -13,7 +13,7 @@ obj-$(CONFIG_MFD_CROS_EC) += cros_ec.o
13obj-$(CONFIG_MFD_CROS_EC_I2C) += cros_ec_i2c.o 13obj-$(CONFIG_MFD_CROS_EC_I2C) += cros_ec_i2c.o
14obj-$(CONFIG_MFD_CROS_EC_SPI) += cros_ec_spi.o 14obj-$(CONFIG_MFD_CROS_EC_SPI) += cros_ec_spi.o
15 15
16rtsx_pci-objs := rtsx_pcr.o rts5209.o rts5229.o rtl8411.o rts5227.o rts5249.o 16rtsx_pci-objs := rtsx_pcr.o rtsx_gops.o rts5209.o rts5229.o rtl8411.o rts5227.o rts5249.o
17obj-$(CONFIG_MFD_RTSX_PCI) += rtsx_pci.o 17obj-$(CONFIG_MFD_RTSX_PCI) += rtsx_pci.o
18obj-$(CONFIG_MFD_RTSX_USB) += rtsx_usb.o 18obj-$(CONFIG_MFD_RTSX_USB) += rtsx_usb.o
19 19
@@ -157,6 +157,7 @@ obj-$(CONFIG_MFD_SPMI_PMIC) += qcom-spmi-pmic.o
157obj-$(CONFIG_TPS65911_COMPARATOR) += tps65911-comparator.o 157obj-$(CONFIG_TPS65911_COMPARATOR) += tps65911-comparator.o
158obj-$(CONFIG_MFD_TPS65090) += tps65090.o 158obj-$(CONFIG_MFD_TPS65090) += tps65090.o
159obj-$(CONFIG_MFD_AAT2870_CORE) += aat2870-core.o 159obj-$(CONFIG_MFD_AAT2870_CORE) += aat2870-core.o
160obj-$(CONFIG_MFD_ATMEL_HLCDC) += atmel-hlcdc.o
160obj-$(CONFIG_MFD_INTEL_MSIC) += intel_msic.o 161obj-$(CONFIG_MFD_INTEL_MSIC) += intel_msic.o
161obj-$(CONFIG_MFD_PALMAS) += palmas.o 162obj-$(CONFIG_MFD_PALMAS) += palmas.o
162obj-$(CONFIG_MFD_VIPERBOARD) += viperboard.o 163obj-$(CONFIG_MFD_VIPERBOARD) += viperboard.o
@@ -174,6 +175,7 @@ obj-$(CONFIG_MFD_STW481X) += stw481x.o
174obj-$(CONFIG_MFD_IPAQ_MICRO) += ipaq-micro.o 175obj-$(CONFIG_MFD_IPAQ_MICRO) += ipaq-micro.o
175obj-$(CONFIG_MFD_MENF21BMC) += menf21bmc.o 176obj-$(CONFIG_MFD_MENF21BMC) += menf21bmc.o
176obj-$(CONFIG_MFD_HI6421_PMIC) += hi6421-pmic-core.o 177obj-$(CONFIG_MFD_HI6421_PMIC) += hi6421-pmic-core.o
178obj-$(CONFIG_MFD_DLN2) += dln2.o
177 179
178intel-soc-pmic-objs := intel_soc_pmic_core.o intel_soc_pmic_crc.o 180intel-soc-pmic-objs := intel_soc_pmic_core.o intel_soc_pmic_crc.o
179obj-$(CONFIG_INTEL_SOC_PMIC) += intel-soc-pmic.o 181obj-$(CONFIG_INTEL_SOC_PMIC) += intel-soc-pmic.o
diff --git a/drivers/mfd/ab8500-gpadc.c b/drivers/mfd/ab8500-gpadc.c
index 36000f920981..8e3168d160b2 100644
--- a/drivers/mfd/ab8500-gpadc.c
+++ b/drivers/mfd/ab8500-gpadc.c
@@ -867,7 +867,7 @@ static void ab8500_gpadc_read_calibration_data(struct ab8500_gpadc *gpadc)
867 gpadc->cal_data[ADC_INPUT_VBAT].offset); 867 gpadc->cal_data[ADC_INPUT_VBAT].offset);
868} 868}
869 869
870#ifdef CONFIG_PM_RUNTIME 870#ifdef CONFIG_PM
871static int ab8500_gpadc_runtime_suspend(struct device *dev) 871static int ab8500_gpadc_runtime_suspend(struct device *dev)
872{ 872{
873 struct ab8500_gpadc *gpadc = dev_get_drvdata(dev); 873 struct ab8500_gpadc *gpadc = dev_get_drvdata(dev);
diff --git a/drivers/mfd/ab8500-sysctrl.c b/drivers/mfd/ab8500-sysctrl.c
index 8e0dae59844d..94dbcdd2a1ff 100644
--- a/drivers/mfd/ab8500-sysctrl.c
+++ b/drivers/mfd/ab8500-sysctrl.c
@@ -85,63 +85,6 @@ shutdown:
85 } 85 }
86} 86}
87 87
88/*
89 * Use the AB WD to reset the platform. It will perform a hard
90 * reset instead of a soft reset. Write the reset reason to
91 * the AB before reset, which can be read upon restart.
92 */
93void ab8500_restart(char mode, const char *cmd)
94{
95 struct ab8500_platform_data *plat;
96 struct ab8500_sysctrl_platform_data *pdata;
97 u16 reason = 0;
98 u8 val;
99
100 if (sysctrl_dev == NULL) {
101 pr_err("%s: sysctrl not initialized\n", __func__);
102 return;
103 }
104
105 plat = dev_get_platdata(sysctrl_dev->parent);
106 pdata = plat->sysctrl;
107 if (pdata && pdata->reboot_reason_code)
108 reason = pdata->reboot_reason_code(cmd);
109 else
110 pr_warn("[%s] No reboot reason set. Default reason %d\n",
111 __func__, reason);
112
113 /*
114 * Disable RTC alarm, just a precaution so that no alarm
115 * is running when WD reset is executed.
116 */
117 abx500_get_register_interruptible(sysctrl_dev, AB8500_RTC,
118 RTC_CTRL , &val);
119 abx500_set_register_interruptible(sysctrl_dev, AB8500_RTC,
120 RTC_CTRL , (val & ~RTC_ALARM_ENABLE));
121
122 /*
123 * Android is not using the RTC alarm registers during reboot
124 * so we borrow them for writing the reason of reset
125 */
126
127 /* reason[8 LSB] */
128 val = reason & 0xFF;
129 abx500_set_register_interruptible(sysctrl_dev, AB8500_RTC,
130 AB8500_ALARM_MIN_LOW , val);
131
132 /* reason[8 MSB] */
133 val = (reason>>8) & 0xFF;
134 abx500_set_register_interruptible(sysctrl_dev, AB8500_RTC,
135 AB8500_ALARM_MIN_MID , val);
136
137 /* Setting WD timeout to 0 */
138 ab8500_sysctrl_write(AB8500_MAINWDOGTIMER, 0xFF, 0x0);
139
140 /* Setting the parameters to AB8500 WD*/
141 ab8500_sysctrl_write(AB8500_MAINWDOGCTRL, 0xFF, (AB8500_ENABLE_WD |
142 AB8500_WD_RESTART_ON_EXPIRE | AB8500_KICK_WD));
143}
144
145static inline bool valid_bank(u8 bank) 88static inline bool valid_bank(u8 bank)
146{ 89{
147 return ((bank == AB8500_SYS_CTRL1_BLOCK) || 90 return ((bank == AB8500_SYS_CTRL1_BLOCK) ||
diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
index bce7c0784b6b..09ba8f186e6a 100644
--- a/drivers/mfd/arizona-core.c
+++ b/drivers/mfd/arizona-core.c
@@ -330,7 +330,7 @@ err_fll:
330 return err; 330 return err;
331} 331}
332 332
333#ifdef CONFIG_PM_RUNTIME 333#ifdef CONFIG_PM
334static int arizona_runtime_resume(struct device *dev) 334static int arizona_runtime_resume(struct device *dev)
335{ 335{
336 struct arizona *arizona = dev_get_drvdata(dev); 336 struct arizona *arizona = dev_get_drvdata(dev);
@@ -1024,7 +1024,7 @@ int arizona_dev_init(struct arizona *arizona)
1024 goto err_irq; 1024 goto err_irq;
1025 } 1025 }
1026 1026
1027#ifdef CONFIG_PM_RUNTIME 1027#ifdef CONFIG_PM
1028 regulator_disable(arizona->dcvdd); 1028 regulator_disable(arizona->dcvdd);
1029#endif 1029#endif
1030 1030
diff --git a/drivers/mfd/arizona-spi.c b/drivers/mfd/arizona-spi.c
index 5145d78bf07e..8ef58bcff193 100644
--- a/drivers/mfd/arizona-spi.c
+++ b/drivers/mfd/arizona-spi.c
@@ -75,7 +75,9 @@ static int arizona_spi_probe(struct spi_device *spi)
75static int arizona_spi_remove(struct spi_device *spi) 75static int arizona_spi_remove(struct spi_device *spi)
76{ 76{
77 struct arizona *arizona = spi_get_drvdata(spi); 77 struct arizona *arizona = spi_get_drvdata(spi);
78
78 arizona_dev_exit(arizona); 79 arizona_dev_exit(arizona);
80
79 return 0; 81 return 0;
80} 82}
81 83
diff --git a/drivers/mfd/atmel-hlcdc.c b/drivers/mfd/atmel-hlcdc.c
new file mode 100644
index 000000000000..cfd58f4cc5c3
--- /dev/null
+++ b/drivers/mfd/atmel-hlcdc.c
@@ -0,0 +1,122 @@
1/*
2 * Copyright (C) 2014 Free Electrons
3 * Copyright (C) 2014 Atmel
4 *
5 * Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 as published by
9 * the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20#include <linux/clk.h>
21#include <linux/mfd/atmel-hlcdc.h>
22#include <linux/mfd/core.h>
23#include <linux/module.h>
24#include <linux/platform_device.h>
25#include <linux/regmap.h>
26
27#define ATMEL_HLCDC_REG_MAX (0x4000 - 0x4)
28
29static const struct mfd_cell atmel_hlcdc_cells[] = {
30 {
31 .name = "atmel-hlcdc-pwm",
32 .of_compatible = "atmel,hlcdc-pwm",
33 },
34 {
35 .name = "atmel-hlcdc-dc",
36 .of_compatible = "atmel,hlcdc-display-controller",
37 },
38};
39
40static const struct regmap_config atmel_hlcdc_regmap_config = {
41 .reg_bits = 32,
42 .val_bits = 32,
43 .reg_stride = 4,
44 .max_register = ATMEL_HLCDC_REG_MAX,
45};
46
47static int atmel_hlcdc_probe(struct platform_device *pdev)
48{
49 struct device *dev = &pdev->dev;
50 struct atmel_hlcdc *hlcdc;
51 struct resource *res;
52 void __iomem *regs;
53
54 hlcdc = devm_kzalloc(dev, sizeof(*hlcdc), GFP_KERNEL);
55 if (!hlcdc)
56 return -ENOMEM;
57
58 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
59 regs = devm_ioremap_resource(dev, res);
60 if (IS_ERR(regs))
61 return PTR_ERR(regs);
62
63 hlcdc->irq = platform_get_irq(pdev, 0);
64 if (hlcdc->irq < 0)
65 return hlcdc->irq;
66
67 hlcdc->periph_clk = devm_clk_get(dev, "periph_clk");
68 if (IS_ERR(hlcdc->periph_clk)) {
69 dev_err(dev, "failed to get peripheral clock\n");
70 return PTR_ERR(hlcdc->periph_clk);
71 }
72
73 hlcdc->sys_clk = devm_clk_get(dev, "sys_clk");
74 if (IS_ERR(hlcdc->sys_clk)) {
75 dev_err(dev, "failed to get system clock\n");
76 return PTR_ERR(hlcdc->sys_clk);
77 }
78
79 hlcdc->slow_clk = devm_clk_get(dev, "slow_clk");
80 if (IS_ERR(hlcdc->slow_clk)) {
81 dev_err(dev, "failed to get slow clock\n");
82 return PTR_ERR(hlcdc->slow_clk);
83 }
84
85 hlcdc->regmap = devm_regmap_init_mmio(dev, regs,
86 &atmel_hlcdc_regmap_config);
87 if (IS_ERR(hlcdc->regmap))
88 return PTR_ERR(hlcdc->regmap);
89
90 dev_set_drvdata(dev, hlcdc);
91
92 return mfd_add_devices(dev, -1, atmel_hlcdc_cells,
93 ARRAY_SIZE(atmel_hlcdc_cells),
94 NULL, 0, NULL);
95}
96
97static int atmel_hlcdc_remove(struct platform_device *pdev)
98{
99 mfd_remove_devices(&pdev->dev);
100
101 return 0;
102}
103
104static const struct of_device_id atmel_hlcdc_match[] = {
105 { .compatible = "atmel,sama5d3-hlcdc" },
106 { /* sentinel */ },
107};
108
109static struct platform_driver atmel_hlcdc_driver = {
110 .probe = atmel_hlcdc_probe,
111 .remove = atmel_hlcdc_remove,
112 .driver = {
113 .name = "atmel-hlcdc",
114 .of_match_table = atmel_hlcdc_match,
115 },
116};
117module_platform_driver(atmel_hlcdc_driver);
118
119MODULE_ALIAS("platform:atmel-hlcdc");
120MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
121MODULE_DESCRIPTION("Atmel HLCDC driver");
122MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
index 6231adbb295d..b1b580a88654 100644
--- a/drivers/mfd/axp20x.c
+++ b/drivers/mfd/axp20x.c
@@ -1,9 +1,9 @@
1/* 1/*
2 * axp20x.c - MFD core driver for the X-Powers AXP202 and AXP209 2 * axp20x.c - MFD core driver for the X-Powers' Power Management ICs
3 * 3 *
4 * AXP20x comprises an adaptive USB-Compatible PWM charger, 2 BUCK DC-DC 4 * AXP20x typically comprises an adaptive USB-Compatible PWM charger, BUCK DC-DC
5 * converters, 5 LDOs, multiple 12-bit ADCs of voltage, current and temperature 5 * converters, LDOs, multiple 12-bit ADCs of voltage, current and temperature
6 * as well as 4 configurable GPIOs. 6 * as well as configurable GPIOs.
7 * 7 *
8 * Author: Carlo Caione <carlo@caione.org> 8 * Author: Carlo Caione <carlo@caione.org>
9 * 9 *
@@ -25,9 +25,16 @@
25#include <linux/mfd/core.h> 25#include <linux/mfd/core.h>
26#include <linux/of_device.h> 26#include <linux/of_device.h>
27#include <linux/of_irq.h> 27#include <linux/of_irq.h>
28#include <linux/acpi.h>
28 29
29#define AXP20X_OFF 0x80 30#define AXP20X_OFF 0x80
30 31
32static const char const *axp20x_model_names[] = {
33 "AXP202",
34 "AXP209",
35 "AXP288",
36};
37
31static const struct regmap_range axp20x_writeable_ranges[] = { 38static const struct regmap_range axp20x_writeable_ranges[] = {
32 regmap_reg_range(AXP20X_DATACACHE(0), AXP20X_IRQ5_STATE), 39 regmap_reg_range(AXP20X_DATACACHE(0), AXP20X_IRQ5_STATE),
33 regmap_reg_range(AXP20X_DCDC_MODE, AXP20X_FG_RES), 40 regmap_reg_range(AXP20X_DCDC_MODE, AXP20X_FG_RES),
@@ -47,6 +54,25 @@ static const struct regmap_access_table axp20x_volatile_table = {
47 .n_yes_ranges = ARRAY_SIZE(axp20x_volatile_ranges), 54 .n_yes_ranges = ARRAY_SIZE(axp20x_volatile_ranges),
48}; 55};
49 56
57static const struct regmap_range axp288_writeable_ranges[] = {
58 regmap_reg_range(AXP20X_DATACACHE(0), AXP20X_IRQ6_STATE),
59 regmap_reg_range(AXP20X_DCDC_MODE, AXP288_FG_TUNE5),
60};
61
62static const struct regmap_range axp288_volatile_ranges[] = {
63 regmap_reg_range(AXP20X_IRQ1_EN, AXP20X_IPSOUT_V_HIGH_L),
64};
65
66static const struct regmap_access_table axp288_writeable_table = {
67 .yes_ranges = axp288_writeable_ranges,
68 .n_yes_ranges = ARRAY_SIZE(axp288_writeable_ranges),
69};
70
71static const struct regmap_access_table axp288_volatile_table = {
72 .yes_ranges = axp288_volatile_ranges,
73 .n_yes_ranges = ARRAY_SIZE(axp288_volatile_ranges),
74};
75
50static struct resource axp20x_pek_resources[] = { 76static struct resource axp20x_pek_resources[] = {
51 { 77 {
52 .name = "PEK_DBR", 78 .name = "PEK_DBR",
@@ -61,6 +87,39 @@ static struct resource axp20x_pek_resources[] = {
61 }, 87 },
62}; 88};
63 89
90static struct resource axp288_battery_resources[] = {
91 {
92 .start = AXP288_IRQ_QWBTU,
93 .end = AXP288_IRQ_QWBTU,
94 .flags = IORESOURCE_IRQ,
95 },
96 {
97 .start = AXP288_IRQ_WBTU,
98 .end = AXP288_IRQ_WBTU,
99 .flags = IORESOURCE_IRQ,
100 },
101 {
102 .start = AXP288_IRQ_QWBTO,
103 .end = AXP288_IRQ_QWBTO,
104 .flags = IORESOURCE_IRQ,
105 },
106 {
107 .start = AXP288_IRQ_WBTO,
108 .end = AXP288_IRQ_WBTO,
109 .flags = IORESOURCE_IRQ,
110 },
111 {
112 .start = AXP288_IRQ_WL2,
113 .end = AXP288_IRQ_WL2,
114 .flags = IORESOURCE_IRQ,
115 },
116 {
117 .start = AXP288_IRQ_WL1,
118 .end = AXP288_IRQ_WL1,
119 .flags = IORESOURCE_IRQ,
120 },
121};
122
64static const struct regmap_config axp20x_regmap_config = { 123static const struct regmap_config axp20x_regmap_config = {
65 .reg_bits = 8, 124 .reg_bits = 8,
66 .val_bits = 8, 125 .val_bits = 8,
@@ -70,47 +129,96 @@ static const struct regmap_config axp20x_regmap_config = {
70 .cache_type = REGCACHE_RBTREE, 129 .cache_type = REGCACHE_RBTREE,
71}; 130};
72 131
73#define AXP20X_IRQ(_irq, _off, _mask) \ 132static const struct regmap_config axp288_regmap_config = {
74 [AXP20X_IRQ_##_irq] = { .reg_offset = (_off), .mask = BIT(_mask) } 133 .reg_bits = 8,
134 .val_bits = 8,
135 .wr_table = &axp288_writeable_table,
136 .volatile_table = &axp288_volatile_table,
137 .max_register = AXP288_FG_TUNE5,
138 .cache_type = REGCACHE_RBTREE,
139};
140
141#define INIT_REGMAP_IRQ(_variant, _irq, _off, _mask) \
142 [_variant##_IRQ_##_irq] = { .reg_offset = (_off), .mask = BIT(_mask) }
75 143
76static const struct regmap_irq axp20x_regmap_irqs[] = { 144static const struct regmap_irq axp20x_regmap_irqs[] = {
77 AXP20X_IRQ(ACIN_OVER_V, 0, 7), 145 INIT_REGMAP_IRQ(AXP20X, ACIN_OVER_V, 0, 7),
78 AXP20X_IRQ(ACIN_PLUGIN, 0, 6), 146 INIT_REGMAP_IRQ(AXP20X, ACIN_PLUGIN, 0, 6),
79 AXP20X_IRQ(ACIN_REMOVAL, 0, 5), 147 INIT_REGMAP_IRQ(AXP20X, ACIN_REMOVAL, 0, 5),
80 AXP20X_IRQ(VBUS_OVER_V, 0, 4), 148 INIT_REGMAP_IRQ(AXP20X, VBUS_OVER_V, 0, 4),
81 AXP20X_IRQ(VBUS_PLUGIN, 0, 3), 149 INIT_REGMAP_IRQ(AXP20X, VBUS_PLUGIN, 0, 3),
82 AXP20X_IRQ(VBUS_REMOVAL, 0, 2), 150 INIT_REGMAP_IRQ(AXP20X, VBUS_REMOVAL, 0, 2),
83 AXP20X_IRQ(VBUS_V_LOW, 0, 1), 151 INIT_REGMAP_IRQ(AXP20X, VBUS_V_LOW, 0, 1),
84 AXP20X_IRQ(BATT_PLUGIN, 1, 7), 152 INIT_REGMAP_IRQ(AXP20X, BATT_PLUGIN, 1, 7),
85 AXP20X_IRQ(BATT_REMOVAL, 1, 6), 153 INIT_REGMAP_IRQ(AXP20X, BATT_REMOVAL, 1, 6),
86 AXP20X_IRQ(BATT_ENT_ACT_MODE, 1, 5), 154 INIT_REGMAP_IRQ(AXP20X, BATT_ENT_ACT_MODE, 1, 5),
87 AXP20X_IRQ(BATT_EXIT_ACT_MODE, 1, 4), 155 INIT_REGMAP_IRQ(AXP20X, BATT_EXIT_ACT_MODE, 1, 4),
88 AXP20X_IRQ(CHARG, 1, 3), 156 INIT_REGMAP_IRQ(AXP20X, CHARG, 1, 3),
89 AXP20X_IRQ(CHARG_DONE, 1, 2), 157 INIT_REGMAP_IRQ(AXP20X, CHARG_DONE, 1, 2),
90 AXP20X_IRQ(BATT_TEMP_HIGH, 1, 1), 158 INIT_REGMAP_IRQ(AXP20X, BATT_TEMP_HIGH, 1, 1),
91 AXP20X_IRQ(BATT_TEMP_LOW, 1, 0), 159 INIT_REGMAP_IRQ(AXP20X, BATT_TEMP_LOW, 1, 0),
92 AXP20X_IRQ(DIE_TEMP_HIGH, 2, 7), 160 INIT_REGMAP_IRQ(AXP20X, DIE_TEMP_HIGH, 2, 7),
93 AXP20X_IRQ(CHARG_I_LOW, 2, 6), 161 INIT_REGMAP_IRQ(AXP20X, CHARG_I_LOW, 2, 6),
94 AXP20X_IRQ(DCDC1_V_LONG, 2, 5), 162 INIT_REGMAP_IRQ(AXP20X, DCDC1_V_LONG, 2, 5),
95 AXP20X_IRQ(DCDC2_V_LONG, 2, 4), 163 INIT_REGMAP_IRQ(AXP20X, DCDC2_V_LONG, 2, 4),
96 AXP20X_IRQ(DCDC3_V_LONG, 2, 3), 164 INIT_REGMAP_IRQ(AXP20X, DCDC3_V_LONG, 2, 3),
97 AXP20X_IRQ(PEK_SHORT, 2, 1), 165 INIT_REGMAP_IRQ(AXP20X, PEK_SHORT, 2, 1),
98 AXP20X_IRQ(PEK_LONG, 2, 0), 166 INIT_REGMAP_IRQ(AXP20X, PEK_LONG, 2, 0),
99 AXP20X_IRQ(N_OE_PWR_ON, 3, 7), 167 INIT_REGMAP_IRQ(AXP20X, N_OE_PWR_ON, 3, 7),
100 AXP20X_IRQ(N_OE_PWR_OFF, 3, 6), 168 INIT_REGMAP_IRQ(AXP20X, N_OE_PWR_OFF, 3, 6),
101 AXP20X_IRQ(VBUS_VALID, 3, 5), 169 INIT_REGMAP_IRQ(AXP20X, VBUS_VALID, 3, 5),
102 AXP20X_IRQ(VBUS_NOT_VALID, 3, 4), 170 INIT_REGMAP_IRQ(AXP20X, VBUS_NOT_VALID, 3, 4),
103 AXP20X_IRQ(VBUS_SESS_VALID, 3, 3), 171 INIT_REGMAP_IRQ(AXP20X, VBUS_SESS_VALID, 3, 3),
104 AXP20X_IRQ(VBUS_SESS_END, 3, 2), 172 INIT_REGMAP_IRQ(AXP20X, VBUS_SESS_END, 3, 2),
105 AXP20X_IRQ(LOW_PWR_LVL1, 3, 1), 173 INIT_REGMAP_IRQ(AXP20X, LOW_PWR_LVL1, 3, 1),
106 AXP20X_IRQ(LOW_PWR_LVL2, 3, 0), 174 INIT_REGMAP_IRQ(AXP20X, LOW_PWR_LVL2, 3, 0),
107 AXP20X_IRQ(TIMER, 4, 7), 175 INIT_REGMAP_IRQ(AXP20X, TIMER, 4, 7),
108 AXP20X_IRQ(PEK_RIS_EDGE, 4, 6), 176 INIT_REGMAP_IRQ(AXP20X, PEK_RIS_EDGE, 4, 6),
109 AXP20X_IRQ(PEK_FAL_EDGE, 4, 5), 177 INIT_REGMAP_IRQ(AXP20X, PEK_FAL_EDGE, 4, 5),
110 AXP20X_IRQ(GPIO3_INPUT, 4, 3), 178 INIT_REGMAP_IRQ(AXP20X, GPIO3_INPUT, 4, 3),
111 AXP20X_IRQ(GPIO2_INPUT, 4, 2), 179 INIT_REGMAP_IRQ(AXP20X, GPIO2_INPUT, 4, 2),
112 AXP20X_IRQ(GPIO1_INPUT, 4, 1), 180 INIT_REGMAP_IRQ(AXP20X, GPIO1_INPUT, 4, 1),
113 AXP20X_IRQ(GPIO0_INPUT, 4, 0), 181 INIT_REGMAP_IRQ(AXP20X, GPIO0_INPUT, 4, 0),
182};
183
184/* some IRQs are compatible with axp20x models */
185static const struct regmap_irq axp288_regmap_irqs[] = {
186 INIT_REGMAP_IRQ(AXP288, VBUS_FALL, 0, 2),
187 INIT_REGMAP_IRQ(AXP288, VBUS_RISE, 0, 3),
188 INIT_REGMAP_IRQ(AXP288, OV, 0, 4),
189
190 INIT_REGMAP_IRQ(AXP288, DONE, 1, 2),
191 INIT_REGMAP_IRQ(AXP288, CHARGING, 1, 3),
192 INIT_REGMAP_IRQ(AXP288, SAFE_QUIT, 1, 4),
193 INIT_REGMAP_IRQ(AXP288, SAFE_ENTER, 1, 5),
194 INIT_REGMAP_IRQ(AXP288, ABSENT, 1, 6),
195 INIT_REGMAP_IRQ(AXP288, APPEND, 1, 7),
196
197 INIT_REGMAP_IRQ(AXP288, QWBTU, 2, 0),
198 INIT_REGMAP_IRQ(AXP288, WBTU, 2, 1),
199 INIT_REGMAP_IRQ(AXP288, QWBTO, 2, 2),
200 INIT_REGMAP_IRQ(AXP288, WBTO, 2, 3),
201 INIT_REGMAP_IRQ(AXP288, QCBTU, 2, 4),
202 INIT_REGMAP_IRQ(AXP288, CBTU, 2, 5),
203 INIT_REGMAP_IRQ(AXP288, QCBTO, 2, 6),
204 INIT_REGMAP_IRQ(AXP288, CBTO, 2, 7),
205
206 INIT_REGMAP_IRQ(AXP288, WL2, 3, 0),
207 INIT_REGMAP_IRQ(AXP288, WL1, 3, 1),
208 INIT_REGMAP_IRQ(AXP288, GPADC, 3, 2),
209 INIT_REGMAP_IRQ(AXP288, OT, 3, 7),
210
211 INIT_REGMAP_IRQ(AXP288, GPIO0, 4, 0),
212 INIT_REGMAP_IRQ(AXP288, GPIO1, 4, 1),
213 INIT_REGMAP_IRQ(AXP288, POKO, 4, 2),
214 INIT_REGMAP_IRQ(AXP288, POKL, 4, 3),
215 INIT_REGMAP_IRQ(AXP288, POKS, 4, 4),
216 INIT_REGMAP_IRQ(AXP288, POKN, 4, 5),
217 INIT_REGMAP_IRQ(AXP288, POKP, 4, 6),
218 INIT_REGMAP_IRQ(AXP288, TIMER, 4, 7),
219
220 INIT_REGMAP_IRQ(AXP288, MV_CHNG, 5, 0),
221 INIT_REGMAP_IRQ(AXP288, BC_USB_CHNG, 5, 1),
114}; 222};
115 223
116static const struct of_device_id axp20x_of_match[] = { 224static const struct of_device_id axp20x_of_match[] = {
@@ -128,16 +236,39 @@ static const struct i2c_device_id axp20x_i2c_id[] = {
128}; 236};
129MODULE_DEVICE_TABLE(i2c, axp20x_i2c_id); 237MODULE_DEVICE_TABLE(i2c, axp20x_i2c_id);
130 238
239static const struct acpi_device_id axp20x_acpi_match[] = {
240 {
241 .id = "INT33F4",
242 .driver_data = AXP288_ID,
243 },
244 { },
245};
246MODULE_DEVICE_TABLE(acpi, axp20x_acpi_match);
247
131static const struct regmap_irq_chip axp20x_regmap_irq_chip = { 248static const struct regmap_irq_chip axp20x_regmap_irq_chip = {
132 .name = "axp20x_irq_chip", 249 .name = "axp20x_irq_chip",
133 .status_base = AXP20X_IRQ1_STATE, 250 .status_base = AXP20X_IRQ1_STATE,
134 .ack_base = AXP20X_IRQ1_STATE, 251 .ack_base = AXP20X_IRQ1_STATE,
135 .mask_base = AXP20X_IRQ1_EN, 252 .mask_base = AXP20X_IRQ1_EN,
136 .num_regs = 5, 253 .mask_invert = true,
254 .init_ack_masked = true,
137 .irqs = axp20x_regmap_irqs, 255 .irqs = axp20x_regmap_irqs,
138 .num_irqs = ARRAY_SIZE(axp20x_regmap_irqs), 256 .num_irqs = ARRAY_SIZE(axp20x_regmap_irqs),
257 .num_regs = 5,
258
259};
260
261static const struct regmap_irq_chip axp288_regmap_irq_chip = {
262 .name = "axp288_irq_chip",
263 .status_base = AXP20X_IRQ1_STATE,
264 .ack_base = AXP20X_IRQ1_STATE,
265 .mask_base = AXP20X_IRQ1_EN,
139 .mask_invert = true, 266 .mask_invert = true,
140 .init_ack_masked = true, 267 .init_ack_masked = true,
268 .irqs = axp288_regmap_irqs,
269 .num_irqs = ARRAY_SIZE(axp288_regmap_irqs),
270 .num_regs = 6,
271
141}; 272};
142 273
143static struct mfd_cell axp20x_cells[] = { 274static struct mfd_cell axp20x_cells[] = {
@@ -150,36 +281,158 @@ static struct mfd_cell axp20x_cells[] = {
150 }, 281 },
151}; 282};
152 283
284static struct resource axp288_adc_resources[] = {
285 {
286 .name = "GPADC",
287 .start = AXP288_IRQ_GPADC,
288 .end = AXP288_IRQ_GPADC,
289 .flags = IORESOURCE_IRQ,
290 },
291};
292
293static struct resource axp288_charger_resources[] = {
294 {
295 .start = AXP288_IRQ_OV,
296 .end = AXP288_IRQ_OV,
297 .flags = IORESOURCE_IRQ,
298 },
299 {
300 .start = AXP288_IRQ_DONE,
301 .end = AXP288_IRQ_DONE,
302 .flags = IORESOURCE_IRQ,
303 },
304 {
305 .start = AXP288_IRQ_CHARGING,
306 .end = AXP288_IRQ_CHARGING,
307 .flags = IORESOURCE_IRQ,
308 },
309 {
310 .start = AXP288_IRQ_SAFE_QUIT,
311 .end = AXP288_IRQ_SAFE_QUIT,
312 .flags = IORESOURCE_IRQ,
313 },
314 {
315 .start = AXP288_IRQ_SAFE_ENTER,
316 .end = AXP288_IRQ_SAFE_ENTER,
317 .flags = IORESOURCE_IRQ,
318 },
319 {
320 .start = AXP288_IRQ_QCBTU,
321 .end = AXP288_IRQ_QCBTU,
322 .flags = IORESOURCE_IRQ,
323 },
324 {
325 .start = AXP288_IRQ_CBTU,
326 .end = AXP288_IRQ_CBTU,
327 .flags = IORESOURCE_IRQ,
328 },
329 {
330 .start = AXP288_IRQ_QCBTO,
331 .end = AXP288_IRQ_QCBTO,
332 .flags = IORESOURCE_IRQ,
333 },
334 {
335 .start = AXP288_IRQ_CBTO,
336 .end = AXP288_IRQ_CBTO,
337 .flags = IORESOURCE_IRQ,
338 },
339};
340
341static struct mfd_cell axp288_cells[] = {
342 {
343 .name = "axp288_adc",
344 .num_resources = ARRAY_SIZE(axp288_adc_resources),
345 .resources = axp288_adc_resources,
346 },
347 {
348 .name = "axp288_charger",
349 .num_resources = ARRAY_SIZE(axp288_charger_resources),
350 .resources = axp288_charger_resources,
351 },
352 {
353 .name = "axp288_battery",
354 .num_resources = ARRAY_SIZE(axp288_battery_resources),
355 .resources = axp288_battery_resources,
356 },
357 {
358 .name = "axp288_pmic_acpi",
359 },
360};
361
153static struct axp20x_dev *axp20x_pm_power_off; 362static struct axp20x_dev *axp20x_pm_power_off;
154static void axp20x_power_off(void) 363static void axp20x_power_off(void)
155{ 364{
365 if (axp20x_pm_power_off->variant == AXP288_ID)
366 return;
367
156 regmap_write(axp20x_pm_power_off->regmap, AXP20X_OFF_CTRL, 368 regmap_write(axp20x_pm_power_off->regmap, AXP20X_OFF_CTRL,
157 AXP20X_OFF); 369 AXP20X_OFF);
158} 370}
159 371
372static int axp20x_match_device(struct axp20x_dev *axp20x, struct device *dev)
373{
374 const struct acpi_device_id *acpi_id;
375 const struct of_device_id *of_id;
376
377 if (dev->of_node) {
378 of_id = of_match_device(axp20x_of_match, dev);
379 if (!of_id) {
380 dev_err(dev, "Unable to match OF ID\n");
381 return -ENODEV;
382 }
383 axp20x->variant = (long) of_id->data;
384 } else {
385 acpi_id = acpi_match_device(dev->driver->acpi_match_table, dev);
386 if (!acpi_id || !acpi_id->driver_data) {
387 dev_err(dev, "Unable to match ACPI ID and data\n");
388 return -ENODEV;
389 }
390 axp20x->variant = (long) acpi_id->driver_data;
391 }
392
393 switch (axp20x->variant) {
394 case AXP202_ID:
395 case AXP209_ID:
396 axp20x->nr_cells = ARRAY_SIZE(axp20x_cells);
397 axp20x->cells = axp20x_cells;
398 axp20x->regmap_cfg = &axp20x_regmap_config;
399 axp20x->regmap_irq_chip = &axp20x_regmap_irq_chip;
400 break;
401 case AXP288_ID:
402 axp20x->cells = axp288_cells;
403 axp20x->nr_cells = ARRAY_SIZE(axp288_cells);
404 axp20x->regmap_cfg = &axp288_regmap_config;
405 axp20x->regmap_irq_chip = &axp288_regmap_irq_chip;
406 break;
407 default:
408 dev_err(dev, "unsupported AXP20X ID %lu\n", axp20x->variant);
409 return -EINVAL;
410 }
411 dev_info(dev, "AXP20x variant %s found\n",
412 axp20x_model_names[axp20x->variant]);
413
414 return 0;
415}
416
160static int axp20x_i2c_probe(struct i2c_client *i2c, 417static int axp20x_i2c_probe(struct i2c_client *i2c,
161 const struct i2c_device_id *id) 418 const struct i2c_device_id *id)
162{ 419{
163 struct axp20x_dev *axp20x; 420 struct axp20x_dev *axp20x;
164 const struct of_device_id *of_id;
165 int ret; 421 int ret;
166 422
167 axp20x = devm_kzalloc(&i2c->dev, sizeof(*axp20x), GFP_KERNEL); 423 axp20x = devm_kzalloc(&i2c->dev, sizeof(*axp20x), GFP_KERNEL);
168 if (!axp20x) 424 if (!axp20x)
169 return -ENOMEM; 425 return -ENOMEM;
170 426
171 of_id = of_match_device(axp20x_of_match, &i2c->dev); 427 ret = axp20x_match_device(axp20x, &i2c->dev);
172 if (!of_id) { 428 if (ret)
173 dev_err(&i2c->dev, "Unable to setup AXP20X data\n"); 429 return ret;
174 return -ENODEV;
175 }
176 axp20x->variant = (long) of_id->data;
177 430
178 axp20x->i2c_client = i2c; 431 axp20x->i2c_client = i2c;
179 axp20x->dev = &i2c->dev; 432 axp20x->dev = &i2c->dev;
180 dev_set_drvdata(axp20x->dev, axp20x); 433 dev_set_drvdata(axp20x->dev, axp20x);
181 434
182 axp20x->regmap = devm_regmap_init_i2c(i2c, &axp20x_regmap_config); 435 axp20x->regmap = devm_regmap_init_i2c(i2c, axp20x->regmap_cfg);
183 if (IS_ERR(axp20x->regmap)) { 436 if (IS_ERR(axp20x->regmap)) {
184 ret = PTR_ERR(axp20x->regmap); 437 ret = PTR_ERR(axp20x->regmap);
185 dev_err(&i2c->dev, "regmap init failed: %d\n", ret); 438 dev_err(&i2c->dev, "regmap init failed: %d\n", ret);
@@ -188,15 +441,15 @@ static int axp20x_i2c_probe(struct i2c_client *i2c,
188 441
189 ret = regmap_add_irq_chip(axp20x->regmap, i2c->irq, 442 ret = regmap_add_irq_chip(axp20x->regmap, i2c->irq,
190 IRQF_ONESHOT | IRQF_SHARED, -1, 443 IRQF_ONESHOT | IRQF_SHARED, -1,
191 &axp20x_regmap_irq_chip, 444 axp20x->regmap_irq_chip,
192 &axp20x->regmap_irqc); 445 &axp20x->regmap_irqc);
193 if (ret) { 446 if (ret) {
194 dev_err(&i2c->dev, "failed to add irq chip: %d\n", ret); 447 dev_err(&i2c->dev, "failed to add irq chip: %d\n", ret);
195 return ret; 448 return ret;
196 } 449 }
197 450
198 ret = mfd_add_devices(axp20x->dev, -1, axp20x_cells, 451 ret = mfd_add_devices(axp20x->dev, -1, axp20x->cells,
199 ARRAY_SIZE(axp20x_cells), NULL, 0, NULL); 452 axp20x->nr_cells, NULL, 0, NULL);
200 453
201 if (ret) { 454 if (ret) {
202 dev_err(&i2c->dev, "failed to add MFD devices: %d\n", ret); 455 dev_err(&i2c->dev, "failed to add MFD devices: %d\n", ret);
@@ -234,6 +487,7 @@ static struct i2c_driver axp20x_i2c_driver = {
234 .name = "axp20x", 487 .name = "axp20x",
235 .owner = THIS_MODULE, 488 .owner = THIS_MODULE,
236 .of_match_table = of_match_ptr(axp20x_of_match), 489 .of_match_table = of_match_ptr(axp20x_of_match),
490 .acpi_match_table = ACPI_PTR(axp20x_acpi_match),
237 }, 491 },
238 .probe = axp20x_i2c_probe, 492 .probe = axp20x_i2c_probe,
239 .remove = axp20x_i2c_remove, 493 .remove = axp20x_i2c_remove,
diff --git a/drivers/mfd/da9063-core.c b/drivers/mfd/da9063-core.c
index 93db8bb8c8f0..f38bc98a3c57 100644
--- a/drivers/mfd/da9063-core.c
+++ b/drivers/mfd/da9063-core.c
@@ -118,7 +118,7 @@ int da9063_device_init(struct da9063 *da9063, unsigned int irq)
118 da9063->irq_base = pdata->irq_base; 118 da9063->irq_base = pdata->irq_base;
119 } else { 119 } else {
120 da9063->flags = 0; 120 da9063->flags = 0;
121 da9063->irq_base = 0; 121 da9063->irq_base = -1;
122 } 122 }
123 da9063->chip_irq = irq; 123 da9063->chip_irq = irq;
124 124
@@ -168,6 +168,8 @@ int da9063_device_init(struct da9063 *da9063, unsigned int irq)
168 return ret; 168 return ret;
169 } 169 }
170 170
171 da9063->irq_base = regmap_irq_chip_get_base(da9063->regmap_irq);
172
171 ret = mfd_add_devices(da9063->dev, -1, da9063_devs, 173 ret = mfd_add_devices(da9063->dev, -1, da9063_devs,
172 ARRAY_SIZE(da9063_devs), NULL, da9063->irq_base, 174 ARRAY_SIZE(da9063_devs), NULL, da9063->irq_base,
173 NULL); 175 NULL);
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index 193cf168ba84..a8204730f01c 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -3150,23 +3150,28 @@ static int db8500_prcmu_probe(struct platform_device *pdev)
3150 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "prcmu"); 3150 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "prcmu");
3151 if (!res) { 3151 if (!res) {
3152 dev_err(&pdev->dev, "no prcmu memory region provided\n"); 3152 dev_err(&pdev->dev, "no prcmu memory region provided\n");
3153 return -ENOENT; 3153 return -EINVAL;
3154 } 3154 }
3155 prcmu_base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); 3155 prcmu_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
3156 if (!prcmu_base) { 3156 if (!prcmu_base) {
3157 dev_err(&pdev->dev, 3157 dev_err(&pdev->dev,
3158 "failed to ioremap prcmu register memory\n"); 3158 "failed to ioremap prcmu register memory\n");
3159 return -ENOENT; 3159 return -ENOMEM;
3160 } 3160 }
3161 init_prcm_registers(); 3161 init_prcm_registers();
3162 dbx500_fw_version_init(pdev, pdata->version_offset); 3162 dbx500_fw_version_init(pdev, pdata->version_offset);
3163 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "prcmu-tcdm"); 3163 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "prcmu-tcdm");
3164 if (!res) { 3164 if (!res) {
3165 dev_err(&pdev->dev, "no prcmu tcdm region provided\n"); 3165 dev_err(&pdev->dev, "no prcmu tcdm region provided\n");
3166 return -ENOENT; 3166 return -EINVAL;
3167 } 3167 }
3168 tcdm_base = devm_ioremap(&pdev->dev, res->start, 3168 tcdm_base = devm_ioremap(&pdev->dev, res->start,
3169 resource_size(res)); 3169 resource_size(res));
3170 if (!tcdm_base) {
3171 dev_err(&pdev->dev,
3172 "failed to ioremap prcmu-tcdm register memory\n");
3173 return -ENOMEM;
3174 }
3170 3175
3171 /* Clean up the mailbox interrupts after pre-kernel code. */ 3176 /* Clean up the mailbox interrupts after pre-kernel code. */
3172 writel(ALL_MBOX_BITS, PRCM_ARM_IT1_CLR); 3177 writel(ALL_MBOX_BITS, PRCM_ARM_IT1_CLR);
@@ -3174,15 +3179,14 @@ static int db8500_prcmu_probe(struct platform_device *pdev)
3174 irq = platform_get_irq(pdev, 0); 3179 irq = platform_get_irq(pdev, 0);
3175 if (irq <= 0) { 3180 if (irq <= 0) {
3176 dev_err(&pdev->dev, "no prcmu irq provided\n"); 3181 dev_err(&pdev->dev, "no prcmu irq provided\n");
3177 return -ENOENT; 3182 return irq;
3178 } 3183 }
3179 3184
3180 err = request_threaded_irq(irq, prcmu_irq_handler, 3185 err = request_threaded_irq(irq, prcmu_irq_handler,
3181 prcmu_irq_thread_fn, IRQF_NO_SUSPEND, "prcmu", NULL); 3186 prcmu_irq_thread_fn, IRQF_NO_SUSPEND, "prcmu", NULL);
3182 if (err < 0) { 3187 if (err < 0) {
3183 pr_err("prcmu: Failed to allocate IRQ_DB8500_PRCMU1.\n"); 3188 pr_err("prcmu: Failed to allocate IRQ_DB8500_PRCMU1.\n");
3184 err = -EBUSY; 3189 return err;
3185 goto no_irq_return;
3186 } 3190 }
3187 3191
3188 db8500_irq_init(np); 3192 db8500_irq_init(np);
@@ -3206,7 +3210,7 @@ static int db8500_prcmu_probe(struct platform_device *pdev)
3206 if (err) { 3210 if (err) {
3207 mfd_remove_devices(&pdev->dev); 3211 mfd_remove_devices(&pdev->dev);
3208 pr_err("prcmu: Failed to add subdevices\n"); 3212 pr_err("prcmu: Failed to add subdevices\n");
3209 goto no_irq_return; 3213 return err;
3210 } 3214 }
3211 } 3215 }
3212 3216
@@ -3214,12 +3218,10 @@ static int db8500_prcmu_probe(struct platform_device *pdev)
3214 if (err) { 3218 if (err) {
3215 mfd_remove_devices(&pdev->dev); 3219 mfd_remove_devices(&pdev->dev);
3216 pr_err("prcmu: Failed to add ab8500 subdevice\n"); 3220 pr_err("prcmu: Failed to add ab8500 subdevice\n");
3217 goto no_irq_return; 3221 return err;
3218 } 3222 }
3219 3223
3220 pr_info("DB8500 PRCMU initialized\n"); 3224 pr_info("DB8500 PRCMU initialized\n");
3221
3222no_irq_return:
3223 return err; 3225 return err;
3224} 3226}
3225static const struct of_device_id db8500_prcmu_match[] = { 3227static const struct of_device_id db8500_prcmu_match[] = {
diff --git a/drivers/mfd/dln2.c b/drivers/mfd/dln2.c
new file mode 100644
index 000000000000..6d49685d4ee4
--- /dev/null
+++ b/drivers/mfd/dln2.c
@@ -0,0 +1,781 @@
1/*
2 * Driver for the Diolan DLN-2 USB adapter
3 *
4 * Copyright (c) 2014 Intel Corporation
5 *
6 * Derived from:
7 * i2c-diolan-u2c.c
8 * Copyright (c) 2010-2011 Ericsson AB
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License as
12 * published by the Free Software Foundation, version 2.
13 */
14
15#include <linux/kernel.h>
16#include <linux/module.h>
17#include <linux/types.h>
18#include <linux/slab.h>
19#include <linux/usb.h>
20#include <linux/i2c.h>
21#include <linux/mutex.h>
22#include <linux/platform_device.h>
23#include <linux/mfd/core.h>
24#include <linux/mfd/dln2.h>
25#include <linux/rculist.h>
26
27struct dln2_header {
28 __le16 size;
29 __le16 id;
30 __le16 echo;
31 __le16 handle;
32};
33
34struct dln2_response {
35 struct dln2_header hdr;
36 __le16 result;
37};
38
39#define DLN2_GENERIC_MODULE_ID 0x00
40#define DLN2_GENERIC_CMD(cmd) DLN2_CMD(cmd, DLN2_GENERIC_MODULE_ID)
41#define CMD_GET_DEVICE_VER DLN2_GENERIC_CMD(0x30)
42#define CMD_GET_DEVICE_SN DLN2_GENERIC_CMD(0x31)
43
44#define DLN2_HW_ID 0x200
45#define DLN2_USB_TIMEOUT 200 /* in ms */
46#define DLN2_MAX_RX_SLOTS 16
47#define DLN2_MAX_URBS 16
48#define DLN2_RX_BUF_SIZE 512
49
50enum dln2_handle {
51 DLN2_HANDLE_EVENT = 0, /* don't change, hardware defined */
52 DLN2_HANDLE_CTRL,
53 DLN2_HANDLE_GPIO,
54 DLN2_HANDLE_I2C,
55 DLN2_HANDLE_SPI,
56 DLN2_HANDLES
57};
58
59/*
60 * Receive context used between the receive demultiplexer and the transfer
61 * routine. While sending a request the transfer routine will look for a free
62 * receive context and use it to wait for a response and to receive the URB and
63 * thus the response data.
64 */
65struct dln2_rx_context {
66 /* completion used to wait for a response */
67 struct completion done;
68
69 /* if non-NULL the URB contains the response */
70 struct urb *urb;
71
72 /* if true then this context is used to wait for a response */
73 bool in_use;
74};
75
76/*
77 * Receive contexts for a particular DLN2 module (i2c, gpio, etc.). We use the
78 * handle header field to identify the module in dln2_dev.mod_rx_slots and then
79 * the echo header field to index the slots field and find the receive context
80 * for a particular request.
81 */
82struct dln2_mod_rx_slots {
83 /* RX slots bitmap */
84 DECLARE_BITMAP(bmap, DLN2_MAX_RX_SLOTS);
85
86 /* used to wait for a free RX slot */
87 wait_queue_head_t wq;
88
89 /* used to wait for an RX operation to complete */
90 struct dln2_rx_context slots[DLN2_MAX_RX_SLOTS];
91
92 /* avoid races between alloc/free_rx_slot and dln2_rx_transfer */
93 spinlock_t lock;
94};
95
96struct dln2_dev {
97 struct usb_device *usb_dev;
98 struct usb_interface *interface;
99 u8 ep_in;
100 u8 ep_out;
101
102 struct urb *rx_urb[DLN2_MAX_URBS];
103 void *rx_buf[DLN2_MAX_URBS];
104
105 struct dln2_mod_rx_slots mod_rx_slots[DLN2_HANDLES];
106
107 struct list_head event_cb_list;
108 spinlock_t event_cb_lock;
109
110 bool disconnect;
111 int active_transfers;
112 wait_queue_head_t disconnect_wq;
113 spinlock_t disconnect_lock;
114};
115
116struct dln2_event_cb_entry {
117 struct list_head list;
118 u16 id;
119 struct platform_device *pdev;
120 dln2_event_cb_t callback;
121};
122
123int dln2_register_event_cb(struct platform_device *pdev, u16 id,
124 dln2_event_cb_t event_cb)
125{
126 struct dln2_dev *dln2 = dev_get_drvdata(pdev->dev.parent);
127 struct dln2_event_cb_entry *i, *entry;
128 unsigned long flags;
129 int ret = 0;
130
131 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
132 if (!entry)
133 return -ENOMEM;
134
135 entry->id = id;
136 entry->callback = event_cb;
137 entry->pdev = pdev;
138
139 spin_lock_irqsave(&dln2->event_cb_lock, flags);
140
141 list_for_each_entry(i, &dln2->event_cb_list, list) {
142 if (i->id == id) {
143 ret = -EBUSY;
144 break;
145 }
146 }
147
148 if (!ret)
149 list_add_rcu(&entry->list, &dln2->event_cb_list);
150
151 spin_unlock_irqrestore(&dln2->event_cb_lock, flags);
152
153 if (ret)
154 kfree(entry);
155
156 return ret;
157}
158EXPORT_SYMBOL(dln2_register_event_cb);
159
160void dln2_unregister_event_cb(struct platform_device *pdev, u16 id)
161{
162 struct dln2_dev *dln2 = dev_get_drvdata(pdev->dev.parent);
163 struct dln2_event_cb_entry *i;
164 unsigned long flags;
165 bool found = false;
166
167 spin_lock_irqsave(&dln2->event_cb_lock, flags);
168
169 list_for_each_entry(i, &dln2->event_cb_list, list) {
170 if (i->id == id) {
171 list_del_rcu(&i->list);
172 found = true;
173 break;
174 }
175 }
176
177 spin_unlock_irqrestore(&dln2->event_cb_lock, flags);
178
179 if (found) {
180 synchronize_rcu();
181 kfree(i);
182 }
183}
184EXPORT_SYMBOL(dln2_unregister_event_cb);
185
186/*
187 * Returns true if a valid transfer slot is found. In this case the URB must not
188 * be resubmitted immediately in dln2_rx as we need the data when dln2_transfer
189 * is woke up. It will be resubmitted there.
190 */
191static bool dln2_transfer_complete(struct dln2_dev *dln2, struct urb *urb,
192 u16 handle, u16 rx_slot)
193{
194 struct device *dev = &dln2->interface->dev;
195 struct dln2_mod_rx_slots *rxs = &dln2->mod_rx_slots[handle];
196 struct dln2_rx_context *rxc;
197 bool valid_slot = false;
198
199 if (rx_slot >= DLN2_MAX_RX_SLOTS)
200 goto out;
201
202 rxc = &rxs->slots[rx_slot];
203
204 /*
205 * No need to disable interrupts as this lock is not taken in interrupt
206 * context elsewhere in this driver. This function (or its callers) are
207 * also not exported to other modules.
208 */
209 spin_lock(&rxs->lock);
210 if (rxc->in_use && !rxc->urb) {
211 rxc->urb = urb;
212 complete(&rxc->done);
213 valid_slot = true;
214 }
215 spin_unlock(&rxs->lock);
216
217out:
218 if (!valid_slot)
219 dev_warn(dev, "bad/late response %d/%d\n", handle, rx_slot);
220
221 return valid_slot;
222}
223
224static void dln2_run_event_callbacks(struct dln2_dev *dln2, u16 id, u16 echo,
225 void *data, int len)
226{
227 struct dln2_event_cb_entry *i;
228
229 rcu_read_lock();
230
231 list_for_each_entry_rcu(i, &dln2->event_cb_list, list) {
232 if (i->id == id) {
233 i->callback(i->pdev, echo, data, len);
234 break;
235 }
236 }
237
238 rcu_read_unlock();
239}
240
/*
 * Completion handler for the shared RX URBs (runs in URB completion
 * context, hence GFP_ATOMIC below).  Validates the DLN2 response header,
 * then either dispatches an event to a registered callback or hands the URB
 * to the transfer slot waiting for this response.  In the latter case the
 * URB is not resubmitted here; free_rx_slot() resubmits it once
 * _dln2_transfer() has consumed the data.
 */
static void dln2_rx(struct urb *urb)
{
	struct dln2_dev *dln2 = urb->context;
	struct dln2_header *hdr = urb->transfer_buffer;
	struct device *dev = &dln2->interface->dev;
	u16 id, echo, handle, size;
	u8 *data;
	int len;
	int err;

	switch (urb->status) {
	case 0:
		/* success */
		break;
	case -ECONNRESET:
	case -ENOENT:
	case -ESHUTDOWN:
	case -EPIPE:
		/* this urb is terminated, clean up */
		dev_dbg(dev, "urb shutting down with status %d\n", urb->status);
		return;
	default:
		/* transient error: log it and resubmit so RX keeps flowing */
		dev_dbg(dev, "nonzero urb status received %d\n", urb->status);
		goto out;
	}

	/* a response must at least carry a complete DLN2 header */
	if (urb->actual_length < sizeof(struct dln2_header)) {
		dev_err(dev, "short response: %d\n", urb->actual_length);
		goto out;
	}

	handle = le16_to_cpu(hdr->handle);
	id = le16_to_cpu(hdr->id);
	echo = le16_to_cpu(hdr->echo);
	size = le16_to_cpu(hdr->size);

	/* the advertised size must match what was actually received */
	if (size != urb->actual_length) {
		dev_err(dev, "size mismatch: handle %x cmd %x echo %x size %d actual %d\n",
			handle, id, echo, size, urb->actual_length);
		goto out;
	}

	if (handle >= DLN2_HANDLES) {
		dev_warn(dev, "invalid handle %d\n", handle);
		goto out;
	}

	data = urb->transfer_buffer + sizeof(struct dln2_header);
	len = urb->actual_length - sizeof(struct dln2_header);

	if (handle == DLN2_HANDLE_EVENT) {
		dln2_run_event_callbacks(dln2, id, echo, data, len);
	} else {
		/* URB will be re-submitted in _dln2_transfer (free_rx_slot) */
		/* for responses, the echo field carries the RX slot index */
		if (dln2_transfer_complete(dln2, urb, handle, echo))
			return;
	}

out:
	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (err < 0)
		dev_err(dev, "failed to resubmit RX URB: %d\n", err);
}
304
305static void *dln2_prep_buf(u16 handle, u16 cmd, u16 echo, const void *obuf,
306 int *obuf_len, gfp_t gfp)
307{
308 int len;
309 void *buf;
310 struct dln2_header *hdr;
311
312 len = *obuf_len + sizeof(*hdr);
313 buf = kmalloc(len, gfp);
314 if (!buf)
315 return NULL;
316
317 hdr = (struct dln2_header *)buf;
318 hdr->id = cpu_to_le16(cmd);
319 hdr->size = cpu_to_le16(len);
320 hdr->echo = cpu_to_le16(echo);
321 hdr->handle = cpu_to_le16(handle);
322
323 memcpy(buf + sizeof(*hdr), obuf, *obuf_len);
324
325 *obuf_len = len;
326
327 return buf;
328}
329
330static int dln2_send_wait(struct dln2_dev *dln2, u16 handle, u16 cmd, u16 echo,
331 const void *obuf, int obuf_len)
332{
333 int ret = 0;
334 int len = obuf_len;
335 void *buf;
336 int actual;
337
338 buf = dln2_prep_buf(handle, cmd, echo, obuf, &len, GFP_KERNEL);
339 if (!buf)
340 return -ENOMEM;
341
342 ret = usb_bulk_msg(dln2->usb_dev,
343 usb_sndbulkpipe(dln2->usb_dev, dln2->ep_out),
344 buf, len, &actual, DLN2_USB_TIMEOUT);
345
346 kfree(buf);
347
348 return ret;
349}
350
351static bool find_free_slot(struct dln2_dev *dln2, u16 handle, int *slot)
352{
353 struct dln2_mod_rx_slots *rxs;
354 unsigned long flags;
355
356 if (dln2->disconnect) {
357 *slot = -ENODEV;
358 return true;
359 }
360
361 rxs = &dln2->mod_rx_slots[handle];
362
363 spin_lock_irqsave(&rxs->lock, flags);
364
365 *slot = find_first_zero_bit(rxs->bmap, DLN2_MAX_RX_SLOTS);
366
367 if (*slot < DLN2_MAX_RX_SLOTS) {
368 struct dln2_rx_context *rxc = &rxs->slots[*slot];
369
370 set_bit(*slot, rxs->bmap);
371 rxc->in_use = true;
372 }
373
374 spin_unlock_irqrestore(&rxs->lock, flags);
375
376 return *slot < DLN2_MAX_RX_SLOTS;
377}
378
379static int alloc_rx_slot(struct dln2_dev *dln2, u16 handle)
380{
381 int ret;
382 int slot;
383
384 /*
385 * No need to timeout here, the wait is bounded by the timeout in
386 * _dln2_transfer.
387 */
388 ret = wait_event_interruptible(dln2->mod_rx_slots[handle].wq,
389 find_free_slot(dln2, handle, &slot));
390 if (ret < 0)
391 return ret;
392
393 return slot;
394}
395
/*
 * Release an RX slot claimed by alloc_rx_slot().  Detaches any response URB
 * still parked on the slot and resubmits it outside the lock (GFP_KERNEL
 * submission may sleep), then wakes anyone waiting for a free slot.
 */
static void free_rx_slot(struct dln2_dev *dln2, u16 handle, int slot)
{
	struct dln2_mod_rx_slots *rxs;
	struct urb *urb = NULL;
	unsigned long flags;
	struct dln2_rx_context *rxc;

	rxs = &dln2->mod_rx_slots[handle];

	spin_lock_irqsave(&rxs->lock, flags);

	clear_bit(slot, rxs->bmap);

	rxc = &rxs->slots[slot];
	rxc->in_use = false;
	urb = rxc->urb;
	rxc->urb = NULL;
	/* make the completion ready for the slot's next user */
	reinit_completion(&rxc->done);

	spin_unlock_irqrestore(&rxs->lock, flags);

	if (urb) {
		int err;
		struct device *dev = &dln2->interface->dev;

		err = usb_submit_urb(urb, GFP_KERNEL);
		if (err < 0)
			dev_err(dev, "failed to resubmit RX URB: %d\n", err);
	}

	wake_up_interruptible(&rxs->wq);
}
428
/*
 * Send a command to the device and wait for its response.
 *
 * A free RX slot is claimed for the transfer; its index is sent in the
 * header's echo field so dln2_rx() can route the response back here (see
 * dln2_transfer_complete()).  Any response payload is copied into @ibuf,
 * clipped to *ibuf_len, and *ibuf_len is updated to the copied length.
 *
 * Returns 0 on success, -ENODEV on disconnect, -ETIMEDOUT if no response
 * arrived in time, -EPROTO/-EREMOTEIO for malformed or error responses.
 */
static int _dln2_transfer(struct dln2_dev *dln2, u16 handle, u16 cmd,
			  const void *obuf, unsigned obuf_len,
			  void *ibuf, unsigned *ibuf_len)
{
	int ret = 0;
	int rx_slot;
	struct dln2_response *rsp;
	struct dln2_rx_context *rxc;
	struct device *dev = &dln2->interface->dev;
	const unsigned long timeout = DLN2_USB_TIMEOUT * HZ / 1000;
	struct dln2_mod_rx_slots *rxs = &dln2->mod_rx_slots[handle];
	int size;

	/*
	 * Count this transfer so dln2_disconnect() can wait for it; refuse
	 * to start if a disconnect is already in progress.
	 */
	spin_lock(&dln2->disconnect_lock);
	if (!dln2->disconnect)
		dln2->active_transfers++;
	else
		ret = -ENODEV;
	spin_unlock(&dln2->disconnect_lock);

	if (ret)
		return ret;

	rx_slot = alloc_rx_slot(dln2, handle);
	if (rx_slot < 0) {
		ret = rx_slot;
		goto out_decr;
	}

	/* the slot index doubles as the echo value for response routing */
	ret = dln2_send_wait(dln2, handle, cmd, rx_slot, obuf, obuf_len);
	if (ret < 0) {
		dev_err(dev, "USB write failed: %d\n", ret);
		goto out_free_rx_slot;
	}

	rxc = &rxs->slots[rx_slot];

	/* completed by dln2_rx() on response, or by dln2_disconnect() */
	ret = wait_for_completion_interruptible_timeout(&rxc->done, timeout);
	if (ret <= 0) {
		if (!ret)
			ret = -ETIMEDOUT;
		goto out_free_rx_slot;
	} else {
		ret = 0;
	}

	if (dln2->disconnect) {
		ret = -ENODEV;
		goto out_free_rx_slot;
	}

	/* if we got here we know that the response header has been checked */
	rsp = rxc->urb->transfer_buffer;
	size = le16_to_cpu(rsp->hdr.size);

	if (size < sizeof(*rsp)) {
		ret = -EPROTO;
		goto out_free_rx_slot;
	}

	/* result codes above 0x80 are treated as device-side errors */
	if (le16_to_cpu(rsp->result) > 0x80) {
		dev_dbg(dev, "%d received response with error %d\n",
			handle, le16_to_cpu(rsp->result));
		ret = -EREMOTEIO;
		goto out_free_rx_slot;
	}

	if (!ibuf)
		goto out_free_rx_slot;

	/* clip the copy to the payload actually present in the response */
	if (*ibuf_len > size - sizeof(*rsp))
		*ibuf_len = size - sizeof(*rsp);

	memcpy(ibuf, rsp + 1, *ibuf_len);

out_free_rx_slot:
	/* also resubmits the parked RX URB and wakes slot waiters */
	free_rx_slot(dln2, handle, rx_slot);
out_decr:
	spin_lock(&dln2->disconnect_lock);
	dln2->active_transfers--;
	spin_unlock(&dln2->disconnect_lock);
	if (dln2->disconnect)
		wake_up(&dln2->disconnect_wq);

	return ret;
}
515
516int dln2_transfer(struct platform_device *pdev, u16 cmd,
517 const void *obuf, unsigned obuf_len,
518 void *ibuf, unsigned *ibuf_len)
519{
520 struct dln2_platform_data *dln2_pdata;
521 struct dln2_dev *dln2;
522 u16 handle;
523
524 dln2 = dev_get_drvdata(pdev->dev.parent);
525 dln2_pdata = dev_get_platdata(&pdev->dev);
526 handle = dln2_pdata->handle;
527
528 return _dln2_transfer(dln2, handle, cmd, obuf, obuf_len, ibuf,
529 ibuf_len);
530}
531EXPORT_SYMBOL(dln2_transfer);
532
533static int dln2_check_hw(struct dln2_dev *dln2)
534{
535 int ret;
536 __le32 hw_type;
537 int len = sizeof(hw_type);
538
539 ret = _dln2_transfer(dln2, DLN2_HANDLE_CTRL, CMD_GET_DEVICE_VER,
540 NULL, 0, &hw_type, &len);
541 if (ret < 0)
542 return ret;
543 if (len < sizeof(hw_type))
544 return -EREMOTEIO;
545
546 if (le32_to_cpu(hw_type) != DLN2_HW_ID) {
547 dev_err(&dln2->interface->dev, "Device ID 0x%x not supported\n",
548 le32_to_cpu(hw_type));
549 return -ENODEV;
550 }
551
552 return 0;
553}
554
555static int dln2_print_serialno(struct dln2_dev *dln2)
556{
557 int ret;
558 __le32 serial_no;
559 int len = sizeof(serial_no);
560 struct device *dev = &dln2->interface->dev;
561
562 ret = _dln2_transfer(dln2, DLN2_HANDLE_CTRL, CMD_GET_DEVICE_SN, NULL, 0,
563 &serial_no, &len);
564 if (ret < 0)
565 return ret;
566 if (len < sizeof(serial_no))
567 return -EREMOTEIO;
568
569 dev_info(dev, "Diolan DLN2 serial %u\n", le32_to_cpu(serial_no));
570
571 return 0;
572}
573
/* Probe-time hardware bring-up: verify the hardware ID, then report S/N. */
static int dln2_hw_init(struct dln2_dev *dln2)
{
	int ret = dln2_check_hw(dln2);

	return ret < 0 ? ret : dln2_print_serialno(dln2);
}
584
585static void dln2_free_rx_urbs(struct dln2_dev *dln2)
586{
587 int i;
588
589 for (i = 0; i < DLN2_MAX_URBS; i++) {
590 usb_kill_urb(dln2->rx_urb[i]);
591 usb_free_urb(dln2->rx_urb[i]);
592 kfree(dln2->rx_buf[i]);
593 }
594}
595
/* Final teardown: reap RX URBs, drop the USB device ref, free the state. */
static void dln2_free(struct dln2_dev *dln2)
{
	dln2_free_rx_urbs(dln2);
	usb_put_dev(dln2->usb_dev);
	kfree(dln2);
}
602
603static int dln2_setup_rx_urbs(struct dln2_dev *dln2,
604 struct usb_host_interface *hostif)
605{
606 int i;
607 int ret;
608 const int rx_max_size = DLN2_RX_BUF_SIZE;
609 struct device *dev = &dln2->interface->dev;
610
611 for (i = 0; i < DLN2_MAX_URBS; i++) {
612 dln2->rx_buf[i] = kmalloc(rx_max_size, GFP_KERNEL);
613 if (!dln2->rx_buf[i])
614 return -ENOMEM;
615
616 dln2->rx_urb[i] = usb_alloc_urb(0, GFP_KERNEL);
617 if (!dln2->rx_urb[i])
618 return -ENOMEM;
619
620 usb_fill_bulk_urb(dln2->rx_urb[i], dln2->usb_dev,
621 usb_rcvbulkpipe(dln2->usb_dev, dln2->ep_in),
622 dln2->rx_buf[i], rx_max_size, dln2_rx, dln2);
623
624 ret = usb_submit_urb(dln2->rx_urb[i], GFP_KERNEL);
625 if (ret < 0) {
626 dev_err(dev, "failed to submit RX URB: %d\n", ret);
627 return ret;
628 }
629 }
630
631 return 0;
632}
633
/* Platform data handed to each MFD child so it knows its DLN2 handle/port. */
static struct dln2_platform_data dln2_pdata_gpio = {
	.handle = DLN2_HANDLE_GPIO,
};

/* Only one I2C port seems to be supported on current hardware */
static struct dln2_platform_data dln2_pdata_i2c = {
	.handle = DLN2_HANDLE_I2C,
	.port = 0,
};

/* Only one SPI port supported */
static struct dln2_platform_data dln2_pdata_spi = {
	.handle = DLN2_HANDLE_SPI,
	.port = 0,
};

/* Child MFD devices instantiated under the USB interface in dln2_probe(). */
static const struct mfd_cell dln2_devs[] = {
	{
		.name = "dln2-gpio",
		.platform_data = &dln2_pdata_gpio,
		.pdata_size = sizeof(struct dln2_platform_data),
	},
	{
		.name = "dln2-i2c",
		.platform_data = &dln2_pdata_i2c,
		.pdata_size = sizeof(struct dln2_platform_data),
	},
	{
		.name = "dln2-spi",
		.platform_data = &dln2_pdata_spi,
		.pdata_size = sizeof(struct dln2_platform_data),
	},
};
667
/*
 * USB disconnect: fence off new transfers, kick every in-flight one so it
 * fails with -ENODEV, wait for them to drain, then tear down the MFD
 * children and free the device state.  Ordering matters: the children are
 * removed before dln2_free() destroys the transport they rely on.
 */
static void dln2_disconnect(struct usb_interface *interface)
{
	struct dln2_dev *dln2 = usb_get_intfdata(interface);
	int i, j;

	/* don't allow starting new transfers */
	spin_lock(&dln2->disconnect_lock);
	dln2->disconnect = true;
	spin_unlock(&dln2->disconnect_lock);

	/* cancel in progress transfers */
	for (i = 0; i < DLN2_HANDLES; i++) {
		struct dln2_mod_rx_slots *rxs = &dln2->mod_rx_slots[i];
		unsigned long flags;

		spin_lock_irqsave(&rxs->lock, flags);

		/* cancel all response waiters */
		for (j = 0; j < DLN2_MAX_RX_SLOTS; j++) {
			struct dln2_rx_context *rxc = &rxs->slots[j];

			/*
			 * Completing an in-use slot wakes the waiter in
			 * _dln2_transfer(), which then sees the disconnect
			 * flag and bails out with -ENODEV.
			 */
			if (rxc->in_use)
				complete(&rxc->done);
		}

		spin_unlock_irqrestore(&rxs->lock, flags);
	}

	/* wait for transfers to end */
	wait_event(dln2->disconnect_wq, !dln2->active_transfers);

	mfd_remove_devices(&interface->dev);

	dln2_free(dln2);
}
703
704static int dln2_probe(struct usb_interface *interface,
705 const struct usb_device_id *usb_id)
706{
707 struct usb_host_interface *hostif = interface->cur_altsetting;
708 struct device *dev = &interface->dev;
709 struct dln2_dev *dln2;
710 int ret;
711 int i, j;
712
713 if (hostif->desc.bInterfaceNumber != 0 ||
714 hostif->desc.bNumEndpoints < 2)
715 return -ENODEV;
716
717 dln2 = kzalloc(sizeof(*dln2), GFP_KERNEL);
718 if (!dln2)
719 return -ENOMEM;
720
721 dln2->ep_out = hostif->endpoint[0].desc.bEndpointAddress;
722 dln2->ep_in = hostif->endpoint[1].desc.bEndpointAddress;
723 dln2->usb_dev = usb_get_dev(interface_to_usbdev(interface));
724 dln2->interface = interface;
725 usb_set_intfdata(interface, dln2);
726 init_waitqueue_head(&dln2->disconnect_wq);
727
728 for (i = 0; i < DLN2_HANDLES; i++) {
729 init_waitqueue_head(&dln2->mod_rx_slots[i].wq);
730 spin_lock_init(&dln2->mod_rx_slots[i].lock);
731 for (j = 0; j < DLN2_MAX_RX_SLOTS; j++)
732 init_completion(&dln2->mod_rx_slots[i].slots[j].done);
733 }
734
735 spin_lock_init(&dln2->event_cb_lock);
736 spin_lock_init(&dln2->disconnect_lock);
737 INIT_LIST_HEAD(&dln2->event_cb_list);
738
739 ret = dln2_setup_rx_urbs(dln2, hostif);
740 if (ret)
741 goto out_cleanup;
742
743 ret = dln2_hw_init(dln2);
744 if (ret < 0) {
745 dev_err(dev, "failed to initialize hardware\n");
746 goto out_cleanup;
747 }
748
749 ret = mfd_add_hotplug_devices(dev, dln2_devs, ARRAY_SIZE(dln2_devs));
750 if (ret != 0) {
751 dev_err(dev, "failed to add mfd devices to core\n");
752 goto out_cleanup;
753 }
754
755 return 0;
756
757out_cleanup:
758 dln2_free(dln2);
759
760 return ret;
761}
762
/* Diolan DLN2 interface adapter */
static const struct usb_device_id dln2_table[] = {
	{ USB_DEVICE(0xa257, 0x2013) },
	{ }
};

MODULE_DEVICE_TABLE(usb, dln2_table);
769
/* USB driver glue: probe/disconnect handle the whole device lifecycle. */
static struct usb_driver dln2_driver = {
	.name = "dln2",
	.probe = dln2_probe,
	.disconnect = dln2_disconnect,
	.id_table = dln2_table,
};

module_usb_driver(dln2_driver);

MODULE_AUTHOR("Octavian Purdila <octavian.purdila@intel.com>");
MODULE_DESCRIPTION("Core driver for the Diolan DLN2 interface adapter");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/intel_soc_pmic_crc.c b/drivers/mfd/intel_soc_pmic_crc.c
index 7107cab832e6..c85e2ecb868a 100644
--- a/drivers/mfd/intel_soc_pmic_crc.c
+++ b/drivers/mfd/intel_soc_pmic_crc.c
@@ -106,6 +106,9 @@ static struct mfd_cell crystal_cove_dev[] = {
106 .num_resources = ARRAY_SIZE(gpio_resources), 106 .num_resources = ARRAY_SIZE(gpio_resources),
107 .resources = gpio_resources, 107 .resources = gpio_resources,
108 }, 108 },
109 {
110 .name = "crystal_cove_pmic",
111 },
109}; 112};
110 113
111static struct regmap_config crystal_cove_regmap_config = { 114static struct regmap_config crystal_cove_regmap_config = {
diff --git a/drivers/mfd/lpc_sch.c b/drivers/mfd/lpc_sch.c
index c980da479a35..5c38df35a84d 100644
--- a/drivers/mfd/lpc_sch.c
+++ b/drivers/mfd/lpc_sch.c
@@ -193,11 +193,7 @@ static int lpc_sch_probe(struct pci_dev *dev, const struct pci_device_id *id)
193 return -ENODEV; 193 return -ENODEV;
194 } 194 }
195 195
196 ret = mfd_add_devices(&dev->dev, 0, lpc_sch_cells, cells, NULL, 0, NULL); 196 return mfd_add_devices(&dev->dev, 0, lpc_sch_cells, cells, NULL, 0, NULL);
197 if (ret)
198 mfd_remove_devices(&dev->dev);
199
200 return ret;
201} 197}
202 198
203static void lpc_sch_remove(struct pci_dev *dev) 199static void lpc_sch_remove(struct pci_dev *dev)
diff --git a/drivers/mfd/max14577.c b/drivers/mfd/max14577.c
index de96b7fb1f6d..3bf8def82f1e 100644
--- a/drivers/mfd/max14577.c
+++ b/drivers/mfd/max14577.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * max14577.c - mfd core driver for the Maxim 14577/77836 2 * max14577.c - mfd core driver for the Maxim 14577/77836
3 * 3 *
4 * Copyright (C) 2014 Samsung Electrnoics 4 * Copyright (C) 2014 Samsung Electronics
5 * Chanwoo Choi <cw00.choi@samsung.com> 5 * Chanwoo Choi <cw00.choi@samsung.com>
6 * Krzysztof Kozlowski <k.kozlowski@samsung.com> 6 * Krzysztof Kozlowski <k.kozlowski@samsung.com>
7 * 7 *
diff --git a/drivers/mfd/max77693.c b/drivers/mfd/max77693.c
index 711773e8e64b..a159593e27a0 100644
--- a/drivers/mfd/max77693.c
+++ b/drivers/mfd/max77693.c
@@ -43,9 +43,15 @@
43 43
44static const struct mfd_cell max77693_devs[] = { 44static const struct mfd_cell max77693_devs[] = {
45 { .name = "max77693-pmic", }, 45 { .name = "max77693-pmic", },
46 { .name = "max77693-charger", }, 46 {
47 .name = "max77693-charger",
48 .of_compatible = "maxim,max77693-charger",
49 },
47 { .name = "max77693-muic", }, 50 { .name = "max77693-muic", },
48 { .name = "max77693-haptic", }, 51 {
52 .name = "max77693-haptic",
53 .of_compatible = "maxim,max77693-haptic",
54 },
49 { 55 {
50 .name = "max77693-flash", 56 .name = "max77693-flash",
51 .of_compatible = "maxim,max77693-flash", 57 .of_compatible = "maxim,max77693-flash",
@@ -147,6 +153,12 @@ static const struct regmap_irq_chip max77693_muic_irq_chip = {
147 .num_irqs = ARRAY_SIZE(max77693_muic_irqs), 153 .num_irqs = ARRAY_SIZE(max77693_muic_irqs),
148}; 154};
149 155
156static const struct regmap_config max77693_regmap_haptic_config = {
157 .reg_bits = 8,
158 .val_bits = 8,
159 .max_register = MAX77693_HAPTIC_REG_END,
160};
161
150static int max77693_i2c_probe(struct i2c_client *i2c, 162static int max77693_i2c_probe(struct i2c_client *i2c,
151 const struct i2c_device_id *id) 163 const struct i2c_device_id *id)
152{ 164{
@@ -196,6 +208,15 @@ static int max77693_i2c_probe(struct i2c_client *i2c,
196 } 208 }
197 i2c_set_clientdata(max77693->haptic, max77693); 209 i2c_set_clientdata(max77693->haptic, max77693);
198 210
211 max77693->regmap_haptic = devm_regmap_init_i2c(max77693->haptic,
212 &max77693_regmap_haptic_config);
213 if (IS_ERR(max77693->regmap_haptic)) {
214 ret = PTR_ERR(max77693->regmap_haptic);
215 dev_err(max77693->dev,
216 "failed to initialize haptic register map: %d\n", ret);
217 goto err_regmap;
218 }
219
199 /* 220 /*
200 * Initialize register map for MUIC device because use regmap-muic 221 * Initialize register map for MUIC device because use regmap-muic
201 * instance of MUIC device when irq of max77693 is initialized 222 * instance of MUIC device when irq of max77693 is initialized
@@ -207,7 +228,7 @@ static int max77693_i2c_probe(struct i2c_client *i2c,
207 ret = PTR_ERR(max77693->regmap_muic); 228 ret = PTR_ERR(max77693->regmap_muic);
208 dev_err(max77693->dev, 229 dev_err(max77693->dev,
209 "failed to allocate register map: %d\n", ret); 230 "failed to allocate register map: %d\n", ret);
210 goto err_regmap_muic; 231 goto err_regmap;
211 } 232 }
212 233
213 ret = regmap_add_irq_chip(max77693->regmap, max77693->irq, 234 ret = regmap_add_irq_chip(max77693->regmap, max77693->irq,
@@ -217,7 +238,7 @@ static int max77693_i2c_probe(struct i2c_client *i2c,
217 &max77693->irq_data_led); 238 &max77693->irq_data_led);
218 if (ret) { 239 if (ret) {
219 dev_err(max77693->dev, "failed to add irq chip: %d\n", ret); 240 dev_err(max77693->dev, "failed to add irq chip: %d\n", ret);
220 goto err_regmap_muic; 241 goto err_regmap;
221 } 242 }
222 243
223 ret = regmap_add_irq_chip(max77693->regmap, max77693->irq, 244 ret = regmap_add_irq_chip(max77693->regmap, max77693->irq,
@@ -280,7 +301,7 @@ err_irq_charger:
280 regmap_del_irq_chip(max77693->irq, max77693->irq_data_topsys); 301 regmap_del_irq_chip(max77693->irq, max77693->irq_data_topsys);
281err_irq_topsys: 302err_irq_topsys:
282 regmap_del_irq_chip(max77693->irq, max77693->irq_data_led); 303 regmap_del_irq_chip(max77693->irq, max77693->irq_data_led);
283err_regmap_muic: 304err_regmap:
284 i2c_unregister_device(max77693->haptic); 305 i2c_unregister_device(max77693->haptic);
285err_i2c_haptic: 306err_i2c_haptic:
286 i2c_unregister_device(max77693->muic); 307 i2c_unregister_device(max77693->muic);
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
index f3338fe9d069..2a87f69be53d 100644
--- a/drivers/mfd/mfd-core.c
+++ b/drivers/mfd/mfd-core.c
@@ -125,9 +125,15 @@ static int mfd_add_device(struct device *parent, int id,
125 struct platform_device *pdev; 125 struct platform_device *pdev;
126 struct device_node *np = NULL; 126 struct device_node *np = NULL;
127 int ret = -ENOMEM; 127 int ret = -ENOMEM;
128 int platform_id;
128 int r; 129 int r;
129 130
130 pdev = platform_device_alloc(cell->name, id + cell->id); 131 if (id < 0)
132 platform_id = id;
133 else
134 platform_id = id + cell->id;
135
136 pdev = platform_device_alloc(cell->name, platform_id);
131 if (!pdev) 137 if (!pdev)
132 goto fail_alloc; 138 goto fail_alloc;
133 139
diff --git a/drivers/mfd/rts5227.c b/drivers/mfd/rts5227.c
index 9c8eec80ceed..32407404d838 100644
--- a/drivers/mfd/rts5227.c
+++ b/drivers/mfd/rts5227.c
@@ -130,6 +130,12 @@ static int rts5227_extra_init_hw(struct rtsx_pcr *pcr)
130 130
131static int rts5227_optimize_phy(struct rtsx_pcr *pcr) 131static int rts5227_optimize_phy(struct rtsx_pcr *pcr)
132{ 132{
133 int err;
134
135 err = rtsx_gops_pm_reset(pcr);
136 if (err < 0)
137 return err;
138
133 /* Optimize RX sensitivity */ 139 /* Optimize RX sensitivity */
134 return rtsx_pci_write_phy_register(pcr, 0x00, 0xBA42); 140 return rtsx_pci_write_phy_register(pcr, 0x00, 0xBA42);
135} 141}
diff --git a/drivers/mfd/rts5249.c b/drivers/mfd/rts5249.c
index 573de7bfcced..cf425cc959d5 100644
--- a/drivers/mfd/rts5249.c
+++ b/drivers/mfd/rts5249.c
@@ -130,6 +130,10 @@ static int rts5249_optimize_phy(struct rtsx_pcr *pcr)
130{ 130{
131 int err; 131 int err;
132 132
133 err = rtsx_gops_pm_reset(pcr);
134 if (err < 0)
135 return err;
136
133 err = rtsx_pci_write_phy_register(pcr, PHY_REG_REV, 137 err = rtsx_pci_write_phy_register(pcr, PHY_REG_REV,
134 PHY_REG_REV_RESV | PHY_REG_REV_RXIDLE_LATCHED | 138 PHY_REG_REV_RESV | PHY_REG_REV_RXIDLE_LATCHED |
135 PHY_REG_REV_P1_EN | PHY_REG_REV_RXIDLE_EN | 139 PHY_REG_REV_P1_EN | PHY_REG_REV_RXIDLE_EN |
diff --git a/drivers/mfd/rtsx_gops.c b/drivers/mfd/rtsx_gops.c
new file mode 100644
index 000000000000..b1a98c678593
--- /dev/null
+++ b/drivers/mfd/rtsx_gops.c
@@ -0,0 +1,37 @@
1/* Driver for Realtek PCI-Express card reader
2 *
3 * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the
7 * Free Software Foundation; either version 2, or (at your option) any
8 * later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, see <http://www.gnu.org/licenses/>.
17 *
18 * Author:
19 * Micky Ching <micky_ching@realsil.com.cn>
20 */
21
22#include <linux/mfd/rtsx_pci.h>
23#include "rtsx_pcr.h"
24
25int rtsx_gops_pm_reset(struct rtsx_pcr *pcr)
26{
27 int err;
28
29 /* init aspm */
30 rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0xFF, 0x00);
31 err = rtsx_pci_update_cfg_byte(pcr, LCTLR, ~LCTLR_ASPM_CTL_MASK, 0x00);
32 if (err < 0)
33 return err;
34
35 /* reset PM_CTRL3 before send buffer cmd */
36 return rtsx_pci_write_register(pcr, PM_CTRL3, D3_DELINK_MODE_EN, 0x00);
37}
diff --git a/drivers/mfd/rtsx_pcr.h b/drivers/mfd/rtsx_pcr.h
index 07e4c2ebf05a..fe2bbb67defc 100644
--- a/drivers/mfd/rtsx_pcr.h
+++ b/drivers/mfd/rtsx_pcr.h
@@ -72,4 +72,7 @@ do { \
72 pcr->ms_pull_ctl_disable_tbl = __device##_ms_pull_ctl_disable_tbl; \ 72 pcr->ms_pull_ctl_disable_tbl = __device##_ms_pull_ctl_disable_tbl; \
73} while (0) 73} while (0)
74 74
75/* generic operations */
76int rtsx_gops_pm_reset(struct rtsx_pcr *pcr);
77
75#endif 78#endif
diff --git a/drivers/mfd/rtsx_usb.c b/drivers/mfd/rtsx_usb.c
index 9cf98d142d9a..dbdd0faeb6ce 100644
--- a/drivers/mfd/rtsx_usb.c
+++ b/drivers/mfd/rtsx_usb.c
@@ -647,8 +647,8 @@ static int rtsx_usb_probe(struct usb_interface *intf,
647 /* initialize USB SG transfer timer */ 647 /* initialize USB SG transfer timer */
648 setup_timer(&ucr->sg_timer, rtsx_usb_sg_timed_out, (unsigned long) ucr); 648 setup_timer(&ucr->sg_timer, rtsx_usb_sg_timed_out, (unsigned long) ucr);
649 649
650 ret = mfd_add_devices(&intf->dev, usb_dev->devnum, rtsx_usb_cells, 650 ret = mfd_add_hotplug_devices(&intf->dev, rtsx_usb_cells,
651 ARRAY_SIZE(rtsx_usb_cells), NULL, 0, NULL); 651 ARRAY_SIZE(rtsx_usb_cells));
652 if (ret) 652 if (ret)
653 goto out_init_fail; 653 goto out_init_fail;
654 654
diff --git a/drivers/mfd/sec-core.c b/drivers/mfd/sec-core.c
index dba7e2b6f8e9..0a7bc43db4e4 100644
--- a/drivers/mfd/sec-core.c
+++ b/drivers/mfd/sec-core.c
@@ -27,11 +27,11 @@
27#include <linux/mfd/samsung/irq.h> 27#include <linux/mfd/samsung/irq.h>
28#include <linux/mfd/samsung/s2mpa01.h> 28#include <linux/mfd/samsung/s2mpa01.h>
29#include <linux/mfd/samsung/s2mps11.h> 29#include <linux/mfd/samsung/s2mps11.h>
30#include <linux/mfd/samsung/s2mps13.h>
30#include <linux/mfd/samsung/s2mps14.h> 31#include <linux/mfd/samsung/s2mps14.h>
31#include <linux/mfd/samsung/s2mpu02.h> 32#include <linux/mfd/samsung/s2mpu02.h>
32#include <linux/mfd/samsung/s5m8763.h> 33#include <linux/mfd/samsung/s5m8763.h>
33#include <linux/mfd/samsung/s5m8767.h> 34#include <linux/mfd/samsung/s5m8767.h>
34#include <linux/regulator/machine.h>
35#include <linux/regmap.h> 35#include <linux/regmap.h>
36 36
37static const struct mfd_cell s5m8751_devs[] = { 37static const struct mfd_cell s5m8751_devs[] = {
@@ -74,6 +74,15 @@ static const struct mfd_cell s2mps11_devs[] = {
74 } 74 }
75}; 75};
76 76
77static const struct mfd_cell s2mps13_devs[] = {
78 { .name = "s2mps13-pmic", },
79 { .name = "s2mps13-rtc", },
80 {
81 .name = "s2mps13-clk",
82 .of_compatible = "samsung,s2mps13-clk",
83 },
84};
85
77static const struct mfd_cell s2mps14_devs[] = { 86static const struct mfd_cell s2mps14_devs[] = {
78 { 87 {
79 .name = "s2mps14-pmic", 88 .name = "s2mps14-pmic",
@@ -108,6 +117,9 @@ static const struct of_device_id sec_dt_match[] = {
108 .compatible = "samsung,s2mps11-pmic", 117 .compatible = "samsung,s2mps11-pmic",
109 .data = (void *)S2MPS11X, 118 .data = (void *)S2MPS11X,
110 }, { 119 }, {
120 .compatible = "samsung,s2mps13-pmic",
121 .data = (void *)S2MPS13X,
122 }, {
111 .compatible = "samsung,s2mps14-pmic", 123 .compatible = "samsung,s2mps14-pmic",
112 .data = (void *)S2MPS14X, 124 .data = (void *)S2MPS14X,
113 }, { 125 }, {
@@ -194,6 +206,15 @@ static const struct regmap_config s2mps11_regmap_config = {
194 .cache_type = REGCACHE_FLAT, 206 .cache_type = REGCACHE_FLAT,
195}; 207};
196 208
209static const struct regmap_config s2mps13_regmap_config = {
210 .reg_bits = 8,
211 .val_bits = 8,
212
213 .max_register = S2MPS13_REG_LDODSCH5,
214 .volatile_reg = s2mps11_volatile,
215 .cache_type = REGCACHE_FLAT,
216};
217
197static const struct regmap_config s2mps14_regmap_config = { 218static const struct regmap_config s2mps14_regmap_config = {
198 .reg_bits = 8, 219 .reg_bits = 8,
199 .val_bits = 8, 220 .val_bits = 8,
@@ -325,6 +346,9 @@ static int sec_pmic_probe(struct i2c_client *i2c,
325 case S2MPS11X: 346 case S2MPS11X:
326 regmap = &s2mps11_regmap_config; 347 regmap = &s2mps11_regmap_config;
327 break; 348 break;
349 case S2MPS13X:
350 regmap = &s2mps13_regmap_config;
351 break;
328 case S2MPS14X: 352 case S2MPS14X:
329 regmap = &s2mps14_regmap_config; 353 regmap = &s2mps14_regmap_config;
330 break; 354 break;
@@ -378,6 +402,10 @@ static int sec_pmic_probe(struct i2c_client *i2c,
378 sec_devs = s2mps11_devs; 402 sec_devs = s2mps11_devs;
379 num_sec_devs = ARRAY_SIZE(s2mps11_devs); 403 num_sec_devs = ARRAY_SIZE(s2mps11_devs);
380 break; 404 break;
405 case S2MPS13X:
406 sec_devs = s2mps13_devs;
407 num_sec_devs = ARRAY_SIZE(s2mps13_devs);
408 break;
381 case S2MPS14X: 409 case S2MPS14X:
382 sec_devs = s2mps14_devs; 410 sec_devs = s2mps14_devs;
383 num_sec_devs = ARRAY_SIZE(s2mps14_devs); 411 num_sec_devs = ARRAY_SIZE(s2mps14_devs);
@@ -432,15 +460,6 @@ static int sec_pmic_suspend(struct device *dev)
432 */ 460 */
433 disable_irq(sec_pmic->irq); 461 disable_irq(sec_pmic->irq);
434 462
435 switch (sec_pmic->device_type) {
436 case S2MPS14X:
437 case S2MPU02:
438 regulator_suspend_prepare(PM_SUSPEND_MEM);
439 break;
440 default:
441 break;
442 }
443
444 return 0; 463 return 0;
445} 464}
446 465
diff --git a/drivers/mfd/sec-irq.c b/drivers/mfd/sec-irq.c
index f9a57869e3ec..ba86a918c2da 100644
--- a/drivers/mfd/sec-irq.c
+++ b/drivers/mfd/sec-irq.c
@@ -389,14 +389,22 @@ static const struct regmap_irq_chip s2mps11_irq_chip = {
389 .ack_base = S2MPS11_REG_INT1, 389 .ack_base = S2MPS11_REG_INT1,
390}; 390};
391 391
392#define S2MPS1X_IRQ_CHIP_COMMON_DATA \
393 .irqs = s2mps14_irqs, \
394 .num_irqs = ARRAY_SIZE(s2mps14_irqs), \
395 .num_regs = 3, \
396 .status_base = S2MPS14_REG_INT1, \
397 .mask_base = S2MPS14_REG_INT1M, \
398 .ack_base = S2MPS14_REG_INT1 \
399
400static const struct regmap_irq_chip s2mps13_irq_chip = {
401 .name = "s2mps13",
402 S2MPS1X_IRQ_CHIP_COMMON_DATA,
403};
404
392static const struct regmap_irq_chip s2mps14_irq_chip = { 405static const struct regmap_irq_chip s2mps14_irq_chip = {
393 .name = "s2mps14", 406 .name = "s2mps14",
394 .irqs = s2mps14_irqs, 407 S2MPS1X_IRQ_CHIP_COMMON_DATA,
395 .num_irqs = ARRAY_SIZE(s2mps14_irqs),
396 .num_regs = 3,
397 .status_base = S2MPS14_REG_INT1,
398 .mask_base = S2MPS14_REG_INT1M,
399 .ack_base = S2MPS14_REG_INT1,
400}; 408};
401 409
402static const struct regmap_irq_chip s2mpu02_irq_chip = { 410static const struct regmap_irq_chip s2mpu02_irq_chip = {
@@ -452,6 +460,9 @@ int sec_irq_init(struct sec_pmic_dev *sec_pmic)
452 case S2MPS11X: 460 case S2MPS11X:
453 sec_irq_chip = &s2mps11_irq_chip; 461 sec_irq_chip = &s2mps11_irq_chip;
454 break; 462 break;
463 case S2MPS13X:
464 sec_irq_chip = &s2mps13_irq_chip;
465 break;
455 case S2MPS14X: 466 case S2MPS14X:
456 sec_irq_chip = &s2mps14_irq_chip; 467 sec_irq_chip = &s2mps14_irq_chip;
457 break; 468 break;
diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c
index ca15878ce5c0..72373b113885 100644
--- a/drivers/mfd/syscon.c
+++ b/drivers/mfd/syscon.c
@@ -15,6 +15,7 @@
15#include <linux/err.h> 15#include <linux/err.h>
16#include <linux/io.h> 16#include <linux/io.h>
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/list.h>
18#include <linux/of.h> 19#include <linux/of.h>
19#include <linux/of_address.h> 20#include <linux/of_address.h>
20#include <linux/of_platform.h> 21#include <linux/of_platform.h>
@@ -22,31 +23,94 @@
22#include <linux/platform_device.h> 23#include <linux/platform_device.h>
23#include <linux/regmap.h> 24#include <linux/regmap.h>
24#include <linux/mfd/syscon.h> 25#include <linux/mfd/syscon.h>
26#include <linux/slab.h>
25 27
26static struct platform_driver syscon_driver; 28static struct platform_driver syscon_driver;
27 29
30static DEFINE_SPINLOCK(syscon_list_slock);
31static LIST_HEAD(syscon_list);
32
28struct syscon { 33struct syscon {
34 struct device_node *np;
29 struct regmap *regmap; 35 struct regmap *regmap;
36 struct list_head list;
37};
38
39static struct regmap_config syscon_regmap_config = {
40 .reg_bits = 32,
41 .val_bits = 32,
42 .reg_stride = 4,
30}; 43};
31 44
32static int syscon_match_node(struct device *dev, void *data) 45static struct syscon *of_syscon_register(struct device_node *np)
33{ 46{
34 struct device_node *dn = data; 47 struct syscon *syscon;
48 struct regmap *regmap;
49 void __iomem *base;
50 int ret;
51 struct regmap_config syscon_config = syscon_regmap_config;
52
53 if (!of_device_is_compatible(np, "syscon"))
54 return ERR_PTR(-EINVAL);
55
56 syscon = kzalloc(sizeof(*syscon), GFP_KERNEL);
57 if (!syscon)
58 return ERR_PTR(-ENOMEM);
59
60 base = of_iomap(np, 0);
61 if (!base) {
62 ret = -ENOMEM;
63 goto err_map;
64 }
65
66 /* Parse the device's DT node for an endianness specification */
67 if (of_property_read_bool(np, "big-endian"))
68 syscon_config.val_format_endian = REGMAP_ENDIAN_BIG;
69 else if (of_property_read_bool(np, "little-endian"))
70 syscon_config.val_format_endian = REGMAP_ENDIAN_LITTLE;
71
72 regmap = regmap_init_mmio(NULL, base, &syscon_config);
73 if (IS_ERR(regmap)) {
74 pr_err("regmap init failed\n");
75 ret = PTR_ERR(regmap);
76 goto err_regmap;
77 }
78
79 syscon->regmap = regmap;
80 syscon->np = np;
81
82 spin_lock(&syscon_list_slock);
83 list_add_tail(&syscon->list, &syscon_list);
84 spin_unlock(&syscon_list_slock);
35 85
36 return (dev->of_node == dn) ? 1 : 0; 86 return syscon;
87
88err_regmap:
89 iounmap(base);
90err_map:
91 kfree(syscon);
92 return ERR_PTR(ret);
37} 93}
38 94
39struct regmap *syscon_node_to_regmap(struct device_node *np) 95struct regmap *syscon_node_to_regmap(struct device_node *np)
40{ 96{
41 struct syscon *syscon; 97 struct syscon *entry, *syscon = NULL;
42 struct device *dev;
43 98
44 dev = driver_find_device(&syscon_driver.driver, NULL, np, 99 spin_lock(&syscon_list_slock);
45 syscon_match_node);
46 if (!dev)
47 return ERR_PTR(-EPROBE_DEFER);
48 100
49 syscon = dev_get_drvdata(dev); 101 list_for_each_entry(entry, &syscon_list, list)
102 if (entry->np == np) {
103 syscon = entry;
104 break;
105 }
106
107 spin_unlock(&syscon_list_slock);
108
109 if (!syscon)
110 syscon = of_syscon_register(np);
111
112 if (IS_ERR(syscon))
113 return ERR_CAST(syscon);
50 114
51 return syscon->regmap; 115 return syscon->regmap;
52} 116}
@@ -110,17 +174,6 @@ struct regmap *syscon_regmap_lookup_by_phandle(struct device_node *np,
110} 174}
111EXPORT_SYMBOL_GPL(syscon_regmap_lookup_by_phandle); 175EXPORT_SYMBOL_GPL(syscon_regmap_lookup_by_phandle);
112 176
113static const struct of_device_id of_syscon_match[] = {
114 { .compatible = "syscon", },
115 { },
116};
117
118static struct regmap_config syscon_regmap_config = {
119 .reg_bits = 32,
120 .val_bits = 32,
121 .reg_stride = 4,
122};
123
124static int syscon_probe(struct platform_device *pdev) 177static int syscon_probe(struct platform_device *pdev)
125{ 178{
126 struct device *dev = &pdev->dev; 179 struct device *dev = &pdev->dev;
@@ -167,7 +220,6 @@ static struct platform_driver syscon_driver = {
167 .driver = { 220 .driver = {
168 .name = "syscon", 221 .name = "syscon",
169 .owner = THIS_MODULE, 222 .owner = THIS_MODULE,
170 .of_match_table = of_syscon_match,
171 }, 223 },
172 .probe = syscon_probe, 224 .probe = syscon_probe,
173 .id_table = syscon_ids, 225 .id_table = syscon_ids,
diff --git a/drivers/mfd/t7l66xb.c b/drivers/mfd/t7l66xb.c
index 9e04a7485981..439d905bb219 100644
--- a/drivers/mfd/t7l66xb.c
+++ b/drivers/mfd/t7l66xb.c
@@ -87,7 +87,7 @@ static int t7l66xb_mmc_enable(struct platform_device *mmc)
87 unsigned long flags; 87 unsigned long flags;
88 u8 dev_ctl; 88 u8 dev_ctl;
89 89
90 clk_enable(t7l66xb->clk32k); 90 clk_prepare_enable(t7l66xb->clk32k);
91 91
92 spin_lock_irqsave(&t7l66xb->lock, flags); 92 spin_lock_irqsave(&t7l66xb->lock, flags);
93 93
@@ -118,7 +118,7 @@ static int t7l66xb_mmc_disable(struct platform_device *mmc)
118 118
119 spin_unlock_irqrestore(&t7l66xb->lock, flags); 119 spin_unlock_irqrestore(&t7l66xb->lock, flags);
120 120
121 clk_disable(t7l66xb->clk32k); 121 clk_disable_unprepare(t7l66xb->clk32k);
122 122
123 return 0; 123 return 0;
124} 124}
@@ -285,7 +285,7 @@ static int t7l66xb_suspend(struct platform_device *dev, pm_message_t state)
285 285
286 if (pdata && pdata->suspend) 286 if (pdata && pdata->suspend)
287 pdata->suspend(dev); 287 pdata->suspend(dev);
288 clk_disable(t7l66xb->clk48m); 288 clk_disable_unprepare(t7l66xb->clk48m);
289 289
290 return 0; 290 return 0;
291} 291}
@@ -295,7 +295,7 @@ static int t7l66xb_resume(struct platform_device *dev)
295 struct t7l66xb *t7l66xb = platform_get_drvdata(dev); 295 struct t7l66xb *t7l66xb = platform_get_drvdata(dev);
296 struct t7l66xb_platform_data *pdata = dev_get_platdata(&dev->dev); 296 struct t7l66xb_platform_data *pdata = dev_get_platdata(&dev->dev);
297 297
298 clk_enable(t7l66xb->clk48m); 298 clk_prepare_enable(t7l66xb->clk48m);
299 if (pdata && pdata->resume) 299 if (pdata && pdata->resume)
300 pdata->resume(dev); 300 pdata->resume(dev);
301 301
@@ -369,7 +369,7 @@ static int t7l66xb_probe(struct platform_device *dev)
369 goto err_ioremap; 369 goto err_ioremap;
370 } 370 }
371 371
372 clk_enable(t7l66xb->clk48m); 372 clk_prepare_enable(t7l66xb->clk48m);
373 373
374 if (pdata && pdata->enable) 374 if (pdata && pdata->enable)
375 pdata->enable(dev); 375 pdata->enable(dev);
@@ -414,9 +414,9 @@ static int t7l66xb_remove(struct platform_device *dev)
414 int ret; 414 int ret;
415 415
416 ret = pdata->disable(dev); 416 ret = pdata->disable(dev);
417 clk_disable(t7l66xb->clk48m); 417 clk_disable_unprepare(t7l66xb->clk48m);
418 clk_put(t7l66xb->clk48m); 418 clk_put(t7l66xb->clk48m);
419 clk_disable(t7l66xb->clk32k); 419 clk_disable_unprepare(t7l66xb->clk32k);
420 clk_put(t7l66xb->clk32k); 420 clk_put(t7l66xb->clk32k);
421 t7l66xb_detach_irq(dev); 421 t7l66xb_detach_irq(dev);
422 iounmap(t7l66xb->scr); 422 iounmap(t7l66xb->scr);
diff --git a/drivers/mfd/tc3589x.c b/drivers/mfd/tc3589x.c
index 0072e668c208..aacb3720065c 100644
--- a/drivers/mfd/tc3589x.c
+++ b/drivers/mfd/tc3589x.c
@@ -241,10 +241,8 @@ static struct irq_domain_ops tc3589x_irq_ops = {
241 241
242static int tc3589x_irq_init(struct tc3589x *tc3589x, struct device_node *np) 242static int tc3589x_irq_init(struct tc3589x *tc3589x, struct device_node *np)
243{ 243{
244 int base = tc3589x->irq_base;
245
246 tc3589x->domain = irq_domain_add_simple( 244 tc3589x->domain = irq_domain_add_simple(
247 np, TC3589x_NR_INTERNAL_IRQS, base, 245 np, TC3589x_NR_INTERNAL_IRQS, 0,
248 &tc3589x_irq_ops, tc3589x); 246 &tc3589x_irq_ops, tc3589x);
249 247
250 if (!tc3589x->domain) { 248 if (!tc3589x->domain) {
@@ -298,7 +296,7 @@ static int tc3589x_device_init(struct tc3589x *tc3589x)
298 if (blocks & TC3589x_BLOCK_GPIO) { 296 if (blocks & TC3589x_BLOCK_GPIO) {
299 ret = mfd_add_devices(tc3589x->dev, -1, tc3589x_dev_gpio, 297 ret = mfd_add_devices(tc3589x->dev, -1, tc3589x_dev_gpio,
300 ARRAY_SIZE(tc3589x_dev_gpio), NULL, 298 ARRAY_SIZE(tc3589x_dev_gpio), NULL,
301 tc3589x->irq_base, tc3589x->domain); 299 0, tc3589x->domain);
302 if (ret) { 300 if (ret) {
303 dev_err(tc3589x->dev, "failed to add gpio child\n"); 301 dev_err(tc3589x->dev, "failed to add gpio child\n");
304 return ret; 302 return ret;
@@ -309,7 +307,7 @@ static int tc3589x_device_init(struct tc3589x *tc3589x)
309 if (blocks & TC3589x_BLOCK_KEYPAD) { 307 if (blocks & TC3589x_BLOCK_KEYPAD) {
310 ret = mfd_add_devices(tc3589x->dev, -1, tc3589x_dev_keypad, 308 ret = mfd_add_devices(tc3589x->dev, -1, tc3589x_dev_keypad,
311 ARRAY_SIZE(tc3589x_dev_keypad), NULL, 309 ARRAY_SIZE(tc3589x_dev_keypad), NULL,
312 tc3589x->irq_base, tc3589x->domain); 310 0, tc3589x->domain);
313 if (ret) { 311 if (ret) {
314 dev_err(tc3589x->dev, "failed to keypad child\n"); 312 dev_err(tc3589x->dev, "failed to keypad child\n");
315 return ret; 313 return ret;
@@ -404,7 +402,6 @@ static int tc3589x_probe(struct i2c_client *i2c,
404 tc3589x->dev = &i2c->dev; 402 tc3589x->dev = &i2c->dev;
405 tc3589x->i2c = i2c; 403 tc3589x->i2c = i2c;
406 tc3589x->pdata = pdata; 404 tc3589x->pdata = pdata;
407 tc3589x->irq_base = pdata->irq_base;
408 405
409 switch (version) { 406 switch (version) {
410 case TC3589X_TC35893: 407 case TC3589X_TC35893:
diff --git a/drivers/mfd/tc6387xb.c b/drivers/mfd/tc6387xb.c
index e71f88000ae5..85fab3729102 100644
--- a/drivers/mfd/tc6387xb.c
+++ b/drivers/mfd/tc6387xb.c
@@ -52,7 +52,7 @@ static int tc6387xb_suspend(struct platform_device *dev, pm_message_t state)
52 52
53 if (pdata && pdata->suspend) 53 if (pdata && pdata->suspend)
54 pdata->suspend(dev); 54 pdata->suspend(dev);
55 clk_disable(tc6387xb->clk32k); 55 clk_disable_unprepare(tc6387xb->clk32k);
56 56
57 return 0; 57 return 0;
58} 58}
@@ -62,7 +62,7 @@ static int tc6387xb_resume(struct platform_device *dev)
62 struct tc6387xb *tc6387xb = platform_get_drvdata(dev); 62 struct tc6387xb *tc6387xb = platform_get_drvdata(dev);
63 struct tc6387xb_platform_data *pdata = dev_get_platdata(&dev->dev); 63 struct tc6387xb_platform_data *pdata = dev_get_platdata(&dev->dev);
64 64
65 clk_enable(tc6387xb->clk32k); 65 clk_prepare_enable(tc6387xb->clk32k);
66 if (pdata && pdata->resume) 66 if (pdata && pdata->resume)
67 pdata->resume(dev); 67 pdata->resume(dev);
68 68
@@ -100,7 +100,7 @@ static int tc6387xb_mmc_enable(struct platform_device *mmc)
100 struct platform_device *dev = to_platform_device(mmc->dev.parent); 100 struct platform_device *dev = to_platform_device(mmc->dev.parent);
101 struct tc6387xb *tc6387xb = platform_get_drvdata(dev); 101 struct tc6387xb *tc6387xb = platform_get_drvdata(dev);
102 102
103 clk_enable(tc6387xb->clk32k); 103 clk_prepare_enable(tc6387xb->clk32k);
104 104
105 tmio_core_mmc_enable(tc6387xb->scr + 0x200, 0, 105 tmio_core_mmc_enable(tc6387xb->scr + 0x200, 0,
106 tc6387xb_mmc_resources[0].start & 0xfffe); 106 tc6387xb_mmc_resources[0].start & 0xfffe);
@@ -113,7 +113,7 @@ static int tc6387xb_mmc_disable(struct platform_device *mmc)
113 struct platform_device *dev = to_platform_device(mmc->dev.parent); 113 struct platform_device *dev = to_platform_device(mmc->dev.parent);
114 struct tc6387xb *tc6387xb = platform_get_drvdata(dev); 114 struct tc6387xb *tc6387xb = platform_get_drvdata(dev);
115 115
116 clk_disable(tc6387xb->clk32k); 116 clk_disable_unprepare(tc6387xb->clk32k);
117 117
118 return 0; 118 return 0;
119} 119}
@@ -214,7 +214,7 @@ static int tc6387xb_remove(struct platform_device *dev)
214 mfd_remove_devices(&dev->dev); 214 mfd_remove_devices(&dev->dev);
215 iounmap(tc6387xb->scr); 215 iounmap(tc6387xb->scr);
216 release_resource(&tc6387xb->rscr); 216 release_resource(&tc6387xb->rscr);
217 clk_disable(tc6387xb->clk32k); 217 clk_disable_unprepare(tc6387xb->clk32k);
218 clk_put(tc6387xb->clk32k); 218 clk_put(tc6387xb->clk32k);
219 kfree(tc6387xb); 219 kfree(tc6387xb);
220 220
diff --git a/drivers/mfd/tc6393xb.c b/drivers/mfd/tc6393xb.c
index 4fac16bcd732..d35f11fbeab7 100644
--- a/drivers/mfd/tc6393xb.c
+++ b/drivers/mfd/tc6393xb.c
@@ -263,6 +263,17 @@ static int tc6393xb_ohci_disable(struct platform_device *dev)
263 return 0; 263 return 0;
264} 264}
265 265
266static int tc6393xb_ohci_suspend(struct platform_device *dev)
267{
268 struct tc6393xb_platform_data *tcpd = dev_get_platdata(dev->dev.parent);
269
270 /* We can't properly store/restore OHCI state, so fail here */
271 if (tcpd->resume_restore)
272 return -EBUSY;
273
274 return tc6393xb_ohci_disable(dev);
275}
276
266static int tc6393xb_fb_enable(struct platform_device *dev) 277static int tc6393xb_fb_enable(struct platform_device *dev)
267{ 278{
268 struct tc6393xb *tc6393xb = dev_get_drvdata(dev->dev.parent); 279 struct tc6393xb *tc6393xb = dev_get_drvdata(dev->dev.parent);
@@ -403,7 +414,7 @@ static struct mfd_cell tc6393xb_cells[] = {
403 .num_resources = ARRAY_SIZE(tc6393xb_ohci_resources), 414 .num_resources = ARRAY_SIZE(tc6393xb_ohci_resources),
404 .resources = tc6393xb_ohci_resources, 415 .resources = tc6393xb_ohci_resources,
405 .enable = tc6393xb_ohci_enable, 416 .enable = tc6393xb_ohci_enable,
406 .suspend = tc6393xb_ohci_disable, 417 .suspend = tc6393xb_ohci_suspend,
407 .resume = tc6393xb_ohci_enable, 418 .resume = tc6393xb_ohci_enable,
408 .disable = tc6393xb_ohci_disable, 419 .disable = tc6393xb_ohci_disable,
409 }, 420 },
@@ -654,7 +665,7 @@ static int tc6393xb_probe(struct platform_device *dev)
654 goto err_ioremap; 665 goto err_ioremap;
655 } 666 }
656 667
657 ret = clk_enable(tc6393xb->clk); 668 ret = clk_prepare_enable(tc6393xb->clk);
658 if (ret) 669 if (ret)
659 goto err_clk_enable; 670 goto err_clk_enable;
660 671
@@ -717,7 +728,7 @@ err_gpio_add:
717 gpiochip_remove(&tc6393xb->gpio); 728 gpiochip_remove(&tc6393xb->gpio);
718 tcpd->disable(dev); 729 tcpd->disable(dev);
719err_enable: 730err_enable:
720 clk_disable(tc6393xb->clk); 731 clk_disable_unprepare(tc6393xb->clk);
721err_clk_enable: 732err_clk_enable:
722 iounmap(tc6393xb->scr); 733 iounmap(tc6393xb->scr);
723err_ioremap: 734err_ioremap:
@@ -748,7 +759,7 @@ static int tc6393xb_remove(struct platform_device *dev)
748 gpiochip_remove(&tc6393xb->gpio); 759 gpiochip_remove(&tc6393xb->gpio);
749 760
750 ret = tcpd->disable(dev); 761 ret = tcpd->disable(dev);
751 clk_disable(tc6393xb->clk); 762 clk_disable_unprepare(tc6393xb->clk);
752 iounmap(tc6393xb->scr); 763 iounmap(tc6393xb->scr);
753 release_resource(&tc6393xb->rscr); 764 release_resource(&tc6393xb->rscr);
754 clk_put(tc6393xb->clk); 765 clk_put(tc6393xb->clk);
@@ -776,7 +787,7 @@ static int tc6393xb_suspend(struct platform_device *dev, pm_message_t state)
776 ioread8(tc6393xb->scr + SCR_GPI_BCR(i)); 787 ioread8(tc6393xb->scr + SCR_GPI_BCR(i));
777 } 788 }
778 ret = tcpd->suspend(dev); 789 ret = tcpd->suspend(dev);
779 clk_disable(tc6393xb->clk); 790 clk_disable_unprepare(tc6393xb->clk);
780 791
781 return ret; 792 return ret;
782} 793}
@@ -788,7 +799,7 @@ static int tc6393xb_resume(struct platform_device *dev)
788 int ret; 799 int ret;
789 int i; 800 int i;
790 801
791 clk_enable(tc6393xb->clk); 802 clk_prepare_enable(tc6393xb->clk);
792 803
793 ret = tcpd->resume(dev); 804 ret = tcpd->resume(dev);
794 if (ret) 805 if (ret)
diff --git a/drivers/mfd/tps65090.c b/drivers/mfd/tps65090.c
index 1c3e6e2efe41..14b62e11aff4 100644
--- a/drivers/mfd/tps65090.c
+++ b/drivers/mfd/tps65090.c
@@ -76,58 +76,58 @@ static struct mfd_cell tps65090s[] = {
76static const struct regmap_irq tps65090_irqs[] = { 76static const struct regmap_irq tps65090_irqs[] = {
77 /* INT1 IRQs*/ 77 /* INT1 IRQs*/
78 [TPS65090_IRQ_VAC_STATUS_CHANGE] = { 78 [TPS65090_IRQ_VAC_STATUS_CHANGE] = {
79 .mask = TPS65090_INT1_MASK_VAC_STATUS_CHANGE, 79 .mask = TPS65090_INT1_MASK_VAC_STATUS_CHANGE,
80 }, 80 },
81 [TPS65090_IRQ_VSYS_STATUS_CHANGE] = { 81 [TPS65090_IRQ_VSYS_STATUS_CHANGE] = {
82 .mask = TPS65090_INT1_MASK_VSYS_STATUS_CHANGE, 82 .mask = TPS65090_INT1_MASK_VSYS_STATUS_CHANGE,
83 }, 83 },
84 [TPS65090_IRQ_BAT_STATUS_CHANGE] = { 84 [TPS65090_IRQ_BAT_STATUS_CHANGE] = {
85 .mask = TPS65090_INT1_MASK_BAT_STATUS_CHANGE, 85 .mask = TPS65090_INT1_MASK_BAT_STATUS_CHANGE,
86 }, 86 },
87 [TPS65090_IRQ_CHARGING_STATUS_CHANGE] = { 87 [TPS65090_IRQ_CHARGING_STATUS_CHANGE] = {
88 .mask = TPS65090_INT1_MASK_CHARGING_STATUS_CHANGE, 88 .mask = TPS65090_INT1_MASK_CHARGING_STATUS_CHANGE,
89 }, 89 },
90 [TPS65090_IRQ_CHARGING_COMPLETE] = { 90 [TPS65090_IRQ_CHARGING_COMPLETE] = {
91 .mask = TPS65090_INT1_MASK_CHARGING_COMPLETE, 91 .mask = TPS65090_INT1_MASK_CHARGING_COMPLETE,
92 }, 92 },
93 [TPS65090_IRQ_OVERLOAD_DCDC1] = { 93 [TPS65090_IRQ_OVERLOAD_DCDC1] = {
94 .mask = TPS65090_INT1_MASK_OVERLOAD_DCDC1, 94 .mask = TPS65090_INT1_MASK_OVERLOAD_DCDC1,
95 }, 95 },
96 [TPS65090_IRQ_OVERLOAD_DCDC2] = { 96 [TPS65090_IRQ_OVERLOAD_DCDC2] = {
97 .mask = TPS65090_INT1_MASK_OVERLOAD_DCDC2, 97 .mask = TPS65090_INT1_MASK_OVERLOAD_DCDC2,
98 }, 98 },
99 /* INT2 IRQs*/ 99 /* INT2 IRQs*/
100 [TPS65090_IRQ_OVERLOAD_DCDC3] = { 100 [TPS65090_IRQ_OVERLOAD_DCDC3] = {
101 .reg_offset = 1, 101 .reg_offset = 1,
102 .mask = TPS65090_INT2_MASK_OVERLOAD_DCDC3, 102 .mask = TPS65090_INT2_MASK_OVERLOAD_DCDC3,
103 }, 103 },
104 [TPS65090_IRQ_OVERLOAD_FET1] = { 104 [TPS65090_IRQ_OVERLOAD_FET1] = {
105 .reg_offset = 1, 105 .reg_offset = 1,
106 .mask = TPS65090_INT2_MASK_OVERLOAD_FET1, 106 .mask = TPS65090_INT2_MASK_OVERLOAD_FET1,
107 }, 107 },
108 [TPS65090_IRQ_OVERLOAD_FET2] = { 108 [TPS65090_IRQ_OVERLOAD_FET2] = {
109 .reg_offset = 1, 109 .reg_offset = 1,
110 .mask = TPS65090_INT2_MASK_OVERLOAD_FET2, 110 .mask = TPS65090_INT2_MASK_OVERLOAD_FET2,
111 }, 111 },
112 [TPS65090_IRQ_OVERLOAD_FET3] = { 112 [TPS65090_IRQ_OVERLOAD_FET3] = {
113 .reg_offset = 1, 113 .reg_offset = 1,
114 .mask = TPS65090_INT2_MASK_OVERLOAD_FET3, 114 .mask = TPS65090_INT2_MASK_OVERLOAD_FET3,
115 }, 115 },
116 [TPS65090_IRQ_OVERLOAD_FET4] = { 116 [TPS65090_IRQ_OVERLOAD_FET4] = {
117 .reg_offset = 1, 117 .reg_offset = 1,
118 .mask = TPS65090_INT2_MASK_OVERLOAD_FET4, 118 .mask = TPS65090_INT2_MASK_OVERLOAD_FET4,
119 }, 119 },
120 [TPS65090_IRQ_OVERLOAD_FET5] = { 120 [TPS65090_IRQ_OVERLOAD_FET5] = {
121 .reg_offset = 1, 121 .reg_offset = 1,
122 .mask = TPS65090_INT2_MASK_OVERLOAD_FET5, 122 .mask = TPS65090_INT2_MASK_OVERLOAD_FET5,
123 }, 123 },
124 [TPS65090_IRQ_OVERLOAD_FET6] = { 124 [TPS65090_IRQ_OVERLOAD_FET6] = {
125 .reg_offset = 1, 125 .reg_offset = 1,
126 .mask = TPS65090_INT2_MASK_OVERLOAD_FET6, 126 .mask = TPS65090_INT2_MASK_OVERLOAD_FET6,
127 }, 127 },
128 [TPS65090_IRQ_OVERLOAD_FET7] = { 128 [TPS65090_IRQ_OVERLOAD_FET7] = {
129 .reg_offset = 1, 129 .reg_offset = 1,
130 .mask = TPS65090_INT2_MASK_OVERLOAD_FET7, 130 .mask = TPS65090_INT2_MASK_OVERLOAD_FET7,
131 }, 131 },
132}; 132};
133 133
@@ -176,7 +176,7 @@ MODULE_DEVICE_TABLE(of, tps65090_of_match);
176#endif 176#endif
177 177
178static int tps65090_i2c_probe(struct i2c_client *client, 178static int tps65090_i2c_probe(struct i2c_client *client,
179 const struct i2c_device_id *id) 179 const struct i2c_device_id *id)
180{ 180{
181 struct tps65090_platform_data *pdata = dev_get_platdata(&client->dev); 181 struct tps65090_platform_data *pdata = dev_get_platdata(&client->dev);
182 int irq_base = 0; 182 int irq_base = 0;
@@ -210,11 +210,11 @@ static int tps65090_i2c_probe(struct i2c_client *client,
210 210
211 if (client->irq) { 211 if (client->irq) {
212 ret = regmap_add_irq_chip(tps65090->rmap, client->irq, 212 ret = regmap_add_irq_chip(tps65090->rmap, client->irq,
213 IRQF_ONESHOT | IRQF_TRIGGER_LOW, irq_base, 213 IRQF_ONESHOT | IRQF_TRIGGER_LOW, irq_base,
214 &tps65090_irq_chip, &tps65090->irq_data); 214 &tps65090_irq_chip, &tps65090->irq_data);
215 if (ret) { 215 if (ret) {
216 dev_err(&client->dev, 216 dev_err(&client->dev,
217 "IRQ init failed with err: %d\n", ret); 217 "IRQ init failed with err: %d\n", ret);
218 return ret; 218 return ret;
219 } 219 }
220 } else { 220 } else {
@@ -223,8 +223,8 @@ static int tps65090_i2c_probe(struct i2c_client *client,
223 } 223 }
224 224
225 ret = mfd_add_devices(tps65090->dev, -1, tps65090s, 225 ret = mfd_add_devices(tps65090->dev, -1, tps65090s,
226 ARRAY_SIZE(tps65090s), NULL, 226 ARRAY_SIZE(tps65090s), NULL,
227 0, regmap_irq_get_domain(tps65090->irq_data)); 227 0, regmap_irq_get_domain(tps65090->irq_data));
228 if (ret) { 228 if (ret) {
229 dev_err(&client->dev, "add mfd devices failed with err: %d\n", 229 dev_err(&client->dev, "add mfd devices failed with err: %d\n",
230 ret); 230 ret);
diff --git a/drivers/mfd/tps65217.c b/drivers/mfd/tps65217.c
index a8ee52c95f2f..80a919a8ca97 100644
--- a/drivers/mfd/tps65217.c
+++ b/drivers/mfd/tps65217.c
@@ -33,9 +33,11 @@
33static const struct mfd_cell tps65217s[] = { 33static const struct mfd_cell tps65217s[] = {
34 { 34 {
35 .name = "tps65217-pmic", 35 .name = "tps65217-pmic",
36 .of_compatible = "ti,tps65217-pmic",
36 }, 37 },
37 { 38 {
38 .name = "tps65217-bl", 39 .name = "tps65217-bl",
40 .of_compatible = "ti,tps65217-bl",
39 }, 41 },
40}; 42};
41 43
diff --git a/drivers/mfd/twl4030-power.c b/drivers/mfd/twl4030-power.c
index 50f9091bcd38..7d63e324e6a8 100644
--- a/drivers/mfd/twl4030-power.c
+++ b/drivers/mfd/twl4030-power.c
@@ -831,6 +831,9 @@ static struct twl4030_power_data osc_off_idle = {
831 831
832static struct of_device_id twl4030_power_of_match[] = { 832static struct of_device_id twl4030_power_of_match[] = {
833 { 833 {
834 .compatible = "ti,twl4030-power",
835 },
836 {
834 .compatible = "ti,twl4030-power-reset", 837 .compatible = "ti,twl4030-power-reset",
835 .data = &omap3_reset, 838 .data = &omap3_reset,
836 }, 839 },
diff --git a/drivers/mfd/viperboard.c b/drivers/mfd/viperboard.c
index 3c2b8f9e3c84..e6b3c70aeb22 100644
--- a/drivers/mfd/viperboard.c
+++ b/drivers/mfd/viperboard.c
@@ -93,9 +93,8 @@ static int vprbrd_probe(struct usb_interface *interface,
93 version >> 8, version & 0xff, 93 version >> 8, version & 0xff,
94 vb->usb_dev->bus->busnum, vb->usb_dev->devnum); 94 vb->usb_dev->bus->busnum, vb->usb_dev->devnum);
95 95
96 ret = mfd_add_devices(&interface->dev, PLATFORM_DEVID_AUTO, 96 ret = mfd_add_hotplug_devices(&interface->dev, vprbrd_devs,
97 vprbrd_devs, ARRAY_SIZE(vprbrd_devs), NULL, 0, 97 ARRAY_SIZE(vprbrd_devs));
98 NULL);
99 if (ret != 0) { 98 if (ret != 0) {
100 dev_err(&interface->dev, "Failed to add mfd devices to core."); 99 dev_err(&interface->dev, "Failed to add mfd devices to core.");
101 goto error; 100 goto error;
diff --git a/drivers/mfd/wm5102-tables.c b/drivers/mfd/wm5102-tables.c
index d6f35bbf795b..b326a82017ee 100644
--- a/drivers/mfd/wm5102-tables.c
+++ b/drivers/mfd/wm5102-tables.c
@@ -336,8 +336,6 @@ static const struct reg_default wm5102_reg_default[] = {
336 { 0x00000218, 0x01A6 }, /* R536 - Mic Bias Ctrl 1 */ 336 { 0x00000218, 0x01A6 }, /* R536 - Mic Bias Ctrl 1 */
337 { 0x00000219, 0x01A6 }, /* R537 - Mic Bias Ctrl 2 */ 337 { 0x00000219, 0x01A6 }, /* R537 - Mic Bias Ctrl 2 */
338 { 0x0000021A, 0x01A6 }, /* R538 - Mic Bias Ctrl 3 */ 338 { 0x0000021A, 0x01A6 }, /* R538 - Mic Bias Ctrl 3 */
339 { 0x00000225, 0x0400 }, /* R549 - HP Ctrl 1L */
340 { 0x00000226, 0x0400 }, /* R550 - HP Ctrl 1R */
341 { 0x00000293, 0x0000 }, /* R659 - Accessory Detect Mode 1 */ 339 { 0x00000293, 0x0000 }, /* R659 - Accessory Detect Mode 1 */
342 { 0x0000029B, 0x0020 }, /* R667 - Headphone Detect 1 */ 340 { 0x0000029B, 0x0020 }, /* R667 - Headphone Detect 1 */
343 { 0x0000029C, 0x0000 }, /* R668 - Headphone Detect 2 */ 341 { 0x0000029C, 0x0000 }, /* R668 - Headphone Detect 2 */
@@ -1112,6 +1110,8 @@ static bool wm5102_readable_register(struct device *dev, unsigned int reg)
1112 case ARIZONA_MIC_BIAS_CTRL_1: 1110 case ARIZONA_MIC_BIAS_CTRL_1:
1113 case ARIZONA_MIC_BIAS_CTRL_2: 1111 case ARIZONA_MIC_BIAS_CTRL_2:
1114 case ARIZONA_MIC_BIAS_CTRL_3: 1112 case ARIZONA_MIC_BIAS_CTRL_3:
1113 case ARIZONA_HP_CTRL_1L:
1114 case ARIZONA_HP_CTRL_1R:
1115 case ARIZONA_ACCESSORY_DETECT_MODE_1: 1115 case ARIZONA_ACCESSORY_DETECT_MODE_1:
1116 case ARIZONA_HEADPHONE_DETECT_1: 1116 case ARIZONA_HEADPHONE_DETECT_1:
1117 case ARIZONA_HEADPHONE_DETECT_2: 1117 case ARIZONA_HEADPHONE_DETECT_2:
@@ -1949,6 +1949,8 @@ static bool wm5102_volatile_register(struct device *dev, unsigned int reg)
1949 case ARIZONA_DSP1_SCRATCH_1: 1949 case ARIZONA_DSP1_SCRATCH_1:
1950 case ARIZONA_DSP1_SCRATCH_2: 1950 case ARIZONA_DSP1_SCRATCH_2:
1951 case ARIZONA_DSP1_SCRATCH_3: 1951 case ARIZONA_DSP1_SCRATCH_3:
1952 case ARIZONA_HP_CTRL_1L:
1953 case ARIZONA_HP_CTRL_1R:
1952 case ARIZONA_HEADPHONE_DETECT_2: 1954 case ARIZONA_HEADPHONE_DETECT_2:
1953 case ARIZONA_HP_DACVAL: 1955 case ARIZONA_HP_DACVAL:
1954 case ARIZONA_MIC_DETECT_3: 1956 case ARIZONA_MIC_DETECT_3:
diff --git a/drivers/mfd/wm5110-tables.c b/drivers/mfd/wm5110-tables.c
index 4642b5b816a0..12cad94b4035 100644
--- a/drivers/mfd/wm5110-tables.c
+++ b/drivers/mfd/wm5110-tables.c
@@ -895,8 +895,16 @@ static const struct reg_default wm5110_reg_default[] = {
895 { 0x00000548, 0x1818 }, /* R1352 - AIF2 Frame Ctrl 2 */ 895 { 0x00000548, 0x1818 }, /* R1352 - AIF2 Frame Ctrl 2 */
896 { 0x00000549, 0x0000 }, /* R1353 - AIF2 Frame Ctrl 3 */ 896 { 0x00000549, 0x0000 }, /* R1353 - AIF2 Frame Ctrl 3 */
897 { 0x0000054A, 0x0001 }, /* R1354 - AIF2 Frame Ctrl 4 */ 897 { 0x0000054A, 0x0001 }, /* R1354 - AIF2 Frame Ctrl 4 */
898 { 0x0000054B, 0x0002 }, /* R1355 - AIF2 Frame Ctrl 5 */
899 { 0x0000054C, 0x0003 }, /* R1356 - AIF2 Frame Ctrl 6 */
900 { 0x0000054D, 0x0004 }, /* R1357 - AIF2 Frame Ctrl 7 */
901 { 0x0000054E, 0x0005 }, /* R1358 - AIF2 Frame Ctrl 8 */
898 { 0x00000551, 0x0000 }, /* R1361 - AIF2 Frame Ctrl 11 */ 902 { 0x00000551, 0x0000 }, /* R1361 - AIF2 Frame Ctrl 11 */
899 { 0x00000552, 0x0001 }, /* R1362 - AIF2 Frame Ctrl 12 */ 903 { 0x00000552, 0x0001 }, /* R1362 - AIF2 Frame Ctrl 12 */
904 { 0x00000553, 0x0002 }, /* R1363 - AIF2 Frame Ctrl 13 */
905 { 0x00000554, 0x0003 }, /* R1364 - AIF2 Frame Ctrl 14 */
906 { 0x00000555, 0x0004 }, /* R1365 - AIF2 Frame Ctrl 15 */
907 { 0x00000556, 0x0005 }, /* R1366 - AIF2 Frame Ctrl 16 */
900 { 0x00000559, 0x0000 }, /* R1369 - AIF2 Tx Enables */ 908 { 0x00000559, 0x0000 }, /* R1369 - AIF2 Tx Enables */
901 { 0x0000055A, 0x0000 }, /* R1370 - AIF2 Rx Enables */ 909 { 0x0000055A, 0x0000 }, /* R1370 - AIF2 Rx Enables */
902 { 0x00000580, 0x000C }, /* R1408 - AIF3 BCLK Ctrl */ 910 { 0x00000580, 0x000C }, /* R1408 - AIF3 BCLK Ctrl */
@@ -1790,6 +1798,8 @@ static bool wm5110_readable_register(struct device *dev, unsigned int reg)
1790 case ARIZONA_MIC_BIAS_CTRL_1: 1798 case ARIZONA_MIC_BIAS_CTRL_1:
1791 case ARIZONA_MIC_BIAS_CTRL_2: 1799 case ARIZONA_MIC_BIAS_CTRL_2:
1792 case ARIZONA_MIC_BIAS_CTRL_3: 1800 case ARIZONA_MIC_BIAS_CTRL_3:
1801 case ARIZONA_HP_CTRL_1L:
1802 case ARIZONA_HP_CTRL_1R:
1793 case ARIZONA_ACCESSORY_DETECT_MODE_1: 1803 case ARIZONA_ACCESSORY_DETECT_MODE_1:
1794 case ARIZONA_HEADPHONE_DETECT_1: 1804 case ARIZONA_HEADPHONE_DETECT_1:
1795 case ARIZONA_HEADPHONE_DETECT_2: 1805 case ARIZONA_HEADPHONE_DETECT_2:
@@ -1934,8 +1944,16 @@ static bool wm5110_readable_register(struct device *dev, unsigned int reg)
1934 case ARIZONA_AIF2_FRAME_CTRL_2: 1944 case ARIZONA_AIF2_FRAME_CTRL_2:
1935 case ARIZONA_AIF2_FRAME_CTRL_3: 1945 case ARIZONA_AIF2_FRAME_CTRL_3:
1936 case ARIZONA_AIF2_FRAME_CTRL_4: 1946 case ARIZONA_AIF2_FRAME_CTRL_4:
1947 case ARIZONA_AIF2_FRAME_CTRL_5:
1948 case ARIZONA_AIF2_FRAME_CTRL_6:
1949 case ARIZONA_AIF2_FRAME_CTRL_7:
1950 case ARIZONA_AIF2_FRAME_CTRL_8:
1937 case ARIZONA_AIF2_FRAME_CTRL_11: 1951 case ARIZONA_AIF2_FRAME_CTRL_11:
1938 case ARIZONA_AIF2_FRAME_CTRL_12: 1952 case ARIZONA_AIF2_FRAME_CTRL_12:
1953 case ARIZONA_AIF2_FRAME_CTRL_13:
1954 case ARIZONA_AIF2_FRAME_CTRL_14:
1955 case ARIZONA_AIF2_FRAME_CTRL_15:
1956 case ARIZONA_AIF2_FRAME_CTRL_16:
1939 case ARIZONA_AIF2_TX_ENABLES: 1957 case ARIZONA_AIF2_TX_ENABLES:
1940 case ARIZONA_AIF2_RX_ENABLES: 1958 case ARIZONA_AIF2_RX_ENABLES:
1941 case ARIZONA_AIF3_BCLK_CTRL: 1959 case ARIZONA_AIF3_BCLK_CTRL:
@@ -2825,6 +2843,8 @@ static bool wm5110_volatile_register(struct device *dev, unsigned int reg)
2825 case ARIZONA_ASYNC_SAMPLE_RATE_1_STATUS: 2843 case ARIZONA_ASYNC_SAMPLE_RATE_1_STATUS:
2826 case ARIZONA_ASYNC_SAMPLE_RATE_2_STATUS: 2844 case ARIZONA_ASYNC_SAMPLE_RATE_2_STATUS:
2827 case ARIZONA_MIC_DETECT_3: 2845 case ARIZONA_MIC_DETECT_3:
2846 case ARIZONA_HP_CTRL_1L:
2847 case ARIZONA_HP_CTRL_1R:
2828 case ARIZONA_HEADPHONE_DETECT_2: 2848 case ARIZONA_HEADPHONE_DETECT_2:
2829 case ARIZONA_INPUT_ENABLES_STATUS: 2849 case ARIZONA_INPUT_ENABLES_STATUS:
2830 case ARIZONA_OUTPUT_STATUS_1: 2850 case ARIZONA_OUTPUT_STATUS_1:
diff --git a/drivers/mfd/wm8350-core.c b/drivers/mfd/wm8350-core.c
index 4ab527f5c53b..f5124a8acad8 100644
--- a/drivers/mfd/wm8350-core.c
+++ b/drivers/mfd/wm8350-core.c
@@ -308,7 +308,7 @@ int wm8350_device_init(struct wm8350 *wm8350, int irq,
308 goto err; 308 goto err;
309 } 309 }
310 310
311 mode = id2 & WM8350_CONF_STS_MASK >> 10; 311 mode = (id2 & WM8350_CONF_STS_MASK) >> 10;
312 cust_id = id2 & WM8350_CUST_ID_MASK; 312 cust_id = id2 & WM8350_CUST_ID_MASK;
313 chip_rev = (id2 & WM8350_CHIP_REV_MASK) >> 12; 313 chip_rev = (id2 & WM8350_CHIP_REV_MASK) >> 12;
314 dev_info(wm8350->dev, 314 dev_info(wm8350->dev,
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index e6fab94e2c8a..6ca9d25cc3f0 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -116,7 +116,7 @@ static const char *wm8958_main_supplies[] = {
116 "SPKVDD2", 116 "SPKVDD2",
117}; 117};
118 118
119#ifdef CONFIG_PM_RUNTIME 119#ifdef CONFIG_PM
120static int wm8994_suspend(struct device *dev) 120static int wm8994_suspend(struct device *dev)
121{ 121{
122 struct wm8994 *wm8994 = dev_get_drvdata(dev); 122 struct wm8994 *wm8994 = dev_get_drvdata(dev);
diff --git a/drivers/mfd/wm8997-tables.c b/drivers/mfd/wm8997-tables.c
index 510da3b52324..c0c25d75aacc 100644
--- a/drivers/mfd/wm8997-tables.c
+++ b/drivers/mfd/wm8997-tables.c
@@ -670,6 +670,7 @@ static const struct reg_default wm8997_reg_default[] = {
670 { 0x00000C23, 0x0000 }, /* R3107 - Misc Pad Ctrl 4 */ 670 { 0x00000C23, 0x0000 }, /* R3107 - Misc Pad Ctrl 4 */
671 { 0x00000C24, 0x0000 }, /* R3108 - Misc Pad Ctrl 5 */ 671 { 0x00000C24, 0x0000 }, /* R3108 - Misc Pad Ctrl 5 */
672 { 0x00000D08, 0xFFFF }, /* R3336 - Interrupt Status 1 Mask */ 672 { 0x00000D08, 0xFFFF }, /* R3336 - Interrupt Status 1 Mask */
673 { 0x00000D09, 0xFFFF }, /* R3337 - Interrupt Status 2 Mask */
673 { 0x00000D0A, 0xFFFF }, /* R3338 - Interrupt Status 3 Mask */ 674 { 0x00000D0A, 0xFFFF }, /* R3338 - Interrupt Status 3 Mask */
674 { 0x00000D0B, 0xFFFF }, /* R3339 - Interrupt Status 4 Mask */ 675 { 0x00000D0B, 0xFFFF }, /* R3339 - Interrupt Status 4 Mask */
675 { 0x00000D0C, 0xFEFF }, /* R3340 - Interrupt Status 5 Mask */ 676 { 0x00000D0C, 0xFEFF }, /* R3340 - Interrupt Status 5 Mask */
@@ -886,6 +887,8 @@ static bool wm8997_readable_register(struct device *dev, unsigned int reg)
886 case ARIZONA_MIC_BIAS_CTRL_1: 887 case ARIZONA_MIC_BIAS_CTRL_1:
887 case ARIZONA_MIC_BIAS_CTRL_2: 888 case ARIZONA_MIC_BIAS_CTRL_2:
888 case ARIZONA_MIC_BIAS_CTRL_3: 889 case ARIZONA_MIC_BIAS_CTRL_3:
890 case ARIZONA_HP_CTRL_1L:
891 case ARIZONA_HP_CTRL_1R:
889 case ARIZONA_ACCESSORY_DETECT_MODE_1: 892 case ARIZONA_ACCESSORY_DETECT_MODE_1:
890 case ARIZONA_HEADPHONE_DETECT_1: 893 case ARIZONA_HEADPHONE_DETECT_1:
891 case ARIZONA_HEADPHONE_DETECT_2: 894 case ARIZONA_HEADPHONE_DETECT_2:
@@ -1328,6 +1331,7 @@ static bool wm8997_readable_register(struct device *dev, unsigned int reg)
1328 case ARIZONA_INTERRUPT_STATUS_4: 1331 case ARIZONA_INTERRUPT_STATUS_4:
1329 case ARIZONA_INTERRUPT_STATUS_5: 1332 case ARIZONA_INTERRUPT_STATUS_5:
1330 case ARIZONA_INTERRUPT_STATUS_1_MASK: 1333 case ARIZONA_INTERRUPT_STATUS_1_MASK:
1334 case ARIZONA_INTERRUPT_STATUS_2_MASK:
1331 case ARIZONA_INTERRUPT_STATUS_3_MASK: 1335 case ARIZONA_INTERRUPT_STATUS_3_MASK:
1332 case ARIZONA_INTERRUPT_STATUS_4_MASK: 1336 case ARIZONA_INTERRUPT_STATUS_4_MASK:
1333 case ARIZONA_INTERRUPT_STATUS_5_MASK: 1337 case ARIZONA_INTERRUPT_STATUS_5_MASK:
@@ -1477,6 +1481,8 @@ static bool wm8997_volatile_register(struct device *dev, unsigned int reg)
1477 case ARIZONA_SAMPLE_RATE_3_STATUS: 1481 case ARIZONA_SAMPLE_RATE_3_STATUS:
1478 case ARIZONA_ASYNC_SAMPLE_RATE_1_STATUS: 1482 case ARIZONA_ASYNC_SAMPLE_RATE_1_STATUS:
1479 case ARIZONA_MIC_DETECT_3: 1483 case ARIZONA_MIC_DETECT_3:
1484 case ARIZONA_HP_CTRL_1L:
1485 case ARIZONA_HP_CTRL_1R:
1480 case ARIZONA_HEADPHONE_DETECT_2: 1486 case ARIZONA_HEADPHONE_DETECT_2:
1481 case ARIZONA_INPUT_ENABLES_STATUS: 1487 case ARIZONA_INPUT_ENABLES_STATUS:
1482 case ARIZONA_OUTPUT_STATUS_1: 1488 case ARIZONA_OUTPUT_STATUS_1:
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index bbeb4516facf..006242c8bca0 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -75,7 +75,7 @@ config ATMEL_TCB_CLKSRC
75config ATMEL_TCB_CLKSRC_BLOCK 75config ATMEL_TCB_CLKSRC_BLOCK
76 int 76 int
77 depends on ATMEL_TCB_CLKSRC 77 depends on ATMEL_TCB_CLKSRC
78 prompt "TC Block" if ARCH_AT91RM9200 || ARCH_AT91SAM9260 || CPU_AT32AP700X 78 prompt "TC Block" if CPU_AT32AP700X
79 default 0 79 default 0
80 range 0 1 80 range 0 1
81 help 81 help
diff --git a/drivers/misc/apds990x.c b/drivers/misc/apds990x.c
index 868a30a1b417..3739ffa9cdf1 100644
--- a/drivers/misc/apds990x.c
+++ b/drivers/misc/apds990x.c
@@ -609,7 +609,7 @@ static int apds990x_detect(struct apds990x_chip *chip)
609 return ret; 609 return ret;
610} 610}
611 611
612#if defined(CONFIG_PM) || defined(CONFIG_PM_RUNTIME) 612#ifdef CONFIG_PM
613static int apds990x_chip_on(struct apds990x_chip *chip) 613static int apds990x_chip_on(struct apds990x_chip *chip)
614{ 614{
615 int err = regulator_bulk_enable(ARRAY_SIZE(chip->regs), 615 int err = regulator_bulk_enable(ARRAY_SIZE(chip->regs),
@@ -1237,7 +1237,7 @@ static int apds990x_resume(struct device *dev)
1237} 1237}
1238#endif 1238#endif
1239 1239
1240#ifdef CONFIG_PM_RUNTIME 1240#ifdef CONFIG_PM
1241static int apds990x_runtime_suspend(struct device *dev) 1241static int apds990x_runtime_suspend(struct device *dev)
1242{ 1242{
1243 struct i2c_client *client = container_of(dev, struct i2c_client, dev); 1243 struct i2c_client *client = container_of(dev, struct i2c_client, dev);
diff --git a/drivers/misc/bh1770glc.c b/drivers/misc/bh1770glc.c
index 7b55f8a152d4..b756381b8250 100644
--- a/drivers/misc/bh1770glc.c
+++ b/drivers/misc/bh1770glc.c
@@ -1358,7 +1358,7 @@ static int bh1770_resume(struct device *dev)
1358} 1358}
1359#endif 1359#endif
1360 1360
1361#ifdef CONFIG_PM_RUNTIME 1361#ifdef CONFIG_PM
1362static int bh1770_runtime_suspend(struct device *dev) 1362static int bh1770_runtime_suspend(struct device *dev)
1363{ 1363{
1364 struct i2c_client *client = container_of(dev, struct i2c_client, dev); 1364 struct i2c_client *client = container_of(dev, struct i2c_client, dev);
diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c
index 634f72929e12..0a1af93ec638 100644
--- a/drivers/misc/eeprom/at25.c
+++ b/drivers/misc/eeprom/at25.c
@@ -18,7 +18,7 @@
18 18
19#include <linux/spi/spi.h> 19#include <linux/spi/spi.h>
20#include <linux/spi/eeprom.h> 20#include <linux/spi/eeprom.h>
21#include <linux/of.h> 21#include <linux/property.h>
22 22
23/* 23/*
24 * NOTE: this is an *EEPROM* driver. The vagaries of product naming 24 * NOTE: this is an *EEPROM* driver. The vagaries of product naming
@@ -301,35 +301,33 @@ static ssize_t at25_mem_write(struct memory_accessor *mem, const char *buf,
301 301
302/*-------------------------------------------------------------------------*/ 302/*-------------------------------------------------------------------------*/
303 303
304static int at25_np_to_chip(struct device *dev, 304static int at25_fw_to_chip(struct device *dev, struct spi_eeprom *chip)
305 struct device_node *np,
306 struct spi_eeprom *chip)
307{ 305{
308 u32 val; 306 u32 val;
309 307
310 memset(chip, 0, sizeof(*chip)); 308 memset(chip, 0, sizeof(*chip));
311 strncpy(chip->name, np->name, sizeof(chip->name)); 309 strncpy(chip->name, "at25", sizeof(chip->name));
312 310
313 if (of_property_read_u32(np, "size", &val) == 0 || 311 if (device_property_read_u32(dev, "size", &val) == 0 ||
314 of_property_read_u32(np, "at25,byte-len", &val) == 0) { 312 device_property_read_u32(dev, "at25,byte-len", &val) == 0) {
315 chip->byte_len = val; 313 chip->byte_len = val;
316 } else { 314 } else {
317 dev_err(dev, "Error: missing \"size\" property\n"); 315 dev_err(dev, "Error: missing \"size\" property\n");
318 return -ENODEV; 316 return -ENODEV;
319 } 317 }
320 318
321 if (of_property_read_u32(np, "pagesize", &val) == 0 || 319 if (device_property_read_u32(dev, "pagesize", &val) == 0 ||
322 of_property_read_u32(np, "at25,page-size", &val) == 0) { 320 device_property_read_u32(dev, "at25,page-size", &val) == 0) {
323 chip->page_size = (u16)val; 321 chip->page_size = (u16)val;
324 } else { 322 } else {
325 dev_err(dev, "Error: missing \"pagesize\" property\n"); 323 dev_err(dev, "Error: missing \"pagesize\" property\n");
326 return -ENODEV; 324 return -ENODEV;
327 } 325 }
328 326
329 if (of_property_read_u32(np, "at25,addr-mode", &val) == 0) { 327 if (device_property_read_u32(dev, "at25,addr-mode", &val) == 0) {
330 chip->flags = (u16)val; 328 chip->flags = (u16)val;
331 } else { 329 } else {
332 if (of_property_read_u32(np, "address-width", &val)) { 330 if (device_property_read_u32(dev, "address-width", &val)) {
333 dev_err(dev, 331 dev_err(dev,
334 "Error: missing \"address-width\" property\n"); 332 "Error: missing \"address-width\" property\n");
335 return -ENODEV; 333 return -ENODEV;
@@ -350,7 +348,7 @@ static int at25_np_to_chip(struct device *dev,
350 val); 348 val);
351 return -ENODEV; 349 return -ENODEV;
352 } 350 }
353 if (of_find_property(np, "read-only", NULL)) 351 if (device_property_present(dev, "read-only"))
354 chip->flags |= EE_READONLY; 352 chip->flags |= EE_READONLY;
355 } 353 }
356 return 0; 354 return 0;
@@ -360,21 +358,15 @@ static int at25_probe(struct spi_device *spi)
360{ 358{
361 struct at25_data *at25 = NULL; 359 struct at25_data *at25 = NULL;
362 struct spi_eeprom chip; 360 struct spi_eeprom chip;
363 struct device_node *np = spi->dev.of_node;
364 int err; 361 int err;
365 int sr; 362 int sr;
366 int addrlen; 363 int addrlen;
367 364
368 /* Chip description */ 365 /* Chip description */
369 if (!spi->dev.platform_data) { 366 if (!spi->dev.platform_data) {
370 if (np) { 367 err = at25_fw_to_chip(&spi->dev, &chip);
371 err = at25_np_to_chip(&spi->dev, np, &chip); 368 if (err)
372 if (err) 369 return err;
373 return err;
374 } else {
375 dev_err(&spi->dev, "Error: no chip description\n");
376 return -ENODEV;
377 }
378 } else 370 } else
379 chip = *(struct spi_eeprom *)spi->dev.platform_data; 371 chip = *(struct spi_eeprom *)spi->dev.platform_data;
380 372
diff --git a/drivers/misc/eeprom/eeprom_93cx6.c b/drivers/misc/eeprom/eeprom_93cx6.c
index 0ff4b02177be..0cf2c9d676be 100644
--- a/drivers/misc/eeprom/eeprom_93cx6.c
+++ b/drivers/misc/eeprom/eeprom_93cx6.c
@@ -170,7 +170,7 @@ static void eeprom_93cx6_read_bits(struct eeprom_93cx6 *eeprom,
170} 170}
171 171
172/** 172/**
173 * eeprom_93cx6_read - Read multiple words from eeprom 173 * eeprom_93cx6_read - Read a word from eeprom
174 * @eeprom: Pointer to eeprom structure 174 * @eeprom: Pointer to eeprom structure
175 * @word: Word index from where we should start reading 175 * @word: Word index from where we should start reading
176 * @data: target pointer where the information will have to be stored 176 * @data: target pointer where the information will have to be stored
@@ -235,6 +235,66 @@ void eeprom_93cx6_multiread(struct eeprom_93cx6 *eeprom, const u8 word,
235EXPORT_SYMBOL_GPL(eeprom_93cx6_multiread); 235EXPORT_SYMBOL_GPL(eeprom_93cx6_multiread);
236 236
237/** 237/**
238 * eeprom_93cx6_readb - Read a byte from eeprom
239 * @eeprom: Pointer to eeprom structure
240 * @word: Byte index from where we should start reading
241 * @data: target pointer where the information will have to be stored
242 *
243 * This function will read a byte of the eeprom data
244 * into the given data pointer.
245 */
246void eeprom_93cx6_readb(struct eeprom_93cx6 *eeprom, const u8 byte,
247 u8 *data)
248{
249 u16 command;
250 u16 tmp;
251
252 /*
253 * Initialize the eeprom register
254 */
255 eeprom_93cx6_startup(eeprom);
256
257 /*
258 * Select the read opcode and the byte to be read.
259 */
260 command = (PCI_EEPROM_READ_OPCODE << (eeprom->width + 1)) | byte;
261 eeprom_93cx6_write_bits(eeprom, command,
262 PCI_EEPROM_WIDTH_OPCODE + eeprom->width + 1);
263
264 /*
265 * Read the requested 8 bits.
266 */
267 eeprom_93cx6_read_bits(eeprom, &tmp, 8);
268 *data = tmp & 0xff;
269
270 /*
271 * Cleanup eeprom register.
272 */
273 eeprom_93cx6_cleanup(eeprom);
274}
275EXPORT_SYMBOL_GPL(eeprom_93cx6_readb);
276
277/**
278 * eeprom_93cx6_multireadb - Read multiple bytes from eeprom
279 * @eeprom: Pointer to eeprom structure
280 * @byte: Index from where we should start reading
281 * @data: target pointer where the information will have to be stored
282 * @words: Number of bytes that should be read.
283 *
284 * This function will read all requested bytes from the eeprom,
285 * this is done by calling eeprom_93cx6_readb() multiple times.
286 */
287void eeprom_93cx6_multireadb(struct eeprom_93cx6 *eeprom, const u8 byte,
288 u8 *data, const u16 bytes)
289{
290 unsigned int i;
291
292 for (i = 0; i < bytes; i++)
293 eeprom_93cx6_readb(eeprom, byte + i, &data[i]);
294}
295EXPORT_SYMBOL_GPL(eeprom_93cx6_multireadb);
296
297/**
238 * eeprom_93cx6_wren - set the write enable state 298 * eeprom_93cx6_wren - set the write enable state
239 * @eeprom: Pointer to eeprom structure 299 * @eeprom: Pointer to eeprom structure
240 * @enable: true to enable writes, otherwise disable writes 300 * @enable: true to enable writes, otherwise disable writes
diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
index 2cf2bbc0b927..180a5442fd4b 100644
--- a/drivers/misc/enclosure.c
+++ b/drivers/misc/enclosure.c
@@ -187,6 +187,7 @@ void enclosure_unregister(struct enclosure_device *edev)
187EXPORT_SYMBOL_GPL(enclosure_unregister); 187EXPORT_SYMBOL_GPL(enclosure_unregister);
188 188
189#define ENCLOSURE_NAME_SIZE 64 189#define ENCLOSURE_NAME_SIZE 64
190#define COMPONENT_NAME_SIZE 64
190 191
191static void enclosure_link_name(struct enclosure_component *cdev, char *name) 192static void enclosure_link_name(struct enclosure_component *cdev, char *name)
192{ 193{
@@ -246,6 +247,29 @@ static void enclosure_component_release(struct device *dev)
246 put_device(dev->parent); 247 put_device(dev->parent);
247} 248}
248 249
250static struct enclosure_component *
251enclosure_component_find_by_name(struct enclosure_device *edev,
252 const char *name)
253{
254 int i;
255 const char *cname;
256 struct enclosure_component *ecomp;
257
258 if (!edev || !name || !name[0])
259 return NULL;
260
261 for (i = 0; i < edev->components; i++) {
262 ecomp = &edev->component[i];
263 cname = dev_name(&ecomp->cdev);
264 if (ecomp->number != -1 &&
265 cname && cname[0] &&
266 !strcmp(cname, name))
267 return ecomp;
268 }
269
270 return NULL;
271}
272
249static const struct attribute_group *enclosure_component_groups[]; 273static const struct attribute_group *enclosure_component_groups[];
250 274
251/** 275/**
@@ -269,7 +293,8 @@ enclosure_component_register(struct enclosure_device *edev,
269{ 293{
270 struct enclosure_component *ecomp; 294 struct enclosure_component *ecomp;
271 struct device *cdev; 295 struct device *cdev;
272 int err; 296 int err, i;
297 char newname[COMPONENT_NAME_SIZE];
273 298
274 if (number >= edev->components) 299 if (number >= edev->components)
275 return ERR_PTR(-EINVAL); 300 return ERR_PTR(-EINVAL);
@@ -283,9 +308,20 @@ enclosure_component_register(struct enclosure_device *edev,
283 ecomp->number = number; 308 ecomp->number = number;
284 cdev = &ecomp->cdev; 309 cdev = &ecomp->cdev;
285 cdev->parent = get_device(&edev->edev); 310 cdev->parent = get_device(&edev->edev);
286 if (name && name[0]) 311
287 dev_set_name(cdev, "%s", name); 312 if (name && name[0]) {
288 else 313 /* Some hardware (e.g. enclosure in RX300 S6) has components
314 * with non unique names. Registering duplicates in sysfs
315 * will lead to warnings during bootup. So make the names
316 * unique by appending consecutive numbers -1, -2, ... */
317 i = 1;
318 snprintf(newname, COMPONENT_NAME_SIZE,
319 "%s", name);
320 while (enclosure_component_find_by_name(edev, newname))
321 snprintf(newname, COMPONENT_NAME_SIZE,
322 "%s-%i", name, i++);
323 dev_set_name(cdev, "%s", newname);
324 } else
289 dev_set_name(cdev, "%u", number); 325 dev_set_name(cdev, "%u", number);
290 326
291 cdev->release = enclosure_component_release; 327 cdev->release = enclosure_component_release;
diff --git a/drivers/misc/genwqe/card_dev.c b/drivers/misc/genwqe/card_dev.c
index 5918586f2f76..c49d244265ec 100644
--- a/drivers/misc/genwqe/card_dev.c
+++ b/drivers/misc/genwqe/card_dev.c
@@ -395,7 +395,7 @@ static void genwqe_vma_open(struct vm_area_struct *vma)
395static void genwqe_vma_close(struct vm_area_struct *vma) 395static void genwqe_vma_close(struct vm_area_struct *vma)
396{ 396{
397 unsigned long vsize = vma->vm_end - vma->vm_start; 397 unsigned long vsize = vma->vm_end - vma->vm_start;
398 struct inode *inode = vma->vm_file->f_dentry->d_inode; 398 struct inode *inode = file_inode(vma->vm_file);
399 struct dma_mapping *dma_map; 399 struct dma_mapping *dma_map;
400 struct genwqe_dev *cd = container_of(inode->i_cdev, struct genwqe_dev, 400 struct genwqe_dev *cd = container_of(inode->i_cdev, struct genwqe_dev,
401 cdev_genwqe); 401 cdev_genwqe);
diff --git a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
index d324f8a97b88..63fe096d4462 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
@@ -235,7 +235,7 @@ static int lis3lv02d_i2c_resume(struct device *dev)
235} 235}
236#endif /* CONFIG_PM_SLEEP */ 236#endif /* CONFIG_PM_SLEEP */
237 237
238#ifdef CONFIG_PM_RUNTIME 238#ifdef CONFIG_PM
239static int lis3_i2c_runtime_suspend(struct device *dev) 239static int lis3_i2c_runtime_suspend(struct device *dev)
240{ 240{
241 struct i2c_client *client = container_of(dev, struct i2c_client, dev); 241 struct i2c_client *client = container_of(dev, struct i2c_client, dev);
@@ -253,7 +253,7 @@ static int lis3_i2c_runtime_resume(struct device *dev)
253 lis3lv02d_poweron(lis3); 253 lis3lv02d_poweron(lis3);
254 return 0; 254 return 0;
255} 255}
256#endif /* CONFIG_PM_RUNTIME */ 256#endif /* CONFIG_PM */
257 257
258static const struct i2c_device_id lis3lv02d_id[] = { 258static const struct i2c_device_id lis3lv02d_id[] = {
259 {"lis3lv02d", LIS3LV02D}, 259 {"lis3lv02d", LIS3LV02D},
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index 71744b16cc8c..61b04d7646f1 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -530,9 +530,9 @@ struct mei_device {
530 * Power Gating support 530 * Power Gating support
531 */ 531 */
532 enum mei_pg_event pg_event; 532 enum mei_pg_event pg_event;
533#ifdef CONFIG_PM_RUNTIME 533#ifdef CONFIG_PM
534 struct dev_pm_domain pg_domain; 534 struct dev_pm_domain pg_domain;
535#endif /* CONFIG_PM_RUNTIME */ 535#endif /* CONFIG_PM */
536 536
537 unsigned char rd_msg_buf[MEI_RD_MSG_BUF_SIZE]; 537 unsigned char rd_msg_buf[MEI_RD_MSG_BUF_SIZE];
538 u32 rd_msg_hdr; 538 u32 rd_msg_hdr;
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index f3225b1643ab..cf20d397068a 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -89,13 +89,13 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
89 89
90MODULE_DEVICE_TABLE(pci, mei_me_pci_tbl); 90MODULE_DEVICE_TABLE(pci, mei_me_pci_tbl);
91 91
92#ifdef CONFIG_PM_RUNTIME 92#ifdef CONFIG_PM
93static inline void mei_me_set_pm_domain(struct mei_device *dev); 93static inline void mei_me_set_pm_domain(struct mei_device *dev);
94static inline void mei_me_unset_pm_domain(struct mei_device *dev); 94static inline void mei_me_unset_pm_domain(struct mei_device *dev);
95#else 95#else
96static inline void mei_me_set_pm_domain(struct mei_device *dev) {} 96static inline void mei_me_set_pm_domain(struct mei_device *dev) {}
97static inline void mei_me_unset_pm_domain(struct mei_device *dev) {} 97static inline void mei_me_unset_pm_domain(struct mei_device *dev) {}
98#endif /* CONFIG_PM_RUNTIME */ 98#endif /* CONFIG_PM */
99 99
100/** 100/**
101 * mei_me_quirk_probe - probe for devices that doesn't valid ME interface 101 * mei_me_quirk_probe - probe for devices that doesn't valid ME interface
@@ -357,7 +357,7 @@ static int mei_me_pci_resume(struct device *device)
357} 357}
358#endif /* CONFIG_PM_SLEEP */ 358#endif /* CONFIG_PM_SLEEP */
359 359
360#ifdef CONFIG_PM_RUNTIME 360#ifdef CONFIG_PM
361static int mei_me_pm_runtime_idle(struct device *device) 361static int mei_me_pm_runtime_idle(struct device *device)
362{ 362{
363 struct pci_dev *pdev = to_pci_dev(device); 363 struct pci_dev *pdev = to_pci_dev(device);
@@ -453,9 +453,7 @@ static inline void mei_me_unset_pm_domain(struct mei_device *dev)
453 /* stop using pm callbacks if any */ 453 /* stop using pm callbacks if any */
454 dev->dev->pm_domain = NULL; 454 dev->dev->pm_domain = NULL;
455} 455}
456#endif /* CONFIG_PM_RUNTIME */
457 456
458#ifdef CONFIG_PM
459static const struct dev_pm_ops mei_me_pm_ops = { 457static const struct dev_pm_ops mei_me_pm_ops = {
460 SET_SYSTEM_SLEEP_PM_OPS(mei_me_pci_suspend, 458 SET_SYSTEM_SLEEP_PM_OPS(mei_me_pci_suspend,
461 mei_me_pci_resume) 459 mei_me_pci_resume)
diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c
index bee1c6fb7e75..1f572deacf54 100644
--- a/drivers/misc/mei/pci-txe.c
+++ b/drivers/misc/mei/pci-txe.c
@@ -42,13 +42,13 @@ static const struct pci_device_id mei_txe_pci_tbl[] = {
42}; 42};
43MODULE_DEVICE_TABLE(pci, mei_txe_pci_tbl); 43MODULE_DEVICE_TABLE(pci, mei_txe_pci_tbl);
44 44
45#ifdef CONFIG_PM_RUNTIME 45#ifdef CONFIG_PM
46static inline void mei_txe_set_pm_domain(struct mei_device *dev); 46static inline void mei_txe_set_pm_domain(struct mei_device *dev);
47static inline void mei_txe_unset_pm_domain(struct mei_device *dev); 47static inline void mei_txe_unset_pm_domain(struct mei_device *dev);
48#else 48#else
49static inline void mei_txe_set_pm_domain(struct mei_device *dev) {} 49static inline void mei_txe_set_pm_domain(struct mei_device *dev) {}
50static inline void mei_txe_unset_pm_domain(struct mei_device *dev) {} 50static inline void mei_txe_unset_pm_domain(struct mei_device *dev) {}
51#endif /* CONFIG_PM_RUNTIME */ 51#endif /* CONFIG_PM */
52 52
53static void mei_txe_pci_iounmap(struct pci_dev *pdev, struct mei_txe_hw *hw) 53static void mei_txe_pci_iounmap(struct pci_dev *pdev, struct mei_txe_hw *hw)
54{ 54{
@@ -295,7 +295,7 @@ static int mei_txe_pci_resume(struct device *device)
295} 295}
296#endif /* CONFIG_PM_SLEEP */ 296#endif /* CONFIG_PM_SLEEP */
297 297
298#ifdef CONFIG_PM_RUNTIME 298#ifdef CONFIG_PM
299static int mei_txe_pm_runtime_idle(struct device *device) 299static int mei_txe_pm_runtime_idle(struct device *device)
300{ 300{
301 struct pci_dev *pdev = to_pci_dev(device); 301 struct pci_dev *pdev = to_pci_dev(device);
@@ -401,9 +401,7 @@ static inline void mei_txe_unset_pm_domain(struct mei_device *dev)
401 /* stop using pm callbacks if any */ 401 /* stop using pm callbacks if any */
402 dev->dev->pm_domain = NULL; 402 dev->dev->pm_domain = NULL;
403} 403}
404#endif /* CONFIG_PM_RUNTIME */
405 404
406#ifdef CONFIG_PM
407static const struct dev_pm_ops mei_txe_pm_ops = { 405static const struct dev_pm_ops mei_txe_pm_ops = {
408 SET_SYSTEM_SLEEP_PM_OPS(mei_txe_pci_suspend, 406 SET_SYSTEM_SLEEP_PM_OPS(mei_txe_pci_suspend,
409 mei_txe_pci_resume) 407 mei_txe_pci_resume)
diff --git a/drivers/misc/mic/card/mic_virtio.c b/drivers/misc/mic/card/mic_virtio.c
index e64794730e21..e486a0c26267 100644
--- a/drivers/misc/mic/card/mic_virtio.c
+++ b/drivers/misc/mic/card/mic_virtio.c
@@ -68,7 +68,7 @@ static inline struct device *mic_dev(struct mic_vdev *mvdev)
68} 68}
69 69
70/* This gets the device's feature bits. */ 70/* This gets the device's feature bits. */
71static u32 mic_get_features(struct virtio_device *vdev) 71static u64 mic_get_features(struct virtio_device *vdev)
72{ 72{
73 unsigned int i, bits; 73 unsigned int i, bits;
74 u32 features = 0; 74 u32 features = 0;
@@ -76,8 +76,7 @@ static u32 mic_get_features(struct virtio_device *vdev)
76 u8 __iomem *in_features = mic_vq_features(desc); 76 u8 __iomem *in_features = mic_vq_features(desc);
77 int feature_len = ioread8(&desc->feature_len); 77 int feature_len = ioread8(&desc->feature_len);
78 78
79 bits = min_t(unsigned, feature_len, 79 bits = min_t(unsigned, feature_len, sizeof(features)) * 8;
80 sizeof(vdev->features)) * 8;
81 for (i = 0; i < bits; i++) 80 for (i = 0; i < bits; i++)
82 if (ioread8(&in_features[i / 8]) & (BIT(i % 8))) 81 if (ioread8(&in_features[i / 8]) & (BIT(i % 8)))
83 features |= BIT(i); 82 features |= BIT(i);
@@ -85,7 +84,7 @@ static u32 mic_get_features(struct virtio_device *vdev)
85 return features; 84 return features;
86} 85}
87 86
88static void mic_finalize_features(struct virtio_device *vdev) 87static int mic_finalize_features(struct virtio_device *vdev)
89{ 88{
90 unsigned int i, bits; 89 unsigned int i, bits;
91 struct mic_device_desc __iomem *desc = to_micvdev(vdev)->desc; 90 struct mic_device_desc __iomem *desc = to_micvdev(vdev)->desc;
@@ -97,14 +96,19 @@ static void mic_finalize_features(struct virtio_device *vdev)
97 /* Give virtio_ring a chance to accept features. */ 96 /* Give virtio_ring a chance to accept features. */
98 vring_transport_features(vdev); 97 vring_transport_features(vdev);
99 98
99 /* Make sure we don't have any features > 32 bits! */
100 BUG_ON((u32)vdev->features != vdev->features);
101
100 memset_io(out_features, 0, feature_len); 102 memset_io(out_features, 0, feature_len);
101 bits = min_t(unsigned, feature_len, 103 bits = min_t(unsigned, feature_len,
102 sizeof(vdev->features)) * 8; 104 sizeof(vdev->features)) * 8;
103 for (i = 0; i < bits; i++) { 105 for (i = 0; i < bits; i++) {
104 if (test_bit(i, vdev->features)) 106 if (__virtio_test_bit(vdev, i))
105 iowrite8(ioread8(&out_features[i / 8]) | (1 << (i % 8)), 107 iowrite8(ioread8(&out_features[i / 8]) | (1 << (i % 8)),
106 &out_features[i / 8]); 108 &out_features[i / 8]);
107 } 109 }
110
111 return 0;
108} 112}
109 113
110/* 114/*
diff --git a/drivers/misc/vexpress-syscfg.c b/drivers/misc/vexpress-syscfg.c
index b3a812384a6f..c344483fa7d6 100644
--- a/drivers/misc/vexpress-syscfg.c
+++ b/drivers/misc/vexpress-syscfg.c
@@ -145,7 +145,7 @@ static struct regmap_config vexpress_syscfg_regmap_config = {
145static struct regmap *vexpress_syscfg_regmap_init(struct device *dev, 145static struct regmap *vexpress_syscfg_regmap_init(struct device *dev,
146 void *context) 146 void *context)
147{ 147{
148 struct platform_device *pdev = to_platform_device(dev); 148 int err;
149 struct vexpress_syscfg *syscfg = context; 149 struct vexpress_syscfg *syscfg = context;
150 struct vexpress_syscfg_func *func; 150 struct vexpress_syscfg_func *func;
151 struct property *prop; 151 struct property *prop;
@@ -155,32 +155,18 @@ static struct regmap *vexpress_syscfg_regmap_init(struct device *dev,
155 u32 site, position, dcc; 155 u32 site, position, dcc;
156 int i; 156 int i;
157 157
158 if (dev->of_node) { 158 err = vexpress_config_get_topo(dev->of_node, &site,
159 int err = vexpress_config_get_topo(dev->of_node, &site,
160 &position, &dcc); 159 &position, &dcc);
160 if (err)
161 return ERR_PTR(err);
161 162
162 if (err) 163 prop = of_find_property(dev->of_node,
163 return ERR_PTR(err); 164 "arm,vexpress-sysreg,func", NULL);
164 165 if (!prop)
165 prop = of_find_property(dev->of_node, 166 return ERR_PTR(-EINVAL);
166 "arm,vexpress-sysreg,func", NULL); 167
167 if (!prop) 168 num = prop->length / sizeof(u32) / 2;
168 return ERR_PTR(-EINVAL); 169 val = prop->value;
169
170 num = prop->length / sizeof(u32) / 2;
171 val = prop->value;
172 } else {
173 if (pdev->num_resources != 1 ||
174 pdev->resource[0].flags != IORESOURCE_BUS)
175 return ERR_PTR(-EFAULT);
176
177 site = pdev->resource[0].start;
178 if (site == VEXPRESS_SITE_MASTER)
179 site = vexpress_config_get_master();
180 position = 0;
181 dcc = 0;
182 num = 1;
183 }
184 170
185 /* 171 /*
186 * "arm,vexpress-energy" function used to be described 172 * "arm,vexpress-energy" function used to be described
@@ -207,13 +193,8 @@ static struct regmap *vexpress_syscfg_regmap_init(struct device *dev,
207 for (i = 0; i < num; i++) { 193 for (i = 0; i < num; i++) {
208 u32 function, device; 194 u32 function, device;
209 195
210 if (dev->of_node) { 196 function = be32_to_cpup(val++);
211 function = be32_to_cpup(val++); 197 device = be32_to_cpup(val++);
212 device = be32_to_cpup(val++);
213 } else {
214 function = pdev->resource[0].end;
215 device = pdev->id;
216 }
217 198
218 dev_dbg(dev, "func %p: %u/%u/%u/%u/%u\n", 199 dev_dbg(dev, "func %p: %u/%u/%u/%u/%u\n",
219 func, site, position, dcc, 200 func, site, position, dcc,
@@ -265,17 +246,6 @@ static struct vexpress_config_bridge_ops vexpress_syscfg_bridge_ops = {
265}; 246};
266 247
267 248
268/* Non-DT hack, to be gone... */
269static struct device *vexpress_syscfg_bridge;
270
271int vexpress_syscfg_device_register(struct platform_device *pdev)
272{
273 pdev->dev.parent = vexpress_syscfg_bridge;
274
275 return platform_device_register(pdev);
276}
277
278
279static int vexpress_syscfg_probe(struct platform_device *pdev) 249static int vexpress_syscfg_probe(struct platform_device *pdev)
280{ 250{
281 struct vexpress_syscfg *syscfg; 251 struct vexpress_syscfg *syscfg;
@@ -303,10 +273,6 @@ static int vexpress_syscfg_probe(struct platform_device *pdev)
303 if (IS_ERR(bridge)) 273 if (IS_ERR(bridge))
304 return PTR_ERR(bridge); 274 return PTR_ERR(bridge);
305 275
306 /* Non-DT case */
307 if (!pdev->dev.of_node)
308 vexpress_syscfg_bridge = bridge;
309
310 return 0; 276 return 0;
311} 277}
312 278
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 1fa4c80ff886..4409d79ed650 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -78,13 +78,16 @@ static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;
78 78
79/* 79/*
80 * We've only got one major, so number of mmcblk devices is 80 * We've only got one major, so number of mmcblk devices is
81 * limited to 256 / number of minors per device. 81 * limited to (1 << 20) / number of minors per device. It is also
82 * currently limited by the size of the static bitmaps below.
82 */ 83 */
83static int max_devices; 84static int max_devices;
84 85
85/* 256 minors, so at most 256 separate devices */ 86#define MAX_DEVICES 256
86static DECLARE_BITMAP(dev_use, 256); 87
87static DECLARE_BITMAP(name_use, 256); 88/* TODO: Replace these with struct ida */
89static DECLARE_BITMAP(dev_use, MAX_DEVICES);
90static DECLARE_BITMAP(name_use, MAX_DEVICES);
88 91
89/* 92/*
90 * There is one mmc_blk_data per slot. 93 * There is one mmc_blk_data per slot.
@@ -112,7 +115,7 @@ struct mmc_blk_data {
112 115
113 /* 116 /*
114 * Only set in main mmc_blk_data associated 117 * Only set in main mmc_blk_data associated
115 * with mmc_card with mmc_set_drvdata, and keeps 118 * with mmc_card with dev_set_drvdata, and keeps
116 * track of the current selected device partition. 119 * track of the current selected device partition.
117 */ 120 */
118 unsigned int part_curr; 121 unsigned int part_curr;
@@ -260,7 +263,7 @@ static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
260 int ret; 263 int ret;
261 struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); 264 struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
262 265
263 ret = snprintf(buf, PAGE_SIZE, "%d", 266 ret = snprintf(buf, PAGE_SIZE, "%d\n",
264 get_disk_ro(dev_to_disk(dev)) ^ 267 get_disk_ro(dev_to_disk(dev)) ^
265 md->read_only); 268 md->read_only);
266 mmc_blk_put(md); 269 mmc_blk_put(md);
@@ -642,7 +645,7 @@ static inline int mmc_blk_part_switch(struct mmc_card *card,
642 struct mmc_blk_data *md) 645 struct mmc_blk_data *md)
643{ 646{
644 int ret; 647 int ret;
645 struct mmc_blk_data *main_md = mmc_get_drvdata(card); 648 struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev);
646 649
647 if (main_md->part_curr == md->part_type) 650 if (main_md->part_curr == md->part_type)
648 return 0; 651 return 0;
@@ -1004,7 +1007,8 @@ static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
1004 err = mmc_hw_reset(host); 1007 err = mmc_hw_reset(host);
1005 /* Ensure we switch back to the correct partition */ 1008 /* Ensure we switch back to the correct partition */
1006 if (err != -EOPNOTSUPP) { 1009 if (err != -EOPNOTSUPP) {
1007 struct mmc_blk_data *main_md = mmc_get_drvdata(host->card); 1010 struct mmc_blk_data *main_md =
1011 dev_get_drvdata(&host->card->dev);
1008 int part_err; 1012 int part_err;
1009 1013
1010 main_md->part_curr = main_md->part_type; 1014 main_md->part_curr = main_md->part_type;
@@ -1308,19 +1312,11 @@ static int mmc_blk_packed_err_check(struct mmc_card *card,
1308 } 1312 }
1309 1313
1310 if (status & R1_EXCEPTION_EVENT) { 1314 if (status & R1_EXCEPTION_EVENT) {
1311 ext_csd = kzalloc(512, GFP_KERNEL); 1315 err = mmc_get_ext_csd(card, &ext_csd);
1312 if (!ext_csd) {
1313 pr_err("%s: unable to allocate buffer for ext_csd\n",
1314 req->rq_disk->disk_name);
1315 return -ENOMEM;
1316 }
1317
1318 err = mmc_send_ext_csd(card, ext_csd);
1319 if (err) { 1316 if (err) {
1320 pr_err("%s: error %d sending ext_csd\n", 1317 pr_err("%s: error %d sending ext_csd\n",
1321 req->rq_disk->disk_name, err); 1318 req->rq_disk->disk_name, err);
1322 check = MMC_BLK_ABORT; 1319 return MMC_BLK_ABORT;
1323 goto free;
1324 } 1320 }
1325 1321
1326 if ((ext_csd[EXT_CSD_EXP_EVENTS_STATUS] & 1322 if ((ext_csd[EXT_CSD_EXP_EVENTS_STATUS] &
@@ -1338,7 +1334,6 @@ static int mmc_blk_packed_err_check(struct mmc_card *card,
1338 req->rq_disk->disk_name, packed->nr_entries, 1334 req->rq_disk->disk_name, packed->nr_entries,
1339 packed->blocks, packed->idx_failure); 1335 packed->blocks, packed->idx_failure);
1340 } 1336 }
1341free:
1342 kfree(ext_csd); 1337 kfree(ext_csd);
1343 } 1338 }
1344 1339
@@ -2093,7 +2088,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
2093 2088
2094 /* 2089 /*
2095 * !subname implies we are creating main mmc_blk_data that will be 2090 * !subname implies we are creating main mmc_blk_data that will be
2096 * associated with mmc_card with mmc_set_drvdata. Due to device 2091 * associated with mmc_card with dev_set_drvdata. Due to device
2097 * partitions, devidx will not coincide with a per-physical card 2092 * partitions, devidx will not coincide with a per-physical card
2098 * index anymore so we keep track of a name index. 2093 * index anymore so we keep track of a name index.
2099 */ 2094 */
@@ -2425,8 +2420,9 @@ static const struct mmc_fixup blk_fixups[] =
2425 END_FIXUP 2420 END_FIXUP
2426}; 2421};
2427 2422
2428static int mmc_blk_probe(struct mmc_card *card) 2423static int mmc_blk_probe(struct device *dev)
2429{ 2424{
2425 struct mmc_card *card = mmc_dev_to_card(dev);
2430 struct mmc_blk_data *md, *part_md; 2426 struct mmc_blk_data *md, *part_md;
2431 char cap_str[10]; 2427 char cap_str[10];
2432 2428
@@ -2451,7 +2447,7 @@ static int mmc_blk_probe(struct mmc_card *card)
2451 if (mmc_blk_alloc_parts(card, md)) 2447 if (mmc_blk_alloc_parts(card, md))
2452 goto out; 2448 goto out;
2453 2449
2454 mmc_set_drvdata(card, md); 2450 dev_set_drvdata(dev, md);
2455 2451
2456 if (mmc_add_disk(md)) 2452 if (mmc_add_disk(md))
2457 goto out; 2453 goto out;
@@ -2481,9 +2477,10 @@ static int mmc_blk_probe(struct mmc_card *card)
2481 return 0; 2477 return 0;
2482} 2478}
2483 2479
2484static void mmc_blk_remove(struct mmc_card *card) 2480static int mmc_blk_remove(struct device *dev)
2485{ 2481{
2486 struct mmc_blk_data *md = mmc_get_drvdata(card); 2482 struct mmc_card *card = mmc_dev_to_card(dev);
2483 struct mmc_blk_data *md = dev_get_drvdata(dev);
2487 2484
2488 mmc_blk_remove_parts(card, md); 2485 mmc_blk_remove_parts(card, md);
2489 pm_runtime_get_sync(&card->dev); 2486 pm_runtime_get_sync(&card->dev);
@@ -2494,13 +2491,15 @@ static void mmc_blk_remove(struct mmc_card *card)
2494 pm_runtime_disable(&card->dev); 2491 pm_runtime_disable(&card->dev);
2495 pm_runtime_put_noidle(&card->dev); 2492 pm_runtime_put_noidle(&card->dev);
2496 mmc_blk_remove_req(md); 2493 mmc_blk_remove_req(md);
2497 mmc_set_drvdata(card, NULL); 2494 dev_set_drvdata(dev, NULL);
2495
2496 return 0;
2498} 2497}
2499 2498
2500static int _mmc_blk_suspend(struct mmc_card *card) 2499static int _mmc_blk_suspend(struct device *dev)
2501{ 2500{
2502 struct mmc_blk_data *part_md; 2501 struct mmc_blk_data *part_md;
2503 struct mmc_blk_data *md = mmc_get_drvdata(card); 2502 struct mmc_blk_data *md = dev_get_drvdata(dev);
2504 2503
2505 if (md) { 2504 if (md) {
2506 mmc_queue_suspend(&md->queue); 2505 mmc_queue_suspend(&md->queue);
@@ -2511,21 +2510,21 @@ static int _mmc_blk_suspend(struct mmc_card *card)
2511 return 0; 2510 return 0;
2512} 2511}
2513 2512
2514static void mmc_blk_shutdown(struct mmc_card *card) 2513static void mmc_blk_shutdown(struct device *dev)
2515{ 2514{
2516 _mmc_blk_suspend(card); 2515 _mmc_blk_suspend(dev);
2517} 2516}
2518 2517
2519#ifdef CONFIG_PM 2518#ifdef CONFIG_PM_SLEEP
2520static int mmc_blk_suspend(struct mmc_card *card) 2519static int mmc_blk_suspend(struct device *dev)
2521{ 2520{
2522 return _mmc_blk_suspend(card); 2521 return _mmc_blk_suspend(dev);
2523} 2522}
2524 2523
2525static int mmc_blk_resume(struct mmc_card *card) 2524static int mmc_blk_resume(struct device *dev)
2526{ 2525{
2527 struct mmc_blk_data *part_md; 2526 struct mmc_blk_data *part_md;
2528 struct mmc_blk_data *md = mmc_get_drvdata(card); 2527 struct mmc_blk_data *md = dev_get_drvdata(dev);
2529 2528
2530 if (md) { 2529 if (md) {
2531 /* 2530 /*
@@ -2540,19 +2539,15 @@ static int mmc_blk_resume(struct mmc_card *card)
2540 } 2539 }
2541 return 0; 2540 return 0;
2542} 2541}
2543#else
2544#define mmc_blk_suspend NULL
2545#define mmc_blk_resume NULL
2546#endif 2542#endif
2547 2543
2548static struct mmc_driver mmc_driver = { 2544static SIMPLE_DEV_PM_OPS(mmc_blk_pm_ops, mmc_blk_suspend, mmc_blk_resume);
2549 .drv = { 2545
2550 .name = "mmcblk", 2546static struct device_driver mmc_driver = {
2551 }, 2547 .name = "mmcblk",
2548 .pm = &mmc_blk_pm_ops,
2552 .probe = mmc_blk_probe, 2549 .probe = mmc_blk_probe,
2553 .remove = mmc_blk_remove, 2550 .remove = mmc_blk_remove,
2554 .suspend = mmc_blk_suspend,
2555 .resume = mmc_blk_resume,
2556 .shutdown = mmc_blk_shutdown, 2551 .shutdown = mmc_blk_shutdown,
2557}; 2552};
2558 2553
@@ -2563,7 +2558,7 @@ static int __init mmc_blk_init(void)
2563 if (perdev_minors != CONFIG_MMC_BLOCK_MINORS) 2558 if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
2564 pr_info("mmcblk: using %d minors per device\n", perdev_minors); 2559 pr_info("mmcblk: using %d minors per device\n", perdev_minors);
2565 2560
2566 max_devices = 256 / perdev_minors; 2561 max_devices = min(MAX_DEVICES, (1 << MINORBITS) / perdev_minors);
2567 2562
2568 res = register_blkdev(MMC_BLOCK_MAJOR, "mmc"); 2563 res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
2569 if (res) 2564 if (res)
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index 0c0fc52d42c5..0a7430f94d29 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -14,6 +14,7 @@
14#include <linux/mmc/host.h> 14#include <linux/mmc/host.h>
15#include <linux/mmc/mmc.h> 15#include <linux/mmc/mmc.h>
16#include <linux/slab.h> 16#include <linux/slab.h>
17#include <linux/device.h>
17 18
18#include <linux/scatterlist.h> 19#include <linux/scatterlist.h>
19#include <linux/swap.h> /* For nr_free_buffer_pages() */ 20#include <linux/swap.h> /* For nr_free_buffer_pages() */
@@ -32,6 +33,8 @@
32#define BUFFER_ORDER 2 33#define BUFFER_ORDER 2
33#define BUFFER_SIZE (PAGE_SIZE << BUFFER_ORDER) 34#define BUFFER_SIZE (PAGE_SIZE << BUFFER_ORDER)
34 35
36#define TEST_ALIGN_END 8
37
35/* 38/*
36 * Limit the test area size to the maximum MMC HC erase group size. Note that 39 * Limit the test area size to the maximum MMC HC erase group size. Note that
37 * the maximum SD allocation unit size is just 4MiB. 40 * the maximum SD allocation unit size is just 4MiB.
@@ -1174,7 +1177,7 @@ static int mmc_test_align_write(struct mmc_test_card *test)
1174 int ret, i; 1177 int ret, i;
1175 struct scatterlist sg; 1178 struct scatterlist sg;
1176 1179
1177 for (i = 1;i < 4;i++) { 1180 for (i = 1; i < TEST_ALIGN_END; i++) {
1178 sg_init_one(&sg, test->buffer + i, 512); 1181 sg_init_one(&sg, test->buffer + i, 512);
1179 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1); 1182 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
1180 if (ret) 1183 if (ret)
@@ -1189,7 +1192,7 @@ static int mmc_test_align_read(struct mmc_test_card *test)
1189 int ret, i; 1192 int ret, i;
1190 struct scatterlist sg; 1193 struct scatterlist sg;
1191 1194
1192 for (i = 1;i < 4;i++) { 1195 for (i = 1; i < TEST_ALIGN_END; i++) {
1193 sg_init_one(&sg, test->buffer + i, 512); 1196 sg_init_one(&sg, test->buffer + i, 512);
1194 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0); 1197 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
1195 if (ret) 1198 if (ret)
@@ -1216,7 +1219,7 @@ static int mmc_test_align_multi_write(struct mmc_test_card *test)
1216 if (size < 1024) 1219 if (size < 1024)
1217 return RESULT_UNSUP_HOST; 1220 return RESULT_UNSUP_HOST;
1218 1221
1219 for (i = 1;i < 4;i++) { 1222 for (i = 1; i < TEST_ALIGN_END; i++) {
1220 sg_init_one(&sg, test->buffer + i, size); 1223 sg_init_one(&sg, test->buffer + i, size);
1221 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1); 1224 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
1222 if (ret) 1225 if (ret)
@@ -1243,7 +1246,7 @@ static int mmc_test_align_multi_read(struct mmc_test_card *test)
1243 if (size < 1024) 1246 if (size < 1024)
1244 return RESULT_UNSUP_HOST; 1247 return RESULT_UNSUP_HOST;
1245 1248
1246 for (i = 1;i < 4;i++) { 1249 for (i = 1; i < TEST_ALIGN_END; i++) {
1247 sg_init_one(&sg, test->buffer + i, size); 1250 sg_init_one(&sg, test->buffer + i, size);
1248 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0); 1251 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
1249 if (ret) 1252 if (ret)
@@ -2997,8 +3000,9 @@ err:
2997 return ret; 3000 return ret;
2998} 3001}
2999 3002
3000static int mmc_test_probe(struct mmc_card *card) 3003static int mmc_test_probe(struct device *dev)
3001{ 3004{
3005 struct mmc_card *card = mmc_dev_to_card(dev);
3002 int ret; 3006 int ret;
3003 3007
3004 if (!mmc_card_mmc(card) && !mmc_card_sd(card)) 3008 if (!mmc_card_mmc(card) && !mmc_card_sd(card))
@@ -3013,20 +3017,22 @@ static int mmc_test_probe(struct mmc_card *card)
3013 return 0; 3017 return 0;
3014} 3018}
3015 3019
3016static void mmc_test_remove(struct mmc_card *card) 3020static int mmc_test_remove(struct device *dev)
3017{ 3021{
3022 struct mmc_card *card = mmc_dev_to_card(dev);
3023
3018 mmc_test_free_result(card); 3024 mmc_test_free_result(card);
3019 mmc_test_free_dbgfs_file(card); 3025 mmc_test_free_dbgfs_file(card);
3026
3027 return 0;
3020} 3028}
3021 3029
3022static void mmc_test_shutdown(struct mmc_card *card) 3030static void mmc_test_shutdown(struct device *dev)
3023{ 3031{
3024} 3032}
3025 3033
3026static struct mmc_driver mmc_driver = { 3034static struct device_driver mmc_driver = {
3027 .drv = { 3035 .name = "mmc_test",
3028 .name = "mmc_test",
3029 },
3030 .probe = mmc_test_probe, 3036 .probe = mmc_test_probe,
3031 .remove = mmc_test_remove, 3037 .remove = mmc_test_remove,
3032 .shutdown = mmc_test_shutdown, 3038 .shutdown = mmc_test_shutdown,
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index cfa6110632c3..236d194c2883 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -232,13 +232,15 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
232 if (!mqrq_cur->bounce_buf) { 232 if (!mqrq_cur->bounce_buf) {
233 pr_warn("%s: unable to allocate bounce cur buffer\n", 233 pr_warn("%s: unable to allocate bounce cur buffer\n",
234 mmc_card_name(card)); 234 mmc_card_name(card));
235 } 235 } else {
236 mqrq_prev->bounce_buf = kmalloc(bouncesz, GFP_KERNEL); 236 mqrq_prev->bounce_buf =
237 if (!mqrq_prev->bounce_buf) { 237 kmalloc(bouncesz, GFP_KERNEL);
238 pr_warn("%s: unable to allocate bounce prev buffer\n", 238 if (!mqrq_prev->bounce_buf) {
239 mmc_card_name(card)); 239 pr_warn("%s: unable to allocate bounce prev buffer\n",
240 kfree(mqrq_cur->bounce_buf); 240 mmc_card_name(card));
241 mqrq_cur->bounce_buf = NULL; 241 kfree(mqrq_cur->bounce_buf);
242 mqrq_cur->bounce_buf = NULL;
243 }
242 } 244 }
243 } 245 }
244 246
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index 8a1f1240e058..86d271148528 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -25,8 +25,6 @@
25#include "sdio_cis.h" 25#include "sdio_cis.h"
26#include "bus.h" 26#include "bus.h"
27 27
28#define to_mmc_driver(d) container_of(d, struct mmc_driver, drv)
29
30static ssize_t type_show(struct device *dev, 28static ssize_t type_show(struct device *dev,
31 struct device_attribute *attr, char *buf) 29 struct device_attribute *attr, char *buf)
32{ 30{
@@ -106,33 +104,14 @@ mmc_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
106 return retval; 104 return retval;
107} 105}
108 106
109static int mmc_bus_probe(struct device *dev)
110{
111 struct mmc_driver *drv = to_mmc_driver(dev->driver);
112 struct mmc_card *card = mmc_dev_to_card(dev);
113
114 return drv->probe(card);
115}
116
117static int mmc_bus_remove(struct device *dev)
118{
119 struct mmc_driver *drv = to_mmc_driver(dev->driver);
120 struct mmc_card *card = mmc_dev_to_card(dev);
121
122 drv->remove(card);
123
124 return 0;
125}
126
127static void mmc_bus_shutdown(struct device *dev) 107static void mmc_bus_shutdown(struct device *dev)
128{ 108{
129 struct mmc_driver *drv = to_mmc_driver(dev->driver);
130 struct mmc_card *card = mmc_dev_to_card(dev); 109 struct mmc_card *card = mmc_dev_to_card(dev);
131 struct mmc_host *host = card->host; 110 struct mmc_host *host = card->host;
132 int ret; 111 int ret;
133 112
134 if (dev->driver && drv->shutdown) 113 if (dev->driver && dev->driver->shutdown)
135 drv->shutdown(card); 114 dev->driver->shutdown(dev);
136 115
137 if (host->bus_ops->shutdown) { 116 if (host->bus_ops->shutdown) {
138 ret = host->bus_ops->shutdown(host); 117 ret = host->bus_ops->shutdown(host);
@@ -145,16 +124,13 @@ static void mmc_bus_shutdown(struct device *dev)
145#ifdef CONFIG_PM_SLEEP 124#ifdef CONFIG_PM_SLEEP
146static int mmc_bus_suspend(struct device *dev) 125static int mmc_bus_suspend(struct device *dev)
147{ 126{
148 struct mmc_driver *drv = to_mmc_driver(dev->driver);
149 struct mmc_card *card = mmc_dev_to_card(dev); 127 struct mmc_card *card = mmc_dev_to_card(dev);
150 struct mmc_host *host = card->host; 128 struct mmc_host *host = card->host;
151 int ret; 129 int ret;
152 130
153 if (dev->driver && drv->suspend) { 131 ret = pm_generic_suspend(dev);
154 ret = drv->suspend(card); 132 if (ret)
155 if (ret) 133 return ret;
156 return ret;
157 }
158 134
159 ret = host->bus_ops->suspend(host); 135 ret = host->bus_ops->suspend(host);
160 return ret; 136 return ret;
@@ -162,7 +138,6 @@ static int mmc_bus_suspend(struct device *dev)
162 138
163static int mmc_bus_resume(struct device *dev) 139static int mmc_bus_resume(struct device *dev)
164{ 140{
165 struct mmc_driver *drv = to_mmc_driver(dev->driver);
166 struct mmc_card *card = mmc_dev_to_card(dev); 141 struct mmc_card *card = mmc_dev_to_card(dev);
167 struct mmc_host *host = card->host; 142 struct mmc_host *host = card->host;
168 int ret; 143 int ret;
@@ -172,14 +147,12 @@ static int mmc_bus_resume(struct device *dev)
172 pr_warn("%s: error %d during resume (card was removed?)\n", 147 pr_warn("%s: error %d during resume (card was removed?)\n",
173 mmc_hostname(host), ret); 148 mmc_hostname(host), ret);
174 149
175 if (dev->driver && drv->resume) 150 ret = pm_generic_resume(dev);
176 ret = drv->resume(card);
177
178 return ret; 151 return ret;
179} 152}
180#endif 153#endif
181 154
182#ifdef CONFIG_PM_RUNTIME 155#ifdef CONFIG_PM
183static int mmc_runtime_suspend(struct device *dev) 156static int mmc_runtime_suspend(struct device *dev)
184{ 157{
185 struct mmc_card *card = mmc_dev_to_card(dev); 158 struct mmc_card *card = mmc_dev_to_card(dev);
@@ -195,7 +168,7 @@ static int mmc_runtime_resume(struct device *dev)
195 168
196 return host->bus_ops->runtime_resume(host); 169 return host->bus_ops->runtime_resume(host);
197} 170}
198#endif /* !CONFIG_PM_RUNTIME */ 171#endif /* !CONFIG_PM */
199 172
200static const struct dev_pm_ops mmc_bus_pm_ops = { 173static const struct dev_pm_ops mmc_bus_pm_ops = {
201 SET_RUNTIME_PM_OPS(mmc_runtime_suspend, mmc_runtime_resume, NULL) 174 SET_RUNTIME_PM_OPS(mmc_runtime_suspend, mmc_runtime_resume, NULL)
@@ -207,8 +180,6 @@ static struct bus_type mmc_bus_type = {
207 .dev_groups = mmc_dev_groups, 180 .dev_groups = mmc_dev_groups,
208 .match = mmc_bus_match, 181 .match = mmc_bus_match,
209 .uevent = mmc_bus_uevent, 182 .uevent = mmc_bus_uevent,
210 .probe = mmc_bus_probe,
211 .remove = mmc_bus_remove,
212 .shutdown = mmc_bus_shutdown, 183 .shutdown = mmc_bus_shutdown,
213 .pm = &mmc_bus_pm_ops, 184 .pm = &mmc_bus_pm_ops,
214}; 185};
@@ -227,24 +198,22 @@ void mmc_unregister_bus(void)
227 * mmc_register_driver - register a media driver 198 * mmc_register_driver - register a media driver
228 * @drv: MMC media driver 199 * @drv: MMC media driver
229 */ 200 */
230int mmc_register_driver(struct mmc_driver *drv) 201int mmc_register_driver(struct device_driver *drv)
231{ 202{
232 drv->drv.bus = &mmc_bus_type; 203 drv->bus = &mmc_bus_type;
233 return driver_register(&drv->drv); 204 return driver_register(drv);
234} 205}
235
236EXPORT_SYMBOL(mmc_register_driver); 206EXPORT_SYMBOL(mmc_register_driver);
237 207
238/** 208/**
239 * mmc_unregister_driver - unregister a media driver 209 * mmc_unregister_driver - unregister a media driver
240 * @drv: MMC media driver 210 * @drv: MMC media driver
241 */ 211 */
242void mmc_unregister_driver(struct mmc_driver *drv) 212void mmc_unregister_driver(struct device_driver *drv)
243{ 213{
244 drv->drv.bus = &mmc_bus_type; 214 drv->bus = &mmc_bus_type;
245 driver_unregister(&drv->drv); 215 driver_unregister(drv);
246} 216}
247
248EXPORT_SYMBOL(mmc_unregister_driver); 217EXPORT_SYMBOL(mmc_unregister_driver);
249 218
250static void mmc_release_card(struct device *dev) 219static void mmc_release_card(struct device *dev)
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index f26a5f1d926d..9584bffa8b22 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -149,6 +149,14 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
149 149
150 led_trigger_event(host->led, LED_OFF); 150 led_trigger_event(host->led, LED_OFF);
151 151
152 if (mrq->sbc) {
153 pr_debug("%s: req done <CMD%u>: %d: %08x %08x %08x %08x\n",
154 mmc_hostname(host), mrq->sbc->opcode,
155 mrq->sbc->error,
156 mrq->sbc->resp[0], mrq->sbc->resp[1],
157 mrq->sbc->resp[2], mrq->sbc->resp[3]);
158 }
159
152 pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n", 160 pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
153 mmc_hostname(host), cmd->opcode, err, 161 mmc_hostname(host), cmd->opcode, err,
154 cmd->resp[0], cmd->resp[1], 162 cmd->resp[0], cmd->resp[1],
@@ -214,6 +222,10 @@ mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
214 222
215 mrq->cmd->error = 0; 223 mrq->cmd->error = 0;
216 mrq->cmd->mrq = mrq; 224 mrq->cmd->mrq = mrq;
225 if (mrq->sbc) {
226 mrq->sbc->error = 0;
227 mrq->sbc->mrq = mrq;
228 }
217 if (mrq->data) { 229 if (mrq->data) {
218 BUG_ON(mrq->data->blksz > host->max_blk_size); 230 BUG_ON(mrq->data->blksz > host->max_blk_size);
219 BUG_ON(mrq->data->blocks > host->max_blk_count); 231 BUG_ON(mrq->data->blocks > host->max_blk_count);
@@ -538,8 +550,18 @@ struct mmc_async_req *mmc_start_req(struct mmc_host *host,
538 if (host->card && mmc_card_mmc(host->card) && 550 if (host->card && mmc_card_mmc(host->card) &&
539 ((mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1) || 551 ((mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1) ||
540 (mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1B)) && 552 (mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1B)) &&
541 (host->areq->mrq->cmd->resp[0] & R1_EXCEPTION_EVENT)) 553 (host->areq->mrq->cmd->resp[0] & R1_EXCEPTION_EVENT)) {
554
555 /* Cancel the prepared request */
556 if (areq)
557 mmc_post_req(host, areq->mrq, -EINVAL);
558
542 mmc_start_bkops(host->card, true); 559 mmc_start_bkops(host->card, true);
560
561 /* prepare the request again */
562 if (areq)
563 mmc_pre_req(host, areq->mrq, !host->areq);
564 }
543 } 565 }
544 566
545 if (!err && areq) 567 if (!err && areq)
@@ -709,27 +731,16 @@ int mmc_read_bkops_status(struct mmc_card *card)
709 int err; 731 int err;
710 u8 *ext_csd; 732 u8 *ext_csd;
711 733
712 /*
713 * In future work, we should consider storing the entire ext_csd.
714 */
715 ext_csd = kmalloc(512, GFP_KERNEL);
716 if (!ext_csd) {
717 pr_err("%s: could not allocate buffer to receive the ext_csd.\n",
718 mmc_hostname(card->host));
719 return -ENOMEM;
720 }
721
722 mmc_claim_host(card->host); 734 mmc_claim_host(card->host);
723 err = mmc_send_ext_csd(card, ext_csd); 735 err = mmc_get_ext_csd(card, &ext_csd);
724 mmc_release_host(card->host); 736 mmc_release_host(card->host);
725 if (err) 737 if (err)
726 goto out; 738 return err;
727 739
728 card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS]; 740 card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
729 card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS]; 741 card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
730out:
731 kfree(ext_csd); 742 kfree(ext_csd);
732 return err; 743 return 0;
733} 744}
734EXPORT_SYMBOL(mmc_read_bkops_status); 745EXPORT_SYMBOL(mmc_read_bkops_status);
735 746
@@ -1088,6 +1099,22 @@ void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
1088 mmc_host_clk_release(host); 1099 mmc_host_clk_release(host);
1089} 1100}
1090 1101
1102/*
1103 * Set initial state after a power cycle or a hw_reset.
1104 */
1105void mmc_set_initial_state(struct mmc_host *host)
1106{
1107 if (mmc_host_is_spi(host))
1108 host->ios.chip_select = MMC_CS_HIGH;
1109 else
1110 host->ios.chip_select = MMC_CS_DONTCARE;
1111 host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
1112 host->ios.bus_width = MMC_BUS_WIDTH_1;
1113 host->ios.timing = MMC_TIMING_LEGACY;
1114
1115 mmc_set_ios(host);
1116}
1117
1091/** 1118/**
1092 * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number 1119 * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
1093 * @vdd: voltage (mV) 1120 * @vdd: voltage (mV)
@@ -1420,18 +1447,20 @@ int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, u32 ocr)
1420 pr_warn("%s: cannot verify signal voltage switch\n", 1447 pr_warn("%s: cannot verify signal voltage switch\n",
1421 mmc_hostname(host)); 1448 mmc_hostname(host));
1422 1449
1450 mmc_host_clk_hold(host);
1451
1423 cmd.opcode = SD_SWITCH_VOLTAGE; 1452 cmd.opcode = SD_SWITCH_VOLTAGE;
1424 cmd.arg = 0; 1453 cmd.arg = 0;
1425 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; 1454 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
1426 1455
1427 err = mmc_wait_for_cmd(host, &cmd, 0); 1456 err = mmc_wait_for_cmd(host, &cmd, 0);
1428 if (err) 1457 if (err)
1429 return err; 1458 goto err_command;
1430 1459
1431 if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR)) 1460 if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR)) {
1432 return -EIO; 1461 err = -EIO;
1433 1462 goto err_command;
1434 mmc_host_clk_hold(host); 1463 }
1435 /* 1464 /*
1436 * The card should drive cmd and dat[0:3] low immediately 1465 * The card should drive cmd and dat[0:3] low immediately
1437 * after the response of cmd11, but wait 1 ms to be sure 1466 * after the response of cmd11, but wait 1 ms to be sure
@@ -1480,6 +1509,7 @@ power_cycle:
1480 mmc_power_cycle(host, ocr); 1509 mmc_power_cycle(host, ocr);
1481 } 1510 }
1482 1511
1512err_command:
1483 mmc_host_clk_release(host); 1513 mmc_host_clk_release(host);
1484 1514
1485 return err; 1515 return err;
@@ -1526,15 +1556,9 @@ void mmc_power_up(struct mmc_host *host, u32 ocr)
1526 mmc_host_clk_hold(host); 1556 mmc_host_clk_hold(host);
1527 1557
1528 host->ios.vdd = fls(ocr) - 1; 1558 host->ios.vdd = fls(ocr) - 1;
1529 if (mmc_host_is_spi(host))
1530 host->ios.chip_select = MMC_CS_HIGH;
1531 else
1532 host->ios.chip_select = MMC_CS_DONTCARE;
1533 host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
1534 host->ios.power_mode = MMC_POWER_UP; 1559 host->ios.power_mode = MMC_POWER_UP;
1535 host->ios.bus_width = MMC_BUS_WIDTH_1; 1560 /* Set initial state and call mmc_set_ios */
1536 host->ios.timing = MMC_TIMING_LEGACY; 1561 mmc_set_initial_state(host);
1537 mmc_set_ios(host);
1538 1562
1539 /* Try to set signal voltage to 3.3V but fall back to 1.8v or 1.2v */ 1563 /* Try to set signal voltage to 3.3V but fall back to 1.8v or 1.2v */
1540 if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330) == 0) 1564 if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330) == 0)
@@ -1574,14 +1598,9 @@ void mmc_power_off(struct mmc_host *host)
1574 host->ios.clock = 0; 1598 host->ios.clock = 0;
1575 host->ios.vdd = 0; 1599 host->ios.vdd = 0;
1576 1600
1577 if (!mmc_host_is_spi(host)) {
1578 host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
1579 host->ios.chip_select = MMC_CS_DONTCARE;
1580 }
1581 host->ios.power_mode = MMC_POWER_OFF; 1601 host->ios.power_mode = MMC_POWER_OFF;
1582 host->ios.bus_width = MMC_BUS_WIDTH_1; 1602 /* Set initial state and call mmc_set_ios */
1583 host->ios.timing = MMC_TIMING_LEGACY; 1603 mmc_set_initial_state(host);
1584 mmc_set_ios(host);
1585 1604
1586 /* 1605 /*
1587 * Some configurations, such as the 802.11 SDIO card in the OLPC 1606 * Some configurations, such as the 802.11 SDIO card in the OLPC
@@ -2259,30 +2278,16 @@ static int mmc_do_hw_reset(struct mmc_host *host, int check)
2259 2278
2260 /* If the reset has happened, then a status command will fail */ 2279 /* If the reset has happened, then a status command will fail */
2261 if (check) { 2280 if (check) {
2262 struct mmc_command cmd = {0}; 2281 u32 status;
2263 int err;
2264 2282
2265 cmd.opcode = MMC_SEND_STATUS; 2283 if (!mmc_send_status(card, &status)) {
2266 if (!mmc_host_is_spi(card->host))
2267 cmd.arg = card->rca << 16;
2268 cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
2269 err = mmc_wait_for_cmd(card->host, &cmd, 0);
2270 if (!err) {
2271 mmc_host_clk_release(host); 2284 mmc_host_clk_release(host);
2272 return -ENOSYS; 2285 return -ENOSYS;
2273 } 2286 }
2274 } 2287 }
2275 2288
2276 if (mmc_host_is_spi(host)) { 2289 /* Set initial state and call mmc_set_ios */
2277 host->ios.chip_select = MMC_CS_HIGH; 2290 mmc_set_initial_state(host);
2278 host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
2279 } else {
2280 host->ios.chip_select = MMC_CS_DONTCARE;
2281 host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
2282 }
2283 host->ios.bus_width = MMC_BUS_WIDTH_1;
2284 host->ios.timing = MMC_TIMING_LEGACY;
2285 mmc_set_ios(host);
2286 2291
2287 mmc_host_clk_release(host); 2292 mmc_host_clk_release(host);
2288 2293
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index 443a584660f0..d76597c65e3a 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -49,6 +49,7 @@ void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type);
49void mmc_power_up(struct mmc_host *host, u32 ocr); 49void mmc_power_up(struct mmc_host *host, u32 ocr);
50void mmc_power_off(struct mmc_host *host); 50void mmc_power_off(struct mmc_host *host);
51void mmc_power_cycle(struct mmc_host *host, u32 ocr); 51void mmc_power_cycle(struct mmc_host *host, u32 ocr);
52void mmc_set_initial_state(struct mmc_host *host);
52 53
53static inline void mmc_delay(unsigned int ms) 54static inline void mmc_delay(unsigned int ms)
54{ 55{
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
index 91eb16223246..e9142108a6c6 100644
--- a/drivers/mmc/core/debugfs.c
+++ b/drivers/mmc/core/debugfs.c
@@ -291,14 +291,8 @@ static int mmc_ext_csd_open(struct inode *inode, struct file *filp)
291 if (!buf) 291 if (!buf)
292 return -ENOMEM; 292 return -ENOMEM;
293 293
294 ext_csd = kmalloc(512, GFP_KERNEL);
295 if (!ext_csd) {
296 err = -ENOMEM;
297 goto out_free;
298 }
299
300 mmc_get_card(card); 294 mmc_get_card(card);
301 err = mmc_send_ext_csd(card, ext_csd); 295 err = mmc_get_ext_csd(card, &ext_csd);
302 mmc_put_card(card); 296 mmc_put_card(card);
303 if (err) 297 if (err)
304 goto out_free; 298 goto out_free;
@@ -314,7 +308,6 @@ static int mmc_ext_csd_open(struct inode *inode, struct file *filp)
314 308
315out_free: 309out_free:
316 kfree(buf); 310 kfree(buf);
317 kfree(ext_csd);
318 return err; 311 return err;
319} 312}
320 313
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index a301a78a2bd1..02ad79229f65 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -177,65 +177,6 @@ static int mmc_decode_csd(struct mmc_card *card)
177 return 0; 177 return 0;
178} 178}
179 179
180/*
181 * Read extended CSD.
182 */
183static int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
184{
185 int err;
186 u8 *ext_csd;
187
188 BUG_ON(!card);
189 BUG_ON(!new_ext_csd);
190
191 *new_ext_csd = NULL;
192
193 if (card->csd.mmca_vsn < CSD_SPEC_VER_4)
194 return 0;
195
196 /*
197 * As the ext_csd is so large and mostly unused, we don't store the
198 * raw block in mmc_card.
199 */
200 ext_csd = kmalloc(512, GFP_KERNEL);
201 if (!ext_csd) {
202 pr_err("%s: could not allocate a buffer to "
203 "receive the ext_csd.\n", mmc_hostname(card->host));
204 return -ENOMEM;
205 }
206
207 err = mmc_send_ext_csd(card, ext_csd);
208 if (err) {
209 kfree(ext_csd);
210 *new_ext_csd = NULL;
211
212 /* If the host or the card can't do the switch,
213 * fail more gracefully. */
214 if ((err != -EINVAL)
215 && (err != -ENOSYS)
216 && (err != -EFAULT))
217 return err;
218
219 /*
220 * High capacity cards should have this "magic" size
221 * stored in their CSD.
222 */
223 if (card->csd.capacity == (4096 * 512)) {
224 pr_err("%s: unable to read EXT_CSD "
225 "on a possible high capacity card. "
226 "Card will be ignored.\n",
227 mmc_hostname(card->host));
228 } else {
229 pr_warn("%s: unable to read EXT_CSD, performance might suffer\n",
230 mmc_hostname(card->host));
231 err = 0;
232 }
233 } else
234 *new_ext_csd = ext_csd;
235
236 return err;
237}
238
239static void mmc_select_card_type(struct mmc_card *card) 180static void mmc_select_card_type(struct mmc_card *card)
240{ 181{
241 struct mmc_host *host = card->host; 182 struct mmc_host *host = card->host;
@@ -391,16 +332,11 @@ static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd)
391/* 332/*
392 * Decode extended CSD. 333 * Decode extended CSD.
393 */ 334 */
394static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd) 335static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
395{ 336{
396 int err = 0, idx; 337 int err = 0, idx;
397 unsigned int part_size; 338 unsigned int part_size;
398 339
399 BUG_ON(!card);
400
401 if (!ext_csd)
402 return 0;
403
404 /* Version is coded in the CSD_STRUCTURE byte in the EXT_CSD register */ 340 /* Version is coded in the CSD_STRUCTURE byte in the EXT_CSD register */
405 card->ext_csd.raw_ext_csd_structure = ext_csd[EXT_CSD_STRUCTURE]; 341 card->ext_csd.raw_ext_csd_structure = ext_csd[EXT_CSD_STRUCTURE];
406 if (card->csd.structure == 3) { 342 if (card->csd.structure == 3) {
@@ -628,16 +564,56 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
628 card->ext_csd.data_sector_size = 512; 564 card->ext_csd.data_sector_size = 512;
629 } 565 }
630 566
567 /* eMMC v5 or later */
568 if (card->ext_csd.rev >= 7) {
569 memcpy(card->ext_csd.fwrev, &ext_csd[EXT_CSD_FIRMWARE_VERSION],
570 MMC_FIRMWARE_LEN);
571 card->ext_csd.ffu_capable =
572 (ext_csd[EXT_CSD_SUPPORTED_MODE] & 0x1) &&
573 !(ext_csd[EXT_CSD_FW_CONFIG] & 0x1);
574 }
631out: 575out:
632 return err; 576 return err;
633} 577}
634 578
635static inline void mmc_free_ext_csd(u8 *ext_csd) 579static int mmc_read_ext_csd(struct mmc_card *card)
636{ 580{
581 u8 *ext_csd;
582 int err;
583
584 if (!mmc_can_ext_csd(card))
585 return 0;
586
587 err = mmc_get_ext_csd(card, &ext_csd);
588 if (err) {
589 /* If the host or the card can't do the switch,
590 * fail more gracefully. */
591 if ((err != -EINVAL)
592 && (err != -ENOSYS)
593 && (err != -EFAULT))
594 return err;
595
596 /*
597 * High capacity cards should have this "magic" size
598 * stored in their CSD.
599 */
600 if (card->csd.capacity == (4096 * 512)) {
601 pr_err("%s: unable to read EXT_CSD on a possible high capacity card. Card will be ignored.\n",
602 mmc_hostname(card->host));
603 } else {
604 pr_warn("%s: unable to read EXT_CSD, performance might suffer\n",
605 mmc_hostname(card->host));
606 err = 0;
607 }
608
609 return err;
610 }
611
612 err = mmc_decode_ext_csd(card, ext_csd);
637 kfree(ext_csd); 613 kfree(ext_csd);
614 return err;
638} 615}
639 616
640
641static int mmc_compare_ext_csds(struct mmc_card *card, unsigned bus_width) 617static int mmc_compare_ext_csds(struct mmc_card *card, unsigned bus_width)
642{ 618{
643 u8 *bw_ext_csd; 619 u8 *bw_ext_csd;
@@ -647,11 +623,8 @@ static int mmc_compare_ext_csds(struct mmc_card *card, unsigned bus_width)
647 return 0; 623 return 0;
648 624
649 err = mmc_get_ext_csd(card, &bw_ext_csd); 625 err = mmc_get_ext_csd(card, &bw_ext_csd);
650 626 if (err)
651 if (err || bw_ext_csd == NULL) { 627 return err;
652 err = -EINVAL;
653 goto out;
654 }
655 628
656 /* only compare read only fields */ 629 /* only compare read only fields */
657 err = !((card->ext_csd.raw_partition_support == 630 err = !((card->ext_csd.raw_partition_support ==
@@ -710,8 +683,7 @@ static int mmc_compare_ext_csds(struct mmc_card *card, unsigned bus_width)
710 if (err) 683 if (err)
711 err = -EINVAL; 684 err = -EINVAL;
712 685
713out: 686 kfree(bw_ext_csd);
714 mmc_free_ext_csd(bw_ext_csd);
715 return err; 687 return err;
716} 688}
717 689
@@ -722,7 +694,7 @@ MMC_DEV_ATTR(csd, "%08x%08x%08x%08x\n", card->raw_csd[0], card->raw_csd[1],
722MMC_DEV_ATTR(date, "%02d/%04d\n", card->cid.month, card->cid.year); 694MMC_DEV_ATTR(date, "%02d/%04d\n", card->cid.month, card->cid.year);
723MMC_DEV_ATTR(erase_size, "%u\n", card->erase_size << 9); 695MMC_DEV_ATTR(erase_size, "%u\n", card->erase_size << 9);
724MMC_DEV_ATTR(preferred_erase_size, "%u\n", card->pref_erase << 9); 696MMC_DEV_ATTR(preferred_erase_size, "%u\n", card->pref_erase << 9);
725MMC_DEV_ATTR(fwrev, "0x%x\n", card->cid.fwrev); 697MMC_DEV_ATTR(ffu_capable, "%d\n", card->ext_csd.ffu_capable);
726MMC_DEV_ATTR(hwrev, "0x%x\n", card->cid.hwrev); 698MMC_DEV_ATTR(hwrev, "0x%x\n", card->cid.hwrev);
727MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid); 699MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid);
728MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name); 700MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name);
@@ -735,6 +707,22 @@ MMC_DEV_ATTR(enhanced_area_size, "%u\n", card->ext_csd.enhanced_area_size);
735MMC_DEV_ATTR(raw_rpmb_size_mult, "%#x\n", card->ext_csd.raw_rpmb_size_mult); 707MMC_DEV_ATTR(raw_rpmb_size_mult, "%#x\n", card->ext_csd.raw_rpmb_size_mult);
736MMC_DEV_ATTR(rel_sectors, "%#x\n", card->ext_csd.rel_sectors); 708MMC_DEV_ATTR(rel_sectors, "%#x\n", card->ext_csd.rel_sectors);
737 709
710static ssize_t mmc_fwrev_show(struct device *dev,
711 struct device_attribute *attr,
712 char *buf)
713{
714 struct mmc_card *card = mmc_dev_to_card(dev);
715
716 if (card->ext_csd.rev < 7) {
717 return sprintf(buf, "0x%x\n", card->cid.fwrev);
718 } else {
719 return sprintf(buf, "0x%*phN\n", MMC_FIRMWARE_LEN,
720 card->ext_csd.fwrev);
721 }
722}
723
724static DEVICE_ATTR(fwrev, S_IRUGO, mmc_fwrev_show, NULL);
725
738static struct attribute *mmc_std_attrs[] = { 726static struct attribute *mmc_std_attrs[] = {
739 &dev_attr_cid.attr, 727 &dev_attr_cid.attr,
740 &dev_attr_csd.attr, 728 &dev_attr_csd.attr,
@@ -742,6 +730,7 @@ static struct attribute *mmc_std_attrs[] = {
742 &dev_attr_erase_size.attr, 730 &dev_attr_erase_size.attr,
743 &dev_attr_preferred_erase_size.attr, 731 &dev_attr_preferred_erase_size.attr,
744 &dev_attr_fwrev.attr, 732 &dev_attr_fwrev.attr,
733 &dev_attr_ffu_capable.attr,
745 &dev_attr_hwrev.attr, 734 &dev_attr_hwrev.attr,
746 &dev_attr_manfid.attr, 735 &dev_attr_manfid.attr,
747 &dev_attr_name.attr, 736 &dev_attr_name.attr,
@@ -774,14 +763,6 @@ static int __mmc_select_powerclass(struct mmc_card *card,
774 unsigned int pwrclass_val = 0; 763 unsigned int pwrclass_val = 0;
775 int err = 0; 764 int err = 0;
776 765
777 /* Power class selection is supported for versions >= 4.0 */
778 if (card->csd.mmca_vsn < CSD_SPEC_VER_4)
779 return 0;
780
781 /* Power class values are defined only for 4/8 bit bus */
782 if (bus_width == EXT_CSD_BUS_WIDTH_1)
783 return 0;
784
785 switch (1 << host->ios.vdd) { 766 switch (1 << host->ios.vdd) {
786 case MMC_VDD_165_195: 767 case MMC_VDD_165_195:
787 if (host->ios.clock <= MMC_HIGH_26_MAX_DTR) 768 if (host->ios.clock <= MMC_HIGH_26_MAX_DTR)
@@ -844,7 +825,7 @@ static int mmc_select_powerclass(struct mmc_card *card)
844 int err, ddr; 825 int err, ddr;
845 826
846 /* Power class selection is supported for versions >= 4.0 */ 827 /* Power class selection is supported for versions >= 4.0 */
847 if (card->csd.mmca_vsn < CSD_SPEC_VER_4) 828 if (!mmc_can_ext_csd(card))
848 return 0; 829 return 0;
849 830
850 bus_width = host->ios.bus_width; 831 bus_width = host->ios.bus_width;
@@ -905,7 +886,7 @@ static int mmc_select_bus_width(struct mmc_card *card)
905 unsigned idx, bus_width = 0; 886 unsigned idx, bus_width = 0;
906 int err = 0; 887 int err = 0;
907 888
908 if ((card->csd.mmca_vsn < CSD_SPEC_VER_4) && 889 if (!mmc_can_ext_csd(card) &&
909 !(host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) 890 !(host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA)))
910 return 0; 891 return 0;
911 892
@@ -998,7 +979,7 @@ static int mmc_select_hs_ddr(struct mmc_card *card)
998 ext_csd_bits, 979 ext_csd_bits,
999 card->ext_csd.generic_cmd6_time); 980 card->ext_csd.generic_cmd6_time);
1000 if (err) { 981 if (err) {
1001 pr_warn("%s: switch to bus width %d ddr failed\n", 982 pr_err("%s: switch to bus width %d ddr failed\n",
1002 mmc_hostname(host), 1 << bus_width); 983 mmc_hostname(host), 1 << bus_width);
1003 return err; 984 return err;
1004 } 985 }
@@ -1069,7 +1050,7 @@ static int mmc_select_hs400(struct mmc_card *card)
1069 card->ext_csd.generic_cmd6_time, 1050 card->ext_csd.generic_cmd6_time,
1070 true, true, true); 1051 true, true, true);
1071 if (err) { 1052 if (err) {
1072 pr_warn("%s: switch to high-speed from hs200 failed, err:%d\n", 1053 pr_err("%s: switch to high-speed from hs200 failed, err:%d\n",
1073 mmc_hostname(host), err); 1054 mmc_hostname(host), err);
1074 return err; 1055 return err;
1075 } 1056 }
@@ -1079,7 +1060,7 @@ static int mmc_select_hs400(struct mmc_card *card)
1079 EXT_CSD_DDR_BUS_WIDTH_8, 1060 EXT_CSD_DDR_BUS_WIDTH_8,
1080 card->ext_csd.generic_cmd6_time); 1061 card->ext_csd.generic_cmd6_time);
1081 if (err) { 1062 if (err) {
1082 pr_warn("%s: switch to bus width for hs400 failed, err:%d\n", 1063 pr_err("%s: switch to bus width for hs400 failed, err:%d\n",
1083 mmc_hostname(host), err); 1064 mmc_hostname(host), err);
1084 return err; 1065 return err;
1085 } 1066 }
@@ -1089,7 +1070,7 @@ static int mmc_select_hs400(struct mmc_card *card)
1089 card->ext_csd.generic_cmd6_time, 1070 card->ext_csd.generic_cmd6_time,
1090 true, true, true); 1071 true, true, true);
1091 if (err) { 1072 if (err) {
1092 pr_warn("%s: switch to hs400 failed, err:%d\n", 1073 pr_err("%s: switch to hs400 failed, err:%d\n",
1093 mmc_hostname(host), err); 1074 mmc_hostname(host), err);
1094 return err; 1075 return err;
1095 } 1076 }
@@ -1146,8 +1127,7 @@ static int mmc_select_timing(struct mmc_card *card)
1146{ 1127{
1147 int err = 0; 1128 int err = 0;
1148 1129
1149 if ((card->csd.mmca_vsn < CSD_SPEC_VER_4 && 1130 if (!mmc_can_ext_csd(card))
1150 card->ext_csd.hs_max_dtr == 0))
1151 goto bus_speed; 1131 goto bus_speed;
1152 1132
1153 if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200) 1133 if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200)
@@ -1232,7 +1212,7 @@ static int mmc_hs200_tuning(struct mmc_card *card)
1232 mmc_host_clk_release(host); 1212 mmc_host_clk_release(host);
1233 1213
1234 if (err) 1214 if (err)
1235 pr_warn("%s: tuning execution failed\n", 1215 pr_err("%s: tuning execution failed\n",
1236 mmc_hostname(host)); 1216 mmc_hostname(host));
1237 } 1217 }
1238 1218
@@ -1252,7 +1232,6 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
1252 int err; 1232 int err;
1253 u32 cid[4]; 1233 u32 cid[4];
1254 u32 rocr; 1234 u32 rocr;
1255 u8 *ext_csd = NULL;
1256 1235
1257 BUG_ON(!host); 1236 BUG_ON(!host);
1258 WARN_ON(!host->claimed); 1237 WARN_ON(!host->claimed);
@@ -1361,14 +1340,8 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
1361 } 1340 }
1362 1341
1363 if (!oldcard) { 1342 if (!oldcard) {
1364 /* 1343 /* Read extended CSD. */
1365 * Fetch and process extended CSD. 1344 err = mmc_read_ext_csd(card);
1366 */
1367
1368 err = mmc_get_ext_csd(card, &ext_csd);
1369 if (err)
1370 goto free_card;
1371 err = mmc_read_ext_csd(card, ext_csd);
1372 if (err) 1345 if (err)
1373 goto free_card; 1346 goto free_card;
1374 1347
@@ -1458,18 +1431,18 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
1458 if (mmc_card_hs200(card)) { 1431 if (mmc_card_hs200(card)) {
1459 err = mmc_hs200_tuning(card); 1432 err = mmc_hs200_tuning(card);
1460 if (err) 1433 if (err)
1461 goto err; 1434 goto free_card;
1462 1435
1463 err = mmc_select_hs400(card); 1436 err = mmc_select_hs400(card);
1464 if (err) 1437 if (err)
1465 goto err; 1438 goto free_card;
1466 } else if (mmc_card_hs(card)) { 1439 } else if (mmc_card_hs(card)) {
1467 /* Select the desired bus width optionally */ 1440 /* Select the desired bus width optionally */
1468 err = mmc_select_bus_width(card); 1441 err = mmc_select_bus_width(card);
1469 if (!IS_ERR_VALUE(err)) { 1442 if (!IS_ERR_VALUE(err)) {
1470 err = mmc_select_hs_ddr(card); 1443 err = mmc_select_hs_ddr(card);
1471 if (err) 1444 if (err)
1472 goto err; 1445 goto free_card;
1473 } 1446 }
1474 } 1447 }
1475 1448
@@ -1545,15 +1518,12 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
1545 if (!oldcard) 1518 if (!oldcard)
1546 host->card = card; 1519 host->card = card;
1547 1520
1548 mmc_free_ext_csd(ext_csd);
1549 return 0; 1521 return 0;
1550 1522
1551free_card: 1523free_card:
1552 if (!oldcard) 1524 if (!oldcard)
1553 mmc_remove_card(card); 1525 mmc_remove_card(card);
1554err: 1526err:
1555 mmc_free_ext_csd(ext_csd);
1556
1557 return err; 1527 return err;
1558} 1528}
1559 1529
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 7911e0510a1d..3b044c5b029c 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -264,20 +264,6 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
264 struct mmc_command cmd = {0}; 264 struct mmc_command cmd = {0};
265 struct mmc_data data = {0}; 265 struct mmc_data data = {0};
266 struct scatterlist sg; 266 struct scatterlist sg;
267 void *data_buf;
268 int is_on_stack;
269
270 is_on_stack = object_is_on_stack(buf);
271 if (is_on_stack) {
272 /*
273 * dma onto stack is unsafe/nonportable, but callers to this
274 * routine normally provide temporary on-stack buffers ...
275 */
276 data_buf = kmalloc(len, GFP_KERNEL);
277 if (!data_buf)
278 return -ENOMEM;
279 } else
280 data_buf = buf;
281 267
282 mrq.cmd = &cmd; 268 mrq.cmd = &cmd;
283 mrq.data = &data; 269 mrq.data = &data;
@@ -298,7 +284,7 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
298 data.sg = &sg; 284 data.sg = &sg;
299 data.sg_len = 1; 285 data.sg_len = 1;
300 286
301 sg_init_one(&sg, data_buf, len); 287 sg_init_one(&sg, buf, len);
302 288
303 if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) { 289 if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
304 /* 290 /*
@@ -312,11 +298,6 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
312 298
313 mmc_wait_for_req(host, &mrq); 299 mmc_wait_for_req(host, &mrq);
314 300
315 if (is_on_stack) {
316 memcpy(buf, data_buf, len);
317 kfree(data_buf);
318 }
319
320 if (cmd.error) 301 if (cmd.error)
321 return cmd.error; 302 return cmd.error;
322 if (data.error) 303 if (data.error)
@@ -334,7 +315,7 @@ int mmc_send_csd(struct mmc_card *card, u32 *csd)
334 return mmc_send_cxd_native(card->host, card->rca << 16, 315 return mmc_send_cxd_native(card->host, card->rca << 16,
335 csd, MMC_SEND_CSD); 316 csd, MMC_SEND_CSD);
336 317
337 csd_tmp = kmalloc(16, GFP_KERNEL); 318 csd_tmp = kzalloc(16, GFP_KERNEL);
338 if (!csd_tmp) 319 if (!csd_tmp)
339 return -ENOMEM; 320 return -ENOMEM;
340 321
@@ -362,7 +343,7 @@ int mmc_send_cid(struct mmc_host *host, u32 *cid)
362 cid, MMC_SEND_CID); 343 cid, MMC_SEND_CID);
363 } 344 }
364 345
365 cid_tmp = kmalloc(16, GFP_KERNEL); 346 cid_tmp = kzalloc(16, GFP_KERNEL);
366 if (!cid_tmp) 347 if (!cid_tmp)
367 return -ENOMEM; 348 return -ENOMEM;
368 349
@@ -378,12 +359,35 @@ err:
378 return ret; 359 return ret;
379} 360}
380 361
381int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd) 362int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
382{ 363{
383 return mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD, 364 int err;
384 ext_csd, 512); 365 u8 *ext_csd;
366
367 if (!card || !new_ext_csd)
368 return -EINVAL;
369
370 if (!mmc_can_ext_csd(card))
371 return -EOPNOTSUPP;
372
373 /*
374 * As the ext_csd is so large and mostly unused, we don't store the
375 * raw block in mmc_card.
376 */
377 ext_csd = kzalloc(512, GFP_KERNEL);
378 if (!ext_csd)
379 return -ENOMEM;
380
381 err = mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD, ext_csd,
382 512);
383 if (err)
384 kfree(ext_csd);
385 else
386 *new_ext_csd = ext_csd;
387
388 return err;
385} 389}
386EXPORT_SYMBOL_GPL(mmc_send_ext_csd); 390EXPORT_SYMBOL_GPL(mmc_get_ext_csd);
387 391
388int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp) 392int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
389{ 393{
@@ -543,6 +547,75 @@ int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
543} 547}
544EXPORT_SYMBOL_GPL(mmc_switch); 548EXPORT_SYMBOL_GPL(mmc_switch);
545 549
550int mmc_send_tuning(struct mmc_host *host)
551{
552 struct mmc_request mrq = {NULL};
553 struct mmc_command cmd = {0};
554 struct mmc_data data = {0};
555 struct scatterlist sg;
556 struct mmc_ios *ios = &host->ios;
557 const u8 *tuning_block_pattern;
558 int size, err = 0;
559 u8 *data_buf;
560 u32 opcode;
561
562 if (ios->bus_width == MMC_BUS_WIDTH_8) {
563 tuning_block_pattern = tuning_blk_pattern_8bit;
564 size = sizeof(tuning_blk_pattern_8bit);
565 opcode = MMC_SEND_TUNING_BLOCK_HS200;
566 } else if (ios->bus_width == MMC_BUS_WIDTH_4) {
567 tuning_block_pattern = tuning_blk_pattern_4bit;
568 size = sizeof(tuning_blk_pattern_4bit);
569 opcode = MMC_SEND_TUNING_BLOCK;
570 } else
571 return -EINVAL;
572
573 data_buf = kzalloc(size, GFP_KERNEL);
574 if (!data_buf)
575 return -ENOMEM;
576
577 mrq.cmd = &cmd;
578 mrq.data = &data;
579
580 cmd.opcode = opcode;
581 cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
582
583 data.blksz = size;
584 data.blocks = 1;
585 data.flags = MMC_DATA_READ;
586
587 /*
588 * According to the tuning specs, Tuning process
589 * is normally shorter 40 executions of CMD19,
590 * and timeout value should be shorter than 150 ms
591 */
592 data.timeout_ns = 150 * NSEC_PER_MSEC;
593
594 data.sg = &sg;
595 data.sg_len = 1;
596 sg_init_one(&sg, data_buf, size);
597
598 mmc_wait_for_req(host, &mrq);
599
600 if (cmd.error) {
601 err = cmd.error;
602 goto out;
603 }
604
605 if (data.error) {
606 err = data.error;
607 goto out;
608 }
609
610 if (memcmp(data_buf, tuning_block_pattern, size))
611 err = -EIO;
612
613out:
614 kfree(data_buf);
615 return err;
616}
617EXPORT_SYMBOL_GPL(mmc_send_tuning);
618
546static int 619static int
547mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode, 620mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
548 u8 len) 621 u8 len)
@@ -675,3 +748,8 @@ int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
675 748
676 return 0; 749 return 0;
677} 750}
751
752int mmc_can_ext_csd(struct mmc_card *card)
753{
754 return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
755}
diff --git a/drivers/mmc/core/mmc_ops.h b/drivers/mmc/core/mmc_ops.h
index 390dac665b2a..6f4b00ed93de 100644
--- a/drivers/mmc/core/mmc_ops.h
+++ b/drivers/mmc/core/mmc_ops.h
@@ -20,13 +20,13 @@ int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr);
20int mmc_all_send_cid(struct mmc_host *host, u32 *cid); 20int mmc_all_send_cid(struct mmc_host *host, u32 *cid);
21int mmc_set_relative_addr(struct mmc_card *card); 21int mmc_set_relative_addr(struct mmc_card *card);
22int mmc_send_csd(struct mmc_card *card, u32 *csd); 22int mmc_send_csd(struct mmc_card *card, u32 *csd);
23int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd);
24int mmc_send_status(struct mmc_card *card, u32 *status); 23int mmc_send_status(struct mmc_card *card, u32 *status);
25int mmc_send_cid(struct mmc_host *host, u32 *cid); 24int mmc_send_cid(struct mmc_host *host, u32 *cid);
26int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp); 25int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp);
27int mmc_spi_set_crc(struct mmc_host *host, int use_crc); 26int mmc_spi_set_crc(struct mmc_host *host, int use_crc);
28int mmc_bus_test(struct mmc_card *card, u8 bus_width); 27int mmc_bus_test(struct mmc_card *card, u8 bus_width);
29int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status); 28int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status);
29int mmc_can_ext_csd(struct mmc_card *card);
30 30
31#endif 31#endif
32 32
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 2439e717655b..fd0750b5a634 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -980,8 +980,12 @@ static int mmc_sdio_resume(struct mmc_host *host)
980 if (mmc_card_is_removable(host) || !mmc_card_keep_power(host)) { 980 if (mmc_card_is_removable(host) || !mmc_card_keep_power(host)) {
981 sdio_reset(host); 981 sdio_reset(host);
982 mmc_go_idle(host); 982 mmc_go_idle(host);
983 err = mmc_sdio_init_card(host, host->card->ocr, host->card, 983 mmc_send_if_cond(host, host->card->ocr);
984 mmc_card_keep_power(host)); 984 err = mmc_send_io_op_cond(host, 0, NULL);
985 if (!err)
986 err = mmc_sdio_init_card(host, host->card->ocr,
987 host->card,
988 mmc_card_keep_power(host));
985 } else if (mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host)) { 989 } else if (mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host)) {
986 /* We may have switched to 1-bit mode during suspend */ 990 /* We may have switched to 1-bit mode during suspend */
987 err = sdio_enable_4bit_bus(host->card); 991 err = sdio_enable_4bit_bus(host->card);
@@ -1035,7 +1039,7 @@ static int mmc_sdio_power_restore(struct mmc_host *host)
1035 1039
1036 sdio_reset(host); 1040 sdio_reset(host);
1037 mmc_go_idle(host); 1041 mmc_go_idle(host);
1038 mmc_send_if_cond(host, host->ocr_avail); 1042 mmc_send_if_cond(host, host->card->ocr);
1039 1043
1040 ret = mmc_send_io_op_cond(host, 0, NULL); 1044 ret = mmc_send_io_op_cond(host, 0, NULL);
1041 if (ret) 1045 if (ret)
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index 6da97b170563..60885316afba 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -26,6 +26,8 @@
26#include "sdio_cis.h" 26#include "sdio_cis.h"
27#include "sdio_bus.h" 27#include "sdio_bus.h"
28 28
29#define to_sdio_driver(d) container_of(d, struct sdio_driver, drv)
30
29/* show configuration fields */ 31/* show configuration fields */
30#define sdio_config_attr(field, format_string) \ 32#define sdio_config_attr(field, format_string) \
31static ssize_t \ 33static ssize_t \
@@ -196,8 +198,6 @@ static int sdio_bus_remove(struct device *dev)
196 return ret; 198 return ret;
197} 199}
198 200
199#ifdef CONFIG_PM
200
201static const struct dev_pm_ops sdio_bus_pm_ops = { 201static const struct dev_pm_ops sdio_bus_pm_ops = {
202 SET_SYSTEM_SLEEP_PM_OPS(pm_generic_suspend, pm_generic_resume) 202 SET_SYSTEM_SLEEP_PM_OPS(pm_generic_suspend, pm_generic_resume)
203 SET_RUNTIME_PM_OPS( 203 SET_RUNTIME_PM_OPS(
@@ -207,14 +207,6 @@ static const struct dev_pm_ops sdio_bus_pm_ops = {
207 ) 207 )
208}; 208};
209 209
210#define SDIO_PM_OPS_PTR (&sdio_bus_pm_ops)
211
212#else /* !CONFIG_PM */
213
214#define SDIO_PM_OPS_PTR NULL
215
216#endif /* !CONFIG_PM */
217
218static struct bus_type sdio_bus_type = { 210static struct bus_type sdio_bus_type = {
219 .name = "sdio", 211 .name = "sdio",
220 .dev_groups = sdio_dev_groups, 212 .dev_groups = sdio_dev_groups,
@@ -222,7 +214,7 @@ static struct bus_type sdio_bus_type = {
222 .uevent = sdio_bus_uevent, 214 .uevent = sdio_bus_uevent,
223 .probe = sdio_bus_probe, 215 .probe = sdio_bus_probe,
224 .remove = sdio_bus_remove, 216 .remove = sdio_bus_remove,
225 .pm = SDIO_PM_OPS_PTR, 217 .pm = &sdio_bus_pm_ops,
226}; 218};
227 219
228int sdio_register_bus(void) 220int sdio_register_bus(void)
@@ -295,7 +287,7 @@ struct sdio_func *sdio_alloc_func(struct mmc_card *card)
295static void sdio_acpi_set_handle(struct sdio_func *func) 287static void sdio_acpi_set_handle(struct sdio_func *func)
296{ 288{
297 struct mmc_host *host = func->card->host; 289 struct mmc_host *host = func->card->host;
298 u64 addr = (host->slotno << 16) | func->num; 290 u64 addr = ((u64)host->slotno << 16) | func->num;
299 291
300 acpi_preset_companion(&func->dev, ACPI_COMPANION(host->parent), addr); 292 acpi_preset_companion(&func->dev, ACPI_COMPANION(host->parent), addr);
301} 293}
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 13860656104b..2d6fbdd11803 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -580,7 +580,7 @@ config SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND
580config MMC_DW 580config MMC_DW
581 tristate "Synopsys DesignWare Memory Card Interface" 581 tristate "Synopsys DesignWare Memory Card Interface"
582 depends on HAS_DMA 582 depends on HAS_DMA
583 depends on ARC || ARM || MIPS || COMPILE_TEST 583 depends on ARC || ARM || ARM64 || MIPS || COMPILE_TEST
584 help 584 help
585 This selects support for the Synopsys DesignWare Mobile Storage IP 585 This selects support for the Synopsys DesignWare Mobile Storage IP
586 block, this provides host support for SD and MMC interfaces, in both 586 block, this provides host support for SD and MMC interfaces, in both
@@ -748,3 +748,8 @@ config MMC_SUNXI
748 help 748 help
749 This selects support for the SD/MMC Host Controller on 749 This selects support for the SD/MMC Host Controller on
750 Allwinner sunxi SoCs. 750 Allwinner sunxi SoCs.
751
752config MMC_TOSHIBA_PCI
753 tristate "Toshiba Type A SD/MMC Card Interface Driver"
754 depends on PCI
755 help
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index b09ecfb88269..f7b0a77cf419 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -55,6 +55,7 @@ obj-$(CONFIG_MMC_WMT) += wmt-sdmmc.o
55obj-$(CONFIG_MMC_MOXART) += moxart-mmc.o 55obj-$(CONFIG_MMC_MOXART) += moxart-mmc.o
56obj-$(CONFIG_MMC_SUNXI) += sunxi-mmc.o 56obj-$(CONFIG_MMC_SUNXI) += sunxi-mmc.o
57obj-$(CONFIG_MMC_USDHI6ROL0) += usdhi6rol0.o 57obj-$(CONFIG_MMC_USDHI6ROL0) += usdhi6rol0.o
58obj-$(CONFIG_MMC_TOSHIBA_PCI) += toshsd.o
58 59
59obj-$(CONFIG_MMC_REALTEK_PCI) += rtsx_pci_sdmmc.o 60obj-$(CONFIG_MMC_REALTEK_PCI) += rtsx_pci_sdmmc.o
60obj-$(CONFIG_MMC_REALTEK_USB) += rtsx_usb_sdmmc.o 61obj-$(CONFIG_MMC_REALTEK_USB) += rtsx_usb_sdmmc.o
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 77250d4b1979..62aba9af19f4 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -30,13 +30,16 @@
30#include <linux/stat.h> 30#include <linux/stat.h>
31#include <linux/types.h> 31#include <linux/types.h>
32#include <linux/platform_data/atmel.h> 32#include <linux/platform_data/atmel.h>
33#include <linux/platform_data/mmc-atmel-mci.h>
33 34
34#include <linux/mmc/host.h> 35#include <linux/mmc/host.h>
35#include <linux/mmc/sdio.h> 36#include <linux/mmc/sdio.h>
36 37
37#include <mach/atmel-mci.h>
38#include <linux/atmel-mci.h> 38#include <linux/atmel-mci.h>
39#include <linux/atmel_pdc.h> 39#include <linux/atmel_pdc.h>
40#include <linux/pm.h>
41#include <linux/pm_runtime.h>
42#include <linux/pinctrl/consumer.h>
40 43
41#include <asm/cacheflush.h> 44#include <asm/cacheflush.h>
42#include <asm/io.h> 45#include <asm/io.h>
@@ -44,6 +47,8 @@
44 47
45#include "atmel-mci-regs.h" 48#include "atmel-mci-regs.h"
46 49
50#define AUTOSUSPEND_DELAY 50
51
47#define ATMCI_DATA_ERROR_FLAGS (ATMCI_DCRCE | ATMCI_DTOE | ATMCI_OVRE | ATMCI_UNRE) 52#define ATMCI_DATA_ERROR_FLAGS (ATMCI_DCRCE | ATMCI_DTOE | ATMCI_OVRE | ATMCI_UNRE)
48#define ATMCI_DMA_THRESHOLD 16 53#define ATMCI_DMA_THRESHOLD 16
49 54
@@ -386,20 +391,19 @@ static int atmci_regs_show(struct seq_file *s, void *v)
386 if (!buf) 391 if (!buf)
387 return -ENOMEM; 392 return -ENOMEM;
388 393
394 pm_runtime_get_sync(&host->pdev->dev);
395
389 /* 396 /*
390 * Grab a more or less consistent snapshot. Note that we're 397 * Grab a more or less consistent snapshot. Note that we're
391 * not disabling interrupts, so IMR and SR may not be 398 * not disabling interrupts, so IMR and SR may not be
392 * consistent. 399 * consistent.
393 */ 400 */
394 ret = clk_prepare_enable(host->mck);
395 if (ret)
396 goto out;
397
398 spin_lock_bh(&host->lock); 401 spin_lock_bh(&host->lock);
399 memcpy_fromio(buf, host->regs, ATMCI_REGS_SIZE); 402 memcpy_fromio(buf, host->regs, ATMCI_REGS_SIZE);
400 spin_unlock_bh(&host->lock); 403 spin_unlock_bh(&host->lock);
401 404
402 clk_disable_unprepare(host->mck); 405 pm_runtime_mark_last_busy(&host->pdev->dev);
406 pm_runtime_put_autosuspend(&host->pdev->dev);
403 407
404 seq_printf(s, "MR:\t0x%08x%s%s ", 408 seq_printf(s, "MR:\t0x%08x%s%s ",
405 buf[ATMCI_MR / 4], 409 buf[ATMCI_MR / 4],
@@ -449,7 +453,6 @@ static int atmci_regs_show(struct seq_file *s, void *v)
449 val & ATMCI_CFG_LSYNC ? " LSYNC" : ""); 453 val & ATMCI_CFG_LSYNC ? " LSYNC" : "");
450 } 454 }
451 455
452out:
453 kfree(buf); 456 kfree(buf);
454 457
455 return ret; 458 return ret;
@@ -560,6 +563,9 @@ atmci_of_init(struct platform_device *pdev)
560 pdata->slot[slot_id].detect_is_active_high = 563 pdata->slot[slot_id].detect_is_active_high =
561 of_property_read_bool(cnp, "cd-inverted"); 564 of_property_read_bool(cnp, "cd-inverted");
562 565
566 pdata->slot[slot_id].non_removable =
567 of_property_read_bool(cnp, "non-removable");
568
563 pdata->slot[slot_id].wp_pin = 569 pdata->slot[slot_id].wp_pin =
564 of_get_named_gpio(cnp, "wp-gpios", 0); 570 of_get_named_gpio(cnp, "wp-gpios", 0);
565 } 571 }
@@ -1252,6 +1258,8 @@ static void atmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1252 WARN_ON(slot->mrq); 1258 WARN_ON(slot->mrq);
1253 dev_dbg(&host->pdev->dev, "MRQ: cmd %u\n", mrq->cmd->opcode); 1259 dev_dbg(&host->pdev->dev, "MRQ: cmd %u\n", mrq->cmd->opcode);
1254 1260
1261 pm_runtime_get_sync(&host->pdev->dev);
1262
1255 /* 1263 /*
1256 * We may "know" the card is gone even though there's still an 1264 * We may "know" the card is gone even though there's still an
1257 * electrical connection. If so, we really need to communicate 1265 * electrical connection. If so, we really need to communicate
@@ -1281,7 +1289,8 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1281 struct atmel_mci_slot *slot = mmc_priv(mmc); 1289 struct atmel_mci_slot *slot = mmc_priv(mmc);
1282 struct atmel_mci *host = slot->host; 1290 struct atmel_mci *host = slot->host;
1283 unsigned int i; 1291 unsigned int i;
1284 bool unprepare_clk; 1292
1293 pm_runtime_get_sync(&host->pdev->dev);
1285 1294
1286 slot->sdc_reg &= ~ATMCI_SDCBUS_MASK; 1295 slot->sdc_reg &= ~ATMCI_SDCBUS_MASK;
1287 switch (ios->bus_width) { 1296 switch (ios->bus_width) {
@@ -1297,13 +1306,8 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1297 unsigned int clock_min = ~0U; 1306 unsigned int clock_min = ~0U;
1298 u32 clkdiv; 1307 u32 clkdiv;
1299 1308
1300 clk_prepare(host->mck);
1301 unprepare_clk = true;
1302
1303 spin_lock_bh(&host->lock); 1309 spin_lock_bh(&host->lock);
1304 if (!host->mode_reg) { 1310 if (!host->mode_reg) {
1305 clk_enable(host->mck);
1306 unprepare_clk = false;
1307 atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST); 1311 atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
1308 atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN); 1312 atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN);
1309 if (host->caps.has_cfg_reg) 1313 if (host->caps.has_cfg_reg)
@@ -1371,8 +1375,6 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1371 } else { 1375 } else {
1372 bool any_slot_active = false; 1376 bool any_slot_active = false;
1373 1377
1374 unprepare_clk = false;
1375
1376 spin_lock_bh(&host->lock); 1378 spin_lock_bh(&host->lock);
1377 slot->clock = 0; 1379 slot->clock = 0;
1378 for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) { 1380 for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
@@ -1385,17 +1387,12 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1385 atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIDIS); 1387 atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIDIS);
1386 if (host->mode_reg) { 1388 if (host->mode_reg) {
1387 atmci_readl(host, ATMCI_MR); 1389 atmci_readl(host, ATMCI_MR);
1388 clk_disable(host->mck);
1389 unprepare_clk = true;
1390 } 1390 }
1391 host->mode_reg = 0; 1391 host->mode_reg = 0;
1392 } 1392 }
1393 spin_unlock_bh(&host->lock); 1393 spin_unlock_bh(&host->lock);
1394 } 1394 }
1395 1395
1396 if (unprepare_clk)
1397 clk_unprepare(host->mck);
1398
1399 switch (ios->power_mode) { 1396 switch (ios->power_mode) {
1400 case MMC_POWER_OFF: 1397 case MMC_POWER_OFF:
1401 if (!IS_ERR(mmc->supply.vmmc)) 1398 if (!IS_ERR(mmc->supply.vmmc))
@@ -1421,6 +1418,9 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1421 */ 1418 */
1422 break; 1419 break;
1423 } 1420 }
1421
1422 pm_runtime_mark_last_busy(&host->pdev->dev);
1423 pm_runtime_put_autosuspend(&host->pdev->dev);
1424} 1424}
1425 1425
1426static int atmci_get_ro(struct mmc_host *mmc) 1426static int atmci_get_ro(struct mmc_host *mmc)
@@ -1512,6 +1512,9 @@ static void atmci_request_end(struct atmel_mci *host, struct mmc_request *mrq)
1512 spin_unlock(&host->lock); 1512 spin_unlock(&host->lock);
1513 mmc_request_done(prev_mmc, mrq); 1513 mmc_request_done(prev_mmc, mrq);
1514 spin_lock(&host->lock); 1514 spin_lock(&host->lock);
1515
1516 pm_runtime_mark_last_busy(&host->pdev->dev);
1517 pm_runtime_put_autosuspend(&host->pdev->dev);
1515} 1518}
1516 1519
1517static void atmci_command_complete(struct atmel_mci *host, 1520static void atmci_command_complete(struct atmel_mci *host,
@@ -2137,7 +2140,7 @@ static irqreturn_t atmci_detect_interrupt(int irq, void *dev_id)
2137 return IRQ_HANDLED; 2140 return IRQ_HANDLED;
2138} 2141}
2139 2142
2140static int __init atmci_init_slot(struct atmel_mci *host, 2143static int atmci_init_slot(struct atmel_mci *host,
2141 struct mci_slot_pdata *slot_data, unsigned int id, 2144 struct mci_slot_pdata *slot_data, unsigned int id,
2142 u32 sdc_reg, u32 sdio_irq) 2145 u32 sdc_reg, u32 sdio_irq)
2143{ 2146{
@@ -2206,8 +2209,12 @@ static int __init atmci_init_slot(struct atmel_mci *host,
2206 } 2209 }
2207 } 2210 }
2208 2211
2209 if (!gpio_is_valid(slot->detect_pin)) 2212 if (!gpio_is_valid(slot->detect_pin)) {
2210 mmc->caps |= MMC_CAP_NEEDS_POLL; 2213 if (slot_data->non_removable)
2214 mmc->caps |= MMC_CAP_NONREMOVABLE;
2215 else
2216 mmc->caps |= MMC_CAP_NEEDS_POLL;
2217 }
2211 2218
2212 if (gpio_is_valid(slot->wp_pin)) { 2219 if (gpio_is_valid(slot->wp_pin)) {
2213 if (devm_gpio_request(&host->pdev->dev, slot->wp_pin, 2220 if (devm_gpio_request(&host->pdev->dev, slot->wp_pin,
@@ -2265,55 +2272,25 @@ static void atmci_cleanup_slot(struct atmel_mci_slot *slot,
2265 mmc_free_host(slot->mmc); 2272 mmc_free_host(slot->mmc);
2266} 2273}
2267 2274
2268static bool atmci_filter(struct dma_chan *chan, void *pdata) 2275static int atmci_configure_dma(struct atmel_mci *host)
2269{ 2276{
2270 struct mci_platform_data *sl_pdata = pdata; 2277 host->dma.chan = dma_request_slave_channel_reason(&host->pdev->dev,
2271 struct mci_dma_data *sl; 2278 "rxtx");
2272 2279 if (IS_ERR(host->dma.chan))
2273 if (!sl_pdata) 2280 return PTR_ERR(host->dma.chan);
2274 return false; 2281
2282 dev_info(&host->pdev->dev, "using %s for DMA transfers\n",
2283 dma_chan_name(host->dma.chan));
2284
2285 host->dma_conf.src_addr = host->mapbase + ATMCI_RDR;
2286 host->dma_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
2287 host->dma_conf.src_maxburst = 1;
2288 host->dma_conf.dst_addr = host->mapbase + ATMCI_TDR;
2289 host->dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
2290 host->dma_conf.dst_maxburst = 1;
2291 host->dma_conf.device_fc = false;
2275 2292
2276 sl = sl_pdata->dma_slave; 2293 return 0;
2277 if (sl && find_slave_dev(sl) == chan->device->dev) {
2278 chan->private = slave_data_ptr(sl);
2279 return true;
2280 } else {
2281 return false;
2282 }
2283}
2284
2285static bool atmci_configure_dma(struct atmel_mci *host)
2286{
2287 struct mci_platform_data *pdata;
2288 dma_cap_mask_t mask;
2289
2290 if (host == NULL)
2291 return false;
2292
2293 pdata = host->pdev->dev.platform_data;
2294
2295 dma_cap_zero(mask);
2296 dma_cap_set(DMA_SLAVE, mask);
2297
2298 host->dma.chan = dma_request_slave_channel_compat(mask, atmci_filter, pdata,
2299 &host->pdev->dev, "rxtx");
2300 if (!host->dma.chan) {
2301 dev_warn(&host->pdev->dev, "no DMA channel available\n");
2302 return false;
2303 } else {
2304 dev_info(&host->pdev->dev,
2305 "using %s for DMA transfers\n",
2306 dma_chan_name(host->dma.chan));
2307
2308 host->dma_conf.src_addr = host->mapbase + ATMCI_RDR;
2309 host->dma_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
2310 host->dma_conf.src_maxburst = 1;
2311 host->dma_conf.dst_addr = host->mapbase + ATMCI_TDR;
2312 host->dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
2313 host->dma_conf.dst_maxburst = 1;
2314 host->dma_conf.device_fc = false;
2315 return true;
2316 }
2317} 2294}
2318 2295
2319/* 2296/*
@@ -2321,7 +2298,7 @@ static bool atmci_configure_dma(struct atmel_mci *host)
2321 * HSMCI provides DMA support and a new config register but no more supports 2298 * HSMCI provides DMA support and a new config register but no more supports
2322 * PDC. 2299 * PDC.
2323 */ 2300 */
2324static void __init atmci_get_cap(struct atmel_mci *host) 2301static void atmci_get_cap(struct atmel_mci *host)
2325{ 2302{
2326 unsigned int version; 2303 unsigned int version;
2327 2304
@@ -2370,7 +2347,7 @@ static void __init atmci_get_cap(struct atmel_mci *host)
2370 } 2347 }
2371} 2348}
2372 2349
2373static int __init atmci_probe(struct platform_device *pdev) 2350static int atmci_probe(struct platform_device *pdev)
2374{ 2351{
2375 struct mci_platform_data *pdata; 2352 struct mci_platform_data *pdata;
2376 struct atmel_mci *host; 2353 struct atmel_mci *host;
@@ -2417,19 +2394,23 @@ static int __init atmci_probe(struct platform_device *pdev)
2417 2394
2418 atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST); 2395 atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
2419 host->bus_hz = clk_get_rate(host->mck); 2396 host->bus_hz = clk_get_rate(host->mck);
2420 clk_disable_unprepare(host->mck);
2421 2397
2422 host->mapbase = regs->start; 2398 host->mapbase = regs->start;
2423 2399
2424 tasklet_init(&host->tasklet, atmci_tasklet_func, (unsigned long)host); 2400 tasklet_init(&host->tasklet, atmci_tasklet_func, (unsigned long)host);
2425 2401
2426 ret = request_irq(irq, atmci_interrupt, 0, dev_name(&pdev->dev), host); 2402 ret = request_irq(irq, atmci_interrupt, 0, dev_name(&pdev->dev), host);
2427 if (ret) 2403 if (ret) {
2404 clk_disable_unprepare(host->mck);
2428 return ret; 2405 return ret;
2406 }
2429 2407
2430 /* Get MCI capabilities and set operations according to it */ 2408 /* Get MCI capabilities and set operations according to it */
2431 atmci_get_cap(host); 2409 atmci_get_cap(host);
2432 if (atmci_configure_dma(host)) { 2410 ret = atmci_configure_dma(host);
2411 if (ret == -EPROBE_DEFER)
2412 goto err_dma_probe_defer;
2413 if (ret == 0) {
2433 host->prepare_data = &atmci_prepare_data_dma; 2414 host->prepare_data = &atmci_prepare_data_dma;
2434 host->submit_data = &atmci_submit_data_dma; 2415 host->submit_data = &atmci_submit_data_dma;
2435 host->stop_transfer = &atmci_stop_transfer_dma; 2416 host->stop_transfer = &atmci_stop_transfer_dma;
@@ -2449,6 +2430,12 @@ static int __init atmci_probe(struct platform_device *pdev)
2449 2430
2450 setup_timer(&host->timer, atmci_timeout_timer, (unsigned long)host); 2431 setup_timer(&host->timer, atmci_timeout_timer, (unsigned long)host);
2451 2432
2433 pm_runtime_get_noresume(&pdev->dev);
2434 pm_runtime_set_active(&pdev->dev);
2435 pm_runtime_set_autosuspend_delay(&pdev->dev, AUTOSUSPEND_DELAY);
2436 pm_runtime_use_autosuspend(&pdev->dev);
2437 pm_runtime_enable(&pdev->dev);
2438
2452 /* We need at least one slot to succeed */ 2439 /* We need at least one slot to succeed */
2453 nr_slots = 0; 2440 nr_slots = 0;
2454 ret = -ENODEV; 2441 ret = -ENODEV;
@@ -2491,6 +2478,9 @@ static int __init atmci_probe(struct platform_device *pdev)
2491 "Atmel MCI controller at 0x%08lx irq %d, %u slots\n", 2478 "Atmel MCI controller at 0x%08lx irq %d, %u slots\n",
2492 host->mapbase, irq, nr_slots); 2479 host->mapbase, irq, nr_slots);
2493 2480
2481 pm_runtime_mark_last_busy(&host->pdev->dev);
2482 pm_runtime_put_autosuspend(&pdev->dev);
2483
2494 return 0; 2484 return 0;
2495 2485
2496err_dma_alloc: 2486err_dma_alloc:
@@ -2499,18 +2489,26 @@ err_dma_alloc:
2499 atmci_cleanup_slot(host->slot[i], i); 2489 atmci_cleanup_slot(host->slot[i], i);
2500 } 2490 }
2501err_init_slot: 2491err_init_slot:
2492 clk_disable_unprepare(host->mck);
2493
2494 pm_runtime_disable(&pdev->dev);
2495 pm_runtime_put_noidle(&pdev->dev);
2496
2502 del_timer_sync(&host->timer); 2497 del_timer_sync(&host->timer);
2503 if (host->dma.chan) 2498 if (!IS_ERR(host->dma.chan))
2504 dma_release_channel(host->dma.chan); 2499 dma_release_channel(host->dma.chan);
2500err_dma_probe_defer:
2505 free_irq(irq, host); 2501 free_irq(irq, host);
2506 return ret; 2502 return ret;
2507} 2503}
2508 2504
2509static int __exit atmci_remove(struct platform_device *pdev) 2505static int atmci_remove(struct platform_device *pdev)
2510{ 2506{
2511 struct atmel_mci *host = platform_get_drvdata(pdev); 2507 struct atmel_mci *host = platform_get_drvdata(pdev);
2512 unsigned int i; 2508 unsigned int i;
2513 2509
2510 pm_runtime_get_sync(&pdev->dev);
2511
2514 if (host->buffer) 2512 if (host->buffer)
2515 dma_free_coherent(&pdev->dev, host->buf_size, 2513 dma_free_coherent(&pdev->dev, host->buf_size,
2516 host->buffer, host->buf_phys_addr); 2514 host->buffer, host->buf_phys_addr);
@@ -2520,41 +2518,62 @@ static int __exit atmci_remove(struct platform_device *pdev)
2520 atmci_cleanup_slot(host->slot[i], i); 2518 atmci_cleanup_slot(host->slot[i], i);
2521 } 2519 }
2522 2520
2523 clk_prepare_enable(host->mck);
2524 atmci_writel(host, ATMCI_IDR, ~0UL); 2521 atmci_writel(host, ATMCI_IDR, ~0UL);
2525 atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIDIS); 2522 atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIDIS);
2526 atmci_readl(host, ATMCI_SR); 2523 atmci_readl(host, ATMCI_SR);
2527 clk_disable_unprepare(host->mck);
2528 2524
2529 del_timer_sync(&host->timer); 2525 del_timer_sync(&host->timer);
2530 if (host->dma.chan) 2526 if (!IS_ERR(host->dma.chan))
2531 dma_release_channel(host->dma.chan); 2527 dma_release_channel(host->dma.chan);
2532 2528
2533 free_irq(platform_get_irq(pdev, 0), host); 2529 free_irq(platform_get_irq(pdev, 0), host);
2534 2530
2531 clk_disable_unprepare(host->mck);
2532
2533 pm_runtime_disable(&pdev->dev);
2534 pm_runtime_put_noidle(&pdev->dev);
2535
2535 return 0; 2536 return 0;
2536} 2537}
2537 2538
2538static struct platform_driver atmci_driver = { 2539#ifdef CONFIG_PM
2539 .remove = __exit_p(atmci_remove), 2540static int atmci_runtime_suspend(struct device *dev)
2540 .driver = {
2541 .name = "atmel_mci",
2542 .of_match_table = of_match_ptr(atmci_dt_ids),
2543 },
2544};
2545
2546static int __init atmci_init(void)
2547{ 2541{
2548 return platform_driver_probe(&atmci_driver, atmci_probe); 2542 struct atmel_mci *host = dev_get_drvdata(dev);
2543
2544 clk_disable_unprepare(host->mck);
2545
2546 pinctrl_pm_select_sleep_state(dev);
2547
2548 return 0;
2549} 2549}
2550 2550
2551static void __exit atmci_exit(void) 2551static int atmci_runtime_resume(struct device *dev)
2552{ 2552{
2553 platform_driver_unregister(&atmci_driver); 2553 struct atmel_mci *host = dev_get_drvdata(dev);
2554
2555 pinctrl_pm_select_default_state(dev);
2556
2557 return clk_prepare_enable(host->mck);
2554} 2558}
2559#endif
2555 2560
2556late_initcall(atmci_init); /* try to load after dma driver when built-in */ 2561static const struct dev_pm_ops atmci_dev_pm_ops = {
2557module_exit(atmci_exit); 2562 SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
2563 pm_runtime_force_resume)
2564 SET_PM_RUNTIME_PM_OPS(atmci_runtime_suspend, atmci_runtime_resume, NULL)
2565};
2566
2567static struct platform_driver atmci_driver = {
2568 .probe = atmci_probe,
2569 .remove = atmci_remove,
2570 .driver = {
2571 .name = "atmel_mci",
2572 .of_match_table = of_match_ptr(atmci_dt_ids),
2573 .pm = &atmci_dev_pm_ops,
2574 },
2575};
2576module_platform_driver(atmci_driver);
2558 2577
2559MODULE_DESCRIPTION("Atmel Multimedia Card Interface driver"); 2578MODULE_DESCRIPTION("Atmel Multimedia Card Interface driver");
2560MODULE_AUTHOR("Haavard Skinnemoen (Atmel)"); 2579MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c
index 0fbc53ac7eae..509365cb22c6 100644
--- a/drivers/mmc/host/dw_mmc-exynos.c
+++ b/drivers/mmc/host/dw_mmc-exynos.c
@@ -25,6 +25,7 @@
25#define NUM_PINS(x) (x + 2) 25#define NUM_PINS(x) (x + 2)
26 26
27#define SDMMC_CLKSEL 0x09C 27#define SDMMC_CLKSEL 0x09C
28#define SDMMC_CLKSEL64 0x0A8
28#define SDMMC_CLKSEL_CCLK_SAMPLE(x) (((x) & 7) << 0) 29#define SDMMC_CLKSEL_CCLK_SAMPLE(x) (((x) & 7) << 0)
29#define SDMMC_CLKSEL_CCLK_DRIVE(x) (((x) & 7) << 16) 30#define SDMMC_CLKSEL_CCLK_DRIVE(x) (((x) & 7) << 16)
30#define SDMMC_CLKSEL_CCLK_DIVIDER(x) (((x) & 7) << 24) 31#define SDMMC_CLKSEL_CCLK_DIVIDER(x) (((x) & 7) << 24)
@@ -65,6 +66,8 @@ enum dw_mci_exynos_type {
65 DW_MCI_TYPE_EXYNOS5250, 66 DW_MCI_TYPE_EXYNOS5250,
66 DW_MCI_TYPE_EXYNOS5420, 67 DW_MCI_TYPE_EXYNOS5420,
67 DW_MCI_TYPE_EXYNOS5420_SMU, 68 DW_MCI_TYPE_EXYNOS5420_SMU,
69 DW_MCI_TYPE_EXYNOS7,
70 DW_MCI_TYPE_EXYNOS7_SMU,
68}; 71};
69 72
70/* Exynos implementation specific driver private data */ 73/* Exynos implementation specific driver private data */
@@ -95,6 +98,12 @@ static struct dw_mci_exynos_compatible {
95 }, { 98 }, {
96 .compatible = "samsung,exynos5420-dw-mshc-smu", 99 .compatible = "samsung,exynos5420-dw-mshc-smu",
97 .ctrl_type = DW_MCI_TYPE_EXYNOS5420_SMU, 100 .ctrl_type = DW_MCI_TYPE_EXYNOS5420_SMU,
101 }, {
102 .compatible = "samsung,exynos7-dw-mshc",
103 .ctrl_type = DW_MCI_TYPE_EXYNOS7,
104 }, {
105 .compatible = "samsung,exynos7-dw-mshc-smu",
106 .ctrl_type = DW_MCI_TYPE_EXYNOS7_SMU,
98 }, 107 },
99}; 108};
100 109
@@ -102,7 +111,8 @@ static int dw_mci_exynos_priv_init(struct dw_mci *host)
102{ 111{
103 struct dw_mci_exynos_priv_data *priv = host->priv; 112 struct dw_mci_exynos_priv_data *priv = host->priv;
104 113
105 if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS5420_SMU) { 114 if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS5420_SMU ||
115 priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU) {
106 mci_writel(host, MPSBEGIN0, 0); 116 mci_writel(host, MPSBEGIN0, 0);
107 mci_writel(host, MPSEND0, DWMCI_BLOCK_NUM); 117 mci_writel(host, MPSEND0, DWMCI_BLOCK_NUM);
108 mci_writel(host, MPSCTRL0, DWMCI_MPSCTRL_SECURE_WRITE_BIT | 118 mci_writel(host, MPSCTRL0, DWMCI_MPSCTRL_SECURE_WRITE_BIT |
@@ -153,11 +163,22 @@ static int dw_mci_exynos_resume(struct device *dev)
153static int dw_mci_exynos_resume_noirq(struct device *dev) 163static int dw_mci_exynos_resume_noirq(struct device *dev)
154{ 164{
155 struct dw_mci *host = dev_get_drvdata(dev); 165 struct dw_mci *host = dev_get_drvdata(dev);
166 struct dw_mci_exynos_priv_data *priv = host->priv;
156 u32 clksel; 167 u32 clksel;
157 168
158 clksel = mci_readl(host, CLKSEL); 169 if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
159 if (clksel & SDMMC_CLKSEL_WAKEUP_INT) 170 priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU)
160 mci_writel(host, CLKSEL, clksel); 171 clksel = mci_readl(host, CLKSEL64);
172 else
173 clksel = mci_readl(host, CLKSEL);
174
175 if (clksel & SDMMC_CLKSEL_WAKEUP_INT) {
176 if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
177 priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU)
178 mci_writel(host, CLKSEL64, clksel);
179 else
180 mci_writel(host, CLKSEL, clksel);
181 }
161 182
162 return 0; 183 return 0;
163} 184}
@@ -169,6 +190,7 @@ static int dw_mci_exynos_resume_noirq(struct device *dev)
169 190
170static void dw_mci_exynos_prepare_command(struct dw_mci *host, u32 *cmdr) 191static void dw_mci_exynos_prepare_command(struct dw_mci *host, u32 *cmdr)
171{ 192{
193 struct dw_mci_exynos_priv_data *priv = host->priv;
172 /* 194 /*
173 * Exynos4412 and Exynos5250 extends the use of CMD register with the 195 * Exynos4412 and Exynos5250 extends the use of CMD register with the
174 * use of bit 29 (which is reserved on standard MSHC controllers) for 196 * use of bit 29 (which is reserved on standard MSHC controllers) for
@@ -176,8 +198,14 @@ static void dw_mci_exynos_prepare_command(struct dw_mci *host, u32 *cmdr)
176 * HOLD register should be bypassed in case there is no phase shift 198 * HOLD register should be bypassed in case there is no phase shift
177 * applied on CMD/DATA that is sent to the card. 199 * applied on CMD/DATA that is sent to the card.
178 */ 200 */
179 if (SDMMC_CLKSEL_GET_DRV_WD3(mci_readl(host, CLKSEL))) 201 if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
180 *cmdr |= SDMMC_CMD_USE_HOLD_REG; 202 priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU) {
203 if (SDMMC_CLKSEL_GET_DRV_WD3(mci_readl(host, CLKSEL64)))
204 *cmdr |= SDMMC_CMD_USE_HOLD_REG;
205 } else {
206 if (SDMMC_CLKSEL_GET_DRV_WD3(mci_readl(host, CLKSEL)))
207 *cmdr |= SDMMC_CMD_USE_HOLD_REG;
208 }
181} 209}
182 210
183static void dw_mci_exynos_set_ios(struct dw_mci *host, struct mmc_ios *ios) 211static void dw_mci_exynos_set_ios(struct dw_mci *host, struct mmc_ios *ios)
@@ -188,12 +216,20 @@ static void dw_mci_exynos_set_ios(struct dw_mci *host, struct mmc_ios *ios)
188 u8 div = priv->ciu_div + 1; 216 u8 div = priv->ciu_div + 1;
189 217
190 if (ios->timing == MMC_TIMING_MMC_DDR52) { 218 if (ios->timing == MMC_TIMING_MMC_DDR52) {
191 mci_writel(host, CLKSEL, priv->ddr_timing); 219 if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
220 priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU)
221 mci_writel(host, CLKSEL64, priv->ddr_timing);
222 else
223 mci_writel(host, CLKSEL, priv->ddr_timing);
192 /* Should be double rate for DDR mode */ 224 /* Should be double rate for DDR mode */
193 if (ios->bus_width == MMC_BUS_WIDTH_8) 225 if (ios->bus_width == MMC_BUS_WIDTH_8)
194 wanted <<= 1; 226 wanted <<= 1;
195 } else { 227 } else {
196 mci_writel(host, CLKSEL, priv->sdr_timing); 228 if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
229 priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU)
230 mci_writel(host, CLKSEL64, priv->sdr_timing);
231 else
232 mci_writel(host, CLKSEL, priv->sdr_timing);
197 } 233 }
198 234
199 /* Don't care if wanted clock is zero */ 235 /* Don't care if wanted clock is zero */
@@ -265,26 +301,51 @@ static int dw_mci_exynos_parse_dt(struct dw_mci *host)
265 301
266static inline u8 dw_mci_exynos_get_clksmpl(struct dw_mci *host) 302static inline u8 dw_mci_exynos_get_clksmpl(struct dw_mci *host)
267{ 303{
268 return SDMMC_CLKSEL_CCLK_SAMPLE(mci_readl(host, CLKSEL)); 304 struct dw_mci_exynos_priv_data *priv = host->priv;
305
306 if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
307 priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU)
308 return SDMMC_CLKSEL_CCLK_SAMPLE(mci_readl(host, CLKSEL64));
309 else
310 return SDMMC_CLKSEL_CCLK_SAMPLE(mci_readl(host, CLKSEL));
269} 311}
270 312
271static inline void dw_mci_exynos_set_clksmpl(struct dw_mci *host, u8 sample) 313static inline void dw_mci_exynos_set_clksmpl(struct dw_mci *host, u8 sample)
272{ 314{
273 u32 clksel; 315 u32 clksel;
274 clksel = mci_readl(host, CLKSEL); 316 struct dw_mci_exynos_priv_data *priv = host->priv;
317
318 if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
319 priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU)
320 clksel = mci_readl(host, CLKSEL64);
321 else
322 clksel = mci_readl(host, CLKSEL);
275 clksel = (clksel & ~0x7) | SDMMC_CLKSEL_CCLK_SAMPLE(sample); 323 clksel = (clksel & ~0x7) | SDMMC_CLKSEL_CCLK_SAMPLE(sample);
276 mci_writel(host, CLKSEL, clksel); 324 if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
325 priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU)
326 mci_writel(host, CLKSEL64, clksel);
327 else
328 mci_writel(host, CLKSEL, clksel);
277} 329}
278 330
279static inline u8 dw_mci_exynos_move_next_clksmpl(struct dw_mci *host) 331static inline u8 dw_mci_exynos_move_next_clksmpl(struct dw_mci *host)
280{ 332{
333 struct dw_mci_exynos_priv_data *priv = host->priv;
281 u32 clksel; 334 u32 clksel;
282 u8 sample; 335 u8 sample;
283 336
284 clksel = mci_readl(host, CLKSEL); 337 if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
338 priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU)
339 clksel = mci_readl(host, CLKSEL64);
340 else
341 clksel = mci_readl(host, CLKSEL);
285 sample = (clksel + 1) & 0x7; 342 sample = (clksel + 1) & 0x7;
286 clksel = (clksel & ~0x7) | sample; 343 clksel = (clksel & ~0x7) | sample;
287 mci_writel(host, CLKSEL, clksel); 344 if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
345 priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU)
346 mci_writel(host, CLKSEL64, clksel);
347 else
348 mci_writel(host, CLKSEL, clksel);
288 return sample; 349 return sample;
289} 350}
290 351
@@ -411,6 +472,10 @@ static const struct of_device_id dw_mci_exynos_match[] = {
411 .data = &exynos_drv_data, }, 472 .data = &exynos_drv_data, },
412 { .compatible = "samsung,exynos5420-dw-mshc-smu", 473 { .compatible = "samsung,exynos5420-dw-mshc-smu",
413 .data = &exynos_drv_data, }, 474 .data = &exynos_drv_data, },
475 { .compatible = "samsung,exynos7-dw-mshc",
476 .data = &exynos_drv_data, },
477 { .compatible = "samsung,exynos7-dw-mshc-smu",
478 .data = &exynos_drv_data, },
414 {}, 479 {},
415}; 480};
416MODULE_DEVICE_TABLE(of, dw_mci_exynos_match); 481MODULE_DEVICE_TABLE(of, dw_mci_exynos_match);
diff --git a/drivers/mmc/host/dw_mmc-pltfm.c b/drivers/mmc/host/dw_mmc-pltfm.c
index 8b6572162ed9..ec6dbcdec693 100644
--- a/drivers/mmc/host/dw_mmc-pltfm.c
+++ b/drivers/mmc/host/dw_mmc-pltfm.c
@@ -35,6 +35,10 @@ static const struct dw_mci_drv_data socfpga_drv_data = {
35 .prepare_command = dw_mci_pltfm_prepare_command, 35 .prepare_command = dw_mci_pltfm_prepare_command,
36}; 36};
37 37
38static const struct dw_mci_drv_data pistachio_drv_data = {
39 .prepare_command = dw_mci_pltfm_prepare_command,
40};
41
38int dw_mci_pltfm_register(struct platform_device *pdev, 42int dw_mci_pltfm_register(struct platform_device *pdev,
39 const struct dw_mci_drv_data *drv_data) 43 const struct dw_mci_drv_data *drv_data)
40{ 44{
@@ -90,6 +94,8 @@ static const struct of_device_id dw_mci_pltfm_match[] = {
90 { .compatible = "snps,dw-mshc", }, 94 { .compatible = "snps,dw-mshc", },
91 { .compatible = "altr,socfpga-dw-mshc", 95 { .compatible = "altr,socfpga-dw-mshc",
92 .data = &socfpga_drv_data }, 96 .data = &socfpga_drv_data },
97 { .compatible = "img,pistachio-dw-mshc",
98 .data = &pistachio_drv_data },
93 {}, 99 {},
94}; 100};
95MODULE_DEVICE_TABLE(of, dw_mci_pltfm_match); 101MODULE_DEVICE_TABLE(of, dw_mci_pltfm_match);
diff --git a/drivers/mmc/host/dw_mmc-rockchip.c b/drivers/mmc/host/dw_mmc-rockchip.c
index f0c2cb1a210d..5650ac488cf3 100644
--- a/drivers/mmc/host/dw_mmc-rockchip.c
+++ b/drivers/mmc/host/dw_mmc-rockchip.c
@@ -37,6 +37,9 @@ static void dw_mci_rk3288_set_ios(struct dw_mci *host, struct mmc_ios *ios)
37 unsigned int cclkin; 37 unsigned int cclkin;
38 u32 bus_hz; 38 u32 bus_hz;
39 39
40 if (ios->clock == 0)
41 return;
42
40 /* 43 /*
41 * cclkin: source clock of mmc controller 44 * cclkin: source clock of mmc controller
42 * bus_hz: card interface clock generated by CLKGEN 45 * bus_hz: card interface clock generated by CLKGEN
@@ -65,14 +68,24 @@ static void dw_mci_rk3288_set_ios(struct dw_mci *host, struct mmc_ios *ios)
65 } 68 }
66} 69}
67 70
71static int dw_mci_rockchip_init(struct dw_mci *host)
72{
73 /* It is slot 8 on Rockchip SoCs */
74 host->sdio_id0 = 8;
75
76 return 0;
77}
78
68static const struct dw_mci_drv_data rk2928_drv_data = { 79static const struct dw_mci_drv_data rk2928_drv_data = {
69 .prepare_command = dw_mci_rockchip_prepare_command, 80 .prepare_command = dw_mci_rockchip_prepare_command,
81 .init = dw_mci_rockchip_init,
70}; 82};
71 83
72static const struct dw_mci_drv_data rk3288_drv_data = { 84static const struct dw_mci_drv_data rk3288_drv_data = {
73 .prepare_command = dw_mci_rockchip_prepare_command, 85 .prepare_command = dw_mci_rockchip_prepare_command,
74 .set_ios = dw_mci_rk3288_set_ios, 86 .set_ios = dw_mci_rk3288_set_ios,
75 .setup_clock = dw_mci_rk3288_setup_clock, 87 .setup_clock = dw_mci_rk3288_setup_clock,
88 .init = dw_mci_rockchip_init,
76}; 89};
77 90
78static const struct of_device_id dw_mci_rockchip_match[] = { 91static const struct of_device_id dw_mci_rockchip_match[] = {
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 69f0cc68d5b2..67c04518ec4c 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -34,7 +34,6 @@
34#include <linux/mmc/dw_mmc.h> 34#include <linux/mmc/dw_mmc.h>
35#include <linux/bitops.h> 35#include <linux/bitops.h>
36#include <linux/regulator/consumer.h> 36#include <linux/regulator/consumer.h>
37#include <linux/workqueue.h>
38#include <linux/of.h> 37#include <linux/of.h>
39#include <linux/of_gpio.h> 38#include <linux/of_gpio.h>
40#include <linux/mmc/slot-gpio.h> 39#include <linux/mmc/slot-gpio.h>
@@ -62,6 +61,24 @@
62 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \ 61 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
63 SDMMC_IDMAC_INT_TI) 62 SDMMC_IDMAC_INT_TI)
64 63
64struct idmac_desc_64addr {
65 u32 des0; /* Control Descriptor */
66
67 u32 des1; /* Reserved */
68
69 u32 des2; /*Buffer sizes */
70#define IDMAC_64ADDR_SET_BUFFER1_SIZE(d, s) \
71 ((d)->des2 = ((d)->des2 & 0x03ffe000) | ((s) & 0x1fff))
72
73 u32 des3; /* Reserved */
74
75 u32 des4; /* Lower 32-bits of Buffer Address Pointer 1*/
76 u32 des5; /* Upper 32-bits of Buffer Address Pointer 1*/
77
78 u32 des6; /* Lower 32-bits of Next Descriptor Address */
79 u32 des7; /* Upper 32-bits of Next Descriptor Address */
80};
81
65struct idmac_desc { 82struct idmac_desc {
66 u32 des0; /* Control Descriptor */ 83 u32 des0; /* Control Descriptor */
67#define IDMAC_DES0_DIC BIT(1) 84#define IDMAC_DES0_DIC BIT(1)
@@ -83,6 +100,7 @@ struct idmac_desc {
83#endif /* CONFIG_MMC_DW_IDMAC */ 100#endif /* CONFIG_MMC_DW_IDMAC */
84 101
85static bool dw_mci_reset(struct dw_mci *host); 102static bool dw_mci_reset(struct dw_mci *host);
103static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset);
86 104
87#if defined(CONFIG_DEBUG_FS) 105#if defined(CONFIG_DEBUG_FS)
88static int dw_mci_req_show(struct seq_file *s, void *v) 106static int dw_mci_req_show(struct seq_file *s, void *v)
@@ -414,30 +432,66 @@ static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
414 unsigned int sg_len) 432 unsigned int sg_len)
415{ 433{
416 int i; 434 int i;
417 struct idmac_desc *desc = host->sg_cpu; 435 if (host->dma_64bit_address == 1) {
436 struct idmac_desc_64addr *desc = host->sg_cpu;
418 437
419 for (i = 0; i < sg_len; i++, desc++) { 438 for (i = 0; i < sg_len; i++, desc++) {
420 unsigned int length = sg_dma_len(&data->sg[i]); 439 unsigned int length = sg_dma_len(&data->sg[i]);
421 u32 mem_addr = sg_dma_address(&data->sg[i]); 440 u64 mem_addr = sg_dma_address(&data->sg[i]);
422 441
423 /* Set the OWN bit and disable interrupts for this descriptor */ 442 /*
424 desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH; 443 * Set the OWN bit and disable interrupts for this
444 * descriptor
445 */
446 desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
447 IDMAC_DES0_CH;
448 /* Buffer length */
449 IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, length);
450
451 /* Physical address to DMA to/from */
452 desc->des4 = mem_addr & 0xffffffff;
453 desc->des5 = mem_addr >> 32;
454 }
425 455
426 /* Buffer length */ 456 /* Set first descriptor */
427 IDMAC_SET_BUFFER1_SIZE(desc, length); 457 desc = host->sg_cpu;
458 desc->des0 |= IDMAC_DES0_FD;
428 459
429 /* Physical address to DMA to/from */ 460 /* Set last descriptor */
430 desc->des2 = mem_addr; 461 desc = host->sg_cpu + (i - 1) *
431 } 462 sizeof(struct idmac_desc_64addr);
463 desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
464 desc->des0 |= IDMAC_DES0_LD;
432 465
433 /* Set first descriptor */ 466 } else {
434 desc = host->sg_cpu; 467 struct idmac_desc *desc = host->sg_cpu;
435 desc->des0 |= IDMAC_DES0_FD; 468
469 for (i = 0; i < sg_len; i++, desc++) {
470 unsigned int length = sg_dma_len(&data->sg[i]);
471 u32 mem_addr = sg_dma_address(&data->sg[i]);
472
473 /*
474 * Set the OWN bit and disable interrupts for this
475 * descriptor
476 */
477 desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
478 IDMAC_DES0_CH;
479 /* Buffer length */
480 IDMAC_SET_BUFFER1_SIZE(desc, length);
436 481
437 /* Set last descriptor */ 482 /* Physical address to DMA to/from */
438 desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc); 483 desc->des2 = mem_addr;
439 desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC); 484 }
440 desc->des0 |= IDMAC_DES0_LD; 485
486 /* Set first descriptor */
487 desc = host->sg_cpu;
488 desc->des0 |= IDMAC_DES0_FD;
489
490 /* Set last descriptor */
491 desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
492 desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
493 desc->des0 |= IDMAC_DES0_LD;
494 }
441 495
442 wmb(); 496 wmb();
443} 497}
@@ -448,6 +502,10 @@ static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
448 502
449 dw_mci_translate_sglist(host, host->data, sg_len); 503 dw_mci_translate_sglist(host, host->data, sg_len);
450 504
505 /* Make sure to reset DMA in case we did PIO before this */
506 dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);
507 dw_mci_idmac_reset(host);
508
451 /* Select IDMAC interface */ 509 /* Select IDMAC interface */
452 temp = mci_readl(host, CTRL); 510 temp = mci_readl(host, CTRL);
453 temp |= SDMMC_CTRL_USE_IDMAC; 511 temp |= SDMMC_CTRL_USE_IDMAC;
@@ -466,29 +524,71 @@ static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
466 524
467static int dw_mci_idmac_init(struct dw_mci *host) 525static int dw_mci_idmac_init(struct dw_mci *host)
468{ 526{
469 struct idmac_desc *p;
470 int i; 527 int i;
471 528
472 /* Number of descriptors in the ring buffer */ 529 if (host->dma_64bit_address == 1) {
473 host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc); 530 struct idmac_desc_64addr *p;
531 /* Number of descriptors in the ring buffer */
532 host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc_64addr);
533
534 /* Forward link the descriptor list */
535 for (i = 0, p = host->sg_cpu; i < host->ring_size - 1;
536 i++, p++) {
537 p->des6 = (host->sg_dma +
538 (sizeof(struct idmac_desc_64addr) *
539 (i + 1))) & 0xffffffff;
540
541 p->des7 = (u64)(host->sg_dma +
542 (sizeof(struct idmac_desc_64addr) *
543 (i + 1))) >> 32;
544 /* Initialize reserved and buffer size fields to "0" */
545 p->des1 = 0;
546 p->des2 = 0;
547 p->des3 = 0;
548 }
549
550 /* Set the last descriptor as the end-of-ring descriptor */
551 p->des6 = host->sg_dma & 0xffffffff;
552 p->des7 = (u64)host->sg_dma >> 32;
553 p->des0 = IDMAC_DES0_ER;
554
555 } else {
556 struct idmac_desc *p;
557 /* Number of descriptors in the ring buffer */
558 host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
474 559
475 /* Forward link the descriptor list */ 560 /* Forward link the descriptor list */
476 for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++) 561 for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
477 p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1)); 562 p->des3 = host->sg_dma + (sizeof(struct idmac_desc) *
563 (i + 1));
478 564
479 /* Set the last descriptor as the end-of-ring descriptor */ 565 /* Set the last descriptor as the end-of-ring descriptor */
480 p->des3 = host->sg_dma; 566 p->des3 = host->sg_dma;
481 p->des0 = IDMAC_DES0_ER; 567 p->des0 = IDMAC_DES0_ER;
568 }
482 569
483 dw_mci_idmac_reset(host); 570 dw_mci_idmac_reset(host);
484 571
485 /* Mask out interrupts - get Tx & Rx complete only */ 572 if (host->dma_64bit_address == 1) {
486 mci_writel(host, IDSTS, IDMAC_INT_CLR); 573 /* Mask out interrupts - get Tx & Rx complete only */
487 mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI | 574 mci_writel(host, IDSTS64, IDMAC_INT_CLR);
488 SDMMC_IDMAC_INT_TI); 575 mci_writel(host, IDINTEN64, SDMMC_IDMAC_INT_NI |
576 SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);
577
578 /* Set the descriptor base address */
579 mci_writel(host, DBADDRL, host->sg_dma & 0xffffffff);
580 mci_writel(host, DBADDRU, (u64)host->sg_dma >> 32);
581
582 } else {
583 /* Mask out interrupts - get Tx & Rx complete only */
584 mci_writel(host, IDSTS, IDMAC_INT_CLR);
585 mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI |
586 SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);
587
588 /* Set the descriptor base address */
589 mci_writel(host, DBADDR, host->sg_dma);
590 }
489 591
490 /* Set the descriptor base address */
491 mci_writel(host, DBADDR, host->sg_dma);
492 return 0; 592 return 0;
493} 593}
494 594
@@ -626,6 +726,13 @@ static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
626 726
627 WARN_ON(!(data->flags & MMC_DATA_READ)); 727 WARN_ON(!(data->flags & MMC_DATA_READ));
628 728
729 /*
730 * CDTHRCTL doesn't exist prior to 240A (in fact that register offset is
731 * in the FIFO region, so we really shouldn't access it).
732 */
733 if (host->verid < DW_MMC_240A)
734 return;
735
629 if (host->timing != MMC_TIMING_MMC_HS200 && 736 if (host->timing != MMC_TIMING_MMC_HS200 &&
630 host->timing != MMC_TIMING_UHS_SDR104) 737 host->timing != MMC_TIMING_UHS_SDR104)
631 goto disable; 738 goto disable;
@@ -819,7 +926,7 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
819 926
820 /* enable clock; only low power if no SDIO */ 927 /* enable clock; only low power if no SDIO */
821 clk_en_a = SDMMC_CLKEN_ENABLE << slot->id; 928 clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
822 if (!(mci_readl(host, INTMASK) & SDMMC_INT_SDIO(slot->id))) 929 if (!(mci_readl(host, INTMASK) & SDMMC_INT_SDIO(slot->sdio_id)))
823 clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id; 930 clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
824 mci_writel(host, CLKENA, clk_en_a); 931 mci_writel(host, CLKENA, clk_en_a);
825 932
@@ -1075,7 +1182,7 @@ static int dw_mci_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
1075 ret = regulator_set_voltage(mmc->supply.vqmmc, min_uv, max_uv); 1182 ret = regulator_set_voltage(mmc->supply.vqmmc, min_uv, max_uv);
1076 1183
1077 if (ret) { 1184 if (ret) {
1078 dev_err(&mmc->class_dev, 1185 dev_dbg(&mmc->class_dev,
1079 "Regulator set error %d: %d - %d\n", 1186 "Regulator set error %d: %d - %d\n",
1080 ret, min_uv, max_uv); 1187 ret, min_uv, max_uv);
1081 return ret; 1188 return ret;
@@ -1180,10 +1287,10 @@ static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
1180 dw_mci_disable_low_power(slot); 1287 dw_mci_disable_low_power(slot);
1181 1288
1182 mci_writel(host, INTMASK, 1289 mci_writel(host, INTMASK,
1183 (int_mask | SDMMC_INT_SDIO(slot->id))); 1290 (int_mask | SDMMC_INT_SDIO(slot->sdio_id)));
1184 } else { 1291 } else {
1185 mci_writel(host, INTMASK, 1292 mci_writel(host, INTMASK,
1186 (int_mask & ~SDMMC_INT_SDIO(slot->id))); 1293 (int_mask & ~SDMMC_INT_SDIO(slot->sdio_id)));
1187 } 1294 }
1188} 1295}
1189 1296
@@ -1954,6 +2061,23 @@ static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
1954 tasklet_schedule(&host->tasklet); 2061 tasklet_schedule(&host->tasklet);
1955} 2062}
1956 2063
2064static void dw_mci_handle_cd(struct dw_mci *host)
2065{
2066 int i;
2067
2068 for (i = 0; i < host->num_slots; i++) {
2069 struct dw_mci_slot *slot = host->slot[i];
2070
2071 if (!slot)
2072 continue;
2073
2074 if (slot->mmc->ops->card_event)
2075 slot->mmc->ops->card_event(slot->mmc);
2076 mmc_detect_change(slot->mmc,
2077 msecs_to_jiffies(host->pdata->detect_delay_ms));
2078 }
2079}
2080
1957static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) 2081static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
1958{ 2082{
1959 struct dw_mci *host = dev_id; 2083 struct dw_mci *host = dev_id;
@@ -2029,14 +2153,15 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
2029 2153
2030 if (pending & SDMMC_INT_CD) { 2154 if (pending & SDMMC_INT_CD) {
2031 mci_writel(host, RINTSTS, SDMMC_INT_CD); 2155 mci_writel(host, RINTSTS, SDMMC_INT_CD);
2032 queue_work(host->card_workqueue, &host->card_work); 2156 dw_mci_handle_cd(host);
2033 } 2157 }
2034 2158
2035 /* Handle SDIO Interrupts */ 2159 /* Handle SDIO Interrupts */
2036 for (i = 0; i < host->num_slots; i++) { 2160 for (i = 0; i < host->num_slots; i++) {
2037 struct dw_mci_slot *slot = host->slot[i]; 2161 struct dw_mci_slot *slot = host->slot[i];
2038 if (pending & SDMMC_INT_SDIO(i)) { 2162 if (pending & SDMMC_INT_SDIO(slot->sdio_id)) {
2039 mci_writel(host, RINTSTS, SDMMC_INT_SDIO(i)); 2163 mci_writel(host, RINTSTS,
2164 SDMMC_INT_SDIO(slot->sdio_id));
2040 mmc_signal_sdio_irq(slot->mmc); 2165 mmc_signal_sdio_irq(slot->mmc);
2041 } 2166 }
2042 } 2167 }
@@ -2045,99 +2170,28 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
2045 2170
2046#ifdef CONFIG_MMC_DW_IDMAC 2171#ifdef CONFIG_MMC_DW_IDMAC
2047 /* Handle DMA interrupts */ 2172 /* Handle DMA interrupts */
2048 pending = mci_readl(host, IDSTS); 2173 if (host->dma_64bit_address == 1) {
2049 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) { 2174 pending = mci_readl(host, IDSTS64);
2050 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI); 2175 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
2051 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI); 2176 mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_TI |
2052 host->dma_ops->complete(host); 2177 SDMMC_IDMAC_INT_RI);
2178 mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_NI);
2179 host->dma_ops->complete(host);
2180 }
2181 } else {
2182 pending = mci_readl(host, IDSTS);
2183 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
2184 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI |
2185 SDMMC_IDMAC_INT_RI);
2186 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
2187 host->dma_ops->complete(host);
2188 }
2053 } 2189 }
2054#endif 2190#endif
2055 2191
2056 return IRQ_HANDLED; 2192 return IRQ_HANDLED;
2057} 2193}
2058 2194
2059static void dw_mci_work_routine_card(struct work_struct *work)
2060{
2061 struct dw_mci *host = container_of(work, struct dw_mci, card_work);
2062 int i;
2063
2064 for (i = 0; i < host->num_slots; i++) {
2065 struct dw_mci_slot *slot = host->slot[i];
2066 struct mmc_host *mmc = slot->mmc;
2067 struct mmc_request *mrq;
2068 int present;
2069
2070 present = dw_mci_get_cd(mmc);
2071 while (present != slot->last_detect_state) {
2072 dev_dbg(&slot->mmc->class_dev, "card %s\n",
2073 present ? "inserted" : "removed");
2074
2075 spin_lock_bh(&host->lock);
2076
2077 /* Card change detected */
2078 slot->last_detect_state = present;
2079
2080 /* Clean up queue if present */
2081 mrq = slot->mrq;
2082 if (mrq) {
2083 if (mrq == host->mrq) {
2084 host->data = NULL;
2085 host->cmd = NULL;
2086
2087 switch (host->state) {
2088 case STATE_IDLE:
2089 case STATE_WAITING_CMD11_DONE:
2090 break;
2091 case STATE_SENDING_CMD11:
2092 case STATE_SENDING_CMD:
2093 mrq->cmd->error = -ENOMEDIUM;
2094 if (!mrq->data)
2095 break;
2096 /* fall through */
2097 case STATE_SENDING_DATA:
2098 mrq->data->error = -ENOMEDIUM;
2099 dw_mci_stop_dma(host);
2100 break;
2101 case STATE_DATA_BUSY:
2102 case STATE_DATA_ERROR:
2103 if (mrq->data->error == -EINPROGRESS)
2104 mrq->data->error = -ENOMEDIUM;
2105 /* fall through */
2106 case STATE_SENDING_STOP:
2107 if (mrq->stop)
2108 mrq->stop->error = -ENOMEDIUM;
2109 break;
2110 }
2111
2112 dw_mci_request_end(host, mrq);
2113 } else {
2114 list_del(&slot->queue_node);
2115 mrq->cmd->error = -ENOMEDIUM;
2116 if (mrq->data)
2117 mrq->data->error = -ENOMEDIUM;
2118 if (mrq->stop)
2119 mrq->stop->error = -ENOMEDIUM;
2120
2121 spin_unlock(&host->lock);
2122 mmc_request_done(slot->mmc, mrq);
2123 spin_lock(&host->lock);
2124 }
2125 }
2126
2127 /* Power down slot */
2128 if (present == 0)
2129 dw_mci_reset(host);
2130
2131 spin_unlock_bh(&host->lock);
2132
2133 present = dw_mci_get_cd(mmc);
2134 }
2135
2136 mmc_detect_change(slot->mmc,
2137 msecs_to_jiffies(host->pdata->detect_delay_ms));
2138 }
2139}
2140
2141#ifdef CONFIG_OF 2195#ifdef CONFIG_OF
2142/* given a slot id, find out the device node representing that slot */ 2196/* given a slot id, find out the device node representing that slot */
2143static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot) 2197static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
@@ -2206,6 +2260,7 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
2206 2260
2207 slot = mmc_priv(mmc); 2261 slot = mmc_priv(mmc);
2208 slot->id = id; 2262 slot->id = id;
2263 slot->sdio_id = host->sdio_id0 + id;
2209 slot->mmc = mmc; 2264 slot->mmc = mmc;
2210 slot->host = host; 2265 slot->host = host;
2211 host->slot[id] = slot; 2266 host->slot[id] = slot;
@@ -2289,9 +2344,6 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
2289 dw_mci_init_debugfs(slot); 2344 dw_mci_init_debugfs(slot);
2290#endif 2345#endif
2291 2346
2292 /* Card initially undetected */
2293 slot->last_detect_state = 0;
2294
2295 return 0; 2347 return 0;
2296 2348
2297err_host_allocated: 2349err_host_allocated:
@@ -2309,6 +2361,22 @@ static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
2309 2361
2310static void dw_mci_init_dma(struct dw_mci *host) 2362static void dw_mci_init_dma(struct dw_mci *host)
2311{ 2363{
2364 int addr_config;
2365 /* Check ADDR_CONFIG bit in HCON to find IDMAC address bus width */
2366 addr_config = (mci_readl(host, HCON) >> 27) & 0x01;
2367
2368 if (addr_config == 1) {
2369 /* host supports IDMAC in 64-bit address mode */
2370 host->dma_64bit_address = 1;
2371 dev_info(host->dev, "IDMAC supports 64-bit address mode.\n");
2372 if (!dma_set_mask(host->dev, DMA_BIT_MASK(64)))
2373 dma_set_coherent_mask(host->dev, DMA_BIT_MASK(64));
2374 } else {
2375 /* host supports IDMAC in 32-bit address mode */
2376 host->dma_64bit_address = 0;
2377 dev_info(host->dev, "IDMAC supports 32-bit address mode.\n");
2378 }
2379
2312 /* Alloc memory for sg translation */ 2380 /* Alloc memory for sg translation */
2313 host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE, 2381 host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
2314 &host->sg_dma, GFP_KERNEL); 2382 &host->sg_dma, GFP_KERNEL);
@@ -2672,17 +2740,10 @@ int dw_mci_probe(struct dw_mci *host)
2672 host->data_offset = DATA_240A_OFFSET; 2740 host->data_offset = DATA_240A_OFFSET;
2673 2741
2674 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host); 2742 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
2675 host->card_workqueue = alloc_workqueue("dw-mci-card",
2676 WQ_MEM_RECLAIM, 1);
2677 if (!host->card_workqueue) {
2678 ret = -ENOMEM;
2679 goto err_dmaunmap;
2680 }
2681 INIT_WORK(&host->card_work, dw_mci_work_routine_card);
2682 ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt, 2743 ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
2683 host->irq_flags, "dw-mci", host); 2744 host->irq_flags, "dw-mci", host);
2684 if (ret) 2745 if (ret)
2685 goto err_workqueue; 2746 goto err_dmaunmap;
2686 2747
2687 if (host->pdata->num_slots) 2748 if (host->pdata->num_slots)
2688 host->num_slots = host->pdata->num_slots; 2749 host->num_slots = host->pdata->num_slots;
@@ -2718,7 +2779,7 @@ int dw_mci_probe(struct dw_mci *host)
2718 } else { 2779 } else {
2719 dev_dbg(host->dev, "attempted to initialize %d slots, " 2780 dev_dbg(host->dev, "attempted to initialize %d slots, "
2720 "but failed on all\n", host->num_slots); 2781 "but failed on all\n", host->num_slots);
2721 goto err_workqueue; 2782 goto err_dmaunmap;
2722 } 2783 }
2723 2784
2724 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) 2785 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
@@ -2726,9 +2787,6 @@ int dw_mci_probe(struct dw_mci *host)
2726 2787
2727 return 0; 2788 return 0;
2728 2789
2729err_workqueue:
2730 destroy_workqueue(host->card_workqueue);
2731
2732err_dmaunmap: 2790err_dmaunmap:
2733 if (host->use_dma && host->dma_ops->exit) 2791 if (host->use_dma && host->dma_ops->exit)
2734 host->dma_ops->exit(host); 2792 host->dma_ops->exit(host);
@@ -2762,8 +2820,6 @@ void dw_mci_remove(struct dw_mci *host)
2762 mci_writel(host, CLKENA, 0); 2820 mci_writel(host, CLKENA, 0);
2763 mci_writel(host, CLKSRC, 0); 2821 mci_writel(host, CLKSRC, 0);
2764 2822
2765 destroy_workqueue(host->card_workqueue);
2766
2767 if (host->use_dma && host->dma_ops->exit) 2823 if (host->use_dma && host->dma_ops->exit)
2768 host->dma_ops->exit(host); 2824 host->dma_ops->exit(host);
2769 2825
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
index 01b99e8a9190..0d0f7a271d63 100644
--- a/drivers/mmc/host/dw_mmc.h
+++ b/drivers/mmc/host/dw_mmc.h
@@ -55,6 +55,17 @@
55#define SDMMC_BUFADDR 0x098 55#define SDMMC_BUFADDR 0x098
56#define SDMMC_CDTHRCTL 0x100 56#define SDMMC_CDTHRCTL 0x100
57#define SDMMC_DATA(x) (x) 57#define SDMMC_DATA(x) (x)
58/*
59* Registers to support idmac 64-bit address mode
60*/
61#define SDMMC_DBADDRL 0x088
62#define SDMMC_DBADDRU 0x08c
63#define SDMMC_IDSTS64 0x090
64#define SDMMC_IDINTEN64 0x094
65#define SDMMC_DSCADDRL 0x098
66#define SDMMC_DSCADDRU 0x09c
67#define SDMMC_BUFADDRL 0x0A0
68#define SDMMC_BUFADDRU 0x0A4
58 69
59/* 70/*
60 * Data offset is difference according to Version 71 * Data offset is difference according to Version
@@ -214,7 +225,7 @@ extern int dw_mci_resume(struct dw_mci *host);
214 * with CONFIG_MMC_CLKGATE. 225 * with CONFIG_MMC_CLKGATE.
215 * @flags: Random state bits associated with the slot. 226 * @flags: Random state bits associated with the slot.
216 * @id: Number of this slot. 227 * @id: Number of this slot.
217 * @last_detect_state: Most recently observed card detect state. 228 * @sdio_id: Number of this slot in the SDIO interrupt registers.
218 */ 229 */
219struct dw_mci_slot { 230struct dw_mci_slot {
220 struct mmc_host *mmc; 231 struct mmc_host *mmc;
@@ -234,7 +245,7 @@ struct dw_mci_slot {
234#define DW_MMC_CARD_PRESENT 0 245#define DW_MMC_CARD_PRESENT 0
235#define DW_MMC_CARD_NEED_INIT 1 246#define DW_MMC_CARD_NEED_INIT 1
236 int id; 247 int id;
237 int last_detect_state; 248 int sdio_id;
238}; 249};
239 250
240struct dw_mci_tuning_data { 251struct dw_mci_tuning_data {
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 43af791e2e45..8232e9a02d40 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -736,8 +736,15 @@ static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
736 chan = host->dma_tx_channel; 736 chan = host->dma_tx_channel;
737 dmaengine_terminate_all(chan); 737 dmaengine_terminate_all(chan);
738 738
739 if (host->dma_desc_current == next->dma_desc)
740 host->dma_desc_current = NULL;
741
742 if (host->dma_current == next->dma_chan)
743 host->dma_current = NULL;
744
739 next->dma_desc = NULL; 745 next->dma_desc = NULL;
740 next->dma_chan = NULL; 746 next->dma_chan = NULL;
747 data->host_cookie = 0;
741 } 748 }
742} 749}
743 750
@@ -1843,7 +1850,7 @@ static int mmci_runtime_resume(struct device *dev)
1843static const struct dev_pm_ops mmci_dev_pm_ops = { 1850static const struct dev_pm_ops mmci_dev_pm_ops = {
1844 SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, 1851 SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
1845 pm_runtime_force_resume) 1852 pm_runtime_force_resume)
1846 SET_PM_RUNTIME_PM_OPS(mmci_runtime_suspend, mmci_runtime_resume, NULL) 1853 SET_RUNTIME_PM_OPS(mmci_runtime_suspend, mmci_runtime_resume, NULL)
1847}; 1854};
1848 1855
1849static struct amba_id mmci_ids[] = { 1856static struct amba_id mmci_ids[] = {
diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c
index 9405ecdaf6cf..90c60fd4ff6e 100644
--- a/drivers/mmc/host/msm_sdcc.c
+++ b/drivers/mmc/host/msm_sdcc.c
@@ -1360,7 +1360,7 @@ msmsdcc_probe(struct platform_device *pdev)
1360 if (ret) 1360 if (ret)
1361 goto cmd_irq_free; 1361 goto cmd_irq_free;
1362 1362
1363 mmc_set_drvdata(pdev, mmc); 1363 platform_set_drvdata(pdev, mmc);
1364 mmc_add_host(mmc); 1364 mmc_add_host(mmc);
1365 1365
1366 pr_info("%s: Qualcomm MSM SDCC at 0x%016llx irq %d,%d dma %d\n", 1366 pr_info("%s: Qualcomm MSM SDCC at 0x%016llx irq %d,%d dma %d\n",
@@ -1419,7 +1419,7 @@ ioremap_free:
1419static int 1419static int
1420msmsdcc_suspend(struct platform_device *dev, pm_message_t state) 1420msmsdcc_suspend(struct platform_device *dev, pm_message_t state)
1421{ 1421{
1422 struct mmc_host *mmc = mmc_get_drvdata(dev); 1422 struct mmc_host *mmc = platform_get_drvdata(dev);
1423 1423
1424 if (mmc) { 1424 if (mmc) {
1425 struct msmsdcc_host *host = mmc_priv(mmc); 1425 struct msmsdcc_host *host = mmc_priv(mmc);
@@ -1437,7 +1437,7 @@ msmsdcc_suspend(struct platform_device *dev, pm_message_t state)
1437static int 1437static int
1438msmsdcc_resume(struct platform_device *dev) 1438msmsdcc_resume(struct platform_device *dev)
1439{ 1439{
1440 struct mmc_host *mmc = mmc_get_drvdata(dev); 1440 struct mmc_host *mmc = platform_get_drvdata(dev);
1441 1441
1442 if (mmc) { 1442 if (mmc) {
1443 struct msmsdcc_host *host = mmc_priv(mmc); 1443 struct msmsdcc_host *host = mmc_priv(mmc);
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index 6b4c5ad3b393..4f8618f4522d 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -111,10 +111,15 @@ static int mvsd_setup_data(struct mvsd_host *host, struct mmc_data *data)
111 mvsd_write(MVSD_BLK_COUNT, data->blocks); 111 mvsd_write(MVSD_BLK_COUNT, data->blocks);
112 mvsd_write(MVSD_BLK_SIZE, data->blksz); 112 mvsd_write(MVSD_BLK_SIZE, data->blksz);
113 113
114 if (nodma || (data->blksz | data->sg->offset) & 3) { 114 if (nodma || (data->blksz | data->sg->offset) & 3 ||
115 ((!(data->flags & MMC_DATA_READ) && data->sg->offset & 0x3f))) {
115 /* 116 /*
116 * We cannot do DMA on a buffer which offset or size 117 * We cannot do DMA on a buffer which offset or size
117 * is not aligned on a 4-byte boundary. 118 * is not aligned on a 4-byte boundary.
119 *
120 * It also appears the host to card DMA can corrupt
121 * data when the buffer is not aligned on a 64 byte
122 * boundary.
118 */ 123 */
119 host->pio_size = data->blocks * data->blksz; 124 host->pio_size = data->blocks * data->blksz;
120 host->pio_ptr = sg_virt(data->sg); 125 host->pio_ptr = sg_virt(data->sg);
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index ad111422ad55..5316d9b9e7b4 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -373,13 +373,9 @@ static void mxcmci_dma_callback(void *data)
373 del_timer(&host->watchdog); 373 del_timer(&host->watchdog);
374 374
375 stat = mxcmci_readl(host, MMC_REG_STATUS); 375 stat = mxcmci_readl(host, MMC_REG_STATUS);
376 mxcmci_writel(host, stat & ~STATUS_DATA_TRANS_DONE, MMC_REG_STATUS);
377 376
378 dev_dbg(mmc_dev(host->mmc), "%s: 0x%08x\n", __func__, stat); 377 dev_dbg(mmc_dev(host->mmc), "%s: 0x%08x\n", __func__, stat);
379 378
380 if (stat & STATUS_READ_OP_DONE)
381 mxcmci_writel(host, STATUS_READ_OP_DONE, MMC_REG_STATUS);
382
383 mxcmci_data_done(host, stat); 379 mxcmci_data_done(host, stat);
384} 380}
385 381
@@ -743,10 +739,8 @@ static irqreturn_t mxcmci_irq(int irq, void *devid)
743 sdio_irq = (stat & STATUS_SDIO_INT_ACTIVE) && host->use_sdio; 739 sdio_irq = (stat & STATUS_SDIO_INT_ACTIVE) && host->use_sdio;
744 spin_unlock_irqrestore(&host->lock, flags); 740 spin_unlock_irqrestore(&host->lock, flags);
745 741
746 if (mxcmci_use_dma(host) && 742 if (mxcmci_use_dma(host) && (stat & (STATUS_WRITE_OP_DONE)))
747 (stat & (STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE))) 743 mxcmci_writel(host, STATUS_WRITE_OP_DONE, MMC_REG_STATUS);
748 mxcmci_writel(host, STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE,
749 MMC_REG_STATUS);
750 744
751 if (sdio_irq) { 745 if (sdio_irq) {
752 mxcmci_writel(host, STATUS_SDIO_INT_ACTIVE, MMC_REG_STATUS); 746 mxcmci_writel(host, STATUS_SDIO_INT_ACTIVE, MMC_REG_STATUS);
@@ -756,8 +750,7 @@ static irqreturn_t mxcmci_irq(int irq, void *devid)
756 if (stat & STATUS_END_CMD_RESP) 750 if (stat & STATUS_END_CMD_RESP)
757 mxcmci_cmd_done(host, stat); 751 mxcmci_cmd_done(host, stat);
758 752
759 if (mxcmci_use_dma(host) && 753 if (mxcmci_use_dma(host) && (stat & STATUS_WRITE_OP_DONE)) {
760 (stat & (STATUS_DATA_TRANS_DONE | STATUS_WRITE_OP_DONE))) {
761 del_timer(&host->watchdog); 754 del_timer(&host->watchdog);
762 mxcmci_data_done(host, stat); 755 mxcmci_data_done(host, stat);
763 } 756 }
@@ -1084,12 +1077,14 @@ static int mxcmci_probe(struct platform_device *pdev)
1084 dat3_card_detect = true; 1077 dat3_card_detect = true;
1085 1078
1086 ret = mmc_regulator_get_supply(mmc); 1079 ret = mmc_regulator_get_supply(mmc);
1087 if (ret) { 1080 if (ret == -EPROBE_DEFER)
1088 if (pdata && ret != -EPROBE_DEFER) 1081 goto out_free;
1089 mmc->ocr_avail = pdata->ocr_avail ? : 1082
1090 MMC_VDD_32_33 | MMC_VDD_33_34; 1083 if (!mmc->ocr_avail) {
1084 if (pdata && pdata->ocr_avail)
1085 mmc->ocr_avail = pdata->ocr_avail;
1091 else 1086 else
1092 goto out_free; 1087 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
1093 } 1088 }
1094 1089
1095 if (dat3_card_detect) 1090 if (dat3_card_detect)
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index cd74e5143c36..60c4ca97a727 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -581,10 +581,9 @@ static int mxs_mmc_probe(struct platform_device *pdev)
581 struct regulator *reg_vmmc; 581 struct regulator *reg_vmmc;
582 struct mxs_ssp *ssp; 582 struct mxs_ssp *ssp;
583 583
584 iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
585 irq_err = platform_get_irq(pdev, 0); 584 irq_err = platform_get_irq(pdev, 0);
586 if (!iores || irq_err < 0) 585 if (irq_err < 0)
587 return -EINVAL; 586 return irq_err;
588 587
589 mmc = mmc_alloc_host(sizeof(struct mxs_mmc_host), &pdev->dev); 588 mmc = mmc_alloc_host(sizeof(struct mxs_mmc_host), &pdev->dev);
590 if (!mmc) 589 if (!mmc)
@@ -593,6 +592,7 @@ static int mxs_mmc_probe(struct platform_device *pdev)
593 host = mmc_priv(mmc); 592 host = mmc_priv(mmc);
594 ssp = &host->ssp; 593 ssp = &host->ssp;
595 ssp->dev = &pdev->dev; 594 ssp->dev = &pdev->dev;
595 iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
596 ssp->base = devm_ioremap_resource(&pdev->dev, iores); 596 ssp->base = devm_ioremap_resource(&pdev->dev, iores);
597 if (IS_ERR(ssp->base)) { 597 if (IS_ERR(ssp->base)) {
598 ret = PTR_ERR(ssp->base); 598 ret = PTR_ERR(ssp->base);
@@ -619,7 +619,9 @@ static int mxs_mmc_probe(struct platform_device *pdev)
619 ret = PTR_ERR(ssp->clk); 619 ret = PTR_ERR(ssp->clk);
620 goto out_mmc_free; 620 goto out_mmc_free;
621 } 621 }
622 clk_prepare_enable(ssp->clk); 622 ret = clk_prepare_enable(ssp->clk);
623 if (ret)
624 goto out_mmc_free;
623 625
624 ret = mxs_mmc_reset(host); 626 ret = mxs_mmc_reset(host);
625 if (ret) { 627 if (ret) {
@@ -660,7 +662,7 @@ static int mxs_mmc_probe(struct platform_device *pdev)
660 platform_set_drvdata(pdev, mmc); 662 platform_set_drvdata(pdev, mmc);
661 663
662 ret = devm_request_irq(&pdev->dev, irq_err, mxs_mmc_irq_handler, 0, 664 ret = devm_request_irq(&pdev->dev, irq_err, mxs_mmc_irq_handler, 0,
663 DRIVER_NAME, host); 665 dev_name(&pdev->dev), host);
664 if (ret) 666 if (ret)
665 goto out_free_dma; 667 goto out_free_dma;
666 668
@@ -702,7 +704,7 @@ static int mxs_mmc_remove(struct platform_device *pdev)
702 return 0; 704 return 0;
703} 705}
704 706
705#ifdef CONFIG_PM 707#ifdef CONFIG_PM_SLEEP
706static int mxs_mmc_suspend(struct device *dev) 708static int mxs_mmc_suspend(struct device *dev)
707{ 709{
708 struct mmc_host *mmc = dev_get_drvdata(dev); 710 struct mmc_host *mmc = dev_get_drvdata(dev);
@@ -719,25 +721,19 @@ static int mxs_mmc_resume(struct device *dev)
719 struct mxs_mmc_host *host = mmc_priv(mmc); 721 struct mxs_mmc_host *host = mmc_priv(mmc);
720 struct mxs_ssp *ssp = &host->ssp; 722 struct mxs_ssp *ssp = &host->ssp;
721 723
722 clk_prepare_enable(ssp->clk); 724 return clk_prepare_enable(ssp->clk);
723 return 0;
724} 725}
725
726static const struct dev_pm_ops mxs_mmc_pm_ops = {
727 .suspend = mxs_mmc_suspend,
728 .resume = mxs_mmc_resume,
729};
730#endif 726#endif
731 727
728static SIMPLE_DEV_PM_OPS(mxs_mmc_pm_ops, mxs_mmc_suspend, mxs_mmc_resume);
729
732static struct platform_driver mxs_mmc_driver = { 730static struct platform_driver mxs_mmc_driver = {
733 .probe = mxs_mmc_probe, 731 .probe = mxs_mmc_probe,
734 .remove = mxs_mmc_remove, 732 .remove = mxs_mmc_remove,
735 .id_table = mxs_ssp_ids, 733 .id_table = mxs_ssp_ids,
736 .driver = { 734 .driver = {
737 .name = DRIVER_NAME, 735 .name = DRIVER_NAME,
738#ifdef CONFIG_PM
739 .pm = &mxs_mmc_pm_ops, 736 .pm = &mxs_mmc_pm_ops,
740#endif
741 .of_match_table = mxs_mmc_dt_ids, 737 .of_match_table = mxs_mmc_dt_ids,
742 }, 738 },
743}; 739};
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index df27bb4fc098..7c71dcdcba8b 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -42,7 +42,7 @@
42#include <linux/regulator/consumer.h> 42#include <linux/regulator/consumer.h>
43#include <linux/pinctrl/consumer.h> 43#include <linux/pinctrl/consumer.h>
44#include <linux/pm_runtime.h> 44#include <linux/pm_runtime.h>
45#include <linux/platform_data/mmc-omap.h> 45#include <linux/platform_data/hsmmc-omap.h>
46 46
47/* OMAP HSMMC Host Controller Registers */ 47/* OMAP HSMMC Host Controller Registers */
48#define OMAP_HSMMC_SYSSTATUS 0x0014 48#define OMAP_HSMMC_SYSSTATUS 0x0014
@@ -155,7 +155,7 @@
155 * omap.c controller driver. Luckily this is not currently done on any known 155 * omap.c controller driver. Luckily this is not currently done on any known
156 * omap_hsmmc.c device. 156 * omap_hsmmc.c device.
157 */ 157 */
158#define mmc_slot(host) (host->pdata->slots[host->slot_id]) 158#define mmc_pdata(host) host->pdata
159 159
160/* 160/*
161 * MMC Host controller read/write API's 161 * MMC Host controller read/write API's
@@ -207,7 +207,6 @@ struct omap_hsmmc_host {
207 int use_dma, dma_ch; 207 int use_dma, dma_ch;
208 struct dma_chan *tx_chan; 208 struct dma_chan *tx_chan;
209 struct dma_chan *rx_chan; 209 struct dma_chan *rx_chan;
210 int slot_id;
211 int response_busy; 210 int response_busy;
212 int context_loss; 211 int context_loss;
213 int protect_card; 212 int protect_card;
@@ -220,7 +219,26 @@ struct omap_hsmmc_host {
220#define HSMMC_SDIO_IRQ_ENABLED (1 << 1) /* SDIO irq enabled */ 219#define HSMMC_SDIO_IRQ_ENABLED (1 << 1) /* SDIO irq enabled */
221#define HSMMC_WAKE_IRQ_ENABLED (1 << 2) 220#define HSMMC_WAKE_IRQ_ENABLED (1 << 2)
222 struct omap_hsmmc_next next_data; 221 struct omap_hsmmc_next next_data;
223 struct omap_mmc_platform_data *pdata; 222 struct omap_hsmmc_platform_data *pdata;
223
224 /* To handle board related suspend/resume functionality for MMC */
225 int (*suspend)(struct device *dev);
226 int (*resume)(struct device *dev);
227
228 /* return MMC cover switch state, can be NULL if not supported.
229 *
230 * possible return values:
231 * 0 - closed
232 * 1 - open
233 */
234 int (*get_cover_state)(struct device *dev);
235
236 /* Card detection IRQs */
237 int card_detect_irq;
238
239 int (*card_detect)(struct device *dev);
240 int (*get_ro)(struct device *dev);
241
224}; 242};
225 243
226struct omap_mmc_of_data { 244struct omap_mmc_of_data {
@@ -230,50 +248,48 @@ struct omap_mmc_of_data {
230 248
231static void omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host); 249static void omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host);
232 250
233static int omap_hsmmc_card_detect(struct device *dev, int slot) 251static int omap_hsmmc_card_detect(struct device *dev)
234{ 252{
235 struct omap_hsmmc_host *host = dev_get_drvdata(dev); 253 struct omap_hsmmc_host *host = dev_get_drvdata(dev);
236 struct omap_mmc_platform_data *mmc = host->pdata; 254 struct omap_hsmmc_platform_data *mmc = host->pdata;
237 255
238 /* NOTE: assumes card detect signal is active-low */ 256 /* NOTE: assumes card detect signal is active-low */
239 return !gpio_get_value_cansleep(mmc->slots[0].switch_pin); 257 return !gpio_get_value_cansleep(mmc->switch_pin);
240} 258}
241 259
242static int omap_hsmmc_get_wp(struct device *dev, int slot) 260static int omap_hsmmc_get_wp(struct device *dev)
243{ 261{
244 struct omap_hsmmc_host *host = dev_get_drvdata(dev); 262 struct omap_hsmmc_host *host = dev_get_drvdata(dev);
245 struct omap_mmc_platform_data *mmc = host->pdata; 263 struct omap_hsmmc_platform_data *mmc = host->pdata;
246 264
247 /* NOTE: assumes write protect signal is active-high */ 265 /* NOTE: assumes write protect signal is active-high */
248 return gpio_get_value_cansleep(mmc->slots[0].gpio_wp); 266 return gpio_get_value_cansleep(mmc->gpio_wp);
249} 267}
250 268
251static int omap_hsmmc_get_cover_state(struct device *dev, int slot) 269static int omap_hsmmc_get_cover_state(struct device *dev)
252{ 270{
253 struct omap_hsmmc_host *host = dev_get_drvdata(dev); 271 struct omap_hsmmc_host *host = dev_get_drvdata(dev);
254 struct omap_mmc_platform_data *mmc = host->pdata; 272 struct omap_hsmmc_platform_data *mmc = host->pdata;
255 273
256 /* NOTE: assumes card detect signal is active-low */ 274 /* NOTE: assumes card detect signal is active-low */
257 return !gpio_get_value_cansleep(mmc->slots[0].switch_pin); 275 return !gpio_get_value_cansleep(mmc->switch_pin);
258} 276}
259 277
260#ifdef CONFIG_PM 278#ifdef CONFIG_PM
261 279
262static int omap_hsmmc_suspend_cdirq(struct device *dev, int slot) 280static int omap_hsmmc_suspend_cdirq(struct device *dev)
263{ 281{
264 struct omap_hsmmc_host *host = dev_get_drvdata(dev); 282 struct omap_hsmmc_host *host = dev_get_drvdata(dev);
265 struct omap_mmc_platform_data *mmc = host->pdata;
266 283
267 disable_irq(mmc->slots[0].card_detect_irq); 284 disable_irq(host->card_detect_irq);
268 return 0; 285 return 0;
269} 286}
270 287
271static int omap_hsmmc_resume_cdirq(struct device *dev, int slot) 288static int omap_hsmmc_resume_cdirq(struct device *dev)
272{ 289{
273 struct omap_hsmmc_host *host = dev_get_drvdata(dev); 290 struct omap_hsmmc_host *host = dev_get_drvdata(dev);
274 struct omap_mmc_platform_data *mmc = host->pdata;
275 291
276 enable_irq(mmc->slots[0].card_detect_irq); 292 enable_irq(host->card_detect_irq);
277 return 0; 293 return 0;
278} 294}
279 295
@@ -286,8 +302,7 @@ static int omap_hsmmc_resume_cdirq(struct device *dev, int slot)
286 302
287#ifdef CONFIG_REGULATOR 303#ifdef CONFIG_REGULATOR
288 304
289static int omap_hsmmc_set_power(struct device *dev, int slot, int power_on, 305static int omap_hsmmc_set_power(struct device *dev, int power_on, int vdd)
290 int vdd)
291{ 306{
292 struct omap_hsmmc_host *host = 307 struct omap_hsmmc_host *host =
293 platform_get_drvdata(to_platform_device(dev)); 308 platform_get_drvdata(to_platform_device(dev));
@@ -300,8 +315,8 @@ static int omap_hsmmc_set_power(struct device *dev, int slot, int power_on,
300 if (!host->vcc) 315 if (!host->vcc)
301 return 0; 316 return 0;
302 317
303 if (mmc_slot(host).before_set_reg) 318 if (mmc_pdata(host)->before_set_reg)
304 mmc_slot(host).before_set_reg(dev, slot, power_on, vdd); 319 mmc_pdata(host)->before_set_reg(dev, power_on, vdd);
305 320
306 if (host->pbias) { 321 if (host->pbias) {
307 if (host->pbias_enabled == 1) { 322 if (host->pbias_enabled == 1) {
@@ -363,8 +378,8 @@ static int omap_hsmmc_set_power(struct device *dev, int slot, int power_on,
363 } 378 }
364 } 379 }
365 380
366 if (mmc_slot(host).after_set_reg) 381 if (mmc_pdata(host)->after_set_reg)
367 mmc_slot(host).after_set_reg(dev, slot, power_on, vdd); 382 mmc_pdata(host)->after_set_reg(dev, power_on, vdd);
368 383
369error_set_power: 384error_set_power:
370 return ret; 385 return ret;
@@ -383,18 +398,18 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
383 } else { 398 } else {
384 host->vcc = reg; 399 host->vcc = reg;
385 ocr_value = mmc_regulator_get_ocrmask(reg); 400 ocr_value = mmc_regulator_get_ocrmask(reg);
386 if (!mmc_slot(host).ocr_mask) { 401 if (!mmc_pdata(host)->ocr_mask) {
387 mmc_slot(host).ocr_mask = ocr_value; 402 mmc_pdata(host)->ocr_mask = ocr_value;
388 } else { 403 } else {
389 if (!(mmc_slot(host).ocr_mask & ocr_value)) { 404 if (!(mmc_pdata(host)->ocr_mask & ocr_value)) {
390 dev_err(host->dev, "ocrmask %x is not supported\n", 405 dev_err(host->dev, "ocrmask %x is not supported\n",
391 mmc_slot(host).ocr_mask); 406 mmc_pdata(host)->ocr_mask);
392 mmc_slot(host).ocr_mask = 0; 407 mmc_pdata(host)->ocr_mask = 0;
393 return -EINVAL; 408 return -EINVAL;
394 } 409 }
395 } 410 }
396 } 411 }
397 mmc_slot(host).set_power = omap_hsmmc_set_power; 412 mmc_pdata(host)->set_power = omap_hsmmc_set_power;
398 413
399 /* Allow an aux regulator */ 414 /* Allow an aux regulator */
400 reg = devm_regulator_get_optional(host->dev, "vmmc_aux"); 415 reg = devm_regulator_get_optional(host->dev, "vmmc_aux");
@@ -404,7 +419,7 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
404 host->pbias = IS_ERR(reg) ? NULL : reg; 419 host->pbias = IS_ERR(reg) ? NULL : reg;
405 420
406 /* For eMMC do not power off when not in sleep state */ 421 /* For eMMC do not power off when not in sleep state */
407 if (mmc_slot(host).no_regulator_off_init) 422 if (mmc_pdata(host)->no_regulator_off_init)
408 return 0; 423 return 0;
409 /* 424 /*
410 * To disable boot_on regulator, enable regulator 425 * To disable boot_on regulator, enable regulator
@@ -412,10 +427,10 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
412 */ 427 */
413 if ((host->vcc && regulator_is_enabled(host->vcc) > 0) || 428 if ((host->vcc && regulator_is_enabled(host->vcc) > 0) ||
414 (host->vcc_aux && regulator_is_enabled(host->vcc_aux))) { 429 (host->vcc_aux && regulator_is_enabled(host->vcc_aux))) {
415 int vdd = ffs(mmc_slot(host).ocr_mask) - 1; 430 int vdd = ffs(mmc_pdata(host)->ocr_mask) - 1;
416 431
417 mmc_slot(host).set_power(host->dev, host->slot_id, 1, vdd); 432 mmc_pdata(host)->set_power(host->dev, 1, vdd);
418 mmc_slot(host).set_power(host->dev, host->slot_id, 0, 0); 433 mmc_pdata(host)->set_power(host->dev, 0, 0);
419 } 434 }
420 435
421 return 0; 436 return 0;
@@ -423,7 +438,7 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
423 438
424static void omap_hsmmc_reg_put(struct omap_hsmmc_host *host) 439static void omap_hsmmc_reg_put(struct omap_hsmmc_host *host)
425{ 440{
426 mmc_slot(host).set_power = NULL; 441 mmc_pdata(host)->set_power = NULL;
427} 442}
428 443
429static inline int omap_hsmmc_have_reg(void) 444static inline int omap_hsmmc_have_reg(void)
@@ -449,55 +464,59 @@ static inline int omap_hsmmc_have_reg(void)
449 464
450#endif 465#endif
451 466
452static int omap_hsmmc_gpio_init(struct omap_mmc_platform_data *pdata) 467static int omap_hsmmc_gpio_init(struct omap_hsmmc_host *host,
468 struct omap_hsmmc_platform_data *pdata)
453{ 469{
454 int ret; 470 int ret;
455 471
456 if (gpio_is_valid(pdata->slots[0].switch_pin)) { 472 if (gpio_is_valid(pdata->switch_pin)) {
457 if (pdata->slots[0].cover) 473 if (pdata->cover)
458 pdata->slots[0].get_cover_state = 474 host->get_cover_state =
459 omap_hsmmc_get_cover_state; 475 omap_hsmmc_get_cover_state;
460 else 476 else
461 pdata->slots[0].card_detect = omap_hsmmc_card_detect; 477 host->card_detect = omap_hsmmc_card_detect;
462 pdata->slots[0].card_detect_irq = 478 host->card_detect_irq =
463 gpio_to_irq(pdata->slots[0].switch_pin); 479 gpio_to_irq(pdata->switch_pin);
464 ret = gpio_request(pdata->slots[0].switch_pin, "mmc_cd"); 480 ret = gpio_request(pdata->switch_pin, "mmc_cd");
465 if (ret) 481 if (ret)
466 return ret; 482 return ret;
467 ret = gpio_direction_input(pdata->slots[0].switch_pin); 483 ret = gpio_direction_input(pdata->switch_pin);
468 if (ret) 484 if (ret)
469 goto err_free_sp; 485 goto err_free_sp;
470 } else 486 } else {
471 pdata->slots[0].switch_pin = -EINVAL; 487 pdata->switch_pin = -EINVAL;
488 }
472 489
473 if (gpio_is_valid(pdata->slots[0].gpio_wp)) { 490 if (gpio_is_valid(pdata->gpio_wp)) {
474 pdata->slots[0].get_ro = omap_hsmmc_get_wp; 491 host->get_ro = omap_hsmmc_get_wp;
475 ret = gpio_request(pdata->slots[0].gpio_wp, "mmc_wp"); 492 ret = gpio_request(pdata->gpio_wp, "mmc_wp");
476 if (ret) 493 if (ret)
477 goto err_free_cd; 494 goto err_free_cd;
478 ret = gpio_direction_input(pdata->slots[0].gpio_wp); 495 ret = gpio_direction_input(pdata->gpio_wp);
479 if (ret) 496 if (ret)
480 goto err_free_wp; 497 goto err_free_wp;
481 } else 498 } else {
482 pdata->slots[0].gpio_wp = -EINVAL; 499 pdata->gpio_wp = -EINVAL;
500 }
483 501
484 return 0; 502 return 0;
485 503
486err_free_wp: 504err_free_wp:
487 gpio_free(pdata->slots[0].gpio_wp); 505 gpio_free(pdata->gpio_wp);
488err_free_cd: 506err_free_cd:
489 if (gpio_is_valid(pdata->slots[0].switch_pin)) 507 if (gpio_is_valid(pdata->switch_pin))
490err_free_sp: 508err_free_sp:
491 gpio_free(pdata->slots[0].switch_pin); 509 gpio_free(pdata->switch_pin);
492 return ret; 510 return ret;
493} 511}
494 512
495static void omap_hsmmc_gpio_free(struct omap_mmc_platform_data *pdata) 513static void omap_hsmmc_gpio_free(struct omap_hsmmc_host *host,
514 struct omap_hsmmc_platform_data *pdata)
496{ 515{
497 if (gpio_is_valid(pdata->slots[0].gpio_wp)) 516 if (gpio_is_valid(pdata->gpio_wp))
498 gpio_free(pdata->slots[0].gpio_wp); 517 gpio_free(pdata->gpio_wp);
499 if (gpio_is_valid(pdata->slots[0].switch_pin)) 518 if (gpio_is_valid(pdata->switch_pin))
500 gpio_free(pdata->slots[0].switch_pin); 519 gpio_free(pdata->switch_pin);
501} 520}
502 521
503/* 522/*
@@ -607,8 +626,9 @@ static void omap_hsmmc_set_clock(struct omap_hsmmc_host *host)
607 * in capabilities register 626 * in capabilities register
608 * - MMC/SD clock coming out of controller > 25MHz 627 * - MMC/SD clock coming out of controller > 25MHz
609 */ 628 */
610 if ((mmc_slot(host).features & HSMMC_HAS_HSPE_SUPPORT) && 629 if ((mmc_pdata(host)->features & HSMMC_HAS_HSPE_SUPPORT) &&
611 (ios->timing != MMC_TIMING_MMC_DDR52) && 630 (ios->timing != MMC_TIMING_MMC_DDR52) &&
631 (ios->timing != MMC_TIMING_UHS_DDR50) &&
612 ((OMAP_HSMMC_READ(host->base, CAPA) & HSS) == HSS)) { 632 ((OMAP_HSMMC_READ(host->base, CAPA) & HSS) == HSS)) {
613 regval = OMAP_HSMMC_READ(host->base, HCTL); 633 regval = OMAP_HSMMC_READ(host->base, HCTL);
614 if (clkdiv && (clk_get_rate(host->fclk)/clkdiv) > 25000000) 634 if (clkdiv && (clk_get_rate(host->fclk)/clkdiv) > 25000000)
@@ -628,7 +648,8 @@ static void omap_hsmmc_set_bus_width(struct omap_hsmmc_host *host)
628 u32 con; 648 u32 con;
629 649
630 con = OMAP_HSMMC_READ(host->base, CON); 650 con = OMAP_HSMMC_READ(host->base, CON);
631 if (ios->timing == MMC_TIMING_MMC_DDR52) 651 if (ios->timing == MMC_TIMING_MMC_DDR52 ||
652 ios->timing == MMC_TIMING_UHS_DDR50)
632 con |= DDR; /* configure in DDR mode */ 653 con |= DDR; /* configure in DDR mode */
633 else 654 else
634 con &= ~DDR; 655 con &= ~DDR;
@@ -791,8 +812,8 @@ int omap_hsmmc_cover_is_closed(struct omap_hsmmc_host *host)
791{ 812{
792 int r = 1; 813 int r = 1;
793 814
794 if (mmc_slot(host).get_cover_state) 815 if (host->get_cover_state)
795 r = mmc_slot(host).get_cover_state(host->dev, host->slot_id); 816 r = host->get_cover_state(host->dev);
796 return r; 817 return r;
797} 818}
798 819
@@ -816,7 +837,7 @@ omap_hsmmc_show_slot_name(struct device *dev, struct device_attribute *attr,
816 struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev); 837 struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev);
817 struct omap_hsmmc_host *host = mmc_priv(mmc); 838 struct omap_hsmmc_host *host = mmc_priv(mmc);
818 839
819 return sprintf(buf, "%s\n", mmc_slot(host).name); 840 return sprintf(buf, "%s\n", mmc_pdata(host)->name);
820} 841}
821 842
822static DEVICE_ATTR(slot_name, S_IRUGO, omap_hsmmc_show_slot_name, NULL); 843static DEVICE_ATTR(slot_name, S_IRUGO, omap_hsmmc_show_slot_name, NULL);
@@ -1061,7 +1082,7 @@ static inline void omap_hsmmc_reset_controller_fsm(struct omap_hsmmc_host *host,
1061 * OMAP4 ES2 and greater has an updated reset logic. 1082 * OMAP4 ES2 and greater has an updated reset logic.
1062 * Monitor a 0->1 transition first 1083 * Monitor a 0->1 transition first
1063 */ 1084 */
1064 if (mmc_slot(host).features & HSMMC_HAS_UPDATED_RESET) { 1085 if (mmc_pdata(host)->features & HSMMC_HAS_UPDATED_RESET) {
1065 while ((!(OMAP_HSMMC_READ(host->base, SYSCTL) & bit)) 1086 while ((!(OMAP_HSMMC_READ(host->base, SYSCTL) & bit))
1066 && (i++ < limit)) 1087 && (i++ < limit))
1067 udelay(1); 1088 udelay(1);
@@ -1210,12 +1231,11 @@ static int omap_hsmmc_switch_opcond(struct omap_hsmmc_host *host, int vdd)
1210 clk_disable_unprepare(host->dbclk); 1231 clk_disable_unprepare(host->dbclk);
1211 1232
1212 /* Turn the power off */ 1233 /* Turn the power off */
1213 ret = mmc_slot(host).set_power(host->dev, host->slot_id, 0, 0); 1234 ret = mmc_pdata(host)->set_power(host->dev, 0, 0);
1214 1235
1215 /* Turn the power ON with given VDD 1.8 or 3.0v */ 1236 /* Turn the power ON with given VDD 1.8 or 3.0v */
1216 if (!ret) 1237 if (!ret)
1217 ret = mmc_slot(host).set_power(host->dev, host->slot_id, 1, 1238 ret = mmc_pdata(host)->set_power(host->dev, 1, vdd);
1218 vdd);
1219 pm_runtime_get_sync(host->dev); 1239 pm_runtime_get_sync(host->dev);
1220 if (host->dbclk) 1240 if (host->dbclk)
1221 clk_prepare_enable(host->dbclk); 1241 clk_prepare_enable(host->dbclk);
@@ -1259,11 +1279,11 @@ err:
1259/* Protect the card while the cover is open */ 1279/* Protect the card while the cover is open */
1260static void omap_hsmmc_protect_card(struct omap_hsmmc_host *host) 1280static void omap_hsmmc_protect_card(struct omap_hsmmc_host *host)
1261{ 1281{
1262 if (!mmc_slot(host).get_cover_state) 1282 if (!host->get_cover_state)
1263 return; 1283 return;
1264 1284
1265 host->reqs_blocked = 0; 1285 host->reqs_blocked = 0;
1266 if (mmc_slot(host).get_cover_state(host->dev, host->slot_id)) { 1286 if (host->get_cover_state(host->dev)) {
1267 if (host->protect_card) { 1287 if (host->protect_card) {
1268 dev_info(host->dev, "%s: cover is closed, " 1288 dev_info(host->dev, "%s: cover is closed, "
1269 "card is now accessible\n", 1289 "card is now accessible\n",
@@ -1286,13 +1306,12 @@ static void omap_hsmmc_protect_card(struct omap_hsmmc_host *host)
1286static irqreturn_t omap_hsmmc_detect(int irq, void *dev_id) 1306static irqreturn_t omap_hsmmc_detect(int irq, void *dev_id)
1287{ 1307{
1288 struct omap_hsmmc_host *host = dev_id; 1308 struct omap_hsmmc_host *host = dev_id;
1289 struct omap_mmc_slot_data *slot = &mmc_slot(host);
1290 int carddetect; 1309 int carddetect;
1291 1310
1292 sysfs_notify(&host->mmc->class_dev.kobj, NULL, "cover_switch"); 1311 sysfs_notify(&host->mmc->class_dev.kobj, NULL, "cover_switch");
1293 1312
1294 if (slot->card_detect) 1313 if (host->card_detect)
1295 carddetect = slot->card_detect(host->dev, host->slot_id); 1314 carddetect = host->card_detect(host->dev);
1296 else { 1315 else {
1297 omap_hsmmc_protect_card(host); 1316 omap_hsmmc_protect_card(host);
1298 carddetect = -ENOSYS; 1317 carddetect = -ENOSYS;
@@ -1618,12 +1637,10 @@ static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1618 if (ios->power_mode != host->power_mode) { 1637 if (ios->power_mode != host->power_mode) {
1619 switch (ios->power_mode) { 1638 switch (ios->power_mode) {
1620 case MMC_POWER_OFF: 1639 case MMC_POWER_OFF:
1621 mmc_slot(host).set_power(host->dev, host->slot_id, 1640 mmc_pdata(host)->set_power(host->dev, 0, 0);
1622 0, 0);
1623 break; 1641 break;
1624 case MMC_POWER_UP: 1642 case MMC_POWER_UP:
1625 mmc_slot(host).set_power(host->dev, host->slot_id, 1643 mmc_pdata(host)->set_power(host->dev, 1, ios->vdd);
1626 1, ios->vdd);
1627 break; 1644 break;
1628 case MMC_POWER_ON: 1645 case MMC_POWER_ON:
1629 do_send_init_stream = 1; 1646 do_send_init_stream = 1;
@@ -1668,26 +1685,26 @@ static int omap_hsmmc_get_cd(struct mmc_host *mmc)
1668{ 1685{
1669 struct omap_hsmmc_host *host = mmc_priv(mmc); 1686 struct omap_hsmmc_host *host = mmc_priv(mmc);
1670 1687
1671 if (!mmc_slot(host).card_detect) 1688 if (!host->card_detect)
1672 return -ENOSYS; 1689 return -ENOSYS;
1673 return mmc_slot(host).card_detect(host->dev, host->slot_id); 1690 return host->card_detect(host->dev);
1674} 1691}
1675 1692
1676static int omap_hsmmc_get_ro(struct mmc_host *mmc) 1693static int omap_hsmmc_get_ro(struct mmc_host *mmc)
1677{ 1694{
1678 struct omap_hsmmc_host *host = mmc_priv(mmc); 1695 struct omap_hsmmc_host *host = mmc_priv(mmc);
1679 1696
1680 if (!mmc_slot(host).get_ro) 1697 if (!host->get_ro)
1681 return -ENOSYS; 1698 return -ENOSYS;
1682 return mmc_slot(host).get_ro(host->dev, 0); 1699 return host->get_ro(host->dev);
1683} 1700}
1684 1701
1685static void omap_hsmmc_init_card(struct mmc_host *mmc, struct mmc_card *card) 1702static void omap_hsmmc_init_card(struct mmc_host *mmc, struct mmc_card *card)
1686{ 1703{
1687 struct omap_hsmmc_host *host = mmc_priv(mmc); 1704 struct omap_hsmmc_host *host = mmc_priv(mmc);
1688 1705
1689 if (mmc_slot(host).init_card) 1706 if (mmc_pdata(host)->init_card)
1690 mmc_slot(host).init_card(card); 1707 mmc_pdata(host)->init_card(card);
1691} 1708}
1692 1709
1693static void omap_hsmmc_enable_sdio_irq(struct mmc_host *mmc, int enable) 1710static void omap_hsmmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
@@ -1957,9 +1974,9 @@ static const struct of_device_id omap_mmc_of_match[] = {
1957}; 1974};
1958MODULE_DEVICE_TABLE(of, omap_mmc_of_match); 1975MODULE_DEVICE_TABLE(of, omap_mmc_of_match);
1959 1976
1960static struct omap_mmc_platform_data *of_get_hsmmc_pdata(struct device *dev) 1977static struct omap_hsmmc_platform_data *of_get_hsmmc_pdata(struct device *dev)
1961{ 1978{
1962 struct omap_mmc_platform_data *pdata; 1979 struct omap_hsmmc_platform_data *pdata;
1963 struct device_node *np = dev->of_node; 1980 struct device_node *np = dev->of_node;
1964 u32 bus_width, max_freq; 1981 u32 bus_width, max_freq;
1965 int cd_gpio, wp_gpio; 1982 int cd_gpio, wp_gpio;
@@ -1976,40 +1993,38 @@ static struct omap_mmc_platform_data *of_get_hsmmc_pdata(struct device *dev)
1976 if (of_find_property(np, "ti,dual-volt", NULL)) 1993 if (of_find_property(np, "ti,dual-volt", NULL))
1977 pdata->controller_flags |= OMAP_HSMMC_SUPPORTS_DUAL_VOLT; 1994 pdata->controller_flags |= OMAP_HSMMC_SUPPORTS_DUAL_VOLT;
1978 1995
1979 /* This driver only supports 1 slot */ 1996 pdata->switch_pin = cd_gpio;
1980 pdata->nr_slots = 1; 1997 pdata->gpio_wp = wp_gpio;
1981 pdata->slots[0].switch_pin = cd_gpio;
1982 pdata->slots[0].gpio_wp = wp_gpio;
1983 1998
1984 if (of_find_property(np, "ti,non-removable", NULL)) { 1999 if (of_find_property(np, "ti,non-removable", NULL)) {
1985 pdata->slots[0].nonremovable = true; 2000 pdata->nonremovable = true;
1986 pdata->slots[0].no_regulator_off_init = true; 2001 pdata->no_regulator_off_init = true;
1987 } 2002 }
1988 of_property_read_u32(np, "bus-width", &bus_width); 2003 of_property_read_u32(np, "bus-width", &bus_width);
1989 if (bus_width == 4) 2004 if (bus_width == 4)
1990 pdata->slots[0].caps |= MMC_CAP_4_BIT_DATA; 2005 pdata->caps |= MMC_CAP_4_BIT_DATA;
1991 else if (bus_width == 8) 2006 else if (bus_width == 8)
1992 pdata->slots[0].caps |= MMC_CAP_8_BIT_DATA; 2007 pdata->caps |= MMC_CAP_8_BIT_DATA;
1993 2008
1994 if (of_find_property(np, "ti,needs-special-reset", NULL)) 2009 if (of_find_property(np, "ti,needs-special-reset", NULL))
1995 pdata->slots[0].features |= HSMMC_HAS_UPDATED_RESET; 2010 pdata->features |= HSMMC_HAS_UPDATED_RESET;
1996 2011
1997 if (!of_property_read_u32(np, "max-frequency", &max_freq)) 2012 if (!of_property_read_u32(np, "max-frequency", &max_freq))
1998 pdata->max_freq = max_freq; 2013 pdata->max_freq = max_freq;
1999 2014
2000 if (of_find_property(np, "ti,needs-special-hs-handling", NULL)) 2015 if (of_find_property(np, "ti,needs-special-hs-handling", NULL))
2001 pdata->slots[0].features |= HSMMC_HAS_HSPE_SUPPORT; 2016 pdata->features |= HSMMC_HAS_HSPE_SUPPORT;
2002 2017
2003 if (of_find_property(np, "keep-power-in-suspend", NULL)) 2018 if (of_find_property(np, "keep-power-in-suspend", NULL))
2004 pdata->slots[0].pm_caps |= MMC_PM_KEEP_POWER; 2019 pdata->pm_caps |= MMC_PM_KEEP_POWER;
2005 2020
2006 if (of_find_property(np, "enable-sdio-wakeup", NULL)) 2021 if (of_find_property(np, "enable-sdio-wakeup", NULL))
2007 pdata->slots[0].pm_caps |= MMC_PM_WAKE_SDIO_IRQ; 2022 pdata->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
2008 2023
2009 return pdata; 2024 return pdata;
2010} 2025}
2011#else 2026#else
2012static inline struct omap_mmc_platform_data 2027static inline struct omap_hsmmc_platform_data
2013 *of_get_hsmmc_pdata(struct device *dev) 2028 *of_get_hsmmc_pdata(struct device *dev)
2014{ 2029{
2015 return ERR_PTR(-EINVAL); 2030 return ERR_PTR(-EINVAL);
@@ -2018,7 +2033,7 @@ static inline struct omap_mmc_platform_data
2018 2033
2019static int omap_hsmmc_probe(struct platform_device *pdev) 2034static int omap_hsmmc_probe(struct platform_device *pdev)
2020{ 2035{
2021 struct omap_mmc_platform_data *pdata = pdev->dev.platform_data; 2036 struct omap_hsmmc_platform_data *pdata = pdev->dev.platform_data;
2022 struct mmc_host *mmc; 2037 struct mmc_host *mmc;
2023 struct omap_hsmmc_host *host = NULL; 2038 struct omap_hsmmc_host *host = NULL;
2024 struct resource *res; 2039 struct resource *res;
@@ -2048,11 +2063,6 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
2048 return -ENXIO; 2063 return -ENXIO;
2049 } 2064 }
2050 2065
2051 if (pdata->nr_slots == 0) {
2052 dev_err(&pdev->dev, "No Slots\n");
2053 return -ENXIO;
2054 }
2055
2056 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2066 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2057 irq = platform_get_irq(pdev, 0); 2067 irq = platform_get_irq(pdev, 0);
2058 if (res == NULL || irq < 0) 2068 if (res == NULL || irq < 0)
@@ -2062,14 +2072,10 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
2062 if (IS_ERR(base)) 2072 if (IS_ERR(base))
2063 return PTR_ERR(base); 2073 return PTR_ERR(base);
2064 2074
2065 ret = omap_hsmmc_gpio_init(pdata);
2066 if (ret)
2067 goto err;
2068
2069 mmc = mmc_alloc_host(sizeof(struct omap_hsmmc_host), &pdev->dev); 2075 mmc = mmc_alloc_host(sizeof(struct omap_hsmmc_host), &pdev->dev);
2070 if (!mmc) { 2076 if (!mmc) {
2071 ret = -ENOMEM; 2077 ret = -ENOMEM;
2072 goto err_alloc; 2078 goto err;
2073 } 2079 }
2074 2080
2075 host = mmc_priv(mmc); 2081 host = mmc_priv(mmc);
@@ -2079,13 +2085,16 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
2079 host->use_dma = 1; 2085 host->use_dma = 1;
2080 host->dma_ch = -1; 2086 host->dma_ch = -1;
2081 host->irq = irq; 2087 host->irq = irq;
2082 host->slot_id = 0;
2083 host->mapbase = res->start + pdata->reg_offset; 2088 host->mapbase = res->start + pdata->reg_offset;
2084 host->base = base + pdata->reg_offset; 2089 host->base = base + pdata->reg_offset;
2085 host->power_mode = MMC_POWER_OFF; 2090 host->power_mode = MMC_POWER_OFF;
2086 host->next_data.cookie = 1; 2091 host->next_data.cookie = 1;
2087 host->pbias_enabled = 0; 2092 host->pbias_enabled = 0;
2088 2093
2094 ret = omap_hsmmc_gpio_init(host, pdata);
2095 if (ret)
2096 goto err_gpio;
2097
2089 platform_set_drvdata(pdev, host); 2098 platform_set_drvdata(pdev, host);
2090 2099
2091 if (pdev->dev.of_node) 2100 if (pdev->dev.of_node)
@@ -2144,14 +2153,14 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
2144 mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED | 2153 mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
2145 MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_ERASE; 2154 MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_ERASE;
2146 2155
2147 mmc->caps |= mmc_slot(host).caps; 2156 mmc->caps |= mmc_pdata(host)->caps;
2148 if (mmc->caps & MMC_CAP_8_BIT_DATA) 2157 if (mmc->caps & MMC_CAP_8_BIT_DATA)
2149 mmc->caps |= MMC_CAP_4_BIT_DATA; 2158 mmc->caps |= MMC_CAP_4_BIT_DATA;
2150 2159
2151 if (mmc_slot(host).nonremovable) 2160 if (mmc_pdata(host)->nonremovable)
2152 mmc->caps |= MMC_CAP_NONREMOVABLE; 2161 mmc->caps |= MMC_CAP_NONREMOVABLE;
2153 2162
2154 mmc->pm_caps = mmc_slot(host).pm_caps; 2163 mmc->pm_caps = mmc_pdata(host)->pm_caps;
2155 2164
2156 omap_hsmmc_conf_bus_power(host); 2165 omap_hsmmc_conf_bus_power(host);
2157 2166
@@ -2204,27 +2213,19 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
2204 goto err_irq; 2213 goto err_irq;
2205 } 2214 }
2206 2215
2207 if (pdata->init != NULL) { 2216 if (omap_hsmmc_have_reg() && !mmc_pdata(host)->set_power) {
2208 if (pdata->init(&pdev->dev) != 0) {
2209 dev_err(mmc_dev(host->mmc),
2210 "Unable to configure MMC IRQs\n");
2211 goto err_irq;
2212 }
2213 }
2214
2215 if (omap_hsmmc_have_reg() && !mmc_slot(host).set_power) {
2216 ret = omap_hsmmc_reg_get(host); 2217 ret = omap_hsmmc_reg_get(host);
2217 if (ret) 2218 if (ret)
2218 goto err_reg; 2219 goto err_irq;
2219 host->use_reg = 1; 2220 host->use_reg = 1;
2220 } 2221 }
2221 2222
2222 mmc->ocr_avail = mmc_slot(host).ocr_mask; 2223 mmc->ocr_avail = mmc_pdata(host)->ocr_mask;
2223 2224
2224 /* Request IRQ for card detect */ 2225 /* Request IRQ for card detect */
2225 if ((mmc_slot(host).card_detect_irq)) { 2226 if (host->card_detect_irq) {
2226 ret = devm_request_threaded_irq(&pdev->dev, 2227 ret = devm_request_threaded_irq(&pdev->dev,
2227 mmc_slot(host).card_detect_irq, 2228 host->card_detect_irq,
2228 NULL, omap_hsmmc_detect, 2229 NULL, omap_hsmmc_detect,
2229 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT, 2230 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
2230 mmc_hostname(mmc), host); 2231 mmc_hostname(mmc), host);
@@ -2233,8 +2234,8 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
2233 "Unable to grab MMC CD IRQ\n"); 2234 "Unable to grab MMC CD IRQ\n");
2234 goto err_irq_cd; 2235 goto err_irq_cd;
2235 } 2236 }
2236 pdata->suspend = omap_hsmmc_suspend_cdirq; 2237 host->suspend = omap_hsmmc_suspend_cdirq;
2237 pdata->resume = omap_hsmmc_resume_cdirq; 2238 host->resume = omap_hsmmc_resume_cdirq;
2238 } 2239 }
2239 2240
2240 omap_hsmmc_disable_irq(host); 2241 omap_hsmmc_disable_irq(host);
@@ -2255,12 +2256,12 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
2255 2256
2256 mmc_add_host(mmc); 2257 mmc_add_host(mmc);
2257 2258
2258 if (mmc_slot(host).name != NULL) { 2259 if (mmc_pdata(host)->name != NULL) {
2259 ret = device_create_file(&mmc->class_dev, &dev_attr_slot_name); 2260 ret = device_create_file(&mmc->class_dev, &dev_attr_slot_name);
2260 if (ret < 0) 2261 if (ret < 0)
2261 goto err_slot_name; 2262 goto err_slot_name;
2262 } 2263 }
2263 if (mmc_slot(host).card_detect_irq && mmc_slot(host).get_cover_state) { 2264 if (host->card_detect_irq && host->get_cover_state) {
2264 ret = device_create_file(&mmc->class_dev, 2265 ret = device_create_file(&mmc->class_dev,
2265 &dev_attr_cover_switch); 2266 &dev_attr_cover_switch);
2266 if (ret < 0) 2267 if (ret < 0)
@@ -2278,9 +2279,6 @@ err_slot_name:
2278err_irq_cd: 2279err_irq_cd:
2279 if (host->use_reg) 2280 if (host->use_reg)
2280 omap_hsmmc_reg_put(host); 2281 omap_hsmmc_reg_put(host);
2281err_reg:
2282 if (host->pdata->cleanup)
2283 host->pdata->cleanup(&pdev->dev);
2284err_irq: 2282err_irq:
2285 if (host->tx_chan) 2283 if (host->tx_chan)
2286 dma_release_channel(host->tx_chan); 2284 dma_release_channel(host->tx_chan);
@@ -2291,9 +2289,9 @@ err_irq:
2291 if (host->dbclk) 2289 if (host->dbclk)
2292 clk_disable_unprepare(host->dbclk); 2290 clk_disable_unprepare(host->dbclk);
2293err1: 2291err1:
2292 omap_hsmmc_gpio_free(host, pdata);
2293err_gpio:
2294 mmc_free_host(mmc); 2294 mmc_free_host(mmc);
2295err_alloc:
2296 omap_hsmmc_gpio_free(pdata);
2297err: 2295err:
2298 return ret; 2296 return ret;
2299} 2297}
@@ -2306,8 +2304,6 @@ static int omap_hsmmc_remove(struct platform_device *pdev)
2306 mmc_remove_host(host->mmc); 2304 mmc_remove_host(host->mmc);
2307 if (host->use_reg) 2305 if (host->use_reg)
2308 omap_hsmmc_reg_put(host); 2306 omap_hsmmc_reg_put(host);
2309 if (host->pdata->cleanup)
2310 host->pdata->cleanup(&pdev->dev);
2311 2307
2312 if (host->tx_chan) 2308 if (host->tx_chan)
2313 dma_release_channel(host->tx_chan); 2309 dma_release_channel(host->tx_chan);
@@ -2319,7 +2315,7 @@ static int omap_hsmmc_remove(struct platform_device *pdev)
2319 if (host->dbclk) 2315 if (host->dbclk)
2320 clk_disable_unprepare(host->dbclk); 2316 clk_disable_unprepare(host->dbclk);
2321 2317
2322 omap_hsmmc_gpio_free(host->pdata); 2318 omap_hsmmc_gpio_free(host, host->pdata);
2323 mmc_free_host(host->mmc); 2319 mmc_free_host(host->mmc);
2324 2320
2325 return 0; 2321 return 0;
@@ -2330,8 +2326,8 @@ static int omap_hsmmc_prepare(struct device *dev)
2330{ 2326{
2331 struct omap_hsmmc_host *host = dev_get_drvdata(dev); 2327 struct omap_hsmmc_host *host = dev_get_drvdata(dev);
2332 2328
2333 if (host->pdata->suspend) 2329 if (host->suspend)
2334 return host->pdata->suspend(dev, host->slot_id); 2330 return host->suspend(dev);
2335 2331
2336 return 0; 2332 return 0;
2337} 2333}
@@ -2340,8 +2336,8 @@ static void omap_hsmmc_complete(struct device *dev)
2340{ 2336{
2341 struct omap_hsmmc_host *host = dev_get_drvdata(dev); 2337 struct omap_hsmmc_host *host = dev_get_drvdata(dev);
2342 2338
2343 if (host->pdata->resume) 2339 if (host->resume)
2344 host->pdata->resume(dev, host->slot_id); 2340 host->resume(dev);
2345 2341
2346} 2342}
2347 2343
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index 9cccc0e89b04..c01eac7c8196 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -76,6 +76,7 @@ struct sdhci_acpi_host {
76 const struct sdhci_acpi_slot *slot; 76 const struct sdhci_acpi_slot *slot;
77 struct platform_device *pdev; 77 struct platform_device *pdev;
78 bool use_runtime_pm; 78 bool use_runtime_pm;
79 bool dma_setup;
79}; 80};
80 81
81static inline bool sdhci_acpi_flag(struct sdhci_acpi_host *c, unsigned int flag) 82static inline bool sdhci_acpi_flag(struct sdhci_acpi_host *c, unsigned int flag)
@@ -85,7 +86,29 @@ static inline bool sdhci_acpi_flag(struct sdhci_acpi_host *c, unsigned int flag)
85 86
86static int sdhci_acpi_enable_dma(struct sdhci_host *host) 87static int sdhci_acpi_enable_dma(struct sdhci_host *host)
87{ 88{
88 return 0; 89 struct sdhci_acpi_host *c = sdhci_priv(host);
90 struct device *dev = &c->pdev->dev;
91 int err = -1;
92
93 if (c->dma_setup)
94 return 0;
95
96 if (host->flags & SDHCI_USE_64_BIT_DMA) {
97 if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA) {
98 host->flags &= ~SDHCI_USE_64_BIT_DMA;
99 } else {
100 err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
101 if (err)
102 dev_warn(dev, "Failed to set 64-bit DMA mask\n");
103 }
104 }
105
106 if (err)
107 err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
108
109 c->dma_setup = !err;
110
111 return err;
89} 112}
90 113
91static void sdhci_acpi_int_hw_reset(struct sdhci_host *host) 114static void sdhci_acpi_int_hw_reset(struct sdhci_host *host)
@@ -180,17 +203,21 @@ static int sdhci_acpi_sd_probe_slot(struct platform_device *pdev,
180static const struct sdhci_acpi_slot sdhci_acpi_slot_int_emmc = { 203static const struct sdhci_acpi_slot sdhci_acpi_slot_int_emmc = {
181 .chip = &sdhci_acpi_chip_int, 204 .chip = &sdhci_acpi_chip_int,
182 .caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE | 205 .caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
183 MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR, 206 MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR |
207 MMC_CAP_BUS_WIDTH_TEST | MMC_CAP_WAIT_WHILE_BUSY,
184 .caps2 = MMC_CAP2_HC_ERASE_SZ, 208 .caps2 = MMC_CAP2_HC_ERASE_SZ,
185 .flags = SDHCI_ACPI_RUNTIME_PM, 209 .flags = SDHCI_ACPI_RUNTIME_PM,
210 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
186 .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | SDHCI_QUIRK2_STOP_WITH_TC, 211 .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | SDHCI_QUIRK2_STOP_WITH_TC,
187 .probe_slot = sdhci_acpi_emmc_probe_slot, 212 .probe_slot = sdhci_acpi_emmc_probe_slot,
188}; 213};
189 214
190static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sdio = { 215static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sdio = {
191 .quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION, 216 .quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
217 SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
192 .quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON, 218 .quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON,
193 .caps = MMC_CAP_NONREMOVABLE | MMC_CAP_POWER_OFF_CARD, 219 .caps = MMC_CAP_NONREMOVABLE | MMC_CAP_POWER_OFF_CARD |
220 MMC_CAP_BUS_WIDTH_TEST | MMC_CAP_WAIT_WHILE_BUSY,
194 .flags = SDHCI_ACPI_RUNTIME_PM, 221 .flags = SDHCI_ACPI_RUNTIME_PM,
195 .pm_caps = MMC_PM_KEEP_POWER, 222 .pm_caps = MMC_PM_KEEP_POWER,
196 .probe_slot = sdhci_acpi_sdio_probe_slot, 223 .probe_slot = sdhci_acpi_sdio_probe_slot,
@@ -199,8 +226,10 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sdio = {
199static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sd = { 226static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sd = {
200 .flags = SDHCI_ACPI_SD_CD | SDHCI_ACPI_SD_CD_OVERRIDE_LEVEL | 227 .flags = SDHCI_ACPI_SD_CD | SDHCI_ACPI_SD_CD_OVERRIDE_LEVEL |
201 SDHCI_ACPI_RUNTIME_PM, 228 SDHCI_ACPI_RUNTIME_PM,
229 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
202 .quirks2 = SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON | 230 .quirks2 = SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON |
203 SDHCI_QUIRK2_STOP_WITH_TC, 231 SDHCI_QUIRK2_STOP_WITH_TC,
232 .caps = MMC_CAP_BUS_WIDTH_TEST | MMC_CAP_WAIT_WHILE_BUSY,
204 .probe_slot = sdhci_acpi_sd_probe_slot, 233 .probe_slot = sdhci_acpi_sd_probe_slot,
205}; 234};
206 235
@@ -305,21 +334,6 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
305 goto err_free; 334 goto err_free;
306 } 335 }
307 336
308 if (!dev->dma_mask) {
309 u64 dma_mask;
310
311 if (sdhci_readl(host, SDHCI_CAPABILITIES) & SDHCI_CAN_64BIT) {
312 /* 64-bit DMA is not supported at present */
313 dma_mask = DMA_BIT_MASK(32);
314 } else {
315 dma_mask = DMA_BIT_MASK(32);
316 }
317
318 err = dma_coerce_mask_and_coherent(dev, dma_mask);
319 if (err)
320 goto err_free;
321 }
322
323 if (c->slot) { 337 if (c->slot) {
324 if (c->slot->probe_slot) { 338 if (c->slot->probe_slot) {
325 err = c->slot->probe_slot(pdev, hid, uid); 339 err = c->slot->probe_slot(pdev, hid, uid);
@@ -416,7 +430,7 @@ static int sdhci_acpi_resume(struct device *dev)
416 430
417#endif 431#endif
418 432
419#ifdef CONFIG_PM_RUNTIME 433#ifdef CONFIG_PM
420 434
421static int sdhci_acpi_runtime_suspend(struct device *dev) 435static int sdhci_acpi_runtime_suspend(struct device *dev)
422{ 436{
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 587ee0edeb57..af1f7c0f9545 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -65,8 +65,6 @@
65/* NOTE: the minimum valid tuning start tap for mx6sl is 1 */ 65/* NOTE: the minimum valid tuning start tap for mx6sl is 1 */
66#define ESDHC_TUNING_START_TAP 0x1 66#define ESDHC_TUNING_START_TAP 0x1
67 67
68#define ESDHC_TUNING_BLOCK_PATTERN_LEN 64
69
70/* pinctrl state */ 68/* pinctrl state */
71#define ESDHC_PINCTRL_STATE_100MHZ "state_100mhz" 69#define ESDHC_PINCTRL_STATE_100MHZ "state_100mhz"
72#define ESDHC_PINCTRL_STATE_200MHZ "state_200mhz" 70#define ESDHC_PINCTRL_STATE_200MHZ "state_200mhz"
@@ -692,8 +690,6 @@ static void esdhc_prepare_tuning(struct sdhci_host *host, u32 val)
692 /* FIXME: delay a bit for card to be ready for next tuning due to errors */ 690 /* FIXME: delay a bit for card to be ready for next tuning due to errors */
693 mdelay(1); 691 mdelay(1);
694 692
695 /* This is balanced by the runtime put in sdhci_tasklet_finish */
696 pm_runtime_get_sync(host->mmc->parent);
697 reg = readl(host->ioaddr + ESDHC_MIX_CTRL); 693 reg = readl(host->ioaddr + ESDHC_MIX_CTRL);
698 reg |= ESDHC_MIX_CTRL_EXE_TUNE | ESDHC_MIX_CTRL_SMPCLK_SEL | 694 reg |= ESDHC_MIX_CTRL_EXE_TUNE | ESDHC_MIX_CTRL_SMPCLK_SEL |
699 ESDHC_MIX_CTRL_FBCLK_SEL; 695 ESDHC_MIX_CTRL_FBCLK_SEL;
@@ -704,54 +700,6 @@ static void esdhc_prepare_tuning(struct sdhci_host *host, u32 val)
704 val, readl(host->ioaddr + ESDHC_TUNE_CTRL_STATUS)); 700 val, readl(host->ioaddr + ESDHC_TUNE_CTRL_STATUS));
705} 701}
706 702
707static void esdhc_request_done(struct mmc_request *mrq)
708{
709 complete(&mrq->completion);
710}
711
712static int esdhc_send_tuning_cmd(struct sdhci_host *host, u32 opcode,
713 struct scatterlist *sg)
714{
715 struct mmc_command cmd = {0};
716 struct mmc_request mrq = {NULL};
717 struct mmc_data data = {0};
718
719 cmd.opcode = opcode;
720 cmd.arg = 0;
721 cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
722
723 data.blksz = ESDHC_TUNING_BLOCK_PATTERN_LEN;
724 data.blocks = 1;
725 data.flags = MMC_DATA_READ;
726 data.sg = sg;
727 data.sg_len = 1;
728
729 mrq.cmd = &cmd;
730 mrq.cmd->mrq = &mrq;
731 mrq.data = &data;
732 mrq.data->mrq = &mrq;
733 mrq.cmd->data = mrq.data;
734
735 mrq.done = esdhc_request_done;
736 init_completion(&(mrq.completion));
737
738 spin_lock_irq(&host->lock);
739 host->mrq = &mrq;
740
741 sdhci_send_command(host, mrq.cmd);
742
743 spin_unlock_irq(&host->lock);
744
745 wait_for_completion(&mrq.completion);
746
747 if (cmd.error)
748 return cmd.error;
749 if (data.error)
750 return data.error;
751
752 return 0;
753}
754
755static void esdhc_post_tuning(struct sdhci_host *host) 703static void esdhc_post_tuning(struct sdhci_host *host)
756{ 704{
757 u32 reg; 705 u32 reg;
@@ -763,21 +711,13 @@ static void esdhc_post_tuning(struct sdhci_host *host)
763 711
764static int esdhc_executing_tuning(struct sdhci_host *host, u32 opcode) 712static int esdhc_executing_tuning(struct sdhci_host *host, u32 opcode)
765{ 713{
766 struct scatterlist sg;
767 char *tuning_pattern;
768 int min, max, avg, ret; 714 int min, max, avg, ret;
769 715
770 tuning_pattern = kmalloc(ESDHC_TUNING_BLOCK_PATTERN_LEN, GFP_KERNEL);
771 if (!tuning_pattern)
772 return -ENOMEM;
773
774 sg_init_one(&sg, tuning_pattern, ESDHC_TUNING_BLOCK_PATTERN_LEN);
775
776 /* find the mininum delay first which can pass tuning */ 716 /* find the mininum delay first which can pass tuning */
777 min = ESDHC_TUNE_CTRL_MIN; 717 min = ESDHC_TUNE_CTRL_MIN;
778 while (min < ESDHC_TUNE_CTRL_MAX) { 718 while (min < ESDHC_TUNE_CTRL_MAX) {
779 esdhc_prepare_tuning(host, min); 719 esdhc_prepare_tuning(host, min);
780 if (!esdhc_send_tuning_cmd(host, opcode, &sg)) 720 if (!mmc_send_tuning(host->mmc))
781 break; 721 break;
782 min += ESDHC_TUNE_CTRL_STEP; 722 min += ESDHC_TUNE_CTRL_STEP;
783 } 723 }
@@ -786,7 +726,7 @@ static int esdhc_executing_tuning(struct sdhci_host *host, u32 opcode)
786 max = min + ESDHC_TUNE_CTRL_STEP; 726 max = min + ESDHC_TUNE_CTRL_STEP;
787 while (max < ESDHC_TUNE_CTRL_MAX) { 727 while (max < ESDHC_TUNE_CTRL_MAX) {
788 esdhc_prepare_tuning(host, max); 728 esdhc_prepare_tuning(host, max);
789 if (esdhc_send_tuning_cmd(host, opcode, &sg)) { 729 if (mmc_send_tuning(host->mmc)) {
790 max -= ESDHC_TUNE_CTRL_STEP; 730 max -= ESDHC_TUNE_CTRL_STEP;
791 break; 731 break;
792 } 732 }
@@ -796,11 +736,9 @@ static int esdhc_executing_tuning(struct sdhci_host *host, u32 opcode)
796 /* use average delay to get the best timing */ 736 /* use average delay to get the best timing */
797 avg = (min + max) / 2; 737 avg = (min + max) / 2;
798 esdhc_prepare_tuning(host, avg); 738 esdhc_prepare_tuning(host, avg);
799 ret = esdhc_send_tuning_cmd(host, opcode, &sg); 739 ret = mmc_send_tuning(host->mmc);
800 esdhc_post_tuning(host); 740 esdhc_post_tuning(host);
801 741
802 kfree(tuning_pattern);
803
804 dev_dbg(mmc_dev(host->mmc), "tunning %s at 0x%x ret %d\n", 742 dev_dbg(mmc_dev(host->mmc), "tunning %s at 0x%x ret %d\n",
805 ret ? "failed" : "passed", avg, ret); 743 ret ? "failed" : "passed", avg, ret);
806 744
@@ -1031,11 +969,8 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
1031 969
1032 imx_data->pins_default = pinctrl_lookup_state(imx_data->pinctrl, 970 imx_data->pins_default = pinctrl_lookup_state(imx_data->pinctrl,
1033 PINCTRL_STATE_DEFAULT); 971 PINCTRL_STATE_DEFAULT);
1034 if (IS_ERR(imx_data->pins_default)) { 972 if (IS_ERR(imx_data->pins_default))
1035 err = PTR_ERR(imx_data->pins_default); 973 dev_warn(mmc_dev(host->mmc), "could not get default state\n");
1036 dev_err(mmc_dev(host->mmc), "could not get default state\n");
1037 goto disable_clk;
1038 }
1039 974
1040 host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; 975 host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
1041 976
@@ -1123,7 +1058,8 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
1123 } 1058 }
1124 1059
1125 /* sdr50 and sdr104 needs work on 1.8v signal voltage */ 1060 /* sdr50 and sdr104 needs work on 1.8v signal voltage */
1126 if ((boarddata->support_vsel) && esdhc_is_usdhc(imx_data)) { 1061 if ((boarddata->support_vsel) && esdhc_is_usdhc(imx_data) &&
1062 !IS_ERR(imx_data->pins_default)) {
1127 imx_data->pins_100mhz = pinctrl_lookup_state(imx_data->pinctrl, 1063 imx_data->pins_100mhz = pinctrl_lookup_state(imx_data->pinctrl,
1128 ESDHC_PINCTRL_STATE_100MHZ); 1064 ESDHC_PINCTRL_STATE_100MHZ);
1129 imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl, 1065 imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl,
@@ -1172,7 +1108,7 @@ static int sdhci_esdhc_imx_remove(struct platform_device *pdev)
1172 pm_runtime_dont_use_autosuspend(&pdev->dev); 1108 pm_runtime_dont_use_autosuspend(&pdev->dev);
1173 pm_runtime_disable(&pdev->dev); 1109 pm_runtime_disable(&pdev->dev);
1174 1110
1175 if (!IS_ENABLED(CONFIG_PM_RUNTIME)) { 1111 if (!IS_ENABLED(CONFIG_PM)) {
1176 clk_disable_unprepare(imx_data->clk_per); 1112 clk_disable_unprepare(imx_data->clk_per);
1177 clk_disable_unprepare(imx_data->clk_ipg); 1113 clk_disable_unprepare(imx_data->clk_ipg);
1178 clk_disable_unprepare(imx_data->clk_ahb); 1114 clk_disable_unprepare(imx_data->clk_ahb);
@@ -1183,7 +1119,7 @@ static int sdhci_esdhc_imx_remove(struct platform_device *pdev)
1183 return 0; 1119 return 0;
1184} 1120}
1185 1121
1186#ifdef CONFIG_PM_RUNTIME 1122#ifdef CONFIG_PM
1187static int sdhci_esdhc_runtime_suspend(struct device *dev) 1123static int sdhci_esdhc_runtime_suspend(struct device *dev)
1188{ 1124{
1189 struct sdhci_host *host = dev_get_drvdata(dev); 1125 struct sdhci_host *host = dev_get_drvdata(dev);
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 30804385af6d..3d32ce896b09 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -339,9 +339,7 @@ static int msm_init_cm_dll(struct sdhci_host *host)
339static int sdhci_msm_execute_tuning(struct sdhci_host *host, u32 opcode) 339static int sdhci_msm_execute_tuning(struct sdhci_host *host, u32 opcode)
340{ 340{
341 int tuning_seq_cnt = 3; 341 int tuning_seq_cnt = 3;
342 u8 phase, *data_buf, tuned_phases[16], tuned_phase_cnt = 0; 342 u8 phase, tuned_phases[16], tuned_phase_cnt = 0;
343 const u8 *tuning_block_pattern = tuning_blk_pattern_4bit;
344 int size = sizeof(tuning_blk_pattern_4bit);
345 int rc; 343 int rc;
346 struct mmc_host *mmc = host->mmc; 344 struct mmc_host *mmc = host->mmc;
347 struct mmc_ios ios = host->mmc->ios; 345 struct mmc_ios ios = host->mmc->ios;
@@ -355,53 +353,21 @@ static int sdhci_msm_execute_tuning(struct sdhci_host *host, u32 opcode)
355 (ios.timing == MMC_TIMING_UHS_SDR104))) 353 (ios.timing == MMC_TIMING_UHS_SDR104)))
356 return 0; 354 return 0;
357 355
358 if ((opcode == MMC_SEND_TUNING_BLOCK_HS200) &&
359 (mmc->ios.bus_width == MMC_BUS_WIDTH_8)) {
360 tuning_block_pattern = tuning_blk_pattern_8bit;
361 size = sizeof(tuning_blk_pattern_8bit);
362 }
363
364 data_buf = kmalloc(size, GFP_KERNEL);
365 if (!data_buf)
366 return -ENOMEM;
367
368retry: 356retry:
369 /* First of all reset the tuning block */ 357 /* First of all reset the tuning block */
370 rc = msm_init_cm_dll(host); 358 rc = msm_init_cm_dll(host);
371 if (rc) 359 if (rc)
372 goto out; 360 return rc;
373 361
374 phase = 0; 362 phase = 0;
375 do { 363 do {
376 struct mmc_command cmd = { 0 };
377 struct mmc_data data = { 0 };
378 struct mmc_request mrq = {
379 .cmd = &cmd,
380 .data = &data
381 };
382 struct scatterlist sg;
383
384 /* Set the phase in delay line hw block */ 364 /* Set the phase in delay line hw block */
385 rc = msm_config_cm_dll_phase(host, phase); 365 rc = msm_config_cm_dll_phase(host, phase);
386 if (rc) 366 if (rc)
387 goto out; 367 return rc;
388 368
389 cmd.opcode = opcode; 369 rc = mmc_send_tuning(mmc);
390 cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; 370 if (!rc) {
391
392 data.blksz = size;
393 data.blocks = 1;
394 data.flags = MMC_DATA_READ;
395 data.timeout_ns = NSEC_PER_SEC; /* 1 second */
396
397 data.sg = &sg;
398 data.sg_len = 1;
399 sg_init_one(&sg, data_buf, size);
400 memset(data_buf, 0, size);
401 mmc_wait_for_req(mmc, &mrq);
402
403 if (!cmd.error && !data.error &&
404 !memcmp(data_buf, tuning_block_pattern, size)) {
405 /* Tuning is successful at this tuning point */ 371 /* Tuning is successful at this tuning point */
406 tuned_phases[tuned_phase_cnt++] = phase; 372 tuned_phases[tuned_phase_cnt++] = phase;
407 dev_dbg(mmc_dev(mmc), "%s: Found good phase = %d\n", 373 dev_dbg(mmc_dev(mmc), "%s: Found good phase = %d\n",
@@ -413,7 +379,7 @@ retry:
413 rc = msm_find_most_appropriate_phase(host, tuned_phases, 379 rc = msm_find_most_appropriate_phase(host, tuned_phases,
414 tuned_phase_cnt); 380 tuned_phase_cnt);
415 if (rc < 0) 381 if (rc < 0)
416 goto out; 382 return rc;
417 else 383 else
418 phase = rc; 384 phase = rc;
419 385
@@ -423,7 +389,7 @@ retry:
423 */ 389 */
424 rc = msm_config_cm_dll_phase(host, phase); 390 rc = msm_config_cm_dll_phase(host, phase);
425 if (rc) 391 if (rc)
426 goto out; 392 return rc;
427 dev_dbg(mmc_dev(mmc), "%s: Setting the tuning phase to %d\n", 393 dev_dbg(mmc_dev(mmc), "%s: Setting the tuning phase to %d\n",
428 mmc_hostname(mmc), phase); 394 mmc_hostname(mmc), phase);
429 } else { 395 } else {
@@ -435,8 +401,6 @@ retry:
435 rc = -EIO; 401 rc = -EIO;
436 } 402 }
437 403
438out:
439 kfree(data_buf);
440 return rc; 404 return rc;
441} 405}
442 406
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
index 981d66e5c023..bcb51e9dfdcd 100644
--- a/drivers/mmc/host/sdhci-of-arasan.c
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -165,7 +165,6 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
165 host = sdhci_pltfm_init(pdev, &sdhci_arasan_pdata, 0); 165 host = sdhci_pltfm_init(pdev, &sdhci_arasan_pdata, 0);
166 if (IS_ERR(host)) { 166 if (IS_ERR(host)) {
167 ret = PTR_ERR(host); 167 ret = PTR_ERR(host);
168 dev_err(&pdev->dev, "platform init failed (%u)\n", ret);
169 goto clk_disable_all; 168 goto clk_disable_all;
170 } 169 }
171 170
@@ -175,10 +174,8 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
175 pltfm_host->clk = clk_xin; 174 pltfm_host->clk = clk_xin;
176 175
177 ret = sdhci_add_host(host); 176 ret = sdhci_add_host(host);
178 if (ret) { 177 if (ret)
179 dev_err(&pdev->dev, "platform register failed (%u)\n", ret);
180 goto err_pltfm_free; 178 goto err_pltfm_free;
181 }
182 179
183 return 0; 180 return 0;
184 181
diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c
index 5670e381b0cf..e2ec108dba0e 100644
--- a/drivers/mmc/host/sdhci-pci-o2micro.c
+++ b/drivers/mmc/host/sdhci-pci-o2micro.c
@@ -127,8 +127,6 @@ void sdhci_pci_o2_fujin2_pci_init(struct sdhci_pci_chip *chip)
127 return; 127 return;
128 scratch_32 &= ~((1 << 21) | (1 << 30)); 128 scratch_32 &= ~((1 << 21) | (1 << 30));
129 129
130 /* Set RTD3 function disabled */
131 scratch_32 |= ((1 << 29) | (1 << 28));
132 pci_write_config_dword(chip->pdev, O2_SD_FUNC_REG3, scratch_32); 130 pci_write_config_dword(chip->pdev, O2_SD_FUNC_REG3, scratch_32);
133 131
134 /* Set L1 Entrance Timer */ 132 /* Set L1 Entrance Timer */
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index 61192973e7cb..03427755b902 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -134,7 +134,7 @@ static int pch_hc_probe_slot(struct sdhci_pci_slot *slot)
134 return 0; 134 return 0;
135} 135}
136 136
137#ifdef CONFIG_PM_RUNTIME 137#ifdef CONFIG_PM
138 138
139static irqreturn_t sdhci_pci_sd_cd(int irq, void *dev_id) 139static irqreturn_t sdhci_pci_sd_cd(int irq, void *dev_id)
140{ 140{
@@ -269,7 +269,9 @@ static void sdhci_pci_int_hw_reset(struct sdhci_host *host)
269static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot) 269static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
270{ 270{
271 slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE | 271 slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
272 MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR; 272 MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR |
273 MMC_CAP_BUS_WIDTH_TEST |
274 MMC_CAP_WAIT_WHILE_BUSY;
273 slot->host->mmc->caps2 |= MMC_CAP2_HC_ERASE_SZ; 275 slot->host->mmc->caps2 |= MMC_CAP2_HC_ERASE_SZ;
274 slot->hw_reset = sdhci_pci_int_hw_reset; 276 slot->hw_reset = sdhci_pci_int_hw_reset;
275 if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BSW_EMMC) 277 if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BSW_EMMC)
@@ -279,12 +281,16 @@ static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
279 281
280static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot) 282static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
281{ 283{
282 slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE; 284 slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE |
285 MMC_CAP_BUS_WIDTH_TEST |
286 MMC_CAP_WAIT_WHILE_BUSY;
283 return 0; 287 return 0;
284} 288}
285 289
286static int byt_sd_probe_slot(struct sdhci_pci_slot *slot) 290static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
287{ 291{
292 slot->host->mmc->caps |= MMC_CAP_BUS_WIDTH_TEST |
293 MMC_CAP_WAIT_WHILE_BUSY;
288 slot->cd_con_id = NULL; 294 slot->cd_con_id = NULL;
289 slot->cd_idx = 0; 295 slot->cd_idx = 0;
290 slot->cd_override_level = true; 296 slot->cd_override_level = true;
@@ -294,11 +300,13 @@ static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
294static const struct sdhci_pci_fixes sdhci_intel_byt_emmc = { 300static const struct sdhci_pci_fixes sdhci_intel_byt_emmc = {
295 .allow_runtime_pm = true, 301 .allow_runtime_pm = true,
296 .probe_slot = byt_emmc_probe_slot, 302 .probe_slot = byt_emmc_probe_slot,
303 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
297 .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | 304 .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
298 SDHCI_QUIRK2_STOP_WITH_TC, 305 SDHCI_QUIRK2_STOP_WITH_TC,
299}; 306};
300 307
301static const struct sdhci_pci_fixes sdhci_intel_byt_sdio = { 308static const struct sdhci_pci_fixes sdhci_intel_byt_sdio = {
309 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
302 .quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON | 310 .quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON |
303 SDHCI_QUIRK2_PRESET_VALUE_BROKEN, 311 SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
304 .allow_runtime_pm = true, 312 .allow_runtime_pm = true,
@@ -306,6 +314,7 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_sdio = {
306}; 314};
307 315
308static const struct sdhci_pci_fixes sdhci_intel_byt_sd = { 316static const struct sdhci_pci_fixes sdhci_intel_byt_sd = {
317 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
309 .quirks2 = SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON | 318 .quirks2 = SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON |
310 SDHCI_QUIRK2_PRESET_VALUE_BROKEN | 319 SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
311 SDHCI_QUIRK2_STOP_WITH_TC, 320 SDHCI_QUIRK2_STOP_WITH_TC,
@@ -645,6 +654,25 @@ static const struct sdhci_pci_fixes sdhci_rtsx = {
645 .probe_slot = rtsx_probe_slot, 654 .probe_slot = rtsx_probe_slot,
646}; 655};
647 656
657static int amd_probe(struct sdhci_pci_chip *chip)
658{
659 struct pci_dev *smbus_dev;
660
661 smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
662 PCI_DEVICE_ID_AMD_HUDSON2_SMBUS, NULL);
663
664 if (smbus_dev && (smbus_dev->revision < 0x51)) {
665 chip->quirks2 |= SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD;
666 chip->quirks2 |= SDHCI_QUIRK2_BROKEN_HS200;
667 }
668
669 return 0;
670}
671
672static const struct sdhci_pci_fixes sdhci_amd = {
673 .probe = amd_probe,
674};
675
648static const struct pci_device_id pci_ids[] = { 676static const struct pci_device_id pci_ids[] = {
649 { 677 {
650 .vendor = PCI_VENDOR_ID_RICOH, 678 .vendor = PCI_VENDOR_ID_RICOH,
@@ -1044,7 +1072,15 @@ static const struct pci_device_id pci_ids[] = {
1044 .subdevice = PCI_ANY_ID, 1072 .subdevice = PCI_ANY_ID,
1045 .driver_data = (kernel_ulong_t)&sdhci_o2, 1073 .driver_data = (kernel_ulong_t)&sdhci_o2,
1046 }, 1074 },
1047 1075 {
1076 .vendor = PCI_VENDOR_ID_AMD,
1077 .device = PCI_ANY_ID,
1078 .class = PCI_CLASS_SYSTEM_SDHCI << 8,
1079 .class_mask = 0xFFFF00,
1080 .subvendor = PCI_ANY_ID,
1081 .subdevice = PCI_ANY_ID,
1082 .driver_data = (kernel_ulong_t)&sdhci_amd,
1083 },
1048 { /* Generic SD host controller */ 1084 { /* Generic SD host controller */
1049 PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00) 1085 PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00)
1050 }, 1086 },
@@ -1064,7 +1100,7 @@ static int sdhci_pci_enable_dma(struct sdhci_host *host)
1064{ 1100{
1065 struct sdhci_pci_slot *slot; 1101 struct sdhci_pci_slot *slot;
1066 struct pci_dev *pdev; 1102 struct pci_dev *pdev;
1067 int ret; 1103 int ret = -1;
1068 1104
1069 slot = sdhci_priv(host); 1105 slot = sdhci_priv(host);
1070 pdev = slot->chip->pdev; 1106 pdev = slot->chip->pdev;
@@ -1076,7 +1112,17 @@ static int sdhci_pci_enable_dma(struct sdhci_host *host)
1076 "doesn't fully claim to support it.\n"); 1112 "doesn't fully claim to support it.\n");
1077 } 1113 }
1078 1114
1079 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 1115 if (host->flags & SDHCI_USE_64_BIT_DMA) {
1116 if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA) {
1117 host->flags &= ~SDHCI_USE_64_BIT_DMA;
1118 } else {
1119 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
1120 if (ret)
1121 dev_warn(&pdev->dev, "Failed to set 64-bit DMA mask\n");
1122 }
1123 }
1124 if (ret)
1125 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1080 if (ret) 1126 if (ret)
1081 return ret; 1127 return ret;
1082 1128
@@ -1230,15 +1276,6 @@ static int sdhci_pci_resume(struct device *dev)
1230 return 0; 1276 return 0;
1231} 1277}
1232 1278
1233#else /* CONFIG_PM */
1234
1235#define sdhci_pci_suspend NULL
1236#define sdhci_pci_resume NULL
1237
1238#endif /* CONFIG_PM */
1239
1240#ifdef CONFIG_PM_RUNTIME
1241
1242static int sdhci_pci_runtime_suspend(struct device *dev) 1279static int sdhci_pci_runtime_suspend(struct device *dev)
1243{ 1280{
1244 struct pci_dev *pdev = container_of(dev, struct pci_dev, dev); 1281 struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
@@ -1310,7 +1347,12 @@ static int sdhci_pci_runtime_idle(struct device *dev)
1310 return 0; 1347 return 0;
1311} 1348}
1312 1349
1313#endif 1350#else /* CONFIG_PM */
1351
1352#define sdhci_pci_suspend NULL
1353#define sdhci_pci_resume NULL
1354
1355#endif /* CONFIG_PM */
1314 1356
1315static const struct dev_pm_ops sdhci_pci_pm_ops = { 1357static const struct dev_pm_ops sdhci_pci_pm_ops = {
1316 .suspend = sdhci_pci_suspend, 1358 .suspend = sdhci_pci_suspend,
diff --git a/drivers/mmc/host/sdhci-pxav2.c b/drivers/mmc/host/sdhci-pxav2.c
index b4c23e983baf..f98008b5ea77 100644
--- a/drivers/mmc/host/sdhci-pxav2.c
+++ b/drivers/mmc/host/sdhci-pxav2.c
@@ -167,23 +167,17 @@ static int sdhci_pxav2_probe(struct platform_device *pdev)
167 struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data; 167 struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data;
168 struct device *dev = &pdev->dev; 168 struct device *dev = &pdev->dev;
169 struct sdhci_host *host = NULL; 169 struct sdhci_host *host = NULL;
170 struct sdhci_pxa *pxa = NULL;
171 const struct of_device_id *match; 170 const struct of_device_id *match;
172 171
173 int ret; 172 int ret;
174 struct clk *clk; 173 struct clk *clk;
175 174
176 pxa = kzalloc(sizeof(struct sdhci_pxa), GFP_KERNEL);
177 if (!pxa)
178 return -ENOMEM;
179
180 host = sdhci_pltfm_init(pdev, NULL, 0); 175 host = sdhci_pltfm_init(pdev, NULL, 0);
181 if (IS_ERR(host)) { 176 if (IS_ERR(host))
182 kfree(pxa);
183 return PTR_ERR(host); 177 return PTR_ERR(host);
184 } 178
185 pltfm_host = sdhci_priv(host); 179 pltfm_host = sdhci_priv(host);
186 pltfm_host->priv = pxa; 180 pltfm_host->priv = NULL;
187 181
188 clk = clk_get(dev, "PXA-SDHCLK"); 182 clk = clk_get(dev, "PXA-SDHCLK");
189 if (IS_ERR(clk)) { 183 if (IS_ERR(clk)) {
@@ -238,7 +232,6 @@ err_add_host:
238 clk_put(clk); 232 clk_put(clk);
239err_clk_get: 233err_clk_get:
240 sdhci_pltfm_free(pdev); 234 sdhci_pltfm_free(pdev);
241 kfree(pxa);
242 return ret; 235 return ret;
243} 236}
244 237
@@ -246,14 +239,12 @@ static int sdhci_pxav2_remove(struct platform_device *pdev)
246{ 239{
247 struct sdhci_host *host = platform_get_drvdata(pdev); 240 struct sdhci_host *host = platform_get_drvdata(pdev);
248 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 241 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
249 struct sdhci_pxa *pxa = pltfm_host->priv;
250 242
251 sdhci_remove_host(host, 1); 243 sdhci_remove_host(host, 1);
252 244
253 clk_disable_unprepare(pltfm_host->clk); 245 clk_disable_unprepare(pltfm_host->clk);
254 clk_put(pltfm_host->clk); 246 clk_put(pltfm_host->clk);
255 sdhci_pltfm_free(pdev); 247 sdhci_pltfm_free(pdev);
256 kfree(pxa);
257 248
258 return 0; 249 return 0;
259} 250}
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index 5036d7d39529..45238871192d 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -58,6 +58,12 @@
58#define SDCE_MISC_INT (1<<2) 58#define SDCE_MISC_INT (1<<2)
59#define SDCE_MISC_INT_EN (1<<1) 59#define SDCE_MISC_INT_EN (1<<1)
60 60
61struct sdhci_pxa {
62 struct clk *clk_core;
63 struct clk *clk_io;
64 u8 power_mode;
65};
66
61/* 67/*
62 * These registers are relative to the second register region, for the 68 * These registers are relative to the second register region, for the
63 * MBus bridge. 69 * MBus bridge.
@@ -211,6 +217,7 @@ static void pxav3_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs)
211 case MMC_TIMING_UHS_SDR104: 217 case MMC_TIMING_UHS_SDR104:
212 ctrl_2 |= SDHCI_CTRL_UHS_SDR104 | SDHCI_CTRL_VDD_180; 218 ctrl_2 |= SDHCI_CTRL_UHS_SDR104 | SDHCI_CTRL_VDD_180;
213 break; 219 break;
220 case MMC_TIMING_MMC_DDR52:
214 case MMC_TIMING_UHS_DDR50: 221 case MMC_TIMING_UHS_DDR50:
215 ctrl_2 |= SDHCI_CTRL_UHS_DDR50 | SDHCI_CTRL_VDD_180; 222 ctrl_2 |= SDHCI_CTRL_UHS_DDR50 | SDHCI_CTRL_VDD_180;
216 break; 223 break;
@@ -283,9 +290,7 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
283 struct sdhci_host *host = NULL; 290 struct sdhci_host *host = NULL;
284 struct sdhci_pxa *pxa = NULL; 291 struct sdhci_pxa *pxa = NULL;
285 const struct of_device_id *match; 292 const struct of_device_id *match;
286
287 int ret; 293 int ret;
288 struct clk *clk;
289 294
290 pxa = devm_kzalloc(&pdev->dev, sizeof(struct sdhci_pxa), GFP_KERNEL); 295 pxa = devm_kzalloc(&pdev->dev, sizeof(struct sdhci_pxa), GFP_KERNEL);
291 if (!pxa) 296 if (!pxa)
@@ -305,14 +310,20 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
305 pltfm_host = sdhci_priv(host); 310 pltfm_host = sdhci_priv(host);
306 pltfm_host->priv = pxa; 311 pltfm_host->priv = pxa;
307 312
308 clk = devm_clk_get(dev, NULL); 313 pxa->clk_io = devm_clk_get(dev, "io");
309 if (IS_ERR(clk)) { 314 if (IS_ERR(pxa->clk_io))
315 pxa->clk_io = devm_clk_get(dev, NULL);
316 if (IS_ERR(pxa->clk_io)) {
310 dev_err(dev, "failed to get io clock\n"); 317 dev_err(dev, "failed to get io clock\n");
311 ret = PTR_ERR(clk); 318 ret = PTR_ERR(pxa->clk_io);
312 goto err_clk_get; 319 goto err_clk_get;
313 } 320 }
314 pltfm_host->clk = clk; 321 pltfm_host->clk = pxa->clk_io;
315 clk_prepare_enable(clk); 322 clk_prepare_enable(pxa->clk_io);
323
324 pxa->clk_core = devm_clk_get(dev, "core");
325 if (!IS_ERR(pxa->clk_core))
326 clk_prepare_enable(pxa->clk_core);
316 327
317 /* enable 1/8V DDR capable */ 328 /* enable 1/8V DDR capable */
318 host->mmc->caps |= MMC_CAP_1_8V_DDR; 329 host->mmc->caps |= MMC_CAP_1_8V_DDR;
@@ -385,7 +396,9 @@ err_add_host:
385 pm_runtime_disable(&pdev->dev); 396 pm_runtime_disable(&pdev->dev);
386err_of_parse: 397err_of_parse:
387err_cd_req: 398err_cd_req:
388 clk_disable_unprepare(clk); 399 clk_disable_unprepare(pxa->clk_io);
400 if (!IS_ERR(pxa->clk_core))
401 clk_disable_unprepare(pxa->clk_core);
389err_clk_get: 402err_clk_get:
390err_mbus_win: 403err_mbus_win:
391 sdhci_pltfm_free(pdev); 404 sdhci_pltfm_free(pdev);
@@ -396,12 +409,15 @@ static int sdhci_pxav3_remove(struct platform_device *pdev)
396{ 409{
397 struct sdhci_host *host = platform_get_drvdata(pdev); 410 struct sdhci_host *host = platform_get_drvdata(pdev);
398 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 411 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
412 struct sdhci_pxa *pxa = pltfm_host->priv;
399 413
400 pm_runtime_get_sync(&pdev->dev); 414 pm_runtime_get_sync(&pdev->dev);
401 sdhci_remove_host(host, 1); 415 sdhci_remove_host(host, 1);
402 pm_runtime_disable(&pdev->dev); 416 pm_runtime_disable(&pdev->dev);
403 417
404 clk_disable_unprepare(pltfm_host->clk); 418 clk_disable_unprepare(pxa->clk_io);
419 if (!IS_ERR(pxa->clk_core))
420 clk_disable_unprepare(pxa->clk_core);
405 421
406 sdhci_pltfm_free(pdev); 422 sdhci_pltfm_free(pdev);
407 423
@@ -436,20 +452,21 @@ static int sdhci_pxav3_resume(struct device *dev)
436} 452}
437#endif 453#endif
438 454
439#ifdef CONFIG_PM_RUNTIME 455#ifdef CONFIG_PM
440static int sdhci_pxav3_runtime_suspend(struct device *dev) 456static int sdhci_pxav3_runtime_suspend(struct device *dev)
441{ 457{
442 struct sdhci_host *host = dev_get_drvdata(dev); 458 struct sdhci_host *host = dev_get_drvdata(dev);
443 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 459 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
460 struct sdhci_pxa *pxa = pltfm_host->priv;
444 unsigned long flags; 461 unsigned long flags;
445 462
446 if (pltfm_host->clk) { 463 spin_lock_irqsave(&host->lock, flags);
447 spin_lock_irqsave(&host->lock, flags); 464 host->runtime_suspended = true;
448 host->runtime_suspended = true; 465 spin_unlock_irqrestore(&host->lock, flags);
449 spin_unlock_irqrestore(&host->lock, flags);
450 466
451 clk_disable_unprepare(pltfm_host->clk); 467 clk_disable_unprepare(pxa->clk_io);
452 } 468 if (!IS_ERR(pxa->clk_core))
469 clk_disable_unprepare(pxa->clk_core);
453 470
454 return 0; 471 return 0;
455} 472}
@@ -458,15 +475,16 @@ static int sdhci_pxav3_runtime_resume(struct device *dev)
458{ 475{
459 struct sdhci_host *host = dev_get_drvdata(dev); 476 struct sdhci_host *host = dev_get_drvdata(dev);
460 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 477 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
478 struct sdhci_pxa *pxa = pltfm_host->priv;
461 unsigned long flags; 479 unsigned long flags;
462 480
463 if (pltfm_host->clk) { 481 clk_prepare_enable(pxa->clk_io);
464 clk_prepare_enable(pltfm_host->clk); 482 if (!IS_ERR(pxa->clk_core))
483 clk_prepare_enable(pxa->clk_core);
465 484
466 spin_lock_irqsave(&host->lock, flags); 485 spin_lock_irqsave(&host->lock, flags);
467 host->runtime_suspended = false; 486 host->runtime_suspended = false;
468 spin_unlock_irqrestore(&host->lock, flags); 487 spin_unlock_irqrestore(&host->lock, flags);
469 }
470 488
471 return 0; 489 return 0;
472} 490}
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 0ce6eb17deaf..c45b8932d843 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -300,6 +300,7 @@ static void sdhci_cmu_set_clock(struct sdhci_host *host, unsigned int clock)
300 struct device *dev = &ourhost->pdev->dev; 300 struct device *dev = &ourhost->pdev->dev;
301 unsigned long timeout; 301 unsigned long timeout;
302 u16 clk = 0; 302 u16 clk = 0;
303 int ret;
303 304
304 host->mmc->actual_clock = 0; 305 host->mmc->actual_clock = 0;
305 306
@@ -311,7 +312,12 @@ static void sdhci_cmu_set_clock(struct sdhci_host *host, unsigned int clock)
311 312
312 sdhci_s3c_set_clock(host, clock); 313 sdhci_s3c_set_clock(host, clock);
313 314
314 clk_set_rate(ourhost->clk_bus[ourhost->cur_clk], clock); 315 ret = clk_set_rate(ourhost->clk_bus[ourhost->cur_clk], clock);
316 if (ret != 0) {
317 dev_err(dev, "%s: failed to set clock rate %uHz\n",
318 mmc_hostname(host->mmc), clock);
319 return;
320 }
315 321
316 clk = SDHCI_CLOCK_INT_EN; 322 clk = SDHCI_CLOCK_INT_EN;
317 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); 323 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
@@ -609,7 +615,7 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
609 goto err_req_regs; 615 goto err_req_regs;
610 } 616 }
611 617
612#ifdef CONFIG_PM_RUNTIME 618#ifdef CONFIG_PM
613 if (pdata->cd_type != S3C_SDHCI_CD_INTERNAL) 619 if (pdata->cd_type != S3C_SDHCI_CD_INTERNAL)
614 clk_disable_unprepare(sc->clk_io); 620 clk_disable_unprepare(sc->clk_io);
615#endif 621#endif
@@ -635,7 +641,7 @@ static int sdhci_s3c_remove(struct platform_device *pdev)
635 if (sc->ext_cd_irq) 641 if (sc->ext_cd_irq)
636 free_irq(sc->ext_cd_irq, sc); 642 free_irq(sc->ext_cd_irq, sc);
637 643
638#ifdef CONFIG_PM_RUNTIME 644#ifdef CONFIG_PM
639 if (sc->pdata->cd_type != S3C_SDHCI_CD_INTERNAL) 645 if (sc->pdata->cd_type != S3C_SDHCI_CD_INTERNAL)
640 clk_prepare_enable(sc->clk_io); 646 clk_prepare_enable(sc->clk_io);
641#endif 647#endif
@@ -667,7 +673,7 @@ static int sdhci_s3c_resume(struct device *dev)
667} 673}
668#endif 674#endif
669 675
670#ifdef CONFIG_PM_RUNTIME 676#ifdef CONFIG_PM
671static int sdhci_s3c_runtime_suspend(struct device *dev) 677static int sdhci_s3c_runtime_suspend(struct device *dev)
672{ 678{
673 struct sdhci_host *host = dev_get_drvdata(dev); 679 struct sdhci_host *host = dev_get_drvdata(dev);
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index ada1a3ea3a87..cbb245b58538 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -44,8 +44,6 @@
44 44
45#define MAX_TUNING_LOOP 40 45#define MAX_TUNING_LOOP 40
46 46
47#define ADMA_SIZE ((128 * 2 + 1) * 4)
48
49static unsigned int debug_quirks = 0; 47static unsigned int debug_quirks = 0;
50static unsigned int debug_quirks2; 48static unsigned int debug_quirks2;
51 49
@@ -56,7 +54,7 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode);
56static void sdhci_tuning_timer(unsigned long data); 54static void sdhci_tuning_timer(unsigned long data);
57static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable); 55static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
58 56
59#ifdef CONFIG_PM_RUNTIME 57#ifdef CONFIG_PM
60static int sdhci_runtime_pm_get(struct sdhci_host *host); 58static int sdhci_runtime_pm_get(struct sdhci_host *host);
61static int sdhci_runtime_pm_put(struct sdhci_host *host); 59static int sdhci_runtime_pm_put(struct sdhci_host *host);
62static void sdhci_runtime_pm_bus_on(struct sdhci_host *host); 60static void sdhci_runtime_pm_bus_on(struct sdhci_host *host);
@@ -119,10 +117,17 @@ static void sdhci_dumpregs(struct sdhci_host *host)
119 pr_debug(DRIVER_NAME ": Host ctl2: 0x%08x\n", 117 pr_debug(DRIVER_NAME ": Host ctl2: 0x%08x\n",
120 sdhci_readw(host, SDHCI_HOST_CONTROL2)); 118 sdhci_readw(host, SDHCI_HOST_CONTROL2));
121 119
122 if (host->flags & SDHCI_USE_ADMA) 120 if (host->flags & SDHCI_USE_ADMA) {
123 pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n", 121 if (host->flags & SDHCI_USE_64_BIT_DMA)
124 readl(host->ioaddr + SDHCI_ADMA_ERROR), 122 pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n",
125 readl(host->ioaddr + SDHCI_ADMA_ADDRESS)); 123 readl(host->ioaddr + SDHCI_ADMA_ERROR),
124 readl(host->ioaddr + SDHCI_ADMA_ADDRESS_HI),
125 readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
126 else
127 pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
128 readl(host->ioaddr + SDHCI_ADMA_ERROR),
129 readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
130 }
126 131
127 pr_debug(DRIVER_NAME ": ===========================================\n"); 132 pr_debug(DRIVER_NAME ": ===========================================\n");
128} 133}
@@ -448,18 +453,26 @@ static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
448 local_irq_restore(*flags); 453 local_irq_restore(*flags);
449} 454}
450 455
451static void sdhci_set_adma_desc(u8 *desc, u32 addr, int len, unsigned cmd) 456static void sdhci_adma_write_desc(struct sdhci_host *host, void *desc,
457 dma_addr_t addr, int len, unsigned cmd)
452{ 458{
453 __le32 *dataddr = (__le32 __force *)(desc + 4); 459 struct sdhci_adma2_64_desc *dma_desc = desc;
454 __le16 *cmdlen = (__le16 __force *)desc; 460
461 /* 32-bit and 64-bit descriptors have these members in same position */
462 dma_desc->cmd = cpu_to_le16(cmd);
463 dma_desc->len = cpu_to_le16(len);
464 dma_desc->addr_lo = cpu_to_le32((u32)addr);
455 465
456 /* SDHCI specification says ADMA descriptors should be 4 byte 466 if (host->flags & SDHCI_USE_64_BIT_DMA)
457 * aligned, so using 16 or 32bit operations should be safe. */ 467 dma_desc->addr_hi = cpu_to_le32((u64)addr >> 32);
468}
458 469
459 cmdlen[0] = cpu_to_le16(cmd); 470static void sdhci_adma_mark_end(void *desc)
460 cmdlen[1] = cpu_to_le16(len); 471{
472 struct sdhci_adma2_64_desc *dma_desc = desc;
461 473
462 dataddr[0] = cpu_to_le32(addr); 474 /* 32-bit and 64-bit descriptors have 'cmd' in same position */
475 dma_desc->cmd |= cpu_to_le16(ADMA2_END);
463} 476}
464 477
465static int sdhci_adma_table_pre(struct sdhci_host *host, 478static int sdhci_adma_table_pre(struct sdhci_host *host,
@@ -467,8 +480,8 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
467{ 480{
468 int direction; 481 int direction;
469 482
470 u8 *desc; 483 void *desc;
471 u8 *align; 484 void *align;
472 dma_addr_t addr; 485 dma_addr_t addr;
473 dma_addr_t align_addr; 486 dma_addr_t align_addr;
474 int len, offset; 487 int len, offset;
@@ -489,17 +502,17 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
489 direction = DMA_TO_DEVICE; 502 direction = DMA_TO_DEVICE;
490 503
491 host->align_addr = dma_map_single(mmc_dev(host->mmc), 504 host->align_addr = dma_map_single(mmc_dev(host->mmc),
492 host->align_buffer, 128 * 4, direction); 505 host->align_buffer, host->align_buffer_sz, direction);
493 if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr)) 506 if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr))
494 goto fail; 507 goto fail;
495 BUG_ON(host->align_addr & 0x3); 508 BUG_ON(host->align_addr & host->align_mask);
496 509
497 host->sg_count = dma_map_sg(mmc_dev(host->mmc), 510 host->sg_count = dma_map_sg(mmc_dev(host->mmc),
498 data->sg, data->sg_len, direction); 511 data->sg, data->sg_len, direction);
499 if (host->sg_count == 0) 512 if (host->sg_count == 0)
500 goto unmap_align; 513 goto unmap_align;
501 514
502 desc = host->adma_desc; 515 desc = host->adma_table;
503 align = host->align_buffer; 516 align = host->align_buffer;
504 517
505 align_addr = host->align_addr; 518 align_addr = host->align_addr;
@@ -515,24 +528,27 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
515 * the (up to three) bytes that screw up the 528 * the (up to three) bytes that screw up the
516 * alignment. 529 * alignment.
517 */ 530 */
518 offset = (4 - (addr & 0x3)) & 0x3; 531 offset = (host->align_sz - (addr & host->align_mask)) &
532 host->align_mask;
519 if (offset) { 533 if (offset) {
520 if (data->flags & MMC_DATA_WRITE) { 534 if (data->flags & MMC_DATA_WRITE) {
521 buffer = sdhci_kmap_atomic(sg, &flags); 535 buffer = sdhci_kmap_atomic(sg, &flags);
522 WARN_ON(((long)buffer & PAGE_MASK) > (PAGE_SIZE - 3)); 536 WARN_ON(((long)buffer & (PAGE_SIZE - 1)) >
537 (PAGE_SIZE - offset));
523 memcpy(align, buffer, offset); 538 memcpy(align, buffer, offset);
524 sdhci_kunmap_atomic(buffer, &flags); 539 sdhci_kunmap_atomic(buffer, &flags);
525 } 540 }
526 541
527 /* tran, valid */ 542 /* tran, valid */
528 sdhci_set_adma_desc(desc, align_addr, offset, 0x21); 543 sdhci_adma_write_desc(host, desc, align_addr, offset,
544 ADMA2_TRAN_VALID);
529 545
530 BUG_ON(offset > 65536); 546 BUG_ON(offset > 65536);
531 547
532 align += 4; 548 align += host->align_sz;
533 align_addr += 4; 549 align_addr += host->align_sz;
534 550
535 desc += 8; 551 desc += host->desc_sz;
536 552
537 addr += offset; 553 addr += offset;
538 len -= offset; 554 len -= offset;
@@ -541,23 +557,23 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
541 BUG_ON(len > 65536); 557 BUG_ON(len > 65536);
542 558
543 /* tran, valid */ 559 /* tran, valid */
544 sdhci_set_adma_desc(desc, addr, len, 0x21); 560 sdhci_adma_write_desc(host, desc, addr, len, ADMA2_TRAN_VALID);
545 desc += 8; 561 desc += host->desc_sz;
546 562
547 /* 563 /*
548 * If this triggers then we have a calculation bug 564 * If this triggers then we have a calculation bug
549 * somewhere. :/ 565 * somewhere. :/
550 */ 566 */
551 WARN_ON((desc - host->adma_desc) > ADMA_SIZE); 567 WARN_ON((desc - host->adma_table) >= host->adma_table_sz);
552 } 568 }
553 569
554 if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) { 570 if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
555 /* 571 /*
556 * Mark the last descriptor as the terminating descriptor 572 * Mark the last descriptor as the terminating descriptor
557 */ 573 */
558 if (desc != host->adma_desc) { 574 if (desc != host->adma_table) {
559 desc -= 8; 575 desc -= host->desc_sz;
560 desc[0] |= 0x2; /* end */ 576 sdhci_adma_mark_end(desc);
561 } 577 }
562 } else { 578 } else {
563 /* 579 /*
@@ -565,7 +581,7 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
565 */ 581 */
566 582
567 /* nop, end, valid */ 583 /* nop, end, valid */
568 sdhci_set_adma_desc(desc, 0, 0, 0x3); 584 sdhci_adma_write_desc(host, desc, 0, 0, ADMA2_NOP_END_VALID);
569 } 585 }
570 586
571 /* 587 /*
@@ -573,14 +589,14 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
573 */ 589 */
574 if (data->flags & MMC_DATA_WRITE) { 590 if (data->flags & MMC_DATA_WRITE) {
575 dma_sync_single_for_device(mmc_dev(host->mmc), 591 dma_sync_single_for_device(mmc_dev(host->mmc),
576 host->align_addr, 128 * 4, direction); 592 host->align_addr, host->align_buffer_sz, direction);
577 } 593 }
578 594
579 return 0; 595 return 0;
580 596
581unmap_align: 597unmap_align:
582 dma_unmap_single(mmc_dev(host->mmc), host->align_addr, 598 dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
583 128 * 4, direction); 599 host->align_buffer_sz, direction);
584fail: 600fail:
585 return -EINVAL; 601 return -EINVAL;
586} 602}
@@ -592,7 +608,7 @@ static void sdhci_adma_table_post(struct sdhci_host *host,
592 608
593 struct scatterlist *sg; 609 struct scatterlist *sg;
594 int i, size; 610 int i, size;
595 u8 *align; 611 void *align;
596 char *buffer; 612 char *buffer;
597 unsigned long flags; 613 unsigned long flags;
598 bool has_unaligned; 614 bool has_unaligned;
@@ -603,12 +619,12 @@ static void sdhci_adma_table_post(struct sdhci_host *host,
603 direction = DMA_TO_DEVICE; 619 direction = DMA_TO_DEVICE;
604 620
605 dma_unmap_single(mmc_dev(host->mmc), host->align_addr, 621 dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
606 128 * 4, direction); 622 host->align_buffer_sz, direction);
607 623
608 /* Do a quick scan of the SG list for any unaligned mappings */ 624 /* Do a quick scan of the SG list for any unaligned mappings */
609 has_unaligned = false; 625 has_unaligned = false;
610 for_each_sg(data->sg, sg, host->sg_count, i) 626 for_each_sg(data->sg, sg, host->sg_count, i)
611 if (sg_dma_address(sg) & 3) { 627 if (sg_dma_address(sg) & host->align_mask) {
612 has_unaligned = true; 628 has_unaligned = true;
613 break; 629 break;
614 } 630 }
@@ -620,15 +636,17 @@ static void sdhci_adma_table_post(struct sdhci_host *host,
620 align = host->align_buffer; 636 align = host->align_buffer;
621 637
622 for_each_sg(data->sg, sg, host->sg_count, i) { 638 for_each_sg(data->sg, sg, host->sg_count, i) {
623 if (sg_dma_address(sg) & 0x3) { 639 if (sg_dma_address(sg) & host->align_mask) {
624 size = 4 - (sg_dma_address(sg) & 0x3); 640 size = host->align_sz -
641 (sg_dma_address(sg) & host->align_mask);
625 642
626 buffer = sdhci_kmap_atomic(sg, &flags); 643 buffer = sdhci_kmap_atomic(sg, &flags);
627 WARN_ON(((long)buffer & PAGE_MASK) > (PAGE_SIZE - 3)); 644 WARN_ON(((long)buffer & (PAGE_SIZE - 1)) >
645 (PAGE_SIZE - size));
628 memcpy(buffer, align, size); 646 memcpy(buffer, align, size);
629 sdhci_kunmap_atomic(buffer, &flags); 647 sdhci_kunmap_atomic(buffer, &flags);
630 648
631 align += 4; 649 align += host->align_sz;
632 } 650 }
633 } 651 }
634 } 652 }
@@ -822,6 +840,10 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
822 } else { 840 } else {
823 sdhci_writel(host, host->adma_addr, 841 sdhci_writel(host, host->adma_addr,
824 SDHCI_ADMA_ADDRESS); 842 SDHCI_ADMA_ADDRESS);
843 if (host->flags & SDHCI_USE_64_BIT_DMA)
844 sdhci_writel(host,
845 (u64)host->adma_addr >> 32,
846 SDHCI_ADMA_ADDRESS_HI);
825 } 847 }
826 } else { 848 } else {
827 int sg_cnt; 849 int sg_cnt;
@@ -855,10 +877,14 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
855 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 877 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
856 ctrl &= ~SDHCI_CTRL_DMA_MASK; 878 ctrl &= ~SDHCI_CTRL_DMA_MASK;
857 if ((host->flags & SDHCI_REQ_USE_DMA) && 879 if ((host->flags & SDHCI_REQ_USE_DMA) &&
858 (host->flags & SDHCI_USE_ADMA)) 880 (host->flags & SDHCI_USE_ADMA)) {
859 ctrl |= SDHCI_CTRL_ADMA32; 881 if (host->flags & SDHCI_USE_64_BIT_DMA)
860 else 882 ctrl |= SDHCI_CTRL_ADMA64;
883 else
884 ctrl |= SDHCI_CTRL_ADMA32;
885 } else {
861 ctrl |= SDHCI_CTRL_SDMA; 886 ctrl |= SDHCI_CTRL_SDMA;
887 }
862 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 888 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
863 } 889 }
864 890
@@ -889,10 +915,15 @@ static void sdhci_set_transfer_mode(struct sdhci_host *host,
889 struct mmc_data *data = cmd->data; 915 struct mmc_data *data = cmd->data;
890 916
891 if (data == NULL) { 917 if (data == NULL) {
918 if (host->quirks2 &
919 SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) {
920 sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
921 } else {
892 /* clear Auto CMD settings for no data CMDs */ 922 /* clear Auto CMD settings for no data CMDs */
893 mode = sdhci_readw(host, SDHCI_TRANSFER_MODE); 923 mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
894 sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 | 924 sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
895 SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE); 925 SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
926 }
896 return; 927 return;
897 } 928 }
898 929
@@ -1117,6 +1148,9 @@ static u16 sdhci_get_preset_value(struct sdhci_host *host)
1117 case MMC_TIMING_UHS_DDR50: 1148 case MMC_TIMING_UHS_DDR50:
1118 preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50); 1149 preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
1119 break; 1150 break;
1151 case MMC_TIMING_MMC_HS400:
1152 preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400);
1153 break;
1120 default: 1154 default:
1121 pr_warn("%s: Invalid UHS-I mode selected\n", 1155 pr_warn("%s: Invalid UHS-I mode selected\n",
1122 mmc_hostname(host->mmc)); 1156 mmc_hostname(host->mmc));
@@ -1444,6 +1478,8 @@ void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
1444 else if ((timing == MMC_TIMING_UHS_DDR50) || 1478 else if ((timing == MMC_TIMING_UHS_DDR50) ||
1445 (timing == MMC_TIMING_MMC_DDR52)) 1479 (timing == MMC_TIMING_MMC_DDR52))
1446 ctrl_2 |= SDHCI_CTRL_UHS_DDR50; 1480 ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
1481 else if (timing == MMC_TIMING_MMC_HS400)
1482 ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */
1447 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); 1483 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
1448} 1484}
1449EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling); 1485EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);
@@ -1515,7 +1551,8 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
1515 u16 clk, ctrl_2; 1551 u16 clk, ctrl_2;
1516 1552
1517 /* In case of UHS-I modes, set High Speed Enable */ 1553 /* In case of UHS-I modes, set High Speed Enable */
1518 if ((ios->timing == MMC_TIMING_MMC_HS200) || 1554 if ((ios->timing == MMC_TIMING_MMC_HS400) ||
1555 (ios->timing == MMC_TIMING_MMC_HS200) ||
1519 (ios->timing == MMC_TIMING_MMC_DDR52) || 1556 (ios->timing == MMC_TIMING_MMC_DDR52) ||
1520 (ios->timing == MMC_TIMING_UHS_SDR50) || 1557 (ios->timing == MMC_TIMING_UHS_SDR50) ||
1521 (ios->timing == MMC_TIMING_UHS_SDR104) || 1558 (ios->timing == MMC_TIMING_UHS_SDR104) ||
@@ -1862,6 +1899,7 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1862 * tuning function has to be executed. 1899 * tuning function has to be executed.
1863 */ 1900 */
1864 switch (host->timing) { 1901 switch (host->timing) {
1902 case MMC_TIMING_MMC_HS400:
1865 case MMC_TIMING_MMC_HS200: 1903 case MMC_TIMING_MMC_HS200:
1866 case MMC_TIMING_UHS_SDR104: 1904 case MMC_TIMING_UHS_SDR104:
1867 break; 1905 break;
@@ -2144,9 +2182,10 @@ static void sdhci_tasklet_finish(unsigned long param)
2144 */ 2182 */
2145 if (!(host->flags & SDHCI_DEVICE_DEAD) && 2183 if (!(host->flags & SDHCI_DEVICE_DEAD) &&
2146 ((mrq->cmd && mrq->cmd->error) || 2184 ((mrq->cmd && mrq->cmd->error) ||
2147 (mrq->data && (mrq->data->error || 2185 (mrq->sbc && mrq->sbc->error) ||
2148 (mrq->data->stop && mrq->data->stop->error))) || 2186 (mrq->data && ((mrq->data->error && !mrq->data->stop) ||
2149 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) { 2187 (mrq->data->stop && mrq->data->stop->error))) ||
2188 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) {
2150 2189
2151 /* Some controllers need this kick or reset won't work here */ 2190 /* Some controllers need this kick or reset won't work here */
2152 if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) 2191 if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
@@ -2282,32 +2321,36 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *mask)
2282} 2321}
2283 2322
2284#ifdef CONFIG_MMC_DEBUG 2323#ifdef CONFIG_MMC_DEBUG
2285static void sdhci_show_adma_error(struct sdhci_host *host) 2324static void sdhci_adma_show_error(struct sdhci_host *host)
2286{ 2325{
2287 const char *name = mmc_hostname(host->mmc); 2326 const char *name = mmc_hostname(host->mmc);
2288 u8 *desc = host->adma_desc; 2327 void *desc = host->adma_table;
2289 __le32 *dma;
2290 __le16 *len;
2291 u8 attr;
2292 2328
2293 sdhci_dumpregs(host); 2329 sdhci_dumpregs(host);
2294 2330
2295 while (true) { 2331 while (true) {
2296 dma = (__le32 *)(desc + 4); 2332 struct sdhci_adma2_64_desc *dma_desc = desc;
2297 len = (__le16 *)(desc + 2); 2333
2298 attr = *desc; 2334 if (host->flags & SDHCI_USE_64_BIT_DMA)
2299 2335 DBG("%s: %p: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
2300 DBG("%s: %p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n", 2336 name, desc, le32_to_cpu(dma_desc->addr_hi),
2301 name, desc, le32_to_cpu(*dma), le16_to_cpu(*len), attr); 2337 le32_to_cpu(dma_desc->addr_lo),
2338 le16_to_cpu(dma_desc->len),
2339 le16_to_cpu(dma_desc->cmd));
2340 else
2341 DBG("%s: %p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
2342 name, desc, le32_to_cpu(dma_desc->addr_lo),
2343 le16_to_cpu(dma_desc->len),
2344 le16_to_cpu(dma_desc->cmd));
2302 2345
2303 desc += 8; 2346 desc += host->desc_sz;
2304 2347
2305 if (attr & 2) 2348 if (dma_desc->cmd & cpu_to_le16(ADMA2_END))
2306 break; 2349 break;
2307 } 2350 }
2308} 2351}
2309#else 2352#else
2310static void sdhci_show_adma_error(struct sdhci_host *host) { } 2353static void sdhci_adma_show_error(struct sdhci_host *host) { }
2311#endif 2354#endif
2312 2355
2313static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) 2356static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
@@ -2370,7 +2413,7 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
2370 host->data->error = -EILSEQ; 2413 host->data->error = -EILSEQ;
2371 else if (intmask & SDHCI_INT_ADMA_ERROR) { 2414 else if (intmask & SDHCI_INT_ADMA_ERROR) {
2372 pr_err("%s: ADMA error\n", mmc_hostname(host->mmc)); 2415 pr_err("%s: ADMA error\n", mmc_hostname(host->mmc));
2373 sdhci_show_adma_error(host); 2416 sdhci_adma_show_error(host);
2374 host->data->error = -EIO; 2417 host->data->error = -EIO;
2375 if (host->ops->adma_workaround) 2418 if (host->ops->adma_workaround)
2376 host->ops->adma_workaround(host, intmask); 2419 host->ops->adma_workaround(host, intmask);
@@ -2654,9 +2697,6 @@ int sdhci_resume_host(struct sdhci_host *host)
2654} 2697}
2655 2698
2656EXPORT_SYMBOL_GPL(sdhci_resume_host); 2699EXPORT_SYMBOL_GPL(sdhci_resume_host);
2657#endif /* CONFIG_PM */
2658
2659#ifdef CONFIG_PM_RUNTIME
2660 2700
2661static int sdhci_runtime_pm_get(struct sdhci_host *host) 2701static int sdhci_runtime_pm_get(struct sdhci_host *host)
2662{ 2702{
@@ -2757,7 +2797,7 @@ int sdhci_runtime_resume_host(struct sdhci_host *host)
2757} 2797}
2758EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host); 2798EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);
2759 2799
2760#endif 2800#endif /* CONFIG_PM */
2761 2801
2762/*****************************************************************************\ 2802/*****************************************************************************\
2763 * * 2803 * *
@@ -2849,6 +2889,16 @@ int sdhci_add_host(struct sdhci_host *host)
2849 host->flags &= ~SDHCI_USE_ADMA; 2889 host->flags &= ~SDHCI_USE_ADMA;
2850 } 2890 }
2851 2891
2892 /*
2893 * It is assumed that a 64-bit capable device has set a 64-bit DMA mask
2894 * and *must* do 64-bit DMA. A driver has the opportunity to change
2895 * that during the first call to ->enable_dma(). Similarly
2896 * SDHCI_QUIRK2_BROKEN_64_BIT_DMA must be left to the drivers to
2897 * implement.
2898 */
2899 if (sdhci_readl(host, SDHCI_CAPABILITIES) & SDHCI_CAN_64BIT)
2900 host->flags |= SDHCI_USE_64_BIT_DMA;
2901
2852 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 2902 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
2853 if (host->ops->enable_dma) { 2903 if (host->ops->enable_dma) {
2854 if (host->ops->enable_dma(host)) { 2904 if (host->ops->enable_dma(host)) {
@@ -2860,33 +2910,56 @@ int sdhci_add_host(struct sdhci_host *host)
2860 } 2910 }
2861 } 2911 }
2862 2912
2913 /* SDMA does not support 64-bit DMA */
2914 if (host->flags & SDHCI_USE_64_BIT_DMA)
2915 host->flags &= ~SDHCI_USE_SDMA;
2916
2863 if (host->flags & SDHCI_USE_ADMA) { 2917 if (host->flags & SDHCI_USE_ADMA) {
2864 /* 2918 /*
2865 * We need to allocate descriptors for all sg entries 2919 * The DMA descriptor table size is calculated as the maximum
2866 * (128) and potentially one alignment transfer for 2920 * number of segments times 2, to allow for an alignment
2867 * each of those entries. 2921 * descriptor for each segment, plus 1 for a nop end descriptor,
2922 * all multipled by the descriptor size.
2868 */ 2923 */
2869 host->adma_desc = dma_alloc_coherent(mmc_dev(mmc), 2924 if (host->flags & SDHCI_USE_64_BIT_DMA) {
2870 ADMA_SIZE, &host->adma_addr, 2925 host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
2871 GFP_KERNEL); 2926 SDHCI_ADMA2_64_DESC_SZ;
2872 host->align_buffer = kmalloc(128 * 4, GFP_KERNEL); 2927 host->align_buffer_sz = SDHCI_MAX_SEGS *
2873 if (!host->adma_desc || !host->align_buffer) { 2928 SDHCI_ADMA2_64_ALIGN;
2874 dma_free_coherent(mmc_dev(mmc), ADMA_SIZE, 2929 host->desc_sz = SDHCI_ADMA2_64_DESC_SZ;
2875 host->adma_desc, host->adma_addr); 2930 host->align_sz = SDHCI_ADMA2_64_ALIGN;
2931 host->align_mask = SDHCI_ADMA2_64_ALIGN - 1;
2932 } else {
2933 host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
2934 SDHCI_ADMA2_32_DESC_SZ;
2935 host->align_buffer_sz = SDHCI_MAX_SEGS *
2936 SDHCI_ADMA2_32_ALIGN;
2937 host->desc_sz = SDHCI_ADMA2_32_DESC_SZ;
2938 host->align_sz = SDHCI_ADMA2_32_ALIGN;
2939 host->align_mask = SDHCI_ADMA2_32_ALIGN - 1;
2940 }
2941 host->adma_table = dma_alloc_coherent(mmc_dev(mmc),
2942 host->adma_table_sz,
2943 &host->adma_addr,
2944 GFP_KERNEL);
2945 host->align_buffer = kmalloc(host->align_buffer_sz, GFP_KERNEL);
2946 if (!host->adma_table || !host->align_buffer) {
2947 dma_free_coherent(mmc_dev(mmc), host->adma_table_sz,
2948 host->adma_table, host->adma_addr);
2876 kfree(host->align_buffer); 2949 kfree(host->align_buffer);
2877 pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n", 2950 pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
2878 mmc_hostname(mmc)); 2951 mmc_hostname(mmc));
2879 host->flags &= ~SDHCI_USE_ADMA; 2952 host->flags &= ~SDHCI_USE_ADMA;
2880 host->adma_desc = NULL; 2953 host->adma_table = NULL;
2881 host->align_buffer = NULL; 2954 host->align_buffer = NULL;
2882 } else if (host->adma_addr & 3) { 2955 } else if (host->adma_addr & host->align_mask) {
2883 pr_warn("%s: unable to allocate aligned ADMA descriptor\n", 2956 pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
2884 mmc_hostname(mmc)); 2957 mmc_hostname(mmc));
2885 host->flags &= ~SDHCI_USE_ADMA; 2958 host->flags &= ~SDHCI_USE_ADMA;
2886 dma_free_coherent(mmc_dev(mmc), ADMA_SIZE, 2959 dma_free_coherent(mmc_dev(mmc), host->adma_table_sz,
2887 host->adma_desc, host->adma_addr); 2960 host->adma_table, host->adma_addr);
2888 kfree(host->align_buffer); 2961 kfree(host->align_buffer);
2889 host->adma_desc = NULL; 2962 host->adma_table = NULL;
2890 host->align_buffer = NULL; 2963 host->align_buffer = NULL;
2891 } 2964 }
2892 } 2965 }
@@ -3027,7 +3100,7 @@ int sdhci_add_host(struct sdhci_host *host)
3027 if (ret) { 3100 if (ret) {
3028 pr_warn("%s: Failed to enable vqmmc regulator: %d\n", 3101 pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
3029 mmc_hostname(mmc), ret); 3102 mmc_hostname(mmc), ret);
3030 mmc->supply.vqmmc = NULL; 3103 mmc->supply.vqmmc = ERR_PTR(-EINVAL);
3031 } 3104 }
3032 } 3105 }
3033 3106
@@ -3046,16 +3119,21 @@ int sdhci_add_host(struct sdhci_host *host)
3046 /* SD3.0: SDR104 is supported so (for eMMC) the caps2 3119 /* SD3.0: SDR104 is supported so (for eMMC) the caps2
3047 * field can be promoted to support HS200. 3120 * field can be promoted to support HS200.
3048 */ 3121 */
3049 if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200)) { 3122 if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
3050 mmc->caps2 |= MMC_CAP2_HS200; 3123 mmc->caps2 |= MMC_CAP2_HS200;
3051 if (IS_ERR(mmc->supply.vqmmc) ||
3052 !regulator_is_supported_voltage
3053 (mmc->supply.vqmmc, 1100000, 1300000))
3054 mmc->caps2 &= ~MMC_CAP2_HS200_1_2V_SDR;
3055 }
3056 } else if (caps[1] & SDHCI_SUPPORT_SDR50) 3124 } else if (caps[1] & SDHCI_SUPPORT_SDR50)
3057 mmc->caps |= MMC_CAP_UHS_SDR50; 3125 mmc->caps |= MMC_CAP_UHS_SDR50;
3058 3126
3127 if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 &&
3128 (caps[1] & SDHCI_SUPPORT_HS400))
3129 mmc->caps2 |= MMC_CAP2_HS400;
3130
3131 if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) &&
3132 (IS_ERR(mmc->supply.vqmmc) ||
3133 !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000,
3134 1300000)))
3135 mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V;
3136
3059 if ((caps[1] & SDHCI_SUPPORT_DDR50) && 3137 if ((caps[1] & SDHCI_SUPPORT_DDR50) &&
3060 !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50)) 3138 !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
3061 mmc->caps |= MMC_CAP_UHS_DDR50; 3139 mmc->caps |= MMC_CAP_UHS_DDR50;
@@ -3175,11 +3253,11 @@ int sdhci_add_host(struct sdhci_host *host)
3175 * can do scatter/gather or not. 3253 * can do scatter/gather or not.
3176 */ 3254 */
3177 if (host->flags & SDHCI_USE_ADMA) 3255 if (host->flags & SDHCI_USE_ADMA)
3178 mmc->max_segs = 128; 3256 mmc->max_segs = SDHCI_MAX_SEGS;
3179 else if (host->flags & SDHCI_USE_SDMA) 3257 else if (host->flags & SDHCI_USE_SDMA)
3180 mmc->max_segs = 1; 3258 mmc->max_segs = 1;
3181 else /* PIO */ 3259 else /* PIO */
3182 mmc->max_segs = 128; 3260 mmc->max_segs = SDHCI_MAX_SEGS;
3183 3261
3184 /* 3262 /*
3185 * Maximum number of sectors in one transfer. Limited by DMA boundary 3263 * Maximum number of sectors in one transfer. Limited by DMA boundary
@@ -3277,7 +3355,8 @@ int sdhci_add_host(struct sdhci_host *host)
3277 3355
3278 pr_info("%s: SDHCI controller on %s [%s] using %s\n", 3356 pr_info("%s: SDHCI controller on %s [%s] using %s\n",
3279 mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)), 3357 mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
3280 (host->flags & SDHCI_USE_ADMA) ? "ADMA" : 3358 (host->flags & SDHCI_USE_ADMA) ?
3359 (host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
3281 (host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO"); 3360 (host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");
3282 3361
3283 sdhci_enable_card_detection(host); 3362 sdhci_enable_card_detection(host);
@@ -3339,18 +3418,15 @@ void sdhci_remove_host(struct sdhci_host *host, int dead)
3339 3418
3340 tasklet_kill(&host->finish_tasklet); 3419 tasklet_kill(&host->finish_tasklet);
3341 3420
3342 if (!IS_ERR(mmc->supply.vmmc))
3343 regulator_disable(mmc->supply.vmmc);
3344
3345 if (!IS_ERR(mmc->supply.vqmmc)) 3421 if (!IS_ERR(mmc->supply.vqmmc))
3346 regulator_disable(mmc->supply.vqmmc); 3422 regulator_disable(mmc->supply.vqmmc);
3347 3423
3348 if (host->adma_desc) 3424 if (host->adma_table)
3349 dma_free_coherent(mmc_dev(mmc), ADMA_SIZE, 3425 dma_free_coherent(mmc_dev(mmc), host->adma_table_sz,
3350 host->adma_desc, host->adma_addr); 3426 host->adma_table, host->adma_addr);
3351 kfree(host->align_buffer); 3427 kfree(host->align_buffer);
3352 3428
3353 host->adma_desc = NULL; 3429 host->adma_table = NULL;
3354 host->align_buffer = NULL; 3430 host->align_buffer = NULL;
3355} 3431}
3356 3432
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 31896a779d4e..41a2c34299ed 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -161,7 +161,7 @@
161#define SDHCI_CTRL_UHS_SDR50 0x0002 161#define SDHCI_CTRL_UHS_SDR50 0x0002
162#define SDHCI_CTRL_UHS_SDR104 0x0003 162#define SDHCI_CTRL_UHS_SDR104 0x0003
163#define SDHCI_CTRL_UHS_DDR50 0x0004 163#define SDHCI_CTRL_UHS_DDR50 0x0004
164#define SDHCI_CTRL_HS_SDR200 0x0005 /* reserved value in SDIO spec */ 164#define SDHCI_CTRL_HS400 0x0005 /* Non-standard */
165#define SDHCI_CTRL_VDD_180 0x0008 165#define SDHCI_CTRL_VDD_180 0x0008
166#define SDHCI_CTRL_DRV_TYPE_MASK 0x0030 166#define SDHCI_CTRL_DRV_TYPE_MASK 0x0030
167#define SDHCI_CTRL_DRV_TYPE_B 0x0000 167#define SDHCI_CTRL_DRV_TYPE_B 0x0000
@@ -204,6 +204,7 @@
204#define SDHCI_RETUNING_MODE_SHIFT 14 204#define SDHCI_RETUNING_MODE_SHIFT 14
205#define SDHCI_CLOCK_MUL_MASK 0x00FF0000 205#define SDHCI_CLOCK_MUL_MASK 0x00FF0000
206#define SDHCI_CLOCK_MUL_SHIFT 16 206#define SDHCI_CLOCK_MUL_SHIFT 16
207#define SDHCI_SUPPORT_HS400 0x80000000 /* Non-standard */
207 208
208#define SDHCI_CAPABILITIES_1 0x44 209#define SDHCI_CAPABILITIES_1 0x44
209 210
@@ -227,6 +228,7 @@
227/* 55-57 reserved */ 228/* 55-57 reserved */
228 229
229#define SDHCI_ADMA_ADDRESS 0x58 230#define SDHCI_ADMA_ADDRESS 0x58
231#define SDHCI_ADMA_ADDRESS_HI 0x5C
230 232
231/* 60-FB reserved */ 233/* 60-FB reserved */
232 234
@@ -235,6 +237,7 @@
235#define SDHCI_PRESET_FOR_SDR50 0x6A 237#define SDHCI_PRESET_FOR_SDR50 0x6A
236#define SDHCI_PRESET_FOR_SDR104 0x6C 238#define SDHCI_PRESET_FOR_SDR104 0x6C
237#define SDHCI_PRESET_FOR_DDR50 0x6E 239#define SDHCI_PRESET_FOR_DDR50 0x6E
240#define SDHCI_PRESET_FOR_HS400 0x74 /* Non-standard */
238#define SDHCI_PRESET_DRV_MASK 0xC000 241#define SDHCI_PRESET_DRV_MASK 0xC000
239#define SDHCI_PRESET_DRV_SHIFT 14 242#define SDHCI_PRESET_DRV_SHIFT 14
240#define SDHCI_PRESET_CLKGEN_SEL_MASK 0x400 243#define SDHCI_PRESET_CLKGEN_SEL_MASK 0x400
@@ -266,6 +269,46 @@
266#define SDHCI_DEFAULT_BOUNDARY_SIZE (512 * 1024) 269#define SDHCI_DEFAULT_BOUNDARY_SIZE (512 * 1024)
267#define SDHCI_DEFAULT_BOUNDARY_ARG (ilog2(SDHCI_DEFAULT_BOUNDARY_SIZE) - 12) 270#define SDHCI_DEFAULT_BOUNDARY_ARG (ilog2(SDHCI_DEFAULT_BOUNDARY_SIZE) - 12)
268 271
272/* ADMA2 32-bit DMA descriptor size */
273#define SDHCI_ADMA2_32_DESC_SZ 8
274
275/* ADMA2 32-bit DMA alignment */
276#define SDHCI_ADMA2_32_ALIGN 4
277
278/* ADMA2 32-bit descriptor */
279struct sdhci_adma2_32_desc {
280 __le16 cmd;
281 __le16 len;
282 __le32 addr;
283} __packed __aligned(SDHCI_ADMA2_32_ALIGN);
284
285/* ADMA2 64-bit DMA descriptor size */
286#define SDHCI_ADMA2_64_DESC_SZ 12
287
288/* ADMA2 64-bit DMA alignment */
289#define SDHCI_ADMA2_64_ALIGN 8
290
291/*
292 * ADMA2 64-bit descriptor. Note 12-byte descriptor can't always be 8-byte
293 * aligned.
294 */
295struct sdhci_adma2_64_desc {
296 __le16 cmd;
297 __le16 len;
298 __le32 addr_lo;
299 __le32 addr_hi;
300} __packed __aligned(4);
301
302#define ADMA2_TRAN_VALID 0x21
303#define ADMA2_NOP_END_VALID 0x3
304#define ADMA2_END 0x2
305
306/*
307 * Maximum segments assuming a 512KiB maximum requisition size and a minimum
308 * 4KiB page size.
309 */
310#define SDHCI_MAX_SEGS 128
311
269struct sdhci_ops { 312struct sdhci_ops {
270#ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS 313#ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS
271 u32 (*read_l)(struct sdhci_host *host, int reg); 314 u32 (*read_l)(struct sdhci_host *host, int reg);
@@ -411,9 +454,6 @@ void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing);
411extern int sdhci_suspend_host(struct sdhci_host *host); 454extern int sdhci_suspend_host(struct sdhci_host *host);
412extern int sdhci_resume_host(struct sdhci_host *host); 455extern int sdhci_resume_host(struct sdhci_host *host);
413extern void sdhci_enable_irq_wakeups(struct sdhci_host *host); 456extern void sdhci_enable_irq_wakeups(struct sdhci_host *host);
414#endif
415
416#ifdef CONFIG_PM_RUNTIME
417extern int sdhci_runtime_suspend_host(struct sdhci_host *host); 457extern int sdhci_runtime_suspend_host(struct sdhci_host *host);
418extern int sdhci_runtime_resume_host(struct sdhci_host *host); 458extern int sdhci_runtime_resume_host(struct sdhci_host *host);
419#endif 459#endif
diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c
index a2e81a1ea6af..00c8ebdf8ec7 100644
--- a/drivers/mmc/host/sh_mobile_sdhi.c
+++ b/drivers/mmc/host/sh_mobile_sdhi.c
@@ -375,7 +375,7 @@ static int sh_mobile_sdhi_remove(struct platform_device *pdev)
375static const struct dev_pm_ops tmio_mmc_dev_pm_ops = { 375static const struct dev_pm_ops tmio_mmc_dev_pm_ops = {
376 SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, 376 SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
377 pm_runtime_force_resume) 377 pm_runtime_force_resume)
378 SET_PM_RUNTIME_PM_OPS(tmio_mmc_host_runtime_suspend, 378 SET_RUNTIME_PM_OPS(tmio_mmc_host_runtime_suspend,
379 tmio_mmc_host_runtime_resume, 379 tmio_mmc_host_runtime_resume,
380 NULL) 380 NULL)
381}; 381};
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index d1663b3c4143..15cb8b7ffc34 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -21,7 +21,6 @@
21#include <linux/err.h> 21#include <linux/err.h>
22 22
23#include <linux/clk.h> 23#include <linux/clk.h>
24#include <linux/clk-private.h>
25#include <linux/clk/sunxi.h> 24#include <linux/clk/sunxi.h>
26 25
27#include <linux/gpio.h> 26#include <linux/gpio.h>
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index 659028ddb8b1..2616fdfdbbeb 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -135,7 +135,7 @@ static int tmio_mmc_remove(struct platform_device *pdev)
135 135
136static const struct dev_pm_ops tmio_mmc_dev_pm_ops = { 136static const struct dev_pm_ops tmio_mmc_dev_pm_ops = {
137 SET_SYSTEM_SLEEP_PM_OPS(tmio_mmc_suspend, tmio_mmc_resume) 137 SET_SYSTEM_SLEEP_PM_OPS(tmio_mmc_suspend, tmio_mmc_resume)
138 SET_PM_RUNTIME_PM_OPS(tmio_mmc_host_runtime_suspend, 138 SET_RUNTIME_PM_OPS(tmio_mmc_host_runtime_suspend,
139 tmio_mmc_host_runtime_resume, 139 tmio_mmc_host_runtime_resume,
140 NULL) 140 NULL)
141}; 141};
diff --git a/drivers/mmc/host/toshsd.c b/drivers/mmc/host/toshsd.c
new file mode 100644
index 000000000000..4666262edaca
--- /dev/null
+++ b/drivers/mmc/host/toshsd.c
@@ -0,0 +1,717 @@
1/*
2 * Toshiba PCI Secure Digital Host Controller Interface driver
3 *
4 * Copyright (C) 2014 Ondrej Zary
5 * Copyright (C) 2007 Richard Betts, All Rights Reserved.
6 *
7 * Based on asic3_mmc.c, copyright (c) 2005 SDG Systems, LLC and,
8 * sdhci.c, copyright (C) 2005-2006 Pierre Ossman
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or (at
13 * your option) any later version.
14 */
15
16#include <linux/delay.h>
17#include <linux/device.h>
18#include <linux/module.h>
19#include <linux/pci.h>
20#include <linux/scatterlist.h>
21#include <linux/interrupt.h>
22#include <linux/io.h>
23#include <linux/pm.h>
24#include <linux/mmc/host.h>
25#include <linux/mmc/mmc.h>
26
27#include "toshsd.h"
28
29#define DRIVER_NAME "toshsd"
30
31static const struct pci_device_id pci_ids[] = {
32 { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA, 0x0805) },
33 { /* end: all zeroes */ },
34};
35
36MODULE_DEVICE_TABLE(pci, pci_ids);
37
38static void toshsd_init(struct toshsd_host *host)
39{
40 /* enable clock */
41 pci_write_config_byte(host->pdev, SD_PCICFG_CLKSTOP,
42 SD_PCICFG_CLKSTOP_ENABLE_ALL);
43 pci_write_config_byte(host->pdev, SD_PCICFG_CARDDETECT, 2);
44
45 /* reset */
46 iowrite16(0, host->ioaddr + SD_SOFTWARERESET); /* assert */
47 mdelay(2);
48 iowrite16(1, host->ioaddr + SD_SOFTWARERESET); /* deassert */
49 mdelay(2);
50
51 /* Clear card registers */
52 iowrite16(0, host->ioaddr + SD_CARDCLOCKCTRL);
53 iowrite32(0, host->ioaddr + SD_CARDSTATUS);
54 iowrite32(0, host->ioaddr + SD_ERRORSTATUS0);
55 iowrite16(0, host->ioaddr + SD_STOPINTERNAL);
56
57 /* SDIO clock? */
58 iowrite16(0x100, host->ioaddr + SDIO_BASE + SDIO_CLOCKNWAITCTRL);
59
60 /* enable LED */
61 pci_write_config_byte(host->pdev, SD_PCICFG_SDLED_ENABLE1,
62 SD_PCICFG_LED_ENABLE1_START);
63 pci_write_config_byte(host->pdev, SD_PCICFG_SDLED_ENABLE2,
64 SD_PCICFG_LED_ENABLE2_START);
65
66 /* set interrupt masks */
67 iowrite32(~(u32)(SD_CARD_RESP_END | SD_CARD_RW_END
68 | SD_CARD_CARD_REMOVED_0 | SD_CARD_CARD_INSERTED_0
69 | SD_BUF_READ_ENABLE | SD_BUF_WRITE_ENABLE
70 | SD_BUF_CMD_TIMEOUT),
71 host->ioaddr + SD_INTMASKCARD);
72
73 iowrite16(0x1000, host->ioaddr + SD_TRANSACTIONCTRL);
74}
75
76/* Set MMC clock / power.
77 * Note: This controller uses a simple divider scheme therefore it cannot run
78 * SD/MMC cards at full speed (24/20MHz). HCLK (=33MHz PCI clock?) is too high
79 * and the next slowest is 16MHz (div=2).
80 */
81static void __toshsd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
82{
83 struct toshsd_host *host = mmc_priv(mmc);
84
85 if (ios->clock) {
86 u16 clk;
87 int div = 1;
88
89 while (ios->clock < HCLK / div)
90 div *= 2;
91
92 clk = div >> 2;
93
94 if (div == 1) { /* disable the divider */
95 pci_write_config_byte(host->pdev, SD_PCICFG_CLKMODE,
96 SD_PCICFG_CLKMODE_DIV_DISABLE);
97 clk |= SD_CARDCLK_DIV_DISABLE;
98 } else
99 pci_write_config_byte(host->pdev, SD_PCICFG_CLKMODE, 0);
100
101 clk |= SD_CARDCLK_ENABLE_CLOCK;
102 iowrite16(clk, host->ioaddr + SD_CARDCLOCKCTRL);
103
104 mdelay(10);
105 } else
106 iowrite16(0, host->ioaddr + SD_CARDCLOCKCTRL);
107
108 switch (ios->power_mode) {
109 case MMC_POWER_OFF:
110 pci_write_config_byte(host->pdev, SD_PCICFG_POWER1,
111 SD_PCICFG_PWR1_OFF);
112 mdelay(1);
113 break;
114 case MMC_POWER_UP:
115 break;
116 case MMC_POWER_ON:
117 pci_write_config_byte(host->pdev, SD_PCICFG_POWER1,
118 SD_PCICFG_PWR1_33V);
119 pci_write_config_byte(host->pdev, SD_PCICFG_POWER2,
120 SD_PCICFG_PWR2_AUTO);
121 mdelay(20);
122 break;
123 }
124
125 switch (ios->bus_width) {
126 case MMC_BUS_WIDTH_1:
127 iowrite16(SD_CARDOPT_REQUIRED | SD_CARDOPT_DATA_RESP_TIMEOUT(14)
128 | SD_CARDOPT_C2_MODULE_ABSENT
129 | SD_CARDOPT_DATA_XFR_WIDTH_1,
130 host->ioaddr + SD_CARDOPTIONSETUP);
131 break;
132 case MMC_BUS_WIDTH_4:
133 iowrite16(SD_CARDOPT_REQUIRED | SD_CARDOPT_DATA_RESP_TIMEOUT(14)
134 | SD_CARDOPT_C2_MODULE_ABSENT
135 | SD_CARDOPT_DATA_XFR_WIDTH_4,
136 host->ioaddr + SD_CARDOPTIONSETUP);
137 break;
138 }
139}
140
141static void toshsd_set_led(struct toshsd_host *host, unsigned char state)
142{
143 iowrite16(state, host->ioaddr + SDIO_BASE + SDIO_LEDCTRL);
144}
145
146static void toshsd_finish_request(struct toshsd_host *host)
147{
148 struct mmc_request *mrq = host->mrq;
149
150 /* Write something to end the command */
151 host->mrq = NULL;
152 host->cmd = NULL;
153 host->data = NULL;
154
155 toshsd_set_led(host, 0);
156 mmc_request_done(host->mmc, mrq);
157}
158
159static irqreturn_t toshsd_thread_irq(int irq, void *dev_id)
160{
161 struct toshsd_host *host = dev_id;
162 struct mmc_data *data = host->data;
163 struct sg_mapping_iter *sg_miter = &host->sg_miter;
164 unsigned short *buf;
165 int count;
166 unsigned long flags;
167
168 if (!data) {
169 dev_warn(&host->pdev->dev, "Spurious Data IRQ\n");
170 if (host->cmd) {
171 host->cmd->error = -EIO;
172 toshsd_finish_request(host);
173 }
174 return IRQ_NONE;
175 }
176 spin_lock_irqsave(&host->lock, flags);
177
178 if (!sg_miter_next(sg_miter))
179 return IRQ_HANDLED;
180 buf = sg_miter->addr;
181
182 /* Ensure we dont read more than one block. The chip will interrupt us
183 * When the next block is available.
184 */
185 count = sg_miter->length;
186 if (count > data->blksz)
187 count = data->blksz;
188
189 dev_dbg(&host->pdev->dev, "count: %08x, flags %08x\n", count,
190 data->flags);
191
192 /* Transfer the data */
193 if (data->flags & MMC_DATA_READ)
194 ioread32_rep(host->ioaddr + SD_DATAPORT, buf, count >> 2);
195 else
196 iowrite32_rep(host->ioaddr + SD_DATAPORT, buf, count >> 2);
197
198 sg_miter->consumed = count;
199 sg_miter_stop(sg_miter);
200
201 spin_unlock_irqrestore(&host->lock, flags);
202
203 return IRQ_HANDLED;
204}
205
206static void toshsd_cmd_irq(struct toshsd_host *host)
207{
208 struct mmc_command *cmd = host->cmd;
209 u8 *buf;
210 u16 data;
211
212 if (!host->cmd) {
213 dev_warn(&host->pdev->dev, "Spurious CMD irq\n");
214 return;
215 }
216 buf = (u8 *)cmd->resp;
217 host->cmd = NULL;
218
219 if (cmd->flags & MMC_RSP_PRESENT && cmd->flags & MMC_RSP_136) {
220 /* R2 */
221 buf[12] = 0xff;
222 data = ioread16(host->ioaddr + SD_RESPONSE0);
223 buf[13] = data & 0xff;
224 buf[14] = data >> 8;
225 data = ioread16(host->ioaddr + SD_RESPONSE1);
226 buf[15] = data & 0xff;
227 buf[8] = data >> 8;
228 data = ioread16(host->ioaddr + SD_RESPONSE2);
229 buf[9] = data & 0xff;
230 buf[10] = data >> 8;
231 data = ioread16(host->ioaddr + SD_RESPONSE3);
232 buf[11] = data & 0xff;
233 buf[4] = data >> 8;
234 data = ioread16(host->ioaddr + SD_RESPONSE4);
235 buf[5] = data & 0xff;
236 buf[6] = data >> 8;
237 data = ioread16(host->ioaddr + SD_RESPONSE5);
238 buf[7] = data & 0xff;
239 buf[0] = data >> 8;
240 data = ioread16(host->ioaddr + SD_RESPONSE6);
241 buf[1] = data & 0xff;
242 buf[2] = data >> 8;
243 data = ioread16(host->ioaddr + SD_RESPONSE7);
244 buf[3] = data & 0xff;
245 } else if (cmd->flags & MMC_RSP_PRESENT) {
246 /* R1, R1B, R3, R6, R7 */
247 data = ioread16(host->ioaddr + SD_RESPONSE0);
248 buf[0] = data & 0xff;
249 buf[1] = data >> 8;
250 data = ioread16(host->ioaddr + SD_RESPONSE1);
251 buf[2] = data & 0xff;
252 buf[3] = data >> 8;
253 }
254
255 dev_dbg(&host->pdev->dev, "Command IRQ complete %d %d %x\n",
256 cmd->opcode, cmd->error, cmd->flags);
257
258 /* If there is data to handle we will
259 * finish the request in the mmc_data_end_irq handler.*/
260 if (host->data)
261 return;
262
263 toshsd_finish_request(host);
264}
265
266static void toshsd_data_end_irq(struct toshsd_host *host)
267{
268 struct mmc_data *data = host->data;
269
270 host->data = NULL;
271
272 if (!data) {
273 dev_warn(&host->pdev->dev, "Spurious data end IRQ\n");
274 return;
275 }
276
277 if (data->error == 0)
278 data->bytes_xfered = data->blocks * data->blksz;
279 else
280 data->bytes_xfered = 0;
281
282 dev_dbg(&host->pdev->dev, "Completed data request xfr=%d\n",
283 data->bytes_xfered);
284
285 iowrite16(0, host->ioaddr + SD_STOPINTERNAL);
286
287 toshsd_finish_request(host);
288}
289
290static irqreturn_t toshsd_irq(int irq, void *dev_id)
291{
292 struct toshsd_host *host = dev_id;
293 u32 int_reg, int_mask, int_status, detail;
294 int error = 0, ret = IRQ_HANDLED;
295
296 spin_lock(&host->lock);
297 int_status = ioread32(host->ioaddr + SD_CARDSTATUS);
298 int_mask = ioread32(host->ioaddr + SD_INTMASKCARD);
299 int_reg = int_status & ~int_mask & ~IRQ_DONT_CARE_BITS;
300
301 dev_dbg(&host->pdev->dev, "IRQ status:%x mask:%x\n",
302 int_status, int_mask);
303
304 /* nothing to do: it's not our IRQ */
305 if (!int_reg) {
306 ret = IRQ_NONE;
307 goto irq_end;
308 }
309
310 if (int_reg & SD_BUF_CMD_TIMEOUT) {
311 error = -ETIMEDOUT;
312 dev_dbg(&host->pdev->dev, "Timeout\n");
313 } else if (int_reg & SD_BUF_CRC_ERR) {
314 error = -EILSEQ;
315 dev_err(&host->pdev->dev, "BadCRC\n");
316 } else if (int_reg & (SD_BUF_ILLEGAL_ACCESS
317 | SD_BUF_CMD_INDEX_ERR
318 | SD_BUF_STOP_BIT_END_ERR
319 | SD_BUF_OVERFLOW
320 | SD_BUF_UNDERFLOW
321 | SD_BUF_DATA_TIMEOUT)) {
322 dev_err(&host->pdev->dev, "Buffer status error: { %s%s%s%s%s%s}\n",
323 int_reg & SD_BUF_ILLEGAL_ACCESS ? "ILLEGAL_ACC " : "",
324 int_reg & SD_BUF_CMD_INDEX_ERR ? "CMD_INDEX " : "",
325 int_reg & SD_BUF_STOP_BIT_END_ERR ? "STOPBIT_END " : "",
326 int_reg & SD_BUF_OVERFLOW ? "OVERFLOW " : "",
327 int_reg & SD_BUF_UNDERFLOW ? "UNDERFLOW " : "",
328 int_reg & SD_BUF_DATA_TIMEOUT ? "DATA_TIMEOUT " : "");
329
330 detail = ioread32(host->ioaddr + SD_ERRORSTATUS0);
331 dev_err(&host->pdev->dev, "detail error status { %s%s%s%s%s%s%s%s%s%s%s%s%s}\n",
332 detail & SD_ERR0_RESP_CMD_ERR ? "RESP_CMD " : "",
333 detail & SD_ERR0_RESP_NON_CMD12_END_BIT_ERR ? "RESP_END_BIT " : "",
334 detail & SD_ERR0_RESP_CMD12_END_BIT_ERR ? "RESP_END_BIT " : "",
335 detail & SD_ERR0_READ_DATA_END_BIT_ERR ? "READ_DATA_END_BIT " : "",
336 detail & SD_ERR0_WRITE_CRC_STATUS_END_BIT_ERR ? "WRITE_CMD_END_BIT " : "",
337 detail & SD_ERR0_RESP_NON_CMD12_CRC_ERR ? "RESP_CRC " : "",
338 detail & SD_ERR0_RESP_CMD12_CRC_ERR ? "RESP_CRC " : "",
339 detail & SD_ERR0_READ_DATA_CRC_ERR ? "READ_DATA_CRC " : "",
340 detail & SD_ERR0_WRITE_CMD_CRC_ERR ? "WRITE_CMD_CRC " : "",
341 detail & SD_ERR1_NO_CMD_RESP ? "NO_CMD_RESP " : "",
342 detail & SD_ERR1_TIMEOUT_READ_DATA ? "READ_DATA_TIMEOUT " : "",
343 detail & SD_ERR1_TIMEOUT_CRS_STATUS ? "CRS_STATUS_TIMEOUT " : "",
344 detail & SD_ERR1_TIMEOUT_CRC_BUSY ? "CRC_BUSY_TIMEOUT " : "");
345 error = -EIO;
346 }
347
348 if (error) {
349 if (host->cmd)
350 host->cmd->error = error;
351
352 if (error == -ETIMEDOUT) {
353 iowrite32(int_status &
354 ~(SD_BUF_CMD_TIMEOUT | SD_CARD_RESP_END),
355 host->ioaddr + SD_CARDSTATUS);
356 } else {
357 toshsd_init(host);
358 __toshsd_set_ios(host->mmc, &host->mmc->ios);
359 goto irq_end;
360 }
361 }
362
363 /* Card insert/remove. The mmc controlling code is stateless. */
364 if (int_reg & (SD_CARD_CARD_INSERTED_0 | SD_CARD_CARD_REMOVED_0)) {
365 iowrite32(int_status &
366 ~(SD_CARD_CARD_REMOVED_0 | SD_CARD_CARD_INSERTED_0),
367 host->ioaddr + SD_CARDSTATUS);
368
369 if (int_reg & SD_CARD_CARD_INSERTED_0)
370 toshsd_init(host);
371
372 mmc_detect_change(host->mmc, 1);
373 }
374
375 /* Data transfer */
376 if (int_reg & (SD_BUF_READ_ENABLE | SD_BUF_WRITE_ENABLE)) {
377 iowrite32(int_status &
378 ~(SD_BUF_WRITE_ENABLE | SD_BUF_READ_ENABLE),
379 host->ioaddr + SD_CARDSTATUS);
380
381 ret = IRQ_WAKE_THREAD;
382 goto irq_end;
383 }
384
385 /* Command completion */
386 if (int_reg & SD_CARD_RESP_END) {
387 iowrite32(int_status & ~(SD_CARD_RESP_END),
388 host->ioaddr + SD_CARDSTATUS);
389 toshsd_cmd_irq(host);
390 }
391
392 /* Data transfer completion */
393 if (int_reg & SD_CARD_RW_END) {
394 iowrite32(int_status & ~(SD_CARD_RW_END),
395 host->ioaddr + SD_CARDSTATUS);
396 toshsd_data_end_irq(host);
397 }
398irq_end:
399 spin_unlock(&host->lock);
400 return ret;
401}
402
403static void toshsd_start_cmd(struct toshsd_host *host, struct mmc_command *cmd)
404{
405 struct mmc_data *data = host->data;
406 int c = cmd->opcode;
407
408 dev_dbg(&host->pdev->dev, "Command opcode: %d\n", cmd->opcode);
409
410 if (cmd->opcode == MMC_STOP_TRANSMISSION) {
411 iowrite16(SD_STOPINT_ISSUE_CMD12,
412 host->ioaddr + SD_STOPINTERNAL);
413
414 cmd->resp[0] = cmd->opcode;
415 cmd->resp[1] = 0;
416 cmd->resp[2] = 0;
417 cmd->resp[3] = 0;
418
419 toshsd_finish_request(host);
420 return;
421 }
422
423 switch (mmc_resp_type(cmd)) {
424 case MMC_RSP_NONE:
425 c |= SD_CMD_RESP_TYPE_NONE;
426 break;
427
428 case MMC_RSP_R1:
429 c |= SD_CMD_RESP_TYPE_EXT_R1;
430 break;
431 case MMC_RSP_R1B:
432 c |= SD_CMD_RESP_TYPE_EXT_R1B;
433 break;
434 case MMC_RSP_R2:
435 c |= SD_CMD_RESP_TYPE_EXT_R2;
436 break;
437 case MMC_RSP_R3:
438 c |= SD_CMD_RESP_TYPE_EXT_R3;
439 break;
440
441 default:
442 dev_err(&host->pdev->dev, "Unknown response type %d\n",
443 mmc_resp_type(cmd));
444 break;
445 }
446
447 host->cmd = cmd;
448
449 if (cmd->opcode == MMC_APP_CMD)
450 c |= SD_CMD_TYPE_ACMD;
451
452 if (cmd->opcode == MMC_GO_IDLE_STATE)
453 c |= (3 << 8); /* removed from ipaq-asic3.h for some reason */
454
455 if (data) {
456 c |= SD_CMD_DATA_PRESENT;
457
458 if (data->blocks > 1) {
459 iowrite16(SD_STOPINT_AUTO_ISSUE_CMD12,
460 host->ioaddr + SD_STOPINTERNAL);
461 c |= SD_CMD_MULTI_BLOCK;
462 }
463
464 if (data->flags & MMC_DATA_READ)
465 c |= SD_CMD_TRANSFER_READ;
466
467 /* MMC_DATA_WRITE does not require a bit to be set */
468 }
469
470 /* Send the command */
471 iowrite32(cmd->arg, host->ioaddr + SD_ARG0);
472 iowrite16(c, host->ioaddr + SD_CMD);
473}
474
475static void toshsd_start_data(struct toshsd_host *host, struct mmc_data *data)
476{
477 unsigned int flags = SG_MITER_ATOMIC;
478
479 dev_dbg(&host->pdev->dev, "setup data transfer: blocksize %08x nr_blocks %d, offset: %08x\n",
480 data->blksz, data->blocks, data->sg->offset);
481
482 host->data = data;
483
484 if (data->flags & MMC_DATA_READ)
485 flags |= SG_MITER_TO_SG;
486 else
487 flags |= SG_MITER_FROM_SG;
488
489 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
490
491 /* Set transfer length and blocksize */
492 iowrite16(data->blocks, host->ioaddr + SD_BLOCKCOUNT);
493 iowrite16(data->blksz, host->ioaddr + SD_CARDXFERDATALEN);
494}
495
496/* Process requests from the MMC layer */
497static void toshsd_request(struct mmc_host *mmc, struct mmc_request *mrq)
498{
499 struct toshsd_host *host = mmc_priv(mmc);
500 unsigned long flags;
501
502 /* abort if card not present */
503 if (!(ioread16(host->ioaddr + SD_CARDSTATUS) & SD_CARD_PRESENT_0)) {
504 mrq->cmd->error = -ENOMEDIUM;
505 mmc_request_done(mmc, mrq);
506 return;
507 }
508
509 spin_lock_irqsave(&host->lock, flags);
510
511 WARN_ON(host->mrq != NULL);
512
513 host->mrq = mrq;
514
515 if (mrq->data)
516 toshsd_start_data(host, mrq->data);
517
518 toshsd_set_led(host, 1);
519
520 toshsd_start_cmd(host, mrq->cmd);
521
522 spin_unlock_irqrestore(&host->lock, flags);
523}
524
525static void toshsd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
526{
527 struct toshsd_host *host = mmc_priv(mmc);
528 unsigned long flags;
529
530 spin_lock_irqsave(&host->lock, flags);
531 __toshsd_set_ios(mmc, ios);
532 spin_unlock_irqrestore(&host->lock, flags);
533}
534
535static int toshsd_get_ro(struct mmc_host *mmc)
536{
537 struct toshsd_host *host = mmc_priv(mmc);
538
539 /* active low */
540 return !(ioread16(host->ioaddr + SD_CARDSTATUS) & SD_CARD_WRITE_PROTECT);
541}
542
543static int toshsd_get_cd(struct mmc_host *mmc)
544{
545 struct toshsd_host *host = mmc_priv(mmc);
546
547 return !!(ioread16(host->ioaddr + SD_CARDSTATUS) & SD_CARD_PRESENT_0);
548}
549
550static struct mmc_host_ops toshsd_ops = {
551 .request = toshsd_request,
552 .set_ios = toshsd_set_ios,
553 .get_ro = toshsd_get_ro,
554 .get_cd = toshsd_get_cd,
555};
556
557
558static void toshsd_powerdown(struct toshsd_host *host)
559{
560 /* mask all interrupts */
561 iowrite32(0xffffffff, host->ioaddr + SD_INTMASKCARD);
562 /* disable card clock */
563 iowrite16(0x000, host->ioaddr + SDIO_BASE + SDIO_CLOCKNWAITCTRL);
564 iowrite16(0, host->ioaddr + SD_CARDCLOCKCTRL);
565 /* power down card */
566 pci_write_config_byte(host->pdev, SD_PCICFG_POWER1, SD_PCICFG_PWR1_OFF);
567 /* disable clock */
568 pci_write_config_byte(host->pdev, SD_PCICFG_CLKSTOP, 0);
569}
570
571#ifdef CONFIG_PM_SLEEP
572static int toshsd_pm_suspend(struct device *dev)
573{
574 struct pci_dev *pdev = to_pci_dev(dev);
575 struct toshsd_host *host = pci_get_drvdata(pdev);
576
577 toshsd_powerdown(host);
578
579 pci_save_state(pdev);
580 pci_enable_wake(pdev, PCI_D3hot, 0);
581 pci_disable_device(pdev);
582 pci_set_power_state(pdev, PCI_D3hot);
583
584 return 0;
585}
586
587static int toshsd_pm_resume(struct device *dev)
588{
589 struct pci_dev *pdev = to_pci_dev(dev);
590 struct toshsd_host *host = pci_get_drvdata(pdev);
591 int ret;
592
593 pci_set_power_state(pdev, PCI_D0);
594 pci_restore_state(pdev);
595 ret = pci_enable_device(pdev);
596 if (ret)
597 return ret;
598
599 toshsd_init(host);
600
601 return 0;
602}
603#endif /* CONFIG_PM_SLEEP */
604
605static int toshsd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
606{
607 int ret;
608 struct toshsd_host *host;
609 struct mmc_host *mmc;
610 resource_size_t base;
611
612 ret = pci_enable_device(pdev);
613 if (ret)
614 return ret;
615
616 mmc = mmc_alloc_host(sizeof(struct toshsd_host), &pdev->dev);
617 if (!mmc) {
618 ret = -ENOMEM;
619 goto err;
620 }
621
622 host = mmc_priv(mmc);
623 host->mmc = mmc;
624
625 host->pdev = pdev;
626 pci_set_drvdata(pdev, host);
627
628 ret = pci_request_regions(pdev, DRIVER_NAME);
629 if (ret)
630 goto free;
631
632 host->ioaddr = pci_iomap(pdev, 0, 0);
633 if (!host->ioaddr) {
634 ret = -ENOMEM;
635 goto release;
636 }
637
638 /* Set MMC host parameters */
639 mmc->ops = &toshsd_ops;
640 mmc->caps = MMC_CAP_4_BIT_DATA;
641 mmc->ocr_avail = MMC_VDD_32_33;
642
643 mmc->f_min = HCLK / 512;
644 mmc->f_max = HCLK;
645
646 spin_lock_init(&host->lock);
647
648 toshsd_init(host);
649
650 ret = request_threaded_irq(pdev->irq, toshsd_irq, toshsd_thread_irq,
651 IRQF_SHARED, DRIVER_NAME, host);
652 if (ret)
653 goto unmap;
654
655 mmc_add_host(mmc);
656
657 base = pci_resource_start(pdev, 0);
658 dev_dbg(&pdev->dev, "MMIO %pa, IRQ %d\n", &base, pdev->irq);
659
660 pm_suspend_ignore_children(&pdev->dev, 1);
661
662 return 0;
663
664unmap:
665 pci_iounmap(pdev, host->ioaddr);
666release:
667 pci_release_regions(pdev);
668free:
669 mmc_free_host(mmc);
670 pci_set_drvdata(pdev, NULL);
671err:
672 pci_disable_device(pdev);
673 return ret;
674}
675
676static void toshsd_remove(struct pci_dev *pdev)
677{
678 struct toshsd_host *host = pci_get_drvdata(pdev);
679
680 mmc_remove_host(host->mmc);
681 toshsd_powerdown(host);
682 free_irq(pdev->irq, host);
683 pci_iounmap(pdev, host->ioaddr);
684 pci_release_regions(pdev);
685 mmc_free_host(host->mmc);
686 pci_set_drvdata(pdev, NULL);
687 pci_disable_device(pdev);
688}
689
690static const struct dev_pm_ops toshsd_pm_ops = {
691 SET_SYSTEM_SLEEP_PM_OPS(toshsd_pm_suspend, toshsd_pm_resume)
692};
693
694static struct pci_driver toshsd_driver = {
695 .name = DRIVER_NAME,
696 .id_table = pci_ids,
697 .probe = toshsd_probe,
698 .remove = toshsd_remove,
699 .driver.pm = &toshsd_pm_ops,
700};
701
/* The init/exit functions were plain pci_register_driver()/
 * pci_unregister_driver() wrappers; module_pci_driver() generates exactly
 * that boilerplate. */
module_pci_driver(toshsd_driver);
714
715MODULE_AUTHOR("Ondrej Zary, Richard Betts");
716MODULE_DESCRIPTION("Toshiba PCI Secure Digital Host Controller Interface driver");
717MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/host/toshsd.h b/drivers/mmc/host/toshsd.h
new file mode 100644
index 000000000000..b6c0d89e53a6
--- /dev/null
+++ b/drivers/mmc/host/toshsd.h
@@ -0,0 +1,176 @@
1/*
2 * Toshiba PCI Secure Digital Host Controller Interface driver
3 *
4 * Copyright (C) 2014 Ondrej Zary
5 * Copyright (C) 2007 Richard Betts, All Rights Reserved.
6 *
7 * Based on asic3_mmc.c Copyright (c) 2005 SDG Systems, LLC
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or (at
12 * your option) any later version.
13 */
14
15#define HCLK 33000000 /* 33 MHz (PCI clock) */
16
17#define SD_PCICFG_CLKSTOP 0x40 /* 0x1f = clock controller, 0 = stop */
18#define SD_PCICFG_GATEDCLK 0x41 /* Gated clock */
19#define SD_PCICFG_CLKMODE 0x42 /* Control clock of SD controller */
20#define SD_PCICFG_PINSTATUS 0x44 /* R/O: read status of SD pins */
21#define SD_PCICFG_POWER1 0x48
22#define SD_PCICFG_POWER2 0x49
23#define SD_PCICFG_POWER3 0x4a
24#define SD_PCICFG_CARDDETECT 0x4c
25#define SD_PCICFG_SLOTS 0x50 /* R/O: define support slot number */
26#define SD_PCICFG_EXTGATECLK1 0xf0 /* Could be used for gated clock */
27#define SD_PCICFG_EXTGATECLK2 0xf1 /* Could be used for gated clock */
28#define SD_PCICFG_EXTGATECLK3 0xf9 /* Bit 1: double buffer/single buffer */
29#define SD_PCICFG_SDLED_ENABLE1 0xfa
30#define SD_PCICFG_SDLED_ENABLE2 0xfe
31
32#define SD_PCICFG_CLKMODE_DIV_DISABLE BIT(0)
33#define SD_PCICFG_CLKSTOP_ENABLE_ALL 0x1f
34#define SD_PCICFG_LED_ENABLE1_START 0x12
35#define SD_PCICFG_LED_ENABLE2_START 0x80
36
37#define SD_PCICFG_PWR1_33V 0x08 /* Set for 3.3 volts */
38#define SD_PCICFG_PWR1_OFF 0x00 /* Turn off power */
39#define SD_PCICFG_PWR2_AUTO 0x02
40
41#define SD_CMD 0x00 /* also for SDIO */
42#define SD_ARG0 0x04 /* also for SDIO */
43#define SD_ARG1 0x06 /* also for SDIO */
44#define SD_STOPINTERNAL 0x08
45#define SD_BLOCKCOUNT 0x0a /* also for SDIO */
46#define SD_RESPONSE0 0x0c /* also for SDIO */
47#define SD_RESPONSE1 0x0e /* also for SDIO */
48#define SD_RESPONSE2 0x10 /* also for SDIO */
49#define SD_RESPONSE3 0x12 /* also for SDIO */
50#define SD_RESPONSE4 0x14 /* also for SDIO */
51#define SD_RESPONSE5 0x16 /* also for SDIO */
52#define SD_RESPONSE6 0x18 /* also for SDIO */
53#define SD_RESPONSE7 0x1a /* also for SDIO */
54#define SD_CARDSTATUS 0x1c /* also for SDIO */
55#define SD_BUFFERCTRL 0x1e /* also for SDIO */
56#define SD_INTMASKCARD 0x20 /* also for SDIO */
57#define SD_INTMASKBUFFER 0x22 /* also for SDIO */
58#define SD_CARDCLOCKCTRL 0x24
59#define SD_CARDXFERDATALEN 0x26 /* also for SDIO */
60#define SD_CARDOPTIONSETUP 0x28 /* also for SDIO */
61#define SD_ERRORSTATUS0 0x2c /* also for SDIO */
62#define SD_ERRORSTATUS1 0x2e /* also for SDIO */
63#define SD_DATAPORT 0x30 /* also for SDIO */
64#define SD_TRANSACTIONCTRL 0x34 /* also for SDIO */
65#define SD_SOFTWARERESET 0xe0 /* also for SDIO */
66
67/* registers above marked "also for SDIO" and all SDIO registers below can be
68 * accessed at SDIO_BASE + reg address */
69#define SDIO_BASE 0x100
70
71#define SDIO_CARDPORTSEL 0x02
72#define SDIO_CARDINTCTRL 0x36
73#define SDIO_CLOCKNWAITCTRL 0x38
74#define SDIO_HOSTINFORMATION 0x3a
75#define SDIO_ERRORCTRL 0x3c
76#define SDIO_LEDCTRL 0x3e
77
78#define SD_TRANSCTL_SET BIT(8)
79
80#define SD_CARDCLK_DIV_DISABLE BIT(15)
81#define SD_CARDCLK_ENABLE_CLOCK BIT(8)
82#define SD_CARDCLK_CLK_DIV_512 BIT(7)
83#define SD_CARDCLK_CLK_DIV_256 BIT(6)
84#define SD_CARDCLK_CLK_DIV_128 BIT(5)
85#define SD_CARDCLK_CLK_DIV_64 BIT(4)
86#define SD_CARDCLK_CLK_DIV_32 BIT(3)
87#define SD_CARDCLK_CLK_DIV_16 BIT(2)
88#define SD_CARDCLK_CLK_DIV_8 BIT(1)
89#define SD_CARDCLK_CLK_DIV_4 BIT(0)
90#define SD_CARDCLK_CLK_DIV_2 0
91
92#define SD_CARDOPT_REQUIRED 0x000e
93#define SD_CARDOPT_DATA_RESP_TIMEOUT(x) (((x) & 0x0f) << 4) /* 4 bits */
94#define SD_CARDOPT_C2_MODULE_ABSENT BIT(14)
95#define SD_CARDOPT_DATA_XFR_WIDTH_1 (1 << 15)
96#define SD_CARDOPT_DATA_XFR_WIDTH_4 (0 << 15)
97
98#define SD_CMD_TYPE_CMD (0 << 6)
99#define SD_CMD_TYPE_ACMD (1 << 6)
100#define SD_CMD_TYPE_AUTHEN (2 << 6)
101#define SD_CMD_RESP_TYPE_NONE (3 << 8)
102#define SD_CMD_RESP_TYPE_EXT_R1 (4 << 8)
103#define SD_CMD_RESP_TYPE_EXT_R1B (5 << 8)
104#define SD_CMD_RESP_TYPE_EXT_R2 (6 << 8)
105#define SD_CMD_RESP_TYPE_EXT_R3 (7 << 8)
106#define SD_CMD_RESP_TYPE_EXT_R6 (4 << 8)
107#define SD_CMD_RESP_TYPE_EXT_R7 (4 << 8)
108#define SD_CMD_DATA_PRESENT BIT(11)
109#define SD_CMD_TRANSFER_READ BIT(12)
110#define SD_CMD_MULTI_BLOCK BIT(13)
111#define SD_CMD_SECURITY_CMD BIT(14)
112
113#define SD_STOPINT_ISSUE_CMD12 BIT(0)
114#define SD_STOPINT_AUTO_ISSUE_CMD12 BIT(8)
115
116#define SD_CARD_RESP_END BIT(0)
117#define SD_CARD_RW_END BIT(2)
118#define SD_CARD_CARD_REMOVED_0 BIT(3)
119#define SD_CARD_CARD_INSERTED_0 BIT(4)
120#define SD_CARD_PRESENT_0 BIT(5)
121#define SD_CARD_UNK6 BIT(6)
122#define SD_CARD_WRITE_PROTECT BIT(7)
123#define SD_CARD_CARD_REMOVED_3 BIT(8)
124#define SD_CARD_CARD_INSERTED_3 BIT(9)
125#define SD_CARD_PRESENT_3 BIT(10)
126
127#define SD_BUF_CMD_INDEX_ERR BIT(16)
128#define SD_BUF_CRC_ERR BIT(17)
129#define SD_BUF_STOP_BIT_END_ERR BIT(18)
130#define SD_BUF_DATA_TIMEOUT BIT(19)
131#define SD_BUF_OVERFLOW BIT(20)
132#define SD_BUF_UNDERFLOW BIT(21)
133#define SD_BUF_CMD_TIMEOUT BIT(22)
134#define SD_BUF_UNK7 BIT(23)
135#define SD_BUF_READ_ENABLE BIT(24)
136#define SD_BUF_WRITE_ENABLE BIT(25)
137#define SD_BUF_ILLEGAL_FUNCTION BIT(29)
138#define SD_BUF_CMD_BUSY BIT(30)
139#define SD_BUF_ILLEGAL_ACCESS BIT(31)
140
141#define SD_ERR0_RESP_CMD_ERR BIT(0)
142#define SD_ERR0_RESP_NON_CMD12_END_BIT_ERR BIT(2)
143#define SD_ERR0_RESP_CMD12_END_BIT_ERR BIT(3)
144#define SD_ERR0_READ_DATA_END_BIT_ERR BIT(4)
145#define SD_ERR0_WRITE_CRC_STATUS_END_BIT_ERR BIT(5)
146#define SD_ERR0_RESP_NON_CMD12_CRC_ERR BIT(8)
147#define SD_ERR0_RESP_CMD12_CRC_ERR BIT(9)
148#define SD_ERR0_READ_DATA_CRC_ERR BIT(10)
149#define SD_ERR0_WRITE_CMD_CRC_ERR BIT(11)
150
151#define SD_ERR1_NO_CMD_RESP BIT(16)
152#define SD_ERR1_TIMEOUT_READ_DATA BIT(20)
153#define SD_ERR1_TIMEOUT_CRS_STATUS BIT(21)
154#define SD_ERR1_TIMEOUT_CRC_BUSY BIT(22)
155
156#define IRQ_DONT_CARE_BITS (SD_CARD_PRESENT_3 \
157 | SD_CARD_WRITE_PROTECT \
158 | SD_CARD_UNK6 \
159 | SD_CARD_PRESENT_0 \
160 | SD_BUF_UNK7 \
161 | SD_BUF_CMD_BUSY)
162
163struct toshsd_host {
164 struct pci_dev *pdev;
165 struct mmc_host *mmc;
166
167 spinlock_t lock;
168
169 struct mmc_request *mrq;/* Current request */
170 struct mmc_command *cmd;/* Current command */
171 struct mmc_data *data; /* Current data request */
172
173 struct sg_mapping_iter sg_miter; /* for PIO */
174
175 void __iomem *ioaddr; /* mapped address */
176};
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index c13d83e15ace..45f09a66e6c9 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -225,7 +225,12 @@ static int bond_changelink(struct net_device *bond_dev,
225 225
226 bond_option_arp_ip_targets_clear(bond); 226 bond_option_arp_ip_targets_clear(bond);
227 nla_for_each_nested(attr, data[IFLA_BOND_ARP_IP_TARGET], rem) { 227 nla_for_each_nested(attr, data[IFLA_BOND_ARP_IP_TARGET], rem) {
228 __be32 target = nla_get_be32(attr); 228 __be32 target;
229
230 if (nla_len(attr) < sizeof(target))
231 return -EINVAL;
232
233 target = nla_get_be32(attr);
229 234
230 bond_opt_initval(&newval, (__force u64)target); 235 bond_opt_initval(&newval, (__force u64)target);
231 err = __bond_opt_set(bond, BOND_OPT_ARP_TARGETS, 236 err = __bond_opt_set(bond, BOND_OPT_ARP_TARGETS,
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index b9625968daac..4f4c2a7888e5 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -377,6 +377,29 @@ static irqreturn_t bcm_sf2_switch_1_isr(int irq, void *dev_id)
377 return IRQ_HANDLED; 377 return IRQ_HANDLED;
378} 378}
379 379
380static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv)
381{
382 unsigned int timeout = 1000;
383 u32 reg;
384
385 reg = core_readl(priv, CORE_WATCHDOG_CTRL);
386 reg |= SOFTWARE_RESET | EN_CHIP_RST | EN_SW_RESET;
387 core_writel(priv, reg, CORE_WATCHDOG_CTRL);
388
389 do {
390 reg = core_readl(priv, CORE_WATCHDOG_CTRL);
391 if (!(reg & SOFTWARE_RESET))
392 break;
393
394 usleep_range(1000, 2000);
395 } while (timeout-- > 0);
396
397 if (timeout == 0)
398 return -ETIMEDOUT;
399
400 return 0;
401}
402
380static int bcm_sf2_sw_setup(struct dsa_switch *ds) 403static int bcm_sf2_sw_setup(struct dsa_switch *ds)
381{ 404{
382 const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME; 405 const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME;
@@ -404,11 +427,18 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds)
404 *base = of_iomap(dn, i); 427 *base = of_iomap(dn, i);
405 if (*base == NULL) { 428 if (*base == NULL) {
406 pr_err("unable to find register: %s\n", reg_names[i]); 429 pr_err("unable to find register: %s\n", reg_names[i]);
407 return -ENODEV; 430 ret = -ENOMEM;
431 goto out_unmap;
408 } 432 }
409 base++; 433 base++;
410 } 434 }
411 435
436 ret = bcm_sf2_sw_rst(priv);
437 if (ret) {
438 pr_err("unable to software reset switch: %d\n", ret);
439 goto out_unmap;
440 }
441
412 /* Disable all interrupts and request them */ 442 /* Disable all interrupts and request them */
413 intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET); 443 intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
414 intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR); 444 intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
@@ -484,7 +514,8 @@ out_free_irq0:
484out_unmap: 514out_unmap:
485 base = &priv->core; 515 base = &priv->core;
486 for (i = 0; i < BCM_SF2_REGS_NUM; i++) { 516 for (i = 0; i < BCM_SF2_REGS_NUM; i++) {
487 iounmap(*base); 517 if (*base)
518 iounmap(*base);
488 base++; 519 base++;
489 } 520 }
490 return ret; 521 return ret;
@@ -733,29 +764,6 @@ static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
733 return 0; 764 return 0;
734} 765}
735 766
736static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv)
737{
738 unsigned int timeout = 1000;
739 u32 reg;
740
741 reg = core_readl(priv, CORE_WATCHDOG_CTRL);
742 reg |= SOFTWARE_RESET | EN_CHIP_RST | EN_SW_RESET;
743 core_writel(priv, reg, CORE_WATCHDOG_CTRL);
744
745 do {
746 reg = core_readl(priv, CORE_WATCHDOG_CTRL);
747 if (!(reg & SOFTWARE_RESET))
748 break;
749
750 usleep_range(1000, 2000);
751 } while (timeout-- > 0);
752
753 if (timeout == 0)
754 return -ETIMEDOUT;
755
756 return 0;
757}
758
759static int bcm_sf2_sw_resume(struct dsa_switch *ds) 767static int bcm_sf2_sw_resume(struct dsa_switch *ds)
760{ 768{
761 struct bcm_sf2_priv *priv = ds_to_priv(ds); 769 struct bcm_sf2_priv *priv = ds_to_priv(ds);
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index dbb41c1923e6..77f8f836cbbe 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -8563,7 +8563,8 @@ static int tg3_init_rings(struct tg3 *tp)
8563 if (tnapi->rx_rcb) 8563 if (tnapi->rx_rcb)
8564 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp)); 8564 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8565 8565
8566 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) { 8566 if (tnapi->prodring.rx_std &&
8567 tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8567 tg3_free_rings(tp); 8568 tg3_free_rings(tp);
8568 return -ENOMEM; 8569 return -ENOMEM;
8569 } 8570 }
diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig
index 9e089d24466e..3564fe9d3f69 100644
--- a/drivers/net/ethernet/cadence/Kconfig
+++ b/drivers/net/ethernet/cadence/Kconfig
@@ -22,7 +22,7 @@ if NET_CADENCE
22 22
23config ARM_AT91_ETHER 23config ARM_AT91_ETHER
24 tristate "AT91RM9200 Ethernet support" 24 tristate "AT91RM9200 Ethernet support"
25 depends on HAS_DMA && (ARCH_AT91RM9200 || COMPILE_TEST) 25 depends on HAS_DMA && (ARCH_AT91 || COMPILE_TEST)
26 select MACB 26 select MACB
27 ---help--- 27 ---help---
28 If you wish to compile a kernel for the AT91RM9200 and enable 28 If you wish to compile a kernel for the AT91RM9200 and enable
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 8520d5529df8..279873cb6e3a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -2442,9 +2442,13 @@ static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
2442 SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full | 2442 SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
2443 SUPPORTED_10000baseKX4_Full; 2443 SUPPORTED_10000baseKX4_Full;
2444 else if (type == FW_PORT_TYPE_FIBER_XFI || 2444 else if (type == FW_PORT_TYPE_FIBER_XFI ||
2445 type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP) 2445 type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP) {
2446 v |= SUPPORTED_FIBRE; 2446 v |= SUPPORTED_FIBRE;
2447 else if (type == FW_PORT_TYPE_BP40_BA) 2447 if (caps & FW_PORT_CAP_SPEED_1G)
2448 v |= SUPPORTED_1000baseT_Full;
2449 if (caps & FW_PORT_CAP_SPEED_10G)
2450 v |= SUPPORTED_10000baseT_Full;
2451 } else if (type == FW_PORT_TYPE_BP40_BA)
2448 v |= SUPPORTED_40000baseSR4_Full; 2452 v |= SUPPORTED_40000baseSR4_Full;
2449 2453
2450 if (caps & FW_PORT_CAP_ANEG) 2454 if (caps & FW_PORT_CAP_ANEG)
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 3e8475cae4f9..597c463e384d 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -4309,11 +4309,16 @@ static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh)
4309 return -EOPNOTSUPP; 4309 return -EOPNOTSUPP;
4310 4310
4311 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); 4311 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
4312 if (!br_spec)
4313 return -EINVAL;
4312 4314
4313 nla_for_each_nested(attr, br_spec, rem) { 4315 nla_for_each_nested(attr, br_spec, rem) {
4314 if (nla_type(attr) != IFLA_BRIDGE_MODE) 4316 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4315 continue; 4317 continue;
4316 4318
4319 if (nla_len(attr) < sizeof(mode))
4320 return -EINVAL;
4321
4317 mode = nla_get_u16(attr); 4322 mode = nla_get_u16(attr);
4318 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB) 4323 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4319 return -EINVAL; 4324 return -EINVAL;
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 247335d2c7ec..952ef7c434e8 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -6372,7 +6372,6 @@ static int e1000e_pm_resume(struct device *dev)
6372} 6372}
6373#endif /* CONFIG_PM_SLEEP */ 6373#endif /* CONFIG_PM_SLEEP */
6374 6374
6375#ifdef CONFIG_PM_RUNTIME
6376static int e1000e_pm_runtime_idle(struct device *dev) 6375static int e1000e_pm_runtime_idle(struct device *dev)
6377{ 6376{
6378 struct pci_dev *pdev = to_pci_dev(dev); 6377 struct pci_dev *pdev = to_pci_dev(dev);
@@ -6432,7 +6431,6 @@ static int e1000e_pm_runtime_suspend(struct device *dev)
6432 6431
6433 return 0; 6432 return 0;
6434} 6433}
6435#endif /* CONFIG_PM_RUNTIME */
6436#endif /* CONFIG_PM */ 6434#endif /* CONFIG_PM */
6437 6435
6438static void e1000_shutdown(struct pci_dev *pdev) 6436static void e1000_shutdown(struct pci_dev *pdev)
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index a2d72a87cbde..b85880a6e4c4 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -186,11 +186,9 @@ static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs);
186static int igb_suspend(struct device *); 186static int igb_suspend(struct device *);
187#endif 187#endif
188static int igb_resume(struct device *); 188static int igb_resume(struct device *);
189#ifdef CONFIG_PM_RUNTIME
190static int igb_runtime_suspend(struct device *dev); 189static int igb_runtime_suspend(struct device *dev);
191static int igb_runtime_resume(struct device *dev); 190static int igb_runtime_resume(struct device *dev);
192static int igb_runtime_idle(struct device *dev); 191static int igb_runtime_idle(struct device *dev);
193#endif
194static const struct dev_pm_ops igb_pm_ops = { 192static const struct dev_pm_ops igb_pm_ops = {
195 SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume) 193 SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume)
196 SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume, 194 SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume,
@@ -1012,7 +1010,8 @@ static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
1012 /* igb_get_stats64() might access the rings on this vector, 1010 /* igb_get_stats64() might access the rings on this vector,
1013 * we must wait a grace period before freeing it. 1011 * we must wait a grace period before freeing it.
1014 */ 1012 */
1015 kfree_rcu(q_vector, rcu); 1013 if (q_vector)
1014 kfree_rcu(q_vector, rcu);
1016} 1015}
1017 1016
1018/** 1017/**
@@ -1792,8 +1791,10 @@ void igb_down(struct igb_adapter *adapter)
1792 adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE; 1791 adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
1793 1792
1794 for (i = 0; i < adapter->num_q_vectors; i++) { 1793 for (i = 0; i < adapter->num_q_vectors; i++) {
1795 napi_synchronize(&(adapter->q_vector[i]->napi)); 1794 if (adapter->q_vector[i]) {
1796 napi_disable(&(adapter->q_vector[i]->napi)); 1795 napi_synchronize(&adapter->q_vector[i]->napi);
1796 napi_disable(&adapter->q_vector[i]->napi);
1797 }
1797 } 1798 }
1798 1799
1799 1800
@@ -3717,7 +3718,8 @@ static void igb_free_all_tx_resources(struct igb_adapter *adapter)
3717 int i; 3718 int i;
3718 3719
3719 for (i = 0; i < adapter->num_tx_queues; i++) 3720 for (i = 0; i < adapter->num_tx_queues; i++)
3720 igb_free_tx_resources(adapter->tx_ring[i]); 3721 if (adapter->tx_ring[i])
3722 igb_free_tx_resources(adapter->tx_ring[i]);
3721} 3723}
3722 3724
3723void igb_unmap_and_free_tx_resource(struct igb_ring *ring, 3725void igb_unmap_and_free_tx_resource(struct igb_ring *ring,
@@ -3782,7 +3784,8 @@ static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
3782 int i; 3784 int i;
3783 3785
3784 for (i = 0; i < adapter->num_tx_queues; i++) 3786 for (i = 0; i < adapter->num_tx_queues; i++)
3785 igb_clean_tx_ring(adapter->tx_ring[i]); 3787 if (adapter->tx_ring[i])
3788 igb_clean_tx_ring(adapter->tx_ring[i]);
3786} 3789}
3787 3790
3788/** 3791/**
@@ -3819,7 +3822,8 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter)
3819 int i; 3822 int i;
3820 3823
3821 for (i = 0; i < adapter->num_rx_queues; i++) 3824 for (i = 0; i < adapter->num_rx_queues; i++)
3822 igb_free_rx_resources(adapter->rx_ring[i]); 3825 if (adapter->rx_ring[i])
3826 igb_free_rx_resources(adapter->rx_ring[i]);
3823} 3827}
3824 3828
3825/** 3829/**
@@ -3874,7 +3878,8 @@ static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
3874 int i; 3878 int i;
3875 3879
3876 for (i = 0; i < adapter->num_rx_queues; i++) 3880 for (i = 0; i < adapter->num_rx_queues; i++)
3877 igb_clean_rx_ring(adapter->rx_ring[i]); 3881 if (adapter->rx_ring[i])
3882 igb_clean_rx_ring(adapter->rx_ring[i]);
3878} 3883}
3879 3884
3880/** 3885/**
@@ -7404,6 +7409,8 @@ static int igb_resume(struct device *dev)
7404 pci_restore_state(pdev); 7409 pci_restore_state(pdev);
7405 pci_save_state(pdev); 7410 pci_save_state(pdev);
7406 7411
7412 if (!pci_device_is_present(pdev))
7413 return -ENODEV;
7407 err = pci_enable_device_mem(pdev); 7414 err = pci_enable_device_mem(pdev);
7408 if (err) { 7415 if (err) {
7409 dev_err(&pdev->dev, 7416 dev_err(&pdev->dev,
@@ -7441,7 +7448,6 @@ static int igb_resume(struct device *dev)
7441 return 0; 7448 return 0;
7442} 7449}
7443 7450
7444#ifdef CONFIG_PM_RUNTIME
7445static int igb_runtime_idle(struct device *dev) 7451static int igb_runtime_idle(struct device *dev)
7446{ 7452{
7447 struct pci_dev *pdev = to_pci_dev(dev); 7453 struct pci_dev *pdev = to_pci_dev(dev);
@@ -7478,8 +7484,7 @@ static int igb_runtime_resume(struct device *dev)
7478{ 7484{
7479 return igb_resume(dev); 7485 return igb_resume(dev);
7480} 7486}
7481#endif /* CONFIG_PM_RUNTIME */ 7487#endif /* CONFIG_PM */
7482#endif
7483 7488
7484static void igb_shutdown(struct pci_dev *pdev) 7489static void igb_shutdown(struct pci_dev *pdev)
7485{ 7490{
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index d2df4e3d1032..cc51554c9e99 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -3936,8 +3936,8 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
3936 * if SR-IOV and VMDQ are disabled - otherwise ensure 3936 * if SR-IOV and VMDQ are disabled - otherwise ensure
3937 * that hardware VLAN filters remain enabled. 3937 * that hardware VLAN filters remain enabled.
3938 */ 3938 */
3939 if (!(adapter->flags & (IXGBE_FLAG_VMDQ_ENABLED | 3939 if (adapter->flags & (IXGBE_FLAG_VMDQ_ENABLED |
3940 IXGBE_FLAG_SRIOV_ENABLED))) 3940 IXGBE_FLAG_SRIOV_ENABLED))
3941 vlnctrl |= (IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN); 3941 vlnctrl |= (IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
3942 } else { 3942 } else {
3943 if (netdev->flags & IFF_ALLMULTI) { 3943 if (netdev->flags & IFF_ALLMULTI) {
@@ -7669,6 +7669,8 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
7669 return -EOPNOTSUPP; 7669 return -EOPNOTSUPP;
7670 7670
7671 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); 7671 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
7672 if (!br_spec)
7673 return -EINVAL;
7672 7674
7673 nla_for_each_nested(attr, br_spec, rem) { 7675 nla_for_each_nested(attr, br_spec, rem) {
7674 __u16 mode; 7676 __u16 mode;
@@ -7677,6 +7679,9 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
7677 if (nla_type(attr) != IFLA_BRIDGE_MODE) 7679 if (nla_type(attr) != IFLA_BRIDGE_MODE)
7678 continue; 7680 continue;
7679 7681
7682 if (nla_len(attr) < sizeof(mode))
7683 return -EINVAL;
7684
7680 mode = nla_get_u16(attr); 7685 mode = nla_get_u16(attr);
7681 if (mode == BRIDGE_MODE_VEPA) { 7686 if (mode == BRIDGE_MODE_VEPA) {
7682 reg = 0; 7687 reg = 0;
@@ -7979,6 +7984,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
7979 int i, err, pci_using_dac, expected_gts; 7984 int i, err, pci_using_dac, expected_gts;
7980 unsigned int indices = MAX_TX_QUEUES; 7985 unsigned int indices = MAX_TX_QUEUES;
7981 u8 part_str[IXGBE_PBANUM_LENGTH]; 7986 u8 part_str[IXGBE_PBANUM_LENGTH];
7987 bool disable_dev = false;
7982#ifdef IXGBE_FCOE 7988#ifdef IXGBE_FCOE
7983 u16 device_caps; 7989 u16 device_caps;
7984#endif 7990#endif
@@ -8369,13 +8375,14 @@ err_sw_init:
8369 iounmap(adapter->io_addr); 8375 iounmap(adapter->io_addr);
8370 kfree(adapter->mac_table); 8376 kfree(adapter->mac_table);
8371err_ioremap: 8377err_ioremap:
8378 disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
8372 free_netdev(netdev); 8379 free_netdev(netdev);
8373err_alloc_etherdev: 8380err_alloc_etherdev:
8374 pci_release_selected_regions(pdev, 8381 pci_release_selected_regions(pdev,
8375 pci_select_bars(pdev, IORESOURCE_MEM)); 8382 pci_select_bars(pdev, IORESOURCE_MEM));
8376err_pci_reg: 8383err_pci_reg:
8377err_dma: 8384err_dma:
8378 if (!adapter || !test_and_set_bit(__IXGBE_DISABLED, &adapter->state)) 8385 if (!adapter || disable_dev)
8379 pci_disable_device(pdev); 8386 pci_disable_device(pdev);
8380 return err; 8387 return err;
8381} 8388}
@@ -8393,6 +8400,7 @@ static void ixgbe_remove(struct pci_dev *pdev)
8393{ 8400{
8394 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); 8401 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
8395 struct net_device *netdev = adapter->netdev; 8402 struct net_device *netdev = adapter->netdev;
8403 bool disable_dev;
8396 8404
8397 ixgbe_dbg_adapter_exit(adapter); 8405 ixgbe_dbg_adapter_exit(adapter);
8398 8406
@@ -8442,11 +8450,12 @@ static void ixgbe_remove(struct pci_dev *pdev)
8442 e_dev_info("complete\n"); 8450 e_dev_info("complete\n");
8443 8451
8444 kfree(adapter->mac_table); 8452 kfree(adapter->mac_table);
8453 disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
8445 free_netdev(netdev); 8454 free_netdev(netdev);
8446 8455
8447 pci_disable_pcie_error_reporting(pdev); 8456 pci_disable_pcie_error_reporting(pdev);
8448 8457
8449 if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state)) 8458 if (disable_dev)
8450 pci_disable_device(pdev); 8459 pci_disable_device(pdev);
8451} 8460}
8452 8461
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 5d2498dcf536..cd5cf6d957c7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -1546,7 +1546,7 @@ static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1546 1546
1547 switch (op) { 1547 switch (op) {
1548 case RES_OP_RESERVE: 1548 case RES_OP_RESERVE:
1549 count = get_param_l(&in_param); 1549 count = get_param_l(&in_param) & 0xffffff;
1550 align = get_param_h(&in_param); 1550 align = get_param_h(&in_param);
1551 err = mlx4_grant_resource(dev, slave, RES_QP, count, 0); 1551 err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
1552 if (err) 1552 if (err)
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 60e9c2cd051e..b5db6b3f939f 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -917,21 +917,13 @@ static int sh_eth_reset(struct net_device *ndev)
917 return ret; 917 return ret;
918} 918}
919 919
920#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
921static void sh_eth_set_receive_align(struct sk_buff *skb) 920static void sh_eth_set_receive_align(struct sk_buff *skb)
922{ 921{
923 int reserve; 922 uintptr_t reserve = (uintptr_t)skb->data & (SH_ETH_RX_ALIGN - 1);
924 923
925 reserve = SH4_SKB_RX_ALIGN - ((u32)skb->data & (SH4_SKB_RX_ALIGN - 1));
926 if (reserve) 924 if (reserve)
927 skb_reserve(skb, reserve); 925 skb_reserve(skb, SH_ETH_RX_ALIGN - reserve);
928} 926}
929#else
930static void sh_eth_set_receive_align(struct sk_buff *skb)
931{
932 skb_reserve(skb, SH2_SH3_SKB_RX_ALIGN);
933}
934#endif
935 927
936 928
937/* CPU <-> EDMAC endian convert */ 929/* CPU <-> EDMAC endian convert */
@@ -1119,6 +1111,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
1119 struct sh_eth_txdesc *txdesc = NULL; 1111 struct sh_eth_txdesc *txdesc = NULL;
1120 int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring; 1112 int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
1121 int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring; 1113 int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
1114 int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
1122 1115
1123 mdp->cur_rx = 0; 1116 mdp->cur_rx = 0;
1124 mdp->cur_tx = 0; 1117 mdp->cur_tx = 0;
@@ -1131,21 +1124,21 @@ static void sh_eth_ring_format(struct net_device *ndev)
1131 for (i = 0; i < mdp->num_rx_ring; i++) { 1124 for (i = 0; i < mdp->num_rx_ring; i++) {
1132 /* skb */ 1125 /* skb */
1133 mdp->rx_skbuff[i] = NULL; 1126 mdp->rx_skbuff[i] = NULL;
1134 skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz); 1127 skb = netdev_alloc_skb(ndev, skbuff_size);
1135 mdp->rx_skbuff[i] = skb; 1128 mdp->rx_skbuff[i] = skb;
1136 if (skb == NULL) 1129 if (skb == NULL)
1137 break; 1130 break;
1138 dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
1139 DMA_FROM_DEVICE);
1140 sh_eth_set_receive_align(skb); 1131 sh_eth_set_receive_align(skb);
1141 1132
1142 /* RX descriptor */ 1133 /* RX descriptor */
1143 rxdesc = &mdp->rx_ring[i]; 1134 rxdesc = &mdp->rx_ring[i];
1135 /* The size of the buffer is a multiple of 16 bytes. */
1136 rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
1137 dma_map_single(&ndev->dev, skb->data, rxdesc->buffer_length,
1138 DMA_FROM_DEVICE);
1144 rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4)); 1139 rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
1145 rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP); 1140 rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);
1146 1141
1147 /* The size of the buffer is 16 byte boundary. */
1148 rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
1149 /* Rx descriptor address set */ 1142 /* Rx descriptor address set */
1150 if (i == 0) { 1143 if (i == 0) {
1151 sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR); 1144 sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
@@ -1397,6 +1390,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1397 struct sk_buff *skb; 1390 struct sk_buff *skb;
1398 u16 pkt_len = 0; 1391 u16 pkt_len = 0;
1399 u32 desc_status; 1392 u32 desc_status;
1393 int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
1400 1394
1401 rxdesc = &mdp->rx_ring[entry]; 1395 rxdesc = &mdp->rx_ring[entry];
1402 while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) { 1396 while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
@@ -1448,7 +1442,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1448 if (mdp->cd->rpadir) 1442 if (mdp->cd->rpadir)
1449 skb_reserve(skb, NET_IP_ALIGN); 1443 skb_reserve(skb, NET_IP_ALIGN);
1450 dma_sync_single_for_cpu(&ndev->dev, rxdesc->addr, 1444 dma_sync_single_for_cpu(&ndev->dev, rxdesc->addr,
1451 mdp->rx_buf_sz, 1445 ALIGN(mdp->rx_buf_sz, 16),
1452 DMA_FROM_DEVICE); 1446 DMA_FROM_DEVICE);
1453 skb_put(skb, pkt_len); 1447 skb_put(skb, pkt_len);
1454 skb->protocol = eth_type_trans(skb, ndev); 1448 skb->protocol = eth_type_trans(skb, ndev);
@@ -1468,13 +1462,13 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1468 rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16); 1462 rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
1469 1463
1470 if (mdp->rx_skbuff[entry] == NULL) { 1464 if (mdp->rx_skbuff[entry] == NULL) {
1471 skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz); 1465 skb = netdev_alloc_skb(ndev, skbuff_size);
1472 mdp->rx_skbuff[entry] = skb; 1466 mdp->rx_skbuff[entry] = skb;
1473 if (skb == NULL) 1467 if (skb == NULL)
1474 break; /* Better luck next round. */ 1468 break; /* Better luck next round. */
1475 dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
1476 DMA_FROM_DEVICE);
1477 sh_eth_set_receive_align(skb); 1469 sh_eth_set_receive_align(skb);
1470 dma_map_single(&ndev->dev, skb->data,
1471 rxdesc->buffer_length, DMA_FROM_DEVICE);
1478 1472
1479 skb_checksum_none_assert(skb); 1473 skb_checksum_none_assert(skb);
1480 rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4)); 1474 rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
@@ -2042,6 +2036,8 @@ static int sh_eth_open(struct net_device *ndev)
2042 if (ret) 2036 if (ret)
2043 goto out_free_irq; 2037 goto out_free_irq;
2044 2038
2039 mdp->is_opened = 1;
2040
2045 return ret; 2041 return ret;
2046 2042
2047out_free_irq: 2043out_free_irq:
@@ -2131,6 +2127,36 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
2131 return NETDEV_TX_OK; 2127 return NETDEV_TX_OK;
2132} 2128}
2133 2129
2130static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
2131{
2132 struct sh_eth_private *mdp = netdev_priv(ndev);
2133
2134 if (sh_eth_is_rz_fast_ether(mdp))
2135 return &ndev->stats;
2136
2137 if (!mdp->is_opened)
2138 return &ndev->stats;
2139
2140 ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR);
2141 sh_eth_write(ndev, 0, TROCR); /* (write clear) */
2142 ndev->stats.collisions += sh_eth_read(ndev, CDCR);
2143 sh_eth_write(ndev, 0, CDCR); /* (write clear) */
2144 ndev->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR);
2145 sh_eth_write(ndev, 0, LCCR); /* (write clear) */
2146
2147 if (sh_eth_is_gether(mdp)) {
2148 ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);
2149 sh_eth_write(ndev, 0, CERCR); /* (write clear) */
2150 ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);
2151 sh_eth_write(ndev, 0, CEECR); /* (write clear) */
2152 } else {
2153 ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR);
2154 sh_eth_write(ndev, 0, CNDCR); /* (write clear) */
2155 }
2156
2157 return &ndev->stats;
2158}
2159
2134/* device close function */ 2160/* device close function */
2135static int sh_eth_close(struct net_device *ndev) 2161static int sh_eth_close(struct net_device *ndev)
2136{ 2162{
@@ -2145,6 +2171,7 @@ static int sh_eth_close(struct net_device *ndev)
2145 sh_eth_write(ndev, 0, EDTRR); 2171 sh_eth_write(ndev, 0, EDTRR);
2146 sh_eth_write(ndev, 0, EDRRR); 2172 sh_eth_write(ndev, 0, EDRRR);
2147 2173
2174 sh_eth_get_stats(ndev);
2148 /* PHY Disconnect */ 2175 /* PHY Disconnect */
2149 if (mdp->phydev) { 2176 if (mdp->phydev) {
2150 phy_stop(mdp->phydev); 2177 phy_stop(mdp->phydev);
@@ -2163,36 +2190,9 @@ static int sh_eth_close(struct net_device *ndev)
2163 2190
2164 pm_runtime_put_sync(&mdp->pdev->dev); 2191 pm_runtime_put_sync(&mdp->pdev->dev);
2165 2192
2166 return 0; 2193 mdp->is_opened = 0;
2167}
2168
2169static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
2170{
2171 struct sh_eth_private *mdp = netdev_priv(ndev);
2172
2173 if (sh_eth_is_rz_fast_ether(mdp))
2174 return &ndev->stats;
2175 2194
2176 pm_runtime_get_sync(&mdp->pdev->dev); 2195 return 0;
2177
2178 ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR);
2179 sh_eth_write(ndev, 0, TROCR); /* (write clear) */
2180 ndev->stats.collisions += sh_eth_read(ndev, CDCR);
2181 sh_eth_write(ndev, 0, CDCR); /* (write clear) */
2182 ndev->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR);
2183 sh_eth_write(ndev, 0, LCCR); /* (write clear) */
2184 if (sh_eth_is_gether(mdp)) {
2185 ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);
2186 sh_eth_write(ndev, 0, CERCR); /* (write clear) */
2187 ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);
2188 sh_eth_write(ndev, 0, CEECR); /* (write clear) */
2189 } else {
2190 ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR);
2191 sh_eth_write(ndev, 0, CNDCR); /* (write clear) */
2192 }
2193 pm_runtime_put_sync(&mdp->pdev->dev);
2194
2195 return &ndev->stats;
2196} 2196}
2197 2197
2198/* ioctl to device function */ 2198/* ioctl to device function */
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index b37c427144ee..22301bf9c21d 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -162,9 +162,9 @@ enum {
162 162
163/* Driver's parameters */ 163/* Driver's parameters */
164#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE) 164#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
165#define SH4_SKB_RX_ALIGN 32 165#define SH_ETH_RX_ALIGN 32
166#else 166#else
167#define SH2_SH3_SKB_RX_ALIGN 2 167#define SH_ETH_RX_ALIGN 2
168#endif 168#endif
169 169
170/* Register's bits 170/* Register's bits
@@ -522,6 +522,7 @@ struct sh_eth_private {
522 522
523 unsigned no_ether_link:1; 523 unsigned no_ether_link:1;
524 unsigned ether_link_active_low:1; 524 unsigned ether_link_active_low:1;
525 unsigned is_opened:1;
525}; 526};
526 527
527static inline void sh_eth_soft_swap(char *src, int len) 528static inline void sh_eth_soft_swap(char *src, int len)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index db56fa7ce8f9..58a1a0a423d4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -177,12 +177,6 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
177 */ 177 */
178 plat->maxmtu = JUMBO_LEN; 178 plat->maxmtu = JUMBO_LEN;
179 179
180 /* Set default value for multicast hash bins */
181 plat->multicast_filter_bins = HASH_TABLE_SIZE;
182
183 /* Set default value for unicast filter entries */
184 plat->unicast_filter_entries = 1;
185
186 /* 180 /*
187 * Currently only the properties needed on SPEAr600 181 * Currently only the properties needed on SPEAr600
188 * are provided. All other properties should be added 182 * are provided. All other properties should be added
@@ -270,16 +264,23 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
270 return PTR_ERR(addr); 264 return PTR_ERR(addr);
271 265
272 plat_dat = dev_get_platdata(&pdev->dev); 266 plat_dat = dev_get_platdata(&pdev->dev);
273 if (pdev->dev.of_node) { 267
274 if (!plat_dat) 268 if (!plat_dat)
275 plat_dat = devm_kzalloc(&pdev->dev, 269 plat_dat = devm_kzalloc(&pdev->dev,
276 sizeof(struct plat_stmmacenet_data), 270 sizeof(struct plat_stmmacenet_data),
277 GFP_KERNEL); 271 GFP_KERNEL);
278 if (!plat_dat) { 272 if (!plat_dat) {
279 pr_err("%s: ERROR: no memory", __func__); 273 pr_err("%s: ERROR: no memory", __func__);
280 return -ENOMEM; 274 return -ENOMEM;
281 } 275 }
276
277 /* Set default value for multicast hash bins */
278 plat_dat->multicast_filter_bins = HASH_TABLE_SIZE;
282 279
280 /* Set default value for unicast filter entries */
281 plat_dat->unicast_filter_entries = 1;
282
283 if (pdev->dev.of_node) {
283 ret = stmmac_probe_config_dt(pdev, plat_dat, &mac); 284 ret = stmmac_probe_config_dt(pdev, plat_dat, &mac);
284 if (ret) { 285 if (ret) {
285 pr_err("%s: main dt probe failed", __func__); 286 pr_err("%s: main dt probe failed", __func__);
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 880cc090dc44..af90ab5e5768 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -45,6 +45,18 @@ struct macvtap_queue {
45 struct list_head next; 45 struct list_head next;
46}; 46};
47 47
48#define MACVTAP_FEATURES (IFF_VNET_HDR | IFF_VNET_LE | IFF_MULTI_QUEUE)
49
50static inline u16 macvtap16_to_cpu(struct macvtap_queue *q, __virtio16 val)
51{
52 return __virtio16_to_cpu(q->flags & IFF_VNET_LE, val);
53}
54
55static inline __virtio16 cpu_to_macvtap16(struct macvtap_queue *q, u16 val)
56{
57 return __cpu_to_virtio16(q->flags & IFF_VNET_LE, val);
58}
59
48static struct proto macvtap_proto = { 60static struct proto macvtap_proto = {
49 .name = "macvtap", 61 .name = "macvtap",
50 .owner = THIS_MODULE, 62 .owner = THIS_MODULE,
@@ -557,7 +569,8 @@ static inline struct sk_buff *macvtap_alloc_skb(struct sock *sk, size_t prepad,
557 * macvtap_skb_from_vnet_hdr and macvtap_skb_to_vnet_hdr should 569 * macvtap_skb_from_vnet_hdr and macvtap_skb_to_vnet_hdr should
558 * be shared with the tun/tap driver. 570 * be shared with the tun/tap driver.
559 */ 571 */
560static int macvtap_skb_from_vnet_hdr(struct sk_buff *skb, 572static int macvtap_skb_from_vnet_hdr(struct macvtap_queue *q,
573 struct sk_buff *skb,
561 struct virtio_net_hdr *vnet_hdr) 574 struct virtio_net_hdr *vnet_hdr)
562{ 575{
563 unsigned short gso_type = 0; 576 unsigned short gso_type = 0;
@@ -588,13 +601,13 @@ static int macvtap_skb_from_vnet_hdr(struct sk_buff *skb,
588 } 601 }
589 602
590 if (vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { 603 if (vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
591 if (!skb_partial_csum_set(skb, vnet_hdr->csum_start, 604 if (!skb_partial_csum_set(skb, macvtap16_to_cpu(q, vnet_hdr->csum_start),
592 vnet_hdr->csum_offset)) 605 macvtap16_to_cpu(q, vnet_hdr->csum_offset)))
593 return -EINVAL; 606 return -EINVAL;
594 } 607 }
595 608
596 if (vnet_hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) { 609 if (vnet_hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
597 skb_shinfo(skb)->gso_size = vnet_hdr->gso_size; 610 skb_shinfo(skb)->gso_size = macvtap16_to_cpu(q, vnet_hdr->gso_size);
598 skb_shinfo(skb)->gso_type = gso_type; 611 skb_shinfo(skb)->gso_type = gso_type;
599 612
600 /* Header must be checked, and gso_segs computed. */ 613 /* Header must be checked, and gso_segs computed. */
@@ -604,8 +617,9 @@ static int macvtap_skb_from_vnet_hdr(struct sk_buff *skb,
604 return 0; 617 return 0;
605} 618}
606 619
607static void macvtap_skb_to_vnet_hdr(const struct sk_buff *skb, 620static void macvtap_skb_to_vnet_hdr(struct macvtap_queue *q,
608 struct virtio_net_hdr *vnet_hdr) 621 const struct sk_buff *skb,
622 struct virtio_net_hdr *vnet_hdr)
609{ 623{
610 memset(vnet_hdr, 0, sizeof(*vnet_hdr)); 624 memset(vnet_hdr, 0, sizeof(*vnet_hdr));
611 625
@@ -613,8 +627,8 @@ static void macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
613 struct skb_shared_info *sinfo = skb_shinfo(skb); 627 struct skb_shared_info *sinfo = skb_shinfo(skb);
614 628
615 /* This is a hint as to how much should be linear. */ 629 /* This is a hint as to how much should be linear. */
616 vnet_hdr->hdr_len = skb_headlen(skb); 630 vnet_hdr->hdr_len = cpu_to_macvtap16(q, skb_headlen(skb));
617 vnet_hdr->gso_size = sinfo->gso_size; 631 vnet_hdr->gso_size = cpu_to_macvtap16(q, sinfo->gso_size);
618 if (sinfo->gso_type & SKB_GSO_TCPV4) 632 if (sinfo->gso_type & SKB_GSO_TCPV4)
619 vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4; 633 vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
620 else if (sinfo->gso_type & SKB_GSO_TCPV6) 634 else if (sinfo->gso_type & SKB_GSO_TCPV6)
@@ -628,10 +642,13 @@ static void macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
628 642
629 if (skb->ip_summed == CHECKSUM_PARTIAL) { 643 if (skb->ip_summed == CHECKSUM_PARTIAL) {
630 vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; 644 vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
631 vnet_hdr->csum_start = skb_checksum_start_offset(skb);
632 if (vlan_tx_tag_present(skb)) 645 if (vlan_tx_tag_present(skb))
633 vnet_hdr->csum_start += VLAN_HLEN; 646 vnet_hdr->csum_start = cpu_to_macvtap16(q,
634 vnet_hdr->csum_offset = skb->csum_offset; 647 skb_checksum_start_offset(skb) + VLAN_HLEN);
648 else
649 vnet_hdr->csum_start = cpu_to_macvtap16(q,
650 skb_checksum_start_offset(skb));
651 vnet_hdr->csum_offset = cpu_to_macvtap16(q, skb->csum_offset);
635 } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) { 652 } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
636 vnet_hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID; 653 vnet_hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID;
637 } /* else everything is zero */ 654 } /* else everything is zero */
@@ -666,12 +683,14 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
666 if (err < 0) 683 if (err < 0)
667 goto err; 684 goto err;
668 if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && 685 if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
669 vnet_hdr.csum_start + vnet_hdr.csum_offset + 2 > 686 macvtap16_to_cpu(q, vnet_hdr.csum_start) +
670 vnet_hdr.hdr_len) 687 macvtap16_to_cpu(q, vnet_hdr.csum_offset) + 2 >
671 vnet_hdr.hdr_len = vnet_hdr.csum_start + 688 macvtap16_to_cpu(q, vnet_hdr.hdr_len))
672 vnet_hdr.csum_offset + 2; 689 vnet_hdr.hdr_len = cpu_to_macvtap16(q,
690 macvtap16_to_cpu(q, vnet_hdr.csum_start) +
691 macvtap16_to_cpu(q, vnet_hdr.csum_offset) + 2);
673 err = -EINVAL; 692 err = -EINVAL;
674 if (vnet_hdr.hdr_len > len) 693 if (macvtap16_to_cpu(q, vnet_hdr.hdr_len) > len)
675 goto err; 694 goto err;
676 } 695 }
677 696
@@ -684,7 +703,8 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
684 goto err; 703 goto err;
685 704
686 if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) { 705 if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) {
687 copylen = vnet_hdr.hdr_len ? vnet_hdr.hdr_len : GOODCOPY_LEN; 706 copylen = vnet_hdr.hdr_len ?
707 macvtap16_to_cpu(q, vnet_hdr.hdr_len) : GOODCOPY_LEN;
688 if (copylen > good_linear) 708 if (copylen > good_linear)
689 copylen = good_linear; 709 copylen = good_linear;
690 linear = copylen; 710 linear = copylen;
@@ -695,10 +715,10 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
695 715
696 if (!zerocopy) { 716 if (!zerocopy) {
697 copylen = len; 717 copylen = len;
698 if (vnet_hdr.hdr_len > good_linear) 718 if (macvtap16_to_cpu(q, vnet_hdr.hdr_len) > good_linear)
699 linear = good_linear; 719 linear = good_linear;
700 else 720 else
701 linear = vnet_hdr.hdr_len; 721 linear = macvtap16_to_cpu(q, vnet_hdr.hdr_len);
702 } 722 }
703 723
704 skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen, 724 skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen,
@@ -725,7 +745,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
725 skb->protocol = eth_hdr(skb)->h_proto; 745 skb->protocol = eth_hdr(skb)->h_proto;
726 746
727 if (vnet_hdr_len) { 747 if (vnet_hdr_len) {
728 err = macvtap_skb_from_vnet_hdr(skb, &vnet_hdr); 748 err = macvtap_skb_from_vnet_hdr(q, skb, &vnet_hdr);
729 if (err) 749 if (err)
730 goto err_kfree; 750 goto err_kfree;
731 } 751 }
@@ -791,7 +811,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
791 if ((len -= vnet_hdr_len) < 0) 811 if ((len -= vnet_hdr_len) < 0)
792 return -EINVAL; 812 return -EINVAL;
793 813
794 macvtap_skb_to_vnet_hdr(skb, &vnet_hdr); 814 macvtap_skb_to_vnet_hdr(q, skb, &vnet_hdr);
795 815
796 if (memcpy_toiovecend(iv, (void *)&vnet_hdr, 0, sizeof(vnet_hdr))) 816 if (memcpy_toiovecend(iv, (void *)&vnet_hdr, 0, sizeof(vnet_hdr)))
797 return -EFAULT; 817 return -EFAULT;
@@ -1003,8 +1023,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
1003 return -EFAULT; 1023 return -EFAULT;
1004 1024
1005 ret = 0; 1025 ret = 0;
1006 if ((u & ~(IFF_VNET_HDR | IFF_MULTI_QUEUE)) != 1026 if ((u & ~MACVTAP_FEATURES) != (IFF_NO_PI | IFF_TAP))
1007 (IFF_NO_PI | IFF_TAP))
1008 ret = -EINVAL; 1027 ret = -EINVAL;
1009 else 1028 else
1010 q->flags = u; 1029 q->flags = u;
@@ -1036,8 +1055,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
1036 return ret; 1055 return ret;
1037 1056
1038 case TUNGETFEATURES: 1057 case TUNGETFEATURES:
1039 if (put_user(IFF_TAP | IFF_NO_PI | IFF_VNET_HDR | 1058 if (put_user(IFF_TAP | IFF_NO_PI | MACVTAP_FEATURES, up))
1040 IFF_MULTI_QUEUE, up))
1041 return -EFAULT; 1059 return -EFAULT;
1042 return 0; 1060 return 0;
1043 1061
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 9dd3746994a4..798ce70e3d61 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -103,6 +103,15 @@ do { \
103} while (0) 103} while (0)
104#endif 104#endif
105 105
106/* TUN device flags */
107
108/* IFF_ATTACH_QUEUE is never stored in device flags,
109 * overload it to mean fasync when stored there.
110 */
111#define TUN_FASYNC IFF_ATTACH_QUEUE
112
113#define TUN_FEATURES (IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR | \
114 IFF_VNET_LE | IFF_MULTI_QUEUE)
106#define GOODCOPY_LEN 128 115#define GOODCOPY_LEN 128
107 116
108#define FLT_EXACT_COUNT 8 117#define FLT_EXACT_COUNT 8
@@ -196,6 +205,16 @@ struct tun_struct {
196 u32 flow_count; 205 u32 flow_count;
197}; 206};
198 207
208static inline u16 tun16_to_cpu(struct tun_struct *tun, __virtio16 val)
209{
210 return __virtio16_to_cpu(tun->flags & IFF_VNET_LE, val);
211}
212
213static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val)
214{
215 return __cpu_to_virtio16(tun->flags & IFF_VNET_LE, val);
216}
217
199static inline u32 tun_hashfn(u32 rxhash) 218static inline u32 tun_hashfn(u32 rxhash)
200{ 219{
201 return rxhash & 0x3ff; 220 return rxhash & 0x3ff;
@@ -472,7 +491,7 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
472 if (tun && tun->numqueues == 0 && tun->numdisabled == 0) { 491 if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
473 netif_carrier_off(tun->dev); 492 netif_carrier_off(tun->dev);
474 493
475 if (!(tun->flags & TUN_PERSIST) && 494 if (!(tun->flags & IFF_PERSIST) &&
476 tun->dev->reg_state == NETREG_REGISTERED) 495 tun->dev->reg_state == NETREG_REGISTERED)
477 unregister_netdevice(tun->dev); 496 unregister_netdevice(tun->dev);
478 } 497 }
@@ -523,7 +542,7 @@ static void tun_detach_all(struct net_device *dev)
523 } 542 }
524 BUG_ON(tun->numdisabled != 0); 543 BUG_ON(tun->numdisabled != 0);
525 544
526 if (tun->flags & TUN_PERSIST) 545 if (tun->flags & IFF_PERSIST)
527 module_put(THIS_MODULE); 546 module_put(THIS_MODULE);
528} 547}
529 548
@@ -541,7 +560,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filte
541 goto out; 560 goto out;
542 561
543 err = -EBUSY; 562 err = -EBUSY;
544 if (!(tun->flags & TUN_TAP_MQ) && tun->numqueues == 1) 563 if (!(tun->flags & IFF_MULTI_QUEUE) && tun->numqueues == 1)
545 goto out; 564 goto out;
546 565
547 err = -E2BIG; 566 err = -E2BIG;
@@ -920,7 +939,7 @@ static void tun_net_init(struct net_device *dev)
920 struct tun_struct *tun = netdev_priv(dev); 939 struct tun_struct *tun = netdev_priv(dev);
921 940
922 switch (tun->flags & TUN_TYPE_MASK) { 941 switch (tun->flags & TUN_TYPE_MASK) {
923 case TUN_TUN_DEV: 942 case IFF_TUN:
924 dev->netdev_ops = &tun_netdev_ops; 943 dev->netdev_ops = &tun_netdev_ops;
925 944
926 /* Point-to-Point TUN Device */ 945 /* Point-to-Point TUN Device */
@@ -934,7 +953,7 @@ static void tun_net_init(struct net_device *dev)
934 dev->tx_queue_len = TUN_READQ_SIZE; /* We prefer our own queue length */ 953 dev->tx_queue_len = TUN_READQ_SIZE; /* We prefer our own queue length */
935 break; 954 break;
936 955
937 case TUN_TAP_DEV: 956 case IFF_TAP:
938 dev->netdev_ops = &tap_netdev_ops; 957 dev->netdev_ops = &tap_netdev_ops;
939 /* Ethernet TAP Device */ 958 /* Ethernet TAP Device */
940 ether_setup(dev); 959 ether_setup(dev);
@@ -1025,7 +1044,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1025 int err; 1044 int err;
1026 u32 rxhash; 1045 u32 rxhash;
1027 1046
1028 if (!(tun->flags & TUN_NO_PI)) { 1047 if (!(tun->flags & IFF_NO_PI)) {
1029 if (len < sizeof(pi)) 1048 if (len < sizeof(pi))
1030 return -EINVAL; 1049 return -EINVAL;
1031 len -= sizeof(pi); 1050 len -= sizeof(pi);
@@ -1035,7 +1054,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1035 offset += sizeof(pi); 1054 offset += sizeof(pi);
1036 } 1055 }
1037 1056
1038 if (tun->flags & TUN_VNET_HDR) { 1057 if (tun->flags & IFF_VNET_HDR) {
1039 if (len < tun->vnet_hdr_sz) 1058 if (len < tun->vnet_hdr_sz)
1040 return -EINVAL; 1059 return -EINVAL;
1041 len -= tun->vnet_hdr_sz; 1060 len -= tun->vnet_hdr_sz;
@@ -1044,18 +1063,18 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1044 return -EFAULT; 1063 return -EFAULT;
1045 1064
1046 if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && 1065 if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
1047 gso.csum_start + gso.csum_offset + 2 > gso.hdr_len) 1066 tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2 > tun16_to_cpu(tun, gso.hdr_len))
1048 gso.hdr_len = gso.csum_start + gso.csum_offset + 2; 1067 gso.hdr_len = cpu_to_tun16(tun, tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2);
1049 1068
1050 if (gso.hdr_len > len) 1069 if (tun16_to_cpu(tun, gso.hdr_len) > len)
1051 return -EINVAL; 1070 return -EINVAL;
1052 offset += tun->vnet_hdr_sz; 1071 offset += tun->vnet_hdr_sz;
1053 } 1072 }
1054 1073
1055 if ((tun->flags & TUN_TYPE_MASK) == TUN_TAP_DEV) { 1074 if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) {
1056 align += NET_IP_ALIGN; 1075 align += NET_IP_ALIGN;
1057 if (unlikely(len < ETH_HLEN || 1076 if (unlikely(len < ETH_HLEN ||
1058 (gso.hdr_len && gso.hdr_len < ETH_HLEN))) 1077 (gso.hdr_len && tun16_to_cpu(tun, gso.hdr_len) < ETH_HLEN)))
1059 return -EINVAL; 1078 return -EINVAL;
1060 } 1079 }
1061 1080
@@ -1066,7 +1085,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1066 * enough room for skb expand head in case it is used. 1085 * enough room for skb expand head in case it is used.
1067 * The rest of the buffer is mapped from userspace. 1086 * The rest of the buffer is mapped from userspace.
1068 */ 1087 */
1069 copylen = gso.hdr_len ? gso.hdr_len : GOODCOPY_LEN; 1088 copylen = gso.hdr_len ? tun16_to_cpu(tun, gso.hdr_len) : GOODCOPY_LEN;
1070 if (copylen > good_linear) 1089 if (copylen > good_linear)
1071 copylen = good_linear; 1090 copylen = good_linear;
1072 linear = copylen; 1091 linear = copylen;
@@ -1076,10 +1095,10 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1076 1095
1077 if (!zerocopy) { 1096 if (!zerocopy) {
1078 copylen = len; 1097 copylen = len;
1079 if (gso.hdr_len > good_linear) 1098 if (tun16_to_cpu(tun, gso.hdr_len) > good_linear)
1080 linear = good_linear; 1099 linear = good_linear;
1081 else 1100 else
1082 linear = gso.hdr_len; 1101 linear = tun16_to_cpu(tun, gso.hdr_len);
1083 } 1102 }
1084 1103
1085 skb = tun_alloc_skb(tfile, align, copylen, linear, noblock); 1104 skb = tun_alloc_skb(tfile, align, copylen, linear, noblock);
@@ -1106,8 +1125,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1106 } 1125 }
1107 1126
1108 if (gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { 1127 if (gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
1109 if (!skb_partial_csum_set(skb, gso.csum_start, 1128 if (!skb_partial_csum_set(skb, tun16_to_cpu(tun, gso.csum_start),
1110 gso.csum_offset)) { 1129 tun16_to_cpu(tun, gso.csum_offset))) {
1111 tun->dev->stats.rx_frame_errors++; 1130 tun->dev->stats.rx_frame_errors++;
1112 kfree_skb(skb); 1131 kfree_skb(skb);
1113 return -EINVAL; 1132 return -EINVAL;
@@ -1115,8 +1134,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1115 } 1134 }
1116 1135
1117 switch (tun->flags & TUN_TYPE_MASK) { 1136 switch (tun->flags & TUN_TYPE_MASK) {
1118 case TUN_TUN_DEV: 1137 case IFF_TUN:
1119 if (tun->flags & TUN_NO_PI) { 1138 if (tun->flags & IFF_NO_PI) {
1120 switch (skb->data[0] & 0xf0) { 1139 switch (skb->data[0] & 0xf0) {
1121 case 0x40: 1140 case 0x40:
1122 pi.proto = htons(ETH_P_IP); 1141 pi.proto = htons(ETH_P_IP);
@@ -1135,7 +1154,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1135 skb->protocol = pi.proto; 1154 skb->protocol = pi.proto;
1136 skb->dev = tun->dev; 1155 skb->dev = tun->dev;
1137 break; 1156 break;
1138 case TUN_TAP_DEV: 1157 case IFF_TAP:
1139 skb->protocol = eth_type_trans(skb, tun->dev); 1158 skb->protocol = eth_type_trans(skb, tun->dev);
1140 break; 1159 break;
1141 } 1160 }
@@ -1175,7 +1194,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1175 if (gso.gso_type & VIRTIO_NET_HDR_GSO_ECN) 1194 if (gso.gso_type & VIRTIO_NET_HDR_GSO_ECN)
1176 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; 1195 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
1177 1196
1178 skb_shinfo(skb)->gso_size = gso.gso_size; 1197 skb_shinfo(skb)->gso_size = tun16_to_cpu(tun, gso.gso_size);
1179 if (skb_shinfo(skb)->gso_size == 0) { 1198 if (skb_shinfo(skb)->gso_size == 0) {
1180 tun->dev->stats.rx_frame_errors++; 1199 tun->dev->stats.rx_frame_errors++;
1181 kfree_skb(skb); 1200 kfree_skb(skb);
@@ -1241,10 +1260,10 @@ static ssize_t tun_put_user(struct tun_struct *tun,
1241 if (vlan_tx_tag_present(skb)) 1260 if (vlan_tx_tag_present(skb))
1242 vlan_hlen = VLAN_HLEN; 1261 vlan_hlen = VLAN_HLEN;
1243 1262
1244 if (tun->flags & TUN_VNET_HDR) 1263 if (tun->flags & IFF_VNET_HDR)
1245 vnet_hdr_sz = tun->vnet_hdr_sz; 1264 vnet_hdr_sz = tun->vnet_hdr_sz;
1246 1265
1247 if (!(tun->flags & TUN_NO_PI)) { 1266 if (!(tun->flags & IFF_NO_PI)) {
1248 if ((len -= sizeof(pi)) < 0) 1267 if ((len -= sizeof(pi)) < 0)
1249 return -EINVAL; 1268 return -EINVAL;
1250 1269
@@ -1267,8 +1286,8 @@ static ssize_t tun_put_user(struct tun_struct *tun,
1267 struct skb_shared_info *sinfo = skb_shinfo(skb); 1286 struct skb_shared_info *sinfo = skb_shinfo(skb);
1268 1287
1269 /* This is a hint as to how much should be linear. */ 1288 /* This is a hint as to how much should be linear. */
1270 gso.hdr_len = skb_headlen(skb); 1289 gso.hdr_len = cpu_to_tun16(tun, skb_headlen(skb));
1271 gso.gso_size = sinfo->gso_size; 1290 gso.gso_size = cpu_to_tun16(tun, sinfo->gso_size);
1272 if (sinfo->gso_type & SKB_GSO_TCPV4) 1291 if (sinfo->gso_type & SKB_GSO_TCPV4)
1273 gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV4; 1292 gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
1274 else if (sinfo->gso_type & SKB_GSO_TCPV6) 1293 else if (sinfo->gso_type & SKB_GSO_TCPV6)
@@ -1276,12 +1295,12 @@ static ssize_t tun_put_user(struct tun_struct *tun,
1276 else { 1295 else {
1277 pr_err("unexpected GSO type: " 1296 pr_err("unexpected GSO type: "
1278 "0x%x, gso_size %d, hdr_len %d\n", 1297 "0x%x, gso_size %d, hdr_len %d\n",
1279 sinfo->gso_type, gso.gso_size, 1298 sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
1280 gso.hdr_len); 1299 tun16_to_cpu(tun, gso.hdr_len));
1281 print_hex_dump(KERN_ERR, "tun: ", 1300 print_hex_dump(KERN_ERR, "tun: ",
1282 DUMP_PREFIX_NONE, 1301 DUMP_PREFIX_NONE,
1283 16, 1, skb->head, 1302 16, 1, skb->head,
1284 min((int)gso.hdr_len, 64), true); 1303 min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
1285 WARN_ON_ONCE(1); 1304 WARN_ON_ONCE(1);
1286 return -EINVAL; 1305 return -EINVAL;
1287 } 1306 }
@@ -1292,9 +1311,9 @@ static ssize_t tun_put_user(struct tun_struct *tun,
1292 1311
1293 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1312 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1294 gso.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; 1313 gso.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
1295 gso.csum_start = skb_checksum_start_offset(skb) + 1314 gso.csum_start = cpu_to_tun16(tun, skb_checksum_start_offset(skb) +
1296 vlan_hlen; 1315 vlan_hlen);
1297 gso.csum_offset = skb->csum_offset; 1316 gso.csum_offset = cpu_to_tun16(tun, skb->csum_offset);
1298 } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) { 1317 } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1299 gso.flags = VIRTIO_NET_HDR_F_DATA_VALID; 1318 gso.flags = VIRTIO_NET_HDR_F_DATA_VALID;
1300 } /* else everything is zero */ 1319 } /* else everything is zero */
@@ -1521,32 +1540,7 @@ static struct proto tun_proto = {
1521 1540
1522static int tun_flags(struct tun_struct *tun) 1541static int tun_flags(struct tun_struct *tun)
1523{ 1542{
1524 int flags = 0; 1543 return tun->flags & (TUN_FEATURES | IFF_PERSIST | IFF_TUN | IFF_TAP);
1525
1526 if (tun->flags & TUN_TUN_DEV)
1527 flags |= IFF_TUN;
1528 else
1529 flags |= IFF_TAP;
1530
1531 if (tun->flags & TUN_NO_PI)
1532 flags |= IFF_NO_PI;
1533
1534 /* This flag has no real effect. We track the value for backwards
1535 * compatibility.
1536 */
1537 if (tun->flags & TUN_ONE_QUEUE)
1538 flags |= IFF_ONE_QUEUE;
1539
1540 if (tun->flags & TUN_VNET_HDR)
1541 flags |= IFF_VNET_HDR;
1542
1543 if (tun->flags & TUN_TAP_MQ)
1544 flags |= IFF_MULTI_QUEUE;
1545
1546 if (tun->flags & TUN_PERSIST)
1547 flags |= IFF_PERSIST;
1548
1549 return flags;
1550} 1544}
1551 1545
1552static ssize_t tun_show_flags(struct device *dev, struct device_attribute *attr, 1546static ssize_t tun_show_flags(struct device *dev, struct device_attribute *attr,
@@ -1602,7 +1596,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
1602 return -EINVAL; 1596 return -EINVAL;
1603 1597
1604 if (!!(ifr->ifr_flags & IFF_MULTI_QUEUE) != 1598 if (!!(ifr->ifr_flags & IFF_MULTI_QUEUE) !=
1605 !!(tun->flags & TUN_TAP_MQ)) 1599 !!(tun->flags & IFF_MULTI_QUEUE))
1606 return -EINVAL; 1600 return -EINVAL;
1607 1601
1608 if (tun_not_capable(tun)) 1602 if (tun_not_capable(tun))
@@ -1615,7 +1609,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
1615 if (err < 0) 1609 if (err < 0)
1616 return err; 1610 return err;
1617 1611
1618 if (tun->flags & TUN_TAP_MQ && 1612 if (tun->flags & IFF_MULTI_QUEUE &&
1619 (tun->numqueues + tun->numdisabled > 1)) { 1613 (tun->numqueues + tun->numdisabled > 1)) {
1620 /* One or more queue has already been attached, no need 1614 /* One or more queue has already been attached, no need
1621 * to initialize the device again. 1615 * to initialize the device again.
@@ -1638,11 +1632,11 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
1638 /* Set dev type */ 1632 /* Set dev type */
1639 if (ifr->ifr_flags & IFF_TUN) { 1633 if (ifr->ifr_flags & IFF_TUN) {
1640 /* TUN device */ 1634 /* TUN device */
1641 flags |= TUN_TUN_DEV; 1635 flags |= IFF_TUN;
1642 name = "tun%d"; 1636 name = "tun%d";
1643 } else if (ifr->ifr_flags & IFF_TAP) { 1637 } else if (ifr->ifr_flags & IFF_TAP) {
1644 /* TAP device */ 1638 /* TAP device */
1645 flags |= TUN_TAP_DEV; 1639 flags |= IFF_TAP;
1646 name = "tap%d"; 1640 name = "tap%d";
1647 } else 1641 } else
1648 return -EINVAL; 1642 return -EINVAL;
@@ -1706,28 +1700,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
1706 1700
1707 tun_debug(KERN_INFO, tun, "tun_set_iff\n"); 1701 tun_debug(KERN_INFO, tun, "tun_set_iff\n");
1708 1702
1709 if (ifr->ifr_flags & IFF_NO_PI) 1703 tun->flags = (tun->flags & ~TUN_FEATURES) |
1710 tun->flags |= TUN_NO_PI; 1704 (ifr->ifr_flags & TUN_FEATURES);
1711 else
1712 tun->flags &= ~TUN_NO_PI;
1713
1714 /* This flag has no real effect. We track the value for backwards
1715 * compatibility.
1716 */
1717 if (ifr->ifr_flags & IFF_ONE_QUEUE)
1718 tun->flags |= TUN_ONE_QUEUE;
1719 else
1720 tun->flags &= ~TUN_ONE_QUEUE;
1721
1722 if (ifr->ifr_flags & IFF_VNET_HDR)
1723 tun->flags |= TUN_VNET_HDR;
1724 else
1725 tun->flags &= ~TUN_VNET_HDR;
1726
1727 if (ifr->ifr_flags & IFF_MULTI_QUEUE)
1728 tun->flags |= TUN_TAP_MQ;
1729 else
1730 tun->flags &= ~TUN_TAP_MQ;
1731 1705
1732 /* Make sure persistent devices do not get stuck in 1706 /* Make sure persistent devices do not get stuck in
1733 * xoff state. 1707 * xoff state.
@@ -1855,7 +1829,7 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr)
1855 ret = tun_attach(tun, file, false); 1829 ret = tun_attach(tun, file, false);
1856 } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) { 1830 } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
1857 tun = rtnl_dereference(tfile->tun); 1831 tun = rtnl_dereference(tfile->tun);
1858 if (!tun || !(tun->flags & TUN_TAP_MQ) || tfile->detached) 1832 if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
1859 ret = -EINVAL; 1833 ret = -EINVAL;
1860 else 1834 else
1861 __tun_detach(tfile, false); 1835 __tun_detach(tfile, false);
@@ -1890,9 +1864,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
1890 if (cmd == TUNGETFEATURES) { 1864 if (cmd == TUNGETFEATURES) {
1891 /* Currently this just means: "what IFF flags are valid?". 1865 /* Currently this just means: "what IFF flags are valid?".
1892 * This is needed because we never checked for invalid flags on 1866 * This is needed because we never checked for invalid flags on
1893 * TUNSETIFF. */ 1867 * TUNSETIFF.
1894 return put_user(IFF_TUN | IFF_TAP | IFF_NO_PI | IFF_ONE_QUEUE | 1868 */
1895 IFF_VNET_HDR | IFF_MULTI_QUEUE, 1869 return put_user(IFF_TUN | IFF_TAP | TUN_FEATURES,
1896 (unsigned int __user*)argp); 1870 (unsigned int __user*)argp);
1897 } else if (cmd == TUNSETQUEUE) 1871 } else if (cmd == TUNSETQUEUE)
1898 return tun_set_queue(file, &ifr); 1872 return tun_set_queue(file, &ifr);
@@ -1959,12 +1933,12 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
1959 /* Disable/Enable persist mode. Keep an extra reference to the 1933 /* Disable/Enable persist mode. Keep an extra reference to the
1960 * module to prevent the module being unprobed. 1934 * module to prevent the module being unprobed.
1961 */ 1935 */
1962 if (arg && !(tun->flags & TUN_PERSIST)) { 1936 if (arg && !(tun->flags & IFF_PERSIST)) {
1963 tun->flags |= TUN_PERSIST; 1937 tun->flags |= IFF_PERSIST;
1964 __module_get(THIS_MODULE); 1938 __module_get(THIS_MODULE);
1965 } 1939 }
1966 if (!arg && (tun->flags & TUN_PERSIST)) { 1940 if (!arg && (tun->flags & IFF_PERSIST)) {
1967 tun->flags &= ~TUN_PERSIST; 1941 tun->flags &= ~IFF_PERSIST;
1968 module_put(THIS_MODULE); 1942 module_put(THIS_MODULE);
1969 } 1943 }
1970 1944
@@ -2022,7 +1996,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
2022 case TUNSETTXFILTER: 1996 case TUNSETTXFILTER:
2023 /* Can be set only for TAPs */ 1997 /* Can be set only for TAPs */
2024 ret = -EINVAL; 1998 ret = -EINVAL;
2025 if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV) 1999 if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
2026 break; 2000 break;
2027 ret = update_filter(&tun->txflt, (void __user *)arg); 2001 ret = update_filter(&tun->txflt, (void __user *)arg);
2028 break; 2002 break;
@@ -2081,7 +2055,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
2081 case TUNATTACHFILTER: 2055 case TUNATTACHFILTER:
2082 /* Can be set only for TAPs */ 2056 /* Can be set only for TAPs */
2083 ret = -EINVAL; 2057 ret = -EINVAL;
2084 if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV) 2058 if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
2085 break; 2059 break;
2086 ret = -EFAULT; 2060 ret = -EFAULT;
2087 if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog))) 2061 if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog)))
@@ -2093,7 +2067,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
2093 case TUNDETACHFILTER: 2067 case TUNDETACHFILTER:
2094 /* Can be set only for TAPs */ 2068 /* Can be set only for TAPs */
2095 ret = -EINVAL; 2069 ret = -EINVAL;
2096 if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV) 2070 if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
2097 break; 2071 break;
2098 ret = 0; 2072 ret = 0;
2099 tun_detach_filter(tun, tun->numqueues); 2073 tun_detach_filter(tun, tun->numqueues);
@@ -2101,7 +2075,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
2101 2075
2102 case TUNGETFILTER: 2076 case TUNGETFILTER:
2103 ret = -EINVAL; 2077 ret = -EINVAL;
2104 if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV) 2078 if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
2105 break; 2079 break;
2106 ret = -EFAULT; 2080 ret = -EFAULT;
2107 if (copy_to_user(argp, &tun->fprog, sizeof(tun->fprog))) 2081 if (copy_to_user(argp, &tun->fprog, sizeof(tun->fprog)))
@@ -2222,7 +2196,7 @@ static int tun_chr_close(struct inode *inode, struct file *file)
2222} 2196}
2223 2197
2224#ifdef CONFIG_PROC_FS 2198#ifdef CONFIG_PROC_FS
2225static int tun_chr_show_fdinfo(struct seq_file *m, struct file *f) 2199static void tun_chr_show_fdinfo(struct seq_file *m, struct file *f)
2226{ 2200{
2227 struct tun_struct *tun; 2201 struct tun_struct *tun;
2228 struct ifreq ifr; 2202 struct ifreq ifr;
@@ -2238,7 +2212,7 @@ static int tun_chr_show_fdinfo(struct seq_file *m, struct file *f)
2238 if (tun) 2212 if (tun)
2239 tun_put(tun); 2213 tun_put(tun);
2240 2214
2241 return seq_printf(m, "iff:\t%s\n", ifr.ifr_name); 2215 seq_printf(m, "iff:\t%s\n", ifr.ifr_name);
2242} 2216}
2243#endif 2217#endif
2244 2218
@@ -2294,10 +2268,10 @@ static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
2294 strlcpy(info->version, DRV_VERSION, sizeof(info->version)); 2268 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
2295 2269
2296 switch (tun->flags & TUN_TYPE_MASK) { 2270 switch (tun->flags & TUN_TYPE_MASK) {
2297 case TUN_TUN_DEV: 2271 case IFF_TUN:
2298 strlcpy(info->bus_info, "tun", sizeof(info->bus_info)); 2272 strlcpy(info->bus_info, "tun", sizeof(info->bus_info));
2299 break; 2273 break;
2300 case TUN_TAP_DEV: 2274 case IFF_TAP:
2301 strlcpy(info->bus_info, "tap", sizeof(info->bus_info)); 2275 strlcpy(info->bus_info, "tap", sizeof(info->bus_info));
2302 break; 2276 break;
2303 } 2277 }
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index b0bc8ead47de..b8bd7191572d 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -123,6 +123,9 @@ struct virtnet_info {
123 /* Host can handle any s/g split between our header and packet data */ 123 /* Host can handle any s/g split between our header and packet data */
124 bool any_header_sg; 124 bool any_header_sg;
125 125
126 /* Packet virtio header size */
127 u8 hdr_len;
128
126 /* Active statistics */ 129 /* Active statistics */
127 struct virtnet_stats __percpu *stats; 130 struct virtnet_stats __percpu *stats;
128 131
@@ -139,21 +142,14 @@ struct virtnet_info {
139 struct notifier_block nb; 142 struct notifier_block nb;
140}; 143};
141 144
142struct skb_vnet_hdr {
143 union {
144 struct virtio_net_hdr hdr;
145 struct virtio_net_hdr_mrg_rxbuf mhdr;
146 };
147};
148
149struct padded_vnet_hdr { 145struct padded_vnet_hdr {
150 struct virtio_net_hdr hdr; 146 struct virtio_net_hdr_mrg_rxbuf hdr;
151 /* 147 /*
152 * virtio_net_hdr should be in a separated sg buffer because of a 148 * hdr is in a separate sg buffer, and data sg buffer shares same page
153 * QEMU bug, and data sg buffer shares same page with this header sg. 149 * with this header sg. This padding makes next sg 16 byte aligned
154 * This padding makes next sg 16 byte aligned after virtio_net_hdr. 150 * after the header.
155 */ 151 */
156 char padding[6]; 152 char padding[4];
157}; 153};
158 154
159/* Converting between virtqueue no. and kernel tx/rx queue no. 155/* Converting between virtqueue no. and kernel tx/rx queue no.
@@ -179,9 +175,9 @@ static int rxq2vq(int rxq)
179 return rxq * 2; 175 return rxq * 2;
180} 176}
181 177
182static inline struct skb_vnet_hdr *skb_vnet_hdr(struct sk_buff *skb) 178static inline struct virtio_net_hdr_mrg_rxbuf *skb_vnet_hdr(struct sk_buff *skb)
183{ 179{
184 return (struct skb_vnet_hdr *)skb->cb; 180 return (struct virtio_net_hdr_mrg_rxbuf *)skb->cb;
185} 181}
186 182
187/* 183/*
@@ -241,13 +237,13 @@ static unsigned long mergeable_buf_to_ctx(void *buf, unsigned int truesize)
241} 237}
242 238
243/* Called from bottom half context */ 239/* Called from bottom half context */
244static struct sk_buff *page_to_skb(struct receive_queue *rq, 240static struct sk_buff *page_to_skb(struct virtnet_info *vi,
241 struct receive_queue *rq,
245 struct page *page, unsigned int offset, 242 struct page *page, unsigned int offset,
246 unsigned int len, unsigned int truesize) 243 unsigned int len, unsigned int truesize)
247{ 244{
248 struct virtnet_info *vi = rq->vq->vdev->priv;
249 struct sk_buff *skb; 245 struct sk_buff *skb;
250 struct skb_vnet_hdr *hdr; 246 struct virtio_net_hdr_mrg_rxbuf *hdr;
251 unsigned int copy, hdr_len, hdr_padded_len; 247 unsigned int copy, hdr_len, hdr_padded_len;
252 char *p; 248 char *p;
253 249
@@ -260,13 +256,11 @@ static struct sk_buff *page_to_skb(struct receive_queue *rq,
260 256
261 hdr = skb_vnet_hdr(skb); 257 hdr = skb_vnet_hdr(skb);
262 258
263 if (vi->mergeable_rx_bufs) { 259 hdr_len = vi->hdr_len;
264 hdr_len = sizeof hdr->mhdr; 260 if (vi->mergeable_rx_bufs)
265 hdr_padded_len = sizeof hdr->mhdr; 261 hdr_padded_len = sizeof *hdr;
266 } else { 262 else
267 hdr_len = sizeof hdr->hdr;
268 hdr_padded_len = sizeof(struct padded_vnet_hdr); 263 hdr_padded_len = sizeof(struct padded_vnet_hdr);
269 }
270 264
271 memcpy(hdr, p, hdr_len); 265 memcpy(hdr, p, hdr_len);
272 266
@@ -317,23 +311,24 @@ static struct sk_buff *page_to_skb(struct receive_queue *rq,
317 return skb; 311 return skb;
318} 312}
319 313
320static struct sk_buff *receive_small(void *buf, unsigned int len) 314static struct sk_buff *receive_small(struct virtnet_info *vi, void *buf, unsigned int len)
321{ 315{
322 struct sk_buff * skb = buf; 316 struct sk_buff * skb = buf;
323 317
324 len -= sizeof(struct virtio_net_hdr); 318 len -= vi->hdr_len;
325 skb_trim(skb, len); 319 skb_trim(skb, len);
326 320
327 return skb; 321 return skb;
328} 322}
329 323
330static struct sk_buff *receive_big(struct net_device *dev, 324static struct sk_buff *receive_big(struct net_device *dev,
325 struct virtnet_info *vi,
331 struct receive_queue *rq, 326 struct receive_queue *rq,
332 void *buf, 327 void *buf,
333 unsigned int len) 328 unsigned int len)
334{ 329{
335 struct page *page = buf; 330 struct page *page = buf;
336 struct sk_buff *skb = page_to_skb(rq, page, 0, len, PAGE_SIZE); 331 struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE);
337 332
338 if (unlikely(!skb)) 333 if (unlikely(!skb))
339 goto err; 334 goto err;
@@ -347,18 +342,20 @@ err:
347} 342}
348 343
349static struct sk_buff *receive_mergeable(struct net_device *dev, 344static struct sk_buff *receive_mergeable(struct net_device *dev,
345 struct virtnet_info *vi,
350 struct receive_queue *rq, 346 struct receive_queue *rq,
351 unsigned long ctx, 347 unsigned long ctx,
352 unsigned int len) 348 unsigned int len)
353{ 349{
354 void *buf = mergeable_ctx_to_buf_address(ctx); 350 void *buf = mergeable_ctx_to_buf_address(ctx);
355 struct skb_vnet_hdr *hdr = buf; 351 struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
356 int num_buf = hdr->mhdr.num_buffers; 352 u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
357 struct page *page = virt_to_head_page(buf); 353 struct page *page = virt_to_head_page(buf);
358 int offset = buf - page_address(page); 354 int offset = buf - page_address(page);
359 unsigned int truesize = max(len, mergeable_ctx_to_buf_truesize(ctx)); 355 unsigned int truesize = max(len, mergeable_ctx_to_buf_truesize(ctx));
360 356
361 struct sk_buff *head_skb = page_to_skb(rq, page, offset, len, truesize); 357 struct sk_buff *head_skb = page_to_skb(vi, rq, page, offset, len,
358 truesize);
362 struct sk_buff *curr_skb = head_skb; 359 struct sk_buff *curr_skb = head_skb;
363 360
364 if (unlikely(!curr_skb)) 361 if (unlikely(!curr_skb))
@@ -369,7 +366,9 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
369 ctx = (unsigned long)virtqueue_get_buf(rq->vq, &len); 366 ctx = (unsigned long)virtqueue_get_buf(rq->vq, &len);
370 if (unlikely(!ctx)) { 367 if (unlikely(!ctx)) {
371 pr_debug("%s: rx error: %d buffers out of %d missing\n", 368 pr_debug("%s: rx error: %d buffers out of %d missing\n",
372 dev->name, num_buf, hdr->mhdr.num_buffers); 369 dev->name, num_buf,
370 virtio16_to_cpu(vi->vdev,
371 hdr->num_buffers));
373 dev->stats.rx_length_errors++; 372 dev->stats.rx_length_errors++;
374 goto err_buf; 373 goto err_buf;
375 } 374 }
@@ -430,15 +429,15 @@ err_buf:
430 return NULL; 429 return NULL;
431} 430}
432 431
433static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len) 432static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
433 void *buf, unsigned int len)
434{ 434{
435 struct virtnet_info *vi = rq->vq->vdev->priv;
436 struct net_device *dev = vi->dev; 435 struct net_device *dev = vi->dev;
437 struct virtnet_stats *stats = this_cpu_ptr(vi->stats); 436 struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
438 struct sk_buff *skb; 437 struct sk_buff *skb;
439 struct skb_vnet_hdr *hdr; 438 struct virtio_net_hdr_mrg_rxbuf *hdr;
440 439
441 if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) { 440 if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
442 pr_debug("%s: short packet %i\n", dev->name, len); 441 pr_debug("%s: short packet %i\n", dev->name, len);
443 dev->stats.rx_length_errors++; 442 dev->stats.rx_length_errors++;
444 if (vi->mergeable_rx_bufs) { 443 if (vi->mergeable_rx_bufs) {
@@ -454,11 +453,11 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
454 } 453 }
455 454
456 if (vi->mergeable_rx_bufs) 455 if (vi->mergeable_rx_bufs)
457 skb = receive_mergeable(dev, rq, (unsigned long)buf, len); 456 skb = receive_mergeable(dev, vi, rq, (unsigned long)buf, len);
458 else if (vi->big_packets) 457 else if (vi->big_packets)
459 skb = receive_big(dev, rq, buf, len); 458 skb = receive_big(dev, vi, rq, buf, len);
460 else 459 else
461 skb = receive_small(buf, len); 460 skb = receive_small(vi, buf, len);
462 461
463 if (unlikely(!skb)) 462 if (unlikely(!skb))
464 return; 463 return;
@@ -473,8 +472,8 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
473 if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { 472 if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
474 pr_debug("Needs csum!\n"); 473 pr_debug("Needs csum!\n");
475 if (!skb_partial_csum_set(skb, 474 if (!skb_partial_csum_set(skb,
476 hdr->hdr.csum_start, 475 virtio16_to_cpu(vi->vdev, hdr->hdr.csum_start),
477 hdr->hdr.csum_offset)) 476 virtio16_to_cpu(vi->vdev, hdr->hdr.csum_offset)))
478 goto frame_err; 477 goto frame_err;
479 } else if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID) { 478 } else if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID) {
480 skb->ip_summed = CHECKSUM_UNNECESSARY; 479 skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -514,7 +513,8 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
514 if (hdr->hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN) 513 if (hdr->hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
515 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; 514 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
516 515
517 skb_shinfo(skb)->gso_size = hdr->hdr.gso_size; 516 skb_shinfo(skb)->gso_size = virtio16_to_cpu(vi->vdev,
517 hdr->hdr.gso_size);
518 if (skb_shinfo(skb)->gso_size == 0) { 518 if (skb_shinfo(skb)->gso_size == 0) {
519 net_warn_ratelimited("%s: zero gso size.\n", dev->name); 519 net_warn_ratelimited("%s: zero gso size.\n", dev->name);
520 goto frame_err; 520 goto frame_err;
@@ -535,11 +535,11 @@ frame_err:
535 dev_kfree_skb(skb); 535 dev_kfree_skb(skb);
536} 536}
537 537
538static int add_recvbuf_small(struct receive_queue *rq, gfp_t gfp) 538static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
539 gfp_t gfp)
539{ 540{
540 struct virtnet_info *vi = rq->vq->vdev->priv;
541 struct sk_buff *skb; 541 struct sk_buff *skb;
542 struct skb_vnet_hdr *hdr; 542 struct virtio_net_hdr_mrg_rxbuf *hdr;
543 int err; 543 int err;
544 544
545 skb = __netdev_alloc_skb_ip_align(vi->dev, GOOD_PACKET_LEN, gfp); 545 skb = __netdev_alloc_skb_ip_align(vi->dev, GOOD_PACKET_LEN, gfp);
@@ -550,7 +550,7 @@ static int add_recvbuf_small(struct receive_queue *rq, gfp_t gfp)
550 550
551 hdr = skb_vnet_hdr(skb); 551 hdr = skb_vnet_hdr(skb);
552 sg_init_table(rq->sg, MAX_SKB_FRAGS + 2); 552 sg_init_table(rq->sg, MAX_SKB_FRAGS + 2);
553 sg_set_buf(rq->sg, &hdr->hdr, sizeof hdr->hdr); 553 sg_set_buf(rq->sg, hdr, vi->hdr_len);
554 skb_to_sgvec(skb, rq->sg + 1, 0, skb->len); 554 skb_to_sgvec(skb, rq->sg + 1, 0, skb->len);
555 555
556 err = virtqueue_add_inbuf(rq->vq, rq->sg, 2, skb, gfp); 556 err = virtqueue_add_inbuf(rq->vq, rq->sg, 2, skb, gfp);
@@ -560,7 +560,8 @@ static int add_recvbuf_small(struct receive_queue *rq, gfp_t gfp)
560 return err; 560 return err;
561} 561}
562 562
563static int add_recvbuf_big(struct receive_queue *rq, gfp_t gfp) 563static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
564 gfp_t gfp)
564{ 565{
565 struct page *first, *list = NULL; 566 struct page *first, *list = NULL;
566 char *p; 567 char *p;
@@ -591,8 +592,8 @@ static int add_recvbuf_big(struct receive_queue *rq, gfp_t gfp)
591 p = page_address(first); 592 p = page_address(first);
592 593
593 /* rq->sg[0], rq->sg[1] share the same page */ 594 /* rq->sg[0], rq->sg[1] share the same page */
594 /* a separated rq->sg[0] for virtio_net_hdr only due to QEMU bug */ 595 /* a separated rq->sg[0] for header - required in case !any_header_sg */
595 sg_set_buf(&rq->sg[0], p, sizeof(struct virtio_net_hdr)); 596 sg_set_buf(&rq->sg[0], p, vi->hdr_len);
596 597
597 /* rq->sg[1] for data packet, from offset */ 598 /* rq->sg[1] for data packet, from offset */
598 offset = sizeof(struct padded_vnet_hdr); 599 offset = sizeof(struct padded_vnet_hdr);
@@ -660,9 +661,9 @@ static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp)
660 * before we're receiving packets, or from refill_work which is 661 * before we're receiving packets, or from refill_work which is
661 * careful to disable receiving (using napi_disable). 662 * careful to disable receiving (using napi_disable).
662 */ 663 */
663static bool try_fill_recv(struct receive_queue *rq, gfp_t gfp) 664static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
665 gfp_t gfp)
664{ 666{
665 struct virtnet_info *vi = rq->vq->vdev->priv;
666 int err; 667 int err;
667 bool oom; 668 bool oom;
668 669
@@ -671,9 +672,9 @@ static bool try_fill_recv(struct receive_queue *rq, gfp_t gfp)
671 if (vi->mergeable_rx_bufs) 672 if (vi->mergeable_rx_bufs)
672 err = add_recvbuf_mergeable(rq, gfp); 673 err = add_recvbuf_mergeable(rq, gfp);
673 else if (vi->big_packets) 674 else if (vi->big_packets)
674 err = add_recvbuf_big(rq, gfp); 675 err = add_recvbuf_big(vi, rq, gfp);
675 else 676 else
676 err = add_recvbuf_small(rq, gfp); 677 err = add_recvbuf_small(vi, rq, gfp);
677 678
678 oom = err == -ENOMEM; 679 oom = err == -ENOMEM;
679 if (err) 680 if (err)
@@ -722,7 +723,7 @@ static void refill_work(struct work_struct *work)
722 struct receive_queue *rq = &vi->rq[i]; 723 struct receive_queue *rq = &vi->rq[i];
723 724
724 napi_disable(&rq->napi); 725 napi_disable(&rq->napi);
725 still_empty = !try_fill_recv(rq, GFP_KERNEL); 726 still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
726 virtnet_napi_enable(rq); 727 virtnet_napi_enable(rq);
727 728
728 /* In theory, this can happen: if we don't get any buffers in 729 /* In theory, this can happen: if we don't get any buffers in
@@ -741,12 +742,12 @@ static int virtnet_receive(struct receive_queue *rq, int budget)
741 742
742 while (received < budget && 743 while (received < budget &&
743 (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) { 744 (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
744 receive_buf(rq, buf, len); 745 receive_buf(vi, rq, buf, len);
745 received++; 746 received++;
746 } 747 }
747 748
748 if (rq->vq->num_free > virtqueue_get_vring_size(rq->vq) / 2) { 749 if (rq->vq->num_free > virtqueue_get_vring_size(rq->vq) / 2) {
749 if (!try_fill_recv(rq, GFP_ATOMIC)) 750 if (!try_fill_recv(vi, rq, GFP_ATOMIC))
750 schedule_delayed_work(&vi->refill, 0); 751 schedule_delayed_work(&vi->refill, 0);
751 } 752 }
752 753
@@ -822,7 +823,7 @@ static int virtnet_open(struct net_device *dev)
822 for (i = 0; i < vi->max_queue_pairs; i++) { 823 for (i = 0; i < vi->max_queue_pairs; i++) {
823 if (i < vi->curr_queue_pairs) 824 if (i < vi->curr_queue_pairs)
824 /* Make sure we have some buffers: if oom use wq. */ 825 /* Make sure we have some buffers: if oom use wq. */
825 if (!try_fill_recv(&vi->rq[i], GFP_KERNEL)) 826 if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
826 schedule_delayed_work(&vi->refill, 0); 827 schedule_delayed_work(&vi->refill, 0);
827 virtnet_napi_enable(&vi->rq[i]); 828 virtnet_napi_enable(&vi->rq[i]);
828 } 829 }
@@ -851,18 +852,14 @@ static void free_old_xmit_skbs(struct send_queue *sq)
851 852
852static int xmit_skb(struct send_queue *sq, struct sk_buff *skb) 853static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
853{ 854{
854 struct skb_vnet_hdr *hdr; 855 struct virtio_net_hdr_mrg_rxbuf *hdr;
855 const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest; 856 const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
856 struct virtnet_info *vi = sq->vq->vdev->priv; 857 struct virtnet_info *vi = sq->vq->vdev->priv;
857 unsigned num_sg; 858 unsigned num_sg;
858 unsigned hdr_len; 859 unsigned hdr_len = vi->hdr_len;
859 bool can_push; 860 bool can_push;
860 861
861 pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest); 862 pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
862 if (vi->mergeable_rx_bufs)
863 hdr_len = sizeof hdr->mhdr;
864 else
865 hdr_len = sizeof hdr->hdr;
866 863
867 can_push = vi->any_header_sg && 864 can_push = vi->any_header_sg &&
868 !((unsigned long)skb->data & (__alignof__(*hdr) - 1)) && 865 !((unsigned long)skb->data & (__alignof__(*hdr) - 1)) &&
@@ -870,22 +867,25 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
870 /* Even if we can, don't push here yet as this would skew 867 /* Even if we can, don't push here yet as this would skew
871 * csum_start offset below. */ 868 * csum_start offset below. */
872 if (can_push) 869 if (can_push)
873 hdr = (struct skb_vnet_hdr *)(skb->data - hdr_len); 870 hdr = (struct virtio_net_hdr_mrg_rxbuf *)(skb->data - hdr_len);
874 else 871 else
875 hdr = skb_vnet_hdr(skb); 872 hdr = skb_vnet_hdr(skb);
876 873
877 if (skb->ip_summed == CHECKSUM_PARTIAL) { 874 if (skb->ip_summed == CHECKSUM_PARTIAL) {
878 hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; 875 hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
879 hdr->hdr.csum_start = skb_checksum_start_offset(skb); 876 hdr->hdr.csum_start = cpu_to_virtio16(vi->vdev,
880 hdr->hdr.csum_offset = skb->csum_offset; 877 skb_checksum_start_offset(skb));
878 hdr->hdr.csum_offset = cpu_to_virtio16(vi->vdev,
879 skb->csum_offset);
881 } else { 880 } else {
882 hdr->hdr.flags = 0; 881 hdr->hdr.flags = 0;
883 hdr->hdr.csum_offset = hdr->hdr.csum_start = 0; 882 hdr->hdr.csum_offset = hdr->hdr.csum_start = 0;
884 } 883 }
885 884
886 if (skb_is_gso(skb)) { 885 if (skb_is_gso(skb)) {
887 hdr->hdr.hdr_len = skb_headlen(skb); 886 hdr->hdr.hdr_len = cpu_to_virtio16(vi->vdev, skb_headlen(skb));
888 hdr->hdr.gso_size = skb_shinfo(skb)->gso_size; 887 hdr->hdr.gso_size = cpu_to_virtio16(vi->vdev,
888 skb_shinfo(skb)->gso_size);
889 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) 889 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
890 hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4; 890 hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
891 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) 891 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
@@ -900,7 +900,7 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
900 } 900 }
901 901
902 if (vi->mergeable_rx_bufs) 902 if (vi->mergeable_rx_bufs)
903 hdr->mhdr.num_buffers = 0; 903 hdr->num_buffers = 0;
904 904
905 sg_init_table(sq->sg, MAX_SKB_FRAGS + 2); 905 sg_init_table(sq->sg, MAX_SKB_FRAGS + 2);
906 if (can_push) { 906 if (can_push) {
@@ -1030,7 +1030,8 @@ static int virtnet_set_mac_address(struct net_device *dev, void *p)
1030 "Failed to set mac address by vq command.\n"); 1030 "Failed to set mac address by vq command.\n");
1031 return -EINVAL; 1031 return -EINVAL;
1032 } 1032 }
1033 } else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) { 1033 } else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC) &&
1034 !virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
1034 unsigned int i; 1035 unsigned int i;
1035 1036
1036 /* Naturally, this has an atomicity problem. */ 1037 /* Naturally, this has an atomicity problem. */
@@ -1112,7 +1113,7 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
1112 if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ)) 1113 if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
1113 return 0; 1114 return 0;
1114 1115
1115 s.virtqueue_pairs = queue_pairs; 1116 s.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
1116 sg_init_one(&sg, &s, sizeof(s)); 1117 sg_init_one(&sg, &s, sizeof(s));
1117 1118
1118 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ, 1119 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
@@ -1189,7 +1190,7 @@ static void virtnet_set_rx_mode(struct net_device *dev)
1189 sg_init_table(sg, 2); 1190 sg_init_table(sg, 2);
1190 1191
1191 /* Store the unicast list and count in the front of the buffer */ 1192 /* Store the unicast list and count in the front of the buffer */
1192 mac_data->entries = uc_count; 1193 mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count);
1193 i = 0; 1194 i = 0;
1194 netdev_for_each_uc_addr(ha, dev) 1195 netdev_for_each_uc_addr(ha, dev)
1195 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); 1196 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
@@ -1200,7 +1201,7 @@ static void virtnet_set_rx_mode(struct net_device *dev)
1200 /* multicast list and count fill the end */ 1201 /* multicast list and count fill the end */
1201 mac_data = (void *)&mac_data->macs[uc_count][0]; 1202 mac_data = (void *)&mac_data->macs[uc_count][0];
1202 1203
1203 mac_data->entries = mc_count; 1204 mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count);
1204 i = 0; 1205 i = 0;
1205 netdev_for_each_mc_addr(ha, dev) 1206 netdev_for_each_mc_addr(ha, dev)
1206 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); 1207 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
@@ -1805,18 +1806,20 @@ static int virtnet_probe(struct virtio_device *vdev)
1805 if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) 1806 if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
1806 vi->mergeable_rx_bufs = true; 1807 vi->mergeable_rx_bufs = true;
1807 1808
1809 if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) ||
1810 virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
1811 vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
1812 else
1813 vi->hdr_len = sizeof(struct virtio_net_hdr);
1814
1808 if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT)) 1815 if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT))
1809 vi->any_header_sg = true; 1816 vi->any_header_sg = true;
1810 1817
1811 if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) 1818 if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
1812 vi->has_cvq = true; 1819 vi->has_cvq = true;
1813 1820
1814 if (vi->any_header_sg) { 1821 if (vi->any_header_sg)
1815 if (vi->mergeable_rx_bufs) 1822 dev->needed_headroom = vi->hdr_len;
1816 dev->needed_headroom = sizeof(struct virtio_net_hdr_mrg_rxbuf);
1817 else
1818 dev->needed_headroom = sizeof(struct virtio_net_hdr);
1819 }
1820 1823
1821 /* Use single tx/rx queue pair as default */ 1824 /* Use single tx/rx queue pair as default */
1822 vi->curr_queue_pairs = 1; 1825 vi->curr_queue_pairs = 1;
@@ -1844,7 +1847,7 @@ static int virtnet_probe(struct virtio_device *vdev)
1844 1847
1845 /* Last of all, set up some receive buffers. */ 1848 /* Last of all, set up some receive buffers. */
1846 for (i = 0; i < vi->curr_queue_pairs; i++) { 1849 for (i = 0; i < vi->curr_queue_pairs; i++) {
1847 try_fill_recv(&vi->rq[i], GFP_KERNEL); 1850 try_fill_recv(vi, &vi->rq[i], GFP_KERNEL);
1848 1851
1849 /* If we didn't even get one input buffer, we're useless. */ 1852 /* If we didn't even get one input buffer, we're useless. */
1850 if (vi->rq[i].vq->num_free == 1853 if (vi->rq[i].vq->num_free ==
@@ -1964,7 +1967,7 @@ static int virtnet_restore(struct virtio_device *vdev)
1964 1967
1965 if (netif_running(vi->dev)) { 1968 if (netif_running(vi->dev)) {
1966 for (i = 0; i < vi->curr_queue_pairs; i++) 1969 for (i = 0; i < vi->curr_queue_pairs; i++)
1967 if (!try_fill_recv(&vi->rq[i], GFP_KERNEL)) 1970 if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
1968 schedule_delayed_work(&vi->refill, 0); 1971 schedule_delayed_work(&vi->refill, 0);
1969 1972
1970 for (i = 0; i < vi->max_queue_pairs; i++) 1973 for (i = 0; i < vi->max_queue_pairs; i++)
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index e1e335c339e3..be4649a49c5e 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -2306,9 +2306,9 @@ static struct socket *vxlan_create_sock(struct net *net, bool ipv6,
2306 if (ipv6) { 2306 if (ipv6) {
2307 udp_conf.family = AF_INET6; 2307 udp_conf.family = AF_INET6;
2308 udp_conf.use_udp6_tx_checksums = 2308 udp_conf.use_udp6_tx_checksums =
2309 !!(flags & VXLAN_F_UDP_ZERO_CSUM6_TX); 2309 !(flags & VXLAN_F_UDP_ZERO_CSUM6_TX);
2310 udp_conf.use_udp6_rx_checksums = 2310 udp_conf.use_udp6_rx_checksums =
2311 !!(flags & VXLAN_F_UDP_ZERO_CSUM6_RX); 2311 !(flags & VXLAN_F_UDP_ZERO_CSUM6_RX);
2312 } else { 2312 } else {
2313 udp_conf.family = AF_INET; 2313 udp_conf.family = AF_INET;
2314 udp_conf.local_ip.s_addr = INADDR_ANY; 2314 udp_conf.local_ip.s_addr = INADDR_ANY;
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h
index 4f6e66892acc..b894a84e8393 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw.h
@@ -155,6 +155,7 @@ enum iwl_ucode_tlv_api {
155 * @IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT: supports Quiet Period requests 155 * @IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT: supports Quiet Period requests
156 * @IWL_UCODE_TLV_CAPA_DQA_SUPPORT: supports dynamic queue allocation (DQA), 156 * @IWL_UCODE_TLV_CAPA_DQA_SUPPORT: supports dynamic queue allocation (DQA),
157 * which also implies support for the scheduler configuration command 157 * which also implies support for the scheduler configuration command
158 * @IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command
158 */ 159 */
159enum iwl_ucode_tlv_capa { 160enum iwl_ucode_tlv_capa {
160 IWL_UCODE_TLV_CAPA_D0I3_SUPPORT = BIT(0), 161 IWL_UCODE_TLV_CAPA_D0I3_SUPPORT = BIT(0),
@@ -163,6 +164,7 @@ enum iwl_ucode_tlv_capa {
163 IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT = BIT(10), 164 IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT = BIT(10),
164 IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT = BIT(11), 165 IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT = BIT(11),
165 IWL_UCODE_TLV_CAPA_DQA_SUPPORT = BIT(12), 166 IWL_UCODE_TLV_CAPA_DQA_SUPPORT = BIT(12),
167 IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT = BIT(18),
166}; 168};
167 169
168/* The default calibrate table size if not specified by firmware file */ 170/* The default calibrate table size if not specified by firmware file */
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index b62405865b25..b6d2683da3a9 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -2448,9 +2448,15 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw,
2448 2448
2449 switch (vif->type) { 2449 switch (vif->type) {
2450 case NL80211_IFTYPE_STATION: 2450 case NL80211_IFTYPE_STATION:
2451 /* Use aux roc framework (HS20) */ 2451 if (mvm->fw->ucode_capa.capa[0] &
2452 ret = iwl_mvm_send_aux_roc_cmd(mvm, channel, 2452 IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT) {
2453 vif, duration); 2453 /* Use aux roc framework (HS20) */
2454 ret = iwl_mvm_send_aux_roc_cmd(mvm, channel,
2455 vif, duration);
2456 goto out_unlock;
2457 }
2458 IWL_ERR(mvm, "hotspot not supported\n");
2459 ret = -EINVAL;
2454 goto out_unlock; 2460 goto out_unlock;
2455 case NL80211_IFTYPE_P2P_DEVICE: 2461 case NL80211_IFTYPE_P2P_DEVICE:
2456 /* handle below */ 2462 /* handle below */
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 61f5d36eca6a..846a2e6e34d8 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -2249,6 +2249,16 @@ int rtl_pci_probe(struct pci_dev *pdev,
2249 /*like read eeprom and so on */ 2249 /*like read eeprom and so on */
2250 rtlpriv->cfg->ops->read_eeprom_info(hw); 2250 rtlpriv->cfg->ops->read_eeprom_info(hw);
2251 2251
2252 if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
2253 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't init_sw_vars\n");
2254 err = -ENODEV;
2255 goto fail3;
2256 }
2257 rtlpriv->cfg->ops->init_sw_leds(hw);
2258
2259 /*aspm */
2260 rtl_pci_init_aspm(hw);
2261
2252 /* Init mac80211 sw */ 2262 /* Init mac80211 sw */
2253 err = rtl_init_core(hw); 2263 err = rtl_init_core(hw);
2254 if (err) { 2264 if (err) {
@@ -2264,16 +2274,6 @@ int rtl_pci_probe(struct pci_dev *pdev,
2264 goto fail3; 2274 goto fail3;
2265 } 2275 }
2266 2276
2267 if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
2268 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't init_sw_vars\n");
2269 err = -ENODEV;
2270 goto fail3;
2271 }
2272 rtlpriv->cfg->ops->init_sw_leds(hw);
2273
2274 /*aspm */
2275 rtl_pci_init_aspm(hw);
2276
2277 err = ieee80211_register_hw(hw); 2277 err = ieee80211_register_hw(hw);
2278 if (err) { 2278 if (err) {
2279 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, 2279 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
index 310d3163dc5b..8ec8200002c7 100644
--- a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
@@ -3672,8 +3672,9 @@ static void rtl8821ae_update_hal_rate_mask(struct ieee80211_hw *hw,
3672 mac->opmode == NL80211_IFTYPE_ADHOC) 3672 mac->opmode == NL80211_IFTYPE_ADHOC)
3673 macid = sta->aid + 1; 3673 macid = sta->aid + 1;
3674 if (wirelessmode == WIRELESS_MODE_N_5G || 3674 if (wirelessmode == WIRELESS_MODE_N_5G ||
3675 wirelessmode == WIRELESS_MODE_AC_5G) 3675 wirelessmode == WIRELESS_MODE_AC_5G ||
3676 ratr_bitmap = sta->supp_rates[NL80211_BAND_5GHZ]; 3676 wirelessmode == WIRELESS_MODE_A)
3677 ratr_bitmap = sta->supp_rates[NL80211_BAND_5GHZ] << 4;
3677 else 3678 else
3678 ratr_bitmap = sta->supp_rates[NL80211_BAND_2GHZ]; 3679 ratr_bitmap = sta->supp_rates[NL80211_BAND_2GHZ];
3679 3680
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 4e56a27f9689..fab0d4b42f58 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -39,7 +39,7 @@ struct backend_info {
39static int connect_rings(struct backend_info *be, struct xenvif_queue *queue); 39static int connect_rings(struct backend_info *be, struct xenvif_queue *queue);
40static void connect(struct backend_info *be); 40static void connect(struct backend_info *be);
41static int read_xenbus_vif_flags(struct backend_info *be); 41static int read_xenbus_vif_flags(struct backend_info *be);
42static void backend_create_xenvif(struct backend_info *be); 42static int backend_create_xenvif(struct backend_info *be);
43static void unregister_hotplug_status_watch(struct backend_info *be); 43static void unregister_hotplug_status_watch(struct backend_info *be);
44static void set_backend_state(struct backend_info *be, 44static void set_backend_state(struct backend_info *be,
45 enum xenbus_state state); 45 enum xenbus_state state);
@@ -352,7 +352,9 @@ static int netback_probe(struct xenbus_device *dev,
352 be->state = XenbusStateInitWait; 352 be->state = XenbusStateInitWait;
353 353
354 /* This kicks hotplug scripts, so do it immediately. */ 354 /* This kicks hotplug scripts, so do it immediately. */
355 backend_create_xenvif(be); 355 err = backend_create_xenvif(be);
356 if (err)
357 goto fail;
356 358
357 return 0; 359 return 0;
358 360
@@ -397,19 +399,19 @@ static int netback_uevent(struct xenbus_device *xdev,
397} 399}
398 400
399 401
400static void backend_create_xenvif(struct backend_info *be) 402static int backend_create_xenvif(struct backend_info *be)
401{ 403{
402 int err; 404 int err;
403 long handle; 405 long handle;
404 struct xenbus_device *dev = be->dev; 406 struct xenbus_device *dev = be->dev;
405 407
406 if (be->vif != NULL) 408 if (be->vif != NULL)
407 return; 409 return 0;
408 410
409 err = xenbus_scanf(XBT_NIL, dev->nodename, "handle", "%li", &handle); 411 err = xenbus_scanf(XBT_NIL, dev->nodename, "handle", "%li", &handle);
410 if (err != 1) { 412 if (err != 1) {
411 xenbus_dev_fatal(dev, err, "reading handle"); 413 xenbus_dev_fatal(dev, err, "reading handle");
412 return; 414 return (err < 0) ? err : -EINVAL;
413 } 415 }
414 416
415 be->vif = xenvif_alloc(&dev->dev, dev->otherend_id, handle); 417 be->vif = xenvif_alloc(&dev->dev, dev->otherend_id, handle);
@@ -417,10 +419,11 @@ static void backend_create_xenvif(struct backend_info *be)
417 err = PTR_ERR(be->vif); 419 err = PTR_ERR(be->vif);
418 be->vif = NULL; 420 be->vif = NULL;
419 xenbus_dev_fatal(dev, err, "creating interface"); 421 xenbus_dev_fatal(dev, err, "creating interface");
420 return; 422 return err;
421 } 423 }
422 424
423 kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE); 425 kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE);
426 return 0;
424} 427}
425 428
426static void backend_disconnect(struct backend_info *be) 429static void backend_disconnect(struct backend_info *be)
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index cca871346a0f..ece8d1804d13 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -496,9 +496,6 @@ static void xennet_make_frags(struct sk_buff *skb, struct netfront_queue *queue,
496 len = skb_frag_size(frag); 496 len = skb_frag_size(frag);
497 offset = frag->page_offset; 497 offset = frag->page_offset;
498 498
499 /* Data must not cross a page boundary. */
500 BUG_ON(len + offset > PAGE_SIZE<<compound_order(page));
501
502 /* Skip unused frames from start of page */ 499 /* Skip unused frames from start of page */
503 page += offset >> PAGE_SHIFT; 500 page += offset >> PAGE_SHIFT;
504 offset &= ~PAGE_MASK; 501 offset &= ~PAGE_MASK;
@@ -506,8 +503,6 @@ static void xennet_make_frags(struct sk_buff *skb, struct netfront_queue *queue,
506 while (len > 0) { 503 while (len > 0) {
507 unsigned long bytes; 504 unsigned long bytes;
508 505
509 BUG_ON(offset >= PAGE_SIZE);
510
511 bytes = PAGE_SIZE - offset; 506 bytes = PAGE_SIZE - offset;
512 if (bytes > len) 507 if (bytes > len)
513 bytes = len; 508 bytes = len;
diff --git a/drivers/nubus/nubus.c b/drivers/nubus/nubus.c
index 5066a7ef7b6c..3319cf19deeb 100644
--- a/drivers/nubus/nubus.c
+++ b/drivers/nubus/nubus.c
@@ -920,14 +920,10 @@ void __init nubus_probe_slot(int slot)
920 rp = nubus_rom_addr(slot); 920 rp = nubus_rom_addr(slot);
921 for(i = 4; i; i--) 921 for(i = 4; i; i--)
922 { 922 {
923 unsigned long flags;
924 int card_present; 923 int card_present;
925 924
926 rp--; 925 rp--;
927 local_irq_save(flags);
928 card_present = hwreg_present(rp); 926 card_present = hwreg_present(rp);
929 local_irq_restore(flags);
930
931 if (!card_present) 927 if (!card_present)
932 continue; 928 continue;
933 929
diff --git a/drivers/of/base.c b/drivers/of/base.c
index f2be7c860cf2..36536b6a8834 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -1272,6 +1272,39 @@ int of_property_read_u64(const struct device_node *np, const char *propname,
1272EXPORT_SYMBOL_GPL(of_property_read_u64); 1272EXPORT_SYMBOL_GPL(of_property_read_u64);
1273 1273
1274/** 1274/**
1275 * of_property_read_u64_array - Find and read an array of 64 bit integers
1276 * from a property.
1277 *
1278 * @np: device node from which the property value is to be read.
1279 * @propname: name of the property to be searched.
1280 * @out_values: pointer to return value, modified only if return value is 0.
1281 * @sz: number of array elements to read
1282 *
1283 * Search for a property in a device node and read 64-bit value(s) from
1284 * it. Returns 0 on success, -EINVAL if the property does not exist,
1285 * -ENODATA if property does not have a value, and -EOVERFLOW if the
1286 * property data isn't large enough.
1287 *
1288 * The out_values is modified only if a valid u64 value can be decoded.
1289 */
1290int of_property_read_u64_array(const struct device_node *np,
1291 const char *propname, u64 *out_values,
1292 size_t sz)
1293{
1294 const __be32 *val = of_find_property_value_of_size(np, propname,
1295 (sz * sizeof(*out_values)));
1296
1297 if (IS_ERR(val))
1298 return PTR_ERR(val);
1299
1300 while (sz--) {
1301 *out_values++ = of_read_number(val, 2);
1302 val += 2;
1303 }
1304 return 0;
1305}
1306
1307/**
1275 * of_property_read_string - Find and read a string from a property 1308 * of_property_read_string - Find and read a string from a property
1276 * @np: device node from which the property value is to be read. 1309 * @np: device node from which the property value is to be read.
1277 * @propname: name of the property to be searched. 1310 * @propname: name of the property to be searched.
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index a41f9fdb1aa0..510074226d57 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -983,8 +983,6 @@ void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
983int __init __weak early_init_dt_reserve_memory_arch(phys_addr_t base, 983int __init __weak early_init_dt_reserve_memory_arch(phys_addr_t base,
984 phys_addr_t size, bool nomap) 984 phys_addr_t size, bool nomap)
985{ 985{
986 if (memblock_is_region_reserved(base, size))
987 return -EBUSY;
988 if (nomap) 986 if (nomap)
989 return memblock_remove(base, size); 987 return memblock_remove(base, size);
990 return memblock_reserve(base, size); 988 return memblock_reserve(base, size);
diff --git a/drivers/of/of_pci.c b/drivers/of/of_pci.c
index 8882b467be95..88471d3d98cd 100644
--- a/drivers/of/of_pci.c
+++ b/drivers/of/of_pci.c
@@ -236,7 +236,7 @@ EXPORT_SYMBOL_GPL(of_pci_get_host_bridge_resources);
236static LIST_HEAD(of_pci_msi_chip_list); 236static LIST_HEAD(of_pci_msi_chip_list);
237static DEFINE_MUTEX(of_pci_msi_chip_mutex); 237static DEFINE_MUTEX(of_pci_msi_chip_mutex);
238 238
239int of_pci_msi_chip_add(struct msi_chip *chip) 239int of_pci_msi_chip_add(struct msi_controller *chip)
240{ 240{
241 if (!of_property_read_bool(chip->of_node, "msi-controller")) 241 if (!of_property_read_bool(chip->of_node, "msi-controller"))
242 return -EINVAL; 242 return -EINVAL;
@@ -249,7 +249,7 @@ int of_pci_msi_chip_add(struct msi_chip *chip)
249} 249}
250EXPORT_SYMBOL_GPL(of_pci_msi_chip_add); 250EXPORT_SYMBOL_GPL(of_pci_msi_chip_add);
251 251
252void of_pci_msi_chip_remove(struct msi_chip *chip) 252void of_pci_msi_chip_remove(struct msi_controller *chip)
253{ 253{
254 mutex_lock(&of_pci_msi_chip_mutex); 254 mutex_lock(&of_pci_msi_chip_mutex);
255 list_del(&chip->list); 255 list_del(&chip->list);
@@ -257,9 +257,9 @@ void of_pci_msi_chip_remove(struct msi_chip *chip)
257} 257}
258EXPORT_SYMBOL_GPL(of_pci_msi_chip_remove); 258EXPORT_SYMBOL_GPL(of_pci_msi_chip_remove);
259 259
260struct msi_chip *of_pci_find_msi_chip_by_node(struct device_node *of_node) 260struct msi_controller *of_pci_find_msi_chip_by_node(struct device_node *of_node)
261{ 261{
262 struct msi_chip *c; 262 struct msi_controller *c;
263 263
264 mutex_lock(&of_pci_msi_chip_mutex); 264 mutex_lock(&of_pci_msi_chip_mutex);
265 list_for_each_entry(c, &of_pci_msi_chip_list, list) { 265 list_for_each_entry(c, &of_pci_msi_chip_list, list) {
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 893503fa1782..cced84233ac0 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -4,6 +4,7 @@
4config PCI_MSI 4config PCI_MSI
5 bool "Message Signaled Interrupts (MSI and MSI-X)" 5 bool "Message Signaled Interrupts (MSI and MSI-X)"
6 depends on PCI 6 depends on PCI
7 select GENERIC_MSI_IRQ
7 help 8 help
8 This allows device drivers to enable MSI (Message Signaled 9 This allows device drivers to enable MSI (Message Signaled
9 Interrupts). Message Signaled Interrupts enable a device to 10 Interrupts). Message Signaled Interrupts enable a device to
@@ -16,6 +17,11 @@ config PCI_MSI
16 17
17 If you don't know what to do here, say Y. 18 If you don't know what to do here, say Y.
18 19
20config PCI_MSI_IRQ_DOMAIN
21 bool
22 depends on PCI_MSI
23 select GENERIC_MSI_IRQ_DOMAIN
24
19config PCI_DEBUG 25config PCI_DEBUG
20 bool "PCI Debugging" 26 bool "PCI Debugging"
21 depends on PCI && DEBUG_KERNEL 27 depends on PCI && DEBUG_KERNEL
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
index 3dc25fad490c..c4b6568e486d 100644
--- a/drivers/pci/host/Kconfig
+++ b/drivers/pci/host/Kconfig
@@ -32,7 +32,10 @@ config PCI_IMX6
32 32
33config PCI_TEGRA 33config PCI_TEGRA
34 bool "NVIDIA Tegra PCIe controller" 34 bool "NVIDIA Tegra PCIe controller"
35 depends on ARCH_TEGRA 35 depends on ARCH_TEGRA && !ARM64
36 help
37 Say Y here if you want support for the PCIe host controller found
38 on NVIDIA Tegra SoCs.
36 39
37config PCI_RCAR_GEN2 40config PCI_RCAR_GEN2
38 bool "Renesas R-Car Gen2 Internal PCI controller" 41 bool "Renesas R-Car Gen2 Internal PCI controller"
@@ -91,4 +94,12 @@ config PCI_XGENE
91 There are 5 internal PCIe ports available. Each port is GEN3 capable 94 There are 5 internal PCIe ports available. Each port is GEN3 capable
92 and have varied lanes from x1 to x8. 95 and have varied lanes from x1 to x8.
93 96
97config PCI_LAYERSCAPE
98 bool "Freescale Layerscape PCIe controller"
99 depends on OF && ARM
100 select PCIE_DW
101 select MFD_SYSCON
102 help
103 Say Y here if you want PCIe controller support on Layerscape SoCs.
104
94endmenu 105endmenu
diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile
index 26b3461d68d7..44c26998027f 100644
--- a/drivers/pci/host/Makefile
+++ b/drivers/pci/host/Makefile
@@ -11,3 +11,4 @@ obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o
11obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone-dw.o pci-keystone.o 11obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone-dw.o pci-keystone.o
12obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx.o 12obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx.o
13obj-$(CONFIG_PCI_XGENE) += pci-xgene.o 13obj-$(CONFIG_PCI_XGENE) += pci-xgene.o
14obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o
diff --git a/drivers/pci/host/pci-dra7xx.c b/drivers/pci/host/pci-dra7xx.c
index 52b34fee07fd..8c6969747acd 100644
--- a/drivers/pci/host/pci-dra7xx.c
+++ b/drivers/pci/host/pci-dra7xx.c
@@ -270,8 +270,8 @@ static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)
270 return IRQ_HANDLED; 270 return IRQ_HANDLED;
271} 271}
272 272
273static int add_pcie_port(struct dra7xx_pcie *dra7xx, 273static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
274 struct platform_device *pdev) 274 struct platform_device *pdev)
275{ 275{
276 int ret; 276 int ret;
277 struct pcie_port *pp; 277 struct pcie_port *pp;
@@ -398,7 +398,7 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
398 398
399 platform_set_drvdata(pdev, dra7xx); 399 platform_set_drvdata(pdev, dra7xx);
400 400
401 ret = add_pcie_port(dra7xx, pdev); 401 ret = dra7xx_add_pcie_port(dra7xx, pdev);
402 if (ret < 0) 402 if (ret < 0)
403 goto err_add_port; 403 goto err_add_port;
404 404
diff --git a/drivers/pci/host/pci-exynos.c b/drivers/pci/host/pci-exynos.c
index c5d0ca384502..850c9f951a3f 100644
--- a/drivers/pci/host/pci-exynos.c
+++ b/drivers/pci/host/pci-exynos.c
@@ -312,7 +312,6 @@ static void exynos_pcie_assert_reset(struct pcie_port *pp)
312 if (exynos_pcie->reset_gpio >= 0) 312 if (exynos_pcie->reset_gpio >= 0)
313 devm_gpio_request_one(pp->dev, exynos_pcie->reset_gpio, 313 devm_gpio_request_one(pp->dev, exynos_pcie->reset_gpio,
314 GPIOF_OUT_INIT_HIGH, "RESET"); 314 GPIOF_OUT_INIT_HIGH, "RESET");
315 return;
316} 315}
317 316
318static int exynos_pcie_establish_link(struct pcie_port *pp) 317static int exynos_pcie_establish_link(struct pcie_port *pp)
@@ -388,7 +387,6 @@ static void exynos_pcie_clear_irq_pulse(struct pcie_port *pp)
388 387
389 val = exynos_elb_readl(exynos_pcie, PCIE_IRQ_PULSE); 388 val = exynos_elb_readl(exynos_pcie, PCIE_IRQ_PULSE);
390 exynos_elb_writel(exynos_pcie, val, PCIE_IRQ_PULSE); 389 exynos_elb_writel(exynos_pcie, val, PCIE_IRQ_PULSE);
391 return;
392} 390}
393 391
394static void exynos_pcie_enable_irq_pulse(struct pcie_port *pp) 392static void exynos_pcie_enable_irq_pulse(struct pcie_port *pp)
@@ -400,7 +398,6 @@ static void exynos_pcie_enable_irq_pulse(struct pcie_port *pp)
400 val = IRQ_INTA_ASSERT | IRQ_INTB_ASSERT | 398 val = IRQ_INTA_ASSERT | IRQ_INTB_ASSERT |
401 IRQ_INTC_ASSERT | IRQ_INTD_ASSERT, 399 IRQ_INTC_ASSERT | IRQ_INTD_ASSERT,
402 exynos_elb_writel(exynos_pcie, val, PCIE_IRQ_EN_PULSE); 400 exynos_elb_writel(exynos_pcie, val, PCIE_IRQ_EN_PULSE);
403 return;
404} 401}
405 402
406static irqreturn_t exynos_pcie_irq_handler(int irq, void *arg) 403static irqreturn_t exynos_pcie_irq_handler(int irq, void *arg)
@@ -429,7 +426,6 @@ static void exynos_pcie_msi_init(struct pcie_port *pp)
429 val = exynos_elb_readl(exynos_pcie, PCIE_IRQ_EN_LEVEL); 426 val = exynos_elb_readl(exynos_pcie, PCIE_IRQ_EN_LEVEL);
430 val |= IRQ_MSI_ENABLE; 427 val |= IRQ_MSI_ENABLE;
431 exynos_elb_writel(exynos_pcie, val, PCIE_IRQ_EN_LEVEL); 428 exynos_elb_writel(exynos_pcie, val, PCIE_IRQ_EN_LEVEL);
432 return;
433} 429}
434 430
435static void exynos_pcie_enable_interrupts(struct pcie_port *pp) 431static void exynos_pcie_enable_interrupts(struct pcie_port *pp)
@@ -438,8 +434,6 @@ static void exynos_pcie_enable_interrupts(struct pcie_port *pp)
438 434
439 if (IS_ENABLED(CONFIG_PCI_MSI)) 435 if (IS_ENABLED(CONFIG_PCI_MSI))
440 exynos_pcie_msi_init(pp); 436 exynos_pcie_msi_init(pp);
441
442 return;
443} 437}
444 438
445static inline void exynos_pcie_readl_rc(struct pcie_port *pp, 439static inline void exynos_pcie_readl_rc(struct pcie_port *pp,
@@ -448,7 +442,6 @@ static inline void exynos_pcie_readl_rc(struct pcie_port *pp,
448 exynos_pcie_sideband_dbi_r_mode(pp, true); 442 exynos_pcie_sideband_dbi_r_mode(pp, true);
449 *val = readl(dbi_base); 443 *val = readl(dbi_base);
450 exynos_pcie_sideband_dbi_r_mode(pp, false); 444 exynos_pcie_sideband_dbi_r_mode(pp, false);
451 return;
452} 445}
453 446
454static inline void exynos_pcie_writel_rc(struct pcie_port *pp, 447static inline void exynos_pcie_writel_rc(struct pcie_port *pp,
@@ -457,7 +450,6 @@ static inline void exynos_pcie_writel_rc(struct pcie_port *pp,
457 exynos_pcie_sideband_dbi_w_mode(pp, true); 450 exynos_pcie_sideband_dbi_w_mode(pp, true);
458 writel(val, dbi_base); 451 writel(val, dbi_base);
459 exynos_pcie_sideband_dbi_w_mode(pp, false); 452 exynos_pcie_sideband_dbi_w_mode(pp, false);
460 return;
461} 453}
462 454
463static int exynos_pcie_rd_own_conf(struct pcie_port *pp, int where, int size, 455static int exynos_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
@@ -509,8 +501,8 @@ static struct pcie_host_ops exynos_pcie_host_ops = {
509 .host_init = exynos_pcie_host_init, 501 .host_init = exynos_pcie_host_init,
510}; 502};
511 503
512static int __init add_pcie_port(struct pcie_port *pp, 504static int __init exynos_add_pcie_port(struct pcie_port *pp,
513 struct platform_device *pdev) 505 struct platform_device *pdev)
514{ 506{
515 int ret; 507 int ret;
516 508
@@ -615,7 +607,7 @@ static int __init exynos_pcie_probe(struct platform_device *pdev)
615 goto fail_bus_clk; 607 goto fail_bus_clk;
616 } 608 }
617 609
618 ret = add_pcie_port(pp, pdev); 610 ret = exynos_add_pcie_port(pp, pdev);
619 if (ret < 0) 611 if (ret < 0)
620 goto fail_bus_clk; 612 goto fail_bus_clk;
621 613
@@ -656,11 +648,11 @@ static struct platform_driver exynos_pcie_driver = {
656 648
657/* Exynos PCIe driver does not allow module unload */ 649/* Exynos PCIe driver does not allow module unload */
658 650
659static int __init pcie_init(void) 651static int __init exynos_pcie_init(void)
660{ 652{
661 return platform_driver_probe(&exynos_pcie_driver, exynos_pcie_probe); 653 return platform_driver_probe(&exynos_pcie_driver, exynos_pcie_probe);
662} 654}
663subsys_initcall(pcie_init); 655subsys_initcall(exynos_pcie_init);
664 656
665MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>"); 657MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>");
666MODULE_DESCRIPTION("Samsung PCIe host controller driver"); 658MODULE_DESCRIPTION("Samsung PCIe host controller driver");
diff --git a/drivers/pci/host/pci-host-generic.c b/drivers/pci/host/pci-host-generic.c
index 3d2076f59911..18959075d164 100644
--- a/drivers/pci/host/pci-host-generic.c
+++ b/drivers/pci/host/pci-host-generic.c
@@ -32,7 +32,7 @@ struct gen_pci_cfg_bus_ops {
32 32
33struct gen_pci_cfg_windows { 33struct gen_pci_cfg_windows {
34 struct resource res; 34 struct resource res;
35 struct resource bus_range; 35 struct resource *bus_range;
36 void __iomem **win; 36 void __iomem **win;
37 37
38 const struct gen_pci_cfg_bus_ops *ops; 38 const struct gen_pci_cfg_bus_ops *ops;
@@ -50,7 +50,7 @@ static void __iomem *gen_pci_map_cfg_bus_cam(struct pci_bus *bus,
50{ 50{
51 struct pci_sys_data *sys = bus->sysdata; 51 struct pci_sys_data *sys = bus->sysdata;
52 struct gen_pci *pci = sys->private_data; 52 struct gen_pci *pci = sys->private_data;
53 resource_size_t idx = bus->number - pci->cfg.bus_range.start; 53 resource_size_t idx = bus->number - pci->cfg.bus_range->start;
54 54
55 return pci->cfg.win[idx] + ((devfn << 8) | where); 55 return pci->cfg.win[idx] + ((devfn << 8) | where);
56} 56}
@@ -66,7 +66,7 @@ static void __iomem *gen_pci_map_cfg_bus_ecam(struct pci_bus *bus,
66{ 66{
67 struct pci_sys_data *sys = bus->sysdata; 67 struct pci_sys_data *sys = bus->sysdata;
68 struct gen_pci *pci = sys->private_data; 68 struct gen_pci *pci = sys->private_data;
69 resource_size_t idx = bus->number - pci->cfg.bus_range.start; 69 resource_size_t idx = bus->number - pci->cfg.bus_range->start;
70 70
71 return pci->cfg.win[idx] + ((devfn << 12) | where); 71 return pci->cfg.win[idx] + ((devfn << 12) | where);
72} 72}
@@ -138,106 +138,50 @@ static const struct of_device_id gen_pci_of_match[] = {
138}; 138};
139MODULE_DEVICE_TABLE(of, gen_pci_of_match); 139MODULE_DEVICE_TABLE(of, gen_pci_of_match);
140 140
141static int gen_pci_calc_io_offset(struct device *dev,
142 struct of_pci_range *range,
143 struct resource *res,
144 resource_size_t *offset)
145{
146 static atomic_t wins = ATOMIC_INIT(0);
147 int err, idx, max_win;
148 unsigned int window;
149
150 if (!PAGE_ALIGNED(range->cpu_addr))
151 return -EINVAL;
152
153 max_win = (IO_SPACE_LIMIT + 1) / SZ_64K;
154 idx = atomic_inc_return(&wins);
155 if (idx > max_win)
156 return -ENOSPC;
157
158 window = (idx - 1) * SZ_64K;
159 err = pci_ioremap_io(window, range->cpu_addr);
160 if (err)
161 return err;
162
163 of_pci_range_to_resource(range, dev->of_node, res);
164 res->start = window;
165 res->end = res->start + range->size - 1;
166 *offset = window - range->pci_addr;
167 return 0;
168}
169
170static int gen_pci_calc_mem_offset(struct device *dev,
171 struct of_pci_range *range,
172 struct resource *res,
173 resource_size_t *offset)
174{
175 of_pci_range_to_resource(range, dev->of_node, res);
176 *offset = range->cpu_addr - range->pci_addr;
177 return 0;
178}
179
180static void gen_pci_release_of_pci_ranges(struct gen_pci *pci) 141static void gen_pci_release_of_pci_ranges(struct gen_pci *pci)
181{ 142{
182 struct pci_host_bridge_window *win;
183
184 list_for_each_entry(win, &pci->resources, list)
185 release_resource(win->res);
186
187 pci_free_resource_list(&pci->resources); 143 pci_free_resource_list(&pci->resources);
188} 144}
189 145
190static int gen_pci_parse_request_of_pci_ranges(struct gen_pci *pci) 146static int gen_pci_parse_request_of_pci_ranges(struct gen_pci *pci)
191{ 147{
192 struct of_pci_range range;
193 struct of_pci_range_parser parser;
194 int err, res_valid = 0; 148 int err, res_valid = 0;
195 struct device *dev = pci->host.dev.parent; 149 struct device *dev = pci->host.dev.parent;
196 struct device_node *np = dev->of_node; 150 struct device_node *np = dev->of_node;
151 resource_size_t iobase;
152 struct pci_host_bridge_window *win;
197 153
198 if (of_pci_range_parser_init(&parser, np)) { 154 err = of_pci_get_host_bridge_resources(np, 0, 0xff, &pci->resources,
199 dev_err(dev, "missing \"ranges\" property\n"); 155 &iobase);
200 return -EINVAL; 156 if (err)
201 } 157 return err;
202
203 for_each_of_pci_range(&parser, &range) {
204 struct resource *parent, *res;
205 resource_size_t offset;
206 u32 restype = range.flags & IORESOURCE_TYPE_BITS;
207 158
208 res = devm_kmalloc(dev, sizeof(*res), GFP_KERNEL); 159 list_for_each_entry(win, &pci->resources, list) {
209 if (!res) { 160 struct resource *parent, *res = win->res;
210 err = -ENOMEM;
211 goto out_release_res;
212 }
213 161
214 switch (restype) { 162 switch (resource_type(res)) {
215 case IORESOURCE_IO: 163 case IORESOURCE_IO:
216 parent = &ioport_resource; 164 parent = &ioport_resource;
217 err = gen_pci_calc_io_offset(dev, &range, res, &offset); 165 err = pci_remap_iospace(res, iobase);
166 if (err) {
167 dev_warn(dev, "error %d: failed to map resource %pR\n",
168 err, res);
169 continue;
170 }
218 break; 171 break;
219 case IORESOURCE_MEM: 172 case IORESOURCE_MEM:
220 parent = &iomem_resource; 173 parent = &iomem_resource;
221 err = gen_pci_calc_mem_offset(dev, &range, res, &offset); 174 res_valid |= !(res->flags & IORESOURCE_PREFETCH);
222 res_valid |= !(res->flags & IORESOURCE_PREFETCH || err);
223 break; 175 break;
176 case IORESOURCE_BUS:
177 pci->cfg.bus_range = res;
224 default: 178 default:
225 err = -EINVAL;
226 continue;
227 }
228
229 if (err) {
230 dev_warn(dev,
231 "error %d: failed to add resource [type 0x%x, %lld bytes]\n",
232 err, restype, range.size);
233 continue; 179 continue;
234 } 180 }
235 181
236 err = request_resource(parent, res); 182 err = devm_request_resource(dev, parent, res);
237 if (err) 183 if (err)
238 goto out_release_res; 184 goto out_release_res;
239
240 pci_add_resource_offset(&pci->resources, res, offset);
241 } 185 }
242 186
243 if (!res_valid) { 187 if (!res_valid) {
@@ -262,38 +206,30 @@ static int gen_pci_parse_map_cfg_windows(struct gen_pci *pci)
262 struct device *dev = pci->host.dev.parent; 206 struct device *dev = pci->host.dev.parent;
263 struct device_node *np = dev->of_node; 207 struct device_node *np = dev->of_node;
264 208
265 if (of_pci_parse_bus_range(np, &pci->cfg.bus_range))
266 pci->cfg.bus_range = (struct resource) {
267 .name = np->name,
268 .start = 0,
269 .end = 0xff,
270 .flags = IORESOURCE_BUS,
271 };
272
273 err = of_address_to_resource(np, 0, &pci->cfg.res); 209 err = of_address_to_resource(np, 0, &pci->cfg.res);
274 if (err) { 210 if (err) {
275 dev_err(dev, "missing \"reg\" property\n"); 211 dev_err(dev, "missing \"reg\" property\n");
276 return err; 212 return err;
277 } 213 }
278 214
279 pci->cfg.win = devm_kcalloc(dev, resource_size(&pci->cfg.bus_range), 215 /* Limit the bus-range to fit within reg */
216 bus_max = pci->cfg.bus_range->start +
217 (resource_size(&pci->cfg.res) >> pci->cfg.ops->bus_shift) - 1;
218 pci->cfg.bus_range->end = min_t(resource_size_t,
219 pci->cfg.bus_range->end, bus_max);
220
221 pci->cfg.win = devm_kcalloc(dev, resource_size(pci->cfg.bus_range),
280 sizeof(*pci->cfg.win), GFP_KERNEL); 222 sizeof(*pci->cfg.win), GFP_KERNEL);
281 if (!pci->cfg.win) 223 if (!pci->cfg.win)
282 return -ENOMEM; 224 return -ENOMEM;
283 225
284 /* Limit the bus-range to fit within reg */
285 bus_max = pci->cfg.bus_range.start +
286 (resource_size(&pci->cfg.res) >> pci->cfg.ops->bus_shift) - 1;
287 pci->cfg.bus_range.end = min_t(resource_size_t, pci->cfg.bus_range.end,
288 bus_max);
289
290 /* Map our Configuration Space windows */ 226 /* Map our Configuration Space windows */
291 if (!devm_request_mem_region(dev, pci->cfg.res.start, 227 if (!devm_request_mem_region(dev, pci->cfg.res.start,
292 resource_size(&pci->cfg.res), 228 resource_size(&pci->cfg.res),
293 "Configuration Space")) 229 "Configuration Space"))
294 return -ENOMEM; 230 return -ENOMEM;
295 231
296 bus_range = &pci->cfg.bus_range; 232 bus_range = pci->cfg.bus_range;
297 for (busn = bus_range->start; busn <= bus_range->end; ++busn) { 233 for (busn = bus_range->start; busn <= bus_range->end; ++busn) {
298 u32 idx = busn - bus_range->start; 234 u32 idx = busn - bus_range->start;
299 u32 sz = 1 << pci->cfg.ops->bus_shift; 235 u32 sz = 1 << pci->cfg.ops->bus_shift;
@@ -305,8 +241,6 @@ static int gen_pci_parse_map_cfg_windows(struct gen_pci *pci)
305 return -ENOMEM; 241 return -ENOMEM;
306 } 242 }
307 243
308 /* Register bus resource */
309 pci_add_resource(&pci->resources, bus_range);
310 return 0; 244 return 0;
311} 245}
312 246
diff --git a/drivers/pci/host/pci-imx6.c b/drivers/pci/host/pci-imx6.c
index 69202d1eb8fb..d1a26d17b586 100644
--- a/drivers/pci/host/pci-imx6.c
+++ b/drivers/pci/host/pci-imx6.c
@@ -533,8 +533,8 @@ static int __init imx6_add_pcie_port(struct pcie_port *pp,
533 } 533 }
534 534
535 ret = devm_request_irq(&pdev->dev, pp->msi_irq, 535 ret = devm_request_irq(&pdev->dev, pp->msi_irq,
536 imx6_pcie_msi_handler, 536 imx6_pcie_msi_handler,
537 IRQF_SHARED, "mx6-pcie-msi", pp); 537 IRQF_SHARED, "mx6-pcie-msi", pp);
538 if (ret) { 538 if (ret) {
539 dev_err(&pdev->dev, "failed to request MSI irq\n"); 539 dev_err(&pdev->dev, "failed to request MSI irq\n");
540 return -ENODEV; 540 return -ENODEV;
diff --git a/drivers/pci/host/pci-keystone-dw.c b/drivers/pci/host/pci-keystone-dw.c
index 34086ce88e8e..66d8ea41b972 100644
--- a/drivers/pci/host/pci-keystone-dw.c
+++ b/drivers/pci/host/pci-keystone-dw.c
@@ -155,7 +155,7 @@ static void ks_dw_pcie_msi_irq_mask(struct irq_data *d)
155 /* Mask the end point if PVM implemented */ 155 /* Mask the end point if PVM implemented */
156 if (IS_ENABLED(CONFIG_PCI_MSI)) { 156 if (IS_ENABLED(CONFIG_PCI_MSI)) {
157 if (msi->msi_attrib.maskbit) 157 if (msi->msi_attrib.maskbit)
158 mask_msi_irq(d); 158 pci_msi_mask_irq(d);
159 } 159 }
160 160
161 ks_dw_pcie_msi_clear_irq(pp, offset); 161 ks_dw_pcie_msi_clear_irq(pp, offset);
@@ -177,7 +177,7 @@ static void ks_dw_pcie_msi_irq_unmask(struct irq_data *d)
177 /* Mask the end point if PVM implemented */ 177 /* Mask the end point if PVM implemented */
178 if (IS_ENABLED(CONFIG_PCI_MSI)) { 178 if (IS_ENABLED(CONFIG_PCI_MSI)) {
179 if (msi->msi_attrib.maskbit) 179 if (msi->msi_attrib.maskbit)
180 unmask_msi_irq(d); 180 pci_msi_unmask_irq(d);
181 } 181 }
182 182
183 ks_dw_pcie_msi_set_irq(pp, offset); 183 ks_dw_pcie_msi_set_irq(pp, offset);
@@ -201,11 +201,11 @@ static int ks_dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
201 return 0; 201 return 0;
202} 202}
203 203
204const struct irq_domain_ops ks_dw_pcie_msi_domain_ops = { 204static const struct irq_domain_ops ks_dw_pcie_msi_domain_ops = {
205 .map = ks_dw_pcie_msi_map, 205 .map = ks_dw_pcie_msi_map,
206}; 206};
207 207
208int ks_dw_pcie_msi_host_init(struct pcie_port *pp, struct msi_chip *chip) 208int ks_dw_pcie_msi_host_init(struct pcie_port *pp, struct msi_controller *chip)
209{ 209{
210 struct keystone_pcie *ks_pcie = to_keystone_pcie(pp); 210 struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
211 int i; 211 int i;
diff --git a/drivers/pci/host/pci-keystone.c b/drivers/pci/host/pci-keystone.c
index 1b893bc8b842..62b9454c86fb 100644
--- a/drivers/pci/host/pci-keystone.c
+++ b/drivers/pci/host/pci-keystone.c
@@ -353,10 +353,9 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
353 353
354 ks_pcie = devm_kzalloc(&pdev->dev, sizeof(*ks_pcie), 354 ks_pcie = devm_kzalloc(&pdev->dev, sizeof(*ks_pcie),
355 GFP_KERNEL); 355 GFP_KERNEL);
356 if (!ks_pcie) { 356 if (!ks_pcie)
357 dev_err(dev, "no memory for keystone pcie\n");
358 return -ENOMEM; 357 return -ENOMEM;
359 } 358
360 pp = &ks_pcie->pp; 359 pp = &ks_pcie->pp;
361 360
362 /* initialize SerDes Phy if present */ 361 /* initialize SerDes Phy if present */
diff --git a/drivers/pci/host/pci-keystone.h b/drivers/pci/host/pci-keystone.h
index 1fc1fceede9e..478d932b602d 100644
--- a/drivers/pci/host/pci-keystone.h
+++ b/drivers/pci/host/pci-keystone.h
@@ -55,4 +55,4 @@ void ks_dw_pcie_msi_set_irq(struct pcie_port *pp, int irq);
55void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq); 55void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq);
56void ks_dw_pcie_v3_65_scan_bus(struct pcie_port *pp); 56void ks_dw_pcie_v3_65_scan_bus(struct pcie_port *pp);
57int ks_dw_pcie_msi_host_init(struct pcie_port *pp, 57int ks_dw_pcie_msi_host_init(struct pcie_port *pp,
58 struct msi_chip *chip); 58 struct msi_controller *chip);
diff --git a/drivers/pci/host/pci-layerscape.c b/drivers/pci/host/pci-layerscape.c
new file mode 100644
index 000000000000..6697b1a4d4fa
--- /dev/null
+++ b/drivers/pci/host/pci-layerscape.c
@@ -0,0 +1,179 @@
1/*
2 * PCIe host controller driver for Freescale Layerscape SoCs
3 *
4 * Copyright (C) 2014 Freescale Semiconductor.
5 *
6 * Author: Minghuan Lian <Minghuan.Lian@freescale.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/kernel.h>
14#include <linux/delay.h>
15#include <linux/interrupt.h>
16#include <linux/module.h>
17#include <linux/of_pci.h>
18#include <linux/of_platform.h>
19#include <linux/of_irq.h>
20#include <linux/of_address.h>
21#include <linux/pci.h>
22#include <linux/platform_device.h>
23#include <linux/resource.h>
24#include <linux/mfd/syscon.h>
25#include <linux/regmap.h>
26
27#include "pcie-designware.h"
28
/* PEX1/2 Misc Ports Status Register */
#define SCFG_PEXMSCPORTSR(pex_idx)	(0x94 + (pex_idx) * 4)
/* LTSSM link-training state field within SCFG_PEXMSCPORTSR */
#define LTSSM_STATE_SHIFT	20
#define LTSSM_STATE_MASK	0x3f
#define LTSSM_PCIE_L0		0x11 /* L0 state */

/* Symbol Timer Register and Filter Mask Register 1 */
#define PCIE_STRFMR1		0x71c
37
/* Driver state for one Layerscape PCIe root-complex instance. */
struct ls_pcie {
	struct list_head node;	/* list linkage; not used in this file — TODO confirm */
	struct device *dev;	/* backing platform device (set in probe) */
	struct pci_bus *bus;	/* root bus; not set in this file — TODO confirm */
	void __iomem *dbi;	/* mapped "regs" (DBI) register space */
	struct regmap *scfg;	/* SCFG syscon regmap from "fsl,pcie-scfg" */
	struct pcie_port pp;	/* embedded DesignWare core port state */
	int index;		/* controller index in SCFG (2nd "fsl,pcie-scfg" cell) */
	int msi_irq;		/* MSI interrupt; not used in this file — TODO confirm */
};
48
/* Map an embedded pcie_port pointer back to its ls_pcie container. */
#define to_ls_pcie(x)	container_of(x, struct ls_pcie, pp)
50
51static int ls_pcie_link_up(struct pcie_port *pp)
52{
53 u32 state;
54 struct ls_pcie *pcie = to_ls_pcie(pp);
55
56 regmap_read(pcie->scfg, SCFG_PEXMSCPORTSR(pcie->index), &state);
57 state = (state >> LTSSM_STATE_SHIFT) & LTSSM_STATE_MASK;
58
59 if (state < LTSSM_PCIE_L0)
60 return 0;
61
62 return 1;
63}
64
65static void ls_pcie_host_init(struct pcie_port *pp)
66{
67 struct ls_pcie *pcie = to_ls_pcie(pp);
68 int count = 0;
69 u32 val;
70
71 dw_pcie_setup_rc(pp);
72
73 while (!ls_pcie_link_up(pp)) {
74 usleep_range(100, 1000);
75 count++;
76 if (count >= 200) {
77 dev_err(pp->dev, "phy link never came up\n");
78 return;
79 }
80 }
81
82 /*
83 * LS1021A Workaround for internal TKT228622
84 * to fix the INTx hang issue
85 */
86 val = ioread32(pcie->dbi + PCIE_STRFMR1);
87 val &= 0xffff;
88 iowrite32(val, pcie->dbi + PCIE_STRFMR1);
89}
90
/* Hooks the DesignWare PCIe core calls back into this driver. */
static struct pcie_host_ops ls_pcie_host_ops = {
	.link_up = ls_pcie_link_up,
	.host_init = ls_pcie_host_init,
};
95
96static int ls_add_pcie_port(struct ls_pcie *pcie)
97{
98 struct pcie_port *pp;
99 int ret;
100
101 pp = &pcie->pp;
102 pp->dev = pcie->dev;
103 pp->dbi_base = pcie->dbi;
104 pp->root_bus_nr = -1;
105 pp->ops = &ls_pcie_host_ops;
106
107 ret = dw_pcie_host_init(pp);
108 if (ret) {
109 dev_err(pp->dev, "failed to initialize host\n");
110 return ret;
111 }
112
113 return 0;
114}
115
116static int __init ls_pcie_probe(struct platform_device *pdev)
117{
118 struct ls_pcie *pcie;
119 struct resource *dbi_base;
120 u32 index[2];
121 int ret;
122
123 pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
124 if (!pcie)
125 return -ENOMEM;
126
127 pcie->dev = &pdev->dev;
128
129 dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
130 if (!dbi_base) {
131 dev_err(&pdev->dev, "missing *regs* space\n");
132 return -ENODEV;
133 }
134
135 pcie->dbi = devm_ioremap_resource(&pdev->dev, dbi_base);
136 if (IS_ERR(pcie->dbi))
137 return PTR_ERR(pcie->dbi);
138
139 pcie->scfg = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
140 "fsl,pcie-scfg");
141 if (IS_ERR(pcie->scfg)) {
142 dev_err(&pdev->dev, "No syscfg phandle specified\n");
143 return PTR_ERR(pcie->scfg);
144 }
145
146 ret = of_property_read_u32_array(pdev->dev.of_node,
147 "fsl,pcie-scfg", index, 2);
148 if (ret)
149 return ret;
150 pcie->index = index[1];
151
152 ret = ls_add_pcie_port(pcie);
153 if (ret < 0)
154 return ret;
155
156 platform_set_drvdata(pdev, pcie);
157
158 return 0;
159}
160
/* Device-tree compatibles this driver binds to. */
static const struct of_device_id ls_pcie_of_match[] = {
	{ .compatible = "fsl,ls1021a-pcie" },
	{ },
};
MODULE_DEVICE_TABLE(of, ls_pcie_of_match);
166
167static struct platform_driver ls_pcie_driver = {
168 .driver = {
169 .name = "layerscape-pcie",
170 .owner = THIS_MODULE,
171 .of_match_table = ls_pcie_of_match,
172 },
173};
174
/* Register the driver and bind ls_pcie_probe once at init time. */
module_platform_driver_probe(ls_pcie_driver, ls_pcie_probe);

MODULE_AUTHOR("Minghuan Lian <Minghuan.Lian@freescale.com>");
MODULE_DESCRIPTION("Freescale Layerscape PCIe host controller driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
index b1315e197ffb..fed3fab132f2 100644
--- a/drivers/pci/host/pci-mvebu.c
+++ b/drivers/pci/host/pci-mvebu.c
@@ -99,7 +99,7 @@ struct mvebu_pcie_port;
99struct mvebu_pcie { 99struct mvebu_pcie {
100 struct platform_device *pdev; 100 struct platform_device *pdev;
101 struct mvebu_pcie_port *ports; 101 struct mvebu_pcie_port *ports;
102 struct msi_chip *msi; 102 struct msi_controller *msi;
103 struct resource io; 103 struct resource io;
104 char io_name[30]; 104 char io_name[30];
105 struct resource realio; 105 struct resource realio;
@@ -622,6 +622,7 @@ static struct mvebu_pcie_port *mvebu_pcie_find_port(struct mvebu_pcie *pcie,
622 622
623 for (i = 0; i < pcie->nports; i++) { 623 for (i = 0; i < pcie->nports; i++) {
624 struct mvebu_pcie_port *port = &pcie->ports[i]; 624 struct mvebu_pcie_port *port = &pcie->ports[i];
625
625 if (bus->number == 0 && port->devfn == devfn) 626 if (bus->number == 0 && port->devfn == devfn)
626 return port; 627 return port;
627 if (bus->number != 0 && 628 if (bus->number != 0 &&
@@ -751,6 +752,7 @@ static int mvebu_pcie_setup(int nr, struct pci_sys_data *sys)
751 752
752 for (i = 0; i < pcie->nports; i++) { 753 for (i = 0; i < pcie->nports; i++) {
753 struct mvebu_pcie_port *port = &pcie->ports[i]; 754 struct mvebu_pcie_port *port = &pcie->ports[i];
755
754 if (!port->base) 756 if (!port->base)
755 continue; 757 continue;
756 mvebu_pcie_setup_hw(port); 758 mvebu_pcie_setup_hw(port);
@@ -774,12 +776,6 @@ static struct pci_bus *mvebu_pcie_scan_bus(int nr, struct pci_sys_data *sys)
774 return bus; 776 return bus;
775} 777}
776 778
777static void mvebu_pcie_add_bus(struct pci_bus *bus)
778{
779 struct mvebu_pcie *pcie = sys_to_pcie(bus->sysdata);
780 bus->msi = pcie->msi;
781}
782
783static resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev, 779static resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev,
784 const struct resource *res, 780 const struct resource *res,
785 resource_size_t start, 781 resource_size_t start,
@@ -816,6 +812,10 @@ static void mvebu_pcie_enable(struct mvebu_pcie *pcie)
816 812
817 memset(&hw, 0, sizeof(hw)); 813 memset(&hw, 0, sizeof(hw));
818 814
815#ifdef CONFIG_PCI_MSI
816 hw.msi_ctrl = pcie->msi;
817#endif
818
819 hw.nr_controllers = 1; 819 hw.nr_controllers = 1;
820 hw.private_data = (void **)&pcie; 820 hw.private_data = (void **)&pcie;
821 hw.setup = mvebu_pcie_setup; 821 hw.setup = mvebu_pcie_setup;
@@ -823,7 +823,6 @@ static void mvebu_pcie_enable(struct mvebu_pcie *pcie)
823 hw.map_irq = of_irq_parse_and_map_pci; 823 hw.map_irq = of_irq_parse_and_map_pci;
824 hw.ops = &mvebu_pcie_ops; 824 hw.ops = &mvebu_pcie_ops;
825 hw.align_resource = mvebu_pcie_align_resource; 825 hw.align_resource = mvebu_pcie_align_resource;
826 hw.add_bus = mvebu_pcie_add_bus;
827 826
828 pci_common_init(&hw); 827 pci_common_init(&hw);
829} 828}
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index 3d43874319be..feccfa6b6c11 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -238,7 +238,7 @@
238 ) 238 )
239 239
240struct tegra_msi { 240struct tegra_msi {
241 struct msi_chip chip; 241 struct msi_controller chip;
242 DECLARE_BITMAP(used, INT_PCI_MSI_NR); 242 DECLARE_BITMAP(used, INT_PCI_MSI_NR);
243 struct irq_domain *domain; 243 struct irq_domain *domain;
244 unsigned long pages; 244 unsigned long pages;
@@ -259,7 +259,7 @@ struct tegra_pcie_soc_data {
259 bool has_gen2; 259 bool has_gen2;
260}; 260};
261 261
262static inline struct tegra_msi *to_tegra_msi(struct msi_chip *chip) 262static inline struct tegra_msi *to_tegra_msi(struct msi_controller *chip)
263{ 263{
264 return container_of(chip, struct tegra_msi, chip); 264 return container_of(chip, struct tegra_msi, chip);
265} 265}
@@ -276,6 +276,7 @@ struct tegra_pcie {
276 276
277 struct resource all; 277 struct resource all;
278 struct resource io; 278 struct resource io;
279 struct resource pio;
279 struct resource mem; 280 struct resource mem;
280 struct resource prefetch; 281 struct resource prefetch;
281 struct resource busn; 282 struct resource busn;
@@ -658,7 +659,6 @@ static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
658{ 659{
659 struct tegra_pcie *pcie = sys_to_pcie(sys); 660 struct tegra_pcie *pcie = sys_to_pcie(sys);
660 int err; 661 int err;
661 phys_addr_t io_start;
662 662
663 err = devm_request_resource(pcie->dev, &pcie->all, &pcie->mem); 663 err = devm_request_resource(pcie->dev, &pcie->all, &pcie->mem);
664 if (err < 0) 664 if (err < 0)
@@ -668,14 +668,12 @@ static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
668 if (err) 668 if (err)
669 return err; 669 return err;
670 670
671 io_start = pci_pio_to_address(pcie->io.start);
672
673 pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset); 671 pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset);
674 pci_add_resource_offset(&sys->resources, &pcie->prefetch, 672 pci_add_resource_offset(&sys->resources, &pcie->prefetch,
675 sys->mem_offset); 673 sys->mem_offset);
676 pci_add_resource(&sys->resources, &pcie->busn); 674 pci_add_resource(&sys->resources, &pcie->busn);
677 675
678 pci_ioremap_io(nr * SZ_64K, io_start); 676 pci_ioremap_io(pcie->pio.start, pcie->io.start);
679 677
680 return 1; 678 return 1;
681} 679}
@@ -694,15 +692,6 @@ static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
694 return irq; 692 return irq;
695} 693}
696 694
697static void tegra_pcie_add_bus(struct pci_bus *bus)
698{
699 if (IS_ENABLED(CONFIG_PCI_MSI)) {
700 struct tegra_pcie *pcie = sys_to_pcie(bus->sysdata);
701
702 bus->msi = &pcie->msi.chip;
703 }
704}
705
706static struct pci_bus *tegra_pcie_scan_bus(int nr, struct pci_sys_data *sys) 695static struct pci_bus *tegra_pcie_scan_bus(int nr, struct pci_sys_data *sys)
707{ 696{
708 struct tegra_pcie *pcie = sys_to_pcie(sys); 697 struct tegra_pcie *pcie = sys_to_pcie(sys);
@@ -786,7 +775,6 @@ static irqreturn_t tegra_pcie_isr(int irq, void *arg)
786static void tegra_pcie_setup_translations(struct tegra_pcie *pcie) 775static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
787{ 776{
788 u32 fpci_bar, size, axi_address; 777 u32 fpci_bar, size, axi_address;
789 phys_addr_t io_start = pci_pio_to_address(pcie->io.start);
790 778
791 /* Bar 0: type 1 extended configuration space */ 779 /* Bar 0: type 1 extended configuration space */
792 fpci_bar = 0xfe100000; 780 fpci_bar = 0xfe100000;
@@ -799,7 +787,7 @@ static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
799 /* Bar 1: downstream IO bar */ 787 /* Bar 1: downstream IO bar */
800 fpci_bar = 0xfdfc0000; 788 fpci_bar = 0xfdfc0000;
801 size = resource_size(&pcie->io); 789 size = resource_size(&pcie->io);
802 axi_address = io_start; 790 axi_address = pcie->io.start;
803 afi_writel(pcie, axi_address, AFI_AXI_BAR1_START); 791 afi_writel(pcie, axi_address, AFI_AXI_BAR1_START);
804 afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ); 792 afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ);
805 afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1); 793 afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1);
@@ -1283,8 +1271,8 @@ static irqreturn_t tegra_pcie_msi_irq(int irq, void *data)
1283 return processed > 0 ? IRQ_HANDLED : IRQ_NONE; 1271 return processed > 0 ? IRQ_HANDLED : IRQ_NONE;
1284} 1272}
1285 1273
1286static int tegra_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev, 1274static int tegra_msi_setup_irq(struct msi_controller *chip,
1287 struct msi_desc *desc) 1275 struct pci_dev *pdev, struct msi_desc *desc)
1288{ 1276{
1289 struct tegra_msi *msi = to_tegra_msi(chip); 1277 struct tegra_msi *msi = to_tegra_msi(chip);
1290 struct msi_msg msg; 1278 struct msi_msg msg;
@@ -1308,12 +1296,13 @@ static int tegra_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev,
1308 msg.address_hi = 0; 1296 msg.address_hi = 0;
1309 msg.data = hwirq; 1297 msg.data = hwirq;
1310 1298
1311 write_msi_msg(irq, &msg); 1299 pci_write_msi_msg(irq, &msg);
1312 1300
1313 return 0; 1301 return 0;
1314} 1302}
1315 1303
1316static void tegra_msi_teardown_irq(struct msi_chip *chip, unsigned int irq) 1304static void tegra_msi_teardown_irq(struct msi_controller *chip,
1305 unsigned int irq)
1317{ 1306{
1318 struct tegra_msi *msi = to_tegra_msi(chip); 1307 struct tegra_msi *msi = to_tegra_msi(chip);
1319 struct irq_data *d = irq_get_irq_data(irq); 1308 struct irq_data *d = irq_get_irq_data(irq);
@@ -1325,10 +1314,10 @@ static void tegra_msi_teardown_irq(struct msi_chip *chip, unsigned int irq)
1325 1314
1326static struct irq_chip tegra_msi_irq_chip = { 1315static struct irq_chip tegra_msi_irq_chip = {
1327 .name = "Tegra PCIe MSI", 1316 .name = "Tegra PCIe MSI",
1328 .irq_enable = unmask_msi_irq, 1317 .irq_enable = pci_msi_unmask_irq,
1329 .irq_disable = mask_msi_irq, 1318 .irq_disable = pci_msi_mask_irq,
1330 .irq_mask = mask_msi_irq, 1319 .irq_mask = pci_msi_mask_irq,
1331 .irq_unmask = unmask_msi_irq, 1320 .irq_unmask = pci_msi_unmask_irq,
1332}; 1321};
1333 1322
1334static int tegra_msi_map(struct irq_domain *domain, unsigned int irq, 1323static int tegra_msi_map(struct irq_domain *domain, unsigned int irq,
@@ -1690,8 +1679,23 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
1690 1679
1691 switch (res.flags & IORESOURCE_TYPE_BITS) { 1680 switch (res.flags & IORESOURCE_TYPE_BITS) {
1692 case IORESOURCE_IO: 1681 case IORESOURCE_IO:
1693 memcpy(&pcie->io, &res, sizeof(res)); 1682 memcpy(&pcie->pio, &res, sizeof(res));
1694 pcie->io.name = np->full_name; 1683 pcie->pio.name = np->full_name;
1684
1685 /*
1686 * The Tegra PCIe host bridge uses this to program the
1687 * mapping of the I/O space to the physical address,
1688 * so we override the .start and .end fields here that
1689 * of_pci_range_to_resource() converted to I/O space.
1690 * We also set the IORESOURCE_MEM type to clarify that
1691 * the resource is in the physical memory space.
1692 */
1693 pcie->io.start = range.cpu_addr;
1694 pcie->io.end = range.cpu_addr + range.size - 1;
1695 pcie->io.flags = IORESOURCE_MEM;
1696 pcie->io.name = "I/O";
1697
1698 memcpy(&res, &pcie->io, sizeof(res));
1695 break; 1699 break;
1696 1700
1697 case IORESOURCE_MEM: 1701 case IORESOURCE_MEM:
@@ -1881,11 +1885,14 @@ static int tegra_pcie_enable(struct tegra_pcie *pcie)
1881 1885
1882 memset(&hw, 0, sizeof(hw)); 1886 memset(&hw, 0, sizeof(hw));
1883 1887
1888#ifdef CONFIG_PCI_MSI
1889 hw.msi_ctrl = &pcie->msi.chip;
1890#endif
1891
1884 hw.nr_controllers = 1; 1892 hw.nr_controllers = 1;
1885 hw.private_data = (void **)&pcie; 1893 hw.private_data = (void **)&pcie;
1886 hw.setup = tegra_pcie_setup; 1894 hw.setup = tegra_pcie_setup;
1887 hw.map_irq = tegra_pcie_map_irq; 1895 hw.map_irq = tegra_pcie_map_irq;
1888 hw.add_bus = tegra_pcie_add_bus;
1889 hw.scan = tegra_pcie_scan_bus; 1896 hw.scan = tegra_pcie_scan_bus;
1890 hw.ops = &tegra_pcie_ops; 1897 hw.ops = &tegra_pcie_ops;
1891 1898
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index dfed00aa3ac0..df781cdf13c1 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -152,10 +152,10 @@ static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
152 152
153static struct irq_chip dw_msi_irq_chip = { 153static struct irq_chip dw_msi_irq_chip = {
154 .name = "PCI-MSI", 154 .name = "PCI-MSI",
155 .irq_enable = unmask_msi_irq, 155 .irq_enable = pci_msi_unmask_irq,
156 .irq_disable = mask_msi_irq, 156 .irq_disable = pci_msi_mask_irq,
157 .irq_mask = mask_msi_irq, 157 .irq_mask = pci_msi_mask_irq,
158 .irq_unmask = unmask_msi_irq, 158 .irq_unmask = pci_msi_unmask_irq,
159}; 159};
160 160
161/* MSI int handler */ 161/* MSI int handler */
@@ -276,7 +276,7 @@ no_valid_irq:
276 return -ENOSPC; 276 return -ENOSPC;
277} 277}
278 278
279static int dw_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev, 279static int dw_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev,
280 struct msi_desc *desc) 280 struct msi_desc *desc)
281{ 281{
282 int irq, pos; 282 int irq, pos;
@@ -298,12 +298,12 @@ static int dw_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev,
298 else 298 else
299 msg.data = pos; 299 msg.data = pos;
300 300
301 write_msi_msg(irq, &msg); 301 pci_write_msi_msg(irq, &msg);
302 302
303 return 0; 303 return 0;
304} 304}
305 305
306static void dw_msi_teardown_irq(struct msi_chip *chip, unsigned int irq) 306static void dw_msi_teardown_irq(struct msi_controller *chip, unsigned int irq)
307{ 307{
308 struct irq_data *data = irq_get_irq_data(irq); 308 struct irq_data *data = irq_get_irq_data(irq);
309 struct msi_desc *msi = irq_data_get_msi(data); 309 struct msi_desc *msi = irq_data_get_msi(data);
@@ -312,7 +312,7 @@ static void dw_msi_teardown_irq(struct msi_chip *chip, unsigned int irq)
312 clear_irq_range(pp, irq, 1, data->hwirq); 312 clear_irq_range(pp, irq, 1, data->hwirq);
313} 313}
314 314
315static struct msi_chip dw_pcie_msi_chip = { 315static struct msi_controller dw_pcie_msi_chip = {
316 .setup_irq = dw_msi_setup_irq, 316 .setup_irq = dw_msi_setup_irq,
317 .teardown_irq = dw_msi_teardown_irq, 317 .teardown_irq = dw_msi_teardown_irq,
318}; 318};
@@ -380,6 +380,7 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
380 /* Get the I/O and memory ranges from DT */ 380 /* Get the I/O and memory ranges from DT */
381 for_each_of_pci_range(&parser, &range) { 381 for_each_of_pci_range(&parser, &range) {
382 unsigned long restype = range.flags & IORESOURCE_TYPE_BITS; 382 unsigned long restype = range.flags & IORESOURCE_TYPE_BITS;
383
383 if (restype == IORESOURCE_IO) { 384 if (restype == IORESOURCE_IO) {
384 of_pci_range_to_resource(&range, np, &pp->io); 385 of_pci_range_to_resource(&range, np, &pp->io);
385 pp->io.name = "I/O"; 386 pp->io.name = "I/O";
@@ -498,6 +499,11 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
498 val |= PORT_LOGIC_SPEED_CHANGE; 499 val |= PORT_LOGIC_SPEED_CHANGE;
499 dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val); 500 dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);
500 501
502#ifdef CONFIG_PCI_MSI
503 dw_pcie_msi_chip.dev = pp->dev;
504 dw_pci.msi_ctrl = &dw_pcie_msi_chip;
505#endif
506
501 dw_pci.nr_controllers = 1; 507 dw_pci.nr_controllers = 1;
502 dw_pci.private_data = (void **)&pp; 508 dw_pci.private_data = (void **)&pp;
503 509
@@ -747,21 +753,10 @@ static int dw_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
747 return irq; 753 return irq;
748} 754}
749 755
750static void dw_pcie_add_bus(struct pci_bus *bus)
751{
752 if (IS_ENABLED(CONFIG_PCI_MSI)) {
753 struct pcie_port *pp = sys_to_pcie(bus->sysdata);
754
755 dw_pcie_msi_chip.dev = pp->dev;
756 bus->msi = &dw_pcie_msi_chip;
757 }
758}
759
760static struct hw_pci dw_pci = { 756static struct hw_pci dw_pci = {
761 .setup = dw_pcie_setup, 757 .setup = dw_pcie_setup,
762 .scan = dw_pcie_scan_bus, 758 .scan = dw_pcie_scan_bus,
763 .map_irq = dw_pcie_map_irq, 759 .map_irq = dw_pcie_map_irq,
764 .add_bus = dw_pcie_add_bus,
765}; 760};
766 761
767void dw_pcie_setup_rc(struct pcie_port *pp) 762void dw_pcie_setup_rc(struct pcie_port *pp)
diff --git a/drivers/pci/host/pcie-designware.h b/drivers/pci/host/pcie-designware.h
index c6256751daff..d0bbd276840d 100644
--- a/drivers/pci/host/pcie-designware.h
+++ b/drivers/pci/host/pcie-designware.h
@@ -73,7 +73,7 @@ struct pcie_host_ops {
73 u32 (*get_msi_addr)(struct pcie_port *pp); 73 u32 (*get_msi_addr)(struct pcie_port *pp);
74 u32 (*get_msi_data)(struct pcie_port *pp, int pos); 74 u32 (*get_msi_data)(struct pcie_port *pp, int pos);
75 void (*scan_bus)(struct pcie_port *pp); 75 void (*scan_bus)(struct pcie_port *pp);
76 int (*msi_host_init)(struct pcie_port *pp, struct msi_chip *chip); 76 int (*msi_host_init)(struct pcie_port *pp, struct msi_controller *chip);
77}; 77};
78 78
79int dw_pcie_cfg_read(void __iomem *addr, int where, int size, u32 *val); 79int dw_pcie_cfg_read(void __iomem *addr, int where, int size, u32 *val);
diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c
index 61158e03ab5f..5519e939e412 100644
--- a/drivers/pci/host/pcie-rcar.c
+++ b/drivers/pci/host/pcie-rcar.c
@@ -111,14 +111,14 @@
111struct rcar_msi { 111struct rcar_msi {
112 DECLARE_BITMAP(used, INT_PCI_MSI_NR); 112 DECLARE_BITMAP(used, INT_PCI_MSI_NR);
113 struct irq_domain *domain; 113 struct irq_domain *domain;
114 struct msi_chip chip; 114 struct msi_controller chip;
115 unsigned long pages; 115 unsigned long pages;
116 struct mutex lock; 116 struct mutex lock;
117 int irq1; 117 int irq1;
118 int irq2; 118 int irq2;
119}; 119};
120 120
121static inline struct rcar_msi *to_rcar_msi(struct msi_chip *chip) 121static inline struct rcar_msi *to_rcar_msi(struct msi_controller *chip)
122{ 122{
123 return container_of(chip, struct rcar_msi, chip); 123 return container_of(chip, struct rcar_msi, chip);
124} 124}
@@ -380,20 +380,10 @@ static int rcar_pcie_setup(int nr, struct pci_sys_data *sys)
380 return 1; 380 return 1;
381} 381}
382 382
383static void rcar_pcie_add_bus(struct pci_bus *bus) 383static struct hw_pci rcar_pci = {
384{
385 if (IS_ENABLED(CONFIG_PCI_MSI)) {
386 struct rcar_pcie *pcie = sys_to_pcie(bus->sysdata);
387
388 bus->msi = &pcie->msi.chip;
389 }
390}
391
392struct hw_pci rcar_pci = {
393 .setup = rcar_pcie_setup, 384 .setup = rcar_pcie_setup,
394 .map_irq = of_irq_parse_and_map_pci, 385 .map_irq = of_irq_parse_and_map_pci,
395 .ops = &rcar_pcie_ops, 386 .ops = &rcar_pcie_ops,
396 .add_bus = rcar_pcie_add_bus,
397}; 387};
398 388
399static void rcar_pcie_enable(struct rcar_pcie *pcie) 389static void rcar_pcie_enable(struct rcar_pcie *pcie)
@@ -402,6 +392,9 @@ static void rcar_pcie_enable(struct rcar_pcie *pcie)
402 392
403 rcar_pci.nr_controllers = 1; 393 rcar_pci.nr_controllers = 1;
404 rcar_pci.private_data = (void **)&pcie; 394 rcar_pci.private_data = (void **)&pcie;
395#ifdef CONFIG_PCI_MSI
396 rcar_pci.msi_ctrl = &pcie->msi.chip;
397#endif
405 398
406 pci_common_init_dev(&pdev->dev, &rcar_pci); 399 pci_common_init_dev(&pdev->dev, &rcar_pci);
407#ifdef CONFIG_PCI_DOMAINS 400#ifdef CONFIG_PCI_DOMAINS
@@ -622,7 +615,7 @@ static irqreturn_t rcar_pcie_msi_irq(int irq, void *data)
622 return IRQ_HANDLED; 615 return IRQ_HANDLED;
623} 616}
624 617
625static int rcar_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev, 618static int rcar_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev,
626 struct msi_desc *desc) 619 struct msi_desc *desc)
627{ 620{
628 struct rcar_msi *msi = to_rcar_msi(chip); 621 struct rcar_msi *msi = to_rcar_msi(chip);
@@ -647,12 +640,12 @@ static int rcar_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev,
647 msg.address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR); 640 msg.address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR);
648 msg.data = hwirq; 641 msg.data = hwirq;
649 642
650 write_msi_msg(irq, &msg); 643 pci_write_msi_msg(irq, &msg);
651 644
652 return 0; 645 return 0;
653} 646}
654 647
655static void rcar_msi_teardown_irq(struct msi_chip *chip, unsigned int irq) 648static void rcar_msi_teardown_irq(struct msi_controller *chip, unsigned int irq)
656{ 649{
657 struct rcar_msi *msi = to_rcar_msi(chip); 650 struct rcar_msi *msi = to_rcar_msi(chip);
658 struct irq_data *d = irq_get_irq_data(irq); 651 struct irq_data *d = irq_get_irq_data(irq);
@@ -662,10 +655,10 @@ static void rcar_msi_teardown_irq(struct msi_chip *chip, unsigned int irq)
662 655
663static struct irq_chip rcar_msi_irq_chip = { 656static struct irq_chip rcar_msi_irq_chip = {
664 .name = "R-Car PCIe MSI", 657 .name = "R-Car PCIe MSI",
665 .irq_enable = unmask_msi_irq, 658 .irq_enable = pci_msi_unmask_irq,
666 .irq_disable = mask_msi_irq, 659 .irq_disable = pci_msi_mask_irq,
667 .irq_mask = mask_msi_irq, 660 .irq_mask = pci_msi_mask_irq,
668 .irq_unmask = unmask_msi_irq, 661 .irq_unmask = pci_msi_unmask_irq,
669}; 662};
670 663
671static int rcar_msi_map(struct irq_domain *domain, unsigned int irq, 664static int rcar_msi_map(struct irq_domain *domain, unsigned int irq,
diff --git a/drivers/pci/host/pcie-spear13xx.c b/drivers/pci/host/pcie-spear13xx.c
index 85f594e1708f..2ca10cc887ee 100644
--- a/drivers/pci/host/pcie-spear13xx.c
+++ b/drivers/pci/host/pcie-spear13xx.c
@@ -269,7 +269,8 @@ static struct pcie_host_ops spear13xx_pcie_host_ops = {
269 .host_init = spear13xx_pcie_host_init, 269 .host_init = spear13xx_pcie_host_init,
270}; 270};
271 271
272static int add_pcie_port(struct pcie_port *pp, struct platform_device *pdev) 272static int __init spear13xx_add_pcie_port(struct pcie_port *pp,
273 struct platform_device *pdev)
273{ 274{
274 struct device *dev = &pdev->dev; 275 struct device *dev = &pdev->dev;
275 int ret; 276 int ret;
@@ -308,10 +309,8 @@ static int __init spear13xx_pcie_probe(struct platform_device *pdev)
308 int ret; 309 int ret;
309 310
310 spear13xx_pcie = devm_kzalloc(dev, sizeof(*spear13xx_pcie), GFP_KERNEL); 311 spear13xx_pcie = devm_kzalloc(dev, sizeof(*spear13xx_pcie), GFP_KERNEL);
311 if (!spear13xx_pcie) { 312 if (!spear13xx_pcie)
312 dev_err(dev, "no memory for SPEAr13xx pcie\n");
313 return -ENOMEM; 313 return -ENOMEM;
314 }
315 314
316 spear13xx_pcie->phy = devm_phy_get(dev, "pcie-phy"); 315 spear13xx_pcie->phy = devm_phy_get(dev, "pcie-phy");
317 if (IS_ERR(spear13xx_pcie->phy)) { 316 if (IS_ERR(spear13xx_pcie->phy)) {
@@ -352,7 +351,7 @@ static int __init spear13xx_pcie_probe(struct platform_device *pdev)
352 if (of_property_read_bool(np, "st,pcie-is-gen1")) 351 if (of_property_read_bool(np, "st,pcie-is-gen1"))
353 spear13xx_pcie->is_gen1 = true; 352 spear13xx_pcie->is_gen1 = true;
354 353
355 ret = add_pcie_port(pp, pdev); 354 ret = spear13xx_add_pcie_port(pp, pdev);
356 if (ret < 0) 355 if (ret < 0)
357 goto fail_clk; 356 goto fail_clk;
358 357
@@ -382,11 +381,11 @@ static struct platform_driver spear13xx_pcie_driver __initdata = {
382 381
383/* SPEAr13xx PCIe driver does not allow module unload */ 382/* SPEAr13xx PCIe driver does not allow module unload */
384 383
385static int __init pcie_init(void) 384static int __init spear13xx_pcie_init(void)
386{ 385{
387 return platform_driver_register(&spear13xx_pcie_driver); 386 return platform_driver_register(&spear13xx_pcie_driver);
388} 387}
389module_init(pcie_init); 388module_init(spear13xx_pcie_init);
390 389
391MODULE_DESCRIPTION("ST Microelectronics SPEAr13xx PCIe host controller driver"); 390MODULE_DESCRIPTION("ST Microelectronics SPEAr13xx PCIe host controller driver");
392MODULE_AUTHOR("Pratyush Anand <pratyush.anand@st.com>"); 391MODULE_AUTHOR("Pratyush Anand <pratyush.anand@st.com>");
diff --git a/drivers/pci/host/pcie-xilinx.c b/drivers/pci/host/pcie-xilinx.c
index ccc496b33a97..2f50fa5953fd 100644
--- a/drivers/pci/host/pcie-xilinx.c
+++ b/drivers/pci/host/pcie-xilinx.c
@@ -335,7 +335,8 @@ static int xilinx_pcie_assign_msi(struct xilinx_pcie_port *port)
335 * @chip: MSI Chip descriptor 335 * @chip: MSI Chip descriptor
336 * @irq: MSI IRQ to destroy 336 * @irq: MSI IRQ to destroy
337 */ 337 */
338static void xilinx_msi_teardown_irq(struct msi_chip *chip, unsigned int irq) 338static void xilinx_msi_teardown_irq(struct msi_controller *chip,
339 unsigned int irq)
339{ 340{
340 xilinx_pcie_destroy_msi(irq); 341 xilinx_pcie_destroy_msi(irq);
341} 342}
@@ -348,7 +349,7 @@ static void xilinx_msi_teardown_irq(struct msi_chip *chip, unsigned int irq)
348 * 349 *
349 * Return: '0' on success and error value on failure 350 * Return: '0' on success and error value on failure
350 */ 351 */
351static int xilinx_pcie_msi_setup_irq(struct msi_chip *chip, 352static int xilinx_pcie_msi_setup_irq(struct msi_controller *chip,
352 struct pci_dev *pdev, 353 struct pci_dev *pdev,
353 struct msi_desc *desc) 354 struct msi_desc *desc)
354{ 355{
@@ -374,13 +375,13 @@ static int xilinx_pcie_msi_setup_irq(struct msi_chip *chip,
374 msg.address_lo = msg_addr; 375 msg.address_lo = msg_addr;
375 msg.data = irq; 376 msg.data = irq;
376 377
377 write_msi_msg(irq, &msg); 378 pci_write_msi_msg(irq, &msg);
378 379
379 return 0; 380 return 0;
380} 381}
381 382
382/* MSI Chip Descriptor */ 383/* MSI Chip Descriptor */
383static struct msi_chip xilinx_pcie_msi_chip = { 384static struct msi_controller xilinx_pcie_msi_chip = {
384 .setup_irq = xilinx_pcie_msi_setup_irq, 385 .setup_irq = xilinx_pcie_msi_setup_irq,
385 .teardown_irq = xilinx_msi_teardown_irq, 386 .teardown_irq = xilinx_msi_teardown_irq,
386}; 387};
@@ -388,10 +389,10 @@ static struct msi_chip xilinx_pcie_msi_chip = {
388/* HW Interrupt Chip Descriptor */ 389/* HW Interrupt Chip Descriptor */
389static struct irq_chip xilinx_msi_irq_chip = { 390static struct irq_chip xilinx_msi_irq_chip = {
390 .name = "Xilinx PCIe MSI", 391 .name = "Xilinx PCIe MSI",
391 .irq_enable = unmask_msi_irq, 392 .irq_enable = pci_msi_unmask_irq,
392 .irq_disable = mask_msi_irq, 393 .irq_disable = pci_msi_mask_irq,
393 .irq_mask = mask_msi_irq, 394 .irq_mask = pci_msi_mask_irq,
394 .irq_unmask = unmask_msi_irq, 395 .irq_unmask = pci_msi_unmask_irq,
395}; 396};
396 397
397/** 398/**
@@ -431,20 +432,6 @@ static void xilinx_pcie_enable_msi(struct xilinx_pcie_port *port)
431 pcie_write(port, msg_addr, XILINX_PCIE_REG_MSIBASE2); 432 pcie_write(port, msg_addr, XILINX_PCIE_REG_MSIBASE2);
432} 433}
433 434
434/**
435 * xilinx_pcie_add_bus - Add MSI chip info to PCIe bus
436 * @bus: PCIe bus
437 */
438static void xilinx_pcie_add_bus(struct pci_bus *bus)
439{
440 if (IS_ENABLED(CONFIG_PCI_MSI)) {
441 struct xilinx_pcie_port *port = sys_to_pcie(bus->sysdata);
442
443 xilinx_pcie_msi_chip.dev = port->dev;
444 bus->msi = &xilinx_pcie_msi_chip;
445 }
446}
447
448/* INTx Functions */ 435/* INTx Functions */
449 436
450/** 437/**
@@ -924,10 +911,14 @@ static int xilinx_pcie_probe(struct platform_device *pdev)
924 .private_data = (void **)&port, 911 .private_data = (void **)&port,
925 .setup = xilinx_pcie_setup, 912 .setup = xilinx_pcie_setup,
926 .map_irq = of_irq_parse_and_map_pci, 913 .map_irq = of_irq_parse_and_map_pci,
927 .add_bus = xilinx_pcie_add_bus,
928 .scan = xilinx_pcie_scan_bus, 914 .scan = xilinx_pcie_scan_bus,
929 .ops = &xilinx_pcie_ops, 915 .ops = &xilinx_pcie_ops,
930 }; 916 };
917
918#ifdef CONFIG_PCI_MSI
919 xilinx_pcie_msi_chip.dev = port->dev;
920 hw.msi_ctrl = &xilinx_pcie_msi_chip;
921#endif
931 pci_common_init_dev(dev, &hw); 922 pci_common_init_dev(dev, &hw);
932 923
933 return 0; 924 return 0;
diff --git a/drivers/pci/hotplug/ibmphp_res.c b/drivers/pci/hotplug/ibmphp_res.c
index 219ba8090a37..f279060cf6e2 100644
--- a/drivers/pci/hotplug/ibmphp_res.c
+++ b/drivers/pci/hotplug/ibmphp_res.c
@@ -376,10 +376,7 @@ int __init ibmphp_rsrc_init (void)
376 if (rc) 376 if (rc)
377 return rc; 377 return rc;
378 } 378 }
379 rc = once_over (); /* This is to align ranges (so no -1) */ 379 return once_over (); /* This is to align ranges (so no -1) */
380 if (rc)
381 return rc;
382 return 0;
383} 380}
384 381
385/******************************************************************************** 382/********************************************************************************
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index 4d109c07294a..4b3a4eaad996 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -479,20 +479,16 @@ void pci_iov_release(struct pci_dev *dev)
479 * pci_iov_resource_bar - get position of the SR-IOV BAR 479 * pci_iov_resource_bar - get position of the SR-IOV BAR
480 * @dev: the PCI device 480 * @dev: the PCI device
481 * @resno: the resource number 481 * @resno: the resource number
482 * @type: the BAR type to be filled in
483 * 482 *
484 * Returns position of the BAR encapsulated in the SR-IOV capability. 483 * Returns position of the BAR encapsulated in the SR-IOV capability.
485 */ 484 */
486int pci_iov_resource_bar(struct pci_dev *dev, int resno, 485int pci_iov_resource_bar(struct pci_dev *dev, int resno)
487 enum pci_bar_type *type)
488{ 486{
489 if (resno < PCI_IOV_RESOURCES || resno > PCI_IOV_RESOURCE_END) 487 if (resno < PCI_IOV_RESOURCES || resno > PCI_IOV_RESOURCE_END)
490 return 0; 488 return 0;
491 489
492 BUG_ON(!dev->is_physfn); 490 BUG_ON(!dev->is_physfn);
493 491
494 *type = pci_bar_unknown;
495
496 return dev->sriov->pos + PCI_SRIOV_BAR + 492 return dev->sriov->pos + PCI_SRIOV_BAR +
497 4 * (resno - PCI_IOV_RESOURCES); 493 4 * (resno - PCI_IOV_RESOURCES);
498} 494}
@@ -510,13 +506,12 @@ int pci_iov_resource_bar(struct pci_dev *dev, int resno,
510resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno) 506resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno)
511{ 507{
512 struct resource tmp; 508 struct resource tmp;
513 enum pci_bar_type type; 509 int reg = pci_iov_resource_bar(dev, resno);
514 int reg = pci_iov_resource_bar(dev, resno, &type);
515 510
516 if (!reg) 511 if (!reg)
517 return 0; 512 return 0;
518 513
519 __pci_read_base(dev, type, &tmp, reg); 514 __pci_read_base(dev, pci_bar_unknown, &tmp, reg);
520 return resource_alignment(&tmp); 515 return resource_alignment(&tmp);
521} 516}
522 517
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 9fab30af0e75..fd60806d3fd0 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -19,19 +19,82 @@
19#include <linux/errno.h> 19#include <linux/errno.h>
20#include <linux/io.h> 20#include <linux/io.h>
21#include <linux/slab.h> 21#include <linux/slab.h>
22#include <linux/irqdomain.h>
22 23
23#include "pci.h" 24#include "pci.h"
24 25
25static int pci_msi_enable = 1; 26static int pci_msi_enable = 1;
27int pci_msi_ignore_mask;
26 28
27#define msix_table_size(flags) ((flags & PCI_MSIX_FLAGS_QSIZE) + 1) 29#define msix_table_size(flags) ((flags & PCI_MSIX_FLAGS_QSIZE) + 1)
28 30
31#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
32static struct irq_domain *pci_msi_default_domain;
33static DEFINE_MUTEX(pci_msi_domain_lock);
34
35struct irq_domain * __weak arch_get_pci_msi_domain(struct pci_dev *dev)
36{
37 return pci_msi_default_domain;
38}
39
40static struct irq_domain *pci_msi_get_domain(struct pci_dev *dev)
41{
42 struct irq_domain *domain = NULL;
43
44 if (dev->bus->msi)
45 domain = dev->bus->msi->domain;
46 if (!domain)
47 domain = arch_get_pci_msi_domain(dev);
48
49 return domain;
50}
51
52static int pci_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
53{
54 struct irq_domain *domain;
55
56 domain = pci_msi_get_domain(dev);
57 if (domain)
58 return pci_msi_domain_alloc_irqs(domain, dev, nvec, type);
59
60 return arch_setup_msi_irqs(dev, nvec, type);
61}
62
63static void pci_msi_teardown_msi_irqs(struct pci_dev *dev)
64{
65 struct irq_domain *domain;
66
67 domain = pci_msi_get_domain(dev);
68 if (domain)
69 pci_msi_domain_free_irqs(domain, dev);
70 else
71 arch_teardown_msi_irqs(dev);
72}
73#else
74#define pci_msi_setup_msi_irqs arch_setup_msi_irqs
75#define pci_msi_teardown_msi_irqs arch_teardown_msi_irqs
76#endif
29 77
30/* Arch hooks */ 78/* Arch hooks */
31 79
80struct msi_controller * __weak pcibios_msi_controller(struct pci_dev *dev)
81{
82 return NULL;
83}
84
85static struct msi_controller *pci_msi_controller(struct pci_dev *dev)
86{
87 struct msi_controller *msi_ctrl = dev->bus->msi;
88
89 if (msi_ctrl)
90 return msi_ctrl;
91
92 return pcibios_msi_controller(dev);
93}
94
32int __weak arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc) 95int __weak arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
33{ 96{
34 struct msi_chip *chip = dev->bus->msi; 97 struct msi_controller *chip = pci_msi_controller(dev);
35 int err; 98 int err;
36 99
37 if (!chip || !chip->setup_irq) 100 if (!chip || !chip->setup_irq)
@@ -48,7 +111,7 @@ int __weak arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
48 111
49void __weak arch_teardown_msi_irq(unsigned int irq) 112void __weak arch_teardown_msi_irq(unsigned int irq)
50{ 113{
51 struct msi_chip *chip = irq_get_chip_data(irq); 114 struct msi_controller *chip = irq_get_chip_data(irq);
52 115
53 if (!chip || !chip->teardown_irq) 116 if (!chip || !chip->teardown_irq)
54 return; 117 return;
@@ -85,19 +148,13 @@ int __weak arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
85 */ 148 */
86void default_teardown_msi_irqs(struct pci_dev *dev) 149void default_teardown_msi_irqs(struct pci_dev *dev)
87{ 150{
151 int i;
88 struct msi_desc *entry; 152 struct msi_desc *entry;
89 153
90 list_for_each_entry(entry, &dev->msi_list, list) { 154 list_for_each_entry(entry, &dev->msi_list, list)
91 int i, nvec; 155 if (entry->irq)
92 if (entry->irq == 0) 156 for (i = 0; i < entry->nvec_used; i++)
93 continue; 157 arch_teardown_msi_irq(entry->irq + i);
94 if (entry->nvec_used)
95 nvec = entry->nvec_used;
96 else
97 nvec = 1 << entry->msi_attrib.multiple;
98 for (i = 0; i < nvec; i++)
99 arch_teardown_msi_irq(entry->irq + i);
100 }
101} 158}
102 159
103void __weak arch_teardown_msi_irqs(struct pci_dev *dev) 160void __weak arch_teardown_msi_irqs(struct pci_dev *dev)
@@ -120,7 +177,7 @@ static void default_restore_msi_irq(struct pci_dev *dev, int irq)
120 } 177 }
121 178
122 if (entry) 179 if (entry)
123 __write_msi_msg(entry, &entry->msg); 180 __pci_write_msi_msg(entry, &entry->msg);
124} 181}
125 182
126void __weak arch_restore_msi_irqs(struct pci_dev *dev) 183void __weak arch_restore_msi_irqs(struct pci_dev *dev)
@@ -163,11 +220,11 @@ static inline __attribute_const__ u32 msi_mask(unsigned x)
163 * reliably as devices without an INTx disable bit will then generate a 220 * reliably as devices without an INTx disable bit will then generate a
164 * level IRQ which will never be cleared. 221 * level IRQ which will never be cleared.
165 */ 222 */
166u32 default_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) 223u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
167{ 224{
168 u32 mask_bits = desc->masked; 225 u32 mask_bits = desc->masked;
169 226
170 if (!desc->msi_attrib.maskbit) 227 if (pci_msi_ignore_mask || !desc->msi_attrib.maskbit)
171 return 0; 228 return 0;
172 229
173 mask_bits &= ~mask; 230 mask_bits &= ~mask;
@@ -177,14 +234,9 @@ u32 default_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
177 return mask_bits; 234 return mask_bits;
178} 235}
179 236
180__weak u32 arch_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
181{
182 return default_msi_mask_irq(desc, mask, flag);
183}
184
185static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) 237static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
186{ 238{
187 desc->masked = arch_msi_mask_irq(desc, mask, flag); 239 desc->masked = __pci_msi_desc_mask_irq(desc, mask, flag);
188} 240}
189 241
190/* 242/*
@@ -194,11 +246,15 @@ static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
194 * file. This saves a few milliseconds when initialising devices with lots 246 * file. This saves a few milliseconds when initialising devices with lots
195 * of MSI-X interrupts. 247 * of MSI-X interrupts.
196 */ 248 */
197u32 default_msix_mask_irq(struct msi_desc *desc, u32 flag) 249u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag)
198{ 250{
199 u32 mask_bits = desc->masked; 251 u32 mask_bits = desc->masked;
200 unsigned offset = desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE + 252 unsigned offset = desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
201 PCI_MSIX_ENTRY_VECTOR_CTRL; 253 PCI_MSIX_ENTRY_VECTOR_CTRL;
254
255 if (pci_msi_ignore_mask)
256 return 0;
257
202 mask_bits &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT; 258 mask_bits &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
203 if (flag) 259 if (flag)
204 mask_bits |= PCI_MSIX_ENTRY_CTRL_MASKBIT; 260 mask_bits |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
@@ -207,14 +263,9 @@ u32 default_msix_mask_irq(struct msi_desc *desc, u32 flag)
207 return mask_bits; 263 return mask_bits;
208} 264}
209 265
210__weak u32 arch_msix_mask_irq(struct msi_desc *desc, u32 flag)
211{
212 return default_msix_mask_irq(desc, flag);
213}
214
215static void msix_mask_irq(struct msi_desc *desc, u32 flag) 266static void msix_mask_irq(struct msi_desc *desc, u32 flag)
216{ 267{
217 desc->masked = arch_msix_mask_irq(desc, flag); 268 desc->masked = __pci_msix_desc_mask_irq(desc, flag);
218} 269}
219 270
220static void msi_set_mask_bit(struct irq_data *data, u32 flag) 271static void msi_set_mask_bit(struct irq_data *data, u32 flag)
@@ -230,12 +281,20 @@ static void msi_set_mask_bit(struct irq_data *data, u32 flag)
230 } 281 }
231} 282}
232 283
233void mask_msi_irq(struct irq_data *data) 284/**
285 * pci_msi_mask_irq - Generic irq chip callback to mask PCI/MSI interrupts
286 * @data: pointer to irqdata associated to that interrupt
287 */
288void pci_msi_mask_irq(struct irq_data *data)
234{ 289{
235 msi_set_mask_bit(data, 1); 290 msi_set_mask_bit(data, 1);
236} 291}
237 292
238void unmask_msi_irq(struct irq_data *data) 293/**
294 * pci_msi_unmask_irq - Generic irq chip callback to unmask PCI/MSI interrupts
295 * @data: pointer to irqdata associated to that interrupt
296 */
297void pci_msi_unmask_irq(struct irq_data *data)
239{ 298{
240 msi_set_mask_bit(data, 0); 299 msi_set_mask_bit(data, 0);
241} 300}
@@ -244,12 +303,11 @@ void default_restore_msi_irqs(struct pci_dev *dev)
244{ 303{
245 struct msi_desc *entry; 304 struct msi_desc *entry;
246 305
247 list_for_each_entry(entry, &dev->msi_list, list) { 306 list_for_each_entry(entry, &dev->msi_list, list)
248 default_restore_msi_irq(dev, entry->irq); 307 default_restore_msi_irq(dev, entry->irq);
249 }
250} 308}
251 309
252void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg) 310void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
253{ 311{
254 BUG_ON(entry->dev->current_state != PCI_D0); 312 BUG_ON(entry->dev->current_state != PCI_D0);
255 313
@@ -279,32 +337,7 @@ void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
279 } 337 }
280} 338}
281 339
282void read_msi_msg(unsigned int irq, struct msi_msg *msg) 340void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
283{
284 struct msi_desc *entry = irq_get_msi_desc(irq);
285
286 __read_msi_msg(entry, msg);
287}
288
289void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
290{
291 /* Assert that the cache is valid, assuming that
292 * valid messages are not all-zeroes. */
293 BUG_ON(!(entry->msg.address_hi | entry->msg.address_lo |
294 entry->msg.data));
295
296 *msg = entry->msg;
297}
298
299void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
300{
301 struct msi_desc *entry = irq_get_msi_desc(irq);
302
303 __get_cached_msi_msg(entry, msg);
304}
305EXPORT_SYMBOL_GPL(get_cached_msi_msg);
306
307void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
308{ 341{
309 if (entry->dev->current_state != PCI_D0) { 342 if (entry->dev->current_state != PCI_D0) {
310 /* Don't touch the hardware now */ 343 /* Don't touch the hardware now */
@@ -341,34 +374,27 @@ void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
341 entry->msg = *msg; 374 entry->msg = *msg;
342} 375}
343 376
344void write_msi_msg(unsigned int irq, struct msi_msg *msg) 377void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg)
345{ 378{
346 struct msi_desc *entry = irq_get_msi_desc(irq); 379 struct msi_desc *entry = irq_get_msi_desc(irq);
347 380
348 __write_msi_msg(entry, msg); 381 __pci_write_msi_msg(entry, msg);
349} 382}
350EXPORT_SYMBOL_GPL(write_msi_msg); 383EXPORT_SYMBOL_GPL(pci_write_msi_msg);
351 384
352static void free_msi_irqs(struct pci_dev *dev) 385static void free_msi_irqs(struct pci_dev *dev)
353{ 386{
354 struct msi_desc *entry, *tmp; 387 struct msi_desc *entry, *tmp;
355 struct attribute **msi_attrs; 388 struct attribute **msi_attrs;
356 struct device_attribute *dev_attr; 389 struct device_attribute *dev_attr;
357 int count = 0; 390 int i, count = 0;
358 391
359 list_for_each_entry(entry, &dev->msi_list, list) { 392 list_for_each_entry(entry, &dev->msi_list, list)
360 int i, nvec; 393 if (entry->irq)
361 if (!entry->irq) 394 for (i = 0; i < entry->nvec_used; i++)
362 continue; 395 BUG_ON(irq_has_action(entry->irq + i));
363 if (entry->nvec_used)
364 nvec = entry->nvec_used;
365 else
366 nvec = 1 << entry->msi_attrib.multiple;
367 for (i = 0; i < nvec; i++)
368 BUG_ON(irq_has_action(entry->irq + i));
369 }
370 396
371 arch_teardown_msi_irqs(dev); 397 pci_msi_teardown_msi_irqs(dev);
372 398
373 list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) { 399 list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) {
374 if (entry->msi_attrib.is_msix) { 400 if (entry->msi_attrib.is_msix) {
@@ -451,9 +477,8 @@ static void __pci_restore_msix_state(struct pci_dev *dev)
451 PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL); 477 PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL);
452 478
453 arch_restore_msi_irqs(dev); 479 arch_restore_msi_irqs(dev);
454 list_for_each_entry(entry, &dev->msi_list, list) { 480 list_for_each_entry(entry, &dev->msi_list, list)
455 msix_mask_irq(entry, entry->masked); 481 msix_mask_irq(entry, entry->masked);
456 }
457 482
458 msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0); 483 msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
459} 484}
@@ -497,9 +522,8 @@ static int populate_msi_sysfs(struct pci_dev *pdev)
497 int count = 0; 522 int count = 0;
498 523
499 /* Determine how many msi entries we have */ 524 /* Determine how many msi entries we have */
500 list_for_each_entry(entry, &pdev->msi_list, list) { 525 list_for_each_entry(entry, &pdev->msi_list, list)
501 ++num_msi; 526 ++num_msi;
502 }
503 if (!num_msi) 527 if (!num_msi)
504 return 0; 528 return 0;
505 529
@@ -559,7 +583,7 @@ error_attrs:
559 return ret; 583 return ret;
560} 584}
561 585
562static struct msi_desc *msi_setup_entry(struct pci_dev *dev) 586static struct msi_desc *msi_setup_entry(struct pci_dev *dev, int nvec)
563{ 587{
564 u16 control; 588 u16 control;
565 struct msi_desc *entry; 589 struct msi_desc *entry;
@@ -577,6 +601,8 @@ static struct msi_desc *msi_setup_entry(struct pci_dev *dev)
577 entry->msi_attrib.maskbit = !!(control & PCI_MSI_FLAGS_MASKBIT); 601 entry->msi_attrib.maskbit = !!(control & PCI_MSI_FLAGS_MASKBIT);
578 entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */ 602 entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */
579 entry->msi_attrib.multi_cap = (control & PCI_MSI_FLAGS_QMASK) >> 1; 603 entry->msi_attrib.multi_cap = (control & PCI_MSI_FLAGS_QMASK) >> 1;
604 entry->msi_attrib.multiple = ilog2(__roundup_pow_of_two(nvec));
605 entry->nvec_used = nvec;
580 606
581 if (control & PCI_MSI_FLAGS_64BIT) 607 if (control & PCI_MSI_FLAGS_64BIT)
582 entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_64; 608 entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_64;
@@ -590,6 +616,20 @@ static struct msi_desc *msi_setup_entry(struct pci_dev *dev)
590 return entry; 616 return entry;
591} 617}
592 618
619static int msi_verify_entries(struct pci_dev *dev)
620{
621 struct msi_desc *entry;
622
623 list_for_each_entry(entry, &dev->msi_list, list) {
624 if (!dev->no_64bit_msi || !entry->msg.address_hi)
625 continue;
626 dev_err(&dev->dev, "Device has broken 64-bit MSI but arch"
627 " tried to assign one above 4G\n");
628 return -EIO;
629 }
630 return 0;
631}
632
593/** 633/**
594 * msi_capability_init - configure device's MSI capability structure 634 * msi_capability_init - configure device's MSI capability structure
595 * @dev: pointer to the pci_dev data structure of MSI device function 635 * @dev: pointer to the pci_dev data structure of MSI device function
@@ -609,7 +649,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
609 649
610 msi_set_enable(dev, 0); /* Disable MSI during set up */ 650 msi_set_enable(dev, 0); /* Disable MSI during set up */
611 651
612 entry = msi_setup_entry(dev); 652 entry = msi_setup_entry(dev, nvec);
613 if (!entry) 653 if (!entry)
614 return -ENOMEM; 654 return -ENOMEM;
615 655
@@ -620,7 +660,14 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
620 list_add_tail(&entry->list, &dev->msi_list); 660 list_add_tail(&entry->list, &dev->msi_list);
621 661
622 /* Configure MSI capability structure */ 662 /* Configure MSI capability structure */
623 ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI); 663 ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
664 if (ret) {
665 msi_mask_irq(entry, mask, ~mask);
666 free_msi_irqs(dev);
667 return ret;
668 }
669
670 ret = msi_verify_entries(dev);
624 if (ret) { 671 if (ret) {
625 msi_mask_irq(entry, mask, ~mask); 672 msi_mask_irq(entry, mask, ~mask);
626 free_msi_irqs(dev); 673 free_msi_irqs(dev);
@@ -680,6 +727,7 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
680 entry->msi_attrib.entry_nr = entries[i].entry; 727 entry->msi_attrib.entry_nr = entries[i].entry;
681 entry->msi_attrib.default_irq = dev->irq; 728 entry->msi_attrib.default_irq = dev->irq;
682 entry->mask_base = base; 729 entry->mask_base = base;
730 entry->nvec_used = 1;
683 731
684 list_add_tail(&entry->list, &dev->msi_list); 732 list_add_tail(&entry->list, &dev->msi_list);
685 } 733 }
@@ -698,7 +746,6 @@ static void msix_program_entries(struct pci_dev *dev,
698 PCI_MSIX_ENTRY_VECTOR_CTRL; 746 PCI_MSIX_ENTRY_VECTOR_CTRL;
699 747
700 entries[i].vector = entry->irq; 748 entries[i].vector = entry->irq;
701 irq_set_msi_desc(entry->irq, entry);
702 entry->masked = readl(entry->mask_base + offset); 749 entry->masked = readl(entry->mask_base + offset);
703 msix_mask_irq(entry, 1); 750 msix_mask_irq(entry, 1);
704 i++; 751 i++;
@@ -735,10 +782,15 @@ static int msix_capability_init(struct pci_dev *dev,
735 if (ret) 782 if (ret)
736 return ret; 783 return ret;
737 784
738 ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX); 785 ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
739 if (ret) 786 if (ret)
740 goto out_avail; 787 goto out_avail;
741 788
789 /* Check if all MSI entries honor device restrictions */
790 ret = msi_verify_entries(dev);
791 if (ret)
792 goto out_free;
793
742 /* 794 /*
743 * Some devices require MSI-X to be enabled before we can touch the 795 * Some devices require MSI-X to be enabled before we can touch the
744 * MSI-X registers. We need to mask all the vectors to prevent 796 * MSI-X registers. We need to mask all the vectors to prevent
@@ -869,7 +921,7 @@ void pci_msi_shutdown(struct pci_dev *dev)
869 /* Return the device with MSI unmasked as initial states */ 921 /* Return the device with MSI unmasked as initial states */
870 mask = msi_mask(desc->msi_attrib.multi_cap); 922 mask = msi_mask(desc->msi_attrib.multi_cap);
871 /* Keep cached state to be restored */ 923 /* Keep cached state to be restored */
872 arch_msi_mask_irq(desc, mask, ~mask); 924 __pci_msi_desc_mask_irq(desc, mask, ~mask);
873 925
874 /* Restore dev->irq to its default pin-assertion irq */ 926 /* Restore dev->irq to its default pin-assertion irq */
875 dev->irq = desc->msi_attrib.default_irq; 927 dev->irq = desc->msi_attrib.default_irq;
@@ -967,7 +1019,7 @@ void pci_msix_shutdown(struct pci_dev *dev)
967 /* Return the device with MSI-X masked as initial states */ 1019 /* Return the device with MSI-X masked as initial states */
968 list_for_each_entry(entry, &dev->msi_list, list) { 1020 list_for_each_entry(entry, &dev->msi_list, list) {
969 /* Keep cached states to be restored */ 1021 /* Keep cached states to be restored */
970 arch_msix_mask_irq(entry, 1); 1022 __pci_msix_desc_mask_irq(entry, 1);
971 } 1023 }
972 1024
973 msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0); 1025 msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
@@ -1112,3 +1164,197 @@ int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
1112 return nvec; 1164 return nvec;
1113} 1165}
1114EXPORT_SYMBOL(pci_enable_msix_range); 1166EXPORT_SYMBOL(pci_enable_msix_range);
1167
1168#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
1169/**
1170 * pci_msi_domain_write_msg - Helper to write MSI message to PCI config space
1171 * @irq_data: Pointer to interrupt data of the MSI interrupt
1172 * @msg: Pointer to the message
1173 */
1174void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg)
1175{
1176 struct msi_desc *desc = irq_data->msi_desc;
1177
1178 /*
1179 * For MSI-X desc->irq is always equal to irq_data->irq. For
1180 * MSI only the first interrupt of MULTI MSI passes the test.
1181 */
1182 if (desc->irq == irq_data->irq)
1183 __pci_write_msi_msg(desc, msg);
1184}
1185
1186/**
1187 * pci_msi_domain_calc_hwirq - Generate a unique ID for an MSI source
1188 * @dev: Pointer to the PCI device
1189 * @desc: Pointer to the msi descriptor
1190 *
1191 * The ID number is only used within the irqdomain.
1192 */
1193irq_hw_number_t pci_msi_domain_calc_hwirq(struct pci_dev *dev,
1194 struct msi_desc *desc)
1195{
1196 return (irq_hw_number_t)desc->msi_attrib.entry_nr |
1197 PCI_DEVID(dev->bus->number, dev->devfn) << 11 |
1198 (pci_domain_nr(dev->bus) & 0xFFFFFFFF) << 27;
1199}
1200
1201static inline bool pci_msi_desc_is_multi_msi(struct msi_desc *desc)
1202{
1203 return !desc->msi_attrib.is_msix && desc->nvec_used > 1;
1204}
1205
1206/**
1207 * pci_msi_domain_check_cap - Verify that @domain supports the capabilities for @dev
1208 * @domain: The interrupt domain to check
1209 * @info: The domain info for verification
1210 * @dev: The device to check
1211 *
1212 * Returns:
1213 * 0 if the functionality is supported
1214 * 1 if Multi MSI is requested, but the domain does not support it
1215 * -ENOTSUPP otherwise
1216 */
1217int pci_msi_domain_check_cap(struct irq_domain *domain,
1218 struct msi_domain_info *info, struct device *dev)
1219{
1220 struct msi_desc *desc = first_pci_msi_entry(to_pci_dev(dev));
1221
1222 /* Special handling to support pci_enable_msi_range() */
1223 if (pci_msi_desc_is_multi_msi(desc) &&
1224 !(info->flags & MSI_FLAG_MULTI_PCI_MSI))
1225 return 1;
1226 else if (desc->msi_attrib.is_msix && !(info->flags & MSI_FLAG_PCI_MSIX))
1227 return -ENOTSUPP;
1228
1229 return 0;
1230}
1231
1232static int pci_msi_domain_handle_error(struct irq_domain *domain,
1233 struct msi_desc *desc, int error)
1234{
1235 /* Special handling to support pci_enable_msi_range() */
1236 if (pci_msi_desc_is_multi_msi(desc) && error == -ENOSPC)
1237 return 1;
1238
1239 return error;
1240}
1241
1242#ifdef GENERIC_MSI_DOMAIN_OPS
1243static void pci_msi_domain_set_desc(msi_alloc_info_t *arg,
1244 struct msi_desc *desc)
1245{
1246 arg->desc = desc;
1247 arg->hwirq = pci_msi_domain_calc_hwirq(msi_desc_to_pci_dev(desc),
1248 desc);
1249}
1250#else
1251#define pci_msi_domain_set_desc NULL
1252#endif
1253
1254static struct msi_domain_ops pci_msi_domain_ops_default = {
1255 .set_desc = pci_msi_domain_set_desc,
1256 .msi_check = pci_msi_domain_check_cap,
1257 .handle_error = pci_msi_domain_handle_error,
1258};
1259
1260static void pci_msi_domain_update_dom_ops(struct msi_domain_info *info)
1261{
1262 struct msi_domain_ops *ops = info->ops;
1263
1264 if (ops == NULL) {
1265 info->ops = &pci_msi_domain_ops_default;
1266 } else {
1267 if (ops->set_desc == NULL)
1268 ops->set_desc = pci_msi_domain_set_desc;
1269 if (ops->msi_check == NULL)
1270 ops->msi_check = pci_msi_domain_check_cap;
1271 if (ops->handle_error == NULL)
1272 ops->handle_error = pci_msi_domain_handle_error;
1273 }
1274}
1275
1276static void pci_msi_domain_update_chip_ops(struct msi_domain_info *info)
1277{
1278 struct irq_chip *chip = info->chip;
1279
1280 BUG_ON(!chip);
1281 if (!chip->irq_write_msi_msg)
1282 chip->irq_write_msi_msg = pci_msi_domain_write_msg;
1283}
1284
1285/**
1286 * pci_msi_create_irq_domain - Creat a MSI interrupt domain
1287 * @node: Optional device-tree node of the interrupt controller
1288 * @info: MSI domain info
1289 * @parent: Parent irq domain
1290 *
1291 * Updates the domain and chip ops and creates a MSI interrupt domain.
1292 *
1293 * Returns:
1294 * A domain pointer or NULL in case of failure.
1295 */
1296struct irq_domain *pci_msi_create_irq_domain(struct device_node *node,
1297 struct msi_domain_info *info,
1298 struct irq_domain *parent)
1299{
1300 if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS)
1301 pci_msi_domain_update_dom_ops(info);
1302 if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
1303 pci_msi_domain_update_chip_ops(info);
1304
1305 return msi_create_irq_domain(node, info, parent);
1306}
1307
1308/**
1309 * pci_msi_domain_alloc_irqs - Allocate interrupts for @dev in @domain
1310 * @domain: The interrupt domain to allocate from
1311 * @dev: The device for which to allocate
1312 * @nvec: The number of interrupts to allocate
1313 * @type: Unused to allow simpler migration from the arch_XXX interfaces
1314 *
1315 * Returns:
1316 * A virtual interrupt number or an error code in case of failure
1317 */
1318int pci_msi_domain_alloc_irqs(struct irq_domain *domain, struct pci_dev *dev,
1319 int nvec, int type)
1320{
1321 return msi_domain_alloc_irqs(domain, &dev->dev, nvec);
1322}
1323
1324/**
1325 * pci_msi_domain_free_irqs - Free interrupts for @dev in @domain
1326 * @domain: The interrupt domain
1327 * @dev: The device for which to free interrupts
1328 */
1329void pci_msi_domain_free_irqs(struct irq_domain *domain, struct pci_dev *dev)
1330{
1331 msi_domain_free_irqs(domain, &dev->dev);
1332}
1333
1334/**
1335 * pci_msi_create_default_irq_domain - Create a default MSI interrupt domain
1336 * @node: Optional device-tree node of the interrupt controller
1337 * @info: MSI domain info
1338 * @parent: Parent irq domain
1339 *
1340 * Returns: A domain pointer or NULL in case of failure. If successful
1341 * the default PCI/MSI irqdomain pointer is updated.
1342 */
1343struct irq_domain *pci_msi_create_default_irq_domain(struct device_node *node,
1344 struct msi_domain_info *info, struct irq_domain *parent)
1345{
1346 struct irq_domain *domain;
1347
1348 mutex_lock(&pci_msi_domain_lock);
1349 if (pci_msi_default_domain) {
1350 pr_err("PCI: default irq domain for PCI MSI has already been created.\n");
1351 domain = NULL;
1352 } else {
1353 domain = pci_msi_create_irq_domain(node, info, parent);
1354 pci_msi_default_domain = domain;
1355 }
1356 mutex_unlock(&pci_msi_domain_lock);
1357
1358 return domain;
1359}
1360#endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 6ebf8edc5f3c..3542150fc8a3 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -322,8 +322,7 @@ static void pci_acpi_wake_dev(struct work_struct *work)
322 pci_wakeup_event(pci_dev); 322 pci_wakeup_event(pci_dev);
323 pm_runtime_resume(&pci_dev->dev); 323 pm_runtime_resume(&pci_dev->dev);
324 324
325 if (pci_dev->subordinate) 325 pci_pme_wakeup_bus(pci_dev->subordinate);
326 pci_pme_wakeup_bus(pci_dev->subordinate);
327} 326}
328 327
329/** 328/**
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 2b3c89425bb5..887e6bd95af7 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -1104,7 +1104,7 @@ static int pci_pm_restore(struct device *dev)
1104 1104
1105#endif /* !CONFIG_HIBERNATE_CALLBACKS */ 1105#endif /* !CONFIG_HIBERNATE_CALLBACKS */
1106 1106
1107#ifdef CONFIG_PM_RUNTIME 1107#ifdef CONFIG_PM
1108 1108
1109static int pci_pm_runtime_suspend(struct device *dev) 1109static int pci_pm_runtime_suspend(struct device *dev)
1110{ 1110{
@@ -1200,16 +1200,6 @@ static int pci_pm_runtime_idle(struct device *dev)
1200 return ret; 1200 return ret;
1201} 1201}
1202 1202
1203#else /* !CONFIG_PM_RUNTIME */
1204
1205#define pci_pm_runtime_suspend NULL
1206#define pci_pm_runtime_resume NULL
1207#define pci_pm_runtime_idle NULL
1208
1209#endif /* !CONFIG_PM_RUNTIME */
1210
1211#ifdef CONFIG_PM
1212
1213static const struct dev_pm_ops pci_dev_pm_ops = { 1203static const struct dev_pm_ops pci_dev_pm_ops = {
1214 .prepare = pci_pm_prepare, 1204 .prepare = pci_pm_prepare,
1215 .suspend = pci_pm_suspend, 1205 .suspend = pci_pm_suspend,
@@ -1231,11 +1221,15 @@ static const struct dev_pm_ops pci_dev_pm_ops = {
1231 1221
1232#define PCI_PM_OPS_PTR (&pci_dev_pm_ops) 1222#define PCI_PM_OPS_PTR (&pci_dev_pm_ops)
1233 1223
1234#else /* !COMFIG_PM_OPS */ 1224#else /* !CONFIG_PM */
1225
1226#define pci_pm_runtime_suspend NULL
1227#define pci_pm_runtime_resume NULL
1228#define pci_pm_runtime_idle NULL
1235 1229
1236#define PCI_PM_OPS_PTR NULL 1230#define PCI_PM_OPS_PTR NULL
1237 1231
1238#endif /* !COMFIG_PM_OPS */ 1232#endif /* !CONFIG_PM */
1239 1233
1240/** 1234/**
1241 * __pci_register_driver - register a new pci driver 1235 * __pci_register_driver - register a new pci driver
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 2c6643fdc0cf..a62acc443d5b 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -221,12 +221,37 @@ static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
221static DEVICE_ATTR_RW(enable); 221static DEVICE_ATTR_RW(enable);
222 222
223#ifdef CONFIG_NUMA 223#ifdef CONFIG_NUMA
224static ssize_t numa_node_store(struct device *dev,
225 struct device_attribute *attr, const char *buf,
226 size_t count)
227{
228 struct pci_dev *pdev = to_pci_dev(dev);
229 int node, ret;
230
231 if (!capable(CAP_SYS_ADMIN))
232 return -EPERM;
233
234 ret = kstrtoint(buf, 0, &node);
235 if (ret)
236 return ret;
237
238 if (!node_online(node))
239 return -EINVAL;
240
241 add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
242 dev_alert(&pdev->dev, FW_BUG "Overriding NUMA node to %d. Contact your vendor for updates.",
243 node);
244
245 dev->numa_node = node;
246 return count;
247}
248
224static ssize_t numa_node_show(struct device *dev, struct device_attribute *attr, 249static ssize_t numa_node_show(struct device *dev, struct device_attribute *attr,
225 char *buf) 250 char *buf)
226{ 251{
227 return sprintf(buf, "%d\n", dev->numa_node); 252 return sprintf(buf, "%d\n", dev->numa_node);
228} 253}
229static DEVICE_ATTR_RO(numa_node); 254static DEVICE_ATTR_RW(numa_node);
230#endif 255#endif
231 256
232static ssize_t dma_mask_bits_show(struct device *dev, 257static ssize_t dma_mask_bits_show(struct device *dev,
@@ -385,7 +410,7 @@ static ssize_t dev_bus_rescan_store(struct device *dev,
385} 410}
386static DEVICE_ATTR(rescan, (S_IWUSR|S_IWGRP), NULL, dev_bus_rescan_store); 411static DEVICE_ATTR(rescan, (S_IWUSR|S_IWGRP), NULL, dev_bus_rescan_store);
387 412
388#if defined(CONFIG_PM_RUNTIME) && defined(CONFIG_ACPI) 413#if defined(CONFIG_PM) && defined(CONFIG_ACPI)
389static ssize_t d3cold_allowed_store(struct device *dev, 414static ssize_t d3cold_allowed_store(struct device *dev,
390 struct device_attribute *attr, 415 struct device_attribute *attr,
391 const char *buf, size_t count) 416 const char *buf, size_t count)
@@ -566,7 +591,7 @@ static struct attribute *pci_dev_attrs[] = {
566 &dev_attr_enable.attr, 591 &dev_attr_enable.attr,
567 &dev_attr_broken_parity_status.attr, 592 &dev_attr_broken_parity_status.attr,
568 &dev_attr_msi_bus.attr, 593 &dev_attr_msi_bus.attr,
569#if defined(CONFIG_PM_RUNTIME) && defined(CONFIG_ACPI) 594#if defined(CONFIG_PM) && defined(CONFIG_ACPI)
570 &dev_attr_d3cold_allowed.attr, 595 &dev_attr_d3cold_allowed.attr,
571#endif 596#endif
572#ifdef CONFIG_OF 597#ifdef CONFIG_OF
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 625a4ace10b4..a7ac72639c52 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1012,11 +1012,7 @@ int pci_save_state(struct pci_dev *dev)
1012 if (i != 0) 1012 if (i != 0)
1013 return i; 1013 return i;
1014 1014
1015 i = pci_save_vc_state(dev); 1015 return pci_save_vc_state(dev);
1016 if (i != 0)
1017 return i;
1018
1019 return 0;
1020} 1016}
1021EXPORT_SYMBOL(pci_save_state); 1017EXPORT_SYMBOL(pci_save_state);
1022 1018
@@ -3144,12 +3140,10 @@ static int pcie_flr(struct pci_dev *dev, int probe)
3144 return 0; 3140 return 0;
3145 3141
3146 if (!pci_wait_for_pending_transaction(dev)) 3142 if (!pci_wait_for_pending_transaction(dev))
3147 dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n"); 3143 dev_err(&dev->dev, "timed out waiting for pending transaction; performing function level reset anyway\n");
3148 3144
3149 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR); 3145 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
3150
3151 msleep(100); 3146 msleep(100);
3152
3153 return 0; 3147 return 0;
3154} 3148}
3155 3149
@@ -3174,16 +3168,12 @@ static int pci_af_flr(struct pci_dev *dev, int probe)
3174 * is used, so we use the conrol offset rather than status and shift 3168 * is used, so we use the conrol offset rather than status and shift
3175 * the test bit to match. 3169 * the test bit to match.
3176 */ 3170 */
3177 if (pci_wait_for_pending(dev, pos + PCI_AF_CTRL, 3171 if (!pci_wait_for_pending(dev, pos + PCI_AF_CTRL,
3178 PCI_AF_STATUS_TP << 8)) 3172 PCI_AF_STATUS_TP << 8))
3179 goto clear; 3173 dev_err(&dev->dev, "timed out waiting for pending transaction; performing AF function level reset anyway\n");
3180
3181 dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n");
3182 3174
3183clear:
3184 pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR); 3175 pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
3185 msleep(100); 3176 msleep(100);
3186
3187 return 0; 3177 return 0;
3188} 3178}
3189 3179
@@ -4180,7 +4170,8 @@ int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
4180 return dev->rom_base_reg; 4170 return dev->rom_base_reg;
4181 } else if (resno < PCI_BRIDGE_RESOURCES) { 4171 } else if (resno < PCI_BRIDGE_RESOURCES) {
4182 /* device specific resource */ 4172 /* device specific resource */
4183 reg = pci_iov_resource_bar(dev, resno, type); 4173 *type = pci_bar_unknown;
4174 reg = pci_iov_resource_bar(dev, resno);
4184 if (reg) 4175 if (reg)
4185 return reg; 4176 return reg;
4186 } 4177 }
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 4a3902d8e6fe..8aff29a804ff 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -253,8 +253,7 @@ static inline void pci_restore_ats_state(struct pci_dev *dev)
253#ifdef CONFIG_PCI_IOV 253#ifdef CONFIG_PCI_IOV
254int pci_iov_init(struct pci_dev *dev); 254int pci_iov_init(struct pci_dev *dev);
255void pci_iov_release(struct pci_dev *dev); 255void pci_iov_release(struct pci_dev *dev);
256int pci_iov_resource_bar(struct pci_dev *dev, int resno, 256int pci_iov_resource_bar(struct pci_dev *dev, int resno);
257 enum pci_bar_type *type);
258resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno); 257resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno);
259void pci_restore_iov_state(struct pci_dev *dev); 258void pci_restore_iov_state(struct pci_dev *dev);
260int pci_iov_bus_range(struct pci_bus *bus); 259int pci_iov_bus_range(struct pci_bus *bus);
@@ -268,8 +267,7 @@ static inline void pci_iov_release(struct pci_dev *dev)
268 267
269{ 268{
270} 269}
271static inline int pci_iov_resource_bar(struct pci_dev *dev, int resno, 270static inline int pci_iov_resource_bar(struct pci_dev *dev, int resno)
272 enum pci_bar_type *type)
273{ 271{
274 return 0; 272 return 0;
275} 273}
diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig
index 7958e59d6077..e294713c8143 100644
--- a/drivers/pci/pcie/Kconfig
+++ b/drivers/pci/pcie/Kconfig
@@ -79,4 +79,4 @@ endchoice
79 79
80config PCIE_PME 80config PCIE_PME
81 def_bool y 81 def_bool y
82 depends on PCIEPORTBUS && PM_RUNTIME 82 depends on PCIEPORTBUS && PM
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index c8ca98c2b480..23212f8ae09b 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -87,8 +87,7 @@ static void release_pcibus_dev(struct device *dev)
87{ 87{
88 struct pci_bus *pci_bus = to_pci_bus(dev); 88 struct pci_bus *pci_bus = to_pci_bus(dev);
89 89
90 if (pci_bus->bridge) 90 put_device(pci_bus->bridge);
91 put_device(pci_bus->bridge);
92 pci_bus_remove_resources(pci_bus); 91 pci_bus_remove_resources(pci_bus);
93 pci_release_bus_of_node(pci_bus); 92 pci_release_bus_of_node(pci_bus);
94 kfree(pci_bus); 93 kfree(pci_bus);
@@ -175,7 +174,6 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
175 u64 l64, sz64, mask64; 174 u64 l64, sz64, mask64;
176 u16 orig_cmd; 175 u16 orig_cmd;
177 struct pci_bus_region region, inverted_region; 176 struct pci_bus_region region, inverted_region;
178 bool bar_too_big = false, bar_too_high = false, bar_invalid = false;
179 177
180 mask = type ? PCI_ROM_ADDRESS_MASK : ~0; 178 mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
181 179
@@ -201,8 +199,8 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
201 * memory BAR or a ROM, bit 0 must be clear; if it's an io BAR, bit 199 * memory BAR or a ROM, bit 0 must be clear; if it's an io BAR, bit
202 * 1 must be clear. 200 * 1 must be clear.
203 */ 201 */
204 if (!sz || sz == 0xffffffff) 202 if (sz == 0xffffffff)
205 goto fail; 203 sz = 0;
206 204
207 /* 205 /*
208 * I don't know how l can have all bits set. Copied from old code. 206 * I don't know how l can have all bits set. Copied from old code.
@@ -215,23 +213,22 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
215 res->flags = decode_bar(dev, l); 213 res->flags = decode_bar(dev, l);
216 res->flags |= IORESOURCE_SIZEALIGN; 214 res->flags |= IORESOURCE_SIZEALIGN;
217 if (res->flags & IORESOURCE_IO) { 215 if (res->flags & IORESOURCE_IO) {
218 l &= PCI_BASE_ADDRESS_IO_MASK; 216 l64 = l & PCI_BASE_ADDRESS_IO_MASK;
219 mask = PCI_BASE_ADDRESS_IO_MASK & (u32) IO_SPACE_LIMIT; 217 sz64 = sz & PCI_BASE_ADDRESS_IO_MASK;
218 mask64 = PCI_BASE_ADDRESS_IO_MASK & (u32)IO_SPACE_LIMIT;
220 } else { 219 } else {
221 l &= PCI_BASE_ADDRESS_MEM_MASK; 220 l64 = l & PCI_BASE_ADDRESS_MEM_MASK;
222 mask = (u32)PCI_BASE_ADDRESS_MEM_MASK; 221 sz64 = sz & PCI_BASE_ADDRESS_MEM_MASK;
222 mask64 = (u32)PCI_BASE_ADDRESS_MEM_MASK;
223 } 223 }
224 } else { 224 } else {
225 res->flags |= (l & IORESOURCE_ROM_ENABLE); 225 res->flags |= (l & IORESOURCE_ROM_ENABLE);
226 l &= PCI_ROM_ADDRESS_MASK; 226 l64 = l & PCI_ROM_ADDRESS_MASK;
227 mask = (u32)PCI_ROM_ADDRESS_MASK; 227 sz64 = sz & PCI_ROM_ADDRESS_MASK;
228 mask64 = (u32)PCI_ROM_ADDRESS_MASK;
228 } 229 }
229 230
230 if (res->flags & IORESOURCE_MEM_64) { 231 if (res->flags & IORESOURCE_MEM_64) {
231 l64 = l;
232 sz64 = sz;
233 mask64 = mask | (u64)~0 << 32;
234
235 pci_read_config_dword(dev, pos + 4, &l); 232 pci_read_config_dword(dev, pos + 4, &l);
236 pci_write_config_dword(dev, pos + 4, ~0); 233 pci_write_config_dword(dev, pos + 4, ~0);
237 pci_read_config_dword(dev, pos + 4, &sz); 234 pci_read_config_dword(dev, pos + 4, &sz);
@@ -239,18 +236,30 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
239 236
240 l64 |= ((u64)l << 32); 237 l64 |= ((u64)l << 32);
241 sz64 |= ((u64)sz << 32); 238 sz64 |= ((u64)sz << 32);
239 mask64 |= ((u64)~0 << 32);
240 }
242 241
243 sz64 = pci_size(l64, sz64, mask64); 242 if (!dev->mmio_always_on && (orig_cmd & PCI_COMMAND_DECODE_ENABLE))
243 pci_write_config_word(dev, PCI_COMMAND, orig_cmd);
244 244
245 if (!sz64) 245 if (!sz64)
246 goto fail; 246 goto fail;
247 247
248 sz64 = pci_size(l64, sz64, mask64);
249 if (!sz64) {
250 dev_info(&dev->dev, FW_BUG "reg 0x%x: invalid BAR (can't size)\n",
251 pos);
252 goto fail;
253 }
254
255 if (res->flags & IORESOURCE_MEM_64) {
248 if ((sizeof(dma_addr_t) < 8 || sizeof(resource_size_t) < 8) && 256 if ((sizeof(dma_addr_t) < 8 || sizeof(resource_size_t) < 8) &&
249 sz64 > 0x100000000ULL) { 257 sz64 > 0x100000000ULL) {
250 res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED; 258 res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
251 res->start = 0; 259 res->start = 0;
252 res->end = 0; 260 res->end = 0;
253 bar_too_big = true; 261 dev_err(&dev->dev, "reg 0x%x: can't handle BAR larger than 4GB (size %#010llx)\n",
262 pos, (unsigned long long)sz64);
254 goto out; 263 goto out;
255 } 264 }
256 265
@@ -259,22 +268,15 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
259 res->flags |= IORESOURCE_UNSET; 268 res->flags |= IORESOURCE_UNSET;
260 res->start = 0; 269 res->start = 0;
261 res->end = sz64; 270 res->end = sz64;
262 bar_too_high = true; 271 dev_info(&dev->dev, "reg 0x%x: can't handle BAR above 4GB (bus address %#010llx)\n",
272 pos, (unsigned long long)l64);
263 goto out; 273 goto out;
264 } else {
265 region.start = l64;
266 region.end = l64 + sz64;
267 } 274 }
268 } else {
269 sz = pci_size(l, sz, mask);
270
271 if (!sz)
272 goto fail;
273
274 region.start = l;
275 region.end = l + sz;
276 } 275 }
277 276
277 region.start = l64;
278 region.end = l64 + sz64;
279
278 pcibios_bus_to_resource(dev->bus, res, &region); 280 pcibios_bus_to_resource(dev->bus, res, &region);
279 pcibios_resource_to_bus(dev->bus, &inverted_region, res); 281 pcibios_resource_to_bus(dev->bus, &inverted_region, res);
280 282
@@ -293,7 +295,8 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
293 res->flags |= IORESOURCE_UNSET; 295 res->flags |= IORESOURCE_UNSET;
294 res->start = 0; 296 res->start = 0;
295 res->end = region.end - region.start; 297 res->end = region.end - region.start;
296 bar_invalid = true; 298 dev_info(&dev->dev, "reg 0x%x: initial BAR value %#010llx invalid\n",
299 pos, (unsigned long long)region.start);
297 } 300 }
298 301
299 goto out; 302 goto out;
@@ -302,19 +305,6 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
302fail: 305fail:
303 res->flags = 0; 306 res->flags = 0;
304out: 307out:
305 if (!dev->mmio_always_on &&
306 (orig_cmd & PCI_COMMAND_DECODE_ENABLE))
307 pci_write_config_word(dev, PCI_COMMAND, orig_cmd);
308
309 if (bar_too_big)
310 dev_err(&dev->dev, "reg 0x%x: can't handle BAR larger than 4GB (size %#010llx)\n",
311 pos, (unsigned long long) sz64);
312 if (bar_too_high)
313 dev_info(&dev->dev, "reg 0x%x: can't handle BAR above 4G (bus address %#010llx)\n",
314 pos, (unsigned long long) l64);
315 if (bar_invalid)
316 dev_info(&dev->dev, "reg 0x%x: initial BAR value %#010llx invalid\n",
317 pos, (unsigned long long) region.start);
318 if (res->flags) 308 if (res->flags)
319 dev_printk(KERN_DEBUG, &dev->dev, "reg 0x%x: %pR\n", pos, res); 309 dev_printk(KERN_DEBUG, &dev->dev, "reg 0x%x: %pR\n", pos, res);
320 310
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index a81f413083e4..a20ce7d5e2a7 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -271,8 +271,7 @@ static struct pci_dev *pci_get_dev_by_id(const struct pci_device_id *id,
271 match_pci_dev_by_id); 271 match_pci_dev_by_id);
272 if (dev) 272 if (dev)
273 pdev = to_pci_dev(dev); 273 pdev = to_pci_dev(dev);
274 if (from) 274 pci_dev_put(from);
275 pci_dev_put(from);
276 return pdev; 275 return pdev;
277} 276}
278 277
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index 116ca3746adb..b1ffebec9b9e 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -596,8 +596,7 @@ static pci_ers_result_t pcifront_common_process(int cmd,
596 pcidev = pci_get_bus_and_slot(bus, devfn); 596 pcidev = pci_get_bus_and_slot(bus, devfn);
597 if (!pcidev || !pcidev->driver) { 597 if (!pcidev || !pcidev->driver) {
598 dev_err(&pdev->xdev->dev, "device or AER driver is NULL\n"); 598 dev_err(&pdev->xdev->dev, "device or AER driver is NULL\n");
599 if (pcidev) 599 pci_dev_put(pcidev);
600 pci_dev_put(pcidev);
601 return result; 600 return result;
602 } 601 }
603 pdrv = pcidev->driver; 602 pdrv = pcidev->driver;
@@ -866,6 +865,11 @@ static int pcifront_try_connect(struct pcifront_device *pdev)
866 xenbus_dev_error(pdev->xdev, err, 865 xenbus_dev_error(pdev->xdev, err,
867 "No PCI Roots found, trying 0000:00"); 866 "No PCI Roots found, trying 0000:00");
868 err = pcifront_scan_root(pdev, 0, 0); 867 err = pcifront_scan_root(pdev, 0, 0);
868 if (err) {
869 xenbus_dev_fatal(pdev->xdev, err,
870 "Error scanning PCI root 0000:00");
871 goto out;
872 }
869 num_roots = 0; 873 num_roots = 0;
870 } else if (err != 1) { 874 } else if (err != 1) {
871 if (err == 0) 875 if (err == 0)
@@ -947,6 +951,11 @@ static int pcifront_attach_devices(struct pcifront_device *pdev)
947 xenbus_dev_error(pdev->xdev, err, 951 xenbus_dev_error(pdev->xdev, err,
948 "No PCI Roots found, trying 0000:00"); 952 "No PCI Roots found, trying 0000:00");
949 err = pcifront_rescan_root(pdev, 0, 0); 953 err = pcifront_rescan_root(pdev, 0, 0);
954 if (err) {
955 xenbus_dev_fatal(pdev->xdev, err,
956 "Error scanning PCI root 0000:00");
957 goto out;
958 }
950 num_roots = 0; 959 num_roots = 0;
951 } else if (err != 1) { 960 } else if (err != 1) {
952 if (err == 0) 961 if (err == 0)
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index c6a66de6ed72..d014f22f387a 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -67,18 +67,6 @@ config PINCTRL_AT91
67 help 67 help
68 Say Y here to enable the at91 pinctrl driver 68 Say Y here to enable the at91 pinctrl driver
69 69
70config PINCTRL_BAYTRAIL
71 bool "Intel Baytrail GPIO pin control"
72 depends on GPIOLIB && ACPI && X86
73 select GPIOLIB_IRQCHIP
74 help
75 driver for memory mapped GPIO functionality on Intel Baytrail
76 platforms. Supports 3 banks with 102, 28 and 44 gpios.
77 Most pins are usually muxed to some other functionality by firmware,
78 so only a small amount is available for gpio use.
79
80 Requires ACPI device enumeration code to set up a platform device.
81
82config PINCTRL_BCM2835 70config PINCTRL_BCM2835
83 bool 71 bool
84 select PINMUX 72 select PINMUX
@@ -205,6 +193,7 @@ config PINCTRL_PALMAS
205 193
206source "drivers/pinctrl/berlin/Kconfig" 194source "drivers/pinctrl/berlin/Kconfig"
207source "drivers/pinctrl/freescale/Kconfig" 195source "drivers/pinctrl/freescale/Kconfig"
196source "drivers/pinctrl/intel/Kconfig"
208source "drivers/pinctrl/mvebu/Kconfig" 197source "drivers/pinctrl/mvebu/Kconfig"
209source "drivers/pinctrl/nomadik/Kconfig" 198source "drivers/pinctrl/nomadik/Kconfig"
210source "drivers/pinctrl/qcom/Kconfig" 199source "drivers/pinctrl/qcom/Kconfig"
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index 51f52d32859e..c030b3db8034 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -15,7 +15,6 @@ obj-$(CONFIG_PINCTRL_BF54x) += pinctrl-adi2-bf54x.o
15obj-$(CONFIG_PINCTRL_BF60x) += pinctrl-adi2-bf60x.o 15obj-$(CONFIG_PINCTRL_BF60x) += pinctrl-adi2-bf60x.o
16obj-$(CONFIG_PINCTRL_AT91) += pinctrl-at91.o 16obj-$(CONFIG_PINCTRL_AT91) += pinctrl-at91.o
17obj-$(CONFIG_PINCTRL_BCM2835) += pinctrl-bcm2835.o 17obj-$(CONFIG_PINCTRL_BCM2835) += pinctrl-bcm2835.o
18obj-$(CONFIG_PINCTRL_BAYTRAIL) += pinctrl-baytrail.o
19obj-$(CONFIG_PINCTRL_BCM281XX) += pinctrl-bcm281xx.o 18obj-$(CONFIG_PINCTRL_BCM281XX) += pinctrl-bcm281xx.o
20obj-$(CONFIG_PINCTRL_FALCON) += pinctrl-falcon.o 19obj-$(CONFIG_PINCTRL_FALCON) += pinctrl-falcon.o
21obj-$(CONFIG_PINCTRL_PALMAS) += pinctrl-palmas.o 20obj-$(CONFIG_PINCTRL_PALMAS) += pinctrl-palmas.o
@@ -39,6 +38,7 @@ obj-$(CONFIG_PINCTRL_ST) += pinctrl-st.o
39 38
40obj-$(CONFIG_ARCH_BERLIN) += berlin/ 39obj-$(CONFIG_ARCH_BERLIN) += berlin/
41obj-y += freescale/ 40obj-y += freescale/
41obj-$(CONFIG_X86) += intel/
42obj-$(CONFIG_PLAT_ORION) += mvebu/ 42obj-$(CONFIG_PLAT_ORION) += mvebu/
43obj-y += nomadik/ 43obj-y += nomadik/
44obj-$(CONFIG_ARCH_QCOM) += qcom/ 44obj-$(CONFIG_ARCH_QCOM) += qcom/
diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
index f2446769247f..52f2b9404fe0 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx.c
@@ -294,11 +294,83 @@ static int imx_pmx_get_groups(struct pinctrl_dev *pctldev, unsigned selector,
294 return 0; 294 return 0;
295} 295}
296 296
297static int imx_pmx_gpio_request_enable(struct pinctrl_dev *pctldev,
298 struct pinctrl_gpio_range *range, unsigned offset)
299{
300 struct imx_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
301 const struct imx_pinctrl_soc_info *info = ipctl->info;
302 const struct imx_pin_reg *pin_reg;
303 struct imx_pin_group *grp;
304 struct imx_pin *imx_pin;
305 unsigned int pin, group;
306 u32 reg;
307
308 /* Currently implementation only for shared mux/conf register */
309 if (!(info->flags & SHARE_MUX_CONF_REG))
310 return -EINVAL;
311
312 pin_reg = &info->pin_regs[offset];
313 if (pin_reg->mux_reg == -1)
314 return -EINVAL;
315
316 /* Find the pinctrl config with GPIO mux mode for the requested pin */
317 for (group = 0; group < info->ngroups; group++) {
318 grp = &info->groups[group];
319 for (pin = 0; pin < grp->npins; pin++) {
320 imx_pin = &grp->pins[pin];
321 if (imx_pin->pin == offset && !imx_pin->mux_mode)
322 goto mux_pin;
323 }
324 }
325
326 return -EINVAL;
327
328mux_pin:
329 reg = readl(ipctl->base + pin_reg->mux_reg);
330 reg &= ~(0x7 << 20);
331 reg |= imx_pin->config;
332 writel(reg, ipctl->base + pin_reg->mux_reg);
333
334 return 0;
335}
336
337static int imx_pmx_gpio_set_direction(struct pinctrl_dev *pctldev,
338 struct pinctrl_gpio_range *range, unsigned offset, bool input)
339{
340 struct imx_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
341 const struct imx_pinctrl_soc_info *info = ipctl->info;
342 const struct imx_pin_reg *pin_reg;
343 u32 reg;
344
345 /*
346 * Only Vybrid has the input/output buffer enable flags (IBE/OBE)
347 * They are part of the shared mux/conf register.
348 */
349 if (!(info->flags & SHARE_MUX_CONF_REG))
350 return -EINVAL;
351
352 pin_reg = &info->pin_regs[offset];
353 if (pin_reg->mux_reg == -1)
354 return -EINVAL;
355
356 /* IBE always enabled allows us to read the value "on the wire" */
357 reg = readl(ipctl->base + pin_reg->mux_reg);
358 if (input)
359 reg &= ~0x2;
360 else
361 reg |= 0x2;
362 writel(reg, ipctl->base + pin_reg->mux_reg);
363
364 return 0;
365}
366
297static const struct pinmux_ops imx_pmx_ops = { 367static const struct pinmux_ops imx_pmx_ops = {
298 .get_functions_count = imx_pmx_get_funcs_count, 368 .get_functions_count = imx_pmx_get_funcs_count,
299 .get_function_name = imx_pmx_get_func_name, 369 .get_function_name = imx_pmx_get_func_name,
300 .get_function_groups = imx_pmx_get_groups, 370 .get_function_groups = imx_pmx_get_groups,
301 .set_mux = imx_pmx_set, 371 .set_mux = imx_pmx_set,
372 .gpio_request_enable = imx_pmx_gpio_request_enable,
373 .gpio_set_direction = imx_pmx_gpio_set_direction,
302}; 374};
303 375
304static int imx_pinconf_get(struct pinctrl_dev *pctldev, 376static int imx_pinconf_get(struct pinctrl_dev *pctldev,
diff --git a/drivers/pinctrl/freescale/pinctrl-mxs.c b/drivers/pinctrl/freescale/pinctrl-mxs.c
index f98c6bb0f769..646d5c244af1 100644
--- a/drivers/pinctrl/freescale/pinctrl-mxs.c
+++ b/drivers/pinctrl/freescale/pinctrl-mxs.c
@@ -445,6 +445,31 @@ static int mxs_pinctrl_probe_dt(struct platform_device *pdev,
445 if (of_property_read_u32(child, "reg", &val)) 445 if (of_property_read_u32(child, "reg", &val))
446 continue; 446 continue;
447 if (strcmp(fn, child->name)) { 447 if (strcmp(fn, child->name)) {
448 struct device_node *child2;
449
450 /*
451 * This reference is dropped by
452 * of_get_next_child(np, * child)
453 */
454 of_node_get(child);
455
456 /*
457 * The logic parsing the functions from dt currently
458 * doesn't handle if functions with the same name are
459 * not grouped together. Only the first contiguous
460 * cluster is usable for each function name. This is a
461 * bug that is not trivial to fix, but at least warn
462 * about it.
463 */
464 for (child2 = of_get_next_child(np, child);
465 child2 != NULL;
466 child2 = of_get_next_child(np, child2)) {
467 if (!strcmp(child2->name, fn))
468 dev_warn(&pdev->dev,
469 "function nodes must be grouped by name (failed for: %s)",
470 fn);
471 }
472
448 f = &soc->functions[idxf++]; 473 f = &soc->functions[idxf++];
449 f->name = fn = child->name; 474 f->name = fn = child->name;
450 } 475 }
diff --git a/drivers/pinctrl/intel/Kconfig b/drivers/pinctrl/intel/Kconfig
new file mode 100644
index 000000000000..b801d869e91c
--- /dev/null
+++ b/drivers/pinctrl/intel/Kconfig
@@ -0,0 +1,27 @@
1#
2# Intel pin control drivers
3#
4
5config PINCTRL_BAYTRAIL
6 bool "Intel Baytrail GPIO pin control"
7 depends on GPIOLIB && ACPI
8 select GPIOLIB_IRQCHIP
9 help
10 driver for memory mapped GPIO functionality on Intel Baytrail
11 platforms. Supports 3 banks with 102, 28 and 44 gpios.
12 Most pins are usually muxed to some other functionality by firmware,
13 so only a small amount is available for gpio use.
14
15 Requires ACPI device enumeration code to set up a platform device.
16
17config PINCTRL_CHERRYVIEW
18 tristate "Intel Cherryview/Braswell pinctrl and GPIO driver"
19 depends on ACPI
20 select PINMUX
21 select PINCONF
22 select GENERIC_PINCONF
23 select GPIOLIB
24 select GPIOLIB_IRQCHIP
25 help
26 Cherryview/Braswell pinctrl driver provides an interface that
27 allows configuring of SoC pins and using them as GPIOs.
diff --git a/drivers/pinctrl/intel/Makefile b/drivers/pinctrl/intel/Makefile
new file mode 100644
index 000000000000..4c210e4139e2
--- /dev/null
+++ b/drivers/pinctrl/intel/Makefile
@@ -0,0 +1,4 @@
1# Intel pin control drivers
2
3obj-$(CONFIG_PINCTRL_BAYTRAIL) += pinctrl-baytrail.o
4obj-$(CONFIG_PINCTRL_CHERRYVIEW) += pinctrl-cherryview.o
diff --git a/drivers/pinctrl/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index 9dc38140194b..7db000431da7 100644
--- a/drivers/pinctrl/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -616,5 +616,10 @@ static int __init byt_gpio_init(void)
616{ 616{
617 return platform_driver_register(&byt_gpio_driver); 617 return platform_driver_register(&byt_gpio_driver);
618} 618}
619
620subsys_initcall(byt_gpio_init); 619subsys_initcall(byt_gpio_init);
620
621static void __exit byt_gpio_exit(void)
622{
623 platform_driver_unregister(&byt_gpio_driver);
624}
625module_exit(byt_gpio_exit);
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
new file mode 100644
index 000000000000..e9f8b39d1a9f
--- /dev/null
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -0,0 +1,1519 @@
1/*
2 * Cherryview/Braswell pinctrl driver
3 *
4 * Copyright (C) 2014, Intel Corporation
5 * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
6 *
7 * This driver is based on the original Cherryview GPIO driver by
8 * Ning Li <ning.li@intel.com>
9 * Alan Cox <alan@linux.intel.com>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 */
15
16#include <linux/kernel.h>
17#include <linux/module.h>
18#include <linux/init.h>
19#include <linux/types.h>
20#include <linux/gpio.h>
21#include <linux/gpio/driver.h>
22#include <linux/acpi.h>
23#include <linux/pinctrl/pinctrl.h>
24#include <linux/pinctrl/pinmux.h>
25#include <linux/pinctrl/pinconf.h>
26#include <linux/pinctrl/pinconf-generic.h>
27#include <linux/platform_device.h>
28
29#define CHV_INTSTAT 0x300
30#define CHV_INTMASK 0x380
31
32#define FAMILY_PAD_REGS_OFF 0x4400
33#define FAMILY_PAD_REGS_SIZE 0x400
34#define MAX_FAMILY_PAD_GPIO_NO 15
35#define GPIO_REGS_SIZE 8
36
37#define CHV_PADCTRL0 0x000
38#define CHV_PADCTRL0_INTSEL_SHIFT 28
39#define CHV_PADCTRL0_INTSEL_MASK (0xf << CHV_PADCTRL0_INTSEL_SHIFT)
40#define CHV_PADCTRL0_TERM_UP BIT(23)
41#define CHV_PADCTRL0_TERM_SHIFT 20
42#define CHV_PADCTRL0_TERM_MASK (7 << CHV_PADCTRL0_TERM_SHIFT)
43#define CHV_PADCTRL0_TERM_20K 1
44#define CHV_PADCTRL0_TERM_5K 2
45#define CHV_PADCTRL0_TERM_1K 4
46#define CHV_PADCTRL0_PMODE_SHIFT 16
47#define CHV_PADCTRL0_PMODE_MASK (0xf << CHV_PADCTRL0_PMODE_SHIFT)
48#define CHV_PADCTRL0_GPIOEN BIT(15)
49#define CHV_PADCTRL0_GPIOCFG_SHIFT 8
50#define CHV_PADCTRL0_GPIOCFG_MASK (7 << CHV_PADCTRL0_GPIOCFG_SHIFT)
51#define CHV_PADCTRL0_GPIOCFG_GPIO 0
52#define CHV_PADCTRL0_GPIOCFG_GPO 1
53#define CHV_PADCTRL0_GPIOCFG_GPI 2
54#define CHV_PADCTRL0_GPIOCFG_HIZ 3
55#define CHV_PADCTRL0_GPIOTXSTATE BIT(1)
56#define CHV_PADCTRL0_GPIORXSTATE BIT(0)
57
58#define CHV_PADCTRL1 0x004
59#define CHV_PADCTRL1_CFGLOCK BIT(31)
60#define CHV_PADCTRL1_INVRXTX_SHIFT 4
61#define CHV_PADCTRL1_INVRXTX_MASK (0xf << CHV_PADCTRL1_INVRXTX_SHIFT)
62#define CHV_PADCTRL1_INVRXTX_TXENABLE (2 << CHV_PADCTRL1_INVRXTX_SHIFT)
63#define CHV_PADCTRL1_ODEN BIT(3)
64#define CHV_PADCTRL1_INVRXTX_RXDATA (4 << CHV_PADCTRL1_INVRXTX_SHIFT)
65#define CHV_PADCTRL1_INTWAKECFG_MASK 7
66#define CHV_PADCTRL1_INTWAKECFG_FALLING 1
67#define CHV_PADCTRL1_INTWAKECFG_RISING 2
68#define CHV_PADCTRL1_INTWAKECFG_BOTH 3
69#define CHV_PADCTRL1_INTWAKECFG_LEVEL 4
70
/**
 * struct chv_alternate_function - A per group or per pin alternate function
 * @pin: Pin number (only used in per pin configs)
 * @mode: Mode the pin should be set in
 * @invert_oe: Invert OE (output enable) for this pin
 */
struct chv_alternate_function {
	unsigned pin;
	u8 mode;
	bool invert_oe;
};
82
/**
 * struct chv_pingroup - describes a CHV pin group
 * @name: Name of the group
 * @pins: An array of pins in this group
 * @npins: Number of pins in this group
 * @altfunc: Alternate function applied to all pins in this group
 * @overrides: Alternate function override per pin or %NULL if not used
 * @noverrides: Number of per pin alternate function overrides if
 *              @overrides != NULL.
 */
struct chv_pingroup {
	const char *name;
	const unsigned *pins;
	size_t npins;
	struct chv_alternate_function altfunc;
	const struct chv_alternate_function *overrides;
	size_t noverrides;
};
101
/**
 * struct chv_function - A CHV pinmux function
 * @name: Name of the function
 * @groups: An array of group names selectable for this function
 * @ngroups: Number of groups in @groups
 */
struct chv_function {
	const char *name;
	const char * const *groups;
	size_t ngroups;
};
113
/**
 * struct chv_gpio_pinrange - A contiguous range of pins usable as GPIOs
 * @base: Start pin number (inclusive)
 * @npins: Number of pins in this range
 */
struct chv_gpio_pinrange {
	unsigned base;
	unsigned npins;
};
123
/**
 * struct chv_community - A community specific configuration
 * @uid: ACPI _UID used to match the community
 * @pins: All pins in this community
 * @npins: Number of pins
 * @groups: All groups in this community
 * @ngroups: Number of groups
 * @functions: All functions in this community
 * @nfunctions: Number of functions
 * @gpio_ranges: An array of GPIO ranges in this community
 * @ngpio_ranges: Number of GPIO ranges
 * @ngpios: Total number of GPIOs in this community
 */
struct chv_community {
	const char *uid;
	const struct pinctrl_pin_desc *pins;
	size_t npins;
	const struct chv_pingroup *groups;
	size_t ngroups;
	const struct chv_function *functions;
	size_t nfunctions;
	const struct chv_gpio_pinrange *gpio_ranges;
	size_t ngpio_ranges;
	size_t ngpios;
};
150
/**
 * struct chv_pinctrl - CHV pinctrl private structure
 * @dev: Pointer to the parent device
 * @pctldesc: Pin controller description
 * @pctldev: Pointer to the pin controller device
 * @chip: GPIO chip in this pin controller
 * @regs: MMIO registers
 * @lock: Lock to serialize register accesses
 * @intr_lines: Stores mapping between 16 HW interrupt wires and GPIO
 *		offset (in GPIO number space)
 * @community: Community this pinctrl instance represents
 *
 * NOTE(review): the original comment said "the first group in @groups is
 * expected to contain all pins that can be used as GPIOs", but this struct
 * has no @groups member — presumably it refers to @community->groups;
 * confirm against the probe/registration code outside this chunk.
 */
struct chv_pinctrl {
	struct device *dev;
	struct pinctrl_desc pctldesc;
	struct pinctrl_dev *pctldev;
	struct gpio_chip chip;
	void __iomem *regs;
	spinlock_t lock;
	unsigned intr_lines[16];
	const struct chv_community *community;
};
176
/* Recover the containing chv_pinctrl from an embedded gpio_chip pointer */
#define gpiochip_to_pinctrl(c) container_of(c, struct chv_pinctrl, chip)

/* Initializer helpers for the static per-community tables below */

#define ALTERNATE_FUNCTION(p, m, i)		\
	{					\
		.pin = (p),			\
		.mode = (m),			\
		.invert_oe = (i),		\
	}

/* Group whose pins all share one alternate function (mode m, invert i) */
#define PIN_GROUP(n, p, m, i)			\
	{					\
		.name = (n),			\
		.pins = (p),			\
		.npins = ARRAY_SIZE((p)),	\
		.altfunc.mode = (m),		\
		.altfunc.invert_oe = (i),	\
	}

/* As PIN_GROUP, plus an array (o) of per-pin alternate function overrides */
#define PIN_GROUP_WITH_OVERRIDE(n, p, m, i, o)	\
	{					\
		.name = (n),			\
		.pins = (p),			\
		.npins = ARRAY_SIZE((p)),	\
		.altfunc.mode = (m),		\
		.altfunc.invert_oe = (i),	\
		.overrides = (o),		\
		.noverrides = ARRAY_SIZE((o)),	\
	}

#define FUNCTION(n, g)				\
	{					\
		.name = (n),			\
		.groups = (g),			\
		.ngroups = ARRAY_SIZE((g)),	\
	}

/* Inclusive pin range [start, end] expressed as base + count */
#define GPIO_PINRANGE(start, end)		\
	{					\
		.base = (start),		\
		.npins = (end) - (start) + 1,	\
	}
218
219static const struct pinctrl_pin_desc southwest_pins[] = {
220 PINCTRL_PIN(0, "FST_SPI_D2"),
221 PINCTRL_PIN(1, "FST_SPI_D0"),
222 PINCTRL_PIN(2, "FST_SPI_CLK"),
223 PINCTRL_PIN(3, "FST_SPI_D3"),
224 PINCTRL_PIN(4, "FST_SPI_CS1_B"),
225 PINCTRL_PIN(5, "FST_SPI_D1"),
226 PINCTRL_PIN(6, "FST_SPI_CS0_B"),
227 PINCTRL_PIN(7, "FST_SPI_CS2_B"),
228
229 PINCTRL_PIN(15, "UART1_RTS_B"),
230 PINCTRL_PIN(16, "UART1_RXD"),
231 PINCTRL_PIN(17, "UART2_RXD"),
232 PINCTRL_PIN(18, "UART1_CTS_B"),
233 PINCTRL_PIN(19, "UART2_RTS_B"),
234 PINCTRL_PIN(20, "UART1_TXD"),
235 PINCTRL_PIN(21, "UART2_TXD"),
236 PINCTRL_PIN(22, "UART2_CTS_B"),
237
238 PINCTRL_PIN(30, "MF_HDA_CLK"),
239 PINCTRL_PIN(31, "MF_HDA_RSTB"),
240 PINCTRL_PIN(32, "MF_HDA_SDIO"),
241 PINCTRL_PIN(33, "MF_HDA_SDO"),
242 PINCTRL_PIN(34, "MF_HDA_DOCKRSTB"),
243 PINCTRL_PIN(35, "MF_HDA_SYNC"),
244 PINCTRL_PIN(36, "MF_HDA_SDI1"),
245 PINCTRL_PIN(37, "MF_HDA_DOCKENB"),
246
247 PINCTRL_PIN(45, "I2C5_SDA"),
248 PINCTRL_PIN(46, "I2C4_SDA"),
249 PINCTRL_PIN(47, "I2C6_SDA"),
250 PINCTRL_PIN(48, "I2C5_SCL"),
251 PINCTRL_PIN(49, "I2C_NFC_SDA"),
252 PINCTRL_PIN(50, "I2C4_SCL"),
253 PINCTRL_PIN(51, "I2C6_SCL"),
254 PINCTRL_PIN(52, "I2C_NFC_SCL"),
255
256 PINCTRL_PIN(60, "I2C1_SDA"),
257 PINCTRL_PIN(61, "I2C0_SDA"),
258 PINCTRL_PIN(62, "I2C2_SDA"),
259 PINCTRL_PIN(63, "I2C1_SCL"),
260 PINCTRL_PIN(64, "I2C3_SDA"),
261 PINCTRL_PIN(65, "I2C0_SCL"),
262 PINCTRL_PIN(66, "I2C2_SCL"),
263 PINCTRL_PIN(67, "I2C3_SCL"),
264
265 PINCTRL_PIN(75, "SATA_GP0"),
266 PINCTRL_PIN(76, "SATA_GP1"),
267 PINCTRL_PIN(77, "SATA_LEDN"),
268 PINCTRL_PIN(78, "SATA_GP2"),
269 PINCTRL_PIN(79, "MF_SMB_ALERTB"),
270 PINCTRL_PIN(80, "SATA_GP3"),
271 PINCTRL_PIN(81, "MF_SMB_CLK"),
272 PINCTRL_PIN(82, "MF_SMB_DATA"),
273
274 PINCTRL_PIN(90, "PCIE_CLKREQ0B"),
275 PINCTRL_PIN(91, "PCIE_CLKREQ1B"),
276 PINCTRL_PIN(92, "GP_SSP_2_CLK"),
277 PINCTRL_PIN(93, "PCIE_CLKREQ2B"),
278 PINCTRL_PIN(94, "GP_SSP_2_RXD"),
279 PINCTRL_PIN(95, "PCIE_CLKREQ3B"),
280 PINCTRL_PIN(96, "GP_SSP_2_FS"),
281 PINCTRL_PIN(97, "GP_SSP_2_TXD"),
282};
283
284static const unsigned southwest_fspi_pins[] = { 0, 1, 2, 3, 4, 5, 6, 7 };
285static const unsigned southwest_uart0_pins[] = { 16, 20 };
286static const unsigned southwest_uart1_pins[] = { 15, 16, 18, 20 };
287static const unsigned southwest_uart2_pins[] = { 17, 19, 21, 22 };
288static const unsigned southwest_i2c0_pins[] = { 61, 65 };
289static const unsigned southwest_hda_pins[] = { 30, 31, 32, 33, 34, 35, 36, 37 };
290static const unsigned southwest_lpe_pins[] = {
291 30, 31, 32, 33, 34, 35, 36, 37, 92, 94, 96, 97,
292};
293static const unsigned southwest_i2c1_pins[] = { 60, 63 };
294static const unsigned southwest_i2c2_pins[] = { 62, 66 };
295static const unsigned southwest_i2c3_pins[] = { 64, 67 };
296static const unsigned southwest_i2c4_pins[] = { 46, 50 };
297static const unsigned southwest_i2c5_pins[] = { 45, 48 };
298static const unsigned southwest_i2c6_pins[] = { 47, 51 };
299static const unsigned southwest_i2c_nfc_pins[] = { 49, 52 };
300static const unsigned southwest_smbus_pins[] = { 79, 81, 82 };
301static const unsigned southwest_spi3_pins[] = { 76, 79, 80, 81, 82 };
302
303/* LPE I2S TXD pins need to have invert_oe set */
304static const struct chv_alternate_function southwest_lpe_altfuncs[] = {
305 ALTERNATE_FUNCTION(30, 1, true),
306 ALTERNATE_FUNCTION(34, 1, true),
307 ALTERNATE_FUNCTION(97, 1, true),
308};
309
310/*
311 * Two spi3 chipselects are available in different mode than the main spi3
312 * functionality, which is using mode 1.
313 */
314static const struct chv_alternate_function southwest_spi3_altfuncs[] = {
315 ALTERNATE_FUNCTION(76, 3, false),
316 ALTERNATE_FUNCTION(80, 3, false),
317};
318
319static const struct chv_pingroup southwest_groups[] = {
320 PIN_GROUP("uart0_grp", southwest_uart0_pins, 2, false),
321 PIN_GROUP("uart1_grp", southwest_uart1_pins, 1, false),
322 PIN_GROUP("uart2_grp", southwest_uart2_pins, 1, false),
323 PIN_GROUP("hda_grp", southwest_hda_pins, 2, false),
324 PIN_GROUP("i2c0_grp", southwest_i2c0_pins, 1, true),
325 PIN_GROUP("i2c1_grp", southwest_i2c1_pins, 1, true),
326 PIN_GROUP("i2c2_grp", southwest_i2c2_pins, 1, true),
327 PIN_GROUP("i2c3_grp", southwest_i2c3_pins, 1, true),
328 PIN_GROUP("i2c4_grp", southwest_i2c4_pins, 1, true),
329 PIN_GROUP("i2c5_grp", southwest_i2c5_pins, 1, true),
330 PIN_GROUP("i2c6_grp", southwest_i2c6_pins, 1, true),
331 PIN_GROUP("i2c_nfc_grp", southwest_i2c_nfc_pins, 2, true),
332
333 PIN_GROUP_WITH_OVERRIDE("lpe_grp", southwest_lpe_pins, 1, false,
334 southwest_lpe_altfuncs),
335 PIN_GROUP_WITH_OVERRIDE("spi3_grp", southwest_spi3_pins, 2, false,
336 southwest_spi3_altfuncs),
337};
338
339static const char * const southwest_uart0_groups[] = { "uart0_grp" };
340static const char * const southwest_uart1_groups[] = { "uart1_grp" };
341static const char * const southwest_uart2_groups[] = { "uart2_grp" };
342static const char * const southwest_hda_groups[] = { "hda_grp" };
343static const char * const southwest_lpe_groups[] = { "lpe_grp" };
344static const char * const southwest_i2c0_groups[] = { "i2c0_grp" };
345static const char * const southwest_i2c1_groups[] = { "i2c1_grp" };
346static const char * const southwest_i2c2_groups[] = { "i2c2_grp" };
347static const char * const southwest_i2c3_groups[] = { "i2c3_grp" };
348static const char * const southwest_i2c4_groups[] = { "i2c4_grp" };
349static const char * const southwest_i2c5_groups[] = { "i2c5_grp" };
350static const char * const southwest_i2c6_groups[] = { "i2c6_grp" };
351static const char * const southwest_i2c_nfc_groups[] = { "i2c_nfc_grp" };
352static const char * const southwest_spi3_groups[] = { "spi3_grp" };
353
354/*
355 * Only do pinmuxing for certain LPSS devices for now. Rest of the pins are
356 * enabled only as GPIOs.
357 */
358static const struct chv_function southwest_functions[] = {
359 FUNCTION("uart0", southwest_uart0_groups),
360 FUNCTION("uart1", southwest_uart1_groups),
361 FUNCTION("uart2", southwest_uart2_groups),
362 FUNCTION("hda", southwest_hda_groups),
363 FUNCTION("lpe", southwest_lpe_groups),
364 FUNCTION("i2c0", southwest_i2c0_groups),
365 FUNCTION("i2c1", southwest_i2c1_groups),
366 FUNCTION("i2c2", southwest_i2c2_groups),
367 FUNCTION("i2c3", southwest_i2c3_groups),
368 FUNCTION("i2c4", southwest_i2c4_groups),
369 FUNCTION("i2c5", southwest_i2c5_groups),
370 FUNCTION("i2c6", southwest_i2c6_groups),
371 FUNCTION("i2c_nfc", southwest_i2c_nfc_groups),
372 FUNCTION("spi3", southwest_spi3_groups),
373};
374
375static const struct chv_gpio_pinrange southwest_gpio_ranges[] = {
376 GPIO_PINRANGE(0, 7),
377 GPIO_PINRANGE(15, 22),
378 GPIO_PINRANGE(30, 37),
379 GPIO_PINRANGE(45, 52),
380 GPIO_PINRANGE(60, 67),
381 GPIO_PINRANGE(75, 82),
382 GPIO_PINRANGE(90, 97),
383};
384
385static const struct chv_community southwest_community = {
386 .uid = "1",
387 .pins = southwest_pins,
388 .npins = ARRAY_SIZE(southwest_pins),
389 .groups = southwest_groups,
390 .ngroups = ARRAY_SIZE(southwest_groups),
391 .functions = southwest_functions,
392 .nfunctions = ARRAY_SIZE(southwest_functions),
393 .gpio_ranges = southwest_gpio_ranges,
394 .ngpio_ranges = ARRAY_SIZE(southwest_gpio_ranges),
395 .ngpios = ARRAY_SIZE(southwest_pins),
396};
397
398static const struct pinctrl_pin_desc north_pins[] = {
399 PINCTRL_PIN(0, "GPIO_DFX_0"),
400 PINCTRL_PIN(1, "GPIO_DFX_3"),
401 PINCTRL_PIN(2, "GPIO_DFX_7"),
402 PINCTRL_PIN(3, "GPIO_DFX_1"),
403 PINCTRL_PIN(4, "GPIO_DFX_5"),
404 PINCTRL_PIN(5, "GPIO_DFX_4"),
405 PINCTRL_PIN(6, "GPIO_DFX_8"),
406 PINCTRL_PIN(7, "GPIO_DFX_2"),
407 PINCTRL_PIN(8, "GPIO_DFX_6"),
408
409 PINCTRL_PIN(15, "GPIO_SUS0"),
410 PINCTRL_PIN(16, "SEC_GPIO_SUS10"),
411 PINCTRL_PIN(17, "GPIO_SUS3"),
412 PINCTRL_PIN(18, "GPIO_SUS7"),
413 PINCTRL_PIN(19, "GPIO_SUS1"),
414 PINCTRL_PIN(20, "GPIO_SUS5"),
415 PINCTRL_PIN(21, "SEC_GPIO_SUS11"),
416 PINCTRL_PIN(22, "GPIO_SUS4"),
417 PINCTRL_PIN(23, "SEC_GPIO_SUS8"),
418 PINCTRL_PIN(24, "GPIO_SUS2"),
419 PINCTRL_PIN(25, "GPIO_SUS6"),
420 PINCTRL_PIN(26, "CX_PREQ_B"),
421 PINCTRL_PIN(27, "SEC_GPIO_SUS9"),
422
423 PINCTRL_PIN(30, "TRST_B"),
424 PINCTRL_PIN(31, "TCK"),
425 PINCTRL_PIN(32, "PROCHOT_B"),
426 PINCTRL_PIN(33, "SVIDO_DATA"),
427 PINCTRL_PIN(34, "TMS"),
428 PINCTRL_PIN(35, "CX_PRDY_B_2"),
429 PINCTRL_PIN(36, "TDO_2"),
430 PINCTRL_PIN(37, "CX_PRDY_B"),
431 PINCTRL_PIN(38, "SVIDO_ALERT_B"),
432 PINCTRL_PIN(39, "TDO"),
433 PINCTRL_PIN(40, "SVIDO_CLK"),
434 PINCTRL_PIN(41, "TDI"),
435
436 PINCTRL_PIN(45, "GP_CAMERASB_05"),
437 PINCTRL_PIN(46, "GP_CAMERASB_02"),
438 PINCTRL_PIN(47, "GP_CAMERASB_08"),
439 PINCTRL_PIN(48, "GP_CAMERASB_00"),
440 PINCTRL_PIN(49, "GP_CAMERASB_06"),
441 PINCTRL_PIN(50, "GP_CAMERASB_10"),
442 PINCTRL_PIN(51, "GP_CAMERASB_03"),
443 PINCTRL_PIN(52, "GP_CAMERASB_09"),
444 PINCTRL_PIN(53, "GP_CAMERASB_01"),
445 PINCTRL_PIN(54, "GP_CAMERASB_07"),
446 PINCTRL_PIN(55, "GP_CAMERASB_11"),
447 PINCTRL_PIN(56, "GP_CAMERASB_04"),
448
449 PINCTRL_PIN(60, "PANEL0_BKLTEN"),
450 PINCTRL_PIN(61, "HV_DDI0_HPD"),
451 PINCTRL_PIN(62, "HV_DDI2_DDC_SDA"),
452 PINCTRL_PIN(63, "PANEL1_BKLTCTL"),
453 PINCTRL_PIN(64, "HV_DDI1_HPD"),
454 PINCTRL_PIN(65, "PANEL0_BKLTCTL"),
455 PINCTRL_PIN(66, "HV_DDI0_DDC_SDA"),
456 PINCTRL_PIN(67, "HV_DDI2_DDC_SCL"),
457 PINCTRL_PIN(68, "HV_DDI2_HPD"),
458 PINCTRL_PIN(69, "PANEL1_VDDEN"),
459 PINCTRL_PIN(70, "PANEL1_BKLTEN"),
460 PINCTRL_PIN(71, "HV_DDI0_DDC_SCL"),
461 PINCTRL_PIN(72, "PANEL0_VDDEN"),
462};
463
464static const struct chv_gpio_pinrange north_gpio_ranges[] = {
465 GPIO_PINRANGE(0, 8),
466 GPIO_PINRANGE(15, 27),
467 GPIO_PINRANGE(30, 41),
468 GPIO_PINRANGE(45, 56),
469 GPIO_PINRANGE(60, 72),
470};
471
472static const struct chv_community north_community = {
473 .uid = "2",
474 .pins = north_pins,
475 .npins = ARRAY_SIZE(north_pins),
476 .gpio_ranges = north_gpio_ranges,
477 .ngpio_ranges = ARRAY_SIZE(north_gpio_ranges),
478 .ngpios = ARRAY_SIZE(north_pins),
479};
480
481static const struct pinctrl_pin_desc east_pins[] = {
482 PINCTRL_PIN(0, "PMU_SLP_S3_B"),
483 PINCTRL_PIN(1, "PMU_BATLOW_B"),
484 PINCTRL_PIN(2, "SUS_STAT_B"),
485 PINCTRL_PIN(3, "PMU_SLP_S0IX_B"),
486 PINCTRL_PIN(4, "PMU_AC_PRESENT"),
487 PINCTRL_PIN(5, "PMU_PLTRST_B"),
488 PINCTRL_PIN(6, "PMU_SUSCLK"),
489 PINCTRL_PIN(7, "PMU_SLP_LAN_B"),
490 PINCTRL_PIN(8, "PMU_PWRBTN_B"),
491 PINCTRL_PIN(9, "PMU_SLP_S4_B"),
492 PINCTRL_PIN(10, "PMU_WAKE_B"),
493 PINCTRL_PIN(11, "PMU_WAKE_LAN_B"),
494
495 PINCTRL_PIN(15, "MF_ISH_GPIO_3"),
496 PINCTRL_PIN(16, "MF_ISH_GPIO_7"),
497 PINCTRL_PIN(17, "MF_ISH_I2C1_SCL"),
498 PINCTRL_PIN(18, "MF_ISH_GPIO_1"),
499 PINCTRL_PIN(19, "MF_ISH_GPIO_5"),
500 PINCTRL_PIN(20, "MF_ISH_GPIO_9"),
501 PINCTRL_PIN(21, "MF_ISH_GPIO_0"),
502 PINCTRL_PIN(22, "MF_ISH_GPIO_4"),
503 PINCTRL_PIN(23, "MF_ISH_GPIO_8"),
504 PINCTRL_PIN(24, "MF_ISH_GPIO_2"),
505 PINCTRL_PIN(25, "MF_ISH_GPIO_6"),
506 PINCTRL_PIN(26, "MF_ISH_I2C1_SDA"),
507};
508
509static const struct chv_gpio_pinrange east_gpio_ranges[] = {
510 GPIO_PINRANGE(0, 11),
511 GPIO_PINRANGE(15, 26),
512};
513
514static const struct chv_community east_community = {
515 .uid = "3",
516 .pins = east_pins,
517 .npins = ARRAY_SIZE(east_pins),
518 .gpio_ranges = east_gpio_ranges,
519 .ngpio_ranges = ARRAY_SIZE(east_gpio_ranges),
520 .ngpios = ARRAY_SIZE(east_pins),
521};
522
523static const struct pinctrl_pin_desc southeast_pins[] = {
524 PINCTRL_PIN(0, "MF_PLT_CLK0"),
525 PINCTRL_PIN(1, "PWM1"),
526 PINCTRL_PIN(2, "MF_PLT_CLK1"),
527 PINCTRL_PIN(3, "MF_PLT_CLK4"),
528 PINCTRL_PIN(4, "MF_PLT_CLK3"),
529 PINCTRL_PIN(5, "PWM0"),
530 PINCTRL_PIN(6, "MF_PLT_CLK5"),
531 PINCTRL_PIN(7, "MF_PLT_CLK2"),
532
533 PINCTRL_PIN(15, "SDMMC2_D3_CD_B"),
534 PINCTRL_PIN(16, "SDMMC1_CLK"),
535 PINCTRL_PIN(17, "SDMMC1_D0"),
536 PINCTRL_PIN(18, "SDMMC2_D1"),
537 PINCTRL_PIN(19, "SDMMC2_CLK"),
538 PINCTRL_PIN(20, "SDMMC1_D2"),
539 PINCTRL_PIN(21, "SDMMC2_D2"),
540 PINCTRL_PIN(22, "SDMMC2_CMD"),
541 PINCTRL_PIN(23, "SDMMC1_CMD"),
542 PINCTRL_PIN(24, "SDMMC1_D1"),
543 PINCTRL_PIN(25, "SDMMC2_D0"),
544 PINCTRL_PIN(26, "SDMMC1_D3_CD_B"),
545
546 PINCTRL_PIN(30, "SDMMC3_D1"),
547 PINCTRL_PIN(31, "SDMMC3_CLK"),
548 PINCTRL_PIN(32, "SDMMC3_D3"),
549 PINCTRL_PIN(33, "SDMMC3_D2"),
550 PINCTRL_PIN(34, "SDMMC3_CMD"),
551 PINCTRL_PIN(35, "SDMMC3_D0"),
552
553 PINCTRL_PIN(45, "MF_LPC_AD2"),
554 PINCTRL_PIN(46, "LPC_CLKRUNB"),
555 PINCTRL_PIN(47, "MF_LPC_AD0"),
556 PINCTRL_PIN(48, "LPC_FRAMEB"),
557 PINCTRL_PIN(49, "MF_LPC_CLKOUT1"),
558 PINCTRL_PIN(50, "MF_LPC_AD3"),
559 PINCTRL_PIN(51, "MF_LPC_CLKOUT0"),
560 PINCTRL_PIN(52, "MF_LPC_AD1"),
561
562 PINCTRL_PIN(60, "SPI1_MISO"),
563 PINCTRL_PIN(61, "SPI1_CSO_B"),
564 PINCTRL_PIN(62, "SPI1_CLK"),
565 PINCTRL_PIN(63, "MMC1_D6"),
566 PINCTRL_PIN(64, "SPI1_MOSI"),
567 PINCTRL_PIN(65, "MMC1_D5"),
568 PINCTRL_PIN(66, "SPI1_CS1_B"),
569 PINCTRL_PIN(67, "MMC1_D4_SD_WE"),
570 PINCTRL_PIN(68, "MMC1_D7"),
571 PINCTRL_PIN(69, "MMC1_RCLK"),
572
573 PINCTRL_PIN(75, "USB_OC1_B"),
574 PINCTRL_PIN(76, "PMU_RESETBUTTON_B"),
575 PINCTRL_PIN(77, "GPIO_ALERT"),
576 PINCTRL_PIN(78, "SDMMC3_PWR_EN_B"),
577 PINCTRL_PIN(79, "ILB_SERIRQ"),
578 PINCTRL_PIN(80, "USB_OC0_B"),
579 PINCTRL_PIN(81, "SDMMC3_CD_B"),
580 PINCTRL_PIN(82, "SPKR"),
581 PINCTRL_PIN(83, "SUSPWRDNACK"),
582 PINCTRL_PIN(84, "SPARE_PIN"),
583 PINCTRL_PIN(85, "SDMMC3_1P8_EN"),
584};
585
586static const unsigned southeast_pwm0_pins[] = { 5 };
587static const unsigned southeast_pwm1_pins[] = { 1 };
588static const unsigned southeast_sdmmc1_pins[] = {
589 16, 17, 20, 23, 24, 26, 63, 65, 67, 68, 69,
590};
591static const unsigned southeast_sdmmc2_pins[] = { 15, 18, 19, 21, 22, 25 };
592static const unsigned southeast_sdmmc3_pins[] = {
593 30, 31, 32, 33, 34, 35, 78, 81, 85,
594};
595static const unsigned southeast_spi1_pins[] = { 60, 61, 62, 64, 66 };
596static const unsigned southeast_spi2_pins[] = { 2, 3, 4, 6, 7 };
597
598static const struct chv_pingroup southeast_groups[] = {
599 PIN_GROUP("pwm0_grp", southeast_pwm0_pins, 1, false),
600 PIN_GROUP("pwm1_grp", southeast_pwm1_pins, 1, false),
601 PIN_GROUP("sdmmc1_grp", southeast_sdmmc1_pins, 1, false),
602 PIN_GROUP("sdmmc2_grp", southeast_sdmmc2_pins, 1, false),
603 PIN_GROUP("sdmmc3_grp", southeast_sdmmc3_pins, 1, false),
604 PIN_GROUP("spi1_grp", southeast_spi1_pins, 1, false),
605 PIN_GROUP("spi2_grp", southeast_spi2_pins, 4, false),
606};
607
608static const char * const southeast_pwm0_groups[] = { "pwm0_grp" };
609static const char * const southeast_pwm1_groups[] = { "pwm1_grp" };
610static const char * const southeast_sdmmc1_groups[] = { "sdmmc1_grp" };
611static const char * const southeast_sdmmc2_groups[] = { "sdmmc2_grp" };
612static const char * const southeast_sdmmc3_groups[] = { "sdmmc3_grp" };
613static const char * const southeast_spi1_groups[] = { "spi1_grp" };
614static const char * const southeast_spi2_groups[] = { "spi2_grp" };
615
616static const struct chv_function southeast_functions[] = {
617 FUNCTION("pwm0", southeast_pwm0_groups),
618 FUNCTION("pwm1", southeast_pwm1_groups),
619 FUNCTION("sdmmc1", southeast_sdmmc1_groups),
620 FUNCTION("sdmmc2", southeast_sdmmc2_groups),
621 FUNCTION("sdmmc3", southeast_sdmmc3_groups),
622 FUNCTION("spi1", southeast_spi1_groups),
623 FUNCTION("spi2", southeast_spi2_groups),
624};
625
626static const struct chv_gpio_pinrange southeast_gpio_ranges[] = {
627 GPIO_PINRANGE(0, 7),
628 GPIO_PINRANGE(15, 26),
629 GPIO_PINRANGE(30, 35),
630 GPIO_PINRANGE(45, 52),
631 GPIO_PINRANGE(60, 69),
632 GPIO_PINRANGE(75, 85),
633};
634
635static const struct chv_community southeast_community = {
636 .uid = "4",
637 .pins = southeast_pins,
638 .npins = ARRAY_SIZE(southeast_pins),
639 .groups = southeast_groups,
640 .ngroups = ARRAY_SIZE(southeast_groups),
641 .functions = southeast_functions,
642 .nfunctions = ARRAY_SIZE(southeast_functions),
643 .gpio_ranges = southeast_gpio_ranges,
644 .ngpio_ranges = ARRAY_SIZE(southeast_gpio_ranges),
645 .ngpios = ARRAY_SIZE(southeast_pins),
646};
647
/*
 * All four Cherryview GPIO communities.  The .uid fields ("1".."4")
 * suggest matching against the ACPI _UID of the enumerated device;
 * NOTE(review): the probe code is outside this chunk — confirm there.
 */
static const struct chv_community *chv_communities[] = {
	&southwest_community,
	&north_community,
	&east_community,
	&southeast_community,
};
654
655static void __iomem *chv_padreg(struct chv_pinctrl *pctrl, unsigned offset,
656 unsigned reg)
657{
658 unsigned family_no = offset / MAX_FAMILY_PAD_GPIO_NO;
659 unsigned pad_no = offset % MAX_FAMILY_PAD_GPIO_NO;
660
661 offset = FAMILY_PAD_REGS_OFF + FAMILY_PAD_REGS_SIZE * family_no +
662 GPIO_REGS_SIZE * pad_no;
663
664 return pctrl->regs + offset + reg;
665}
666
/*
 * chv_writel() - write a pad register, then read it back so the posted
 * write is flushed to the hardware before the caller continues.
 */
static void chv_writel(u32 value, void __iomem *reg)
{
	writel(value, reg);
	/* simple readback to confirm the bus transferring done */
	readl(reg);
}
673
674/* When Pad Cfg is locked, driver can only change GPIOTXState or GPIORXState */
675static bool chv_pad_locked(struct chv_pinctrl *pctrl, unsigned offset)
676{
677 void __iomem *reg;
678
679 reg = chv_padreg(pctrl, offset, CHV_PADCTRL1);
680 return readl(reg) & CHV_PADCTRL1_CFGLOCK;
681}
682
683static int chv_get_groups_count(struct pinctrl_dev *pctldev)
684{
685 struct chv_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
686
687 return pctrl->community->ngroups;
688}
689
690static const char *chv_get_group_name(struct pinctrl_dev *pctldev,
691 unsigned group)
692{
693 struct chv_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
694
695 return pctrl->community->groups[group].name;
696}
697
698static int chv_get_group_pins(struct pinctrl_dev *pctldev, unsigned group,
699 const unsigned **pins, unsigned *npins)
700{
701 struct chv_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
702
703 *pins = pctrl->community->groups[group].pins;
704 *npins = pctrl->community->groups[group].npins;
705 return 0;
706}
707
708static void chv_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
709 unsigned offset)
710{
711 struct chv_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
712 unsigned long flags;
713 u32 ctrl0, ctrl1;
714 bool locked;
715
716 spin_lock_irqsave(&pctrl->lock, flags);
717
718 ctrl0 = readl(chv_padreg(pctrl, offset, CHV_PADCTRL0));
719 ctrl1 = readl(chv_padreg(pctrl, offset, CHV_PADCTRL1));
720 locked = chv_pad_locked(pctrl, offset);
721
722 spin_unlock_irqrestore(&pctrl->lock, flags);
723
724 if (ctrl0 & CHV_PADCTRL0_GPIOEN) {
725 seq_puts(s, "GPIO ");
726 } else {
727 u32 mode;
728
729 mode = ctrl0 & CHV_PADCTRL0_PMODE_MASK;
730 mode >>= CHV_PADCTRL0_PMODE_SHIFT;
731
732 seq_printf(s, "mode %d ", mode);
733 }
734
735 seq_printf(s, "ctrl0 0x%08x ctrl1 0x%08x", ctrl0, ctrl1);
736
737 if (locked)
738 seq_puts(s, " [LOCKED]");
739}
740
/* pinctrl core callbacks: group enumeration and debugfs pad dump */
static const struct pinctrl_ops chv_pinctrl_ops = {
	.get_groups_count = chv_get_groups_count,
	.get_group_name = chv_get_group_name,
	.get_group_pins = chv_get_group_pins,
	.pin_dbg_show = chv_pin_dbg_show,
};
747
748static int chv_get_functions_count(struct pinctrl_dev *pctldev)
749{
750 struct chv_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
751
752 return pctrl->community->nfunctions;
753}
754
755static const char *chv_get_function_name(struct pinctrl_dev *pctldev,
756 unsigned function)
757{
758 struct chv_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
759
760 return pctrl->community->functions[function].name;
761}
762
763static int chv_get_function_groups(struct pinctrl_dev *pctldev,
764 unsigned function,
765 const char * const **groups,
766 unsigned * const ngroups)
767{
768 struct chv_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
769
770 *groups = pctrl->community->functions[function].groups;
771 *ngroups = pctrl->community->functions[function].ngroups;
772 return 0;
773}
774
/*
 * chv_pinmux_set_mux() - mux every pin of @group into its alternate mode.
 *
 * First verifies under the spinlock that no pad in the group is
 * firmware-locked (returns -EBUSY if one is; locked pads only allow
 * TX/RX-state changes).  Then, for each pad: pick the group-wide altfunc
 * or a per-pin override, disable GPIO mode, program the pad mode in
 * PADCTRL0, and set/clear invert-OE in PADCTRL1.
 */
static int chv_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned function,
			      unsigned group)
{
	struct chv_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
	const struct chv_pingroup *grp;
	unsigned long flags;
	int i;

	grp = &pctrl->community->groups[group];

	spin_lock_irqsave(&pctrl->lock, flags);

	/* Check first that the pad is not locked */
	for (i = 0; i < grp->npins; i++) {
		if (chv_pad_locked(pctrl, grp->pins[i])) {
			dev_warn(pctrl->dev, "unable to set mode for locked pin %u\n",
				 grp->pins[i]);
			spin_unlock_irqrestore(&pctrl->lock, flags);
			return -EBUSY;
		}
	}

	for (i = 0; i < grp->npins; i++) {
		const struct chv_alternate_function *altfunc = &grp->altfunc;
		int pin = grp->pins[i];
		void __iomem *reg;
		u32 value;

		/* Check if there is pin-specific config */
		if (grp->overrides) {
			int j;

			for (j = 0; j < grp->noverrides; j++) {
				if (grp->overrides[j].pin == pin) {
					/* Per-pin override wins over the group default */
					altfunc = &grp->overrides[j];
					break;
				}
			}
		}

		reg = chv_padreg(pctrl, pin, CHV_PADCTRL0);
		value = readl(reg);
		/* Disable GPIO mode */
		value &= ~CHV_PADCTRL0_GPIOEN;
		/* Set to desired mode */
		value &= ~CHV_PADCTRL0_PMODE_MASK;
		value |= altfunc->mode << CHV_PADCTRL0_PMODE_SHIFT;
		chv_writel(value, reg);

		/* Update for invert_oe */
		reg = chv_padreg(pctrl, pin, CHV_PADCTRL1);
		value = readl(reg) & ~CHV_PADCTRL1_INVRXTX_MASK;
		if (altfunc->invert_oe)
			value |= CHV_PADCTRL1_INVRXTX_TXENABLE;
		chv_writel(value, reg);

		dev_dbg(pctrl->dev, "configured pin %u mode %u OE %sinverted\n",
			pin, altfunc->mode, altfunc->invert_oe ? "" : "not ");
	}

	spin_unlock_irqrestore(&pctrl->lock, flags);

	return 0;
}
839
/*
 * chv_gpio_request_enable() - prepare pad @offset for use as a GPIO.
 *
 * For a firmware-locked pad: succeed only if the pad is already in GPIO
 * mode (we cannot reconfigure it), otherwise return -EBUSY.  For an
 * unlocked pad: clear any stale interrupt-line mapping, disable interrupt
 * generation and inversion in PADCTRL1, then enable GPIO mode in PADCTRL0.
 * All register accesses happen under the driver spinlock.
 */
static int chv_gpio_request_enable(struct pinctrl_dev *pctldev,
				   struct pinctrl_gpio_range *range,
				   unsigned offset)
{
	struct chv_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
	unsigned long flags;
	void __iomem *reg;
	u32 value;

	spin_lock_irqsave(&pctrl->lock, flags);

	if (chv_pad_locked(pctrl, offset)) {
		value = readl(chv_padreg(pctrl, offset, CHV_PADCTRL0));
		if (!(value & CHV_PADCTRL0_GPIOEN)) {
			/* Locked so cannot enable */
			spin_unlock_irqrestore(&pctrl->lock, flags);
			return -EBUSY;
		}
	} else {
		int i;

		/* Reset the interrupt mapping */
		for (i = 0; i < ARRAY_SIZE(pctrl->intr_lines); i++) {
			if (pctrl->intr_lines[i] == offset) {
				pctrl->intr_lines[i] = 0;
				break;
			}
		}

		/* Disable interrupt generation */
		reg = chv_padreg(pctrl, offset, CHV_PADCTRL1);
		value = readl(reg);
		value &= ~CHV_PADCTRL1_INTWAKECFG_MASK;
		value &= ~CHV_PADCTRL1_INVRXTX_MASK;
		chv_writel(value, reg);

		/* Switch to a GPIO mode */
		reg = chv_padreg(pctrl, offset, CHV_PADCTRL0);
		value = readl(reg) | CHV_PADCTRL0_GPIOEN;
		chv_writel(value, reg);
	}

	spin_unlock_irqrestore(&pctrl->lock, flags);

	return 0;
}
886
887static void chv_gpio_disable_free(struct pinctrl_dev *pctldev,
888 struct pinctrl_gpio_range *range,
889 unsigned offset)
890{
891 struct chv_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
892 unsigned long flags;
893 void __iomem *reg;
894 u32 value;
895
896 spin_lock_irqsave(&pctrl->lock, flags);
897
898 reg = chv_padreg(pctrl, offset, CHV_PADCTRL0);
899 value = readl(reg) & ~CHV_PADCTRL0_GPIOEN;
900 chv_writel(value, reg);
901
902 spin_unlock_irqrestore(&pctrl->lock, flags);
903}
904
905static int chv_gpio_set_direction(struct pinctrl_dev *pctldev,
906 struct pinctrl_gpio_range *range,
907 unsigned offset, bool input)
908{
909 struct chv_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
910 void __iomem *reg = chv_padreg(pctrl, offset, CHV_PADCTRL0);
911 unsigned long flags;
912 u32 ctrl0;
913
914 spin_lock_irqsave(&pctrl->lock, flags);
915
916 ctrl0 = readl(reg) & ~CHV_PADCTRL0_GPIOCFG_MASK;
917 if (input)
918 ctrl0 |= CHV_PADCTRL0_GPIOCFG_GPI << CHV_PADCTRL0_GPIOCFG_SHIFT;
919 else
920 ctrl0 |= CHV_PADCTRL0_GPIOCFG_GPO << CHV_PADCTRL0_GPIOCFG_SHIFT;
921 chv_writel(ctrl0, reg);
922
923 spin_unlock_irqrestore(&pctrl->lock, flags);
924
925 return 0;
926}
927
/* pinmux core callbacks: function enumeration, muxing and GPIO handover */
static const struct pinmux_ops chv_pinmux_ops = {
	.get_functions_count = chv_get_functions_count,
	.get_function_name = chv_get_function_name,
	.get_function_groups = chv_get_function_groups,
	.set_mux = chv_pinmux_set_mux,
	.gpio_request_enable = chv_gpio_request_enable,
	.gpio_disable_free = chv_gpio_disable_free,
	.gpio_set_direction = chv_gpio_set_direction,
};
937
/*
 * chv_config_get() - pinconf callback reading one config @param for @pin.
 *
 * Decodes PADCTRL0/PADCTRL1 (snapshotted under the spinlock) into generic
 * pinconf values.  Returns -EINVAL when the pad is not in the queried
 * state, -ENOTSUPP for unsupported params, 0 with *config packed
 * otherwise.  Pull strengths are reported in ohms (1k/5k/20k).
 */
static int chv_config_get(struct pinctrl_dev *pctldev, unsigned pin,
			  unsigned long *config)
{
	struct chv_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
	enum pin_config_param param = pinconf_to_config_param(*config);
	unsigned long flags;
	u32 ctrl0, ctrl1;
	u16 arg = 0;
	u32 term;

	spin_lock_irqsave(&pctrl->lock, flags);
	ctrl0 = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
	ctrl1 = readl(chv_padreg(pctrl, pin, CHV_PADCTRL1));
	spin_unlock_irqrestore(&pctrl->lock, flags);

	/* Termination field: 0 = no pull, else 1k/5k/20k selector */
	term = (ctrl0 & CHV_PADCTRL0_TERM_MASK) >> CHV_PADCTRL0_TERM_SHIFT;

	switch (param) {
	case PIN_CONFIG_BIAS_DISABLE:
		if (term)
			return -EINVAL;
		break;

	case PIN_CONFIG_BIAS_PULL_UP:
		/* TERM_UP distinguishes pull-up from pull-down */
		if (!(ctrl0 & CHV_PADCTRL0_TERM_UP))
			return -EINVAL;

		switch (term) {
		case CHV_PADCTRL0_TERM_20K:
			arg = 20000;
			break;
		case CHV_PADCTRL0_TERM_5K:
			arg = 5000;
			break;
		case CHV_PADCTRL0_TERM_1K:
			arg = 1000;
			break;
		}

		break;

	case PIN_CONFIG_BIAS_PULL_DOWN:
		if (!term || (ctrl0 & CHV_PADCTRL0_TERM_UP))
			return -EINVAL;

		/* Note: no 1k pull-down exists; only 5k/20k decoded here */
		switch (term) {
		case CHV_PADCTRL0_TERM_20K:
			arg = 20000;
			break;
		case CHV_PADCTRL0_TERM_5K:
			arg = 5000;
			break;
		}

		break;

	case PIN_CONFIG_DRIVE_OPEN_DRAIN:
		if (!(ctrl1 & CHV_PADCTRL1_ODEN))
			return -EINVAL;
		break;

	case PIN_CONFIG_BIAS_HIGH_IMPEDANCE: {
		u32 cfg;

		cfg = ctrl0 & CHV_PADCTRL0_GPIOCFG_MASK;
		cfg >>= CHV_PADCTRL0_GPIOCFG_SHIFT;
		if (cfg != CHV_PADCTRL0_GPIOCFG_HIZ)
			return -EINVAL;

		break;
	}

	default:
		return -ENOTSUPP;
	}

	*config = pinconf_to_config_packed(param, arg);
	return 0;
}
1017
1018static int chv_config_set_pull(struct chv_pinctrl *pctrl, unsigned pin,
1019 enum pin_config_param param, u16 arg)
1020{
1021 void __iomem *reg = chv_padreg(pctrl, pin, CHV_PADCTRL0);
1022 unsigned long flags;
1023 u32 ctrl0, pull;
1024
1025 spin_lock_irqsave(&pctrl->lock, flags);
1026 ctrl0 = readl(reg);
1027
1028 switch (param) {
1029 case PIN_CONFIG_BIAS_DISABLE:
1030 ctrl0 &= ~(CHV_PADCTRL0_TERM_MASK | CHV_PADCTRL0_TERM_UP);
1031 break;
1032
1033 case PIN_CONFIG_BIAS_PULL_UP:
1034 ctrl0 &= ~(CHV_PADCTRL0_TERM_MASK | CHV_PADCTRL0_TERM_UP);
1035
1036 switch (arg) {
1037 case 1000:
1038 /* For 1k there is only pull up */
1039 pull = CHV_PADCTRL0_TERM_1K << CHV_PADCTRL0_TERM_SHIFT;
1040 break;
1041 case 5000:
1042 pull = CHV_PADCTRL0_TERM_5K << CHV_PADCTRL0_TERM_SHIFT;
1043 break;
1044 case 20000:
1045 pull = CHV_PADCTRL0_TERM_20K << CHV_PADCTRL0_TERM_SHIFT;
1046 break;
1047 default:
1048 spin_unlock_irqrestore(&pctrl->lock, flags);
1049 return -EINVAL;
1050 }
1051
1052 ctrl0 |= CHV_PADCTRL0_TERM_UP | pull;
1053 break;
1054
1055 case PIN_CONFIG_BIAS_PULL_DOWN:
1056 ctrl0 &= ~(CHV_PADCTRL0_TERM_MASK | CHV_PADCTRL0_TERM_UP);
1057
1058 switch (arg) {
1059 case 5000:
1060 pull = CHV_PADCTRL0_TERM_5K << CHV_PADCTRL0_TERM_SHIFT;
1061 break;
1062 case 20000:
1063 pull = CHV_PADCTRL0_TERM_20K << CHV_PADCTRL0_TERM_SHIFT;
1064 break;
1065 default:
1066 spin_unlock_irqrestore(&pctrl->lock, flags);
1067 return -EINVAL;
1068 }
1069
1070 ctrl0 |= pull;
1071 break;
1072
1073 default:
1074 spin_unlock_irqrestore(&pctrl->lock, flags);
1075 return -EINVAL;
1076 }
1077
1078 chv_writel(ctrl0, reg);
1079 spin_unlock_irqrestore(&pctrl->lock, flags);
1080
1081 return 0;
1082}
1083
1084static int chv_config_set(struct pinctrl_dev *pctldev, unsigned pin,
1085 unsigned long *configs, unsigned nconfigs)
1086{
1087 struct chv_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
1088 enum pin_config_param param;
1089 int i, ret;
1090 u16 arg;
1091
1092 if (chv_pad_locked(pctrl, pin))
1093 return -EBUSY;
1094
1095 for (i = 0; i < nconfigs; i++) {
1096 param = pinconf_to_config_param(configs[i]);
1097 arg = pinconf_to_config_argument(configs[i]);
1098
1099 switch (param) {
1100 case PIN_CONFIG_BIAS_DISABLE:
1101 case PIN_CONFIG_BIAS_PULL_UP:
1102 case PIN_CONFIG_BIAS_PULL_DOWN:
1103 ret = chv_config_set_pull(pctrl, pin, param, arg);
1104 if (ret)
1105 return ret;
1106 break;
1107
1108 default:
1109 return -ENOTSUPP;
1110 }
1111
1112 dev_dbg(pctrl->dev, "pin %d set config %d arg %u\n", pin,
1113 param, arg);
1114 }
1115
1116 return 0;
1117}
1118
/* Pinconf callbacks; is_generic enables generic pinconf parameter parsing */
static const struct pinconf_ops chv_pinconf_ops = {
	.is_generic = true,
	.pin_config_set = chv_config_set,
	.pin_config_get = chv_config_get,
};
1124
/*
 * Template pinctrl descriptor; name/pins/npins are filled in per-community
 * at probe time (see chv_pinctrl_probe()), hence this is not const.
 */
static struct pinctrl_desc chv_pinctrl_desc = {
	.pctlops = &chv_pinctrl_ops,
	.pmxops = &chv_pinmux_ops,
	.confops = &chv_pinconf_ops,
	.owner = THIS_MODULE,
};
1131
/* gpio_chip .request hook: route the request through the pinctrl core */
static int chv_gpio_request(struct gpio_chip *chip, unsigned offset)
{
	return pinctrl_request_gpio(chip->base + offset);
}
1136
/* gpio_chip .free hook: release the pin back to the pinctrl core */
static void chv_gpio_free(struct gpio_chip *chip, unsigned offset)
{
	pinctrl_free_gpio(chip->base + offset);
}
1141
/*
 * Translate a GPIO offset within this chip to the hardware pin number.
 * GPIO offsets are dense, pin numbers within a community need not be.
 */
static unsigned chv_gpio_offset_to_pin(struct chv_pinctrl *pctrl,
				       unsigned offset)
{
	return pctrl->community->pins[offset].number;
}
1147
1148static int chv_gpio_get(struct gpio_chip *chip, unsigned offset)
1149{
1150 struct chv_pinctrl *pctrl = gpiochip_to_pinctrl(chip);
1151 int pin = chv_gpio_offset_to_pin(pctrl, offset);
1152 u32 ctrl0, cfg;
1153
1154 ctrl0 = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
1155
1156 cfg = ctrl0 & CHV_PADCTRL0_GPIOCFG_MASK;
1157 cfg >>= CHV_PADCTRL0_GPIOCFG_SHIFT;
1158
1159 if (cfg == CHV_PADCTRL0_GPIOCFG_GPO)
1160 return !!(ctrl0 & CHV_PADCTRL0_GPIOTXSTATE);
1161 return !!(ctrl0 & CHV_PADCTRL0_GPIORXSTATE);
1162}
1163
1164static void chv_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
1165{
1166 struct chv_pinctrl *pctrl = gpiochip_to_pinctrl(chip);
1167 unsigned pin = chv_gpio_offset_to_pin(pctrl, offset);
1168 unsigned long flags;
1169 void __iomem *reg;
1170 u32 ctrl0;
1171
1172 spin_lock_irqsave(&pctrl->lock, flags);
1173
1174 reg = chv_padreg(pctrl, pin, CHV_PADCTRL0);
1175 ctrl0 = readl(reg);
1176
1177 if (value)
1178 ctrl0 |= CHV_PADCTRL0_GPIOTXSTATE;
1179 else
1180 ctrl0 &= ~CHV_PADCTRL0_GPIOTXSTATE;
1181
1182 chv_writel(ctrl0, reg);
1183
1184 spin_unlock_irqrestore(&pctrl->lock, flags);
1185}
1186
1187static int chv_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
1188{
1189 struct chv_pinctrl *pctrl = gpiochip_to_pinctrl(chip);
1190 unsigned pin = chv_gpio_offset_to_pin(pctrl, offset);
1191 u32 ctrl0, direction;
1192
1193 ctrl0 = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
1194
1195 direction = ctrl0 & CHV_PADCTRL0_GPIOCFG_MASK;
1196 direction >>= CHV_PADCTRL0_GPIOCFG_SHIFT;
1197
1198 return direction != CHV_PADCTRL0_GPIOCFG_GPO;
1199}
1200
/* gpio_chip .direction_input hook: delegate to the pinctrl core */
static int chv_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
	return pinctrl_gpio_direction_input(chip->base + offset);
}
1205
1206static int chv_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
1207 int value)
1208{
1209 return pinctrl_gpio_direction_output(chip->base + offset);
1210}
1211
/* Template gpio_chip; copied and completed per-device in chv_gpio_probe() */
static const struct gpio_chip chv_gpio_chip = {
	.owner = THIS_MODULE,
	.request = chv_gpio_request,
	.free = chv_gpio_free,
	.get_direction = chv_gpio_get_direction,
	.direction_input = chv_gpio_direction_input,
	.direction_output = chv_gpio_direction_output,
	.get = chv_gpio_get,
	.set = chv_gpio_set,
};
1222
1223static void chv_gpio_irq_ack(struct irq_data *d)
1224{
1225 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
1226 struct chv_pinctrl *pctrl = gpiochip_to_pinctrl(gc);
1227 int pin = chv_gpio_offset_to_pin(pctrl, irqd_to_hwirq(d));
1228 u32 intr_line;
1229
1230 spin_lock(&pctrl->lock);
1231
1232 intr_line = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
1233 intr_line &= CHV_PADCTRL0_INTSEL_MASK;
1234 intr_line >>= CHV_PADCTRL0_INTSEL_SHIFT;
1235 chv_writel(BIT(intr_line), pctrl->regs + CHV_INTSTAT);
1236
1237 spin_unlock(&pctrl->lock);
1238}
1239
1240static void chv_gpio_irq_mask_unmask(struct irq_data *d, bool mask)
1241{
1242 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
1243 struct chv_pinctrl *pctrl = gpiochip_to_pinctrl(gc);
1244 int pin = chv_gpio_offset_to_pin(pctrl, irqd_to_hwirq(d));
1245 u32 value, intr_line;
1246 unsigned long flags;
1247
1248 spin_lock_irqsave(&pctrl->lock, flags);
1249
1250 intr_line = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
1251 intr_line &= CHV_PADCTRL0_INTSEL_MASK;
1252 intr_line >>= CHV_PADCTRL0_INTSEL_SHIFT;
1253
1254 value = readl(pctrl->regs + CHV_INTMASK);
1255 if (mask)
1256 value &= ~BIT(intr_line);
1257 else
1258 value |= BIT(intr_line);
1259 chv_writel(value, pctrl->regs + CHV_INTMASK);
1260
1261 spin_unlock_irqrestore(&pctrl->lock, flags);
1262}
1263
/* irq_chip .irq_mask hook */
static void chv_gpio_irq_mask(struct irq_data *d)
{
	chv_gpio_irq_mask_unmask(d, true);
}
1268
/* irq_chip .irq_unmask hook */
static void chv_gpio_irq_unmask(struct irq_data *d)
{
	chv_gpio_irq_mask_unmask(d, false);
}
1273
/*
 * chv_gpio_irq_type() - configure trigger type for a GPIO interrupt.
 * @d: irq data for the virtual interrupt
 * @type: requested IRQ_TYPE_* trigger flags
 *
 * Programs PADCTRL1 IntWakeCfg/inversion bits (only when the pad is not
 * BIOS-locked), records the pad's interrupt-line -> GPIO-offset mapping
 * used by the chained handler, and installs the matching flow handler.
 * Always returns 0.
 */
static int chv_gpio_irq_type(struct irq_data *d, unsigned type)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
	struct chv_pinctrl *pctrl = gpiochip_to_pinctrl(gc);
	unsigned offset = irqd_to_hwirq(d);
	int pin = chv_gpio_offset_to_pin(pctrl, offset);
	unsigned long flags;
	u32 value;

	spin_lock_irqsave(&pctrl->lock, flags);

	/*
	 * Pins which can be used as shared interrupt are configured in
	 * BIOS. Driver trusts BIOS configurations and assigns different
	 * handler according to the irq type.
	 *
	 * Driver needs to save the mapping between each pin and
	 * its interrupt line.
	 * 1. If the pin cfg is locked in BIOS:
	 *	Trust BIOS has programmed IntWakeCfg bits correctly,
	 *	driver just needs to save the mapping.
	 * 2. If the pin cfg is not locked in BIOS:
	 *	Driver programs the IntWakeCfg bits and save the mapping.
	 */
	if (!chv_pad_locked(pctrl, pin)) {
		void __iomem *reg = chv_padreg(pctrl, pin, CHV_PADCTRL1);

		value = readl(reg);
		value &= ~CHV_PADCTRL1_INTWAKECFG_MASK;
		value &= ~CHV_PADCTRL1_INVRXTX_MASK;

		if (type & IRQ_TYPE_EDGE_BOTH) {
			if ((type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH)
				value |= CHV_PADCTRL1_INTWAKECFG_BOTH;
			else if (type & IRQ_TYPE_EDGE_RISING)
				value |= CHV_PADCTRL1_INTWAKECFG_RISING;
			else if (type & IRQ_TYPE_EDGE_FALLING)
				value |= CHV_PADCTRL1_INTWAKECFG_FALLING;
		} else if (type & IRQ_TYPE_LEVEL_MASK) {
			/* Active-low level triggers are made by inverting RX */
			value |= CHV_PADCTRL1_INTWAKECFG_LEVEL;
			if (type & IRQ_TYPE_LEVEL_LOW)
				value |= CHV_PADCTRL1_INVRXTX_RXDATA;
		}

		chv_writel(value, reg);
	}

	/* Record which GPIO offset is behind this shared interrupt line */
	value = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
	value &= CHV_PADCTRL0_INTSEL_MASK;
	value >>= CHV_PADCTRL0_INTSEL_SHIFT;

	pctrl->intr_lines[value] = offset;

	if (type & IRQ_TYPE_EDGE_BOTH)
		__irq_set_handler_locked(d->irq, handle_edge_irq);
	else if (type & IRQ_TYPE_LEVEL_MASK)
		__irq_set_handler_locked(d->irq, handle_level_irq);

	spin_unlock_irqrestore(&pctrl->lock, flags);

	return 0;
}
1336
/* irq_chip for GPIO interrupts; wake is handled by the platform (skip) */
static struct irq_chip chv_gpio_irqchip = {
	.name = "chv-gpio",
	.irq_ack = chv_gpio_irq_ack,
	.irq_mask = chv_gpio_irq_mask,
	.irq_unmask = chv_gpio_irq_unmask,
	.irq_set_type = chv_gpio_irq_type,
	.flags = IRQCHIP_SKIP_SET_WAKE,
};
1345
1346static void chv_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
1347{
1348 struct gpio_chip *gc = irq_desc_get_handler_data(desc);
1349 struct chv_pinctrl *pctrl = gpiochip_to_pinctrl(gc);
1350 struct irq_chip *chip = irq_get_chip(irq);
1351 unsigned long pending;
1352 u32 intr_line;
1353
1354 chained_irq_enter(chip, desc);
1355
1356 pending = readl(pctrl->regs + CHV_INTSTAT);
1357 for_each_set_bit(intr_line, &pending, 16) {
1358 unsigned irq, offset;
1359
1360 offset = pctrl->intr_lines[intr_line];
1361 irq = irq_find_mapping(gc->irqdomain, offset);
1362 generic_handle_irq(irq);
1363 }
1364
1365 chained_irq_exit(chip, desc);
1366}
1367
/*
 * chv_gpio_probe() - register the gpio_chip and its irqchip for one
 * community.
 * @pctrl: driver context (community already selected)
 * @irq: parent interrupt for the chained GPIO handler
 *
 * Registers the chip, adds the community's GPIO-to-pin ranges, masks and
 * clears all 16 interrupt lines, then wires up the irqchip. On any
 * failure after gpiochip_add() the chip is removed again.
 *
 * Returns 0 on success or a negative errno.
 */
static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
{
	const struct chv_gpio_pinrange *range;
	struct gpio_chip *chip = &pctrl->chip;
	int ret, i, offset;

	/* Start from the shared template, then fill per-device fields */
	*chip = chv_gpio_chip;

	chip->ngpio = pctrl->community->ngpios;
	chip->label = dev_name(pctrl->dev);
	chip->dev = pctrl->dev;
	chip->base = -1;	/* let the GPIO core pick the base */

	ret = gpiochip_add(chip);
	if (ret) {
		dev_err(pctrl->dev, "Failed to register gpiochip\n");
		return ret;
	}

	/* Map each contiguous GPIO range onto its hardware pin range */
	for (i = 0, offset = 0; i < pctrl->community->ngpio_ranges; i++) {
		range = &pctrl->community->gpio_ranges[i];
		ret = gpiochip_add_pin_range(chip, dev_name(pctrl->dev), offset,
					     range->base, range->npins);
		if (ret) {
			dev_err(pctrl->dev, "failed to add GPIO pin range\n");
			goto fail;
		}

		offset += range->npins;
	}

	/* Mask and clear all interrupts */
	chv_writel(0, pctrl->regs + CHV_INTMASK);
	chv_writel(0xffff, pctrl->regs + CHV_INTSTAT);

	ret = gpiochip_irqchip_add(chip, &chv_gpio_irqchip, 0,
				   handle_simple_irq, IRQ_TYPE_NONE);
	if (ret) {
		dev_err(pctrl->dev, "failed to add IRQ chip\n");
		goto fail;
	}

	gpiochip_set_chained_irqchip(chip, &chv_gpio_irqchip, irq,
				     chv_gpio_irq_handler);
	return 0;

fail:
	gpiochip_remove(chip);

	return ret;
}
1419
/*
 * chv_pinctrl_probe() - platform driver probe.
 *
 * Matches the device's ACPI unique ID (_UID) against the known pin
 * communities, maps the community's MMIO registers, registers the
 * pinctrl descriptor and finally the GPIO chip/irqchip.
 *
 * Returns 0 on success or a negative errno.
 */
static int chv_pinctrl_probe(struct platform_device *pdev)
{
	struct chv_pinctrl *pctrl;
	struct acpi_device *adev;
	struct resource *res;
	int ret, irq, i;

	adev = ACPI_COMPANION(&pdev->dev);
	if (!adev)
		return -ENODEV;

	pctrl = devm_kzalloc(&pdev->dev, sizeof(*pctrl), GFP_KERNEL);
	if (!pctrl)
		return -ENOMEM;

	/* The ACPI _UID selects which pin community this device is */
	for (i = 0; i < ARRAY_SIZE(chv_communities); i++)
		if (!strcmp(adev->pnp.unique_id, chv_communities[i]->uid)) {
			pctrl->community = chv_communities[i];
			break;
		}
	if (i == ARRAY_SIZE(chv_communities))
		return -ENODEV;

	spin_lock_init(&pctrl->lock);
	pctrl->dev = &pdev->dev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pctrl->regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(pctrl->regs))
		return PTR_ERR(pctrl->regs);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "failed to get interrupt number\n");
		return irq;
	}

	/* Copy descriptor template and fill in the per-community parts */
	pctrl->pctldesc = chv_pinctrl_desc;
	pctrl->pctldesc.name = dev_name(&pdev->dev);
	pctrl->pctldesc.pins = pctrl->community->pins;
	pctrl->pctldesc.npins = pctrl->community->npins;

	pctrl->pctldev = pinctrl_register(&pctrl->pctldesc, &pdev->dev, pctrl);
	if (!pctrl->pctldev) {
		dev_err(&pdev->dev, "failed to register pinctrl driver\n");
		return -ENODEV;
	}

	ret = chv_gpio_probe(pctrl, irq);
	if (ret) {
		/* Undo pinctrl registration on GPIO setup failure */
		pinctrl_unregister(pctrl->pctldev);
		return ret;
	}

	platform_set_drvdata(pdev, pctrl);

	return 0;
}
1478
/* Platform driver remove: tear down in reverse order of probe */
static int chv_pinctrl_remove(struct platform_device *pdev)
{
	struct chv_pinctrl *pctrl = platform_get_drvdata(pdev);

	gpiochip_remove(&pctrl->chip);
	pinctrl_unregister(pctrl->pctldev);

	return 0;
}
1488
/* ACPI HID for the Cherryview/Braswell GPIO controller */
static const struct acpi_device_id chv_pinctrl_acpi_match[] = {
	{ "INT33FF" },
	{ }
};
MODULE_DEVICE_TABLE(acpi, chv_pinctrl_acpi_match);
1494
/* Platform driver glue; matched via the ACPI table above */
static struct platform_driver chv_pinctrl_driver = {
	.probe = chv_pinctrl_probe,
	.remove = chv_pinctrl_remove,
	.driver = {
		.name = "cherryview-pinctrl",
		.owner = THIS_MODULE,
		.acpi_match_table = chv_pinctrl_acpi_match,
	},
};
1504
/*
 * Registered at subsys_initcall level so pin control is available before
 * drivers that depend on it probe.
 */
static int __init chv_pinctrl_init(void)
{
	return platform_driver_register(&chv_pinctrl_driver);
}
subsys_initcall(chv_pinctrl_init);
1510
/* Module exit: unregister the platform driver */
static void __exit chv_pinctrl_exit(void)
{
	platform_driver_unregister(&chv_pinctrl_driver);
}
module_exit(chv_pinctrl_exit);
1516
1517MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
1518MODULE_DESCRIPTION("Intel Cherryview/Braswell pinctrl driver");
1519MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/nomadik/pinctrl-abx500.c b/drivers/pinctrl/nomadik/pinctrl-abx500.c
index 228972827132..e1087c75e4f4 100644
--- a/drivers/pinctrl/nomadik/pinctrl-abx500.c
+++ b/drivers/pinctrl/nomadik/pinctrl-abx500.c
@@ -891,14 +891,13 @@ static int abx500_dt_subnode_to_map(struct pinctrl_dev *pctldev,
891 const char *function = NULL; 891 const char *function = NULL;
892 unsigned long *configs; 892 unsigned long *configs;
893 unsigned int nconfigs = 0; 893 unsigned int nconfigs = 0;
894 bool has_config = 0;
895 struct property *prop; 894 struct property *prop;
896 const char *group, *gpio_name;
897 struct device_node *np_config;
898 895
899 ret = of_property_read_string(np, "ste,function", &function); 896 ret = of_property_read_string(np, "function", &function);
900 if (ret >= 0) { 897 if (ret >= 0) {
901 ret = of_property_count_strings(np, "ste,pins"); 898 const char *group;
899
900 ret = of_property_count_strings(np, "groups");
902 if (ret < 0) 901 if (ret < 0)
903 goto exit; 902 goto exit;
904 903
@@ -907,7 +906,7 @@ static int abx500_dt_subnode_to_map(struct pinctrl_dev *pctldev,
907 if (ret < 0) 906 if (ret < 0)
908 goto exit; 907 goto exit;
909 908
910 of_property_for_each_string(np, "ste,pins", prop, group) { 909 of_property_for_each_string(np, "groups", prop, group) {
911 ret = abx500_dt_add_map_mux(map, reserved_maps, 910 ret = abx500_dt_add_map_mux(map, reserved_maps,
912 num_maps, group, function); 911 num_maps, group, function);
913 if (ret < 0) 912 if (ret < 0)
@@ -916,18 +915,11 @@ static int abx500_dt_subnode_to_map(struct pinctrl_dev *pctldev,
916 } 915 }
917 916
918 ret = pinconf_generic_parse_dt_config(np, &configs, &nconfigs); 917 ret = pinconf_generic_parse_dt_config(np, &configs, &nconfigs);
919 if (nconfigs) 918 if (nconfigs) {
920 has_config = 1; 919 const char *gpio_name;
921 np_config = of_parse_phandle(np, "ste,config", 0); 920 const char *pin;
922 if (np_config) { 921
923 ret = pinconf_generic_parse_dt_config(np_config, &configs, 922 ret = of_property_count_strings(np, "pins");
924 &nconfigs);
925 if (ret)
926 goto exit;
927 has_config |= nconfigs;
928 }
929 if (has_config) {
930 ret = of_property_count_strings(np, "ste,pins");
931 if (ret < 0) 923 if (ret < 0)
932 goto exit; 924 goto exit;
933 925
@@ -937,8 +929,8 @@ static int abx500_dt_subnode_to_map(struct pinctrl_dev *pctldev,
937 if (ret < 0) 929 if (ret < 0)
938 goto exit; 930 goto exit;
939 931
940 of_property_for_each_string(np, "ste,pins", prop, group) { 932 of_property_for_each_string(np, "pins", prop, pin) {
941 gpio_name = abx500_find_pin_name(pctldev, group); 933 gpio_name = abx500_find_pin_name(pctldev, pin);
942 934
943 ret = abx500_dt_add_map_configs(map, reserved_maps, 935 ret = abx500_dt_add_map_configs(map, reserved_maps,
944 num_maps, gpio_name, configs, 1); 936 num_maps, gpio_name, configs, 1);
@@ -1112,6 +1104,7 @@ out:
1112static const struct pinconf_ops abx500_pinconf_ops = { 1104static const struct pinconf_ops abx500_pinconf_ops = {
1113 .pin_config_get = abx500_pin_config_get, 1105 .pin_config_get = abx500_pin_config_get,
1114 .pin_config_set = abx500_pin_config_set, 1106 .pin_config_set = abx500_pin_config_set,
1107 .is_generic = true,
1115}; 1108};
1116 1109
1117static struct pinctrl_desc abx500_pinctrl_desc = { 1110static struct pinctrl_desc abx500_pinctrl_desc = {
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c b/drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c
index ed39dcafd4f8..2cd71470f270 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c
@@ -291,6 +291,7 @@ static const unsigned u0_a_1_pins[] = { STN8815_PIN_B4, STN8815_PIN_D5,
291static const unsigned mmcsd_a_1_pins[] = { STN8815_PIN_B10, STN8815_PIN_A10, 291static const unsigned mmcsd_a_1_pins[] = { STN8815_PIN_B10, STN8815_PIN_A10,
292 STN8815_PIN_C11, STN8815_PIN_B11, STN8815_PIN_A11, STN8815_PIN_C12, 292 STN8815_PIN_C11, STN8815_PIN_B11, STN8815_PIN_A11, STN8815_PIN_C12,
293 STN8815_PIN_B12, STN8815_PIN_A12, STN8815_PIN_C13, STN8815_PIN_C15 }; 293 STN8815_PIN_B12, STN8815_PIN_A12, STN8815_PIN_C13, STN8815_PIN_C15 };
294static const unsigned mmcsd_b_1_pins[] = { STN8815_PIN_D15 };
294static const unsigned u1_a_1_pins[] = { STN8815_PIN_M2, STN8815_PIN_L1, 295static const unsigned u1_a_1_pins[] = { STN8815_PIN_M2, STN8815_PIN_L1,
295 STN8815_PIN_F3, STN8815_PIN_F2 }; 296 STN8815_PIN_F3, STN8815_PIN_F2 };
296static const unsigned i2c1_a_1_pins[] = { STN8815_PIN_L4, STN8815_PIN_L3 }; 297static const unsigned i2c1_a_1_pins[] = { STN8815_PIN_L4, STN8815_PIN_L3 };
@@ -305,6 +306,7 @@ static const unsigned i2cusb_b_1_pins[] = { STN8815_PIN_C21, STN8815_PIN_C20 };
305static const struct nmk_pingroup nmk_stn8815_groups[] = { 306static const struct nmk_pingroup nmk_stn8815_groups[] = {
306 STN8815_PIN_GROUP(u0_a_1, NMK_GPIO_ALT_A), 307 STN8815_PIN_GROUP(u0_a_1, NMK_GPIO_ALT_A),
307 STN8815_PIN_GROUP(mmcsd_a_1, NMK_GPIO_ALT_A), 308 STN8815_PIN_GROUP(mmcsd_a_1, NMK_GPIO_ALT_A),
309 STN8815_PIN_GROUP(mmcsd_b_1, NMK_GPIO_ALT_B),
308 STN8815_PIN_GROUP(u1_a_1, NMK_GPIO_ALT_A), 310 STN8815_PIN_GROUP(u1_a_1, NMK_GPIO_ALT_A),
309 STN8815_PIN_GROUP(i2c1_a_1, NMK_GPIO_ALT_A), 311 STN8815_PIN_GROUP(i2c1_a_1, NMK_GPIO_ALT_A),
310 STN8815_PIN_GROUP(i2c0_a_1, NMK_GPIO_ALT_A), 312 STN8815_PIN_GROUP(i2c0_a_1, NMK_GPIO_ALT_A),
@@ -317,7 +319,7 @@ static const struct nmk_pingroup nmk_stn8815_groups[] = {
317static const char * const a##_groups[] = { b }; 319static const char * const a##_groups[] = { b };
318 320
319STN8815_FUNC_GROUPS(u0, "u0_a_1"); 321STN8815_FUNC_GROUPS(u0, "u0_a_1");
320STN8815_FUNC_GROUPS(mmcsd, "mmcsd_a_1"); 322STN8815_FUNC_GROUPS(mmcsd, "mmcsd_a_1", "mmcsd_b_1");
321STN8815_FUNC_GROUPS(u1, "u1_a_1", "u1_b_1"); 323STN8815_FUNC_GROUPS(u1, "u1_a_1", "u1_b_1");
322STN8815_FUNC_GROUPS(i2c1, "i2c1_a_1"); 324STN8815_FUNC_GROUPS(i2c1, "i2c1_a_1");
323STN8815_FUNC_GROUPS(i2c0, "i2c0_a_1"); 325STN8815_FUNC_GROUPS(i2c0, "i2c0_a_1");
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
index 746db6acf648..ad99ba886e50 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
@@ -1520,12 +1520,13 @@ static int nmk_pinctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
1520 unsigned long configs = 0; 1520 unsigned long configs = 0;
1521 bool has_config = 0; 1521 bool has_config = 0;
1522 struct property *prop; 1522 struct property *prop;
1523 const char *group, *gpio_name;
1524 struct device_node *np_config; 1523 struct device_node *np_config;
1525 1524
1526 ret = of_property_read_string(np, "ste,function", &function); 1525 ret = of_property_read_string(np, "function", &function);
1527 if (ret >= 0) { 1526 if (ret >= 0) {
1528 ret = of_property_count_strings(np, "ste,pins"); 1527 const char *group;
1528
1529 ret = of_property_count_strings(np, "groups");
1529 if (ret < 0) 1530 if (ret < 0)
1530 goto exit; 1531 goto exit;
1531 1532
@@ -1535,7 +1536,7 @@ static int nmk_pinctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
1535 if (ret < 0) 1536 if (ret < 0)
1536 goto exit; 1537 goto exit;
1537 1538
1538 of_property_for_each_string(np, "ste,pins", prop, group) { 1539 of_property_for_each_string(np, "groups", prop, group) {
1539 ret = nmk_dt_add_map_mux(map, reserved_maps, num_maps, 1540 ret = nmk_dt_add_map_mux(map, reserved_maps, num_maps,
1540 group, function); 1541 group, function);
1541 if (ret < 0) 1542 if (ret < 0)
@@ -1548,7 +1549,10 @@ static int nmk_pinctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
1548 if (np_config) 1549 if (np_config)
1549 has_config |= nmk_pinctrl_dt_get_config(np_config, &configs); 1550 has_config |= nmk_pinctrl_dt_get_config(np_config, &configs);
1550 if (has_config) { 1551 if (has_config) {
1551 ret = of_property_count_strings(np, "ste,pins"); 1552 const char *gpio_name;
1553 const char *pin;
1554
1555 ret = of_property_count_strings(np, "pins");
1552 if (ret < 0) 1556 if (ret < 0)
1553 goto exit; 1557 goto exit;
1554 ret = pinctrl_utils_reserve_map(pctldev, map, 1558 ret = pinctrl_utils_reserve_map(pctldev, map,
@@ -1557,8 +1561,8 @@ static int nmk_pinctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
1557 if (ret < 0) 1561 if (ret < 0)
1558 goto exit; 1562 goto exit;
1559 1563
1560 of_property_for_each_string(np, "ste,pins", prop, group) { 1564 of_property_for_each_string(np, "pins", prop, pin) {
1561 gpio_name = nmk_find_pin_name(pctldev, group); 1565 gpio_name = nmk_find_pin_name(pctldev, pin);
1562 1566
1563 ret = nmk_dt_add_map_configs(map, reserved_maps, 1567 ret = nmk_dt_add_map_configs(map, reserved_maps,
1564 num_maps, 1568 num_maps,
diff --git a/drivers/pinctrl/pinconf-generic.c b/drivers/pinctrl/pinconf-generic.c
index 29ff77f90fcb..f78b416d7984 100644
--- a/drivers/pinctrl/pinconf-generic.c
+++ b/drivers/pinctrl/pinconf-generic.c
@@ -32,30 +32,32 @@ struct pin_config_item {
32 const enum pin_config_param param; 32 const enum pin_config_param param;
33 const char * const display; 33 const char * const display;
34 const char * const format; 34 const char * const format;
35 bool has_arg;
35}; 36};
36 37
37#define PCONFDUMP(a, b, c) { .param = a, .display = b, .format = c } 38#define PCONFDUMP(a, b, c, d) { .param = a, .display = b, .format = c, \
39 .has_arg = d }
38 40
39static struct pin_config_item conf_items[] = { 41static const struct pin_config_item conf_items[] = {
40 PCONFDUMP(PIN_CONFIG_BIAS_DISABLE, "input bias disabled", NULL), 42 PCONFDUMP(PIN_CONFIG_BIAS_DISABLE, "input bias disabled", NULL, false),
41 PCONFDUMP(PIN_CONFIG_BIAS_HIGH_IMPEDANCE, "input bias high impedance", NULL), 43 PCONFDUMP(PIN_CONFIG_BIAS_HIGH_IMPEDANCE, "input bias high impedance", NULL, false),
42 PCONFDUMP(PIN_CONFIG_BIAS_BUS_HOLD, "input bias bus hold", NULL), 44 PCONFDUMP(PIN_CONFIG_BIAS_BUS_HOLD, "input bias bus hold", NULL, false),
43 PCONFDUMP(PIN_CONFIG_BIAS_PULL_UP, "input bias pull up", NULL), 45 PCONFDUMP(PIN_CONFIG_BIAS_PULL_UP, "input bias pull up", NULL, false),
44 PCONFDUMP(PIN_CONFIG_BIAS_PULL_DOWN, "input bias pull down", NULL), 46 PCONFDUMP(PIN_CONFIG_BIAS_PULL_DOWN, "input bias pull down", NULL, false),
45 PCONFDUMP(PIN_CONFIG_BIAS_PULL_PIN_DEFAULT, 47 PCONFDUMP(PIN_CONFIG_BIAS_PULL_PIN_DEFAULT,
46 "input bias pull to pin specific state", NULL), 48 "input bias pull to pin specific state", NULL, false),
47 PCONFDUMP(PIN_CONFIG_DRIVE_PUSH_PULL, "output drive push pull", NULL), 49 PCONFDUMP(PIN_CONFIG_DRIVE_PUSH_PULL, "output drive push pull", NULL, false),
48 PCONFDUMP(PIN_CONFIG_DRIVE_OPEN_DRAIN, "output drive open drain", NULL), 50 PCONFDUMP(PIN_CONFIG_DRIVE_OPEN_DRAIN, "output drive open drain", NULL, false),
49 PCONFDUMP(PIN_CONFIG_DRIVE_OPEN_SOURCE, "output drive open source", NULL), 51 PCONFDUMP(PIN_CONFIG_DRIVE_OPEN_SOURCE, "output drive open source", NULL, false),
50 PCONFDUMP(PIN_CONFIG_DRIVE_STRENGTH, "output drive strength", "mA"), 52 PCONFDUMP(PIN_CONFIG_DRIVE_STRENGTH, "output drive strength", "mA", true),
51 PCONFDUMP(PIN_CONFIG_INPUT_ENABLE, "input enabled", NULL), 53 PCONFDUMP(PIN_CONFIG_INPUT_ENABLE, "input enabled", NULL, false),
52 PCONFDUMP(PIN_CONFIG_INPUT_SCHMITT_ENABLE, "input schmitt enabled", NULL), 54 PCONFDUMP(PIN_CONFIG_INPUT_SCHMITT_ENABLE, "input schmitt enabled", NULL, false),
53 PCONFDUMP(PIN_CONFIG_INPUT_SCHMITT, "input schmitt trigger", NULL), 55 PCONFDUMP(PIN_CONFIG_INPUT_SCHMITT, "input schmitt trigger", NULL, false),
54 PCONFDUMP(PIN_CONFIG_INPUT_DEBOUNCE, "input debounce", "usec"), 56 PCONFDUMP(PIN_CONFIG_INPUT_DEBOUNCE, "input debounce", "usec", true),
55 PCONFDUMP(PIN_CONFIG_POWER_SOURCE, "pin power source", "selector"), 57 PCONFDUMP(PIN_CONFIG_POWER_SOURCE, "pin power source", "selector", true),
56 PCONFDUMP(PIN_CONFIG_SLEW_RATE, "slew rate", NULL), 58 PCONFDUMP(PIN_CONFIG_SLEW_RATE, "slew rate", NULL, true),
57 PCONFDUMP(PIN_CONFIG_LOW_POWER_MODE, "pin low power", "mode"), 59 PCONFDUMP(PIN_CONFIG_LOW_POWER_MODE, "pin low power", "mode", true),
58 PCONFDUMP(PIN_CONFIG_OUTPUT, "pin output", "level"), 60 PCONFDUMP(PIN_CONFIG_OUTPUT, "pin output", "level", true),
59}; 61};
60 62
61void pinconf_generic_dump_pin(struct pinctrl_dev *pctldev, 63void pinconf_generic_dump_pin(struct pinctrl_dev *pctldev,
@@ -85,11 +87,14 @@ void pinconf_generic_dump_pin(struct pinctrl_dev *pctldev,
85 seq_puts(s, " "); 87 seq_puts(s, " ");
86 seq_puts(s, conf_items[i].display); 88 seq_puts(s, conf_items[i].display);
87 /* Print unit if available */ 89 /* Print unit if available */
88 if (conf_items[i].format && 90 if (conf_items[i].has_arg) {
89 pinconf_to_config_argument(config) != 0) 91 seq_printf(s, " (%u",
90 seq_printf(s, " (%u %s)", 92 pinconf_to_config_argument(config));
91 pinconf_to_config_argument(config), 93 if (conf_items[i].format)
92 conf_items[i].format); 94 seq_printf(s, " %s)", conf_items[i].format);
95 else
96 seq_puts(s, ")");
97 }
93 } 98 }
94} 99}
95 100
@@ -121,10 +126,14 @@ void pinconf_generic_dump_group(struct pinctrl_dev *pctldev,
121 seq_puts(s, " "); 126 seq_puts(s, " ");
122 seq_puts(s, conf_items[i].display); 127 seq_puts(s, conf_items[i].display);
123 /* Print unit if available */ 128 /* Print unit if available */
124 if (conf_items[i].format && config != 0) 129 if (conf_items[i].has_arg) {
125 seq_printf(s, " (%u %s)", 130 seq_printf(s, " (%u",
126 pinconf_to_config_argument(config), 131 pinconf_to_config_argument(config));
127 conf_items[i].format); 132 if (conf_items[i].format)
133 seq_printf(s, " %s)", conf_items[i].format);
134 else
135 seq_puts(s, ")");
136 }
128 } 137 }
129} 138}
130 139
@@ -150,7 +159,7 @@ struct pinconf_generic_dt_params {
150 u32 default_value; 159 u32 default_value;
151}; 160};
152 161
153static struct pinconf_generic_dt_params dt_params[] = { 162static const struct pinconf_generic_dt_params dt_params[] = {
154 { "bias-disable", PIN_CONFIG_BIAS_DISABLE, 0 }, 163 { "bias-disable", PIN_CONFIG_BIAS_DISABLE, 0 },
155 { "bias-high-impedance", PIN_CONFIG_BIAS_HIGH_IMPEDANCE, 0 }, 164 { "bias-high-impedance", PIN_CONFIG_BIAS_HIGH_IMPEDANCE, 0 },
156 { "bias-bus-hold", PIN_CONFIG_BIAS_BUS_HOLD, 0 }, 165 { "bias-bus-hold", PIN_CONFIG_BIAS_BUS_HOLD, 0 },
@@ -200,7 +209,7 @@ int pinconf_generic_parse_dt_config(struct device_node *np,
200 return -ENOMEM; 209 return -ENOMEM;
201 210
202 for (i = 0; i < ARRAY_SIZE(dt_params); i++) { 211 for (i = 0; i < ARRAY_SIZE(dt_params); i++) {
203 struct pinconf_generic_dt_params *par = &dt_params[i]; 212 const struct pinconf_generic_dt_params *par = &dt_params[i];
204 ret = of_property_read_u32(np, par->property, &val); 213 ret = of_property_read_u32(np, par->property, &val);
205 214
206 /* property not found */ 215 /* property not found */
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index 354a81d40925..66db9849aca8 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -25,9 +25,7 @@
25/* Since we request GPIOs from ourself */ 25/* Since we request GPIOs from ourself */
26#include <linux/pinctrl/consumer.h> 26#include <linux/pinctrl/consumer.h>
27 27
28#include <mach/hardware.h> 28#include "pinctrl-at91.h"
29#include <mach/at91_pio.h>
30
31#include "core.h" 29#include "core.h"
32 30
33#define MAX_GPIO_BANKS 5 31#define MAX_GPIO_BANKS 5
@@ -1344,7 +1342,6 @@ static void at91_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
1344 for (i = 0; i < chip->ngpio; i++) { 1342 for (i = 0; i < chip->ngpio; i++) {
1345 unsigned mask = pin_to_mask(i); 1343 unsigned mask = pin_to_mask(i);
1346 const char *gpio_label; 1344 const char *gpio_label;
1347 u32 pdsr;
1348 1345
1349 gpio_label = gpiochip_is_requested(chip, i); 1346 gpio_label = gpiochip_is_requested(chip, i);
1350 if (!gpio_label) 1347 if (!gpio_label)
@@ -1353,11 +1350,13 @@ static void at91_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
1353 seq_printf(s, "[%s] GPIO%s%d: ", 1350 seq_printf(s, "[%s] GPIO%s%d: ",
1354 gpio_label, chip->label, i); 1351 gpio_label, chip->label, i);
1355 if (mode == AT91_MUX_GPIO) { 1352 if (mode == AT91_MUX_GPIO) {
1356 pdsr = readl_relaxed(pio + PIO_PDSR); 1353 seq_printf(s, "[gpio] ");
1357 1354 seq_printf(s, "%s ",
1358 seq_printf(s, "[gpio] %s\n", 1355 readl_relaxed(pio + PIO_OSR) & mask ?
1359 pdsr & mask ? 1356 "output" : "input");
1360 "set" : "clear"); 1357 seq_printf(s, "%s\n",
1358 readl_relaxed(pio + PIO_PDSR) & mask ?
1359 "set" : "clear");
1361 } else { 1360 } else {
1362 seq_printf(s, "[periph %c]\n", 1361 seq_printf(s, "[periph %c]\n",
1363 mode + 'A' - 1); 1362 mode + 'A' - 1);
diff --git a/drivers/pinctrl/pinctrl-at91.h b/drivers/pinctrl/pinctrl-at91.h
new file mode 100644
index 000000000000..79b957f1dfa2
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-at91.h
@@ -0,0 +1,72 @@
1/*
2 * Copyright (C) 2005 Ivan Kokshaysky
3 * Copyright (C) SAN People
4 *
5 * Parallel I/O Controller (PIO) - System peripherals registers.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 */
12
13#ifndef __PINCTRL_AT91_H
14#define __PINCTRL_AT91_H
15
16#define PIO_PER 0x00 /* Enable Register */
17#define PIO_PDR 0x04 /* Disable Register */
18#define PIO_PSR 0x08 /* Status Register */
19#define PIO_OER 0x10 /* Output Enable Register */
20#define PIO_ODR 0x14 /* Output Disable Register */
21#define PIO_OSR 0x18 /* Output Status Register */
22#define PIO_IFER 0x20 /* Glitch Input Filter Enable */
23#define PIO_IFDR 0x24 /* Glitch Input Filter Disable */
24#define PIO_IFSR 0x28 /* Glitch Input Filter Status */
25#define PIO_SODR 0x30 /* Set Output Data Register */
26#define PIO_CODR 0x34 /* Clear Output Data Register */
27#define PIO_ODSR 0x38 /* Output Data Status Register */
28#define PIO_PDSR 0x3c /* Pin Data Status Register */
29#define PIO_IER 0x40 /* Interrupt Enable Register */
30#define PIO_IDR 0x44 /* Interrupt Disable Register */
31#define PIO_IMR 0x48 /* Interrupt Mask Register */
32#define PIO_ISR 0x4c /* Interrupt Status Register */
33#define PIO_MDER 0x50 /* Multi-driver Enable Register */
34#define PIO_MDDR 0x54 /* Multi-driver Disable Register */
35#define PIO_MDSR 0x58 /* Multi-driver Status Register */
36#define PIO_PUDR 0x60 /* Pull-up Disable Register */
37#define PIO_PUER 0x64 /* Pull-up Enable Register */
38#define PIO_PUSR 0x68 /* Pull-up Status Register */
39#define PIO_ASR 0x70 /* Peripheral A Select Register */
40#define PIO_ABCDSR1 0x70 /* Peripheral ABCD Select Register 1 [some sam9 only] */
41#define PIO_BSR 0x74 /* Peripheral B Select Register */
42#define PIO_ABCDSR2 0x74 /* Peripheral ABCD Select Register 2 [some sam9 only] */
43#define PIO_ABSR 0x78 /* AB Status Register */
44#define PIO_IFSCDR 0x80 /* Input Filter Slow Clock Disable Register */
45#define PIO_IFSCER 0x84 /* Input Filter Slow Clock Enable Register */
46#define PIO_IFSCSR 0x88 /* Input Filter Slow Clock Status Register */
47#define PIO_SCDR 0x8c /* Slow Clock Divider Debouncing Register */
48#define PIO_SCDR_DIV (0x3fff << 0) /* Slow Clock Divider Mask */
49#define PIO_PPDDR 0x90 /* Pad Pull-down Disable Register */
50#define PIO_PPDER 0x94 /* Pad Pull-down Enable Register */
51#define PIO_PPDSR 0x98 /* Pad Pull-down Status Register */
52#define PIO_OWER 0xa0 /* Output Write Enable Register */
53#define PIO_OWDR 0xa4 /* Output Write Disable Register */
54#define PIO_OWSR 0xa8 /* Output Write Status Register */
55#define PIO_AIMER 0xb0 /* Additional Interrupt Modes Enable Register */
56#define PIO_AIMDR 0xb4 /* Additional Interrupt Modes Disable Register */
57#define PIO_AIMMR 0xb8 /* Additional Interrupt Modes Mask Register */
58#define PIO_ESR 0xc0 /* Edge Select Register */
59#define PIO_LSR 0xc4 /* Level Select Register */
60#define PIO_ELSR 0xc8 /* Edge/Level Status Register */
61#define PIO_FELLSR 0xd0 /* Falling Edge/Low Level Select Register */
62#define PIO_REHLSR 0xd4 /* Rising Edge/ High Level Select Register */
63#define PIO_FRLHSR 0xd8 /* Fall/Rise - Low/High Status Register */
64#define PIO_SCHMITT 0x100 /* Schmitt Trigger Register */
65
66#define SAMA5D3_PIO_DRIVER1 0x118 /*PIO Driver 1 register offset*/
67#define SAMA5D3_PIO_DRIVER2 0x11C /*PIO Driver 2 register offset*/
68
69#define AT91SAM9X5_PIO_DRIVER1 0x114 /*PIO Driver 1 register offset*/
70#define AT91SAM9X5_PIO_DRIVER2 0x118 /*PIO Driver 2 register offset*/
71
72#endif
diff --git a/drivers/pinctrl/pinctrl-bcm281xx.c b/drivers/pinctrl/pinctrl-bcm281xx.c
index a26e0c2ba33e..2b25047fef8d 100644
--- a/drivers/pinctrl/pinctrl-bcm281xx.c
+++ b/drivers/pinctrl/pinctrl-bcm281xx.c
@@ -1404,11 +1404,6 @@ static int __init bcm281xx_pinctrl_probe(struct platform_device *pdev)
1404 1404
1405 /* So far We can assume there is only 1 bank of registers */ 1405 /* So far We can assume there is only 1 bank of registers */
1406 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1406 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1407 if (!res) {
1408 dev_err(&pdev->dev, "Missing MEM resource\n");
1409 return -ENODEV;
1410 }
1411
1412 pdata->reg_base = devm_ioremap_resource(&pdev->dev, res); 1407 pdata->reg_base = devm_ioremap_resource(&pdev->dev, res);
1413 if (IS_ERR(pdata->reg_base)) { 1408 if (IS_ERR(pdata->reg_base)) {
1414 dev_err(&pdev->dev, "Failed to ioremap MEM resource\n"); 1409 dev_err(&pdev->dev, "Failed to ioremap MEM resource\n");
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index 016f4578e494..40970c305dd0 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -856,27 +856,22 @@ static int rockchip_pmx_set(struct pinctrl_dev *pctldev, unsigned selector,
856 * leads to this function call (via the pinctrl_gpio_direction_{input|output}() 856 * leads to this function call (via the pinctrl_gpio_direction_{input|output}()
857 * function called from the gpiolib interface). 857 * function called from the gpiolib interface).
858 */ 858 */
859static int rockchip_pmx_gpio_set_direction(struct pinctrl_dev *pctldev, 859static int _rockchip_pmx_gpio_set_direction(struct gpio_chip *chip,
860 struct pinctrl_gpio_range *range, 860 int pin, bool input)
861 unsigned offset, bool input)
862{ 861{
863 struct rockchip_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
864 struct rockchip_pin_bank *bank; 862 struct rockchip_pin_bank *bank;
865 struct gpio_chip *chip; 863 int ret;
866 int pin, ret; 864 unsigned long flags;
867 u32 data; 865 u32 data;
868 866
869 chip = range->gc;
870 bank = gc_to_pin_bank(chip); 867 bank = gc_to_pin_bank(chip);
871 pin = offset - chip->base;
872
873 dev_dbg(info->dev, "gpio_direction for pin %u as %s-%d to %s\n",
874 offset, range->name, pin, input ? "input" : "output");
875 868
876 ret = rockchip_set_mux(bank, pin, RK_FUNC_GPIO); 869 ret = rockchip_set_mux(bank, pin, RK_FUNC_GPIO);
877 if (ret < 0) 870 if (ret < 0)
878 return ret; 871 return ret;
879 872
873 spin_lock_irqsave(&bank->slock, flags);
874
880 data = readl_relaxed(bank->reg_base + GPIO_SWPORT_DDR); 875 data = readl_relaxed(bank->reg_base + GPIO_SWPORT_DDR);
881 /* set bit to 1 for output, 0 for input */ 876 /* set bit to 1 for output, 0 for input */
882 if (!input) 877 if (!input)
@@ -885,9 +880,28 @@ static int rockchip_pmx_gpio_set_direction(struct pinctrl_dev *pctldev,
885 data &= ~BIT(pin); 880 data &= ~BIT(pin);
886 writel_relaxed(data, bank->reg_base + GPIO_SWPORT_DDR); 881 writel_relaxed(data, bank->reg_base + GPIO_SWPORT_DDR);
887 882
883 spin_unlock_irqrestore(&bank->slock, flags);
884
888 return 0; 885 return 0;
889} 886}
890 887
888static int rockchip_pmx_gpio_set_direction(struct pinctrl_dev *pctldev,
889 struct pinctrl_gpio_range *range,
890 unsigned offset, bool input)
891{
892 struct rockchip_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
893 struct gpio_chip *chip;
894 int pin;
895
896 chip = range->gc;
897 pin = offset - chip->base;
898 dev_dbg(info->dev, "gpio_direction for pin %u as %s-%d to %s\n",
899 offset, range->name, pin, input ? "input" : "output");
900
901 return _rockchip_pmx_gpio_set_direction(chip, offset - chip->base,
902 input);
903}
904
891static const struct pinmux_ops rockchip_pmx_ops = { 905static const struct pinmux_ops rockchip_pmx_ops = {
892 .get_functions_count = rockchip_pmx_get_funcs_count, 906 .get_functions_count = rockchip_pmx_get_funcs_count,
893 .get_function_name = rockchip_pmx_get_func_name, 907 .get_function_name = rockchip_pmx_get_func_name,
@@ -917,8 +931,7 @@ static bool rockchip_pinconf_pull_valid(struct rockchip_pin_ctrl *ctrl,
917 return false; 931 return false;
918} 932}
919 933
920static int rockchip_gpio_direction_output(struct gpio_chip *gc, 934static void rockchip_gpio_set(struct gpio_chip *gc, unsigned offset, int value);
921 unsigned offset, int value);
922static int rockchip_gpio_get(struct gpio_chip *gc, unsigned offset); 935static int rockchip_gpio_get(struct gpio_chip *gc, unsigned offset);
923 936
924/* set the pin config settings for a specified pin */ 937/* set the pin config settings for a specified pin */
@@ -959,9 +972,10 @@ static int rockchip_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
959 return rc; 972 return rc;
960 break; 973 break;
961 case PIN_CONFIG_OUTPUT: 974 case PIN_CONFIG_OUTPUT:
962 rc = rockchip_gpio_direction_output(&bank->gpio_chip, 975 rockchip_gpio_set(&bank->gpio_chip,
963 pin - bank->pin_base, 976 pin - bank->pin_base, arg);
964 arg); 977 rc = _rockchip_pmx_gpio_set_direction(&bank->gpio_chip,
978 pin - bank->pin_base, false);
965 if (rc) 979 if (rc)
966 return rc; 980 return rc;
967 break; 981 break;
@@ -1253,6 +1267,10 @@ static int rockchip_pinctrl_register(struct platform_device *pdev,
1253 } 1267 }
1254 } 1268 }
1255 1269
1270 ret = rockchip_pinctrl_parse_dt(pdev, info);
1271 if (ret)
1272 return ret;
1273
1256 info->pctl_dev = pinctrl_register(ctrldesc, &pdev->dev, info); 1274 info->pctl_dev = pinctrl_register(ctrldesc, &pdev->dev, info);
1257 if (!info->pctl_dev) { 1275 if (!info->pctl_dev) {
1258 dev_err(&pdev->dev, "could not register pinctrl driver\n"); 1276 dev_err(&pdev->dev, "could not register pinctrl driver\n");
@@ -1270,12 +1288,6 @@ static int rockchip_pinctrl_register(struct platform_device *pdev,
1270 pinctrl_add_gpio_range(info->pctl_dev, &pin_bank->grange); 1288 pinctrl_add_gpio_range(info->pctl_dev, &pin_bank->grange);
1271 } 1289 }
1272 1290
1273 ret = rockchip_pinctrl_parse_dt(pdev, info);
1274 if (ret) {
1275 pinctrl_unregister(info->pctl_dev);
1276 return ret;
1277 }
1278
1279 return 0; 1291 return 0;
1280} 1292}
1281 1293
@@ -1387,6 +1399,7 @@ static void rockchip_irq_demux(unsigned int irq, struct irq_desc *desc)
1387 u32 polarity = 0, data = 0; 1399 u32 polarity = 0, data = 0;
1388 u32 pend; 1400 u32 pend;
1389 bool edge_changed = false; 1401 bool edge_changed = false;
1402 unsigned long flags;
1390 1403
1391 dev_dbg(bank->drvdata->dev, "got irq for bank %s\n", bank->name); 1404 dev_dbg(bank->drvdata->dev, "got irq for bank %s\n", bank->name);
1392 1405
@@ -1432,10 +1445,14 @@ static void rockchip_irq_demux(unsigned int irq, struct irq_desc *desc)
1432 1445
1433 if (bank->toggle_edge_mode && edge_changed) { 1446 if (bank->toggle_edge_mode && edge_changed) {
1434 /* Interrupt params should only be set with ints disabled */ 1447 /* Interrupt params should only be set with ints disabled */
1448 spin_lock_irqsave(&bank->slock, flags);
1449
1435 data = readl_relaxed(bank->reg_base + GPIO_INTEN); 1450 data = readl_relaxed(bank->reg_base + GPIO_INTEN);
1436 writel_relaxed(0, bank->reg_base + GPIO_INTEN); 1451 writel_relaxed(0, bank->reg_base + GPIO_INTEN);
1437 writel(polarity, bank->reg_base + GPIO_INT_POLARITY); 1452 writel(polarity, bank->reg_base + GPIO_INT_POLARITY);
1438 writel(data, bank->reg_base + GPIO_INTEN); 1453 writel(data, bank->reg_base + GPIO_INTEN);
1454
1455 spin_unlock_irqrestore(&bank->slock, flags);
1439 } 1456 }
1440 1457
1441 chained_irq_exit(chip, desc); 1458 chained_irq_exit(chip, desc);
@@ -1449,6 +1466,7 @@ static int rockchip_irq_set_type(struct irq_data *d, unsigned int type)
1449 u32 polarity; 1466 u32 polarity;
1450 u32 level; 1467 u32 level;
1451 u32 data; 1468 u32 data;
1469 unsigned long flags;
1452 int ret; 1470 int ret;
1453 1471
1454 /* make sure the pin is configured as gpio input */ 1472 /* make sure the pin is configured as gpio input */
@@ -1456,15 +1474,20 @@ static int rockchip_irq_set_type(struct irq_data *d, unsigned int type)
1456 if (ret < 0) 1474 if (ret < 0)
1457 return ret; 1475 return ret;
1458 1476
1477 spin_lock_irqsave(&bank->slock, flags);
1478
1459 data = readl_relaxed(bank->reg_base + GPIO_SWPORT_DDR); 1479 data = readl_relaxed(bank->reg_base + GPIO_SWPORT_DDR);
1460 data &= ~mask; 1480 data &= ~mask;
1461 writel_relaxed(data, bank->reg_base + GPIO_SWPORT_DDR); 1481 writel_relaxed(data, bank->reg_base + GPIO_SWPORT_DDR);
1462 1482
1483 spin_unlock_irqrestore(&bank->slock, flags);
1484
1463 if (type & IRQ_TYPE_EDGE_BOTH) 1485 if (type & IRQ_TYPE_EDGE_BOTH)
1464 __irq_set_handler_locked(d->irq, handle_edge_irq); 1486 __irq_set_handler_locked(d->irq, handle_edge_irq);
1465 else 1487 else
1466 __irq_set_handler_locked(d->irq, handle_level_irq); 1488 __irq_set_handler_locked(d->irq, handle_level_irq);
1467 1489
1490 spin_lock_irqsave(&bank->slock, flags);
1468 irq_gc_lock(gc); 1491 irq_gc_lock(gc);
1469 1492
1470 level = readl_relaxed(gc->reg_base + GPIO_INTTYPE_LEVEL); 1493 level = readl_relaxed(gc->reg_base + GPIO_INTTYPE_LEVEL);
@@ -1507,6 +1530,7 @@ static int rockchip_irq_set_type(struct irq_data *d, unsigned int type)
1507 break; 1530 break;
1508 default: 1531 default:
1509 irq_gc_unlock(gc); 1532 irq_gc_unlock(gc);
1533 spin_unlock_irqrestore(&bank->slock, flags);
1510 return -EINVAL; 1534 return -EINVAL;
1511 } 1535 }
1512 1536
@@ -1514,6 +1538,7 @@ static int rockchip_irq_set_type(struct irq_data *d, unsigned int type)
1514 writel_relaxed(polarity, gc->reg_base + GPIO_INT_POLARITY); 1538 writel_relaxed(polarity, gc->reg_base + GPIO_INT_POLARITY);
1515 1539
1516 irq_gc_unlock(gc); 1540 irq_gc_unlock(gc);
1541 spin_unlock_irqrestore(&bank->slock, flags);
1517 1542
1518 return 0; 1543 return 0;
1519} 1544}
@@ -1563,6 +1588,7 @@ static int rockchip_interrupts_register(struct platform_device *pdev,
1563 gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit; 1588 gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;
1564 gc->chip_types[0].chip.irq_set_wake = irq_gc_set_wake; 1589 gc->chip_types[0].chip.irq_set_wake = irq_gc_set_wake;
1565 gc->chip_types[0].chip.irq_set_type = rockchip_irq_set_type; 1590 gc->chip_types[0].chip.irq_set_type = rockchip_irq_set_type;
1591 gc->wake_enabled = IRQ_MSK(bank->nr_pins);
1566 1592
1567 irq_set_handler_data(bank->irq, bank); 1593 irq_set_handler_data(bank->irq, bank);
1568 irq_set_chained_handler(bank->irq, rockchip_irq_demux); 1594 irq_set_chained_handler(bank->irq, rockchip_irq_demux);
@@ -1770,6 +1796,51 @@ static struct rockchip_pin_ctrl *rockchip_pinctrl_get_soc_data(
1770 return ctrl; 1796 return ctrl;
1771} 1797}
1772 1798
1799#define RK3288_GRF_GPIO6C_IOMUX 0x64
1800#define GPIO6C6_SEL_WRITE_ENABLE BIT(28)
1801
1802static u32 rk3288_grf_gpio6c_iomux;
1803
1804static int __maybe_unused rockchip_pinctrl_suspend(struct device *dev)
1805{
1806 struct rockchip_pinctrl *info = dev_get_drvdata(dev);
1807 int ret = pinctrl_force_sleep(info->pctl_dev);
1808
1809 if (ret)
1810 return ret;
1811
1812 /*
1813 * RK3288 GPIO6_C6 mux would be modified by Maskrom when resume, so save
1814 * the setting here, and restore it at resume.
1815 */
1816 if (info->ctrl->type == RK3288) {
1817 ret = regmap_read(info->regmap_base, RK3288_GRF_GPIO6C_IOMUX,
1818 &rk3288_grf_gpio6c_iomux);
1819 if (ret) {
1820 pinctrl_force_default(info->pctl_dev);
1821 return ret;
1822 }
1823 }
1824
1825 return 0;
1826}
1827
1828static int __maybe_unused rockchip_pinctrl_resume(struct device *dev)
1829{
1830 struct rockchip_pinctrl *info = dev_get_drvdata(dev);
1831 int ret = regmap_write(info->regmap_base, RK3288_GRF_GPIO6C_IOMUX,
1832 rk3288_grf_gpio6c_iomux |
1833 GPIO6C6_SEL_WRITE_ENABLE);
1834
1835 if (ret)
1836 return ret;
1837
1838 return pinctrl_force_default(info->pctl_dev);
1839}
1840
1841static SIMPLE_DEV_PM_OPS(rockchip_pinctrl_dev_pm_ops, rockchip_pinctrl_suspend,
1842 rockchip_pinctrl_resume);
1843
1773static int rockchip_pinctrl_probe(struct platform_device *pdev) 1844static int rockchip_pinctrl_probe(struct platform_device *pdev)
1774{ 1845{
1775 struct rockchip_pinctrl *info; 1846 struct rockchip_pinctrl *info;
@@ -1983,6 +2054,7 @@ static struct platform_driver rockchip_pinctrl_driver = {
1983 .driver = { 2054 .driver = {
1984 .name = "rockchip-pinctrl", 2055 .name = "rockchip-pinctrl",
1985 .owner = THIS_MODULE, 2056 .owner = THIS_MODULE,
2057 .pm = &rockchip_pinctrl_dev_pm_ops,
1986 .of_match_table = rockchip_pinctrl_dt_match, 2058 .of_match_table = rockchip_pinctrl_dt_match,
1987 }, 2059 },
1988}; 2060};
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index 4b1792aad3d8..caeeb1c65b0f 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -1512,7 +1512,7 @@ static int st_gpiolib_register_bank(struct st_pinctrl *info,
1512 gpio_irq, st_gpio_irq_handler); 1512 gpio_irq, st_gpio_irq_handler);
1513 } 1513 }
1514 1514
1515 if (info->irqmux_base > 0 || gpio_irq > 0) { 1515 if (info->irqmux_base || gpio_irq > 0) {
1516 err = gpiochip_irqchip_add(&bank->gpio_chip, &st_gpio_irqchip, 1516 err = gpiochip_irqchip_add(&bank->gpio_chip, &st_gpio_irqchip,
1517 0, handle_simple_irq, 1517 0, handle_simple_irq,
1518 IRQ_TYPE_LEVEL_LOW); 1518 IRQ_TYPE_LEVEL_LOW);
diff --git a/drivers/pinctrl/pinctrl-tb10x.c b/drivers/pinctrl/pinctrl-tb10x.c
index 3b9bfcf717ac..9363563f9777 100644
--- a/drivers/pinctrl/pinctrl-tb10x.c
+++ b/drivers/pinctrl/pinctrl-tb10x.c
@@ -759,7 +759,7 @@ static struct pinctrl_desc tb10x_pindesc = {
759static int tb10x_pinctrl_probe(struct platform_device *pdev) 759static int tb10x_pinctrl_probe(struct platform_device *pdev)
760{ 760{
761 int ret = -EINVAL; 761 int ret = -EINVAL;
762 struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 762 struct resource *mem;
763 struct device *dev = &pdev->dev; 763 struct device *dev = &pdev->dev;
764 struct device_node *of_node = dev->of_node; 764 struct device_node *of_node = dev->of_node;
765 struct device_node *child; 765 struct device_node *child;
@@ -771,11 +771,6 @@ static int tb10x_pinctrl_probe(struct platform_device *pdev)
771 return -EINVAL; 771 return -EINVAL;
772 } 772 }
773 773
774 if (!mem) {
775 dev_err(dev, "No memory resource defined.\n");
776 return -EINVAL;
777 }
778
779 state = devm_kzalloc(dev, sizeof(struct tb10x_pinctrl) + 774 state = devm_kzalloc(dev, sizeof(struct tb10x_pinctrl) +
780 of_get_child_count(of_node) 775 of_get_child_count(of_node)
781 * sizeof(struct tb10x_of_pinfunc), 776 * sizeof(struct tb10x_of_pinfunc),
@@ -787,6 +782,7 @@ static int tb10x_pinctrl_probe(struct platform_device *pdev)
787 state->pinfuncs = (struct tb10x_of_pinfunc *)(state + 1); 782 state->pinfuncs = (struct tb10x_of_pinfunc *)(state + 1);
788 mutex_init(&state->mutex); 783 mutex_init(&state->mutex);
789 784
785 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
790 state->base = devm_ioremap_resource(dev, mem); 786 state->base = devm_ioremap_resource(dev, mem);
791 if (IS_ERR(state->base)) { 787 if (IS_ERR(state->base)) {
792 ret = PTR_ERR(state->base); 788 ret = PTR_ERR(state->base);
diff --git a/drivers/pinctrl/pinctrl-tegra-xusb.c b/drivers/pinctrl/pinctrl-tegra-xusb.c
index 1631ec94fb02..080ec7723ef2 100644
--- a/drivers/pinctrl/pinctrl-tegra-xusb.c
+++ b/drivers/pinctrl/pinctrl-tegra-xusb.c
@@ -20,6 +20,7 @@
20#include <linux/pinctrl/pinmux.h> 20#include <linux/pinctrl/pinmux.h>
21#include <linux/platform_device.h> 21#include <linux/platform_device.h>
22#include <linux/reset.h> 22#include <linux/reset.h>
23#include <linux/slab.h>
23 24
24#include <dt-bindings/pinctrl/pinctrl-tegra-xusb.h> 25#include <dt-bindings/pinctrl/pinctrl-tegra-xusb.h>
25 26
@@ -171,7 +172,7 @@ static int tegra_xusb_padctl_parse_subnode(struct tegra_xusb_padctl *padctl,
171 if (err == -EINVAL) 172 if (err == -EINVAL)
172 continue; 173 continue;
173 174
174 return err; 175 goto out;
175 } 176 }
176 177
177 config = TEGRA_XUSB_PADCTL_PACK(properties[i].param, value); 178 config = TEGRA_XUSB_PADCTL_PACK(properties[i].param, value);
@@ -179,7 +180,7 @@ static int tegra_xusb_padctl_parse_subnode(struct tegra_xusb_padctl *padctl,
179 err = pinctrl_utils_add_config(padctl->pinctrl, &configs, 180 err = pinctrl_utils_add_config(padctl->pinctrl, &configs,
180 &num_configs, config); 181 &num_configs, config);
181 if (err < 0) 182 if (err < 0)
182 return err; 183 goto out;
183 } 184 }
184 185
185 if (function) 186 if (function)
@@ -190,14 +191,14 @@ static int tegra_xusb_padctl_parse_subnode(struct tegra_xusb_padctl *padctl,
190 191
191 err = of_property_count_strings(np, "nvidia,lanes"); 192 err = of_property_count_strings(np, "nvidia,lanes");
192 if (err < 0) 193 if (err < 0)
193 return err; 194 goto out;
194 195
195 reserve *= err; 196 reserve *= err;
196 197
197 err = pinctrl_utils_reserve_map(padctl->pinctrl, maps, reserved_maps, 198 err = pinctrl_utils_reserve_map(padctl->pinctrl, maps, reserved_maps,
198 num_maps, reserve); 199 num_maps, reserve);
199 if (err < 0) 200 if (err < 0)
200 return err; 201 goto out;
201 202
202 of_property_for_each_string(np, "nvidia,lanes", prop, group) { 203 of_property_for_each_string(np, "nvidia,lanes", prop, group) {
203 if (function) { 204 if (function) {
@@ -205,7 +206,7 @@ static int tegra_xusb_padctl_parse_subnode(struct tegra_xusb_padctl *padctl,
205 reserved_maps, num_maps, group, 206 reserved_maps, num_maps, group,
206 function); 207 function);
207 if (err < 0) 208 if (err < 0)
208 return err; 209 goto out;
209 } 210 }
210 211
211 if (num_configs) { 212 if (num_configs) {
@@ -214,11 +215,15 @@ static int tegra_xusb_padctl_parse_subnode(struct tegra_xusb_padctl *padctl,
214 configs, num_configs, 215 configs, num_configs,
215 PIN_MAP_TYPE_CONFIGS_GROUP); 216 PIN_MAP_TYPE_CONFIGS_GROUP);
216 if (err < 0) 217 if (err < 0)
217 return err; 218 goto out;
218 } 219 }
219 } 220 }
220 221
221 return 0; 222 err = 0;
223
224out:
225 kfree(configs);
226 return err;
222} 227}
223 228
224static int tegra_xusb_padctl_dt_node_to_map(struct pinctrl_dev *pinctrl, 229static int tegra_xusb_padctl_dt_node_to_map(struct pinctrl_dev *pinctrl,
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index 81275af9638b..3cd243c26b7d 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -47,4 +47,17 @@ config PINCTRL_MSM8X74
47 This is the pinctrl, pinmux, pinconf and gpiolib driver for the 47 This is the pinctrl, pinmux, pinconf and gpiolib driver for the
48 Qualcomm TLMM block found in the Qualcomm 8974 platform. 48 Qualcomm TLMM block found in the Qualcomm 8974 platform.
49 49
50config PINCTRL_QCOM_SPMI_PMIC
51 tristate "Qualcomm SPMI PMIC pin controller driver"
52 depends on GPIOLIB && OF && SPMI
53 select REGMAP_SPMI
54 select PINMUX
55 select PINCONF
56 select GENERIC_PINCONF
57 help
58 This is the pinctrl, pinmux, pinconf and gpiolib driver for the
59 Qualcomm GPIO and MPP blocks found in the Qualcomm PMIC's chips,
60 which are using SPMI for communication with SoC. Example PMIC's
61 devices are pm8841, pm8941 and pma8084.
62
50endif 63endif
diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile
index ba8519fcd8d3..bfd79af5f982 100644
--- a/drivers/pinctrl/qcom/Makefile
+++ b/drivers/pinctrl/qcom/Makefile
@@ -5,3 +5,5 @@ obj-$(CONFIG_PINCTRL_APQ8084) += pinctrl-apq8084.o
5obj-$(CONFIG_PINCTRL_IPQ8064) += pinctrl-ipq8064.o 5obj-$(CONFIG_PINCTRL_IPQ8064) += pinctrl-ipq8064.o
6obj-$(CONFIG_PINCTRL_MSM8960) += pinctrl-msm8960.o 6obj-$(CONFIG_PINCTRL_MSM8960) += pinctrl-msm8960.o
7obj-$(CONFIG_PINCTRL_MSM8X74) += pinctrl-msm8x74.o 7obj-$(CONFIG_PINCTRL_MSM8X74) += pinctrl-msm8x74.o
8obj-$(CONFIG_PINCTRL_QCOM_SPMI_PMIC) += pinctrl-spmi-gpio.o
9obj-$(CONFIG_PINCTRL_QCOM_SPMI_PMIC) += pinctrl-spmi-mpp.o
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
new file mode 100644
index 000000000000..b863b5080890
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
@@ -0,0 +1,933 @@
1/*
2 * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include <linux/gpio.h>
15#include <linux/module.h>
16#include <linux/of.h>
17#include <linux/pinctrl/pinconf-generic.h>
18#include <linux/pinctrl/pinconf.h>
19#include <linux/pinctrl/pinmux.h>
20#include <linux/platform_device.h>
21#include <linux/regmap.h>
22#include <linux/slab.h>
23#include <linux/types.h>
24
25#include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
26
27#include "../core.h"
28#include "../pinctrl-utils.h"
29
30#define PMIC_GPIO_ADDRESS_RANGE 0x100
31
32/* type and subtype registers base address offsets */
33#define PMIC_GPIO_REG_TYPE 0x4
34#define PMIC_GPIO_REG_SUBTYPE 0x5
35
36/* GPIO peripheral type and subtype out_values */
37#define PMIC_GPIO_TYPE 0x10
38#define PMIC_GPIO_SUBTYPE_GPIO_4CH 0x1
39#define PMIC_GPIO_SUBTYPE_GPIOC_4CH 0x5
40#define PMIC_GPIO_SUBTYPE_GPIO_8CH 0x9
41#define PMIC_GPIO_SUBTYPE_GPIOC_8CH 0xd
42
43#define PMIC_MPP_REG_RT_STS 0x10
44#define PMIC_MPP_REG_RT_STS_VAL_MASK 0x1
45
46/* control register base address offsets */
47#define PMIC_GPIO_REG_MODE_CTL 0x40
48#define PMIC_GPIO_REG_DIG_VIN_CTL 0x41
49#define PMIC_GPIO_REG_DIG_PULL_CTL 0x42
50#define PMIC_GPIO_REG_DIG_OUT_CTL 0x45
51#define PMIC_GPIO_REG_EN_CTL 0x46
52
53/* PMIC_GPIO_REG_MODE_CTL */
54#define PMIC_GPIO_REG_MODE_VALUE_SHIFT 0x1
55#define PMIC_GPIO_REG_MODE_FUNCTION_SHIFT 1
56#define PMIC_GPIO_REG_MODE_FUNCTION_MASK 0x7
57#define PMIC_GPIO_REG_MODE_DIR_SHIFT 4
58#define PMIC_GPIO_REG_MODE_DIR_MASK 0x7
59
60/* PMIC_GPIO_REG_DIG_VIN_CTL */
61#define PMIC_GPIO_REG_VIN_SHIFT 0
62#define PMIC_GPIO_REG_VIN_MASK 0x7
63
64/* PMIC_GPIO_REG_DIG_PULL_CTL */
65#define PMIC_GPIO_REG_PULL_SHIFT 0
66#define PMIC_GPIO_REG_PULL_MASK 0x7
67
68#define PMIC_GPIO_PULL_DOWN 4
69#define PMIC_GPIO_PULL_DISABLE 5
70
71/* PMIC_GPIO_REG_DIG_OUT_CTL */
72#define PMIC_GPIO_REG_OUT_STRENGTH_SHIFT 0
73#define PMIC_GPIO_REG_OUT_STRENGTH_MASK 0x3
74#define PMIC_GPIO_REG_OUT_TYPE_SHIFT 4
75#define PMIC_GPIO_REG_OUT_TYPE_MASK 0x3
76
77/*
78 * Output type - indicates pin should be configured as push-pull,
79 * open drain or open source.
80 */
81#define PMIC_GPIO_OUT_BUF_CMOS 0
82#define PMIC_GPIO_OUT_BUF_OPEN_DRAIN_NMOS 1
83#define PMIC_GPIO_OUT_BUF_OPEN_DRAIN_PMOS 2
84
85/* PMIC_GPIO_REG_EN_CTL */
86#define PMIC_GPIO_REG_MASTER_EN_SHIFT 7
87
88#define PMIC_GPIO_PHYSICAL_OFFSET 1
89
90/* Qualcomm specific pin configurations */
91#define PMIC_GPIO_CONF_PULL_UP (PIN_CONFIG_END + 1)
92#define PMIC_GPIO_CONF_STRENGTH (PIN_CONFIG_END + 2)
93
94/**
95 * struct pmic_gpio_pad - keep current GPIO settings
96 * @base: Address base in SPMI device.
97 * @irq: IRQ number which this GPIO generate.
98 * @is_enabled: Set to false when GPIO should be put in high Z state.
99 * @out_value: Cached pin output value
100 * @have_buffer: Set to true if GPIO output could be configured in push-pull,
101 * open-drain or open-source mode.
102 * @output_enabled: Set to true if GPIO output logic is enabled.
103 * @input_enabled: Set to true if GPIO input buffer logic is enabled.
104 * @num_sources: Number of power-sources supported by this GPIO.
105 * @power_source: Current power-source used.
106 * @buffer_type: Push-pull, open-drain or open-source.
107 * @pullup: Constant current which flow trough GPIO output buffer.
108 * @strength: No, Low, Medium, High
109 * @function: See pmic_gpio_functions[]
110 */
111struct pmic_gpio_pad {
112 u16 base;
113 int irq;
114 bool is_enabled;
115 bool out_value;
116 bool have_buffer;
117 bool output_enabled;
118 bool input_enabled;
119 unsigned int num_sources;
120 unsigned int power_source;
121 unsigned int buffer_type;
122 unsigned int pullup;
123 unsigned int strength;
124 unsigned int function;
125};
126
127struct pmic_gpio_state {
128 struct device *dev;
129 struct regmap *map;
130 struct pinctrl_dev *ctrl;
131 struct gpio_chip chip;
132};
133
134struct pmic_gpio_bindings {
135 const char *property;
136 unsigned param;
137};
138
139static struct pmic_gpio_bindings pmic_gpio_bindings[] = {
140 {"qcom,pull-up-strength", PMIC_GPIO_CONF_PULL_UP},
141 {"qcom,drive-strength", PMIC_GPIO_CONF_STRENGTH},
142};
143
144static const char *const pmic_gpio_groups[] = {
145 "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7", "gpio8",
146 "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14", "gpio15",
147 "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21", "gpio22",
148 "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28", "gpio29",
149 "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35", "gpio36",
150};
151
152static const char *const pmic_gpio_functions[] = {
153 PMIC_GPIO_FUNC_NORMAL, PMIC_GPIO_FUNC_PAIRED,
154 PMIC_GPIO_FUNC_FUNC1, PMIC_GPIO_FUNC_FUNC2,
155 PMIC_GPIO_FUNC_DTEST1, PMIC_GPIO_FUNC_DTEST2,
156 PMIC_GPIO_FUNC_DTEST3, PMIC_GPIO_FUNC_DTEST4,
157};
158
159static inline struct pmic_gpio_state *to_gpio_state(struct gpio_chip *chip)
160{
161 return container_of(chip, struct pmic_gpio_state, chip);
162};
163
164static int pmic_gpio_read(struct pmic_gpio_state *state,
165 struct pmic_gpio_pad *pad, unsigned int addr)
166{
167 unsigned int val;
168 int ret;
169
170 ret = regmap_read(state->map, pad->base + addr, &val);
171 if (ret < 0)
172 dev_err(state->dev, "read 0x%x failed\n", addr);
173 else
174 ret = val;
175
176 return ret;
177}
178
179static int pmic_gpio_write(struct pmic_gpio_state *state,
180 struct pmic_gpio_pad *pad, unsigned int addr,
181 unsigned int val)
182{
183 int ret;
184
185 ret = regmap_write(state->map, pad->base + addr, val);
186 if (ret < 0)
187 dev_err(state->dev, "write 0x%x failed\n", addr);
188
189 return ret;
190}
191
192static int pmic_gpio_get_groups_count(struct pinctrl_dev *pctldev)
193{
194 /* Every PIN is a group */
195 return pctldev->desc->npins;
196}
197
198static const char *pmic_gpio_get_group_name(struct pinctrl_dev *pctldev,
199 unsigned pin)
200{
201 return pctldev->desc->pins[pin].name;
202}
203
204static int pmic_gpio_get_group_pins(struct pinctrl_dev *pctldev, unsigned pin,
205 const unsigned **pins, unsigned *num_pins)
206{
207 *pins = &pctldev->desc->pins[pin].number;
208 *num_pins = 1;
209 return 0;
210}
211
212static int pmic_gpio_parse_dt_config(struct device_node *np,
213 struct pinctrl_dev *pctldev,
214 unsigned long **configs,
215 unsigned int *nconfs)
216{
217 struct pmic_gpio_bindings *par;
218 unsigned long cfg;
219 int ret, i;
220 u32 val;
221
222 for (i = 0; i < ARRAY_SIZE(pmic_gpio_bindings); i++) {
223 par = &pmic_gpio_bindings[i];
224 ret = of_property_read_u32(np, par->property, &val);
225
226 /* property not found */
227 if (ret == -EINVAL)
228 continue;
229
230 /* use zero as default value */
231 if (ret)
232 val = 0;
233
234 dev_dbg(pctldev->dev, "found %s with value %u\n",
235 par->property, val);
236
237 cfg = pinconf_to_config_packed(par->param, val);
238
239 ret = pinctrl_utils_add_config(pctldev, configs, nconfs, cfg);
240 if (ret)
241 return ret;
242 }
243
244 return 0;
245}
246
/*
 * Build pinctrl maps for one DT subnode: parse the Qualcomm-specific
 * properties into a config array, then attach that array to every group
 * listed in the subnode's "pins" property.
 */
static int pmic_gpio_dt_subnode_to_map(struct pinctrl_dev *pctldev,
				       struct device_node *np,
				       struct pinctrl_map **map,
				       unsigned *reserv, unsigned *nmaps,
				       enum pinctrl_map_type type)
{
	unsigned long *configs = NULL;
	unsigned nconfs = 0;
	struct property *prop;
	const char *group;
	int ret;

	ret = pmic_gpio_parse_dt_config(np, pctldev, &configs, &nconfs);
	if (ret < 0)
		return ret;

	/* no Qualcomm-specific configs: nothing allocated, nothing to map */
	if (!nconfs)
		return 0;

	/* on success ret is the number of "pins" strings */
	ret = of_property_count_strings(np, "pins");
	if (ret < 0)
		goto exit;

	ret = pinctrl_utils_reserve_map(pctldev, map, reserv, nmaps, ret);
	if (ret < 0)
		goto exit;

	of_property_for_each_string(np, "pins", prop, group) {
		/* the utils helper duplicates configs; our copy is freed below */
		ret = pinctrl_utils_add_map_configs(pctldev, map,
						    reserv, nmaps, group,
						    configs, nconfs, type);
		if (ret < 0)
			break;
	}
exit:
	kfree(configs);
	return ret;
}

/*
 * pinctrl_ops .dt_node_to_map: walk every child of the config node,
 * mapping generic pinconf properties first, then the Qualcomm extras.
 * On error, maps built so far are released.
 */
static int pmic_gpio_dt_node_to_map(struct pinctrl_dev *pctldev,
				    struct device_node *np_config,
				    struct pinctrl_map **map, unsigned *nmaps)
{
	enum pinctrl_map_type type;
	struct device_node *np;
	unsigned reserv;
	int ret;

	ret = 0;
	*map = NULL;
	*nmaps = 0;
	reserv = 0;
	type = PIN_MAP_TYPE_CONFIGS_GROUP;

	for_each_child_of_node(np_config, np) {
		/* generic pinconf bindings (bias, drive, ...) */
		ret = pinconf_generic_dt_subnode_to_map(pctldev, np, map,
							&reserv, nmaps, type);
		if (ret)
			break;

		/* Qualcomm-specific bindings for the same subnode */
		ret = pmic_gpio_dt_subnode_to_map(pctldev, np, map, &reserv,
						  nmaps, type);
		if (ret)
			break;
	}

	if (ret < 0)
		pinctrl_utils_dt_free_map(pctldev, *map, *nmaps);

	return ret;
}
318
/* Group handling and DT mapping callbacks for the pin controller core. */
static const struct pinctrl_ops pmic_gpio_pinctrl_ops = {
	.get_groups_count = pmic_gpio_get_groups_count,
	.get_group_name = pmic_gpio_get_group_name,
	.get_group_pins = pmic_gpio_get_group_pins,
	.dt_node_to_map = pmic_gpio_dt_node_to_map,
	.dt_free_map = pinctrl_utils_dt_free_map,
};

static int pmic_gpio_get_functions_count(struct pinctrl_dev *pctldev)
{
	return ARRAY_SIZE(pmic_gpio_functions);
}

static const char *pmic_gpio_get_function_name(struct pinctrl_dev *pctldev,
					       unsigned function)
{
	return pmic_gpio_functions[function];
}

/*
 * Every function is selectable on every pin, so each function's group
 * list is simply the full pin/group table.
 */
static int pmic_gpio_get_function_groups(struct pinctrl_dev *pctldev,
					 unsigned function,
					 const char *const **groups,
					 unsigned *const num_qgroups)
{
	*groups = pmic_gpio_groups;
	*num_qgroups = pctldev->desc->npins;
	return 0;
}
347
/*
 * pinmux .set_mux: cache the selected function and program the MODE and
 * EN control registers from the cached pad state.
 */
static int pmic_gpio_set_mux(struct pinctrl_dev *pctldev, unsigned function,
			     unsigned pin)
{
	struct pmic_gpio_state *state = pinctrl_dev_get_drvdata(pctldev);
	struct pmic_gpio_pad *pad;
	unsigned int val;
	int ret;

	pad = pctldev->desc->pins[pin].drv_data;

	pad->function = function;

	/* direction field: 0 = input, 1 = output, 2 = both */
	val = 0;
	if (pad->output_enabled) {
		if (pad->input_enabled)
			val = 2;
		else
			val = 1;
	}

	val |= pad->function << PMIC_GPIO_REG_MODE_FUNCTION_SHIFT;
	/*
	 * NOTE(review): a _SHIFT macro is used here as a bit mask; this only
	 * works if PMIC_GPIO_REG_MODE_VALUE_SHIFT is 0x1 — confirm against
	 * the macro definitions earlier in this file.
	 */
	val |= pad->out_value & PMIC_GPIO_REG_MODE_VALUE_SHIFT;

	ret = pmic_gpio_write(state, pad, PMIC_GPIO_REG_MODE_CTL, val);
	if (ret < 0)
		return ret;

	val = pad->is_enabled << PMIC_GPIO_REG_MASTER_EN_SHIFT;

	return pmic_gpio_write(state, pad, PMIC_GPIO_REG_EN_CTL, val);
}

/* Function selection callbacks for the pinmux core. */
static const struct pinmux_ops pmic_gpio_pinmux_ops = {
	.get_functions_count = pmic_gpio_get_functions_count,
	.get_function_name = pmic_gpio_get_function_name,
	.get_function_groups = pmic_gpio_get_function_groups,
	.set_mux = pmic_gpio_set_mux,
};
386
387static int pmic_gpio_config_get(struct pinctrl_dev *pctldev,
388 unsigned int pin, unsigned long *config)
389{
390 unsigned param = pinconf_to_config_param(*config);
391 struct pmic_gpio_pad *pad;
392 unsigned arg;
393
394 pad = pctldev->desc->pins[pin].drv_data;
395
396 switch (param) {
397 case PIN_CONFIG_DRIVE_PUSH_PULL:
398 arg = pad->buffer_type == PMIC_GPIO_OUT_BUF_CMOS;
399 break;
400 case PIN_CONFIG_DRIVE_OPEN_DRAIN:
401 arg = pad->buffer_type == PMIC_GPIO_OUT_BUF_OPEN_DRAIN_NMOS;
402 break;
403 case PIN_CONFIG_DRIVE_OPEN_SOURCE:
404 arg = pad->buffer_type == PMIC_GPIO_OUT_BUF_OPEN_DRAIN_PMOS;
405 break;
406 case PIN_CONFIG_BIAS_PULL_DOWN:
407 arg = pad->pullup == PMIC_GPIO_PULL_DOWN;
408 break;
409 case PIN_CONFIG_BIAS_DISABLE:
410 arg = pad->pullup = PMIC_GPIO_PULL_DISABLE;
411 break;
412 case PIN_CONFIG_BIAS_PULL_UP:
413 arg = pad->pullup == PMIC_GPIO_PULL_UP_30;
414 break;
415 case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
416 arg = !pad->is_enabled;
417 break;
418 case PIN_CONFIG_POWER_SOURCE:
419 arg = pad->power_source;
420 break;
421 case PIN_CONFIG_INPUT_ENABLE:
422 arg = pad->input_enabled;
423 break;
424 case PIN_CONFIG_OUTPUT:
425 arg = pad->out_value;
426 break;
427 case PMIC_GPIO_CONF_PULL_UP:
428 arg = pad->pullup;
429 break;
430 case PMIC_GPIO_CONF_STRENGTH:
431 arg = pad->strength;
432 break;
433 default:
434 return -EINVAL;
435 }
436
437 *config = pinconf_to_config_packed(param, arg);
438 return 0;
439}
440
441static int pmic_gpio_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
442 unsigned long *configs, unsigned nconfs)
443{
444 struct pmic_gpio_state *state = pinctrl_dev_get_drvdata(pctldev);
445 struct pmic_gpio_pad *pad;
446 unsigned param, arg;
447 unsigned int val;
448 int i, ret;
449
450 pad = pctldev->desc->pins[pin].drv_data;
451
452 for (i = 0; i < nconfs; i++) {
453 param = pinconf_to_config_param(configs[i]);
454 arg = pinconf_to_config_argument(configs[i]);
455
456 switch (param) {
457 case PIN_CONFIG_DRIVE_PUSH_PULL:
458 pad->buffer_type = PMIC_GPIO_OUT_BUF_CMOS;
459 break;
460 case PIN_CONFIG_DRIVE_OPEN_DRAIN:
461 if (!pad->have_buffer)
462 return -EINVAL;
463 pad->buffer_type = PMIC_GPIO_OUT_BUF_OPEN_DRAIN_NMOS;
464 break;
465 case PIN_CONFIG_DRIVE_OPEN_SOURCE:
466 if (!pad->have_buffer)
467 return -EINVAL;
468 pad->buffer_type = PMIC_GPIO_OUT_BUF_OPEN_DRAIN_PMOS;
469 break;
470 case PIN_CONFIG_BIAS_DISABLE:
471 pad->pullup = PMIC_GPIO_PULL_DISABLE;
472 break;
473 case PIN_CONFIG_BIAS_PULL_UP:
474 pad->pullup = PMIC_GPIO_PULL_UP_30;
475 break;
476 case PIN_CONFIG_BIAS_PULL_DOWN:
477 if (arg)
478 pad->pullup = PMIC_GPIO_PULL_DOWN;
479 else
480 pad->pullup = PMIC_GPIO_PULL_DISABLE;
481 break;
482 case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
483 pad->is_enabled = false;
484 break;
485 case PIN_CONFIG_POWER_SOURCE:
486 if (arg > pad->num_sources)
487 return -EINVAL;
488 pad->power_source = arg;
489 break;
490 case PIN_CONFIG_INPUT_ENABLE:
491 pad->input_enabled = arg ? true : false;
492 break;
493 case PIN_CONFIG_OUTPUT:
494 pad->output_enabled = true;
495 pad->out_value = arg;
496 break;
497 case PMIC_GPIO_CONF_PULL_UP:
498 if (arg > PMIC_GPIO_PULL_UP_1P5_30)
499 return -EINVAL;
500 pad->pullup = arg;
501 break;
502 case PMIC_GPIO_CONF_STRENGTH:
503 if (arg > PMIC_GPIO_STRENGTH_LOW)
504 return -EINVAL;
505 pad->strength = arg;
506 break;
507 default:
508 return -EINVAL;
509 }
510 }
511
512 val = pad->power_source << PMIC_GPIO_REG_VIN_SHIFT;
513
514 ret = pmic_gpio_write(state, pad, PMIC_GPIO_REG_DIG_VIN_CTL, val);
515 if (ret < 0)
516 return ret;
517
518 val = pad->pullup << PMIC_GPIO_REG_PULL_SHIFT;
519
520 ret = pmic_gpio_write(state, pad, PMIC_GPIO_REG_DIG_PULL_CTL, val);
521 if (ret < 0)
522 return ret;
523
524 val = pad->buffer_type << PMIC_GPIO_REG_OUT_TYPE_SHIFT;
525 val = pad->strength << PMIC_GPIO_REG_OUT_STRENGTH_SHIFT;
526
527 ret = pmic_gpio_write(state, pad, PMIC_GPIO_REG_DIG_OUT_CTL, val);
528 if (ret < 0)
529 return ret;
530
531 val = 0;
532 if (pad->output_enabled) {
533 if (pad->input_enabled)
534 val = 2;
535 else
536 val = 1;
537 }
538
539 val = val << PMIC_GPIO_REG_MODE_DIR_SHIFT;
540 val |= pad->function << PMIC_GPIO_REG_MODE_FUNCTION_SHIFT;
541 val |= pad->out_value & PMIC_GPIO_REG_MODE_VALUE_SHIFT;
542
543 return pmic_gpio_write(state, pad, PMIC_GPIO_REG_MODE_CTL, val);
544}
545
546static void pmic_gpio_config_dbg_show(struct pinctrl_dev *pctldev,
547 struct seq_file *s, unsigned pin)
548{
549 struct pmic_gpio_state *state = pinctrl_dev_get_drvdata(pctldev);
550 struct pmic_gpio_pad *pad;
551 int ret, val;
552
553 static const char *const biases[] = {
554 "pull-up 30uA", "pull-up 1.5uA", "pull-up 31.5uA",
555 "pull-up 1.5uA + 30uA boost", "pull-down 10uA", "no pull"
556 };
557 static const char *const buffer_types[] = {
558 "push-pull", "open-drain", "open-source"
559 };
560 static const char *const strengths[] = {
561 "no", "high", "medium", "low"
562 };
563
564 pad = pctldev->desc->pins[pin].drv_data;
565
566 seq_printf(s, " gpio%-2d:", pin + PMIC_GPIO_PHYSICAL_OFFSET);
567
568 val = pmic_gpio_read(state, pad, PMIC_GPIO_REG_EN_CTL);
569
570 if (val < 0 || !(val >> PMIC_GPIO_REG_MASTER_EN_SHIFT)) {
571 seq_puts(s, " ---");
572 } else {
573
574 if (!pad->input_enabled) {
575 ret = pmic_gpio_read(state, pad, PMIC_MPP_REG_RT_STS);
576 if (!ret) {
577 ret &= PMIC_MPP_REG_RT_STS_VAL_MASK;
578 pad->out_value = ret;
579 }
580 }
581
582 seq_printf(s, " %-4s", pad->output_enabled ? "out" : "in");
583 seq_printf(s, " %-7s", pmic_gpio_functions[pad->function]);
584 seq_printf(s, " vin-%d", pad->power_source);
585 seq_printf(s, " %-27s", biases[pad->pullup]);
586 seq_printf(s, " %-10s", buffer_types[pad->buffer_type]);
587 seq_printf(s, " %-4s", pad->out_value ? "high" : "low");
588 seq_printf(s, " %-7s", strengths[pad->strength]);
589 }
590}
591
/* Pin configuration callbacks; group-level since each group is one pin. */
static const struct pinconf_ops pmic_gpio_pinconf_ops = {
	.pin_config_group_get = pmic_gpio_config_get,
	.pin_config_group_set = pmic_gpio_config_set,
	.pin_config_group_dbg_show = pmic_gpio_config_dbg_show,
};
597
598static int pmic_gpio_direction_input(struct gpio_chip *chip, unsigned pin)
599{
600 struct pmic_gpio_state *state = to_gpio_state(chip);
601 unsigned long config;
602
603 config = pinconf_to_config_packed(PIN_CONFIG_INPUT_ENABLE, 1);
604
605 return pmic_gpio_config_set(state->ctrl, pin, &config, 1);
606}
607
608static int pmic_gpio_direction_output(struct gpio_chip *chip,
609 unsigned pin, int val)
610{
611 struct pmic_gpio_state *state = to_gpio_state(chip);
612 unsigned long config;
613
614 config = pinconf_to_config_packed(PIN_CONFIG_OUTPUT, val);
615
616 return pmic_gpio_config_set(state->ctrl, pin, &config, 1);
617}
618
/*
 * gpio_chip .get: for enabled input pins read the real-time status
 * register and refresh the cache; otherwise return the cached output
 * value. Returns 0/1, or a negative error code.
 */
static int pmic_gpio_get(struct gpio_chip *chip, unsigned pin)
{
	struct pmic_gpio_state *state = to_gpio_state(chip);
	struct pmic_gpio_pad *pad;
	int ret;

	pad = state->ctrl->desc->pins[pin].drv_data;

	/* pad put in high-Z via PIN_CONFIG_BIAS_HIGH_IMPEDANCE */
	if (!pad->is_enabled)
		return -EINVAL;

	if (pad->input_enabled) {
		ret = pmic_gpio_read(state, pad, PMIC_MPP_REG_RT_STS);
		if (ret < 0)
			return ret;

		pad->out_value = ret & PMIC_MPP_REG_RT_STS_VAL_MASK;
	}

	return pad->out_value;
}
640
/* gpio_chip .set: route through pinconf so the cached pad state stays
 * coherent; .set returns void, so the result is intentionally ignored. */
static void pmic_gpio_set(struct gpio_chip *chip, unsigned pin, int value)
{
	struct pmic_gpio_state *state = to_gpio_state(chip);
	unsigned long config;

	config = pinconf_to_config_packed(PIN_CONFIG_OUTPUT, value);

	pmic_gpio_config_set(state->ctrl, pin, &config, 1);
}

/* Claim the pin from pinctrl when the GPIO is requested. */
static int pmic_gpio_request(struct gpio_chip *chip, unsigned base)
{
	return pinctrl_request_gpio(chip->base + base);
}

/* Release the claim taken in pmic_gpio_request(). */
static void pmic_gpio_free(struct gpio_chip *chip, unsigned base)
{
	pinctrl_free_gpio(chip->base + base);
}
660
/*
 * gpio_chip .of_xlate: DT cells are <gpio-number flags>; GPIO numbers in
 * DT are 1-based, so subtract PMIC_GPIO_PHYSICAL_OFFSET to get the
 * 0-based pin index. Returns the pin index or -EINVAL.
 */
static int pmic_gpio_of_xlate(struct gpio_chip *chip,
			      const struct of_phandle_args *gpio_desc,
			      u32 *flags)
{
	if (chip->of_gpio_n_cells < 2)
		return -EINVAL;

	if (flags)
		*flags = gpio_desc->args[1];

	return gpio_desc->args[0] - PMIC_GPIO_PHYSICAL_OFFSET;
}

/* gpio_chip .to_irq: return the per-pad IRQ fetched in probe(). */
static int pmic_gpio_to_irq(struct gpio_chip *chip, unsigned pin)
{
	struct pmic_gpio_state *state = to_gpio_state(chip);
	struct pmic_gpio_pad *pad;

	pad = state->ctrl->desc->pins[pin].drv_data;

	return pad->irq;
}

/* gpio_chip .dbg_show: one pinconf summary line per GPIO. */
static void pmic_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
{
	struct pmic_gpio_state *state = to_gpio_state(chip);
	unsigned i;

	for (i = 0; i < chip->ngpio; i++) {
		pmic_gpio_config_dbg_show(state->ctrl, s, i);
		seq_puts(s, "\n");
	}
}
694
/* Template copied into state->chip in probe(); per-device fields
 * (dev, base, ngpio, label, ...) are filled in there. */
static const struct gpio_chip pmic_gpio_gpio_template = {
	.direction_input = pmic_gpio_direction_input,
	.direction_output = pmic_gpio_direction_output,
	.get = pmic_gpio_get,
	.set = pmic_gpio_set,
	.request = pmic_gpio_request,
	.free = pmic_gpio_free,
	.of_xlate = pmic_gpio_of_xlate,
	.to_irq = pmic_gpio_to_irq,
	.dbg_show = pmic_gpio_dbg_show,
};
706
/*
 * Probe-time discovery of one pad: validate the peripheral type and
 * subtype, then seed the cached pad state (direction, function, power
 * source, pull, strength, buffer type) from the hardware registers.
 * Returns 0 on success, a negative read error, or -ENODEV for an
 * unrecognised block.
 */
static int pmic_gpio_populate(struct pmic_gpio_state *state,
			      struct pmic_gpio_pad *pad)
{
	int type, subtype, val, dir;

	type = pmic_gpio_read(state, pad, PMIC_GPIO_REG_TYPE);
	if (type < 0)
		return type;

	if (type != PMIC_GPIO_TYPE) {
		dev_err(state->dev, "incorrect block type 0x%x at 0x%x\n",
			type, pad->base);
		return -ENODEV;
	}

	subtype = pmic_gpio_read(state, pad, PMIC_GPIO_REG_SUBTYPE);
	if (subtype < 0)
		return subtype;

	switch (subtype) {
	case PMIC_GPIO_SUBTYPE_GPIO_4CH:
		pad->have_buffer = true;
		/* fallthrough */
	case PMIC_GPIO_SUBTYPE_GPIOC_4CH:
		pad->num_sources = 4;
		break;
	case PMIC_GPIO_SUBTYPE_GPIO_8CH:
		pad->have_buffer = true;
		/* fallthrough */
	case PMIC_GPIO_SUBTYPE_GPIOC_8CH:
		pad->num_sources = 8;
		break;
	default:
		dev_err(state->dev, "unknown GPIO type 0x%x\n", subtype);
		return -ENODEV;
	}

	val = pmic_gpio_read(state, pad, PMIC_GPIO_REG_MODE_CTL);
	if (val < 0)
		return val;

	/* NOTE(review): _SHIFT macro used as mask; assumes value 0x1 — confirm */
	pad->out_value = val & PMIC_GPIO_REG_MODE_VALUE_SHIFT;

	dir = val >> PMIC_GPIO_REG_MODE_DIR_SHIFT;
	dir &= PMIC_GPIO_REG_MODE_DIR_MASK;
	switch (dir) {
	case 0:
		/* input only */
		pad->input_enabled = true;
		pad->output_enabled = false;
		break;
	case 1:
		/* output only */
		pad->input_enabled = false;
		pad->output_enabled = true;
		break;
	case 2:
		/* bidirectional */
		pad->input_enabled = true;
		pad->output_enabled = true;
		break;
	default:
		dev_err(state->dev, "unknown GPIO direction\n");
		return -ENODEV;
	}

	pad->function = val >> PMIC_GPIO_REG_MODE_FUNCTION_SHIFT;
	pad->function &= PMIC_GPIO_REG_MODE_FUNCTION_MASK;

	val = pmic_gpio_read(state, pad, PMIC_GPIO_REG_DIG_VIN_CTL);
	if (val < 0)
		return val;

	pad->power_source = val >> PMIC_GPIO_REG_VIN_SHIFT;
	pad->power_source &= PMIC_GPIO_REG_VIN_MASK;

	val = pmic_gpio_read(state, pad, PMIC_GPIO_REG_DIG_PULL_CTL);
	if (val < 0)
		return val;

	pad->pullup = val >> PMIC_GPIO_REG_PULL_SHIFT;
	pad->pullup &= PMIC_GPIO_REG_PULL_MASK;

	val = pmic_gpio_read(state, pad, PMIC_GPIO_REG_DIG_OUT_CTL);
	if (val < 0)
		return val;

	pad->strength = val >> PMIC_GPIO_REG_OUT_STRENGTH_SHIFT;
	pad->strength &= PMIC_GPIO_REG_OUT_STRENGTH_MASK;

	pad->buffer_type = val >> PMIC_GPIO_REG_OUT_TYPE_SHIFT;
	pad->buffer_type &= PMIC_GPIO_REG_OUT_TYPE_MASK;

	/* Pin could be disabled with PIN_CONFIG_BIAS_HIGH_IMPEDANCE */
	pad->is_enabled = true;
	return 0;
}
799
/*
 * Probe: derive the pin count from the DT "reg" range, discover and
 * cache each pad's hardware state, then register the pin controller,
 * the gpio_chip, and the GPIO<->pin range.
 */
static int pmic_gpio_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct pinctrl_pin_desc *pindesc;
	struct pinctrl_desc *pctrldesc;
	struct pmic_gpio_pad *pad, *pads;
	struct pmic_gpio_state *state;
	int ret, npins, i;
	u32 res[2];

	/* res[0] = SPMI base address, res[1] = address range */
	ret = of_property_read_u32_array(dev->of_node, "reg", res, 2);
	if (ret < 0) {
		dev_err(dev, "missing base address and/or range");
		return ret;
	}

	/* each GPIO block occupies one PMIC_GPIO_ADDRESS_RANGE slice */
	npins = res[1] / PMIC_GPIO_ADDRESS_RANGE;

	if (!npins)
		return -EINVAL;

	/* group-name table is the hard upper bound on supported pins */
	BUG_ON(npins > ARRAY_SIZE(pmic_gpio_groups));

	state = devm_kzalloc(dev, sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	platform_set_drvdata(pdev, state);

	state->dev = &pdev->dev;
	/* NOTE(review): dev_get_regmap() may return NULL; not checked here —
	 * confirm the parent SPMI device always registers a regmap */
	state->map = dev_get_regmap(dev->parent, NULL);

	pindesc = devm_kcalloc(dev, npins, sizeof(*pindesc), GFP_KERNEL);
	if (!pindesc)
		return -ENOMEM;

	pads = devm_kcalloc(dev, npins, sizeof(*pads), GFP_KERNEL);
	if (!pads)
		return -ENOMEM;

	pctrldesc = devm_kzalloc(dev, sizeof(*pctrldesc), GFP_KERNEL);
	if (!pctrldesc)
		return -ENOMEM;

	pctrldesc->pctlops = &pmic_gpio_pinctrl_ops;
	pctrldesc->pmxops = &pmic_gpio_pinmux_ops;
	pctrldesc->confops = &pmic_gpio_pinconf_ops;
	pctrldesc->owner = THIS_MODULE;
	pctrldesc->name = dev_name(dev);
	pctrldesc->pins = pindesc;
	pctrldesc->npins = npins;

	for (i = 0; i < npins; i++, pindesc++) {
		pad = &pads[i];
		pindesc->drv_data = pad;
		pindesc->number = i;
		pindesc->name = pmic_gpio_groups[i];

		/* one interrupt resource per pad, in pin order */
		pad->irq = platform_get_irq(pdev, i);
		if (pad->irq < 0)
			return pad->irq;

		pad->base = res[0] + i * PMIC_GPIO_ADDRESS_RANGE;

		/* read back the hardware state into the pad cache */
		ret = pmic_gpio_populate(state, pad);
		if (ret < 0)
			return ret;
	}

	state->chip = pmic_gpio_gpio_template;
	state->chip.dev = dev;
	state->chip.base = -1;
	state->chip.ngpio = npins;
	state->chip.label = dev_name(dev);
	state->chip.of_gpio_n_cells = 2;
	state->chip.can_sleep = false;

	state->ctrl = pinctrl_register(pctrldesc, dev, state);
	if (!state->ctrl)
		return -ENODEV;

	ret = gpiochip_add(&state->chip);
	if (ret) {
		dev_err(state->dev, "can't add gpio chip\n");
		goto err_chip;
	}

	ret = gpiochip_add_pin_range(&state->chip, dev_name(dev), 0, 0, npins);
	if (ret) {
		dev_err(dev, "failed to add pin range\n");
		goto err_range;
	}

	return 0;

	/* unwind in reverse registration order */
err_range:
	gpiochip_remove(&state->chip);
err_chip:
	pinctrl_unregister(state->ctrl);
	return ret;
}
901
/* Remove: tear down in reverse of probe(); memory is devm-managed. */
static int pmic_gpio_remove(struct platform_device *pdev)
{
	struct pmic_gpio_state *state = platform_get_drvdata(pdev);

	gpiochip_remove(&state->chip);
	pinctrl_unregister(state->ctrl);
	return 0;
}
910
static const struct of_device_id pmic_gpio_of_match[] = {
	{ .compatible = "qcom,pm8941-gpio" },	/* 36 GPIO's */
	{ .compatible = "qcom,pma8084-gpio" },	/* 22 GPIO's */
	{ },
};

MODULE_DEVICE_TABLE(of, pmic_gpio_of_match);

static struct platform_driver pmic_gpio_driver = {
	.driver = {
		   .name = "qcom-spmi-gpio",
		   .of_match_table = pmic_gpio_of_match,
	},
	.probe = pmic_gpio_probe,
	.remove = pmic_gpio_remove,
};

module_platform_driver(pmic_gpio_driver);

MODULE_AUTHOR("Ivan T. Ivanov <iivanov@mm-sol.com>");
MODULE_DESCRIPTION("Qualcomm SPMI PMIC GPIO pin control driver");
MODULE_ALIAS("platform:qcom-spmi-gpio");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
new file mode 100644
index 000000000000..a8924dba335e
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
@@ -0,0 +1,949 @@
1/*
2 * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include <linux/gpio.h>
15#include <linux/module.h>
16#include <linux/of.h>
17#include <linux/pinctrl/pinconf-generic.h>
18#include <linux/pinctrl/pinconf.h>
19#include <linux/pinctrl/pinmux.h>
20#include <linux/platform_device.h>
21#include <linux/regmap.h>
22#include <linux/slab.h>
23#include <linux/types.h>
24
25#include <dt-bindings/pinctrl/qcom,pmic-mpp.h>
26
27#include "../core.h"
28#include "../pinctrl-utils.h"
29
/* Address stride between consecutive MPP blocks on the SPMI bus. */
#define PMIC_MPP_ADDRESS_RANGE			0x100

/*
 * Pull Up Values - it indicates whether a pull-up should be
 * applied for bidirectional mode only. The hardware ignores the
 * configuration when operating in other modes.
 */
#define PMIC_MPP_PULL_UP_0P6KOHM		0
#define PMIC_MPP_PULL_UP_10KOHM			1
#define PMIC_MPP_PULL_UP_30KOHM			2
#define PMIC_MPP_PULL_UP_OPEN			3

/* type registers base address bases */
#define PMIC_MPP_REG_TYPE			0x4
#define PMIC_MPP_REG_SUBTYPE			0x5

/* mpp peripheral type and subtype values */
#define PMIC_MPP_TYPE				0x11
#define PMIC_MPP_SUBTYPE_4CH_NO_ANA_OUT		0x3
#define PMIC_MPP_SUBTYPE_ULT_4CH_NO_ANA_OUT	0x4
#define PMIC_MPP_SUBTYPE_4CH_NO_SINK		0x5
#define PMIC_MPP_SUBTYPE_ULT_4CH_NO_SINK	0x6
#define PMIC_MPP_SUBTYPE_4CH_FULL_FUNC		0x7
#define PMIC_MPP_SUBTYPE_8CH_FULL_FUNC		0xf

/* real-time (live) pin level status */
#define PMIC_MPP_REG_RT_STS			0x10
#define PMIC_MPP_REG_RT_STS_VAL_MASK		0x1

/* control register base address bases */
#define PMIC_MPP_REG_MODE_CTL			0x40
#define PMIC_MPP_REG_DIG_VIN_CTL		0x41
#define PMIC_MPP_REG_DIG_PULL_CTL		0x42
#define PMIC_MPP_REG_DIG_IN_CTL			0x43
#define PMIC_MPP_REG_EN_CTL			0x46
#define PMIC_MPP_REG_AIN_CTL			0x4a

/* PMIC_MPP_REG_MODE_CTL */
#define PMIC_MPP_REG_MODE_VALUE_MASK		0x1
#define PMIC_MPP_REG_MODE_FUNCTION_SHIFT	1
#define PMIC_MPP_REG_MODE_FUNCTION_MASK		0x7
#define PMIC_MPP_REG_MODE_DIR_SHIFT		4
#define PMIC_MPP_REG_MODE_DIR_MASK		0x7

/* PMIC_MPP_REG_DIG_VIN_CTL */
#define PMIC_MPP_REG_VIN_SHIFT			0
#define PMIC_MPP_REG_VIN_MASK			0x7

/* PMIC_MPP_REG_DIG_PULL_CTL */
#define PMIC_MPP_REG_PULL_SHIFT			0
#define PMIC_MPP_REG_PULL_MASK			0x7

/* PMIC_MPP_REG_EN_CTL */
#define PMIC_MPP_REG_MASTER_EN_SHIFT		7

/* PMIC_MPP_REG_AIN_CTL */
#define PMIC_MPP_REG_AIN_ROUTE_SHIFT		0
#define PMIC_MPP_REG_AIN_ROUTE_MASK		0x7

/* MPPs are numbered from 1 in DT and datasheets; pin indices from 0. */
#define PMIC_MPP_PHYSICAL_OFFSET		1

/* Qualcomm specific pin configurations (outside the generic range) */
#define PMIC_MPP_CONF_AMUX_ROUTE		(PIN_CONFIG_END + 1)
#define PMIC_MPP_CONF_ANALOG_MODE		(PIN_CONFIG_END + 2)
/**
 * struct pmic_mpp_pad - keep current MPP settings
 * @base: Address base in SPMI device.
 * @irq: IRQ number which this MPP generate.
 * @is_enabled: Set to false when MPP should be put in high Z state.
 * @out_value: Cached pin output value.
 * @output_enabled: Set to true if MPP output logic is enabled.
 * @input_enabled: Set to true if MPP input buffer logic is enabled.
 * @analog_mode: Set to true when MPP should operate in Analog Input, Analog
 *	Output or Bidirectional Analog mode.
 * @num_sources: Number of power-sources supported by this MPP.
 * @power_source: Current power-source used.
 * @amux_input: Set the source for analog input.
 * @pullup: Pullup resistor value. Valid in Bidirectional mode only.
 * @function: See pmic_mpp_functions[].
 */
struct pmic_mpp_pad {
	u16 base;
	int irq;
	bool is_enabled;
	bool out_value;
	bool output_enabled;
	bool input_enabled;
	bool analog_mode;
	unsigned int num_sources;
	unsigned int power_source;
	unsigned int amux_input;
	unsigned int pullup;
	unsigned int function;
};

/* Per-device driver state; chip is embedded so to_mpp_state() works. */
struct pmic_mpp_state {
	struct device *dev;
	struct regmap *map;
	struct pinctrl_dev *ctrl;
	struct gpio_chip chip;
};

/* Maps one Qualcomm DT property name to its pinconf parameter id. */
struct pmic_mpp_bindings {
	const char *property;
	unsigned param;
};

static struct pmic_mpp_bindings pmic_mpp_bindings[] = {
	{"qcom,amux-route", PMIC_MPP_CONF_AMUX_ROUTE},
	{"qcom,analog-mode", PMIC_MPP_CONF_ANALOG_MODE},
};

/* Group names, one per MPP; also the hard limit on supported pins. */
static const char *const pmic_mpp_groups[] = {
	"mpp1", "mpp2", "mpp3", "mpp4", "mpp5", "mpp6", "mpp7", "mpp8",
};

/* Indexed by the MODE_CTL function field. */
static const char *const pmic_mpp_functions[] = {
	PMIC_MPP_FUNC_NORMAL, PMIC_MPP_FUNC_PAIRED,
	"reserved1", "reserved2",
	PMIC_MPP_FUNC_DTEST1, PMIC_MPP_FUNC_DTEST2,
	PMIC_MPP_FUNC_DTEST3, PMIC_MPP_FUNC_DTEST4,
};
152
153static inline struct pmic_mpp_state *to_mpp_state(struct gpio_chip *chip)
154{
155 return container_of(chip, struct pmic_mpp_state, chip);
156};
157
/*
 * Read one register of @pad via the shared SPMI regmap.
 * Returns the (non-negative) register value, or a negative regmap error.
 */
static int pmic_mpp_read(struct pmic_mpp_state *state,
			 struct pmic_mpp_pad *pad, unsigned int addr)
{
	unsigned int val;
	int ret;

	ret = regmap_read(state->map, pad->base + addr, &val);
	if (ret < 0)
		dev_err(state->dev, "read 0x%x failed\n", addr);
	else
		ret = val;

	return ret;
}

/*
 * Write one register of @pad via the shared SPMI regmap.
 * Returns 0 on success or a negative regmap error.
 */
static int pmic_mpp_write(struct pmic_mpp_state *state,
			  struct pmic_mpp_pad *pad, unsigned int addr,
			  unsigned int val)
{
	int ret;

	ret = regmap_write(state->map, pad->base + addr, val);
	if (ret < 0)
		dev_err(state->dev, "write 0x%x failed\n", addr);

	return ret;
}
185
186static int pmic_mpp_get_groups_count(struct pinctrl_dev *pctldev)
187{
188 /* Every PIN is a group */
189 return pctldev->desc->npins;
190}
191
192static const char *pmic_mpp_get_group_name(struct pinctrl_dev *pctldev,
193 unsigned pin)
194{
195 return pctldev->desc->pins[pin].name;
196}
197
198static int pmic_mpp_get_group_pins(struct pinctrl_dev *pctldev,
199 unsigned pin,
200 const unsigned **pins, unsigned *num_pins)
201{
202 *pins = &pctldev->desc->pins[pin].number;
203 *num_pins = 1;
204 return 0;
205}
206
/*
 * Translate the Qualcomm-specific DT properties listed in
 * pmic_mpp_bindings[] into packed pinconf entries appended to *configs.
 * The caller owns *configs and must kfree() it.
 */
static int pmic_mpp_parse_dt_config(struct device_node *np,
				    struct pinctrl_dev *pctldev,
				    unsigned long **configs,
				    unsigned int *nconfs)
{
	struct pmic_mpp_bindings *par;
	unsigned long cfg;
	int ret, i;
	u32 val;

	for (i = 0; i < ARRAY_SIZE(pmic_mpp_bindings); i++) {
		par = &pmic_mpp_bindings[i];
		ret = of_property_read_u32(np, par->property, &val);

		/* property not found: skip this binding */
		if (ret == -EINVAL)
			continue;

		/* use zero as default value, when no value is specified */
		if (ret)
			val = 0;

		dev_dbg(pctldev->dev, "found %s with value %u\n",
			par->property, val);

		cfg = pinconf_to_config_packed(par->param, val);

		/* grows the *configs array, bumping *nconfs */
		ret = pinctrl_utils_add_config(pctldev, configs, nconfs, cfg);
		if (ret)
			return ret;
	}

	return 0;
}
241
/*
 * Build pinctrl maps for one DT subnode: parse the Qualcomm-specific
 * properties, then attach the resulting config array to every group
 * named in the subnode's "pins" property.
 */
static int pmic_mpp_dt_subnode_to_map(struct pinctrl_dev *pctldev,
				      struct device_node *np,
				      struct pinctrl_map **map,
				      unsigned *reserv, unsigned *nmaps,
				      enum pinctrl_map_type type)
{
	unsigned long *configs = NULL;
	unsigned nconfs = 0;
	struct property *prop;
	const char *group;
	int ret;

	ret = pmic_mpp_parse_dt_config(np, pctldev, &configs, &nconfs);
	if (ret < 0)
		return ret;

	/* no Qualcomm-specific configs: nothing allocated, nothing to map */
	if (!nconfs)
		return 0;

	/* on success ret is the number of "pins" strings */
	ret = of_property_count_strings(np, "pins");
	if (ret < 0)
		goto exit;

	ret = pinctrl_utils_reserve_map(pctldev, map, reserv, nmaps, ret);
	if (ret < 0)
		goto exit;

	of_property_for_each_string(np, "pins", prop, group) {
		/* the utils helper duplicates configs; our copy is freed below */
		ret = pinctrl_utils_add_map_configs(pctldev, map,
						    reserv, nmaps, group,
						    configs, nconfs, type);
		if (ret < 0)
			break;
	}
exit:
	kfree(configs);
	return ret;
}
280
/*
 * pinctrl_ops .dt_node_to_map: map generic pinconf properties and then
 * the Qualcomm extras for every child node; free partial maps on error.
 */
static int pmic_mpp_dt_node_to_map(struct pinctrl_dev *pctldev,
				   struct device_node *np_config,
				   struct pinctrl_map **map, unsigned *nmaps)
{
	struct device_node *np;
	enum pinctrl_map_type type;
	unsigned reserv;
	int ret;

	ret = 0;
	*map = NULL;
	*nmaps = 0;
	reserv = 0;
	type = PIN_MAP_TYPE_CONFIGS_GROUP;

	for_each_child_of_node(np_config, np) {
		ret = pinconf_generic_dt_subnode_to_map(pctldev, np, map,
							&reserv, nmaps, type);
		if (ret)
			break;

		ret = pmic_mpp_dt_subnode_to_map(pctldev, np, map, &reserv,
						 nmaps, type);
		if (ret)
			break;
	}

	if (ret < 0)
		pinctrl_utils_dt_free_map(pctldev, *map, *nmaps);

	return ret;
}
313
/* Group handling and DT mapping callbacks for the pin controller core. */
static const struct pinctrl_ops pmic_mpp_pinctrl_ops = {
	.get_groups_count = pmic_mpp_get_groups_count,
	.get_group_name = pmic_mpp_get_group_name,
	.get_group_pins = pmic_mpp_get_group_pins,
	.dt_node_to_map = pmic_mpp_dt_node_to_map,
	.dt_free_map = pinctrl_utils_dt_free_map,
};

static int pmic_mpp_get_functions_count(struct pinctrl_dev *pctldev)
{
	return ARRAY_SIZE(pmic_mpp_functions);
}

static const char *pmic_mpp_get_function_name(struct pinctrl_dev *pctldev,
					      unsigned function)
{
	return pmic_mpp_functions[function];
}

/* Every function is selectable on every pin; report the full group table. */
static int pmic_mpp_get_function_groups(struct pinctrl_dev *pctldev,
					unsigned function,
					const char *const **groups,
					unsigned *const num_qgroups)
{
	*groups = pmic_mpp_groups;
	*num_qgroups = pctldev->desc->npins;
	return 0;
}
342
/*
 * pinmux .set_mux: cache the selected function and program the MODE and
 * EN control registers from the cached pad state.
 */
static int pmic_mpp_set_mux(struct pinctrl_dev *pctldev, unsigned function,
			    unsigned pin)
{
	struct pmic_mpp_state *state = pinctrl_dev_get_drvdata(pctldev);
	struct pmic_mpp_pad *pad;
	unsigned int val;
	int ret;

	pad = pctldev->desc->pins[pin].drv_data;

	pad->function = function;

	/* direction field encodes both digital/analog and in/out/both */
	if (!pad->analog_mode) {
		val = 0;	/* just digital input */
		if (pad->output_enabled) {
			if (pad->input_enabled)
				val = 2;	/* digital input and output */
			else
				val = 1;	/* just digital output */
		}
	} else {
		val = 4;	/* just analog input */
		if (pad->output_enabled) {
			if (pad->input_enabled)
				val = 3;	/* analog input and output */
			else
				val = 5;	/* just analog output */
		}
	}

	val |= pad->function << PMIC_MPP_REG_MODE_FUNCTION_SHIFT;
	val |= pad->out_value & PMIC_MPP_REG_MODE_VALUE_MASK;

	ret = pmic_mpp_write(state, pad, PMIC_MPP_REG_MODE_CTL, val);
	if (ret < 0)
		return ret;

	val = pad->is_enabled << PMIC_MPP_REG_MASTER_EN_SHIFT;

	return pmic_mpp_write(state, pad, PMIC_MPP_REG_EN_CTL, val);
}

/* Function selection callbacks for the pinmux core. */
static const struct pinmux_ops pmic_mpp_pinmux_ops = {
	.get_functions_count = pmic_mpp_get_functions_count,
	.get_function_name = pmic_mpp_get_function_name,
	.get_function_groups = pmic_mpp_get_function_groups,
	.set_mux = pmic_mpp_set_mux,
};
391
/*
 * pinconf .pin_config_group_get: report one configuration parameter for
 * @pin from the cached pad state (no hardware access). Pull-up strength
 * is reported in ohms, as expected by the generic pinconf binding.
 */
static int pmic_mpp_config_get(struct pinctrl_dev *pctldev,
			       unsigned int pin, unsigned long *config)
{
	unsigned param = pinconf_to_config_param(*config);
	struct pmic_mpp_pad *pad;
	unsigned arg = 0;

	pad = pctldev->desc->pins[pin].drv_data;

	switch (param) {
	case PIN_CONFIG_BIAS_DISABLE:
		arg = pad->pullup == PMIC_MPP_PULL_UP_OPEN;
		break;
	case PIN_CONFIG_BIAS_PULL_UP:
		/* translate register encoding back to resistance in ohms */
		switch (pad->pullup) {
		case PMIC_MPP_PULL_UP_OPEN:
			arg = 0;
			break;
		case PMIC_MPP_PULL_UP_0P6KOHM:
			arg = 600;
			break;
		case PMIC_MPP_PULL_UP_10KOHM:
			arg = 10000;
			break;
		case PMIC_MPP_PULL_UP_30KOHM:
			arg = 30000;
			break;
		default:
			return -EINVAL;
		}
		break;
	case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
		arg = !pad->is_enabled;
		break;
	case PIN_CONFIG_POWER_SOURCE:
		arg = pad->power_source;
		break;
	case PIN_CONFIG_INPUT_ENABLE:
		arg = pad->input_enabled;
		break;
	case PIN_CONFIG_OUTPUT:
		arg = pad->out_value;
		break;
	case PMIC_MPP_CONF_AMUX_ROUTE:
		arg = pad->amux_input;
		break;
	case PMIC_MPP_CONF_ANALOG_MODE:
		arg = pad->analog_mode;
		break;
	default:
		return -EINVAL;
	}

	/* Convert register value to pinconf value */
	*config = pinconf_to_config_packed(param, arg);
	return 0;
}
449
/*
 * pinconf .pin_config_group_set: fold the given settings into the cached
 * pad state, then write the VIN, PULL, AIN and MODE control registers.
 * Returns 0 on success or a negative error code.
 */
static int pmic_mpp_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
			       unsigned long *configs, unsigned nconfs)
{
	struct pmic_mpp_state *state = pinctrl_dev_get_drvdata(pctldev);
	struct pmic_mpp_pad *pad;
	unsigned param, arg;
	unsigned int val;
	int i, ret;

	pad = pctldev->desc->pins[pin].drv_data;

	for (i = 0; i < nconfs; i++) {
		param = pinconf_to_config_param(configs[i]);
		arg = pinconf_to_config_argument(configs[i]);

		switch (param) {
		case PIN_CONFIG_BIAS_DISABLE:
			pad->pullup = PMIC_MPP_PULL_UP_OPEN;
			break;
		case PIN_CONFIG_BIAS_PULL_UP:
			/* only the three supported resistances, in ohms */
			switch (arg) {
			case 600:
				pad->pullup = PMIC_MPP_PULL_UP_0P6KOHM;
				break;
			case 10000:
				pad->pullup = PMIC_MPP_PULL_UP_10KOHM;
				break;
			case 30000:
				pad->pullup = PMIC_MPP_PULL_UP_30KOHM;
				break;
			default:
				return -EINVAL;
			}
			break;
		case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
			pad->is_enabled = false;
			break;
		case PIN_CONFIG_POWER_SOURCE:
			if (arg >= pad->num_sources)
				return -EINVAL;
			pad->power_source = arg;
			break;
		case PIN_CONFIG_INPUT_ENABLE:
			pad->input_enabled = arg ? true : false;
			break;
		case PIN_CONFIG_OUTPUT:
			pad->output_enabled = true;
			pad->out_value = arg;
			break;
		case PMIC_MPP_CONF_AMUX_ROUTE:
			/*
			 * NOTE(review): ">=" rejects ABUS4 itself; confirm
			 * whether ABUS4 is a valid route (then this should
			 * be ">").
			 */
			if (arg >= PMIC_MPP_AMUX_ROUTE_ABUS4)
				return -EINVAL;
			pad->amux_input = arg;
			break;
		case PMIC_MPP_CONF_ANALOG_MODE:
			/*
			 * NOTE(review): arg is ignored — presence of the
			 * property enables analog mode; there is no way to
			 * clear it here. Confirm this is intended.
			 */
			pad->analog_mode = true;
			break;
		default:
			return -EINVAL;
		}
	}

	val = pad->power_source << PMIC_MPP_REG_VIN_SHIFT;

	ret = pmic_mpp_write(state, pad, PMIC_MPP_REG_DIG_VIN_CTL, val);
	if (ret < 0)
		return ret;

	val = pad->pullup << PMIC_MPP_REG_PULL_SHIFT;

	ret = pmic_mpp_write(state, pad, PMIC_MPP_REG_DIG_PULL_CTL, val);
	if (ret < 0)
		return ret;

	val = pad->amux_input & PMIC_MPP_REG_AIN_ROUTE_MASK;

	ret = pmic_mpp_write(state, pad, PMIC_MPP_REG_AIN_CTL, val);
	if (ret < 0)
		return ret;

	/* direction field encodes both digital/analog and in/out/both */
	if (!pad->analog_mode) {
		val = 0;	/* just digital input */
		if (pad->output_enabled) {
			if (pad->input_enabled)
				val = 2;	/* digital input and output */
			else
				val = 1;	/* just digital output */
		}
	} else {
		val = 4;	/* just analog input */
		if (pad->output_enabled) {
			if (pad->input_enabled)
				val = 3;	/* analog input and output */
			else
				val = 5;	/* just analog output */
		}
	}

	val = val << PMIC_MPP_REG_MODE_DIR_SHIFT;
	val |= pad->function << PMIC_MPP_REG_MODE_FUNCTION_SHIFT;
	val |= pad->out_value & PMIC_MPP_REG_MODE_VALUE_MASK;

	return pmic_mpp_write(state, pad, PMIC_MPP_REG_MODE_CTL, val);
}
554
555static void pmic_mpp_config_dbg_show(struct pinctrl_dev *pctldev,
556 struct seq_file *s, unsigned pin)
557{
558 struct pmic_mpp_state *state = pinctrl_dev_get_drvdata(pctldev);
559 struct pmic_mpp_pad *pad;
560 int ret, val;
561
562 static const char *const biases[] = {
563 "0.6kOhm", "10kOhm", "30kOhm", "Disabled"
564 };
565
566
567 pad = pctldev->desc->pins[pin].drv_data;
568
569 seq_printf(s, " mpp%-2d:", pin + PMIC_MPP_PHYSICAL_OFFSET);
570
571 val = pmic_mpp_read(state, pad, PMIC_MPP_REG_EN_CTL);
572
573 if (val < 0 || !(val >> PMIC_MPP_REG_MASTER_EN_SHIFT)) {
574 seq_puts(s, " ---");
575 } else {
576
577 if (pad->input_enabled) {
578 ret = pmic_mpp_read(state, pad, PMIC_MPP_REG_RT_STS);
579 if (!ret) {
580 ret &= PMIC_MPP_REG_RT_STS_VAL_MASK;
581 pad->out_value = ret;
582 }
583 }
584
585 seq_printf(s, " %-4s", pad->output_enabled ? "out" : "in");
586 seq_printf(s, " %-4s", pad->analog_mode ? "ana" : "dig");
587 seq_printf(s, " %-7s", pmic_mpp_functions[pad->function]);
588 seq_printf(s, " vin-%d", pad->power_source);
589 seq_printf(s, " %-8s", biases[pad->pullup]);
590 seq_printf(s, " %-4s", pad->out_value ? "high" : "low");
591 }
592}
593
/*
 * Pinconf callbacks; the group variants are used, with each group
 * apparently mapping to a single MPP (callbacks take a pin index).
 */
static const struct pinconf_ops pmic_mpp_pinconf_ops = {
	.pin_config_group_get		= pmic_mpp_config_get,
	.pin_config_group_set		= pmic_mpp_config_set,
	.pin_config_group_dbg_show	= pmic_mpp_config_dbg_show,
};
599
600static int pmic_mpp_direction_input(struct gpio_chip *chip, unsigned pin)
601{
602 struct pmic_mpp_state *state = to_mpp_state(chip);
603 unsigned long config;
604
605 config = pinconf_to_config_packed(PIN_CONFIG_INPUT_ENABLE, 1);
606
607 return pmic_mpp_config_set(state->ctrl, pin, &config, 1);
608}
609
610static int pmic_mpp_direction_output(struct gpio_chip *chip,
611 unsigned pin, int val)
612{
613 struct pmic_mpp_state *state = to_mpp_state(chip);
614 unsigned long config;
615
616 config = pinconf_to_config_packed(PIN_CONFIG_OUTPUT, val);
617
618 return pmic_mpp_config_set(state->ctrl, pin, &config, 1);
619}
620
621static int pmic_mpp_get(struct gpio_chip *chip, unsigned pin)
622{
623 struct pmic_mpp_state *state = to_mpp_state(chip);
624 struct pmic_mpp_pad *pad;
625 int ret;
626
627 pad = state->ctrl->desc->pins[pin].drv_data;
628
629 if (pad->input_enabled) {
630 ret = pmic_mpp_read(state, pad, PMIC_MPP_REG_RT_STS);
631 if (ret < 0)
632 return ret;
633
634 pad->out_value = ret & PMIC_MPP_REG_RT_STS_VAL_MASK;
635 }
636
637 return pad->out_value;
638}
639
640static void pmic_mpp_set(struct gpio_chip *chip, unsigned pin, int value)
641{
642 struct pmic_mpp_state *state = to_mpp_state(chip);
643 unsigned long config;
644
645 config = pinconf_to_config_packed(PIN_CONFIG_OUTPUT, value);
646
647 pmic_mpp_config_set(state->ctrl, pin, &config, 1);
648}
649
/* gpiolib .request: let the pinctrl core claim the pin for GPIO use. */
static int pmic_mpp_request(struct gpio_chip *chip, unsigned base)
{
	return pinctrl_request_gpio(chip->base + base);
}
654
/* gpiolib .free: release the pin back to the pinctrl core. */
static void pmic_mpp_free(struct gpio_chip *chip, unsigned base)
{
	pinctrl_free_gpio(chip->base + base);
}
659
660static int pmic_mpp_of_xlate(struct gpio_chip *chip,
661 const struct of_phandle_args *gpio_desc,
662 u32 *flags)
663{
664 if (chip->of_gpio_n_cells < 2)
665 return -EINVAL;
666
667 if (flags)
668 *flags = gpio_desc->args[1];
669
670 return gpio_desc->args[0] - PMIC_MPP_PHYSICAL_OFFSET;
671}
672
673static int pmic_mpp_to_irq(struct gpio_chip *chip, unsigned pin)
674{
675 struct pmic_mpp_state *state = to_mpp_state(chip);
676 struct pmic_mpp_pad *pad;
677
678 pad = state->ctrl->desc->pins[pin].drv_data;
679
680 return pad->irq;
681}
682
683static void pmic_mpp_dbg_show(struct seq_file *s, struct gpio_chip *chip)
684{
685 struct pmic_mpp_state *state = to_mpp_state(chip);
686 unsigned i;
687
688 for (i = 0; i < chip->ngpio; i++) {
689 pmic_mpp_config_dbg_show(state->ctrl, s, i);
690 seq_puts(s, "\n");
691 }
692}
693
/*
 * Template gpio_chip: probe copies this into state->chip and then fills
 * in the per-device fields (dev, base, ngpio, label, of_gpio_n_cells).
 */
static const struct gpio_chip pmic_mpp_gpio_template = {
	.direction_input	= pmic_mpp_direction_input,
	.direction_output	= pmic_mpp_direction_output,
	.get			= pmic_mpp_get,
	.set			= pmic_mpp_set,
	.request		= pmic_mpp_request,
	.free			= pmic_mpp_free,
	.of_xlate		= pmic_mpp_of_xlate,
	.to_irq			= pmic_mpp_to_irq,
	.dbg_show		= pmic_mpp_dbg_show,
};
705
/*
 * pmic_mpp_populate() - read one pad's hardware state into the sw cache.
 * @state: driver state (regmap handle)
 * @pad:   pad to initialize; pad->base must already be set
 *
 * Verifies the peripheral type/subtype, then decodes the MODE, VIN,
 * PULL and AIN registers into the fields of @pad so later pinconf
 * operations start from the hardware's current configuration.
 *
 * Returns 0 on success, a negative pmic_mpp_read() error, or -ENODEV
 * for an unrecognized block type/subtype/direction encoding.
 */
static int pmic_mpp_populate(struct pmic_mpp_state *state,
			     struct pmic_mpp_pad *pad)
{
	int type, subtype, val, dir;

	/* Sanity-check that this address really is an MPP block. */
	type = pmic_mpp_read(state, pad, PMIC_MPP_REG_TYPE);
	if (type < 0)
		return type;

	if (type != PMIC_MPP_TYPE) {
		dev_err(state->dev, "incorrect block type 0x%x at 0x%x\n",
			type, pad->base);
		return -ENODEV;
	}

	/* The subtype determines how many VIN power sources exist. */
	subtype = pmic_mpp_read(state, pad, PMIC_MPP_REG_SUBTYPE);
	if (subtype < 0)
		return subtype;

	switch (subtype) {
	case PMIC_MPP_SUBTYPE_4CH_NO_ANA_OUT:
	case PMIC_MPP_SUBTYPE_ULT_4CH_NO_ANA_OUT:
	case PMIC_MPP_SUBTYPE_4CH_NO_SINK:
	case PMIC_MPP_SUBTYPE_ULT_4CH_NO_SINK:
	case PMIC_MPP_SUBTYPE_4CH_FULL_FUNC:
		pad->num_sources = 4;
		break;
	case PMIC_MPP_SUBTYPE_8CH_FULL_FUNC:
		pad->num_sources = 8;
		break;
	default:
		dev_err(state->dev, "unknown MPP type 0x%x at 0x%x\n",
			subtype, pad->base);
		return -ENODEV;
	}

	val = pmic_mpp_read(state, pad, PMIC_MPP_REG_MODE_CTL);
	if (val < 0)
		return val;

	pad->out_value = val & PMIC_MPP_REG_MODE_VALUE_MASK;

	dir = val >> PMIC_MPP_REG_MODE_DIR_SHIFT;
	dir &= PMIC_MPP_REG_MODE_DIR_MASK;

	/*
	 * Decode the mode/direction field; this is the inverse of the
	 * encoding written back by pmic_mpp_config_set():
	 * 0-2 digital (in / out / both), 3-5 analog (both / in / out).
	 */
	switch (dir) {
	case 0:
		pad->input_enabled = true;
		pad->output_enabled = false;
		pad->analog_mode = false;
		break;
	case 1:
		pad->input_enabled = false;
		pad->output_enabled = true;
		pad->analog_mode = false;
		break;
	case 2:
		pad->input_enabled = true;
		pad->output_enabled = true;
		pad->analog_mode = false;
		break;
	case 3:
		pad->input_enabled = true;
		pad->output_enabled = true;
		pad->analog_mode = true;
		break;
	case 4:
		pad->input_enabled = true;
		pad->output_enabled = false;
		pad->analog_mode = true;
		break;
	case 5:
		pad->input_enabled = false;
		pad->output_enabled = true;
		pad->analog_mode = true;
		break;
	default:
		dev_err(state->dev, "unknown MPP direction\n");
		return -ENODEV;
	}

	pad->function = val >> PMIC_MPP_REG_MODE_FUNCTION_SHIFT;
	pad->function &= PMIC_MPP_REG_MODE_FUNCTION_MASK;

	val = pmic_mpp_read(state, pad, PMIC_MPP_REG_DIG_VIN_CTL);
	if (val < 0)
		return val;

	pad->power_source = val >> PMIC_MPP_REG_VIN_SHIFT;
	pad->power_source &= PMIC_MPP_REG_VIN_MASK;

	val = pmic_mpp_read(state, pad, PMIC_MPP_REG_DIG_PULL_CTL);
	if (val < 0)
		return val;

	pad->pullup = val >> PMIC_MPP_REG_PULL_SHIFT;
	pad->pullup &= PMIC_MPP_REG_PULL_MASK;

	val = pmic_mpp_read(state, pad, PMIC_MPP_REG_AIN_CTL);
	if (val < 0)
		return val;

	pad->amux_input = val >> PMIC_MPP_REG_AIN_ROUTE_SHIFT;
	pad->amux_input &= PMIC_MPP_REG_AIN_ROUTE_MASK;

	/* Pin could be disabled with PIN_CONFIG_BIAS_HIGH_IMPEDANCE */
	pad->is_enabled = true;
	return 0;
}
815
816static int pmic_mpp_probe(struct platform_device *pdev)
817{
818 struct device *dev = &pdev->dev;
819 struct pinctrl_pin_desc *pindesc;
820 struct pinctrl_desc *pctrldesc;
821 struct pmic_mpp_pad *pad, *pads;
822 struct pmic_mpp_state *state;
823 int ret, npins, i;
824 u32 res[2];
825
826 ret = of_property_read_u32_array(dev->of_node, "reg", res, 2);
827 if (ret < 0) {
828 dev_err(dev, "missing base address and/or range");
829 return ret;
830 }
831
832 npins = res[1] / PMIC_MPP_ADDRESS_RANGE;
833 if (!npins)
834 return -EINVAL;
835
836 BUG_ON(npins > ARRAY_SIZE(pmic_mpp_groups));
837
838 state = devm_kzalloc(dev, sizeof(*state), GFP_KERNEL);
839 if (!state)
840 return -ENOMEM;
841
842 platform_set_drvdata(pdev, state);
843
844 state->dev = &pdev->dev;
845 state->map = dev_get_regmap(dev->parent, NULL);
846
847 pindesc = devm_kcalloc(dev, npins, sizeof(*pindesc), GFP_KERNEL);
848 if (!pindesc)
849 return -ENOMEM;
850
851 pads = devm_kcalloc(dev, npins, sizeof(*pads), GFP_KERNEL);
852 if (!pads)
853 return -ENOMEM;
854
855 pctrldesc = devm_kzalloc(dev, sizeof(*pctrldesc), GFP_KERNEL);
856 if (!pctrldesc)
857 return -ENOMEM;
858
859 pctrldesc->pctlops = &pmic_mpp_pinctrl_ops;
860 pctrldesc->pmxops = &pmic_mpp_pinmux_ops;
861 pctrldesc->confops = &pmic_mpp_pinconf_ops;
862 pctrldesc->owner = THIS_MODULE;
863 pctrldesc->name = dev_name(dev);
864 pctrldesc->pins = pindesc;
865 pctrldesc->npins = npins;
866
867 for (i = 0; i < npins; i++, pindesc++) {
868 pad = &pads[i];
869 pindesc->drv_data = pad;
870 pindesc->number = i;
871 pindesc->name = pmic_mpp_groups[i];
872
873 pad->irq = platform_get_irq(pdev, i);
874 if (pad->irq < 0)
875 return pad->irq;
876
877 pad->base = res[0] + i * PMIC_MPP_ADDRESS_RANGE;
878
879 ret = pmic_mpp_populate(state, pad);
880 if (ret < 0)
881 return ret;
882 }
883
884 state->chip = pmic_mpp_gpio_template;
885 state->chip.dev = dev;
886 state->chip.base = -1;
887 state->chip.ngpio = npins;
888 state->chip.label = dev_name(dev);
889 state->chip.of_gpio_n_cells = 2;
890 state->chip.can_sleep = false;
891
892 state->ctrl = pinctrl_register(pctrldesc, dev, state);
893 if (!state->ctrl)
894 return -ENODEV;
895
896 ret = gpiochip_add(&state->chip);
897 if (ret) {
898 dev_err(state->dev, "can't add gpio chip\n");
899 goto err_chip;
900 }
901
902 ret = gpiochip_add_pin_range(&state->chip, dev_name(dev), 0, 0, npins);
903 if (ret) {
904 dev_err(dev, "failed to add pin range\n");
905 goto err_range;
906 }
907
908 return 0;
909
910err_range:
911 gpiochip_remove(&state->chip);
912err_chip:
913 pinctrl_unregister(state->ctrl);
914 return ret;
915}
916
/* Tear down in reverse probe order: gpiochip first, then pinctrl. */
static int pmic_mpp_remove(struct platform_device *pdev)
{
	struct pmic_mpp_state *state = platform_get_drvdata(pdev);

	gpiochip_remove(&state->chip);
	pinctrl_unregister(state->ctrl);
	return 0;
}
925
/* Supported PMICs; the comments give the MPP count of each chip. */
static const struct of_device_id pmic_mpp_of_match[] = {
	{ .compatible = "qcom,pm8841-mpp" },	/* 4 MPP's */
	{ .compatible = "qcom,pm8941-mpp" },	/* 8 MPP's */
	{ .compatible = "qcom,pma8084-mpp" },	/* 8 MPP's */
	{ },
};

MODULE_DEVICE_TABLE(of, pmic_mpp_of_match);
934
/* Platform driver glue; devm handles most cleanup, remove the rest. */
static struct platform_driver pmic_mpp_driver = {
	.driver = {
		   .name = "qcom-spmi-mpp",
		   .of_match_table = pmic_mpp_of_match,
	},
	.probe	= pmic_mpp_probe,
	.remove = pmic_mpp_remove,
};

module_platform_driver(pmic_mpp_driver);

MODULE_AUTHOR("Ivan T. Ivanov <iivanov@mm-sol.com>");
MODULE_DESCRIPTION("Qualcomm SPMI PMIC MPP pin control driver");
MODULE_ALIAS("platform:qcom-spmi-mpp");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c
index d7154ed0b0eb..d5d4cfc55873 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.c
@@ -46,22 +46,16 @@ static inline struct exynos_irq_chip *to_exynos_irq_chip(struct irq_chip *chip)
46 return container_of(chip, struct exynos_irq_chip, chip); 46 return container_of(chip, struct exynos_irq_chip, chip);
47} 47}
48 48
49static struct samsung_pin_bank_type bank_type_off = { 49static const struct samsung_pin_bank_type bank_type_off = {
50 .fld_width = { 4, 1, 2, 2, 2, 2, }, 50 .fld_width = { 4, 1, 2, 2, 2, 2, },
51 .reg_offset = { 0x00, 0x04, 0x08, 0x0c, 0x10, 0x14, }, 51 .reg_offset = { 0x00, 0x04, 0x08, 0x0c, 0x10, 0x14, },
52}; 52};
53 53
54static struct samsung_pin_bank_type bank_type_alive = { 54static const struct samsung_pin_bank_type bank_type_alive = {
55 .fld_width = { 4, 1, 2, 2, }, 55 .fld_width = { 4, 1, 2, 2, },
56 .reg_offset = { 0x00, 0x04, 0x08, 0x0c, }, 56 .reg_offset = { 0x00, 0x04, 0x08, 0x0c, },
57}; 57};
58 58
59/* list of external wakeup controllers supported */
60static const struct of_device_id exynos_wkup_irq_ids[] = {
61 { .compatible = "samsung,exynos4210-wakeup-eint", },
62 { }
63};
64
65static void exynos_irq_mask(struct irq_data *irqd) 59static void exynos_irq_mask(struct irq_data *irqd)
66{ 60{
67 struct irq_chip *chip = irq_data_get_irq_chip(irqd); 61 struct irq_chip *chip = irq_data_get_irq_chip(irqd);
@@ -171,7 +165,7 @@ static int exynos_irq_request_resources(struct irq_data *irqd)
171 struct irq_chip *chip = irq_data_get_irq_chip(irqd); 165 struct irq_chip *chip = irq_data_get_irq_chip(irqd);
172 struct exynos_irq_chip *our_chip = to_exynos_irq_chip(chip); 166 struct exynos_irq_chip *our_chip = to_exynos_irq_chip(chip);
173 struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd); 167 struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
174 struct samsung_pin_bank_type *bank_type = bank->type; 168 const struct samsung_pin_bank_type *bank_type = bank->type;
175 struct samsung_pinctrl_drv_data *d = bank->drvdata; 169 struct samsung_pinctrl_drv_data *d = bank->drvdata;
176 unsigned int shift = EXYNOS_EINT_CON_LEN * irqd->hwirq; 170 unsigned int shift = EXYNOS_EINT_CON_LEN * irqd->hwirq;
177 unsigned long reg_con = our_chip->eint_con + bank->eint_offset; 171 unsigned long reg_con = our_chip->eint_con + bank->eint_offset;
@@ -210,7 +204,7 @@ static void exynos_irq_release_resources(struct irq_data *irqd)
210 struct irq_chip *chip = irq_data_get_irq_chip(irqd); 204 struct irq_chip *chip = irq_data_get_irq_chip(irqd);
211 struct exynos_irq_chip *our_chip = to_exynos_irq_chip(chip); 205 struct exynos_irq_chip *our_chip = to_exynos_irq_chip(chip);
212 struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd); 206 struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
213 struct samsung_pin_bank_type *bank_type = bank->type; 207 const struct samsung_pin_bank_type *bank_type = bank->type;
214 struct samsung_pinctrl_drv_data *d = bank->drvdata; 208 struct samsung_pinctrl_drv_data *d = bank->drvdata;
215 unsigned int shift = EXYNOS_EINT_CON_LEN * irqd->hwirq; 209 unsigned int shift = EXYNOS_EINT_CON_LEN * irqd->hwirq;
216 unsigned long reg_con = our_chip->eint_con + bank->eint_offset; 210 unsigned long reg_con = our_chip->eint_con + bank->eint_offset;
@@ -254,31 +248,30 @@ static struct exynos_irq_chip exynos_gpio_irq_chip = {
254 .eint_pend = EXYNOS_GPIO_EPEND_OFFSET, 248 .eint_pend = EXYNOS_GPIO_EPEND_OFFSET,
255}; 249};
256 250
257static int exynos_gpio_irq_map(struct irq_domain *h, unsigned int virq, 251static int exynos_eint_irq_map(struct irq_domain *h, unsigned int virq,
258 irq_hw_number_t hw) 252 irq_hw_number_t hw)
259{ 253{
260 struct samsung_pin_bank *b = h->host_data; 254 struct samsung_pin_bank *b = h->host_data;
261 255
262 irq_set_chip_data(virq, b); 256 irq_set_chip_data(virq, b);
263 irq_set_chip_and_handler(virq, &exynos_gpio_irq_chip.chip, 257 irq_set_chip_and_handler(virq, &b->irq_chip->chip,
264 handle_level_irq); 258 handle_level_irq);
265 set_irq_flags(virq, IRQF_VALID); 259 set_irq_flags(virq, IRQF_VALID);
266 return 0; 260 return 0;
267} 261}
268 262
269/* 263/*
270 * irq domain callbacks for external gpio interrupt controller. 264 * irq domain callbacks for external gpio and wakeup interrupt controllers.
271 */ 265 */
272static const struct irq_domain_ops exynos_gpio_irqd_ops = { 266static const struct irq_domain_ops exynos_eint_irqd_ops = {
273 .map = exynos_gpio_irq_map, 267 .map = exynos_eint_irq_map,
274 .xlate = irq_domain_xlate_twocell, 268 .xlate = irq_domain_xlate_twocell,
275}; 269};
276 270
277static irqreturn_t exynos_eint_gpio_irq(int irq, void *data) 271static irqreturn_t exynos_eint_gpio_irq(int irq, void *data)
278{ 272{
279 struct samsung_pinctrl_drv_data *d = data; 273 struct samsung_pinctrl_drv_data *d = data;
280 struct samsung_pin_ctrl *ctrl = d->ctrl; 274 struct samsung_pin_bank *bank = d->pin_banks;
281 struct samsung_pin_bank *bank = ctrl->pin_banks;
282 unsigned int svc, group, pin, virq; 275 unsigned int svc, group, pin, virq;
283 276
284 svc = readl(d->virt_base + EXYNOS_SVC_OFFSET); 277 svc = readl(d->virt_base + EXYNOS_SVC_OFFSET);
@@ -325,12 +318,12 @@ static int exynos_eint_gpio_init(struct samsung_pinctrl_drv_data *d)
325 return -ENXIO; 318 return -ENXIO;
326 } 319 }
327 320
328 bank = d->ctrl->pin_banks; 321 bank = d->pin_banks;
329 for (i = 0; i < d->ctrl->nr_banks; ++i, ++bank) { 322 for (i = 0; i < d->nr_banks; ++i, ++bank) {
330 if (bank->eint_type != EINT_TYPE_GPIO) 323 if (bank->eint_type != EINT_TYPE_GPIO)
331 continue; 324 continue;
332 bank->irq_domain = irq_domain_add_linear(bank->of_node, 325 bank->irq_domain = irq_domain_add_linear(bank->of_node,
333 bank->nr_pins, &exynos_gpio_irqd_ops, bank); 326 bank->nr_pins, &exynos_eint_irqd_ops, bank);
334 if (!bank->irq_domain) { 327 if (!bank->irq_domain) {
335 dev_err(dev, "gpio irq domain add failed\n"); 328 dev_err(dev, "gpio irq domain add failed\n");
336 ret = -ENXIO; 329 ret = -ENXIO;
@@ -344,6 +337,8 @@ static int exynos_eint_gpio_init(struct samsung_pinctrl_drv_data *d)
344 ret = -ENOMEM; 337 ret = -ENOMEM;
345 goto err_domains; 338 goto err_domains;
346 } 339 }
340
341 bank->irq_chip = &exynos_gpio_irq_chip;
347 } 342 }
348 343
349 return 0; 344 return 0;
@@ -383,9 +378,9 @@ static int exynos_wkup_irq_set_wake(struct irq_data *irqd, unsigned int on)
383/* 378/*
384 * irq_chip for wakeup interrupts 379 * irq_chip for wakeup interrupts
385 */ 380 */
386static struct exynos_irq_chip exynos_wkup_irq_chip = { 381static struct exynos_irq_chip exynos4210_wkup_irq_chip __initdata = {
387 .chip = { 382 .chip = {
388 .name = "exynos_wkup_irq_chip", 383 .name = "exynos4210_wkup_irq_chip",
389 .irq_unmask = exynos_irq_unmask, 384 .irq_unmask = exynos_irq_unmask,
390 .irq_mask = exynos_irq_mask, 385 .irq_mask = exynos_irq_mask,
391 .irq_ack = exynos_irq_ack, 386 .irq_ack = exynos_irq_ack,
@@ -399,6 +394,31 @@ static struct exynos_irq_chip exynos_wkup_irq_chip = {
399 .eint_pend = EXYNOS_WKUP_EPEND_OFFSET, 394 .eint_pend = EXYNOS_WKUP_EPEND_OFFSET,
400}; 395};
401 396
397static struct exynos_irq_chip exynos7_wkup_irq_chip __initdata = {
398 .chip = {
399 .name = "exynos7_wkup_irq_chip",
400 .irq_unmask = exynos_irq_unmask,
401 .irq_mask = exynos_irq_mask,
402 .irq_ack = exynos_irq_ack,
403 .irq_set_type = exynos_irq_set_type,
404 .irq_set_wake = exynos_wkup_irq_set_wake,
405 .irq_request_resources = exynos_irq_request_resources,
406 .irq_release_resources = exynos_irq_release_resources,
407 },
408 .eint_con = EXYNOS7_WKUP_ECON_OFFSET,
409 .eint_mask = EXYNOS7_WKUP_EMASK_OFFSET,
410 .eint_pend = EXYNOS7_WKUP_EPEND_OFFSET,
411};
412
413/* list of external wakeup controllers supported */
414static const struct of_device_id exynos_wkup_irq_ids[] = {
415 { .compatible = "samsung,exynos4210-wakeup-eint",
416 .data = &exynos4210_wkup_irq_chip },
417 { .compatible = "samsung,exynos7-wakeup-eint",
418 .data = &exynos7_wkup_irq_chip },
419 { }
420};
421
402/* interrupt handler for wakeup interrupts 0..15 */ 422/* interrupt handler for wakeup interrupts 0..15 */
403static void exynos_irq_eint0_15(unsigned int irq, struct irq_desc *desc) 423static void exynos_irq_eint0_15(unsigned int irq, struct irq_desc *desc)
404{ 424{
@@ -445,9 +465,9 @@ static void exynos_irq_demux_eint16_31(unsigned int irq, struct irq_desc *desc)
445 465
446 for (i = 0; i < eintd->nr_banks; ++i) { 466 for (i = 0; i < eintd->nr_banks; ++i) {
447 struct samsung_pin_bank *b = eintd->banks[i]; 467 struct samsung_pin_bank *b = eintd->banks[i];
448 pend = readl(d->virt_base + EXYNOS_WKUP_EPEND_OFFSET 468 pend = readl(d->virt_base + b->irq_chip->eint_pend
449 + b->eint_offset); 469 + b->eint_offset);
450 mask = readl(d->virt_base + EXYNOS_WKUP_EMASK_OFFSET 470 mask = readl(d->virt_base + b->irq_chip->eint_mask
451 + b->eint_offset); 471 + b->eint_offset);
452 exynos_irq_demux_eint(pend & ~mask, b->irq_domain); 472 exynos_irq_demux_eint(pend & ~mask, b->irq_domain);
453 } 473 }
@@ -455,24 +475,6 @@ static void exynos_irq_demux_eint16_31(unsigned int irq, struct irq_desc *desc)
455 chained_irq_exit(chip, desc); 475 chained_irq_exit(chip, desc);
456} 476}
457 477
458static int exynos_wkup_irq_map(struct irq_domain *h, unsigned int virq,
459 irq_hw_number_t hw)
460{
461 irq_set_chip_and_handler(virq, &exynos_wkup_irq_chip.chip,
462 handle_level_irq);
463 irq_set_chip_data(virq, h->host_data);
464 set_irq_flags(virq, IRQF_VALID);
465 return 0;
466}
467
468/*
469 * irq domain callbacks for external wakeup interrupt controller.
470 */
471static const struct irq_domain_ops exynos_wkup_irqd_ops = {
472 .map = exynos_wkup_irq_map,
473 .xlate = irq_domain_xlate_twocell,
474};
475
476/* 478/*
477 * exynos_eint_wkup_init() - setup handling of external wakeup interrupts. 479 * exynos_eint_wkup_init() - setup handling of external wakeup interrupts.
478 * @d: driver data of samsung pinctrl driver. 480 * @d: driver data of samsung pinctrl driver.
@@ -485,12 +487,18 @@ static int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d)
485 struct samsung_pin_bank *bank; 487 struct samsung_pin_bank *bank;
486 struct exynos_weint_data *weint_data; 488 struct exynos_weint_data *weint_data;
487 struct exynos_muxed_weint_data *muxed_data; 489 struct exynos_muxed_weint_data *muxed_data;
490 struct exynos_irq_chip *irq_chip;
488 unsigned int muxed_banks = 0; 491 unsigned int muxed_banks = 0;
489 unsigned int i; 492 unsigned int i;
490 int idx, irq; 493 int idx, irq;
491 494
492 for_each_child_of_node(dev->of_node, np) { 495 for_each_child_of_node(dev->of_node, np) {
493 if (of_match_node(exynos_wkup_irq_ids, np)) { 496 const struct of_device_id *match;
497
498 match = of_match_node(exynos_wkup_irq_ids, np);
499 if (match) {
500 irq_chip = kmemdup(match->data,
501 sizeof(*irq_chip), GFP_KERNEL);
494 wkup_np = np; 502 wkup_np = np;
495 break; 503 break;
496 } 504 }
@@ -498,18 +506,20 @@ static int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d)
498 if (!wkup_np) 506 if (!wkup_np)
499 return -ENODEV; 507 return -ENODEV;
500 508
501 bank = d->ctrl->pin_banks; 509 bank = d->pin_banks;
502 for (i = 0; i < d->ctrl->nr_banks; ++i, ++bank) { 510 for (i = 0; i < d->nr_banks; ++i, ++bank) {
503 if (bank->eint_type != EINT_TYPE_WKUP) 511 if (bank->eint_type != EINT_TYPE_WKUP)
504 continue; 512 continue;
505 513
506 bank->irq_domain = irq_domain_add_linear(bank->of_node, 514 bank->irq_domain = irq_domain_add_linear(bank->of_node,
507 bank->nr_pins, &exynos_wkup_irqd_ops, bank); 515 bank->nr_pins, &exynos_eint_irqd_ops, bank);
508 if (!bank->irq_domain) { 516 if (!bank->irq_domain) {
509 dev_err(dev, "wkup irq domain add failed\n"); 517 dev_err(dev, "wkup irq domain add failed\n");
510 return -ENXIO; 518 return -ENXIO;
511 } 519 }
512 520
521 bank->irq_chip = irq_chip;
522
513 if (!of_find_property(bank->of_node, "interrupts", NULL)) { 523 if (!of_find_property(bank->of_node, "interrupts", NULL)) {
514 bank->eint_type = EINT_TYPE_WKUP_MUX; 524 bank->eint_type = EINT_TYPE_WKUP_MUX;
515 ++muxed_banks; 525 ++muxed_banks;
@@ -556,9 +566,9 @@ static int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d)
556 irq_set_chained_handler(irq, exynos_irq_demux_eint16_31); 566 irq_set_chained_handler(irq, exynos_irq_demux_eint16_31);
557 irq_set_handler_data(irq, muxed_data); 567 irq_set_handler_data(irq, muxed_data);
558 568
559 bank = d->ctrl->pin_banks; 569 bank = d->pin_banks;
560 idx = 0; 570 idx = 0;
561 for (i = 0; i < d->ctrl->nr_banks; ++i, ++bank) { 571 for (i = 0; i < d->nr_banks; ++i, ++bank) {
562 if (bank->eint_type != EINT_TYPE_WKUP_MUX) 572 if (bank->eint_type != EINT_TYPE_WKUP_MUX)
563 continue; 573 continue;
564 574
@@ -590,11 +600,10 @@ static void exynos_pinctrl_suspend_bank(
590 600
591static void exynos_pinctrl_suspend(struct samsung_pinctrl_drv_data *drvdata) 601static void exynos_pinctrl_suspend(struct samsung_pinctrl_drv_data *drvdata)
592{ 602{
593 struct samsung_pin_ctrl *ctrl = drvdata->ctrl; 603 struct samsung_pin_bank *bank = drvdata->pin_banks;
594 struct samsung_pin_bank *bank = ctrl->pin_banks;
595 int i; 604 int i;
596 605
597 for (i = 0; i < ctrl->nr_banks; ++i, ++bank) 606 for (i = 0; i < drvdata->nr_banks; ++i, ++bank)
598 if (bank->eint_type == EINT_TYPE_GPIO) 607 if (bank->eint_type == EINT_TYPE_GPIO)
599 exynos_pinctrl_suspend_bank(drvdata, bank); 608 exynos_pinctrl_suspend_bank(drvdata, bank);
600} 609}
@@ -626,17 +635,16 @@ static void exynos_pinctrl_resume_bank(
626 635
627static void exynos_pinctrl_resume(struct samsung_pinctrl_drv_data *drvdata) 636static void exynos_pinctrl_resume(struct samsung_pinctrl_drv_data *drvdata)
628{ 637{
629 struct samsung_pin_ctrl *ctrl = drvdata->ctrl; 638 struct samsung_pin_bank *bank = drvdata->pin_banks;
630 struct samsung_pin_bank *bank = ctrl->pin_banks;
631 int i; 639 int i;
632 640
633 for (i = 0; i < ctrl->nr_banks; ++i, ++bank) 641 for (i = 0; i < drvdata->nr_banks; ++i, ++bank)
634 if (bank->eint_type == EINT_TYPE_GPIO) 642 if (bank->eint_type == EINT_TYPE_GPIO)
635 exynos_pinctrl_resume_bank(drvdata, bank); 643 exynos_pinctrl_resume_bank(drvdata, bank);
636} 644}
637 645
638/* pin banks of s5pv210 pin-controller */ 646/* pin banks of s5pv210 pin-controller */
639static struct samsung_pin_bank s5pv210_pin_bank[] = { 647static const struct samsung_pin_bank_data s5pv210_pin_bank[] __initconst = {
640 EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00), 648 EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
641 EXYNOS_PIN_BANK_EINTG(4, 0x020, "gpa1", 0x04), 649 EXYNOS_PIN_BANK_EINTG(4, 0x020, "gpa1", 0x04),
642 EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpb", 0x08), 650 EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpb", 0x08),
@@ -673,7 +681,7 @@ static struct samsung_pin_bank s5pv210_pin_bank[] = {
673 EXYNOS_PIN_BANK_EINTW(8, 0xc60, "gph3", 0x0c), 681 EXYNOS_PIN_BANK_EINTW(8, 0xc60, "gph3", 0x0c),
674}; 682};
675 683
676struct samsung_pin_ctrl s5pv210_pin_ctrl[] = { 684const struct samsung_pin_ctrl s5pv210_pin_ctrl[] __initconst = {
677 { 685 {
678 /* pin-controller instance 0 data */ 686 /* pin-controller instance 0 data */
679 .pin_banks = s5pv210_pin_bank, 687 .pin_banks = s5pv210_pin_bank,
@@ -682,12 +690,11 @@ struct samsung_pin_ctrl s5pv210_pin_ctrl[] = {
682 .eint_wkup_init = exynos_eint_wkup_init, 690 .eint_wkup_init = exynos_eint_wkup_init,
683 .suspend = exynos_pinctrl_suspend, 691 .suspend = exynos_pinctrl_suspend,
684 .resume = exynos_pinctrl_resume, 692 .resume = exynos_pinctrl_resume,
685 .label = "s5pv210-gpio-ctrl0",
686 }, 693 },
687}; 694};
688 695
689/* pin banks of exynos3250 pin-controller 0 */ 696/* pin banks of exynos3250 pin-controller 0 */
690static struct samsung_pin_bank exynos3250_pin_banks0[] = { 697static const struct samsung_pin_bank_data exynos3250_pin_banks0[] __initconst = {
691 EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00), 698 EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
692 EXYNOS_PIN_BANK_EINTG(6, 0x020, "gpa1", 0x04), 699 EXYNOS_PIN_BANK_EINTG(6, 0x020, "gpa1", 0x04),
693 EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpb", 0x08), 700 EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpb", 0x08),
@@ -698,7 +705,7 @@ static struct samsung_pin_bank exynos3250_pin_banks0[] = {
698}; 705};
699 706
700/* pin banks of exynos3250 pin-controller 1 */ 707/* pin banks of exynos3250 pin-controller 1 */
701static struct samsung_pin_bank exynos3250_pin_banks1[] = { 708static const struct samsung_pin_bank_data exynos3250_pin_banks1[] __initconst = {
702 EXYNOS_PIN_BANK_EINTN(8, 0x120, "gpe0"), 709 EXYNOS_PIN_BANK_EINTN(8, 0x120, "gpe0"),
703 EXYNOS_PIN_BANK_EINTN(8, 0x140, "gpe1"), 710 EXYNOS_PIN_BANK_EINTN(8, 0x140, "gpe1"),
704 EXYNOS_PIN_BANK_EINTN(3, 0x180, "gpe2"), 711 EXYNOS_PIN_BANK_EINTN(3, 0x180, "gpe2"),
@@ -721,7 +728,7 @@ static struct samsung_pin_bank exynos3250_pin_banks1[] = {
721 * Samsung pinctrl driver data for Exynos3250 SoC. Exynos3250 SoC includes 728 * Samsung pinctrl driver data for Exynos3250 SoC. Exynos3250 SoC includes
722 * two gpio/pin-mux/pinconfig controllers. 729 * two gpio/pin-mux/pinconfig controllers.
723 */ 730 */
724struct samsung_pin_ctrl exynos3250_pin_ctrl[] = { 731const struct samsung_pin_ctrl exynos3250_pin_ctrl[] __initconst = {
725 { 732 {
726 /* pin-controller instance 0 data */ 733 /* pin-controller instance 0 data */
727 .pin_banks = exynos3250_pin_banks0, 734 .pin_banks = exynos3250_pin_banks0,
@@ -729,7 +736,6 @@ struct samsung_pin_ctrl exynos3250_pin_ctrl[] = {
729 .eint_gpio_init = exynos_eint_gpio_init, 736 .eint_gpio_init = exynos_eint_gpio_init,
730 .suspend = exynos_pinctrl_suspend, 737 .suspend = exynos_pinctrl_suspend,
731 .resume = exynos_pinctrl_resume, 738 .resume = exynos_pinctrl_resume,
732 .label = "exynos3250-gpio-ctrl0",
733 }, { 739 }, {
734 /* pin-controller instance 1 data */ 740 /* pin-controller instance 1 data */
735 .pin_banks = exynos3250_pin_banks1, 741 .pin_banks = exynos3250_pin_banks1,
@@ -738,12 +744,11 @@ struct samsung_pin_ctrl exynos3250_pin_ctrl[] = {
738 .eint_wkup_init = exynos_eint_wkup_init, 744 .eint_wkup_init = exynos_eint_wkup_init,
739 .suspend = exynos_pinctrl_suspend, 745 .suspend = exynos_pinctrl_suspend,
740 .resume = exynos_pinctrl_resume, 746 .resume = exynos_pinctrl_resume,
741 .label = "exynos3250-gpio-ctrl1",
742 }, 747 },
743}; 748};
744 749
745/* pin banks of exynos4210 pin-controller 0 */ 750/* pin banks of exynos4210 pin-controller 0 */
746static struct samsung_pin_bank exynos4210_pin_banks0[] = { 751static const struct samsung_pin_bank_data exynos4210_pin_banks0[] __initconst = {
747 EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00), 752 EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
748 EXYNOS_PIN_BANK_EINTG(6, 0x020, "gpa1", 0x04), 753 EXYNOS_PIN_BANK_EINTG(6, 0x020, "gpa1", 0x04),
749 EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpb", 0x08), 754 EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpb", 0x08),
@@ -763,7 +768,7 @@ static struct samsung_pin_bank exynos4210_pin_banks0[] = {
763}; 768};
764 769
765/* pin banks of exynos4210 pin-controller 1 */ 770/* pin banks of exynos4210 pin-controller 1 */
766static struct samsung_pin_bank exynos4210_pin_banks1[] = { 771static const struct samsung_pin_bank_data exynos4210_pin_banks1[] __initconst = {
767 EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpj0", 0x00), 772 EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpj0", 0x00),
768 EXYNOS_PIN_BANK_EINTG(5, 0x020, "gpj1", 0x04), 773 EXYNOS_PIN_BANK_EINTG(5, 0x020, "gpj1", 0x04),
769 EXYNOS_PIN_BANK_EINTG(7, 0x040, "gpk0", 0x08), 774 EXYNOS_PIN_BANK_EINTG(7, 0x040, "gpk0", 0x08),
@@ -787,7 +792,7 @@ static struct samsung_pin_bank exynos4210_pin_banks1[] = {
787}; 792};
788 793
789/* pin banks of exynos4210 pin-controller 2 */ 794/* pin banks of exynos4210 pin-controller 2 */
790static struct samsung_pin_bank exynos4210_pin_banks2[] = { 795static const struct samsung_pin_bank_data exynos4210_pin_banks2[] __initconst = {
791 EXYNOS_PIN_BANK_EINTN(7, 0x000, "gpz"), 796 EXYNOS_PIN_BANK_EINTN(7, 0x000, "gpz"),
792}; 797};
793 798
@@ -795,7 +800,7 @@ static struct samsung_pin_bank exynos4210_pin_banks2[] = {
795 * Samsung pinctrl driver data for Exynos4210 SoC. Exynos4210 SoC includes 800 * Samsung pinctrl driver data for Exynos4210 SoC. Exynos4210 SoC includes
796 * three gpio/pin-mux/pinconfig controllers. 801 * three gpio/pin-mux/pinconfig controllers.
797 */ 802 */
798struct samsung_pin_ctrl exynos4210_pin_ctrl[] = { 803const struct samsung_pin_ctrl exynos4210_pin_ctrl[] __initconst = {
799 { 804 {
800 /* pin-controller instance 0 data */ 805 /* pin-controller instance 0 data */
801 .pin_banks = exynos4210_pin_banks0, 806 .pin_banks = exynos4210_pin_banks0,
@@ -803,7 +808,6 @@ struct samsung_pin_ctrl exynos4210_pin_ctrl[] = {
803 .eint_gpio_init = exynos_eint_gpio_init, 808 .eint_gpio_init = exynos_eint_gpio_init,
804 .suspend = exynos_pinctrl_suspend, 809 .suspend = exynos_pinctrl_suspend,
805 .resume = exynos_pinctrl_resume, 810 .resume = exynos_pinctrl_resume,
806 .label = "exynos4210-gpio-ctrl0",
807 }, { 811 }, {
808 /* pin-controller instance 1 data */ 812 /* pin-controller instance 1 data */
809 .pin_banks = exynos4210_pin_banks1, 813 .pin_banks = exynos4210_pin_banks1,
@@ -812,17 +816,15 @@ struct samsung_pin_ctrl exynos4210_pin_ctrl[] = {
812 .eint_wkup_init = exynos_eint_wkup_init, 816 .eint_wkup_init = exynos_eint_wkup_init,
813 .suspend = exynos_pinctrl_suspend, 817 .suspend = exynos_pinctrl_suspend,
814 .resume = exynos_pinctrl_resume, 818 .resume = exynos_pinctrl_resume,
815 .label = "exynos4210-gpio-ctrl1",
816 }, { 819 }, {
817 /* pin-controller instance 2 data */ 820 /* pin-controller instance 2 data */
818 .pin_banks = exynos4210_pin_banks2, 821 .pin_banks = exynos4210_pin_banks2,
819 .nr_banks = ARRAY_SIZE(exynos4210_pin_banks2), 822 .nr_banks = ARRAY_SIZE(exynos4210_pin_banks2),
820 .label = "exynos4210-gpio-ctrl2",
821 }, 823 },
822}; 824};
823 825
824/* pin banks of exynos4x12 pin-controller 0 */ 826/* pin banks of exynos4x12 pin-controller 0 */
825static struct samsung_pin_bank exynos4x12_pin_banks0[] = { 827static const struct samsung_pin_bank_data exynos4x12_pin_banks0[] __initconst = {
826 EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00), 828 EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
827 EXYNOS_PIN_BANK_EINTG(6, 0x020, "gpa1", 0x04), 829 EXYNOS_PIN_BANK_EINTG(6, 0x020, "gpa1", 0x04),
828 EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpb", 0x08), 830 EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpb", 0x08),
@@ -839,7 +841,7 @@ static struct samsung_pin_bank exynos4x12_pin_banks0[] = {
839}; 841};
840 842
841/* pin banks of exynos4x12 pin-controller 1 */ 843/* pin banks of exynos4x12 pin-controller 1 */
842static struct samsung_pin_bank exynos4x12_pin_banks1[] = { 844static const struct samsung_pin_bank_data exynos4x12_pin_banks1[] __initconst = {
843 EXYNOS_PIN_BANK_EINTG(7, 0x040, "gpk0", 0x08), 845 EXYNOS_PIN_BANK_EINTG(7, 0x040, "gpk0", 0x08),
844 EXYNOS_PIN_BANK_EINTG(7, 0x060, "gpk1", 0x0c), 846 EXYNOS_PIN_BANK_EINTG(7, 0x060, "gpk1", 0x0c),
845 EXYNOS_PIN_BANK_EINTG(7, 0x080, "gpk2", 0x10), 847 EXYNOS_PIN_BANK_EINTG(7, 0x080, "gpk2", 0x10),
@@ -866,12 +868,12 @@ static struct samsung_pin_bank exynos4x12_pin_banks1[] = {
866}; 868};
867 869
868/* pin banks of exynos4x12 pin-controller 2 */ 870/* pin banks of exynos4x12 pin-controller 2 */
869static struct samsung_pin_bank exynos4x12_pin_banks2[] = { 871static const struct samsung_pin_bank_data exynos4x12_pin_banks2[] __initconst = {
870 EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpz", 0x00), 872 EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpz", 0x00),
871}; 873};
872 874
873/* pin banks of exynos4x12 pin-controller 3 */ 875/* pin banks of exynos4x12 pin-controller 3 */
874static struct samsung_pin_bank exynos4x12_pin_banks3[] = { 876static const struct samsung_pin_bank_data exynos4x12_pin_banks3[] __initconst = {
875 EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpv0", 0x00), 877 EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpv0", 0x00),
876 EXYNOS_PIN_BANK_EINTG(8, 0x020, "gpv1", 0x04), 878 EXYNOS_PIN_BANK_EINTG(8, 0x020, "gpv1", 0x04),
877 EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpv2", 0x08), 879 EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpv2", 0x08),
@@ -883,7 +885,7 @@ static struct samsung_pin_bank exynos4x12_pin_banks3[] = {
883 * Samsung pinctrl driver data for Exynos4x12 SoC. Exynos4x12 SoC includes 885 * Samsung pinctrl driver data for Exynos4x12 SoC. Exynos4x12 SoC includes
884 * four gpio/pin-mux/pinconfig controllers. 886 * four gpio/pin-mux/pinconfig controllers.
885 */ 887 */
886struct samsung_pin_ctrl exynos4x12_pin_ctrl[] = { 888const struct samsung_pin_ctrl exynos4x12_pin_ctrl[] __initconst = {
887 { 889 {
888 /* pin-controller instance 0 data */ 890 /* pin-controller instance 0 data */
889 .pin_banks = exynos4x12_pin_banks0, 891 .pin_banks = exynos4x12_pin_banks0,
@@ -891,7 +893,6 @@ struct samsung_pin_ctrl exynos4x12_pin_ctrl[] = {
891 .eint_gpio_init = exynos_eint_gpio_init, 893 .eint_gpio_init = exynos_eint_gpio_init,
892 .suspend = exynos_pinctrl_suspend, 894 .suspend = exynos_pinctrl_suspend,
893 .resume = exynos_pinctrl_resume, 895 .resume = exynos_pinctrl_resume,
894 .label = "exynos4x12-gpio-ctrl0",
895 }, { 896 }, {
896 /* pin-controller instance 1 data */ 897 /* pin-controller instance 1 data */
897 .pin_banks = exynos4x12_pin_banks1, 898 .pin_banks = exynos4x12_pin_banks1,
@@ -900,7 +901,6 @@ struct samsung_pin_ctrl exynos4x12_pin_ctrl[] = {
900 .eint_wkup_init = exynos_eint_wkup_init, 901 .eint_wkup_init = exynos_eint_wkup_init,
901 .suspend = exynos_pinctrl_suspend, 902 .suspend = exynos_pinctrl_suspend,
902 .resume = exynos_pinctrl_resume, 903 .resume = exynos_pinctrl_resume,
903 .label = "exynos4x12-gpio-ctrl1",
904 }, { 904 }, {
905 /* pin-controller instance 2 data */ 905 /* pin-controller instance 2 data */
906 .pin_banks = exynos4x12_pin_banks2, 906 .pin_banks = exynos4x12_pin_banks2,
@@ -908,7 +908,6 @@ struct samsung_pin_ctrl exynos4x12_pin_ctrl[] = {
908 .eint_gpio_init = exynos_eint_gpio_init, 908 .eint_gpio_init = exynos_eint_gpio_init,
909 .suspend = exynos_pinctrl_suspend, 909 .suspend = exynos_pinctrl_suspend,
910 .resume = exynos_pinctrl_resume, 910 .resume = exynos_pinctrl_resume,
911 .label = "exynos4x12-gpio-ctrl2",
912 }, { 911 }, {
913 /* pin-controller instance 3 data */ 912 /* pin-controller instance 3 data */
914 .pin_banks = exynos4x12_pin_banks3, 913 .pin_banks = exynos4x12_pin_banks3,
@@ -916,12 +915,86 @@ struct samsung_pin_ctrl exynos4x12_pin_ctrl[] = {
916 .eint_gpio_init = exynos_eint_gpio_init, 915 .eint_gpio_init = exynos_eint_gpio_init,
917 .suspend = exynos_pinctrl_suspend, 916 .suspend = exynos_pinctrl_suspend,
918 .resume = exynos_pinctrl_resume, 917 .resume = exynos_pinctrl_resume,
919 .label = "exynos4x12-gpio-ctrl3", 918 },
919};
920
921/* pin banks of exynos4415 pin-controller 0 */
922static const struct samsung_pin_bank_data exynos4415_pin_banks0[] = {
923 EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
924 EXYNOS_PIN_BANK_EINTG(6, 0x020, "gpa1", 0x04),
925 EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpb", 0x08),
926 EXYNOS_PIN_BANK_EINTG(5, 0x060, "gpc0", 0x0c),
927 EXYNOS_PIN_BANK_EINTG(5, 0x080, "gpc1", 0x10),
928 EXYNOS_PIN_BANK_EINTG(4, 0x0A0, "gpd0", 0x14),
929 EXYNOS_PIN_BANK_EINTG(4, 0x0C0, "gpd1", 0x18),
930 EXYNOS_PIN_BANK_EINTG(8, 0x180, "gpf0", 0x30),
931 EXYNOS_PIN_BANK_EINTG(8, 0x1A0, "gpf1", 0x34),
932 EXYNOS_PIN_BANK_EINTG(1, 0x1C0, "gpf2", 0x38),
933};
934
935/* pin banks of exynos4415 pin-controller 1 */
936static const struct samsung_pin_bank_data exynos4415_pin_banks1[] = {
937 EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpk0", 0x08),
938 EXYNOS_PIN_BANK_EINTG(7, 0x060, "gpk1", 0x0c),
939 EXYNOS_PIN_BANK_EINTG(7, 0x080, "gpk2", 0x10),
940 EXYNOS_PIN_BANK_EINTG(7, 0x0A0, "gpk3", 0x14),
941 EXYNOS_PIN_BANK_EINTG(4, 0x0C0, "gpl0", 0x18),
942 EXYNOS_PIN_BANK_EINTN(6, 0x120, "mp00"),
943 EXYNOS_PIN_BANK_EINTN(4, 0x140, "mp01"),
944 EXYNOS_PIN_BANK_EINTN(6, 0x160, "mp02"),
945 EXYNOS_PIN_BANK_EINTN(8, 0x180, "mp03"),
946 EXYNOS_PIN_BANK_EINTN(8, 0x1A0, "mp04"),
947 EXYNOS_PIN_BANK_EINTN(8, 0x1C0, "mp05"),
948 EXYNOS_PIN_BANK_EINTN(8, 0x1E0, "mp06"),
949 EXYNOS_PIN_BANK_EINTG(8, 0x260, "gpm0", 0x24),
950 EXYNOS_PIN_BANK_EINTG(7, 0x280, "gpm1", 0x28),
951 EXYNOS_PIN_BANK_EINTG(5, 0x2A0, "gpm2", 0x2c),
952 EXYNOS_PIN_BANK_EINTG(8, 0x2C0, "gpm3", 0x30),
953 EXYNOS_PIN_BANK_EINTG(8, 0x2E0, "gpm4", 0x34),
954 EXYNOS_PIN_BANK_EINTW(8, 0xC00, "gpx0", 0x00),
955 EXYNOS_PIN_BANK_EINTW(8, 0xC20, "gpx1", 0x04),
956 EXYNOS_PIN_BANK_EINTW(8, 0xC40, "gpx2", 0x08),
957 EXYNOS_PIN_BANK_EINTW(8, 0xC60, "gpx3", 0x0c),
958};
959
960/* pin banks of exynos4415 pin-controller 2 */
961static const struct samsung_pin_bank_data exynos4415_pin_banks2[] = {
962 EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpz", 0x00),
963 EXYNOS_PIN_BANK_EINTN(2, 0x000, "etc1"),
964};
965
966/*
967 * Samsung pinctrl driver data for Exynos4415 SoC. Exynos4415 SoC includes
968 * three gpio/pin-mux/pinconfig controllers.
969 */
970const struct samsung_pin_ctrl exynos4415_pin_ctrl[] = {
971 {
972 /* pin-controller instance 0 data */
973 .pin_banks = exynos4415_pin_banks0,
974 .nr_banks = ARRAY_SIZE(exynos4415_pin_banks0),
975 .eint_gpio_init = exynos_eint_gpio_init,
976 .suspend = exynos_pinctrl_suspend,
977 .resume = exynos_pinctrl_resume,
978 }, {
979 /* pin-controller instance 1 data */
980 .pin_banks = exynos4415_pin_banks1,
981 .nr_banks = ARRAY_SIZE(exynos4415_pin_banks1),
982 .eint_gpio_init = exynos_eint_gpio_init,
983 .eint_wkup_init = exynos_eint_wkup_init,
984 .suspend = exynos_pinctrl_suspend,
985 .resume = exynos_pinctrl_resume,
986 }, {
987 /* pin-controller instance 2 data */
988 .pin_banks = exynos4415_pin_banks2,
989 .nr_banks = ARRAY_SIZE(exynos4415_pin_banks2),
990 .eint_gpio_init = exynos_eint_gpio_init,
991 .suspend = exynos_pinctrl_suspend,
992 .resume = exynos_pinctrl_resume,
920 }, 993 },
921}; 994};
922 995
923/* pin banks of exynos5250 pin-controller 0 */ 996/* pin banks of exynos5250 pin-controller 0 */
924static struct samsung_pin_bank exynos5250_pin_banks0[] = { 997static const struct samsung_pin_bank_data exynos5250_pin_banks0[] __initconst = {
925 EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00), 998 EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
926 EXYNOS_PIN_BANK_EINTG(6, 0x020, "gpa1", 0x04), 999 EXYNOS_PIN_BANK_EINTG(6, 0x020, "gpa1", 0x04),
927 EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpa2", 0x08), 1000 EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpa2", 0x08),
@@ -950,7 +1023,7 @@ static struct samsung_pin_bank exynos5250_pin_banks0[] = {
950}; 1023};
951 1024
952/* pin banks of exynos5250 pin-controller 1 */ 1025/* pin banks of exynos5250 pin-controller 1 */
953static struct samsung_pin_bank exynos5250_pin_banks1[] = { 1026static const struct samsung_pin_bank_data exynos5250_pin_banks1[] __initconst = {
954 EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpe0", 0x00), 1027 EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpe0", 0x00),
955 EXYNOS_PIN_BANK_EINTG(2, 0x020, "gpe1", 0x04), 1028 EXYNOS_PIN_BANK_EINTG(2, 0x020, "gpe1", 0x04),
956 EXYNOS_PIN_BANK_EINTG(4, 0x040, "gpf0", 0x08), 1029 EXYNOS_PIN_BANK_EINTG(4, 0x040, "gpf0", 0x08),
@@ -963,7 +1036,7 @@ static struct samsung_pin_bank exynos5250_pin_banks1[] = {
963}; 1036};
964 1037
965/* pin banks of exynos5250 pin-controller 2 */ 1038/* pin banks of exynos5250 pin-controller 2 */
966static struct samsung_pin_bank exynos5250_pin_banks2[] = { 1039static const struct samsung_pin_bank_data exynos5250_pin_banks2[] __initconst = {
967 EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpv0", 0x00), 1040 EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpv0", 0x00),
968 EXYNOS_PIN_BANK_EINTG(8, 0x020, "gpv1", 0x04), 1041 EXYNOS_PIN_BANK_EINTG(8, 0x020, "gpv1", 0x04),
969 EXYNOS_PIN_BANK_EINTG(8, 0x060, "gpv2", 0x08), 1042 EXYNOS_PIN_BANK_EINTG(8, 0x060, "gpv2", 0x08),
@@ -972,7 +1045,7 @@ static struct samsung_pin_bank exynos5250_pin_banks2[] = {
972}; 1045};
973 1046
974/* pin banks of exynos5250 pin-controller 3 */ 1047/* pin banks of exynos5250 pin-controller 3 */
975static struct samsung_pin_bank exynos5250_pin_banks3[] = { 1048static const struct samsung_pin_bank_data exynos5250_pin_banks3[] __initconst = {
976 EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpz", 0x00), 1049 EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpz", 0x00),
977}; 1050};
978 1051
@@ -980,7 +1053,7 @@ static struct samsung_pin_bank exynos5250_pin_banks3[] = {
980 * Samsung pinctrl driver data for Exynos5250 SoC. Exynos5250 SoC includes 1053 * Samsung pinctrl driver data for Exynos5250 SoC. Exynos5250 SoC includes
981 * four gpio/pin-mux/pinconfig controllers. 1054 * four gpio/pin-mux/pinconfig controllers.
982 */ 1055 */
983struct samsung_pin_ctrl exynos5250_pin_ctrl[] = { 1056const struct samsung_pin_ctrl exynos5250_pin_ctrl[] __initconst = {
984 { 1057 {
985 /* pin-controller instance 0 data */ 1058 /* pin-controller instance 0 data */
986 .pin_banks = exynos5250_pin_banks0, 1059 .pin_banks = exynos5250_pin_banks0,
@@ -989,7 +1062,6 @@ struct samsung_pin_ctrl exynos5250_pin_ctrl[] = {
989 .eint_wkup_init = exynos_eint_wkup_init, 1062 .eint_wkup_init = exynos_eint_wkup_init,
990 .suspend = exynos_pinctrl_suspend, 1063 .suspend = exynos_pinctrl_suspend,
991 .resume = exynos_pinctrl_resume, 1064 .resume = exynos_pinctrl_resume,
992 .label = "exynos5250-gpio-ctrl0",
993 }, { 1065 }, {
994 /* pin-controller instance 1 data */ 1066 /* pin-controller instance 1 data */
995 .pin_banks = exynos5250_pin_banks1, 1067 .pin_banks = exynos5250_pin_banks1,
@@ -997,7 +1069,6 @@ struct samsung_pin_ctrl exynos5250_pin_ctrl[] = {
997 .eint_gpio_init = exynos_eint_gpio_init, 1069 .eint_gpio_init = exynos_eint_gpio_init,
998 .suspend = exynos_pinctrl_suspend, 1070 .suspend = exynos_pinctrl_suspend,
999 .resume = exynos_pinctrl_resume, 1071 .resume = exynos_pinctrl_resume,
1000 .label = "exynos5250-gpio-ctrl1",
1001 }, { 1072 }, {
1002 /* pin-controller instance 2 data */ 1073 /* pin-controller instance 2 data */
1003 .pin_banks = exynos5250_pin_banks2, 1074 .pin_banks = exynos5250_pin_banks2,
@@ -1005,7 +1076,6 @@ struct samsung_pin_ctrl exynos5250_pin_ctrl[] = {
1005 .eint_gpio_init = exynos_eint_gpio_init, 1076 .eint_gpio_init = exynos_eint_gpio_init,
1006 .suspend = exynos_pinctrl_suspend, 1077 .suspend = exynos_pinctrl_suspend,
1007 .resume = exynos_pinctrl_resume, 1078 .resume = exynos_pinctrl_resume,
1008 .label = "exynos5250-gpio-ctrl2",
1009 }, { 1079 }, {
1010 /* pin-controller instance 3 data */ 1080 /* pin-controller instance 3 data */
1011 .pin_banks = exynos5250_pin_banks3, 1081 .pin_banks = exynos5250_pin_banks3,
@@ -1013,12 +1083,11 @@ struct samsung_pin_ctrl exynos5250_pin_ctrl[] = {
1013 .eint_gpio_init = exynos_eint_gpio_init, 1083 .eint_gpio_init = exynos_eint_gpio_init,
1014 .suspend = exynos_pinctrl_suspend, 1084 .suspend = exynos_pinctrl_suspend,
1015 .resume = exynos_pinctrl_resume, 1085 .resume = exynos_pinctrl_resume,
1016 .label = "exynos5250-gpio-ctrl3",
1017 }, 1086 },
1018}; 1087};
1019 1088
1020/* pin banks of exynos5260 pin-controller 0 */ 1089/* pin banks of exynos5260 pin-controller 0 */
1021static struct samsung_pin_bank exynos5260_pin_banks0[] = { 1090static const struct samsung_pin_bank_data exynos5260_pin_banks0[] __initconst = {
1022 EXYNOS_PIN_BANK_EINTG(4, 0x000, "gpa0", 0x00), 1091 EXYNOS_PIN_BANK_EINTG(4, 0x000, "gpa0", 0x00),
1023 EXYNOS_PIN_BANK_EINTG(7, 0x020, "gpa1", 0x04), 1092 EXYNOS_PIN_BANK_EINTG(7, 0x020, "gpa1", 0x04),
1024 EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpa2", 0x08), 1093 EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpa2", 0x08),
@@ -1043,7 +1112,7 @@ static struct samsung_pin_bank exynos5260_pin_banks0[] = {
1043}; 1112};
1044 1113
1045/* pin banks of exynos5260 pin-controller 1 */ 1114/* pin banks of exynos5260 pin-controller 1 */
1046static struct samsung_pin_bank exynos5260_pin_banks1[] = { 1115static const struct samsung_pin_bank_data exynos5260_pin_banks1[] __initconst = {
1047 EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpc0", 0x00), 1116 EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpc0", 0x00),
1048 EXYNOS_PIN_BANK_EINTG(6, 0x020, "gpc1", 0x04), 1117 EXYNOS_PIN_BANK_EINTG(6, 0x020, "gpc1", 0x04),
1049 EXYNOS_PIN_BANK_EINTG(7, 0x040, "gpc2", 0x08), 1118 EXYNOS_PIN_BANK_EINTG(7, 0x040, "gpc2", 0x08),
@@ -1052,7 +1121,7 @@ static struct samsung_pin_bank exynos5260_pin_banks1[] = {
1052}; 1121};
1053 1122
1054/* pin banks of exynos5260 pin-controller 2 */ 1123/* pin banks of exynos5260 pin-controller 2 */
1055static struct samsung_pin_bank exynos5260_pin_banks2[] = { 1124static const struct samsung_pin_bank_data exynos5260_pin_banks2[] __initconst = {
1056 EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpz0", 0x00), 1125 EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpz0", 0x00),
1057 EXYNOS_PIN_BANK_EINTG(4, 0x020, "gpz1", 0x04), 1126 EXYNOS_PIN_BANK_EINTG(4, 0x020, "gpz1", 0x04),
1058}; 1127};
@@ -1061,31 +1130,28 @@ static struct samsung_pin_bank exynos5260_pin_banks2[] = {
1061 * Samsung pinctrl driver data for Exynos5260 SoC. Exynos5260 SoC includes 1130 * Samsung pinctrl driver data for Exynos5260 SoC. Exynos5260 SoC includes
1062 * three gpio/pin-mux/pinconfig controllers. 1131 * three gpio/pin-mux/pinconfig controllers.
1063 */ 1132 */
1064struct samsung_pin_ctrl exynos5260_pin_ctrl[] = { 1133const struct samsung_pin_ctrl exynos5260_pin_ctrl[] __initconst = {
1065 { 1134 {
1066 /* pin-controller instance 0 data */ 1135 /* pin-controller instance 0 data */
1067 .pin_banks = exynos5260_pin_banks0, 1136 .pin_banks = exynos5260_pin_banks0,
1068 .nr_banks = ARRAY_SIZE(exynos5260_pin_banks0), 1137 .nr_banks = ARRAY_SIZE(exynos5260_pin_banks0),
1069 .eint_gpio_init = exynos_eint_gpio_init, 1138 .eint_gpio_init = exynos_eint_gpio_init,
1070 .eint_wkup_init = exynos_eint_wkup_init, 1139 .eint_wkup_init = exynos_eint_wkup_init,
1071 .label = "exynos5260-gpio-ctrl0",
1072 }, { 1140 }, {
1073 /* pin-controller instance 1 data */ 1141 /* pin-controller instance 1 data */
1074 .pin_banks = exynos5260_pin_banks1, 1142 .pin_banks = exynos5260_pin_banks1,
1075 .nr_banks = ARRAY_SIZE(exynos5260_pin_banks1), 1143 .nr_banks = ARRAY_SIZE(exynos5260_pin_banks1),
1076 .eint_gpio_init = exynos_eint_gpio_init, 1144 .eint_gpio_init = exynos_eint_gpio_init,
1077 .label = "exynos5260-gpio-ctrl1",
1078 }, { 1145 }, {
1079 /* pin-controller instance 2 data */ 1146 /* pin-controller instance 2 data */
1080 .pin_banks = exynos5260_pin_banks2, 1147 .pin_banks = exynos5260_pin_banks2,
1081 .nr_banks = ARRAY_SIZE(exynos5260_pin_banks2), 1148 .nr_banks = ARRAY_SIZE(exynos5260_pin_banks2),
1082 .eint_gpio_init = exynos_eint_gpio_init, 1149 .eint_gpio_init = exynos_eint_gpio_init,
1083 .label = "exynos5260-gpio-ctrl2",
1084 }, 1150 },
1085}; 1151};
1086 1152
1087/* pin banks of exynos5420 pin-controller 0 */ 1153/* pin banks of exynos5420 pin-controller 0 */
1088static struct samsung_pin_bank exynos5420_pin_banks0[] = { 1154static const struct samsung_pin_bank_data exynos5420_pin_banks0[] __initconst = {
1089 EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpy7", 0x00), 1155 EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpy7", 0x00),
1090 EXYNOS_PIN_BANK_EINTW(8, 0xC00, "gpx0", 0x00), 1156 EXYNOS_PIN_BANK_EINTW(8, 0xC00, "gpx0", 0x00),
1091 EXYNOS_PIN_BANK_EINTW(8, 0xC20, "gpx1", 0x04), 1157 EXYNOS_PIN_BANK_EINTW(8, 0xC20, "gpx1", 0x04),
@@ -1094,7 +1160,7 @@ static struct samsung_pin_bank exynos5420_pin_banks0[] = {
1094}; 1160};
1095 1161
1096/* pin banks of exynos5420 pin-controller 1 */ 1162/* pin banks of exynos5420 pin-controller 1 */
1097static struct samsung_pin_bank exynos5420_pin_banks1[] = { 1163static const struct samsung_pin_bank_data exynos5420_pin_banks1[] __initconst = {
1098 EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpc0", 0x00), 1164 EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpc0", 0x00),
1099 EXYNOS_PIN_BANK_EINTG(8, 0x020, "gpc1", 0x04), 1165 EXYNOS_PIN_BANK_EINTG(8, 0x020, "gpc1", 0x04),
1100 EXYNOS_PIN_BANK_EINTG(7, 0x040, "gpc2", 0x08), 1166 EXYNOS_PIN_BANK_EINTG(7, 0x040, "gpc2", 0x08),
@@ -1111,7 +1177,7 @@ static struct samsung_pin_bank exynos5420_pin_banks1[] = {
1111}; 1177};
1112 1178
1113/* pin banks of exynos5420 pin-controller 2 */ 1179/* pin banks of exynos5420 pin-controller 2 */
1114static struct samsung_pin_bank exynos5420_pin_banks2[] = { 1180static const struct samsung_pin_bank_data exynos5420_pin_banks2[] __initconst = {
1115 EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpe0", 0x00), 1181 EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpe0", 0x00),
1116 EXYNOS_PIN_BANK_EINTG(2, 0x020, "gpe1", 0x04), 1182 EXYNOS_PIN_BANK_EINTG(2, 0x020, "gpe1", 0x04),
1117 EXYNOS_PIN_BANK_EINTG(6, 0x040, "gpf0", 0x08), 1183 EXYNOS_PIN_BANK_EINTG(6, 0x040, "gpf0", 0x08),
@@ -1123,7 +1189,7 @@ static struct samsung_pin_bank exynos5420_pin_banks2[] = {
1123}; 1189};
1124 1190
1125/* pin banks of exynos5420 pin-controller 3 */ 1191/* pin banks of exynos5420 pin-controller 3 */
1126static struct samsung_pin_bank exynos5420_pin_banks3[] = { 1192static const struct samsung_pin_bank_data exynos5420_pin_banks3[] __initconst = {
1127 EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00), 1193 EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
1128 EXYNOS_PIN_BANK_EINTG(6, 0x020, "gpa1", 0x04), 1194 EXYNOS_PIN_BANK_EINTG(6, 0x020, "gpa1", 0x04),
1129 EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpa2", 0x08), 1195 EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpa2", 0x08),
@@ -1136,7 +1202,7 @@ static struct samsung_pin_bank exynos5420_pin_banks3[] = {
1136}; 1202};
1137 1203
1138/* pin banks of exynos5420 pin-controller 4 */ 1204/* pin banks of exynos5420 pin-controller 4 */
1139static struct samsung_pin_bank exynos5420_pin_banks4[] = { 1205static const struct samsung_pin_bank_data exynos5420_pin_banks4[] __initconst = {
1140 EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpz", 0x00), 1206 EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpz", 0x00),
1141}; 1207};
1142 1208
@@ -1144,37 +1210,137 @@ static struct samsung_pin_bank exynos5420_pin_banks4[] = {
1144 * Samsung pinctrl driver data for Exynos5420 SoC. Exynos5420 SoC includes 1210 * Samsung pinctrl driver data for Exynos5420 SoC. Exynos5420 SoC includes
1145 * four gpio/pin-mux/pinconfig controllers. 1211 * four gpio/pin-mux/pinconfig controllers.
1146 */ 1212 */
1147struct samsung_pin_ctrl exynos5420_pin_ctrl[] = { 1213const struct samsung_pin_ctrl exynos5420_pin_ctrl[] __initconst = {
1148 { 1214 {
1149 /* pin-controller instance 0 data */ 1215 /* pin-controller instance 0 data */
1150 .pin_banks = exynos5420_pin_banks0, 1216 .pin_banks = exynos5420_pin_banks0,
1151 .nr_banks = ARRAY_SIZE(exynos5420_pin_banks0), 1217 .nr_banks = ARRAY_SIZE(exynos5420_pin_banks0),
1152 .eint_gpio_init = exynos_eint_gpio_init, 1218 .eint_gpio_init = exynos_eint_gpio_init,
1153 .eint_wkup_init = exynos_eint_wkup_init, 1219 .eint_wkup_init = exynos_eint_wkup_init,
1154 .label = "exynos5420-gpio-ctrl0",
1155 }, { 1220 }, {
1156 /* pin-controller instance 1 data */ 1221 /* pin-controller instance 1 data */
1157 .pin_banks = exynos5420_pin_banks1, 1222 .pin_banks = exynos5420_pin_banks1,
1158 .nr_banks = ARRAY_SIZE(exynos5420_pin_banks1), 1223 .nr_banks = ARRAY_SIZE(exynos5420_pin_banks1),
1159 .eint_gpio_init = exynos_eint_gpio_init, 1224 .eint_gpio_init = exynos_eint_gpio_init,
1160 .label = "exynos5420-gpio-ctrl1",
1161 }, { 1225 }, {
1162 /* pin-controller instance 2 data */ 1226 /* pin-controller instance 2 data */
1163 .pin_banks = exynos5420_pin_banks2, 1227 .pin_banks = exynos5420_pin_banks2,
1164 .nr_banks = ARRAY_SIZE(exynos5420_pin_banks2), 1228 .nr_banks = ARRAY_SIZE(exynos5420_pin_banks2),
1165 .eint_gpio_init = exynos_eint_gpio_init, 1229 .eint_gpio_init = exynos_eint_gpio_init,
1166 .label = "exynos5420-gpio-ctrl2",
1167 }, { 1230 }, {
1168 /* pin-controller instance 3 data */ 1231 /* pin-controller instance 3 data */
1169 .pin_banks = exynos5420_pin_banks3, 1232 .pin_banks = exynos5420_pin_banks3,
1170 .nr_banks = ARRAY_SIZE(exynos5420_pin_banks3), 1233 .nr_banks = ARRAY_SIZE(exynos5420_pin_banks3),
1171 .eint_gpio_init = exynos_eint_gpio_init, 1234 .eint_gpio_init = exynos_eint_gpio_init,
1172 .label = "exynos5420-gpio-ctrl3",
1173 }, { 1235 }, {
1174 /* pin-controller instance 4 data */ 1236 /* pin-controller instance 4 data */
1175 .pin_banks = exynos5420_pin_banks4, 1237 .pin_banks = exynos5420_pin_banks4,
1176 .nr_banks = ARRAY_SIZE(exynos5420_pin_banks4), 1238 .nr_banks = ARRAY_SIZE(exynos5420_pin_banks4),
1177 .eint_gpio_init = exynos_eint_gpio_init, 1239 .eint_gpio_init = exynos_eint_gpio_init,
1178 .label = "exynos5420-gpio-ctrl4", 1240 },
1241};
1242
1243/* pin banks of exynos7 pin-controller - ALIVE */
1244static const struct samsung_pin_bank_data exynos7_pin_banks0[] __initconst = {
1245 EXYNOS_PIN_BANK_EINTW(8, 0x000, "gpa0", 0x00),
1246 EXYNOS_PIN_BANK_EINTW(8, 0x020, "gpa1", 0x04),
1247 EXYNOS_PIN_BANK_EINTW(8, 0x040, "gpa2", 0x08),
1248 EXYNOS_PIN_BANK_EINTW(8, 0x060, "gpa3", 0x0c),
1249};
1250
1251/* pin banks of exynos7 pin-controller - BUS0 */
1252static const struct samsung_pin_bank_data exynos7_pin_banks1[] __initconst = {
1253 EXYNOS_PIN_BANK_EINTG(5, 0x000, "gpb0", 0x00),
1254 EXYNOS_PIN_BANK_EINTG(8, 0x020, "gpc0", 0x04),
1255 EXYNOS_PIN_BANK_EINTG(2, 0x040, "gpc1", 0x08),
1256 EXYNOS_PIN_BANK_EINTG(6, 0x060, "gpc2", 0x0c),
1257 EXYNOS_PIN_BANK_EINTG(8, 0x080, "gpc3", 0x10),
1258 EXYNOS_PIN_BANK_EINTG(4, 0x0a0, "gpd0", 0x14),
1259 EXYNOS_PIN_BANK_EINTG(6, 0x0c0, "gpd1", 0x18),
1260 EXYNOS_PIN_BANK_EINTG(8, 0x0e0, "gpd2", 0x1c),
1261 EXYNOS_PIN_BANK_EINTG(5, 0x100, "gpd4", 0x20),
1262 EXYNOS_PIN_BANK_EINTG(4, 0x120, "gpd5", 0x24),
1263 EXYNOS_PIN_BANK_EINTG(6, 0x140, "gpd6", 0x28),
1264 EXYNOS_PIN_BANK_EINTG(3, 0x160, "gpd7", 0x2c),
1265 EXYNOS_PIN_BANK_EINTG(2, 0x180, "gpd8", 0x30),
1266 EXYNOS_PIN_BANK_EINTG(2, 0x1a0, "gpg0", 0x34),
1267 EXYNOS_PIN_BANK_EINTG(4, 0x1c0, "gpg3", 0x38),
1268};
1269
1270/* pin banks of exynos7 pin-controller - NFC */
1271static const struct samsung_pin_bank_data exynos7_pin_banks2[] __initconst = {
1272 EXYNOS_PIN_BANK_EINTG(3, 0x000, "gpj0", 0x00),
1273};
1274
1275/* pin banks of exynos7 pin-controller - TOUCH */
1276static const struct samsung_pin_bank_data exynos7_pin_banks3[] __initconst = {
1277 EXYNOS_PIN_BANK_EINTG(3, 0x000, "gpj1", 0x00),
1278};
1279
1280/* pin banks of exynos7 pin-controller - FF */
1281static const struct samsung_pin_bank_data exynos7_pin_banks4[] __initconst = {
1282 EXYNOS_PIN_BANK_EINTG(4, 0x000, "gpg4", 0x00),
1283};
1284
1285/* pin banks of exynos7 pin-controller - ESE */
1286static const struct samsung_pin_bank_data exynos7_pin_banks5[] __initconst = {
1287 EXYNOS_PIN_BANK_EINTG(5, 0x000, "gpv7", 0x00),
1288};
1289
1290/* pin banks of exynos7 pin-controller - FSYS0 */
1291static const struct samsung_pin_bank_data exynos7_pin_banks6[] __initconst = {
1292 EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpr4", 0x00),
1293};
1294
1295/* pin banks of exynos7 pin-controller - FSYS1 */
1296static const struct samsung_pin_bank_data exynos7_pin_banks7[] __initconst = {
1297 EXYNOS_PIN_BANK_EINTG(4, 0x000, "gpr0", 0x00),
1298 EXYNOS_PIN_BANK_EINTG(8, 0x020, "gpr1", 0x04),
1299 EXYNOS_PIN_BANK_EINTG(5, 0x040, "gpr2", 0x08),
1300 EXYNOS_PIN_BANK_EINTG(8, 0x060, "gpr3", 0x0c),
1301};
1302
1303const struct samsung_pin_ctrl exynos7_pin_ctrl[] __initconst = {
1304 {
1305 /* pin-controller instance 0 Alive data */
1306 .pin_banks = exynos7_pin_banks0,
1307 .nr_banks = ARRAY_SIZE(exynos7_pin_banks0),
1308 .eint_gpio_init = exynos_eint_gpio_init,
1309 .eint_wkup_init = exynos_eint_wkup_init,
1310 }, {
1311 /* pin-controller instance 1 BUS0 data */
1312 .pin_banks = exynos7_pin_banks1,
1313 .nr_banks = ARRAY_SIZE(exynos7_pin_banks1),
1314 .eint_gpio_init = exynos_eint_gpio_init,
1315 }, {
1316 /* pin-controller instance 2 NFC data */
1317 .pin_banks = exynos7_pin_banks2,
1318 .nr_banks = ARRAY_SIZE(exynos7_pin_banks2),
1319 .eint_gpio_init = exynos_eint_gpio_init,
1320 }, {
1321 /* pin-controller instance 3 TOUCH data */
1322 .pin_banks = exynos7_pin_banks3,
1323 .nr_banks = ARRAY_SIZE(exynos7_pin_banks3),
1324 .eint_gpio_init = exynos_eint_gpio_init,
1325 }, {
1326 /* pin-controller instance 4 FF data */
1327 .pin_banks = exynos7_pin_banks4,
1328 .nr_banks = ARRAY_SIZE(exynos7_pin_banks4),
1329 .eint_gpio_init = exynos_eint_gpio_init,
1330 }, {
1331 /* pin-controller instance 5 ESE data */
1332 .pin_banks = exynos7_pin_banks5,
1333 .nr_banks = ARRAY_SIZE(exynos7_pin_banks5),
1334 .eint_gpio_init = exynos_eint_gpio_init,
1335 }, {
1336 /* pin-controller instance 6 FSYS0 data */
1337 .pin_banks = exynos7_pin_banks6,
1338 .nr_banks = ARRAY_SIZE(exynos7_pin_banks6),
1339 .eint_gpio_init = exynos_eint_gpio_init,
1340 }, {
1341 /* pin-controller instance 7 FSYS1 data */
1342 .pin_banks = exynos7_pin_banks7,
1343 .nr_banks = ARRAY_SIZE(exynos7_pin_banks7),
1344 .eint_gpio_init = exynos_eint_gpio_init,
1179 }, 1345 },
1180}; 1346};
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.h b/drivers/pinctrl/samsung/pinctrl-exynos.h
index 3c91c357792f..0f0f7cedb2dc 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.h
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.h
@@ -25,6 +25,9 @@
25#define EXYNOS_WKUP_ECON_OFFSET 0xE00 25#define EXYNOS_WKUP_ECON_OFFSET 0xE00
26#define EXYNOS_WKUP_EMASK_OFFSET 0xF00 26#define EXYNOS_WKUP_EMASK_OFFSET 0xF00
27#define EXYNOS_WKUP_EPEND_OFFSET 0xF40 27#define EXYNOS_WKUP_EPEND_OFFSET 0xF40
28#define EXYNOS7_WKUP_ECON_OFFSET 0x700
29#define EXYNOS7_WKUP_EMASK_OFFSET 0x900
30#define EXYNOS7_WKUP_EPEND_OFFSET 0xA00
28#define EXYNOS_SVC_OFFSET 0xB08 31#define EXYNOS_SVC_OFFSET 0xB08
29#define EXYNOS_EINT_FUNC 0xF 32#define EXYNOS_EINT_FUNC 0xF
30 33
diff --git a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c
index ad3eaad17001..f1993f42114c 100644
--- a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c
+++ b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c
@@ -44,12 +44,12 @@
44#define EINT_EDGE_BOTH 6 44#define EINT_EDGE_BOTH 6
45#define EINT_MASK 0xf 45#define EINT_MASK 0xf
46 46
47static struct samsung_pin_bank_type bank_type_1bit = { 47static const struct samsung_pin_bank_type bank_type_1bit = {
48 .fld_width = { 1, 1, }, 48 .fld_width = { 1, 1, },
49 .reg_offset = { 0x00, 0x04, }, 49 .reg_offset = { 0x00, 0x04, },
50}; 50};
51 51
52static struct samsung_pin_bank_type bank_type_2bit = { 52static const struct samsung_pin_bank_type bank_type_2bit = {
53 .fld_width = { 2, 1, 2, }, 53 .fld_width = { 2, 1, 2, },
54 .reg_offset = { 0x00, 0x04, 0x08, }, 54 .reg_offset = { 0x00, 0x04, 0x08, },
55}; 55};
@@ -143,7 +143,7 @@ static void s3c24xx_eint_set_handler(unsigned int irq, unsigned int type)
143static void s3c24xx_eint_set_function(struct samsung_pinctrl_drv_data *d, 143static void s3c24xx_eint_set_function(struct samsung_pinctrl_drv_data *d,
144 struct samsung_pin_bank *bank, int pin) 144 struct samsung_pin_bank *bank, int pin)
145{ 145{
146 struct samsung_pin_bank_type *bank_type = bank->type; 146 const struct samsung_pin_bank_type *bank_type = bank->type;
147 unsigned long flags; 147 unsigned long flags;
148 void __iomem *reg; 148 void __iomem *reg;
149 u8 shift; 149 u8 shift;
@@ -518,8 +518,8 @@ static int s3c24xx_eint_init(struct samsung_pinctrl_drv_data *d)
518 irq_set_handler_data(irq, eint_data); 518 irq_set_handler_data(irq, eint_data);
519 } 519 }
520 520
521 bank = d->ctrl->pin_banks; 521 bank = d->pin_banks;
522 for (i = 0; i < d->ctrl->nr_banks; ++i, ++bank) { 522 for (i = 0; i < d->nr_banks; ++i, ++bank) {
523 struct s3c24xx_eint_domain_data *ddata; 523 struct s3c24xx_eint_domain_data *ddata;
524 unsigned int mask; 524 unsigned int mask;
525 unsigned int irq; 525 unsigned int irq;
@@ -561,7 +561,7 @@ static int s3c24xx_eint_init(struct samsung_pinctrl_drv_data *d)
561 return 0; 561 return 0;
562} 562}
563 563
564static struct samsung_pin_bank s3c2412_pin_banks[] = { 564static const struct samsung_pin_bank_data s3c2412_pin_banks[] __initconst = {
565 PIN_BANK_A(23, 0x000, "gpa"), 565 PIN_BANK_A(23, 0x000, "gpa"),
566 PIN_BANK_2BIT(11, 0x010, "gpb"), 566 PIN_BANK_2BIT(11, 0x010, "gpb"),
567 PIN_BANK_2BIT(16, 0x020, "gpc"), 567 PIN_BANK_2BIT(16, 0x020, "gpc"),
@@ -573,16 +573,15 @@ static struct samsung_pin_bank s3c2412_pin_banks[] = {
573 PIN_BANK_2BIT(13, 0x080, "gpj"), 573 PIN_BANK_2BIT(13, 0x080, "gpj"),
574}; 574};
575 575
576struct samsung_pin_ctrl s3c2412_pin_ctrl[] = { 576const struct samsung_pin_ctrl s3c2412_pin_ctrl[] __initconst = {
577 { 577 {
578 .pin_banks = s3c2412_pin_banks, 578 .pin_banks = s3c2412_pin_banks,
579 .nr_banks = ARRAY_SIZE(s3c2412_pin_banks), 579 .nr_banks = ARRAY_SIZE(s3c2412_pin_banks),
580 .eint_wkup_init = s3c24xx_eint_init, 580 .eint_wkup_init = s3c24xx_eint_init,
581 .label = "S3C2412-GPIO",
582 }, 581 },
583}; 582};
584 583
585static struct samsung_pin_bank s3c2416_pin_banks[] = { 584static const struct samsung_pin_bank_data s3c2416_pin_banks[] __initconst = {
586 PIN_BANK_A(27, 0x000, "gpa"), 585 PIN_BANK_A(27, 0x000, "gpa"),
587 PIN_BANK_2BIT(11, 0x010, "gpb"), 586 PIN_BANK_2BIT(11, 0x010, "gpb"),
588 PIN_BANK_2BIT(16, 0x020, "gpc"), 587 PIN_BANK_2BIT(16, 0x020, "gpc"),
@@ -596,16 +595,15 @@ static struct samsung_pin_bank s3c2416_pin_banks[] = {
596 PIN_BANK_2BIT(2, 0x100, "gpm"), 595 PIN_BANK_2BIT(2, 0x100, "gpm"),
597}; 596};
598 597
599struct samsung_pin_ctrl s3c2416_pin_ctrl[] = { 598const struct samsung_pin_ctrl s3c2416_pin_ctrl[] __initconst = {
600 { 599 {
601 .pin_banks = s3c2416_pin_banks, 600 .pin_banks = s3c2416_pin_banks,
602 .nr_banks = ARRAY_SIZE(s3c2416_pin_banks), 601 .nr_banks = ARRAY_SIZE(s3c2416_pin_banks),
603 .eint_wkup_init = s3c24xx_eint_init, 602 .eint_wkup_init = s3c24xx_eint_init,
604 .label = "S3C2416-GPIO",
605 }, 603 },
606}; 604};
607 605
608static struct samsung_pin_bank s3c2440_pin_banks[] = { 606static const struct samsung_pin_bank_data s3c2440_pin_banks[] __initconst = {
609 PIN_BANK_A(25, 0x000, "gpa"), 607 PIN_BANK_A(25, 0x000, "gpa"),
610 PIN_BANK_2BIT(11, 0x010, "gpb"), 608 PIN_BANK_2BIT(11, 0x010, "gpb"),
611 PIN_BANK_2BIT(16, 0x020, "gpc"), 609 PIN_BANK_2BIT(16, 0x020, "gpc"),
@@ -617,16 +615,15 @@ static struct samsung_pin_bank s3c2440_pin_banks[] = {
617 PIN_BANK_2BIT(13, 0x0d0, "gpj"), 615 PIN_BANK_2BIT(13, 0x0d0, "gpj"),
618}; 616};
619 617
620struct samsung_pin_ctrl s3c2440_pin_ctrl[] = { 618const struct samsung_pin_ctrl s3c2440_pin_ctrl[] __initconst = {
621 { 619 {
622 .pin_banks = s3c2440_pin_banks, 620 .pin_banks = s3c2440_pin_banks,
623 .nr_banks = ARRAY_SIZE(s3c2440_pin_banks), 621 .nr_banks = ARRAY_SIZE(s3c2440_pin_banks),
624 .eint_wkup_init = s3c24xx_eint_init, 622 .eint_wkup_init = s3c24xx_eint_init,
625 .label = "S3C2440-GPIO",
626 }, 623 },
627}; 624};
628 625
629static struct samsung_pin_bank s3c2450_pin_banks[] = { 626static const struct samsung_pin_bank_data s3c2450_pin_banks[] __initconst = {
630 PIN_BANK_A(28, 0x000, "gpa"), 627 PIN_BANK_A(28, 0x000, "gpa"),
631 PIN_BANK_2BIT(11, 0x010, "gpb"), 628 PIN_BANK_2BIT(11, 0x010, "gpb"),
632 PIN_BANK_2BIT(16, 0x020, "gpc"), 629 PIN_BANK_2BIT(16, 0x020, "gpc"),
@@ -641,11 +638,10 @@ static struct samsung_pin_bank s3c2450_pin_banks[] = {
641 PIN_BANK_2BIT(2, 0x100, "gpm"), 638 PIN_BANK_2BIT(2, 0x100, "gpm"),
642}; 639};
643 640
644struct samsung_pin_ctrl s3c2450_pin_ctrl[] = { 641const struct samsung_pin_ctrl s3c2450_pin_ctrl[] __initconst = {
645 { 642 {
646 .pin_banks = s3c2450_pin_banks, 643 .pin_banks = s3c2450_pin_banks,
647 .nr_banks = ARRAY_SIZE(s3c2450_pin_banks), 644 .nr_banks = ARRAY_SIZE(s3c2450_pin_banks),
648 .eint_wkup_init = s3c24xx_eint_init, 645 .eint_wkup_init = s3c24xx_eint_init,
649 .label = "S3C2450-GPIO",
650 }, 646 },
651}; 647};
diff --git a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
index 89143c903000..7756c1e9e763 100644
--- a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
+++ b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
@@ -68,32 +68,32 @@
68#define EINT_CON_MASK 0xF 68#define EINT_CON_MASK 0xF
69#define EINT_CON_LEN 4 69#define EINT_CON_LEN 4
70 70
71static struct samsung_pin_bank_type bank_type_4bit_off = { 71static const struct samsung_pin_bank_type bank_type_4bit_off = {
72 .fld_width = { 4, 1, 2, 0, 2, 2, }, 72 .fld_width = { 4, 1, 2, 0, 2, 2, },
73 .reg_offset = { 0x00, 0x04, 0x08, 0, 0x0c, 0x10, }, 73 .reg_offset = { 0x00, 0x04, 0x08, 0, 0x0c, 0x10, },
74}; 74};
75 75
76static struct samsung_pin_bank_type bank_type_4bit_alive = { 76static const struct samsung_pin_bank_type bank_type_4bit_alive = {
77 .fld_width = { 4, 1, 2, }, 77 .fld_width = { 4, 1, 2, },
78 .reg_offset = { 0x00, 0x04, 0x08, }, 78 .reg_offset = { 0x00, 0x04, 0x08, },
79}; 79};
80 80
81static struct samsung_pin_bank_type bank_type_4bit2_off = { 81static const struct samsung_pin_bank_type bank_type_4bit2_off = {
82 .fld_width = { 4, 1, 2, 0, 2, 2, }, 82 .fld_width = { 4, 1, 2, 0, 2, 2, },
83 .reg_offset = { 0x00, 0x08, 0x0c, 0, 0x10, 0x14, }, 83 .reg_offset = { 0x00, 0x08, 0x0c, 0, 0x10, 0x14, },
84}; 84};
85 85
86static struct samsung_pin_bank_type bank_type_4bit2_alive = { 86static const struct samsung_pin_bank_type bank_type_4bit2_alive = {
87 .fld_width = { 4, 1, 2, }, 87 .fld_width = { 4, 1, 2, },
88 .reg_offset = { 0x00, 0x08, 0x0c, }, 88 .reg_offset = { 0x00, 0x08, 0x0c, },
89}; 89};
90 90
91static struct samsung_pin_bank_type bank_type_2bit_off = { 91static const struct samsung_pin_bank_type bank_type_2bit_off = {
92 .fld_width = { 2, 1, 2, 0, 2, 2, }, 92 .fld_width = { 2, 1, 2, 0, 2, 2, },
93 .reg_offset = { 0x00, 0x04, 0x08, 0, 0x0c, 0x10, }, 93 .reg_offset = { 0x00, 0x04, 0x08, 0, 0x0c, 0x10, },
94}; 94};
95 95
96static struct samsung_pin_bank_type bank_type_2bit_alive = { 96static const struct samsung_pin_bank_type bank_type_2bit_alive = {
97 .fld_width = { 2, 1, 2, }, 97 .fld_width = { 2, 1, 2, },
98 .reg_offset = { 0x00, 0x04, 0x08, }, 98 .reg_offset = { 0x00, 0x04, 0x08, },
99}; 99};
@@ -272,7 +272,7 @@ static void s3c64xx_irq_set_handler(unsigned int irq, unsigned int type)
272static void s3c64xx_irq_set_function(struct samsung_pinctrl_drv_data *d, 272static void s3c64xx_irq_set_function(struct samsung_pinctrl_drv_data *d,
273 struct samsung_pin_bank *bank, int pin) 273 struct samsung_pin_bank *bank, int pin)
274{ 274{
275 struct samsung_pin_bank_type *bank_type = bank->type; 275 const struct samsung_pin_bank_type *bank_type = bank->type;
276 unsigned long flags; 276 unsigned long flags;
277 void __iomem *reg; 277 void __iomem *reg;
278 u8 shift; 278 u8 shift;
@@ -468,8 +468,8 @@ static int s3c64xx_eint_gpio_init(struct samsung_pinctrl_drv_data *d)
468 } 468 }
469 469
470 nr_domains = 0; 470 nr_domains = 0;
471 bank = d->ctrl->pin_banks; 471 bank = d->pin_banks;
472 for (i = 0; i < d->ctrl->nr_banks; ++i, ++bank) { 472 for (i = 0; i < d->nr_banks; ++i, ++bank) {
473 unsigned int nr_eints; 473 unsigned int nr_eints;
474 unsigned int mask; 474 unsigned int mask;
475 475
@@ -497,9 +497,9 @@ static int s3c64xx_eint_gpio_init(struct samsung_pinctrl_drv_data *d)
497 } 497 }
498 data->drvdata = d; 498 data->drvdata = d;
499 499
500 bank = d->ctrl->pin_banks; 500 bank = d->pin_banks;
501 nr_domains = 0; 501 nr_domains = 0;
502 for (i = 0; i < d->ctrl->nr_banks; ++i, ++bank) { 502 for (i = 0; i < d->nr_banks; ++i, ++bank) {
503 if (bank->eint_type != EINT_TYPE_GPIO) 503 if (bank->eint_type != EINT_TYPE_GPIO)
504 continue; 504 continue;
505 505
@@ -735,8 +735,8 @@ static int s3c64xx_eint_eint0_init(struct samsung_pinctrl_drv_data *d)
735 irq_set_handler_data(irq, data); 735 irq_set_handler_data(irq, data);
736 } 736 }
737 737
738 bank = d->ctrl->pin_banks; 738 bank = d->pin_banks;
739 for (i = 0; i < d->ctrl->nr_banks; ++i, ++bank) { 739 for (i = 0; i < d->nr_banks; ++i, ++bank) {
740 struct s3c64xx_eint0_domain_data *ddata; 740 struct s3c64xx_eint0_domain_data *ddata;
741 unsigned int nr_eints; 741 unsigned int nr_eints;
742 unsigned int mask; 742 unsigned int mask;
@@ -780,7 +780,7 @@ static int s3c64xx_eint_eint0_init(struct samsung_pinctrl_drv_data *d)
780} 780}
781 781
782/* pin banks of s3c64xx pin-controller 0 */ 782/* pin banks of s3c64xx pin-controller 0 */
783static struct samsung_pin_bank s3c64xx_pin_banks0[] = { 783static const struct samsung_pin_bank_data s3c64xx_pin_banks0[] __initconst = {
784 PIN_BANK_4BIT_EINTG(8, 0x000, "gpa", 0), 784 PIN_BANK_4BIT_EINTG(8, 0x000, "gpa", 0),
785 PIN_BANK_4BIT_EINTG(7, 0x020, "gpb", 8), 785 PIN_BANK_4BIT_EINTG(7, 0x020, "gpb", 8),
786 PIN_BANK_4BIT_EINTG(8, 0x040, "gpc", 16), 786 PIN_BANK_4BIT_EINTG(8, 0x040, "gpc", 16),
@@ -804,13 +804,12 @@ static struct samsung_pin_bank s3c64xx_pin_banks0[] = {
804 * Samsung pinctrl driver data for S3C64xx SoC. S3C64xx SoC includes 804 * Samsung pinctrl driver data for S3C64xx SoC. S3C64xx SoC includes
805 * one gpio/pin-mux/pinconfig controller. 805 * one gpio/pin-mux/pinconfig controller.
806 */ 806 */
807struct samsung_pin_ctrl s3c64xx_pin_ctrl[] = { 807const struct samsung_pin_ctrl s3c64xx_pin_ctrl[] __initconst = {
808 { 808 {
809 /* pin-controller instance 1 data */ 809 /* pin-controller instance 1 data */
810 .pin_banks = s3c64xx_pin_banks0, 810 .pin_banks = s3c64xx_pin_banks0,
811 .nr_banks = ARRAY_SIZE(s3c64xx_pin_banks0), 811 .nr_banks = ARRAY_SIZE(s3c64xx_pin_banks0),
812 .eint_gpio_init = s3c64xx_eint_gpio_init, 812 .eint_gpio_init = s3c64xx_eint_gpio_init,
813 .eint_wkup_init = s3c64xx_eint_eint0_init, 813 .eint_wkup_init = s3c64xx_eint_eint0_init,
814 .label = "S3C64xx-GPIO",
815 }, 814 },
816}; 815};
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
index 2d37c8f49f3c..32940a01a84f 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
@@ -349,7 +349,7 @@ static void pin_to_reg_bank(struct samsung_pinctrl_drv_data *drvdata,
349{ 349{
350 struct samsung_pin_bank *b; 350 struct samsung_pin_bank *b;
351 351
352 b = drvdata->ctrl->pin_banks; 352 b = drvdata->pin_banks;
353 353
354 while ((pin >= b->pin_base) && 354 while ((pin >= b->pin_base) &&
355 ((b->pin_base + b->nr_pins - 1) < pin)) 355 ((b->pin_base + b->nr_pins - 1) < pin))
@@ -366,7 +366,7 @@ static void samsung_pinmux_setup(struct pinctrl_dev *pctldev, unsigned selector,
366 unsigned group, bool enable) 366 unsigned group, bool enable)
367{ 367{
368 struct samsung_pinctrl_drv_data *drvdata; 368 struct samsung_pinctrl_drv_data *drvdata;
369 struct samsung_pin_bank_type *type; 369 const struct samsung_pin_bank_type *type;
370 struct samsung_pin_bank *bank; 370 struct samsung_pin_bank *bank;
371 void __iomem *reg; 371 void __iomem *reg;
372 u32 mask, shift, data, pin_offset; 372 u32 mask, shift, data, pin_offset;
@@ -378,7 +378,7 @@ static void samsung_pinmux_setup(struct pinctrl_dev *pctldev, unsigned selector,
378 func = &drvdata->pmx_functions[selector]; 378 func = &drvdata->pmx_functions[selector];
379 grp = &drvdata->pin_groups[group]; 379 grp = &drvdata->pin_groups[group];
380 380
381 pin_to_reg_bank(drvdata, grp->pins[0] - drvdata->ctrl->base, 381 pin_to_reg_bank(drvdata, grp->pins[0] - drvdata->pin_base,
382 &reg, &pin_offset, &bank); 382 &reg, &pin_offset, &bank);
383 type = bank->type; 383 type = bank->type;
384 mask = (1 << type->fld_width[PINCFG_TYPE_FUNC]) - 1; 384 mask = (1 << type->fld_width[PINCFG_TYPE_FUNC]) - 1;
@@ -422,7 +422,7 @@ static int samsung_pinconf_rw(struct pinctrl_dev *pctldev, unsigned int pin,
422 unsigned long *config, bool set) 422 unsigned long *config, bool set)
423{ 423{
424 struct samsung_pinctrl_drv_data *drvdata; 424 struct samsung_pinctrl_drv_data *drvdata;
425 struct samsung_pin_bank_type *type; 425 const struct samsung_pin_bank_type *type;
426 struct samsung_pin_bank *bank; 426 struct samsung_pin_bank *bank;
427 void __iomem *reg_base; 427 void __iomem *reg_base;
428 enum pincfg_type cfg_type = PINCFG_UNPACK_TYPE(*config); 428 enum pincfg_type cfg_type = PINCFG_UNPACK_TYPE(*config);
@@ -431,7 +431,7 @@ static int samsung_pinconf_rw(struct pinctrl_dev *pctldev, unsigned int pin,
431 unsigned long flags; 431 unsigned long flags;
432 432
433 drvdata = pinctrl_dev_get_drvdata(pctldev); 433 drvdata = pinctrl_dev_get_drvdata(pctldev);
434 pin_to_reg_bank(drvdata, pin - drvdata->ctrl->base, &reg_base, 434 pin_to_reg_bank(drvdata, pin - drvdata->pin_base, &reg_base,
435 &pin_offset, &bank); 435 &pin_offset, &bank);
436 type = bank->type; 436 type = bank->type;
437 437
@@ -528,7 +528,7 @@ static const struct pinconf_ops samsung_pinconf_ops = {
528static void samsung_gpio_set(struct gpio_chip *gc, unsigned offset, int value) 528static void samsung_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
529{ 529{
530 struct samsung_pin_bank *bank = gc_to_pin_bank(gc); 530 struct samsung_pin_bank *bank = gc_to_pin_bank(gc);
531 struct samsung_pin_bank_type *type = bank->type; 531 const struct samsung_pin_bank_type *type = bank->type;
532 unsigned long flags; 532 unsigned long flags;
533 void __iomem *reg; 533 void __iomem *reg;
534 u32 data; 534 u32 data;
@@ -552,7 +552,7 @@ static int samsung_gpio_get(struct gpio_chip *gc, unsigned offset)
552 void __iomem *reg; 552 void __iomem *reg;
553 u32 data; 553 u32 data;
554 struct samsung_pin_bank *bank = gc_to_pin_bank(gc); 554 struct samsung_pin_bank *bank = gc_to_pin_bank(gc);
555 struct samsung_pin_bank_type *type = bank->type; 555 const struct samsung_pin_bank_type *type = bank->type;
556 556
557 reg = bank->drvdata->virt_base + bank->pctl_offset; 557 reg = bank->drvdata->virt_base + bank->pctl_offset;
558 558
@@ -569,7 +569,7 @@ static int samsung_gpio_get(struct gpio_chip *gc, unsigned offset)
569static int samsung_gpio_set_direction(struct gpio_chip *gc, 569static int samsung_gpio_set_direction(struct gpio_chip *gc,
570 unsigned offset, bool input) 570 unsigned offset, bool input)
571{ 571{
572 struct samsung_pin_bank_type *type; 572 const struct samsung_pin_bank_type *type;
573 struct samsung_pin_bank *bank; 573 struct samsung_pin_bank *bank;
574 struct samsung_pinctrl_drv_data *drvdata; 574 struct samsung_pinctrl_drv_data *drvdata;
575 void __iomem *reg; 575 void __iomem *reg;
@@ -834,32 +834,32 @@ static int samsung_pinctrl_register(struct platform_device *pdev,
834 ctrldesc->confops = &samsung_pinconf_ops; 834 ctrldesc->confops = &samsung_pinconf_ops;
835 835
836 pindesc = devm_kzalloc(&pdev->dev, sizeof(*pindesc) * 836 pindesc = devm_kzalloc(&pdev->dev, sizeof(*pindesc) *
837 drvdata->ctrl->nr_pins, GFP_KERNEL); 837 drvdata->nr_pins, GFP_KERNEL);
838 if (!pindesc) { 838 if (!pindesc) {
839 dev_err(&pdev->dev, "mem alloc for pin descriptors failed\n"); 839 dev_err(&pdev->dev, "mem alloc for pin descriptors failed\n");
840 return -ENOMEM; 840 return -ENOMEM;
841 } 841 }
842 ctrldesc->pins = pindesc; 842 ctrldesc->pins = pindesc;
843 ctrldesc->npins = drvdata->ctrl->nr_pins; 843 ctrldesc->npins = drvdata->nr_pins;
844 844
845 /* dynamically populate the pin number and pin name for pindesc */ 845 /* dynamically populate the pin number and pin name for pindesc */
846 for (pin = 0, pdesc = pindesc; pin < ctrldesc->npins; pin++, pdesc++) 846 for (pin = 0, pdesc = pindesc; pin < ctrldesc->npins; pin++, pdesc++)
847 pdesc->number = pin + drvdata->ctrl->base; 847 pdesc->number = pin + drvdata->pin_base;
848 848
849 /* 849 /*
850 * allocate space for storing the dynamically generated names for all 850 * allocate space for storing the dynamically generated names for all
851 * the pins which belong to this pin-controller. 851 * the pins which belong to this pin-controller.
852 */ 852 */
853 pin_names = devm_kzalloc(&pdev->dev, sizeof(char) * PIN_NAME_LENGTH * 853 pin_names = devm_kzalloc(&pdev->dev, sizeof(char) * PIN_NAME_LENGTH *
854 drvdata->ctrl->nr_pins, GFP_KERNEL); 854 drvdata->nr_pins, GFP_KERNEL);
855 if (!pin_names) { 855 if (!pin_names) {
856 dev_err(&pdev->dev, "mem alloc for pin names failed\n"); 856 dev_err(&pdev->dev, "mem alloc for pin names failed\n");
857 return -ENOMEM; 857 return -ENOMEM;
858 } 858 }
859 859
860 /* for each pin, the name of the pin is pin-bank name + pin number */ 860 /* for each pin, the name of the pin is pin-bank name + pin number */
861 for (bank = 0; bank < drvdata->ctrl->nr_banks; bank++) { 861 for (bank = 0; bank < drvdata->nr_banks; bank++) {
862 pin_bank = &drvdata->ctrl->pin_banks[bank]; 862 pin_bank = &drvdata->pin_banks[bank];
863 for (pin = 0; pin < pin_bank->nr_pins; pin++) { 863 for (pin = 0; pin < pin_bank->nr_pins; pin++) {
864 sprintf(pin_names, "%s-%d", pin_bank->name, pin); 864 sprintf(pin_names, "%s-%d", pin_bank->name, pin);
865 pdesc = pindesc + pin_bank->pin_base + pin; 865 pdesc = pindesc + pin_bank->pin_base + pin;
@@ -878,11 +878,11 @@ static int samsung_pinctrl_register(struct platform_device *pdev,
878 return -EINVAL; 878 return -EINVAL;
879 } 879 }
880 880
881 for (bank = 0; bank < drvdata->ctrl->nr_banks; ++bank) { 881 for (bank = 0; bank < drvdata->nr_banks; ++bank) {
882 pin_bank = &drvdata->ctrl->pin_banks[bank]; 882 pin_bank = &drvdata->pin_banks[bank];
883 pin_bank->grange.name = pin_bank->name; 883 pin_bank->grange.name = pin_bank->name;
884 pin_bank->grange.id = bank; 884 pin_bank->grange.id = bank;
885 pin_bank->grange.pin_base = drvdata->ctrl->base 885 pin_bank->grange.pin_base = drvdata->pin_base
886 + pin_bank->pin_base; 886 + pin_bank->pin_base;
887 pin_bank->grange.base = pin_bank->gpio_chip.base; 887 pin_bank->grange.base = pin_bank->gpio_chip.base;
888 pin_bank->grange.npins = pin_bank->gpio_chip.ngpio; 888 pin_bank->grange.npins = pin_bank->gpio_chip.ngpio;
@@ -918,17 +918,16 @@ static const struct gpio_chip samsung_gpiolib_chip = {
918static int samsung_gpiolib_register(struct platform_device *pdev, 918static int samsung_gpiolib_register(struct platform_device *pdev,
919 struct samsung_pinctrl_drv_data *drvdata) 919 struct samsung_pinctrl_drv_data *drvdata)
920{ 920{
921 struct samsung_pin_ctrl *ctrl = drvdata->ctrl; 921 struct samsung_pin_bank *bank = drvdata->pin_banks;
922 struct samsung_pin_bank *bank = ctrl->pin_banks;
923 struct gpio_chip *gc; 922 struct gpio_chip *gc;
924 int ret; 923 int ret;
925 int i; 924 int i;
926 925
927 for (i = 0; i < ctrl->nr_banks; ++i, ++bank) { 926 for (i = 0; i < drvdata->nr_banks; ++i, ++bank) {
928 bank->gpio_chip = samsung_gpiolib_chip; 927 bank->gpio_chip = samsung_gpiolib_chip;
929 928
930 gc = &bank->gpio_chip; 929 gc = &bank->gpio_chip;
931 gc->base = ctrl->base + bank->pin_base; 930 gc->base = drvdata->pin_base + bank->pin_base;
932 gc->ngpio = bank->nr_pins; 931 gc->ngpio = bank->nr_pins;
933 gc->dev = &pdev->dev; 932 gc->dev = &pdev->dev;
934 gc->of_node = bank->of_node; 933 gc->of_node = bank->of_node;
@@ -954,51 +953,70 @@ fail:
954static int samsung_gpiolib_unregister(struct platform_device *pdev, 953static int samsung_gpiolib_unregister(struct platform_device *pdev,
955 struct samsung_pinctrl_drv_data *drvdata) 954 struct samsung_pinctrl_drv_data *drvdata)
956{ 955{
957 struct samsung_pin_ctrl *ctrl = drvdata->ctrl; 956 struct samsung_pin_bank *bank = drvdata->pin_banks;
958 struct samsung_pin_bank *bank = ctrl->pin_banks;
959 int i; 957 int i;
960 958
961 for (i = 0; i < ctrl->nr_banks; ++i, ++bank) 959 for (i = 0; i < drvdata->nr_banks; ++i, ++bank)
962 gpiochip_remove(&bank->gpio_chip); 960 gpiochip_remove(&bank->gpio_chip);
961
963 return 0; 962 return 0;
964} 963}
965 964
966static const struct of_device_id samsung_pinctrl_dt_match[]; 965static const struct of_device_id samsung_pinctrl_dt_match[];
967 966
968/* retrieve the soc specific data */ 967/* retrieve the soc specific data */
969static struct samsung_pin_ctrl *samsung_pinctrl_get_soc_data( 968static const struct samsung_pin_ctrl *
970 struct samsung_pinctrl_drv_data *d, 969samsung_pinctrl_get_soc_data(struct samsung_pinctrl_drv_data *d,
971 struct platform_device *pdev) 970 struct platform_device *pdev)
972{ 971{
973 int id; 972 int id;
974 const struct of_device_id *match; 973 const struct of_device_id *match;
975 struct device_node *node = pdev->dev.of_node; 974 struct device_node *node = pdev->dev.of_node;
976 struct device_node *np; 975 struct device_node *np;
977 struct samsung_pin_ctrl *ctrl; 976 const struct samsung_pin_bank_data *bdata;
977 const struct samsung_pin_ctrl *ctrl;
978 struct samsung_pin_bank *bank; 978 struct samsung_pin_bank *bank;
979 int i; 979 int i;
980 980
981 id = of_alias_get_id(node, "pinctrl"); 981 id = of_alias_get_id(node, "pinctrl");
982 if (id < 0) { 982 if (id < 0) {
983 dev_err(&pdev->dev, "failed to get alias id\n"); 983 dev_err(&pdev->dev, "failed to get alias id\n");
984 return NULL; 984 return ERR_PTR(-ENOENT);
985 } 985 }
986 match = of_match_node(samsung_pinctrl_dt_match, node); 986 match = of_match_node(samsung_pinctrl_dt_match, node);
987 ctrl = (struct samsung_pin_ctrl *)match->data + id; 987 ctrl = (struct samsung_pin_ctrl *)match->data + id;
988 988
989 bank = ctrl->pin_banks; 989 d->suspend = ctrl->suspend;
990 for (i = 0; i < ctrl->nr_banks; ++i, ++bank) { 990 d->resume = ctrl->resume;
991 d->nr_banks = ctrl->nr_banks;
992 d->pin_banks = devm_kcalloc(&pdev->dev, d->nr_banks,
993 sizeof(*d->pin_banks), GFP_KERNEL);
994 if (!d->pin_banks)
995 return ERR_PTR(-ENOMEM);
996
997 bank = d->pin_banks;
998 bdata = ctrl->pin_banks;
999 for (i = 0; i < ctrl->nr_banks; ++i, ++bdata, ++bank) {
1000 bank->type = bdata->type;
1001 bank->pctl_offset = bdata->pctl_offset;
1002 bank->nr_pins = bdata->nr_pins;
1003 bank->eint_func = bdata->eint_func;
1004 bank->eint_type = bdata->eint_type;
1005 bank->eint_mask = bdata->eint_mask;
1006 bank->eint_offset = bdata->eint_offset;
1007 bank->name = bdata->name;
1008
991 spin_lock_init(&bank->slock); 1009 spin_lock_init(&bank->slock);
992 bank->drvdata = d; 1010 bank->drvdata = d;
993 bank->pin_base = ctrl->nr_pins; 1011 bank->pin_base = d->nr_pins;
994 ctrl->nr_pins += bank->nr_pins; 1012 d->nr_pins += bank->nr_pins;
995 } 1013 }
996 1014
997 for_each_child_of_node(node, np) { 1015 for_each_child_of_node(node, np) {
998 if (!of_find_property(np, "gpio-controller", NULL)) 1016 if (!of_find_property(np, "gpio-controller", NULL))
999 continue; 1017 continue;
1000 bank = ctrl->pin_banks; 1018 bank = d->pin_banks;
1001 for (i = 0; i < ctrl->nr_banks; ++i, ++bank) { 1019 for (i = 0; i < d->nr_banks; ++i, ++bank) {
1002 if (!strcmp(bank->name, np->name)) { 1020 if (!strcmp(bank->name, np->name)) {
1003 bank->of_node = np; 1021 bank->of_node = np;
1004 break; 1022 break;
@@ -1006,8 +1024,8 @@ static struct samsung_pin_ctrl *samsung_pinctrl_get_soc_data(
1006 } 1024 }
1007 } 1025 }
1008 1026
1009 ctrl->base = pin_base; 1027 d->pin_base = pin_base;
1010 pin_base += ctrl->nr_pins; 1028 pin_base += d->nr_pins;
1011 1029
1012 return ctrl; 1030 return ctrl;
1013} 1031}
@@ -1015,8 +1033,8 @@ static struct samsung_pin_ctrl *samsung_pinctrl_get_soc_data(
1015static int samsung_pinctrl_probe(struct platform_device *pdev) 1033static int samsung_pinctrl_probe(struct platform_device *pdev)
1016{ 1034{
1017 struct samsung_pinctrl_drv_data *drvdata; 1035 struct samsung_pinctrl_drv_data *drvdata;
1036 const struct samsung_pin_ctrl *ctrl;
1018 struct device *dev = &pdev->dev; 1037 struct device *dev = &pdev->dev;
1019 struct samsung_pin_ctrl *ctrl;
1020 struct resource *res; 1038 struct resource *res;
1021 int ret; 1039 int ret;
1022 1040
@@ -1033,11 +1051,10 @@ static int samsung_pinctrl_probe(struct platform_device *pdev)
1033 } 1051 }
1034 1052
1035 ctrl = samsung_pinctrl_get_soc_data(drvdata, pdev); 1053 ctrl = samsung_pinctrl_get_soc_data(drvdata, pdev);
1036 if (!ctrl) { 1054 if (IS_ERR(ctrl)) {
1037 dev_err(&pdev->dev, "driver data not available\n"); 1055 dev_err(&pdev->dev, "driver data not available\n");
1038 return -EINVAL; 1056 return PTR_ERR(ctrl);
1039 } 1057 }
1040 drvdata->ctrl = ctrl;
1041 drvdata->dev = dev; 1058 drvdata->dev = dev;
1042 1059
1043 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1060 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1082,16 +1099,14 @@ static int samsung_pinctrl_probe(struct platform_device *pdev)
1082static void samsung_pinctrl_suspend_dev( 1099static void samsung_pinctrl_suspend_dev(
1083 struct samsung_pinctrl_drv_data *drvdata) 1100 struct samsung_pinctrl_drv_data *drvdata)
1084{ 1101{
1085 struct samsung_pin_ctrl *ctrl = drvdata->ctrl;
1086 void __iomem *virt_base = drvdata->virt_base; 1102 void __iomem *virt_base = drvdata->virt_base;
1087 int i; 1103 int i;
1088 1104
1089 for (i = 0; i < ctrl->nr_banks; i++) { 1105 for (i = 0; i < drvdata->nr_banks; i++) {
1090 struct samsung_pin_bank *bank = &ctrl->pin_banks[i]; 1106 struct samsung_pin_bank *bank = &drvdata->pin_banks[i];
1091 void __iomem *reg = virt_base + bank->pctl_offset; 1107 void __iomem *reg = virt_base + bank->pctl_offset;
1092 1108 const u8 *offs = bank->type->reg_offset;
1093 u8 *offs = bank->type->reg_offset; 1109 const u8 *widths = bank->type->fld_width;
1094 u8 *widths = bank->type->fld_width;
1095 enum pincfg_type type; 1110 enum pincfg_type type;
1096 1111
1097 /* Registers without a powerdown config aren't lost */ 1112 /* Registers without a powerdown config aren't lost */
@@ -1116,8 +1131,8 @@ static void samsung_pinctrl_suspend_dev(
1116 } 1131 }
1117 } 1132 }
1118 1133
1119 if (ctrl->suspend) 1134 if (drvdata->suspend)
1120 ctrl->suspend(drvdata); 1135 drvdata->suspend(drvdata);
1121} 1136}
1122 1137
1123/** 1138/**
@@ -1130,19 +1145,17 @@ static void samsung_pinctrl_suspend_dev(
1130 */ 1145 */
1131static void samsung_pinctrl_resume_dev(struct samsung_pinctrl_drv_data *drvdata) 1146static void samsung_pinctrl_resume_dev(struct samsung_pinctrl_drv_data *drvdata)
1132{ 1147{
1133 struct samsung_pin_ctrl *ctrl = drvdata->ctrl;
1134 void __iomem *virt_base = drvdata->virt_base; 1148 void __iomem *virt_base = drvdata->virt_base;
1135 int i; 1149 int i;
1136 1150
1137 if (ctrl->resume) 1151 if (drvdata->resume)
1138 ctrl->resume(drvdata); 1152 drvdata->resume(drvdata);
1139 1153
1140 for (i = 0; i < ctrl->nr_banks; i++) { 1154 for (i = 0; i < drvdata->nr_banks; i++) {
1141 struct samsung_pin_bank *bank = &ctrl->pin_banks[i]; 1155 struct samsung_pin_bank *bank = &drvdata->pin_banks[i];
1142 void __iomem *reg = virt_base + bank->pctl_offset; 1156 void __iomem *reg = virt_base + bank->pctl_offset;
1143 1157 const u8 *offs = bank->type->reg_offset;
1144 u8 *offs = bank->type->reg_offset; 1158 const u8 *widths = bank->type->fld_width;
1145 u8 *widths = bank->type->fld_width;
1146 enum pincfg_type type; 1159 enum pincfg_type type;
1147 1160
1148 /* Registers without a powerdown config aren't lost */ 1161 /* Registers without a powerdown config aren't lost */
@@ -1218,6 +1231,8 @@ static const struct of_device_id samsung_pinctrl_dt_match[] = {
1218 .data = (void *)exynos4210_pin_ctrl }, 1231 .data = (void *)exynos4210_pin_ctrl },
1219 { .compatible = "samsung,exynos4x12-pinctrl", 1232 { .compatible = "samsung,exynos4x12-pinctrl",
1220 .data = (void *)exynos4x12_pin_ctrl }, 1233 .data = (void *)exynos4x12_pin_ctrl },
1234 { .compatible = "samsung,exynos4415-pinctrl",
1235 .data = (void *)exynos4415_pin_ctrl },
1221 { .compatible = "samsung,exynos5250-pinctrl", 1236 { .compatible = "samsung,exynos5250-pinctrl",
1222 .data = (void *)exynos5250_pin_ctrl }, 1237 .data = (void *)exynos5250_pin_ctrl },
1223 { .compatible = "samsung,exynos5260-pinctrl", 1238 { .compatible = "samsung,exynos5260-pinctrl",
@@ -1226,6 +1241,8 @@ static const struct of_device_id samsung_pinctrl_dt_match[] = {
1226 .data = (void *)exynos5420_pin_ctrl }, 1241 .data = (void *)exynos5420_pin_ctrl },
1227 { .compatible = "samsung,s5pv210-pinctrl", 1242 { .compatible = "samsung,s5pv210-pinctrl",
1228 .data = (void *)s5pv210_pin_ctrl }, 1243 .data = (void *)s5pv210_pin_ctrl },
1244 { .compatible = "samsung,exynos7-pinctrl",
1245 .data = (void *)exynos7_pin_ctrl },
1229#endif 1246#endif
1230#ifdef CONFIG_PINCTRL_S3C64XX 1247#ifdef CONFIG_PINCTRL_S3C64XX
1231 { .compatible = "samsung,s3c64xx-pinctrl", 1248 { .compatible = "samsung,s3c64xx-pinctrl",
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.h b/drivers/pinctrl/samsung/pinctrl-samsung.h
index 5cedc9d26390..1b8c0139d604 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.h
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.h
@@ -113,39 +113,66 @@ struct samsung_pin_bank_type {
113}; 113};
114 114
115/** 115/**
116 * struct samsung_pin_bank_data: represent a controller pin-bank (init data).
117 * @type: type of the bank (register offsets and bitfield widths)
118 * @pctl_offset: starting offset of the pin-bank registers.
119 * @nr_pins: number of pins included in this bank.
120 * @eint_func: function to set in CON register to configure pin as EINT.
121 * @eint_type: type of the external interrupt supported by the bank.
122 * @eint_mask: bit mask of pins which support EINT function.
123 * @eint_offset: SoC-specific EINT register or interrupt offset of bank.
124 * @name: name to be prefixed for each pin in this pin bank.
125 */
126struct samsung_pin_bank_data {
127 const struct samsung_pin_bank_type *type;
128 u32 pctl_offset;
129 u8 nr_pins;
130 u8 eint_func;
131 enum eint_type eint_type;
132 u32 eint_mask;
133 u32 eint_offset;
134 const char *name;
135};
136
137/**
116 * struct samsung_pin_bank: represent a controller pin-bank. 138 * struct samsung_pin_bank: represent a controller pin-bank.
117 * @type: type of the bank (register offsets and bitfield widths) 139 * @type: type of the bank (register offsets and bitfield widths)
118 * @pctl_offset: starting offset of the pin-bank registers. 140 * @pctl_offset: starting offset of the pin-bank registers.
119 * @pin_base: starting pin number of the bank.
120 * @nr_pins: number of pins included in this bank. 141 * @nr_pins: number of pins included in this bank.
121 * @eint_func: function to set in CON register to configure pin as EINT. 142 * @eint_func: function to set in CON register to configure pin as EINT.
122 * @eint_type: type of the external interrupt supported by the bank. 143 * @eint_type: type of the external interrupt supported by the bank.
123 * @eint_mask: bit mask of pins which support EINT function. 144 * @eint_mask: bit mask of pins which support EINT function.
145 * @eint_offset: SoC-specific EINT register or interrupt offset of bank.
124 * @name: name to be prefixed for each pin in this pin bank. 146 * @name: name to be prefixed for each pin in this pin bank.
147 * @pin_base: starting pin number of the bank.
148 * @soc_priv: per-bank private data for SoC-specific code.
125 * @of_node: OF node of the bank. 149 * @of_node: OF node of the bank.
126 * @drvdata: link to controller driver data 150 * @drvdata: link to controller driver data
127 * @irq_domain: IRQ domain of the bank. 151 * @irq_domain: IRQ domain of the bank.
128 * @gpio_chip: GPIO chip of the bank. 152 * @gpio_chip: GPIO chip of the bank.
129 * @grange: linux gpio pin range supported by this bank. 153 * @grange: linux gpio pin range supported by this bank.
154 * @irq_chip: link to irq chip for external gpio and wakeup interrupts.
130 * @slock: spinlock protecting bank registers 155 * @slock: spinlock protecting bank registers
131 * @pm_save: saved register values during suspend 156 * @pm_save: saved register values during suspend
132 */ 157 */
133struct samsung_pin_bank { 158struct samsung_pin_bank {
134 struct samsung_pin_bank_type *type; 159 const struct samsung_pin_bank_type *type;
135 u32 pctl_offset; 160 u32 pctl_offset;
136 u32 pin_base;
137 u8 nr_pins; 161 u8 nr_pins;
138 u8 eint_func; 162 u8 eint_func;
139 enum eint_type eint_type; 163 enum eint_type eint_type;
140 u32 eint_mask; 164 u32 eint_mask;
141 u32 eint_offset; 165 u32 eint_offset;
142 char *name; 166 const char *name;
167
168 u32 pin_base;
143 void *soc_priv; 169 void *soc_priv;
144 struct device_node *of_node; 170 struct device_node *of_node;
145 struct samsung_pinctrl_drv_data *drvdata; 171 struct samsung_pinctrl_drv_data *drvdata;
146 struct irq_domain *irq_domain; 172 struct irq_domain *irq_domain;
147 struct gpio_chip gpio_chip; 173 struct gpio_chip gpio_chip;
148 struct pinctrl_gpio_range grange; 174 struct pinctrl_gpio_range grange;
175 struct exynos_irq_chip *irq_chip;
149 spinlock_t slock; 176 spinlock_t slock;
150 177
151 u32 pm_save[PINCFG_TYPE_NUM + 1]; /* +1 to handle double CON registers*/ 178 u32 pm_save[PINCFG_TYPE_NUM + 1]; /* +1 to handle double CON registers*/
@@ -155,27 +182,19 @@ struct samsung_pin_bank {
155 * struct samsung_pin_ctrl: represent a pin controller. 182 * struct samsung_pin_ctrl: represent a pin controller.
156 * @pin_banks: list of pin banks included in this controller. 183 * @pin_banks: list of pin banks included in this controller.
157 * @nr_banks: number of pin banks. 184 * @nr_banks: number of pin banks.
158 * @base: starting system wide pin number.
159 * @nr_pins: number of pins supported by the controller.
160 * @eint_gpio_init: platform specific callback to setup the external gpio 185 * @eint_gpio_init: platform specific callback to setup the external gpio
161 * interrupts for the controller. 186 * interrupts for the controller.
162 * @eint_wkup_init: platform specific callback to setup the external wakeup 187 * @eint_wkup_init: platform specific callback to setup the external wakeup
163 * interrupts for the controller. 188 * interrupts for the controller.
164 * @label: for debug information.
165 */ 189 */
166struct samsung_pin_ctrl { 190struct samsung_pin_ctrl {
167 struct samsung_pin_bank *pin_banks; 191 const struct samsung_pin_bank_data *pin_banks;
168 u32 nr_banks; 192 u32 nr_banks;
169 193
170 u32 base;
171 u32 nr_pins;
172
173 int (*eint_gpio_init)(struct samsung_pinctrl_drv_data *); 194 int (*eint_gpio_init)(struct samsung_pinctrl_drv_data *);
174 int (*eint_wkup_init)(struct samsung_pinctrl_drv_data *); 195 int (*eint_wkup_init)(struct samsung_pinctrl_drv_data *);
175 void (*suspend)(struct samsung_pinctrl_drv_data *); 196 void (*suspend)(struct samsung_pinctrl_drv_data *);
176 void (*resume)(struct samsung_pinctrl_drv_data *); 197 void (*resume)(struct samsung_pinctrl_drv_data *);
177
178 char *label;
179}; 198};
180 199
181/** 200/**
@@ -191,6 +210,8 @@ struct samsung_pin_ctrl {
191 * @nr_groups: number of such pin groups. 210 * @nr_groups: number of such pin groups.
192 * @pmx_functions: list of pin functions available to the driver. 211 * @pmx_functions: list of pin functions available to the driver.
193 * @nr_function: number of such pin functions. 212 * @nr_function: number of such pin functions.
213 * @pin_base: starting system wide pin number.
214 * @nr_pins: number of pins supported by the controller.
194 */ 215 */
195struct samsung_pinctrl_drv_data { 216struct samsung_pinctrl_drv_data {
196 struct list_head node; 217 struct list_head node;
@@ -198,7 +219,6 @@ struct samsung_pinctrl_drv_data {
198 struct device *dev; 219 struct device *dev;
199 int irq; 220 int irq;
200 221
201 struct samsung_pin_ctrl *ctrl;
202 struct pinctrl_desc pctl; 222 struct pinctrl_desc pctl;
203 struct pinctrl_dev *pctl_dev; 223 struct pinctrl_dev *pctl_dev;
204 224
@@ -206,6 +226,14 @@ struct samsung_pinctrl_drv_data {
206 unsigned int nr_groups; 226 unsigned int nr_groups;
207 const struct samsung_pmx_func *pmx_functions; 227 const struct samsung_pmx_func *pmx_functions;
208 unsigned int nr_functions; 228 unsigned int nr_functions;
229
230 struct samsung_pin_bank *pin_banks;
231 u32 nr_banks;
232 unsigned int pin_base;
233 unsigned int nr_pins;
234
235 void (*suspend)(struct samsung_pinctrl_drv_data *);
236 void (*resume)(struct samsung_pinctrl_drv_data *);
209}; 237};
210 238
211/** 239/**
@@ -236,17 +264,19 @@ struct samsung_pmx_func {
236}; 264};
237 265
238/* list of all exported SoC specific data */ 266/* list of all exported SoC specific data */
239extern struct samsung_pin_ctrl exynos3250_pin_ctrl[]; 267extern const struct samsung_pin_ctrl exynos3250_pin_ctrl[];
240extern struct samsung_pin_ctrl exynos4210_pin_ctrl[]; 268extern const struct samsung_pin_ctrl exynos4210_pin_ctrl[];
241extern struct samsung_pin_ctrl exynos4x12_pin_ctrl[]; 269extern const struct samsung_pin_ctrl exynos4x12_pin_ctrl[];
242extern struct samsung_pin_ctrl exynos5250_pin_ctrl[]; 270extern const struct samsung_pin_ctrl exynos4415_pin_ctrl[];
243extern struct samsung_pin_ctrl exynos5260_pin_ctrl[]; 271extern const struct samsung_pin_ctrl exynos5250_pin_ctrl[];
244extern struct samsung_pin_ctrl exynos5420_pin_ctrl[]; 272extern const struct samsung_pin_ctrl exynos5260_pin_ctrl[];
245extern struct samsung_pin_ctrl s3c64xx_pin_ctrl[]; 273extern const struct samsung_pin_ctrl exynos5420_pin_ctrl[];
246extern struct samsung_pin_ctrl s3c2412_pin_ctrl[]; 274extern const struct samsung_pin_ctrl exynos7_pin_ctrl[];
247extern struct samsung_pin_ctrl s3c2416_pin_ctrl[]; 275extern const struct samsung_pin_ctrl s3c64xx_pin_ctrl[];
248extern struct samsung_pin_ctrl s3c2440_pin_ctrl[]; 276extern const struct samsung_pin_ctrl s3c2412_pin_ctrl[];
249extern struct samsung_pin_ctrl s3c2450_pin_ctrl[]; 277extern const struct samsung_pin_ctrl s3c2416_pin_ctrl[];
250extern struct samsung_pin_ctrl s5pv210_pin_ctrl[]; 278extern const struct samsung_pin_ctrl s3c2440_pin_ctrl[];
279extern const struct samsung_pin_ctrl s3c2450_pin_ctrl[];
280extern const struct samsung_pin_ctrl s5pv210_pin_ctrl[];
251 281
252#endif /* __PINCTRL_SAMSUNG_H */ 282#endif /* __PINCTRL_SAMSUNG_H */
diff --git a/drivers/pinctrl/sunxi/Kconfig b/drivers/pinctrl/sunxi/Kconfig
index a5e10f777ed2..230a952608cb 100644
--- a/drivers/pinctrl/sunxi/Kconfig
+++ b/drivers/pinctrl/sunxi/Kconfig
@@ -39,4 +39,8 @@ config PINCTRL_SUN8I_A23_R
39 depends on RESET_CONTROLLER 39 depends on RESET_CONTROLLER
40 select PINCTRL_SUNXI_COMMON 40 select PINCTRL_SUNXI_COMMON
41 41
42config PINCTRL_SUN9I_A80
43 def_bool MACH_SUN9I
44 select PINCTRL_SUNXI_COMMON
45
42endif 46endif
diff --git a/drivers/pinctrl/sunxi/Makefile b/drivers/pinctrl/sunxi/Makefile
index e797efb02901..c7d92e4673b5 100644
--- a/drivers/pinctrl/sunxi/Makefile
+++ b/drivers/pinctrl/sunxi/Makefile
@@ -10,3 +10,4 @@ obj-$(CONFIG_PINCTRL_SUN6I_A31_R) += pinctrl-sun6i-a31-r.o
10obj-$(CONFIG_PINCTRL_SUN7I_A20) += pinctrl-sun7i-a20.o 10obj-$(CONFIG_PINCTRL_SUN7I_A20) += pinctrl-sun7i-a20.o
11obj-$(CONFIG_PINCTRL_SUN8I_A23) += pinctrl-sun8i-a23.o 11obj-$(CONFIG_PINCTRL_SUN8I_A23) += pinctrl-sun8i-a23.o
12obj-$(CONFIG_PINCTRL_SUN8I_A23_R) += pinctrl-sun8i-a23-r.o 12obj-$(CONFIG_PINCTRL_SUN8I_A23_R) += pinctrl-sun8i-a23-r.o
13obj-$(CONFIG_PINCTRL_SUN9I_A80) += pinctrl-sun9i-a80.o
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c b/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c
new file mode 100644
index 000000000000..adb29422efc9
--- /dev/null
+++ b/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c
@@ -0,0 +1,749 @@
1/*
2 * Allwinner A80 SoCs pinctrl driver.
3 *
4 * Copyright (C) 2014 Maxime Ripard
5 *
6 * Maxime Ripard <maxime.ripard@free-electrons.com>
7 *
8 * This file is licensed under the terms of the GNU General Public
9 * License version 2. This program is licensed "as is" without any
10 * warranty of any kind, whether express or implied.
11 */
12
13#include <linux/module.h>
14#include <linux/platform_device.h>
15#include <linux/of.h>
16#include <linux/of_device.h>
17#include <linux/pinctrl/pinctrl.h>
18
19#include "pinctrl-sunxi.h"
20
21static const struct sunxi_desc_pin sun9i_a80_pins[] = {
22 SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 0),
23 SUNXI_FUNCTION(0x0, "gpio_in"),
24 SUNXI_FUNCTION(0x1, "gpio_out"),
25 SUNXI_FUNCTION(0x2, "gmac"), /* RXD3 */
26 SUNXI_FUNCTION(0x4, "uart1"), /* TX */
27 SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 0)), /* PA_EINT0 */
28 SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 1),
29 SUNXI_FUNCTION(0x0, "gpio_in"),
30 SUNXI_FUNCTION(0x1, "gpio_out"),
31 SUNXI_FUNCTION(0x2, "gmac"), /* RXD2 */
32 SUNXI_FUNCTION(0x4, "uart1"), /* RX */
33 SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 1)), /* PA_EINT1 */
34 SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 2),
35 SUNXI_FUNCTION(0x0, "gpio_in"),
36 SUNXI_FUNCTION(0x1, "gpio_out"),
37 SUNXI_FUNCTION(0x2, "gmac"), /* RXD1 */
38 SUNXI_FUNCTION(0x4, "uart1"), /* RTS */
39 SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 2)), /* PA_EINT2 */
40 SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 3),
41 SUNXI_FUNCTION(0x0, "gpio_in"),
42 SUNXI_FUNCTION(0x1, "gpio_out"),
43 SUNXI_FUNCTION(0x2, "gmac"), /* RXD0 */
44 SUNXI_FUNCTION(0x4, "uart1"), /* CTS */
45 SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 3)), /* PA_EINT3 */
46 SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 4),
47 SUNXI_FUNCTION(0x0, "gpio_in"),
48 SUNXI_FUNCTION(0x1, "gpio_out"),
49 SUNXI_FUNCTION(0x2, "gmac"), /* RXCK */
50 SUNXI_FUNCTION(0x4, "uart1"), /* DTR */
51 SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 4)), /* PA_EINT4 */
52 SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 5),
53 SUNXI_FUNCTION(0x0, "gpio_in"),
54 SUNXI_FUNCTION(0x1, "gpio_out"),
55 SUNXI_FUNCTION(0x2, "gmac"), /* RXCTL */
56 SUNXI_FUNCTION(0x4, "uart1"), /* DSR */
57 SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 5)), /* PA_EINT5 */
58 SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 6),
59 SUNXI_FUNCTION(0x0, "gpio_in"),
60 SUNXI_FUNCTION(0x1, "gpio_out"),
61 SUNXI_FUNCTION(0x2, "gmac"), /* RXERR */
62 SUNXI_FUNCTION(0x4, "uart1"), /* DCD */
63 SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 6)), /* PA_EINT6 */
64 SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 7),
65 SUNXI_FUNCTION(0x0, "gpio_in"),
66 SUNXI_FUNCTION(0x1, "gpio_out"),
67 SUNXI_FUNCTION(0x2, "gmac"), /* TXD3 */
68 SUNXI_FUNCTION(0x4, "uart1"), /* RING */
69 SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 7)), /* PA_EINT7 */
70 SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 8),
71 SUNXI_FUNCTION(0x0, "gpio_in"),
72 SUNXI_FUNCTION(0x1, "gpio_out"),
73 SUNXI_FUNCTION(0x2, "gmac"), /* TXD2 */
74 SUNXI_FUNCTION(0x4, "eclk"), /* IN0 */
75 SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 8)), /* PA_EINT8 */
76 SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 9),
77 SUNXI_FUNCTION(0x0, "gpio_in"),
78 SUNXI_FUNCTION(0x1, "gpio_out"),
79 SUNXI_FUNCTION(0x2, "gmac"), /* TXEN */
80 SUNXI_FUNCTION(0x4, "eclk"), /* IN1 */
81 SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 9)), /* PA_EINT9 */
82 SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 10),
83 SUNXI_FUNCTION(0x0, "gpio_in"),
84 SUNXI_FUNCTION(0x1, "gpio_out"),
85 SUNXI_FUNCTION(0x2, "gmac"), /* TXD0 */
86 SUNXI_FUNCTION(0x4, "clk_out_a"),
87 SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 10)), /* PA_EINT10 */
88 SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 11),
89 SUNXI_FUNCTION(0x0, "gpio_in"),
90 SUNXI_FUNCTION(0x1, "gpio_out"),
91 SUNXI_FUNCTION(0x2, "gmac"), /* MII-CRS */
92 SUNXI_FUNCTION(0x4, "clk_out_b"),
93 SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 11)), /* PA_EINT11 */
94 SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 12),
95 SUNXI_FUNCTION(0x0, "gpio_in"),
96 SUNXI_FUNCTION(0x1, "gpio_out"),
97 SUNXI_FUNCTION(0x2, "gmac"), /* TXCK */
98 SUNXI_FUNCTION(0x4, "pwm3"), /* PWM_P */
99 SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 12)), /* PA_EINT12 */
100 SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 13),
101 SUNXI_FUNCTION(0x0, "gpio_in"),
102 SUNXI_FUNCTION(0x1, "gpio_out"),
103 SUNXI_FUNCTION(0x2, "gmac"), /* RGMII-TXCK / GMII-TXEN */
104 SUNXI_FUNCTION(0x4, "pwm3"), /* PWM_N */
105 SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 13)), /* PA_EINT13 */
106 SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 14),
107 SUNXI_FUNCTION(0x0, "gpio_in"),
108 SUNXI_FUNCTION(0x1, "gpio_out"),
109 SUNXI_FUNCTION(0x2, "gmac"), /* MII-TXERR */
110 SUNXI_FUNCTION(0x4, "spi1"), /* CS0 */
111 SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 14)), /* PA_EINT14 */
112 SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 15),
113 SUNXI_FUNCTION(0x0, "gpio_in"),
114 SUNXI_FUNCTION(0x1, "gpio_out"),
115 SUNXI_FUNCTION(0x2, "gmac"), /* RGMII-CLKIN / MII-COL */
116 SUNXI_FUNCTION(0x4, "spi1"), /* CLK */
117 SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 15)), /* PA_EINT15 */
118 SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 16),
119 SUNXI_FUNCTION(0x0, "gpio_in"),
120 SUNXI_FUNCTION(0x1, "gpio_out"),
121 SUNXI_FUNCTION(0x2, "gmac"), /* EMDC */
122 SUNXI_FUNCTION(0x4, "spi1"), /* MOSI */
123 SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 16)), /* PA_EINT16 */
124 SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 17),
125 SUNXI_FUNCTION(0x0, "gpio_in"),
126 SUNXI_FUNCTION(0x1, "gpio_out"),
127 SUNXI_FUNCTION(0x2, "gmac"), /* EMDIO */
128 SUNXI_FUNCTION(0x4, "spi1"), /* MISO */
129 SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 17)), /* PA_EINT17 */
130
131 /* Hole */
132 SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 5),
133 SUNXI_FUNCTION(0x0, "gpio_in"),
134 SUNXI_FUNCTION(0x1, "gpio_out"),
135 SUNXI_FUNCTION(0x3, "uart3"), /* TX */
136 SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 5)), /* PB_EINT5 */
137 SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 6),
138 SUNXI_FUNCTION(0x0, "gpio_in"),
139 SUNXI_FUNCTION(0x1, "gpio_out"),
140 SUNXI_FUNCTION(0x3, "uart3"), /* RX */
141 SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 6)), /* PB_EINT6 */
142
143 /* Hole */
144 SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 14),
145 SUNXI_FUNCTION(0x0, "gpio_in"),
146 SUNXI_FUNCTION(0x1, "gpio_out"),
147 SUNXI_FUNCTION(0x3, "mcsi"), /* MCLK */
148 SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 14)), /* PB_EINT14 */
149 SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 15),
150 SUNXI_FUNCTION(0x0, "gpio_in"),
151 SUNXI_FUNCTION(0x1, "gpio_out"),
152 SUNXI_FUNCTION(0x3, "mcsi"), /* SCK */
153 SUNXI_FUNCTION(0x4, "i2c4"), /* SCK */
154 SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 15)), /* PB_EINT15 */
155 SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 16),
156 SUNXI_FUNCTION(0x0, "gpio_in"),
157 SUNXI_FUNCTION(0x1, "gpio_out"),
158 SUNXI_FUNCTION(0x3, "mcsi"), /* SDA */
159 SUNXI_FUNCTION(0x4, "i2c4"), /* SDA */
160 SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 16)), /* PB_EINT16 */
161
162 /* Hole */
163 SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 0),
164 SUNXI_FUNCTION(0x0, "gpio_in"),
165 SUNXI_FUNCTION(0x1, "gpio_out"),
166 SUNXI_FUNCTION(0x2, "nand0"), /* WE */
167 SUNXI_FUNCTION(0x3, "spi0")), /* MOSI */
168 SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 1),
169 SUNXI_FUNCTION(0x0, "gpio_in"),
170 SUNXI_FUNCTION(0x1, "gpio_out"),
171 SUNXI_FUNCTION(0x2, "nand0"), /* ALE */
172 SUNXI_FUNCTION(0x3, "spi0")), /* MISO */
173 SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 2),
174 SUNXI_FUNCTION(0x0, "gpio_in"),
175 SUNXI_FUNCTION(0x1, "gpio_out"),
176 SUNXI_FUNCTION(0x2, "nand0"), /* CLE */
177 SUNXI_FUNCTION(0x3, "spi0")), /* CLK */
178 SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 3),
179 SUNXI_FUNCTION(0x0, "gpio_in"),
180 SUNXI_FUNCTION(0x1, "gpio_out"),
181 SUNXI_FUNCTION(0x2, "nand0")), /* CE1 */
182 SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 4),
183 SUNXI_FUNCTION(0x0, "gpio_in"),
184 SUNXI_FUNCTION(0x1, "gpio_out"),
185 SUNXI_FUNCTION(0x2, "nand0")), /* CE0 */
186 SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 5),
187 SUNXI_FUNCTION(0x0, "gpio_in"),
188 SUNXI_FUNCTION(0x1, "gpio_out"),
189 SUNXI_FUNCTION(0x2, "nand0")), /* RE */
190 SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 6),
191 SUNXI_FUNCTION(0x0, "gpio_in"),
192 SUNXI_FUNCTION(0x1, "gpio_out"),
193 SUNXI_FUNCTION(0x2, "nand0"), /* RB0 */
194 SUNXI_FUNCTION(0x3, "mmc2")), /* CMD */
195 SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 7),
196 SUNXI_FUNCTION(0x0, "gpio_in"),
197 SUNXI_FUNCTION(0x1, "gpio_out"),
198 SUNXI_FUNCTION(0x2, "nand0"), /* RB1 */
199 SUNXI_FUNCTION(0x3, "mmc2")), /* CLK */
200 SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 8),
201 SUNXI_FUNCTION(0x0, "gpio_in"),
202 SUNXI_FUNCTION(0x1, "gpio_out"),
203 SUNXI_FUNCTION(0x2, "nand0"), /* DQ0 */
204 SUNXI_FUNCTION(0x3, "mmc2")), /* D0 */
205 SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 9),
206 SUNXI_FUNCTION(0x0, "gpio_in"),
207 SUNXI_FUNCTION(0x1, "gpio_out"),
208 SUNXI_FUNCTION(0x2, "nand0"), /* DQ1 */
209 SUNXI_FUNCTION(0x3, "mmc2")), /* D1 */
210 SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 10),
211 SUNXI_FUNCTION(0x0, "gpio_in"),
212 SUNXI_FUNCTION(0x1, "gpio_out"),
213 SUNXI_FUNCTION(0x2, "nand0"), /* DQ2 */
214 SUNXI_FUNCTION(0x3, "mmc2")), /* D2 */
215 SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 11),
216 SUNXI_FUNCTION(0x0, "gpio_in"),
217 SUNXI_FUNCTION(0x1, "gpio_out"),
218 SUNXI_FUNCTION(0x2, "nand0"), /* DQ3 */
219 SUNXI_FUNCTION(0x3, "mmc2")), /* D3 */
220 SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 12),
221 SUNXI_FUNCTION(0x0, "gpio_in"),
222 SUNXI_FUNCTION(0x1, "gpio_out"),
223 SUNXI_FUNCTION(0x2, "nand0"), /* DQ4 */
224 SUNXI_FUNCTION(0x3, "mmc2")), /* D4 */
225 SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 13),
226 SUNXI_FUNCTION(0x0, "gpio_in"),
227 SUNXI_FUNCTION(0x1, "gpio_out"),
228 SUNXI_FUNCTION(0x2, "nand0"), /* DQ5 */
229 SUNXI_FUNCTION(0x3, "mmc2")), /* D5 */
230 SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 14),
231 SUNXI_FUNCTION(0x0, "gpio_in"),
232 SUNXI_FUNCTION(0x1, "gpio_out"),
233 SUNXI_FUNCTION(0x2, "nand0"), /* DQ6 */
234 SUNXI_FUNCTION(0x3, "mmc2")), /* D6 */
235 SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 15),
236 SUNXI_FUNCTION(0x0, "gpio_in"),
237 SUNXI_FUNCTION(0x1, "gpio_out"),
238 SUNXI_FUNCTION(0x2, "nand0"), /* DQ7 */
239 SUNXI_FUNCTION(0x3, "mmc2")), /* D7 */
240 SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 16),
241 SUNXI_FUNCTION(0x0, "gpio_in"),
242 SUNXI_FUNCTION(0x1, "gpio_out"),
243 SUNXI_FUNCTION(0x2, "nand0"), /* DQS */
244 SUNXI_FUNCTION(0x3, "mmc2")), /* RST */
245 SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 17),
246 SUNXI_FUNCTION(0x0, "gpio_in"),
247 SUNXI_FUNCTION(0x1, "gpio_out"),
248 SUNXI_FUNCTION(0x2, "nand0"), /* CE2 */
249 SUNXI_FUNCTION(0x3, "nand0_b")), /* RE */
250 SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 18),
251 SUNXI_FUNCTION(0x0, "gpio_in"),
252 SUNXI_FUNCTION(0x1, "gpio_out"),
253 SUNXI_FUNCTION(0x2, "nand0"), /* CE3 */
254 SUNXI_FUNCTION(0x3, "nand0_b")), /* DQS */
255 SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 19),
256 SUNXI_FUNCTION(0x0, "gpio_in"),
257 SUNXI_FUNCTION(0x1, "gpio_out"),
258 SUNXI_FUNCTION(0x3, "spi0")), /* CS0 */
259
260 /* Hole */
261 SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 0),
262 SUNXI_FUNCTION(0x0, "gpio_in"),
263 SUNXI_FUNCTION(0x1, "gpio_out"),
264 SUNXI_FUNCTION(0x2, "lcd0"), /* D0 */
265 SUNXI_FUNCTION(0x3, "lvds0")), /* VP0 */
266 SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 1),
267 SUNXI_FUNCTION(0x0, "gpio_in"),
268 SUNXI_FUNCTION(0x1, "gpio_out"),
269 SUNXI_FUNCTION(0x2, "lcd0"), /* D1 */
270 SUNXI_FUNCTION(0x3, "lvds0")), /* VN0 */
271 SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 2),
272 SUNXI_FUNCTION(0x0, "gpio_in"),
273 SUNXI_FUNCTION(0x1, "gpio_out"),
274 SUNXI_FUNCTION(0x2, "lcd0"), /* D2 */
275 SUNXI_FUNCTION(0x3, "lvds0")), /* VP1 */
276 SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 3),
277 SUNXI_FUNCTION(0x0, "gpio_in"),
278 SUNXI_FUNCTION(0x1, "gpio_out"),
279 SUNXI_FUNCTION(0x2, "lcd0"), /* D3 */
280 SUNXI_FUNCTION(0x3, "lvds0")), /* VN1 */
281 SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 4),
282 SUNXI_FUNCTION(0x0, "gpio_in"),
283 SUNXI_FUNCTION(0x1, "gpio_out"),
284 SUNXI_FUNCTION(0x2, "lcd0"), /* D4 */
285 SUNXI_FUNCTION(0x3, "lvds0")), /* VP2 */
286 SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 5),
287 SUNXI_FUNCTION(0x0, "gpio_in"),
288 SUNXI_FUNCTION(0x1, "gpio_out"),
289 SUNXI_FUNCTION(0x2, "lcd0"), /* D5 */
290 SUNXI_FUNCTION(0x3, "lvds0")), /* VN2 */
291 SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 6),
292 SUNXI_FUNCTION(0x0, "gpio_in"),
293 SUNXI_FUNCTION(0x1, "gpio_out"),
294 SUNXI_FUNCTION(0x2, "lcd0"), /* D6 */
295 SUNXI_FUNCTION(0x3, "lvds0")), /* VPC */
296 SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 7),
297 SUNXI_FUNCTION(0x0, "gpio_in"),
298 SUNXI_FUNCTION(0x1, "gpio_out"),
299 SUNXI_FUNCTION(0x2, "lcd0"), /* D7 */
300 SUNXI_FUNCTION(0x3, "lvds0")), /* VNC */
301 SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 8),
302 SUNXI_FUNCTION(0x0, "gpio_in"),
303 SUNXI_FUNCTION(0x1, "gpio_out"),
304 SUNXI_FUNCTION(0x2, "lcd0"), /* D8 */
305 SUNXI_FUNCTION(0x3, "lvds0")), /* VP3 */
306 SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 9),
307 SUNXI_FUNCTION(0x0, "gpio_in"),
308 SUNXI_FUNCTION(0x1, "gpio_out"),
309 SUNXI_FUNCTION(0x2, "lcd0"), /* D9 */
310 SUNXI_FUNCTION(0x3, "lvds0")), /* VN3 */
311 SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 10),
312 SUNXI_FUNCTION(0x0, "gpio_in"),
313 SUNXI_FUNCTION(0x1, "gpio_out"),
314 SUNXI_FUNCTION(0x2, "lcd0"), /* D10 */
315 SUNXI_FUNCTION(0x3, "lvds1")), /* VP0 */
316 SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 11),
317 SUNXI_FUNCTION(0x0, "gpio_in"),
318 SUNXI_FUNCTION(0x1, "gpio_out"),
319 SUNXI_FUNCTION(0x2, "lcd0"), /* D11 */
320 SUNXI_FUNCTION(0x3, "lvds1")), /* VN0 */
321 SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 12),
322 SUNXI_FUNCTION(0x0, "gpio_in"),
323 SUNXI_FUNCTION(0x1, "gpio_out"),
324 SUNXI_FUNCTION(0x2, "lcd0"), /* D12 */
325 SUNXI_FUNCTION(0x3, "lvds1")), /* VP1 */
326 SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 13),
327 SUNXI_FUNCTION(0x0, "gpio_in"),
328 SUNXI_FUNCTION(0x1, "gpio_out"),
329 SUNXI_FUNCTION(0x2, "lcd0"), /* D13 */
330 SUNXI_FUNCTION(0x3, "lvds1")), /* VN1 */
331 SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 14),
332 SUNXI_FUNCTION(0x0, "gpio_in"),
333 SUNXI_FUNCTION(0x1, "gpio_out"),
334 SUNXI_FUNCTION(0x2, "lcd0"), /* D14 */
335 SUNXI_FUNCTION(0x3, "lvds1")), /* VP2 */
336 SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 15),
337 SUNXI_FUNCTION(0x0, "gpio_in"),
338 SUNXI_FUNCTION(0x1, "gpio_out"),
339 SUNXI_FUNCTION(0x2, "lcd0"), /* D15 */
340 SUNXI_FUNCTION(0x3, "lvds1")), /* VN2 */
341 SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 16),
342 SUNXI_FUNCTION(0x0, "gpio_in"),
343 SUNXI_FUNCTION(0x1, "gpio_out"),
344 SUNXI_FUNCTION(0x2, "lcd0"), /* D16 */
345 SUNXI_FUNCTION(0x3, "lvds1")), /* VPC */
346 SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 17),
347 SUNXI_FUNCTION(0x0, "gpio_in"),
348 SUNXI_FUNCTION(0x1, "gpio_out"),
349 SUNXI_FUNCTION(0x2, "lcd0"), /* D17 */
350 SUNXI_FUNCTION(0x3, "lvds1")), /* VNC */
351 SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 18),
352 SUNXI_FUNCTION(0x0, "gpio_in"),
353 SUNXI_FUNCTION(0x1, "gpio_out"),
354 SUNXI_FUNCTION(0x2, "lcd0"), /* D18 */
355 SUNXI_FUNCTION(0x3, "lvds1")), /* VP3 */
356 SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 19),
357 SUNXI_FUNCTION(0x0, "gpio_in"),
358 SUNXI_FUNCTION(0x1, "gpio_out"),
359 SUNXI_FUNCTION(0x2, "lcd0"), /* D19 */
360 SUNXI_FUNCTION(0x3, "lvds1")), /* VN3 */
361 SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 20),
362 SUNXI_FUNCTION(0x0, "gpio_in"),
363 SUNXI_FUNCTION(0x1, "gpio_out"),
364 SUNXI_FUNCTION(0x2, "lcd0")), /* D20 */
365 SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 21),
366 SUNXI_FUNCTION(0x0, "gpio_in"),
367 SUNXI_FUNCTION(0x1, "gpio_out"),
368 SUNXI_FUNCTION(0x2, "lcd0")), /* D21 */
369 SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 22),
370 SUNXI_FUNCTION(0x0, "gpio_in"),
371 SUNXI_FUNCTION(0x1, "gpio_out"),
372 SUNXI_FUNCTION(0x2, "lcd0")), /* D22 */
373 SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 23),
374 SUNXI_FUNCTION(0x0, "gpio_in"),
375 SUNXI_FUNCTION(0x1, "gpio_out"),
376 SUNXI_FUNCTION(0x2, "lcd0")), /* D23 */
377 SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 24),
378 SUNXI_FUNCTION(0x0, "gpio_in"),
379 SUNXI_FUNCTION(0x1, "gpio_out"),
380 SUNXI_FUNCTION(0x2, "lcd0")), /* CLK */
381 SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 25),
382 SUNXI_FUNCTION(0x0, "gpio_in"),
383 SUNXI_FUNCTION(0x1, "gpio_out"),
384 SUNXI_FUNCTION(0x2, "lcd0")), /* DE */
385 SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 26),
386 SUNXI_FUNCTION(0x0, "gpio_in"),
387 SUNXI_FUNCTION(0x1, "gpio_out"),
388 SUNXI_FUNCTION(0x2, "lcd0")), /* HSYNC */
389 SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 27),
390 SUNXI_FUNCTION(0x0, "gpio_in"),
391 SUNXI_FUNCTION(0x1, "gpio_out"),
392 SUNXI_FUNCTION(0x2, "lcd0")), /* VSYNC */
393
394 /* Hole */
395 SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 0),
396 SUNXI_FUNCTION(0x0, "gpio_in"),
397 SUNXI_FUNCTION(0x1, "gpio_out"),
398 SUNXI_FUNCTION(0x2, "csi"), /* PCLK */
399 SUNXI_FUNCTION(0x3, "ts"), /* CLK */
400 SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 0)), /* PE_EINT0 */
401 SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 1),
402 SUNXI_FUNCTION(0x0, "gpio_in"),
403 SUNXI_FUNCTION(0x1, "gpio_out"),
404 SUNXI_FUNCTION(0x2, "csi"), /* MCLK */
405 SUNXI_FUNCTION(0x3, "ts"), /* ERR */
406 SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 1)), /* PE_EINT1 */
407 SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 2),
408 SUNXI_FUNCTION(0x0, "gpio_in"),
409 SUNXI_FUNCTION(0x1, "gpio_out"),
410 SUNXI_FUNCTION(0x2, "csi"), /* HSYNC */
411 SUNXI_FUNCTION(0x3, "ts"), /* SYNC */
412 SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 2)), /* PE_EINT2 */
413 SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 3),
414 SUNXI_FUNCTION(0x0, "gpio_in"),
415 SUNXI_FUNCTION(0x1, "gpio_out"),
416 SUNXI_FUNCTION(0x2, "csi"), /* VSYNC */
417 SUNXI_FUNCTION(0x3, "ts"), /* DVLD */
418 SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 3)), /* PE_EINT3 */
419 SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 4),
420 SUNXI_FUNCTION(0x0, "gpio_in"),
421 SUNXI_FUNCTION(0x1, "gpio_out"),
422 SUNXI_FUNCTION(0x2, "csi"), /* D0 */
423 SUNXI_FUNCTION(0x3, "spi2"), /* CS0 */
424 SUNXI_FUNCTION(0x4, "uart5"), /* TX */
425 SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 4)), /* PE_EINT4 */
426 SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 5),
427 SUNXI_FUNCTION(0x0, "gpio_in"),
428 SUNXI_FUNCTION(0x1, "gpio_out"),
429 SUNXI_FUNCTION(0x2, "csi"), /* D1 */
430 SUNXI_FUNCTION(0x3, "spi2"), /* CLK */
431 SUNXI_FUNCTION(0x4, "uart5"), /* RX */
432 SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 5)), /* PE_EINT5 */
433 SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 6),
434 SUNXI_FUNCTION(0x0, "gpio_in"),
435 SUNXI_FUNCTION(0x1, "gpio_out"),
436 SUNXI_FUNCTION(0x2, "csi"), /* D2 */
437 SUNXI_FUNCTION(0x3, "spi2"), /* MOSI */
438 SUNXI_FUNCTION(0x4, "uart5"), /* RTS */
439 SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 6)), /* PE_EINT6 */
440 SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 7),
441 SUNXI_FUNCTION(0x0, "gpio_in"),
442 SUNXI_FUNCTION(0x1, "gpio_out"),
443 SUNXI_FUNCTION(0x2, "csi"), /* D3 */
444 SUNXI_FUNCTION(0x3, "spi2"), /* MISO */
445 SUNXI_FUNCTION(0x4, "uart5"), /* CTS */
446 SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 7)), /* PE_EINT7 */
447 SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 8),
448 SUNXI_FUNCTION(0x0, "gpio_in"),
449 SUNXI_FUNCTION(0x1, "gpio_out"),
450 SUNXI_FUNCTION(0x2, "csi"), /* D4 */
451 SUNXI_FUNCTION(0x3, "ts"), /* D0 */
452 SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 8)), /* PE_EINT8 */
453 SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 9),
454 SUNXI_FUNCTION(0x0, "gpio_in"),
455 SUNXI_FUNCTION(0x1, "gpio_out"),
456 SUNXI_FUNCTION(0x2, "csi"), /* D5 */
457 SUNXI_FUNCTION(0x3, "ts"), /* D1 */
458 SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 9)), /* PE_EINT9 */
459 SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 10),
460 SUNXI_FUNCTION(0x0, "gpio_in"),
461 SUNXI_FUNCTION(0x1, "gpio_out"),
462 SUNXI_FUNCTION(0x2, "csi"), /* D6 */
463 SUNXI_FUNCTION(0x3, "ts"), /* D2 */
464 SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 10)), /* PE_EINT10 */
465 SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 11),
466 SUNXI_FUNCTION(0x0, "gpio_in"),
467 SUNXI_FUNCTION(0x1, "gpio_out"),
468 SUNXI_FUNCTION(0x2, "csi"), /* D7 */
469 SUNXI_FUNCTION(0x3, "ts"), /* D3 */
470 SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 11)), /* PE_EINT11 */
471 SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 12),
472 SUNXI_FUNCTION(0x0, "gpio_in"),
473 SUNXI_FUNCTION(0x1, "gpio_out"),
474 SUNXI_FUNCTION(0x2, "csi"), /* D8 */
475 SUNXI_FUNCTION(0x3, "ts"), /* D4 */
476 SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 12)), /* PE_EINT12 */
477 SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 13),
478 SUNXI_FUNCTION(0x0, "gpio_in"),
479 SUNXI_FUNCTION(0x1, "gpio_out"),
480 SUNXI_FUNCTION(0x2, "csi"), /* D9 */
481 SUNXI_FUNCTION(0x3, "ts"), /* D5 */
482 SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 13)), /* PE_EINT13 */
483 SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 14),
484 SUNXI_FUNCTION(0x0, "gpio_in"),
485 SUNXI_FUNCTION(0x1, "gpio_out"),
486 SUNXI_FUNCTION(0x2, "csi"), /* D10 */
487 SUNXI_FUNCTION(0x3, "ts"), /* D6 */
488 SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 14)), /* PE_EINT14 */
489 SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 15),
490 SUNXI_FUNCTION(0x0, "gpio_in"),
491 SUNXI_FUNCTION(0x1, "gpio_out"),
492 SUNXI_FUNCTION(0x2, "csi"), /* D11 */
493 SUNXI_FUNCTION(0x3, "ts"), /* D7 */
494 SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 15)), /* PE_EINT15 */
495 SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 16),
496 SUNXI_FUNCTION(0x0, "gpio_in"),
497 SUNXI_FUNCTION(0x1, "gpio_out"),
498 SUNXI_FUNCTION(0x2, "csi"), /* SCK */
499 SUNXI_FUNCTION(0x3, "i2c4"), /* SCK */
500 SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 16)), /* PE_EINT16 */
501 SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 17),
502 SUNXI_FUNCTION(0x0, "gpio_in"),
503 SUNXI_FUNCTION(0x1, "gpio_out"),
504 SUNXI_FUNCTION(0x2, "csi"), /* SDA */
505 SUNXI_FUNCTION(0x3, "i2c4"), /* SDA */
506 SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 17)), /* PE_EINT17 */
507
508 /* Hole */
509 SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 0),
510 SUNXI_FUNCTION(0x0, "gpio_in"),
511 SUNXI_FUNCTION(0x1, "gpio_out"),
512 SUNXI_FUNCTION(0x2, "mmc0")), /* D1 */
513 SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 1),
514 SUNXI_FUNCTION(0x0, "gpio_in"),
515 SUNXI_FUNCTION(0x1, "gpio_out"),
516 SUNXI_FUNCTION(0x2, "mmc0")), /* D0 */
517 SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 2),
518 SUNXI_FUNCTION(0x0, "gpio_in"),
519 SUNXI_FUNCTION(0x1, "gpio_out"),
520 SUNXI_FUNCTION(0x2, "mmc0"), /* CLK */
521 SUNXI_FUNCTION(0x4, "uart0")), /* TX */
522 SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 3),
523 SUNXI_FUNCTION(0x0, "gpio_in"),
524 SUNXI_FUNCTION(0x1, "gpio_out"),
525 SUNXI_FUNCTION(0x2, "mmc0")), /* CMD */
526 SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 4),
527 SUNXI_FUNCTION(0x0, "gpio_in"),
528 SUNXI_FUNCTION(0x1, "gpio_out"),
529 SUNXI_FUNCTION(0x2, "mmc0"), /* D3 */
530 SUNXI_FUNCTION(0x4, "uart0")), /* RX */
531 SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 5),
532 SUNXI_FUNCTION(0x0, "gpio_in"),
533 SUNXI_FUNCTION(0x1, "gpio_out"),
534 SUNXI_FUNCTION(0x2, "mmc0")), /* D2 */
535
536 /* Hole */
537 SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 0),
538 SUNXI_FUNCTION(0x0, "gpio_in"),
539 SUNXI_FUNCTION(0x1, "gpio_out"),
540 SUNXI_FUNCTION(0x2, "mmc1"), /* CLK */
541 SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 0)), /* PG_EINT0 */
542 SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 1),
543 SUNXI_FUNCTION(0x0, "gpio_in"),
544 SUNXI_FUNCTION(0x1, "gpio_out"),
545 SUNXI_FUNCTION(0x2, "mmc1"), /* CMD */
546 SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 1)), /* PG_EINT1 */
547 SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 2),
548 SUNXI_FUNCTION(0x0, "gpio_in"),
549 SUNXI_FUNCTION(0x1, "gpio_out"),
550 SUNXI_FUNCTION(0x2, "mmc1"), /* D0 */
551 SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 2)), /* PG_EINT2 */
552 SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 3),
553 SUNXI_FUNCTION(0x0, "gpio_in"),
554 SUNXI_FUNCTION(0x1, "gpio_out"),
555 SUNXI_FUNCTION(0x2, "mmc1"), /* D1 */
556 SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 3)), /* PG_EINT3 */
557 SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 4),
558 SUNXI_FUNCTION(0x0, "gpio_in"),
559 SUNXI_FUNCTION(0x1, "gpio_out"),
560 SUNXI_FUNCTION(0x2, "mmc1"), /* D2 */
561 SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 4)), /* PG_EINT4 */
562 SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 5),
563 SUNXI_FUNCTION(0x0, "gpio_in"),
564 SUNXI_FUNCTION(0x1, "gpio_out"),
565 SUNXI_FUNCTION(0x2, "mmc1"), /* D3 */
566 SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 5)), /* PG_EINT5 */
567 SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 6),
568 SUNXI_FUNCTION(0x0, "gpio_in"),
569 SUNXI_FUNCTION(0x1, "gpio_out"),
570 SUNXI_FUNCTION(0x2, "uart2"), /* TX */
571 SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 6)), /* PG_EINT6 */
572 SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 7),
573 SUNXI_FUNCTION(0x0, "gpio_in"),
574 SUNXI_FUNCTION(0x1, "gpio_out"),
575 SUNXI_FUNCTION(0x2, "uart2"), /* RX */
576 SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 7)), /* PG_EINT7 */
577 SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 8),
578 SUNXI_FUNCTION(0x0, "gpio_in"),
579 SUNXI_FUNCTION(0x1, "gpio_out"),
580 SUNXI_FUNCTION(0x2, "uart2"), /* RTS */
581 SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 8)), /* PG_EINT8 */
582 SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 9),
583 SUNXI_FUNCTION(0x0, "gpio_in"),
584 SUNXI_FUNCTION(0x1, "gpio_out"),
585 SUNXI_FUNCTION(0x2, "uart2"), /* CTS */
586 SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 9)), /* PG_EINT9 */
587 SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 10),
588 SUNXI_FUNCTION(0x0, "gpio_in"),
589 SUNXI_FUNCTION(0x1, "gpio_out"),
590 SUNXI_FUNCTION(0x2, "i2c3"), /* SCK */
591 SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 10)), /* PG_EINT10 */
592 SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 11),
593 SUNXI_FUNCTION(0x0, "gpio_in"),
594 SUNXI_FUNCTION(0x1, "gpio_out"),
595 SUNXI_FUNCTION(0x2, "i2c3"), /* SDA */
596 SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 11)), /* PG_EINT11 */
597 SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 12),
598 SUNXI_FUNCTION(0x0, "gpio_in"),
599 SUNXI_FUNCTION(0x1, "gpio_out"),
600 SUNXI_FUNCTION(0x2, "uart4"), /* TX */
601 SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 12)), /* PG_EINT12 */
602 SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 13),
603 SUNXI_FUNCTION(0x0, "gpio_in"),
604 SUNXI_FUNCTION(0x1, "gpio_out"),
605 SUNXI_FUNCTION(0x2, "uart4"), /* RX */
606 SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 13)), /* PG_EINT13 */
607 SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 14),
608 SUNXI_FUNCTION(0x0, "gpio_in"),
609 SUNXI_FUNCTION(0x1, "gpio_out"),
610 SUNXI_FUNCTION(0x2, "uart4"), /* RTS */
611 SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 14)), /* PG_EINT14 */
612 SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 15),
613 SUNXI_FUNCTION(0x0, "gpio_in"),
614 SUNXI_FUNCTION(0x1, "gpio_out"),
615 SUNXI_FUNCTION(0x2, "uart4"), /* CTS */
616 SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 15)), /* PG_EINT15 */
617
618 /* Hole */
619 SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 0),
620 SUNXI_FUNCTION(0x0, "gpio_in"),
621 SUNXI_FUNCTION(0x1, "gpio_out"),
622 SUNXI_FUNCTION(0x2, "i2c0")), /* SCK */
623 SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 1),
624 SUNXI_FUNCTION(0x0, "gpio_in"),
625 SUNXI_FUNCTION(0x1, "gpio_out"),
626 SUNXI_FUNCTION(0x2, "i2c0")), /* SDA */
627 SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 2),
628 SUNXI_FUNCTION(0x0, "gpio_in"),
629 SUNXI_FUNCTION(0x1, "gpio_out"),
630 SUNXI_FUNCTION(0x2, "i2c1")), /* SCK */
631 SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 3),
632 SUNXI_FUNCTION(0x0, "gpio_in"),
633 SUNXI_FUNCTION(0x1, "gpio_out"),
634 SUNXI_FUNCTION(0x2, "i2c1")), /* SDA */
635 SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 4),
636 SUNXI_FUNCTION(0x0, "gpio_in"),
637 SUNXI_FUNCTION(0x1, "gpio_out"),
638 SUNXI_FUNCTION(0x2, "i2c2")), /* SCK */
639 SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 5),
640 SUNXI_FUNCTION(0x0, "gpio_in"),
641 SUNXI_FUNCTION(0x1, "gpio_out"),
642 SUNXI_FUNCTION(0x2, "i2c2")), /* SDA */
643 SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 6),
644 SUNXI_FUNCTION(0x0, "gpio_in"),
645 SUNXI_FUNCTION(0x1, "gpio_out"),
646 SUNXI_FUNCTION(0x2, "pwm0")),
647
648 /* Hole */
649 SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 8),
650 SUNXI_FUNCTION(0x0, "gpio_in"),
651 SUNXI_FUNCTION(0x1, "gpio_out"),
652 SUNXI_FUNCTION(0x3, "pwm1"), /* Positive */
653 SUNXI_FUNCTION_IRQ_BANK(0x6, 4, 8)), /* PH_EINT8 */
654 SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 9),
655 SUNXI_FUNCTION(0x0, "gpio_in"),
656 SUNXI_FUNCTION(0x1, "gpio_out"),
657 SUNXI_FUNCTION(0x3, "pwm1"), /* Negative */
658 SUNXI_FUNCTION_IRQ_BANK(0x6, 4, 9)), /* PH_EINT9 */
659 SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 10),
660 SUNXI_FUNCTION(0x0, "gpio_in"),
661 SUNXI_FUNCTION(0x1, "gpio_out"),
662 SUNXI_FUNCTION(0x3, "pwm2"), /* Positive */
663 SUNXI_FUNCTION_IRQ_BANK(0x6, 4, 10)), /* PH_EINT10 */
664 SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 11),
665 SUNXI_FUNCTION(0x0, "gpio_in"),
666 SUNXI_FUNCTION(0x1, "gpio_out"),
667 SUNXI_FUNCTION(0x3, "pwm2"), /* Negative */
 668		  SUNXI_FUNCTION_IRQ_BANK(0x6, 4, 11)),	/* PH_EINT11 */
669 SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 12),
670 SUNXI_FUNCTION(0x0, "gpio_in"),
671 SUNXI_FUNCTION(0x1, "gpio_out"),
672 SUNXI_FUNCTION(0x2, "uart0"), /* TX */
673 SUNXI_FUNCTION(0x3, "spi3"), /* CS2 */
674 SUNXI_FUNCTION_IRQ_BANK(0x6, 4, 12)), /* PH_EINT12 */
675 SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 13),
676 SUNXI_FUNCTION(0x0, "gpio_in"),
677 SUNXI_FUNCTION(0x1, "gpio_out"),
678 SUNXI_FUNCTION(0x2, "uart0"), /* RX */
679 SUNXI_FUNCTION(0x3, "spi3"), /* CS2 */
680 SUNXI_FUNCTION_IRQ_BANK(0x6, 4, 13)), /* PH_EINT13 */
681 SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 14),
682 SUNXI_FUNCTION(0x0, "gpio_in"),
683 SUNXI_FUNCTION(0x1, "gpio_out"),
684 SUNXI_FUNCTION(0x2, "spi3"), /* CLK */
685 SUNXI_FUNCTION_IRQ_BANK(0x6, 4, 14)), /* PH_EINT14 */
686 SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 15),
687 SUNXI_FUNCTION(0x0, "gpio_in"),
688 SUNXI_FUNCTION(0x1, "gpio_out"),
689 SUNXI_FUNCTION(0x2, "spi3"), /* MOSI */
690 SUNXI_FUNCTION_IRQ_BANK(0x6, 4, 15)), /* PH_EINT15 */
691 SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 16),
692 SUNXI_FUNCTION(0x0, "gpio_in"),
693 SUNXI_FUNCTION(0x1, "gpio_out"),
694 SUNXI_FUNCTION(0x2, "spi3"), /* MISO */
695 SUNXI_FUNCTION_IRQ_BANK(0x6, 4, 16)), /* PH_EINT16 */
696 SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 17),
697 SUNXI_FUNCTION(0x0, "gpio_in"),
698 SUNXI_FUNCTION(0x1, "gpio_out"),
699 SUNXI_FUNCTION(0x2, "spi3"), /* CS0 */
700 SUNXI_FUNCTION_IRQ_BANK(0x6, 4, 17)), /* PH_EINT17 */
701 SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 18),
702 SUNXI_FUNCTION(0x0, "gpio_in"),
703 SUNXI_FUNCTION(0x1, "gpio_out"),
704 SUNXI_FUNCTION(0x2, "spi3"), /* CS1 */
705 SUNXI_FUNCTION_IRQ_BANK(0x6, 4, 18)), /* PH_EINT18 */
706 SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 19),
707 SUNXI_FUNCTION(0x0, "gpio_in"),
708 SUNXI_FUNCTION(0x1, "gpio_out"),
709 SUNXI_FUNCTION(0x2, "hdmi")), /* SCL */
710 SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 20),
711 SUNXI_FUNCTION(0x0, "gpio_in"),
712 SUNXI_FUNCTION(0x1, "gpio_out"),
713 SUNXI_FUNCTION(0x2, "hdmi")), /* SDA */
714 SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 21),
715 SUNXI_FUNCTION(0x0, "gpio_in"),
716 SUNXI_FUNCTION(0x1, "gpio_out"),
717 SUNXI_FUNCTION(0x2, "hdmi")), /* CEC */
718};
719
720static const struct sunxi_pinctrl_desc sun9i_a80_pinctrl_data = {
721 .pins = sun9i_a80_pins,
722 .npins = ARRAY_SIZE(sun9i_a80_pins),
723 .irq_banks = 5,
724};
725
726static int sun9i_a80_pinctrl_probe(struct platform_device *pdev)
727{
728 return sunxi_pinctrl_init(pdev,
729 &sun9i_a80_pinctrl_data);
730}
731
732static struct of_device_id sun9i_a80_pinctrl_match[] = {
733 { .compatible = "allwinner,sun9i-a80-pinctrl", },
734 {}
735};
736MODULE_DEVICE_TABLE(of, sun9i_a80_pinctrl_match);
737
738static struct platform_driver sun9i_a80_pinctrl_driver = {
739 .probe = sun9i_a80_pinctrl_probe,
740 .driver = {
741 .name = "sun9i-a80-pinctrl",
742 .of_match_table = sun9i_a80_pinctrl_match,
743 },
744};
745module_platform_driver(sun9i_a80_pinctrl_driver);
746
747MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
748MODULE_DESCRIPTION("Allwinner A80 pinctrl driver");
749MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.h b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
index 4245b96c7996..5a51523a3459 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.h
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
@@ -27,6 +27,7 @@
27#define PI_BASE 256 27#define PI_BASE 256
28#define PL_BASE 352 28#define PL_BASE 352
29#define PM_BASE 384 29#define PM_BASE 384
30#define PN_BASE 416
30 31
31#define SUNXI_PINCTRL_PIN(bank, pin) \ 32#define SUNXI_PINCTRL_PIN(bank, pin) \
32 PINCTRL_PIN(P ## bank ## _BASE + (pin), "P" #bank #pin) 33 PINCTRL_PIN(P ## bank ## _BASE + (pin), "P" #bank #pin)
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index f65ff49bb275..028e76504519 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -71,6 +71,15 @@ config POWER_RESET_HISI
71 help 71 help
72 Reboot support for Hisilicon boards. 72 Reboot support for Hisilicon boards.
73 73
74config POWER_RESET_IMX
75 bool "IMX6 power-off driver"
76 depends on POWER_RESET && SOC_IMX6
77 help
 78	  This driver supports powering off an external PMIC via PMIC_ON_REQ on
 79	  i.MX6 boards. If you want to use another pin to control external power,
 80	  please say N here or disable it in the dts to make sure pm_power_off is
 81	  never wrongly overwritten by this driver.
82
74config POWER_RESET_MSM 83config POWER_RESET_MSM
75 bool "Qualcomm MSM power-off driver" 84 bool "Qualcomm MSM power-off driver"
76 depends on ARCH_QCOM 85 depends on ARCH_QCOM
diff --git a/drivers/power/reset/Makefile b/drivers/power/reset/Makefile
index 76ce1c59469b..1d4804d6b323 100644
--- a/drivers/power/reset/Makefile
+++ b/drivers/power/reset/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_POWER_RESET_BRCMSTB) += brcmstb-reboot.o
6obj-$(CONFIG_POWER_RESET_GPIO) += gpio-poweroff.o 6obj-$(CONFIG_POWER_RESET_GPIO) += gpio-poweroff.o
7obj-$(CONFIG_POWER_RESET_GPIO_RESTART) += gpio-restart.o 7obj-$(CONFIG_POWER_RESET_GPIO_RESTART) += gpio-restart.o
8obj-$(CONFIG_POWER_RESET_HISI) += hisi-reboot.o 8obj-$(CONFIG_POWER_RESET_HISI) += hisi-reboot.o
9obj-$(CONFIG_POWER_RESET_IMX) += imx-snvs-poweroff.o
9obj-$(CONFIG_POWER_RESET_MSM) += msm-poweroff.o 10obj-$(CONFIG_POWER_RESET_MSM) += msm-poweroff.o
10obj-$(CONFIG_POWER_RESET_LTC2952) += ltc2952-poweroff.o 11obj-$(CONFIG_POWER_RESET_LTC2952) += ltc2952-poweroff.o
11obj-$(CONFIG_POWER_RESET_QNAP) += qnap-poweroff.o 12obj-$(CONFIG_POWER_RESET_QNAP) += qnap-poweroff.o
diff --git a/drivers/power/reset/at91-reset.c b/drivers/power/reset/at91-reset.c
index 3cb36693343a..69a75d99ae92 100644
--- a/drivers/power/reset/at91-reset.c
+++ b/drivers/power/reset/at91-reset.c
@@ -19,8 +19,8 @@
19 19
20#include <asm/system_misc.h> 20#include <asm/system_misc.h>
21 21
22#include <mach/at91sam9_ddrsdr.h> 22#include <soc/at91/at91sam9_ddrsdr.h>
23#include <mach/at91sam9_sdramc.h> 23#include <soc/at91/at91sam9_sdramc.h>
24 24
25#define AT91_RSTC_CR 0x00 /* Reset Controller Control Register */ 25#define AT91_RSTC_CR 0x00 /* Reset Controller Control Register */
26#define AT91_RSTC_PROCRST BIT(0) /* Processor Reset */ 26#define AT91_RSTC_PROCRST BIT(0) /* Processor Reset */
diff --git a/drivers/power/reset/imx-snvs-poweroff.c b/drivers/power/reset/imx-snvs-poweroff.c
new file mode 100644
index 000000000000..ad6ce5020ea7
--- /dev/null
+++ b/drivers/power/reset/imx-snvs-poweroff.c
@@ -0,0 +1,66 @@
1/* Power off driver for i.mx6
2 * Copyright (c) 2014, FREESCALE CORPORATION. All rights reserved.
3 *
4 * based on msm-poweroff.c
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 */
16
17#include <linux/err.h>
18#include <linux/init.h>
19#include <linux/io.h>
20#include <linux/kernel.h>
21#include <linux/module.h>
22#include <linux/of.h>
23#include <linux/of_address.h>
24#include <linux/platform_device.h>
25
26static void __iomem *snvs_base;
27
28static void do_imx_poweroff(void)
29{
30 u32 value = readl(snvs_base);
31
32 /* set TOP and DP_EN bit */
33 writel(value | 0x60, snvs_base);
34}
35
36static int imx_poweroff_probe(struct platform_device *pdev)
37{
38 snvs_base = of_iomap(pdev->dev.of_node, 0);
39 if (!snvs_base) {
40 dev_err(&pdev->dev, "failed to get memory\n");
41 return -ENODEV;
42 }
43
44 pm_power_off = do_imx_poweroff;
45 return 0;
46}
47
48static const struct of_device_id of_imx_poweroff_match[] = {
49 { .compatible = "fsl,sec-v4.0-poweroff", },
50 {},
51};
52MODULE_DEVICE_TABLE(of, of_imx_poweroff_match);
53
54static struct platform_driver imx_poweroff_driver = {
55 .probe = imx_poweroff_probe,
56 .driver = {
57 .name = "imx-snvs-poweroff",
58 .of_match_table = of_match_ptr(of_imx_poweroff_match),
59 },
60};
61
62static int __init imx_poweroff_init(void)
63{
64 return platform_driver_register(&imx_poweroff_driver);
65}
66device_initcall(imx_poweroff_init);
diff --git a/drivers/powercap/Kconfig b/drivers/powercap/Kconfig
index a7c81b53d88a..85727ef6ce8e 100644
--- a/drivers/powercap/Kconfig
+++ b/drivers/powercap/Kconfig
@@ -17,7 +17,7 @@ if POWERCAP
17# Client driver configurations go here. 17# Client driver configurations go here.
18config INTEL_RAPL 18config INTEL_RAPL
19 tristate "Intel RAPL Support" 19 tristate "Intel RAPL Support"
20 depends on X86 20 depends on X86 && IOSF_MBI
21 default n 21 default n
22 ---help--- 22 ---help---
23 This enables support for the Intel Running Average Power Limit (RAPL) 23 This enables support for the Intel Running Average Power Limit (RAPL)
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
index 45e05b32f9b6..c71443c4f265 100644
--- a/drivers/powercap/intel_rapl.c
+++ b/drivers/powercap/intel_rapl.c
@@ -29,6 +29,7 @@
29#include <linux/sysfs.h> 29#include <linux/sysfs.h>
30#include <linux/cpu.h> 30#include <linux/cpu.h>
31#include <linux/powercap.h> 31#include <linux/powercap.h>
32#include <asm/iosf_mbi.h>
32 33
33#include <asm/processor.h> 34#include <asm/processor.h>
34#include <asm/cpu_device_id.h> 35#include <asm/cpu_device_id.h>
@@ -70,11 +71,6 @@
70#define RAPL_PRIMITIVE_DERIVED BIT(1) /* not from raw data */ 71#define RAPL_PRIMITIVE_DERIVED BIT(1) /* not from raw data */
71#define RAPL_PRIMITIVE_DUMMY BIT(2) 72#define RAPL_PRIMITIVE_DUMMY BIT(2)
72 73
73/* scale RAPL units to avoid floating point math inside kernel */
74#define POWER_UNIT_SCALE (1000000)
75#define ENERGY_UNIT_SCALE (1000000)
76#define TIME_UNIT_SCALE (1000000)
77
78#define TIME_WINDOW_MAX_MSEC 40000 74#define TIME_WINDOW_MAX_MSEC 40000
79#define TIME_WINDOW_MIN_MSEC 250 75#define TIME_WINDOW_MIN_MSEC 250
80 76
@@ -175,9 +171,9 @@ struct rapl_package {
175 unsigned int id; /* physical package/socket id */ 171 unsigned int id; /* physical package/socket id */
176 unsigned int nr_domains; 172 unsigned int nr_domains;
177 unsigned long domain_map; /* bit map of active domains */ 173 unsigned long domain_map; /* bit map of active domains */
178 unsigned int power_unit_divisor; 174 unsigned int power_unit;
179 unsigned int energy_unit_divisor; 175 unsigned int energy_unit;
180 unsigned int time_unit_divisor; 176 unsigned int time_unit;
181 struct rapl_domain *domains; /* array of domains, sized at runtime */ 177 struct rapl_domain *domains; /* array of domains, sized at runtime */
182 struct powercap_zone *power_zone; /* keep track of parent zone */ 178 struct powercap_zone *power_zone; /* keep track of parent zone */
183 int nr_cpus; /* active cpus on the package, topology info is lost during 179 int nr_cpus; /* active cpus on the package, topology info is lost during
@@ -188,6 +184,18 @@ struct rapl_package {
188 */ 184 */
189 struct list_head plist; 185 struct list_head plist;
190}; 186};
187
188struct rapl_defaults {
189 int (*check_unit)(struct rapl_package *rp, int cpu);
190 void (*set_floor_freq)(struct rapl_domain *rd, bool mode);
191 u64 (*compute_time_window)(struct rapl_package *rp, u64 val,
192 bool to_raw);
193};
194static struct rapl_defaults *rapl_defaults;
195
196/* Sideband MBI registers */
197#define IOSF_CPU_POWER_BUDGET_CTL (0x2)
198
191#define PACKAGE_PLN_INT_SAVED BIT(0) 199#define PACKAGE_PLN_INT_SAVED BIT(0)
192#define MAX_PRIM_NAME (32) 200#define MAX_PRIM_NAME (32)
193 201
@@ -339,23 +347,13 @@ static int find_nr_power_limit(struct rapl_domain *rd)
339static int set_domain_enable(struct powercap_zone *power_zone, bool mode) 347static int set_domain_enable(struct powercap_zone *power_zone, bool mode)
340{ 348{
341 struct rapl_domain *rd = power_zone_to_rapl_domain(power_zone); 349 struct rapl_domain *rd = power_zone_to_rapl_domain(power_zone);
342 int nr_powerlimit;
343 350
344 if (rd->state & DOMAIN_STATE_BIOS_LOCKED) 351 if (rd->state & DOMAIN_STATE_BIOS_LOCKED)
345 return -EACCES; 352 return -EACCES;
353
346 get_online_cpus(); 354 get_online_cpus();
347 nr_powerlimit = find_nr_power_limit(rd);
348 /* here we activate/deactivate the hardware for power limiting */
349 rapl_write_data_raw(rd, PL1_ENABLE, mode); 355 rapl_write_data_raw(rd, PL1_ENABLE, mode);
350 /* always enable clamp such that p-state can go below OS requested 356 rapl_defaults->set_floor_freq(rd, mode);
351 * range. power capping priority over guranteed frequency.
352 */
353 rapl_write_data_raw(rd, PL1_CLAMP, mode);
354 /* some domains have pl2 */
355 if (nr_powerlimit > 1) {
356 rapl_write_data_raw(rd, PL2_ENABLE, mode);
357 rapl_write_data_raw(rd, PL2_CLAMP, mode);
358 }
359 put_online_cpus(); 357 put_online_cpus();
360 358
361 return 0; 359 return 0;
@@ -653,9 +651,7 @@ static void rapl_init_domains(struct rapl_package *rp)
653static u64 rapl_unit_xlate(int package, enum unit_type type, u64 value, 651static u64 rapl_unit_xlate(int package, enum unit_type type, u64 value,
654 int to_raw) 652 int to_raw)
655{ 653{
656 u64 divisor = 1; 654 u64 units = 1;
657 int scale = 1; /* scale to user friendly data without floating point */
658 u64 f, y; /* fraction and exp. used for time unit */
659 struct rapl_package *rp; 655 struct rapl_package *rp;
660 656
661 rp = find_package_by_id(package); 657 rp = find_package_by_id(package);
@@ -664,42 +660,24 @@ static u64 rapl_unit_xlate(int package, enum unit_type type, u64 value,
664 660
665 switch (type) { 661 switch (type) {
666 case POWER_UNIT: 662 case POWER_UNIT:
667 divisor = rp->power_unit_divisor; 663 units = rp->power_unit;
668 scale = POWER_UNIT_SCALE;
669 break; 664 break;
670 case ENERGY_UNIT: 665 case ENERGY_UNIT:
671 scale = ENERGY_UNIT_SCALE; 666 units = rp->energy_unit;
672 divisor = rp->energy_unit_divisor;
673 break; 667 break;
674 case TIME_UNIT: 668 case TIME_UNIT:
675 divisor = rp->time_unit_divisor; 669 return rapl_defaults->compute_time_window(rp, value, to_raw);
676 scale = TIME_UNIT_SCALE;
677 /* special processing based on 2^Y*(1+F)/4 = val/divisor, refer
678 * to Intel Software Developer's manual Vol. 3a, CH 14.7.4.
679 */
680 if (!to_raw) {
681 f = (value & 0x60) >> 5;
682 y = value & 0x1f;
683 value = (1 << y) * (4 + f) * scale / 4;
684 return div64_u64(value, divisor);
685 } else {
686 do_div(value, scale);
687 value *= divisor;
688 y = ilog2(value);
689 f = div64_u64(4 * (value - (1 << y)), 1 << y);
690 value = (y & 0x1f) | ((f & 0x3) << 5);
691 return value;
692 }
693 break;
694 case ARBITRARY_UNIT: 670 case ARBITRARY_UNIT:
695 default: 671 default:
696 return value; 672 return value;
697 }; 673 };
698 674
699 if (to_raw) 675 if (to_raw)
700 return div64_u64(value * divisor, scale); 676 return div64_u64(value, units);
701 else 677
702 return div64_u64(value * scale, divisor); 678 value *= units;
679
680 return value;
703} 681}
704 682
705/* in the order of enum rapl_primitives */ 683/* in the order of enum rapl_primitives */
@@ -833,12 +811,18 @@ static int rapl_write_data_raw(struct rapl_domain *rd,
833 return 0; 811 return 0;
834} 812}
835 813
836static const struct x86_cpu_id energy_unit_quirk_ids[] = { 814/*
837 { X86_VENDOR_INTEL, 6, 0x37},/* Valleyview */ 815 * Raw RAPL data stored in MSRs are in certain scales. We need to
838 {} 816 * convert them into standard units based on the units reported in
839}; 817 * the RAPL unit MSRs. This is specific to CPUs as the method to
840 818 * calculate units differ on different CPUs.
841static int rapl_check_unit(struct rapl_package *rp, int cpu) 819 * We convert the units to below format based on CPUs.
820 * i.e.
821 * energy unit: microJoules : Represented in microJoules by default
822 * power unit : microWatts : Represented in milliWatts by default
823 * time unit : microseconds: Represented in seconds by default
824 */
825static int rapl_check_unit_core(struct rapl_package *rp, int cpu)
842{ 826{
843 u64 msr_val; 827 u64 msr_val;
844 u32 value; 828 u32 value;
@@ -849,36 +833,47 @@ static int rapl_check_unit(struct rapl_package *rp, int cpu)
849 return -ENODEV; 833 return -ENODEV;
850 } 834 }
851 835
852 /* Raw RAPL data stored in MSRs are in certain scales. We need to
853 * convert them into standard units based on the divisors reported in
854 * the RAPL unit MSRs.
855 * i.e.
856 * energy unit: 1/enery_unit_divisor Joules
857 * power unit: 1/power_unit_divisor Watts
858 * time unit: 1/time_unit_divisor Seconds
859 */
860 value = (msr_val & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET; 836 value = (msr_val & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET;
861 /* some CPUs have different way to calculate energy unit */ 837 rp->energy_unit = 1000000 / (1 << value);
862 if (x86_match_cpu(energy_unit_quirk_ids))
863 rp->energy_unit_divisor = 1000000 / (1 << value);
864 else
865 rp->energy_unit_divisor = 1 << value;
866 838
867 value = (msr_val & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET; 839 value = (msr_val & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET;
868 rp->power_unit_divisor = 1 << value; 840 rp->power_unit = 1000000 / (1 << value);
869 841
870 value = (msr_val & TIME_UNIT_MASK) >> TIME_UNIT_OFFSET; 842 value = (msr_val & TIME_UNIT_MASK) >> TIME_UNIT_OFFSET;
871 rp->time_unit_divisor = 1 << value; 843 rp->time_unit = 1000000 / (1 << value);
872 844
873 pr_debug("Physical package %d units: energy=%d, time=%d, power=%d\n", 845 pr_debug("Core CPU package %d energy=%duJ, time=%dus, power=%duW\n",
874 rp->id, 846 rp->id, rp->energy_unit, rp->time_unit, rp->power_unit);
875 rp->energy_unit_divisor,
876 rp->time_unit_divisor,
877 rp->power_unit_divisor);
878 847
879 return 0; 848 return 0;
880} 849}
881 850
851static int rapl_check_unit_atom(struct rapl_package *rp, int cpu)
852{
853 u64 msr_val;
854 u32 value;
855
856 if (rdmsrl_safe_on_cpu(cpu, MSR_RAPL_POWER_UNIT, &msr_val)) {
857 pr_err("Failed to read power unit MSR 0x%x on CPU %d, exit.\n",
858 MSR_RAPL_POWER_UNIT, cpu);
859 return -ENODEV;
860 }
861 value = (msr_val & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET;
862 rp->energy_unit = 1 << value;
863
864 value = (msr_val & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET;
865 rp->power_unit = (1 << value) * 1000;
866
867 value = (msr_val & TIME_UNIT_MASK) >> TIME_UNIT_OFFSET;
868 rp->time_unit = 1000000 / (1 << value);
869
870 pr_debug("Atom package %d energy=%duJ, time=%dus, power=%duW\n",
871 rp->id, rp->energy_unit, rp->time_unit, rp->power_unit);
872
873 return 0;
874}
875
876
882/* REVISIT: 877/* REVISIT:
883 * When package power limit is set artificially low by RAPL, LVT 878 * When package power limit is set artificially low by RAPL, LVT
884 * thermal interrupt for package power limit should be ignored 879 * thermal interrupt for package power limit should be ignored
@@ -946,16 +941,107 @@ static void package_power_limit_irq_restore(int package_id)
946 wrmsr_on_cpu(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h); 941 wrmsr_on_cpu(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
947} 942}
948 943
944static void set_floor_freq_default(struct rapl_domain *rd, bool mode)
945{
946 int nr_powerlimit = find_nr_power_limit(rd);
947
948 /* always enable clamp such that p-state can go below OS requested
949 * range. power capping priority over guranteed frequency.
950 */
951 rapl_write_data_raw(rd, PL1_CLAMP, mode);
952
953 /* some domains have pl2 */
954 if (nr_powerlimit > 1) {
955 rapl_write_data_raw(rd, PL2_ENABLE, mode);
956 rapl_write_data_raw(rd, PL2_CLAMP, mode);
957 }
958}
959
960static void set_floor_freq_atom(struct rapl_domain *rd, bool enable)
961{
962 static u32 power_ctrl_orig_val;
963 u32 mdata;
964
965 if (!power_ctrl_orig_val)
966 iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_PMC_READ,
967 IOSF_CPU_POWER_BUDGET_CTL, &power_ctrl_orig_val);
968 mdata = power_ctrl_orig_val;
969 if (enable) {
970 mdata &= ~(0x7f << 8);
971 mdata |= 1 << 8;
972 }
973 iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_PMC_WRITE,
974 IOSF_CPU_POWER_BUDGET_CTL, mdata);
975}
976
977static u64 rapl_compute_time_window_core(struct rapl_package *rp, u64 value,
978 bool to_raw)
979{
980 u64 f, y; /* fraction and exp. used for time unit */
981
982 /*
983 * Special processing based on 2^Y*(1+F/4), refer
984 * to Intel Software Developer's manual Vol.3B: CH 14.9.3.
985 */
986 if (!to_raw) {
987 f = (value & 0x60) >> 5;
988 y = value & 0x1f;
989 value = (1 << y) * (4 + f) * rp->time_unit / 4;
990 } else {
991 do_div(value, rp->time_unit);
992 y = ilog2(value);
993 f = div64_u64(4 * (value - (1 << y)), 1 << y);
994 value = (y & 0x1f) | ((f & 0x3) << 5);
995 }
996 return value;
997}
998
999static u64 rapl_compute_time_window_atom(struct rapl_package *rp, u64 value,
1000 bool to_raw)
1001{
1002 /*
1003 * Atom time unit encoding is straight forward val * time_unit,
1004 * where time_unit is default to 1 sec. Never 0.
1005 */
1006 if (!to_raw)
1007 return (value) ? value *= rp->time_unit : rp->time_unit;
1008 else
1009 value = div64_u64(value, rp->time_unit);
1010
1011 return value;
1012}
1013
1014static const struct rapl_defaults rapl_defaults_core = {
1015 .check_unit = rapl_check_unit_core,
1016 .set_floor_freq = set_floor_freq_default,
1017 .compute_time_window = rapl_compute_time_window_core,
1018};
1019
1020static const struct rapl_defaults rapl_defaults_atom = {
1021 .check_unit = rapl_check_unit_atom,
1022 .set_floor_freq = set_floor_freq_atom,
1023 .compute_time_window = rapl_compute_time_window_atom,
1024};
1025
1026#define RAPL_CPU(_model, _ops) { \
1027 .vendor = X86_VENDOR_INTEL, \
1028 .family = 6, \
1029 .model = _model, \
1030 .driver_data = (kernel_ulong_t)&_ops, \
1031 }
1032
949static const struct x86_cpu_id rapl_ids[] = { 1033static const struct x86_cpu_id rapl_ids[] = {
950 { X86_VENDOR_INTEL, 6, 0x2a},/* Sandy Bridge */ 1034 RAPL_CPU(0x2a, rapl_defaults_core),/* Sandy Bridge */
951 { X86_VENDOR_INTEL, 6, 0x2d},/* Sandy Bridge EP */ 1035 RAPL_CPU(0x2d, rapl_defaults_core),/* Sandy Bridge EP */
952 { X86_VENDOR_INTEL, 6, 0x37},/* Valleyview */ 1036 RAPL_CPU(0x37, rapl_defaults_atom),/* Valleyview */
953 { X86_VENDOR_INTEL, 6, 0x3a},/* Ivy Bridge */ 1037 RAPL_CPU(0x3a, rapl_defaults_core),/* Ivy Bridge */
954 { X86_VENDOR_INTEL, 6, 0x3c},/* Haswell */ 1038 RAPL_CPU(0x3c, rapl_defaults_core),/* Haswell */
955 { X86_VENDOR_INTEL, 6, 0x3d},/* Broadwell */ 1039 RAPL_CPU(0x3d, rapl_defaults_core),/* Broadwell */
956 { X86_VENDOR_INTEL, 6, 0x3f},/* Haswell */ 1040 RAPL_CPU(0x3f, rapl_defaults_core),/* Haswell */
957 { X86_VENDOR_INTEL, 6, 0x45},/* Haswell ULT */ 1041 RAPL_CPU(0x45, rapl_defaults_core),/* Haswell ULT */
958 /* TODO: Add more CPU IDs after testing */ 1042 RAPL_CPU(0x4C, rapl_defaults_atom),/* Braswell */
1043 RAPL_CPU(0x4A, rapl_defaults_atom),/* Tangier */
1044 RAPL_CPU(0x5A, rapl_defaults_atom),/* Annidale */
959 {} 1045 {}
960}; 1046};
961MODULE_DEVICE_TABLE(x86cpu, rapl_ids); 1047MODULE_DEVICE_TABLE(x86cpu, rapl_ids);
@@ -1241,7 +1327,7 @@ static int rapl_detect_topology(void)
1241 1327
1242 /* check if the package contains valid domains */ 1328 /* check if the package contains valid domains */
1243 if (rapl_detect_domains(new_package, i) || 1329 if (rapl_detect_domains(new_package, i) ||
1244 rapl_check_unit(new_package, i)) { 1330 rapl_defaults->check_unit(new_package, i)) {
1245 kfree(new_package->domains); 1331 kfree(new_package->domains);
1246 kfree(new_package); 1332 kfree(new_package);
1247 /* free up the packages already initialized */ 1333 /* free up the packages already initialized */
@@ -1296,7 +1382,7 @@ static int rapl_add_package(int cpu)
1296 rp->nr_cpus = 1; 1382 rp->nr_cpus = 1;
1297 /* check if the package contains valid domains */ 1383 /* check if the package contains valid domains */
1298 if (rapl_detect_domains(rp, cpu) || 1384 if (rapl_detect_domains(rp, cpu) ||
1299 rapl_check_unit(rp, cpu)) { 1385 rapl_defaults->check_unit(rp, cpu)) {
1300 ret = -ENODEV; 1386 ret = -ENODEV;
1301 goto err_free_package; 1387 goto err_free_package;
1302 } 1388 }
@@ -1358,14 +1444,18 @@ static struct notifier_block rapl_cpu_notifier = {
1358static int __init rapl_init(void) 1444static int __init rapl_init(void)
1359{ 1445{
1360 int ret = 0; 1446 int ret = 0;
1447 const struct x86_cpu_id *id;
1361 1448
1362 if (!x86_match_cpu(rapl_ids)) { 1449 id = x86_match_cpu(rapl_ids);
1450 if (!id) {
1363 pr_err("driver does not support CPU family %d model %d\n", 1451 pr_err("driver does not support CPU family %d model %d\n",
1364 boot_cpu_data.x86, boot_cpu_data.x86_model); 1452 boot_cpu_data.x86, boot_cpu_data.x86_model);
1365 1453
1366 return -ENODEV; 1454 return -ENODEV;
1367 } 1455 }
1368 1456
1457 rapl_defaults = (struct rapl_defaults *)id->driver_data;
1458
1369 cpu_notifier_register_begin(); 1459 cpu_notifier_register_begin();
1370 1460
1371 /* prevent CPU hotplug during detection */ 1461 /* prevent CPU hotplug during detection */
diff --git a/drivers/regulator/88pm8607.c b/drivers/regulator/88pm8607.c
index 6d77dcd7dcf6..3fe47bd66153 100644
--- a/drivers/regulator/88pm8607.c
+++ b/drivers/regulator/88pm8607.c
@@ -330,7 +330,8 @@ static int pm8607_regulator_dt_init(struct platform_device *pdev,
330 for_each_child_of_node(nproot, np) { 330 for_each_child_of_node(nproot, np) {
331 if (!of_node_cmp(np->name, info->desc.name)) { 331 if (!of_node_cmp(np->name, info->desc.name)) {
332 config->init_data = 332 config->init_data =
333 of_get_regulator_init_data(&pdev->dev, np); 333 of_get_regulator_init_data(&pdev->dev, np,
334 &info->desc);
334 config->of_node = np; 335 config->of_node = np;
335 break; 336 break;
336 } 337 }
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 55d7b7b0f2e0..c3a60b57a865 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -521,6 +521,14 @@ config REGULATOR_RN5T618
521 help 521 help
522 Say y here to support the regulators found on Ricoh RN5T618 PMIC. 522 Say y here to support the regulators found on Ricoh RN5T618 PMIC.
523 523
524config REGULATOR_RT5033
525 tristate "Richtek RT5033 Regulators"
526 depends on MFD_RT5033
527 help
528 This adds support for voltage and current regulators in Richtek
529 RT5033 PMIC. The device supports multiple regulators like
530 current source, LDO and Buck.
531
524config REGULATOR_S2MPA01 532config REGULATOR_S2MPA01
525 tristate "Samsung S2MPA01 voltage regulator" 533 tristate "Samsung S2MPA01 voltage regulator"
526 depends on MFD_SEC_CORE 534 depends on MFD_SEC_CORE
@@ -529,13 +537,13 @@ config REGULATOR_S2MPA01
529 via I2C bus. S2MPA01 has 10 Bucks and 26 LDO outputs. 537 via I2C bus. S2MPA01 has 10 Bucks and 26 LDO outputs.
530 538
531config REGULATOR_S2MPS11 539config REGULATOR_S2MPS11
532 tristate "Samsung S2MPS11/S2MPS14/S2MPU02 voltage regulator" 540 tristate "Samsung S2MPS11/S2MPS13/S2MPS14/S2MPU02 voltage regulator"
533 depends on MFD_SEC_CORE 541 depends on MFD_SEC_CORE
534 help 542 help
535 This driver supports a Samsung S2MPS11/S2MPS14/S2MPU02 voltage output 543 This driver supports a Samsung S2MPS11/S2MPS13/S2MPS14/S2MPU02 voltage
536 regulator via I2C bus. The chip is comprised of high efficient Buck 544 output regulator via I2C bus. The chip is comprised of high efficient
537 converters including Dual-Phase Buck converter, Buck-Boost converter, 545 Buck converters including Dual-Phase Buck converter, Buck-Boost
538 various LDOs. 546 converter, various LDOs.
539 547
540config REGULATOR_S5M8767 548config REGULATOR_S5M8767
541 tristate "Samsung S5M8767A voltage regulator" 549 tristate "Samsung S5M8767A voltage regulator"
@@ -547,7 +555,7 @@ config REGULATOR_S5M8767
547 555
548config REGULATOR_SKY81452 556config REGULATOR_SKY81452
549 tristate "Skyworks Solutions SKY81452 voltage regulator" 557 tristate "Skyworks Solutions SKY81452 voltage regulator"
550 depends on SKY81452 558 depends on MFD_SKY81452
551 help 559 help
552 This driver supports Skyworks SKY81452 voltage output regulator 560 This driver supports Skyworks SKY81452 voltage output regulator
553 via I2C bus. SKY81452 has one voltage linear regulator can be 561 via I2C bus. SKY81452 has one voltage linear regulator can be
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 1029ed39c512..1f28ebfc6f3a 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -69,6 +69,7 @@ obj-$(CONFIG_REGULATOR_PCF50633) += pcf50633-regulator.o
69obj-$(CONFIG_REGULATOR_RC5T583) += rc5t583-regulator.o 69obj-$(CONFIG_REGULATOR_RC5T583) += rc5t583-regulator.o
70obj-$(CONFIG_REGULATOR_RK808) += rk808-regulator.o 70obj-$(CONFIG_REGULATOR_RK808) += rk808-regulator.o
71obj-$(CONFIG_REGULATOR_RN5T618) += rn5t618-regulator.o 71obj-$(CONFIG_REGULATOR_RN5T618) += rn5t618-regulator.o
72obj-$(CONFIG_REGULATOR_RT5033) += rt5033-regulator.o
72obj-$(CONFIG_REGULATOR_S2MPA01) += s2mpa01.o 73obj-$(CONFIG_REGULATOR_S2MPA01) += s2mpa01.o
73obj-$(CONFIG_REGULATOR_S2MPS11) += s2mps11.o 74obj-$(CONFIG_REGULATOR_S2MPS11) += s2mps11.o
74obj-$(CONFIG_REGULATOR_S5M8767) += s5m8767.o 75obj-$(CONFIG_REGULATOR_S5M8767) += s5m8767.o
diff --git a/drivers/regulator/act8865-regulator.c b/drivers/regulator/act8865-regulator.c
index afd06f92dfdf..9eec453b745d 100644
--- a/drivers/regulator/act8865-regulator.c
+++ b/drivers/regulator/act8865-regulator.c
@@ -61,6 +61,8 @@
61#define ACT8846_REG12_VSET 0xa0 61#define ACT8846_REG12_VSET 0xa0
62#define ACT8846_REG12_CTRL 0xa1 62#define ACT8846_REG12_CTRL 0xa1
63#define ACT8846_REG13_CTRL 0xb1 63#define ACT8846_REG13_CTRL 0xb1
64#define ACT8846_GLB_OFF_CTRL 0xc3
65#define ACT8846_OFF_SYSMASK 0x18
64 66
65/* 67/*
66 * ACT8865 Global Register Map. 68 * ACT8865 Global Register Map.
@@ -84,6 +86,7 @@
84#define ACT8865_LDO3_CTRL 0x61 86#define ACT8865_LDO3_CTRL 0x61
85#define ACT8865_LDO4_VSET 0x64 87#define ACT8865_LDO4_VSET 0x64
86#define ACT8865_LDO4_CTRL 0x65 88#define ACT8865_LDO4_CTRL 0x65
89#define ACT8865_MSTROFF 0x20
87 90
88/* 91/*
89 * Field Definitions. 92 * Field Definitions.
@@ -98,6 +101,8 @@
98 101
99struct act8865 { 102struct act8865 {
100 struct regmap *regmap; 103 struct regmap *regmap;
104 int off_reg;
105 int off_mask;
101}; 106};
102 107
103static const struct regmap_config act8865_regmap_config = { 108static const struct regmap_config act8865_regmap_config = {
@@ -275,6 +280,16 @@ static struct regulator_init_data
275 return NULL; 280 return NULL;
276} 281}
277 282
283static struct i2c_client *act8865_i2c_client;
284static void act8865_power_off(void)
285{
286 struct act8865 *act8865;
287
288 act8865 = i2c_get_clientdata(act8865_i2c_client);
289 regmap_write(act8865->regmap, act8865->off_reg, act8865->off_mask);
290 while (1);
291}
292
278static int act8865_pmic_probe(struct i2c_client *client, 293static int act8865_pmic_probe(struct i2c_client *client,
279 const struct i2c_device_id *i2c_id) 294 const struct i2c_device_id *i2c_id)
280{ 295{
@@ -285,6 +300,7 @@ static int act8865_pmic_probe(struct i2c_client *client,
285 int i, ret, num_regulators; 300 int i, ret, num_regulators;
286 struct act8865 *act8865; 301 struct act8865 *act8865;
287 unsigned long type; 302 unsigned long type;
303 int off_reg, off_mask;
288 304
289 pdata = dev_get_platdata(dev); 305 pdata = dev_get_platdata(dev);
290 306
@@ -304,10 +320,14 @@ static int act8865_pmic_probe(struct i2c_client *client,
304 case ACT8846: 320 case ACT8846:
305 regulators = act8846_regulators; 321 regulators = act8846_regulators;
306 num_regulators = ARRAY_SIZE(act8846_regulators); 322 num_regulators = ARRAY_SIZE(act8846_regulators);
323 off_reg = ACT8846_GLB_OFF_CTRL;
324 off_mask = ACT8846_OFF_SYSMASK;
307 break; 325 break;
308 case ACT8865: 326 case ACT8865:
309 regulators = act8865_regulators; 327 regulators = act8865_regulators;
310 num_regulators = ARRAY_SIZE(act8865_regulators); 328 num_regulators = ARRAY_SIZE(act8865_regulators);
329 off_reg = ACT8865_SYS_CTRL;
330 off_mask = ACT8865_MSTROFF;
311 break; 331 break;
312 default: 332 default:
313 dev_err(dev, "invalid device id %lu\n", type); 333 dev_err(dev, "invalid device id %lu\n", type);
@@ -345,6 +365,17 @@ static int act8865_pmic_probe(struct i2c_client *client,
345 return ret; 365 return ret;
346 } 366 }
347 367
368 if (of_device_is_system_power_controller(dev->of_node)) {
369 if (!pm_power_off) {
370 act8865_i2c_client = client;
371 act8865->off_reg = off_reg;
372 act8865->off_mask = off_mask;
373 pm_power_off = act8865_power_off;
374 } else {
375 dev_err(dev, "Failed to set poweroff capability, already defined\n");
376 }
377 }
378
348 /* Finally register devices */ 379 /* Finally register devices */
349 for (i = 0; i < num_regulators; i++) { 380 for (i = 0; i < num_regulators; i++) {
350 const struct regulator_desc *desc = &regulators[i]; 381 const struct regulator_desc *desc = &regulators[i];
diff --git a/drivers/regulator/anatop-regulator.c b/drivers/regulator/anatop-regulator.c
index 4f730af70e7c..3586571814b2 100644
--- a/drivers/regulator/anatop-regulator.c
+++ b/drivers/regulator/anatop-regulator.c
@@ -189,17 +189,18 @@ static int anatop_regulator_probe(struct platform_device *pdev)
189 int ret = 0; 189 int ret = 0;
190 u32 val; 190 u32 val;
191 191
192 initdata = of_get_regulator_init_data(dev, np);
193 sreg = devm_kzalloc(dev, sizeof(*sreg), GFP_KERNEL); 192 sreg = devm_kzalloc(dev, sizeof(*sreg), GFP_KERNEL);
194 if (!sreg) 193 if (!sreg)
195 return -ENOMEM; 194 return -ENOMEM;
196 sreg->initdata = initdata;
197 sreg->name = of_get_property(np, "regulator-name", NULL); 195 sreg->name = of_get_property(np, "regulator-name", NULL);
198 rdesc = &sreg->rdesc; 196 rdesc = &sreg->rdesc;
199 rdesc->name = sreg->name; 197 rdesc->name = sreg->name;
200 rdesc->type = REGULATOR_VOLTAGE; 198 rdesc->type = REGULATOR_VOLTAGE;
201 rdesc->owner = THIS_MODULE; 199 rdesc->owner = THIS_MODULE;
202 200
201 initdata = of_get_regulator_init_data(dev, np, rdesc);
202 sreg->initdata = initdata;
203
203 anatop_np = of_get_parent(np); 204 anatop_np = of_get_parent(np);
204 if (!anatop_np) 205 if (!anatop_np)
205 return -ENODEV; 206 return -ENODEV;
@@ -283,6 +284,19 @@ static int anatop_regulator_probe(struct platform_device *pdev)
283 sreg->sel = 0; 284 sreg->sel = 0;
284 sreg->bypass = true; 285 sreg->bypass = true;
285 } 286 }
287
288 /*
289 * In case vddpu was disabled by the bootloader, we need to set
290 * a sane default until imx6-cpufreq was probed and changes the
291 * voltage to the correct value. In this case we set 1.25V.
292 */
293 if (!sreg->sel && !strcmp(sreg->name, "vddpu"))
294 sreg->sel = 22;
295
296 if (!sreg->sel) {
297 dev_err(&pdev->dev, "Failed to read a valid default voltage selector.\n");
298 return -EINVAL;
299 }
286 } else { 300 } else {
287 rdesc->ops = &anatop_rops; 301 rdesc->ops = &anatop_rops;
288 } 302 }
diff --git a/drivers/regulator/arizona-ldo1.c b/drivers/regulator/arizona-ldo1.c
index 4c9db589f6c1..d071b2119a60 100644
--- a/drivers/regulator/arizona-ldo1.c
+++ b/drivers/regulator/arizona-ldo1.c
@@ -179,7 +179,8 @@ static const struct regulator_init_data arizona_ldo1_default = {
179}; 179};
180 180
181static int arizona_ldo1_of_get_pdata(struct arizona *arizona, 181static int arizona_ldo1_of_get_pdata(struct arizona *arizona,
182 struct regulator_config *config) 182 struct regulator_config *config,
183 const struct regulator_desc *desc)
183{ 184{
184 struct arizona_pdata *pdata = &arizona->pdata; 185 struct arizona_pdata *pdata = &arizona->pdata;
185 struct arizona_ldo1 *ldo1 = config->driver_data; 186 struct arizona_ldo1 *ldo1 = config->driver_data;
@@ -194,7 +195,8 @@ static int arizona_ldo1_of_get_pdata(struct arizona *arizona,
194 if (init_node) { 195 if (init_node) {
195 config->of_node = init_node; 196 config->of_node = init_node;
196 197
197 init_data = of_get_regulator_init_data(arizona->dev, init_node); 198 init_data = of_get_regulator_init_data(arizona->dev, init_node,
199 desc);
198 200
199 if (init_data) { 201 if (init_data) {
200 init_data->consumer_supplies = &ldo1->supply; 202 init_data->consumer_supplies = &ldo1->supply;
@@ -257,9 +259,11 @@ static int arizona_ldo1_probe(struct platform_device *pdev)
257 259
258 if (IS_ENABLED(CONFIG_OF)) { 260 if (IS_ENABLED(CONFIG_OF)) {
259 if (!dev_get_platdata(arizona->dev)) { 261 if (!dev_get_platdata(arizona->dev)) {
260 ret = arizona_ldo1_of_get_pdata(arizona, &config); 262 ret = arizona_ldo1_of_get_pdata(arizona, &config, desc);
261 if (ret < 0) 263 if (ret < 0)
262 return ret; 264 return ret;
265
266 config.ena_gpio_initialized = true;
263 } 267 }
264 } 268 }
265 269
diff --git a/drivers/regulator/arizona-micsupp.c b/drivers/regulator/arizona-micsupp.c
index ce9aca5f8ee7..c313ef4c3a2f 100644
--- a/drivers/regulator/arizona-micsupp.c
+++ b/drivers/regulator/arizona-micsupp.c
@@ -198,7 +198,8 @@ static const struct regulator_init_data arizona_micsupp_ext_default = {
198}; 198};
199 199
200static int arizona_micsupp_of_get_pdata(struct arizona *arizona, 200static int arizona_micsupp_of_get_pdata(struct arizona *arizona,
201 struct regulator_config *config) 201 struct regulator_config *config,
202 const struct regulator_desc *desc)
202{ 203{
203 struct arizona_pdata *pdata = &arizona->pdata; 204 struct arizona_pdata *pdata = &arizona->pdata;
204 struct arizona_micsupp *micsupp = config->driver_data; 205 struct arizona_micsupp *micsupp = config->driver_data;
@@ -210,7 +211,7 @@ static int arizona_micsupp_of_get_pdata(struct arizona *arizona,
210 if (np) { 211 if (np) {
211 config->of_node = np; 212 config->of_node = np;
212 213
213 init_data = of_get_regulator_init_data(arizona->dev, np); 214 init_data = of_get_regulator_init_data(arizona->dev, np, desc);
214 215
215 if (init_data) { 216 if (init_data) {
216 init_data->consumer_supplies = &micsupp->supply; 217 init_data->consumer_supplies = &micsupp->supply;
@@ -264,7 +265,8 @@ static int arizona_micsupp_probe(struct platform_device *pdev)
264 265
265 if (IS_ENABLED(CONFIG_OF)) { 266 if (IS_ENABLED(CONFIG_OF)) {
266 if (!dev_get_platdata(arizona->dev)) { 267 if (!dev_get_platdata(arizona->dev)) {
267 ret = arizona_micsupp_of_get_pdata(arizona, &config); 268 ret = arizona_micsupp_of_get_pdata(arizona, &config,
269 desc);
268 if (ret < 0) 270 if (ret < 0)
269 return ret; 271 return ret;
270 } 272 }
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index cd87c0c37034..e225711bb8bc 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -828,7 +828,7 @@ static void print_constraints(struct regulator_dev *rdev)
828 if (!count) 828 if (!count)
829 sprintf(buf, "no parameters"); 829 sprintf(buf, "no parameters");
830 830
831 rdev_info(rdev, "%s\n", buf); 831 rdev_dbg(rdev, "%s\n", buf);
832 832
833 if ((constraints->min_uV != constraints->max_uV) && 833 if ((constraints->min_uV != constraints->max_uV) &&
834 !(constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) 834 !(constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE))
@@ -1713,6 +1713,8 @@ static void regulator_ena_gpio_free(struct regulator_dev *rdev)
1713 gpiod_put(pin->gpiod); 1713 gpiod_put(pin->gpiod);
1714 list_del(&pin->list); 1714 list_del(&pin->list);
1715 kfree(pin); 1715 kfree(pin);
1716 rdev->ena_pin = NULL;
1717 return;
1716 } else { 1718 } else {
1717 pin->request_count--; 1719 pin->request_count--;
1718 } 1720 }
@@ -1976,9 +1978,18 @@ static int _regulator_disable(struct regulator_dev *rdev)
1976 1978
1977 /* we are last user */ 1979 /* we are last user */
1978 if (_regulator_can_change_status(rdev)) { 1980 if (_regulator_can_change_status(rdev)) {
1981 ret = _notifier_call_chain(rdev,
1982 REGULATOR_EVENT_PRE_DISABLE,
1983 NULL);
1984 if (ret & NOTIFY_STOP_MASK)
1985 return -EINVAL;
1986
1979 ret = _regulator_do_disable(rdev); 1987 ret = _regulator_do_disable(rdev);
1980 if (ret < 0) { 1988 if (ret < 0) {
1981 rdev_err(rdev, "failed to disable\n"); 1989 rdev_err(rdev, "failed to disable\n");
1990 _notifier_call_chain(rdev,
1991 REGULATOR_EVENT_ABORT_DISABLE,
1992 NULL);
1982 return ret; 1993 return ret;
1983 } 1994 }
1984 _notifier_call_chain(rdev, REGULATOR_EVENT_DISABLE, 1995 _notifier_call_chain(rdev, REGULATOR_EVENT_DISABLE,
@@ -2035,9 +2046,16 @@ static int _regulator_force_disable(struct regulator_dev *rdev)
2035{ 2046{
2036 int ret = 0; 2047 int ret = 0;
2037 2048
2049 ret = _notifier_call_chain(rdev, REGULATOR_EVENT_FORCE_DISABLE |
2050 REGULATOR_EVENT_PRE_DISABLE, NULL);
2051 if (ret & NOTIFY_STOP_MASK)
2052 return -EINVAL;
2053
2038 ret = _regulator_do_disable(rdev); 2054 ret = _regulator_do_disable(rdev);
2039 if (ret < 0) { 2055 if (ret < 0) {
2040 rdev_err(rdev, "failed to force disable\n"); 2056 rdev_err(rdev, "failed to force disable\n");
2057 _notifier_call_chain(rdev, REGULATOR_EVENT_FORCE_DISABLE |
2058 REGULATOR_EVENT_ABORT_DISABLE, NULL);
2041 return ret; 2059 return ret;
2042 } 2060 }
2043 2061
@@ -3650,7 +3668,8 @@ regulator_register(const struct regulator_desc *regulator_desc,
3650 3668
3651 dev_set_drvdata(&rdev->dev, rdev); 3669 dev_set_drvdata(&rdev->dev, rdev);
3652 3670
3653 if (config->ena_gpio && gpio_is_valid(config->ena_gpio)) { 3671 if ((config->ena_gpio || config->ena_gpio_initialized) &&
3672 gpio_is_valid(config->ena_gpio)) {
3654 ret = regulator_ena_gpio_request(rdev, config); 3673 ret = regulator_ena_gpio_request(rdev, config);
3655 if (ret != 0) { 3674 if (ret != 0) {
3656 rdev_err(rdev, "Failed to request enable GPIO%d: %d\n", 3675 rdev_err(rdev, "Failed to request enable GPIO%d: %d\n",
diff --git a/drivers/regulator/da9052-regulator.c b/drivers/regulator/da9052-regulator.c
index 00033625a09c..3945f1006d23 100644
--- a/drivers/regulator/da9052-regulator.c
+++ b/drivers/regulator/da9052-regulator.c
@@ -436,7 +436,8 @@ static int da9052_regulator_probe(struct platform_device *pdev)
436 if (!of_node_cmp(np->name, 436 if (!of_node_cmp(np->name,
437 regulator->info->reg_desc.name)) { 437 regulator->info->reg_desc.name)) {
438 config.init_data = of_get_regulator_init_data( 438 config.init_data = of_get_regulator_init_data(
439 &pdev->dev, np); 439 &pdev->dev, np,
440 &regulator->info->reg_desc);
440 config.of_node = np; 441 config.of_node = np;
441 break; 442 break;
442 } 443 }
diff --git a/drivers/regulator/da9063-regulator.c b/drivers/regulator/da9063-regulator.c
index 7c9461d13313..37dd42759ca9 100644
--- a/drivers/regulator/da9063-regulator.c
+++ b/drivers/regulator/da9063-regulator.c
@@ -867,17 +867,14 @@ static int da9063_regulator_probe(struct platform_device *pdev)
867 return irq; 867 return irq;
868 } 868 }
869 869
870 regulators->irq_ldo_lim = regmap_irq_get_virq(da9063->regmap_irq, irq); 870 ret = request_threaded_irq(irq,
871 if (regulators->irq_ldo_lim >= 0) { 871 NULL, da9063_ldo_lim_event,
872 ret = request_threaded_irq(regulators->irq_ldo_lim, 872 IRQF_TRIGGER_LOW | IRQF_ONESHOT,
873 NULL, da9063_ldo_lim_event, 873 "LDO_LIM", regulators);
874 IRQF_TRIGGER_LOW | IRQF_ONESHOT, 874 if (ret) {
875 "LDO_LIM", regulators); 875 dev_err(&pdev->dev,
876 if (ret) { 876 "Failed to request LDO_LIM IRQ.\n");
877 dev_err(&pdev->dev, 877 regulators->irq_ldo_lim = -ENXIO;
878 "Failed to request LDO_LIM IRQ.\n");
879 regulators->irq_ldo_lim = -ENXIO;
880 }
881 } 878 }
882 879
883 return 0; 880 return 0;
diff --git a/drivers/regulator/da9210-regulator.c b/drivers/regulator/da9210-regulator.c
index 7a320dd11c46..bc6100103f7f 100644
--- a/drivers/regulator/da9210-regulator.c
+++ b/drivers/regulator/da9210-regulator.c
@@ -147,7 +147,7 @@ static int da9210_i2c_probe(struct i2c_client *i2c,
147 147
148 config.dev = &i2c->dev; 148 config.dev = &i2c->dev;
149 config.init_data = pdata ? &pdata->da9210_constraints : 149 config.init_data = pdata ? &pdata->da9210_constraints :
150 of_get_regulator_init_data(dev, dev->of_node); 150 of_get_regulator_init_data(dev, dev->of_node, &da9210_reg);
151 config.driver_data = chip; 151 config.driver_data = chip;
152 config.regmap = chip->regmap; 152 config.regmap = chip->regmap;
153 config.of_node = dev->of_node; 153 config.of_node = dev->of_node;
diff --git a/drivers/regulator/dummy.c b/drivers/regulator/dummy.c
index 2436db9e2ca3..7aef9e4c6fbf 100644
--- a/drivers/regulator/dummy.c
+++ b/drivers/regulator/dummy.c
@@ -33,7 +33,7 @@ static struct regulator_init_data dummy_initdata = {
33 33
34static struct regulator_ops dummy_ops; 34static struct regulator_ops dummy_ops;
35 35
36static struct regulator_desc dummy_desc = { 36static const struct regulator_desc dummy_desc = {
37 .name = "regulator-dummy", 37 .name = "regulator-dummy",
38 .id = -1, 38 .id = -1,
39 .type = REGULATOR_VOLTAGE, 39 .type = REGULATOR_VOLTAGE,
diff --git a/drivers/regulator/fan53555.c b/drivers/regulator/fan53555.c
index f8e4257aef92..6c43ab2d5121 100644
--- a/drivers/regulator/fan53555.c
+++ b/drivers/regulator/fan53555.c
@@ -302,7 +302,8 @@ static struct regmap_config fan53555_regmap_config = {
302}; 302};
303 303
304static struct fan53555_platform_data *fan53555_parse_dt(struct device *dev, 304static struct fan53555_platform_data *fan53555_parse_dt(struct device *dev,
305 struct device_node *np) 305 struct device_node *np,
306 const struct regulator_desc *desc)
306{ 307{
307 struct fan53555_platform_data *pdata; 308 struct fan53555_platform_data *pdata;
308 int ret; 309 int ret;
@@ -312,7 +313,7 @@ static struct fan53555_platform_data *fan53555_parse_dt(struct device *dev,
312 if (!pdata) 313 if (!pdata)
313 return NULL; 314 return NULL;
314 315
315 pdata->regulator = of_get_regulator_init_data(dev, np); 316 pdata->regulator = of_get_regulator_init_data(dev, np, desc);
316 317
317 ret = of_property_read_u32(np, "fcs,suspend-voltage-selector", 318 ret = of_property_read_u32(np, "fcs,suspend-voltage-selector",
318 &tmp); 319 &tmp);
@@ -347,20 +348,20 @@ static int fan53555_regulator_probe(struct i2c_client *client,
347 unsigned int val; 348 unsigned int val;
348 int ret; 349 int ret;
349 350
351 di = devm_kzalloc(&client->dev, sizeof(struct fan53555_device_info),
352 GFP_KERNEL);
353 if (!di)
354 return -ENOMEM;
355
350 pdata = dev_get_platdata(&client->dev); 356 pdata = dev_get_platdata(&client->dev);
351 if (!pdata) 357 if (!pdata)
352 pdata = fan53555_parse_dt(&client->dev, np); 358 pdata = fan53555_parse_dt(&client->dev, np, &di->desc);
353 359
354 if (!pdata || !pdata->regulator) { 360 if (!pdata || !pdata->regulator) {
355 dev_err(&client->dev, "Platform data not found!\n"); 361 dev_err(&client->dev, "Platform data not found!\n");
356 return -ENODEV; 362 return -ENODEV;
357 } 363 }
358 364
359 di = devm_kzalloc(&client->dev, sizeof(struct fan53555_device_info),
360 GFP_KERNEL);
361 if (!di)
362 return -ENOMEM;
363
364 di->regulator = pdata->regulator; 365 di->regulator = pdata->regulator;
365 if (client->dev.of_node) { 366 if (client->dev.of_node) {
366 const struct of_device_id *match; 367 const struct of_device_id *match;
diff --git a/drivers/regulator/fixed.c b/drivers/regulator/fixed.c
index 354105eff1f8..d21da27c0eb6 100644
--- a/drivers/regulator/fixed.c
+++ b/drivers/regulator/fixed.c
@@ -40,13 +40,15 @@ struct fixed_voltage_data {
40/** 40/**
41 * of_get_fixed_voltage_config - extract fixed_voltage_config structure info 41 * of_get_fixed_voltage_config - extract fixed_voltage_config structure info
42 * @dev: device requesting for fixed_voltage_config 42 * @dev: device requesting for fixed_voltage_config
43 * @desc: regulator description
43 * 44 *
44 * Populates fixed_voltage_config structure by extracting data from device 45 * Populates fixed_voltage_config structure by extracting data from device
45 * tree node, returns a pointer to the populated structure of NULL if memory 46 * tree node, returns a pointer to the populated structure of NULL if memory
46 * alloc fails. 47 * alloc fails.
47 */ 48 */
48static struct fixed_voltage_config * 49static struct fixed_voltage_config *
49of_get_fixed_voltage_config(struct device *dev) 50of_get_fixed_voltage_config(struct device *dev,
51 const struct regulator_desc *desc)
50{ 52{
51 struct fixed_voltage_config *config; 53 struct fixed_voltage_config *config;
52 struct device_node *np = dev->of_node; 54 struct device_node *np = dev->of_node;
@@ -57,7 +59,7 @@ of_get_fixed_voltage_config(struct device *dev)
57 if (!config) 59 if (!config)
58 return ERR_PTR(-ENOMEM); 60 return ERR_PTR(-ENOMEM);
59 61
60 config->init_data = of_get_regulator_init_data(dev, dev->of_node); 62 config->init_data = of_get_regulator_init_data(dev, dev->of_node, desc);
61 if (!config->init_data) 63 if (!config->init_data)
62 return ERR_PTR(-EINVAL); 64 return ERR_PTR(-EINVAL);
63 65
@@ -112,8 +114,14 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev)
112 struct regulator_config cfg = { }; 114 struct regulator_config cfg = { };
113 int ret; 115 int ret;
114 116
117 drvdata = devm_kzalloc(&pdev->dev, sizeof(struct fixed_voltage_data),
118 GFP_KERNEL);
119 if (!drvdata)
120 return -ENOMEM;
121
115 if (pdev->dev.of_node) { 122 if (pdev->dev.of_node) {
116 config = of_get_fixed_voltage_config(&pdev->dev); 123 config = of_get_fixed_voltage_config(&pdev->dev,
124 &drvdata->desc);
117 if (IS_ERR(config)) 125 if (IS_ERR(config))
118 return PTR_ERR(config); 126 return PTR_ERR(config);
119 } else { 127 } else {
@@ -123,11 +131,6 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev)
123 if (!config) 131 if (!config)
124 return -ENOMEM; 132 return -ENOMEM;
125 133
126 drvdata = devm_kzalloc(&pdev->dev, sizeof(struct fixed_voltage_data),
127 GFP_KERNEL);
128 if (!drvdata)
129 return -ENOMEM;
130
131 drvdata->desc.name = devm_kstrdup(&pdev->dev, 134 drvdata->desc.name = devm_kstrdup(&pdev->dev,
132 config->supply_name, 135 config->supply_name,
133 GFP_KERNEL); 136 GFP_KERNEL);
@@ -157,8 +160,11 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev)
157 160
158 drvdata->desc.fixed_uV = config->microvolts; 161 drvdata->desc.fixed_uV = config->microvolts;
159 162
160 if (config->gpio >= 0) 163 if (gpio_is_valid(config->gpio)) {
161 cfg.ena_gpio = config->gpio; 164 cfg.ena_gpio = config->gpio;
165 if (pdev->dev.of_node)
166 cfg.ena_gpio_initialized = true;
167 }
162 cfg.ena_gpio_invert = !config->enable_high; 168 cfg.ena_gpio_invert = !config->enable_high;
163 if (config->enabled_at_boot) { 169 if (config->enabled_at_boot) {
164 if (config->enable_high) 170 if (config->enable_high)
diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c
index 989b23b377c0..c888a9a9482c 100644
--- a/drivers/regulator/gpio-regulator.c
+++ b/drivers/regulator/gpio-regulator.c
@@ -133,7 +133,8 @@ static struct regulator_ops gpio_regulator_voltage_ops = {
133}; 133};
134 134
135static struct gpio_regulator_config * 135static struct gpio_regulator_config *
136of_get_gpio_regulator_config(struct device *dev, struct device_node *np) 136of_get_gpio_regulator_config(struct device *dev, struct device_node *np,
137 const struct regulator_desc *desc)
137{ 138{
138 struct gpio_regulator_config *config; 139 struct gpio_regulator_config *config;
139 const char *regtype; 140 const char *regtype;
@@ -146,7 +147,7 @@ of_get_gpio_regulator_config(struct device *dev, struct device_node *np)
146 if (!config) 147 if (!config)
147 return ERR_PTR(-ENOMEM); 148 return ERR_PTR(-ENOMEM);
148 149
149 config->init_data = of_get_regulator_init_data(dev, np); 150 config->init_data = of_get_regulator_init_data(dev, np, desc);
150 if (!config->init_data) 151 if (!config->init_data)
151 return ERR_PTR(-EINVAL); 152 return ERR_PTR(-EINVAL);
152 153
@@ -162,34 +163,41 @@ of_get_gpio_regulator_config(struct device *dev, struct device_node *np)
162 163
163 config->enable_gpio = of_get_named_gpio(np, "enable-gpio", 0); 164 config->enable_gpio = of_get_named_gpio(np, "enable-gpio", 0);
164 165
165 /* Fetch GPIOs. */ 166 /* Fetch GPIOs. - optional property*/
166 config->nr_gpios = of_gpio_count(np); 167 ret = of_gpio_count(np);
167 168 if ((ret < 0) && (ret != -ENOENT))
168 config->gpios = devm_kzalloc(dev, 169 return ERR_PTR(ret);
169 sizeof(struct gpio) * config->nr_gpios, 170
170 GFP_KERNEL); 171 if (ret > 0) {
171 if (!config->gpios) 172 config->nr_gpios = ret;
172 return ERR_PTR(-ENOMEM); 173 config->gpios = devm_kzalloc(dev,
173 174 sizeof(struct gpio) * config->nr_gpios,
174 proplen = of_property_count_u32_elems(np, "gpios-states"); 175 GFP_KERNEL);
175 /* optional property */ 176 if (!config->gpios)
176 if (proplen < 0) 177 return ERR_PTR(-ENOMEM);
177 proplen = 0; 178
178 179 proplen = of_property_count_u32_elems(np, "gpios-states");
179 if (proplen > 0 && proplen != config->nr_gpios) { 180 /* optional property */
180 dev_warn(dev, "gpios <-> gpios-states mismatch\n"); 181 if (proplen < 0)
181 proplen = 0; 182 proplen = 0;
182 } 183
184 if (proplen > 0 && proplen != config->nr_gpios) {
185 dev_warn(dev, "gpios <-> gpios-states mismatch\n");
186 proplen = 0;
187 }
183 188
184 for (i = 0; i < config->nr_gpios; i++) { 189 for (i = 0; i < config->nr_gpios; i++) {
185 gpio = of_get_named_gpio(np, "gpios", i); 190 gpio = of_get_named_gpio(np, "gpios", i);
186 if (gpio < 0) 191 if (gpio < 0)
187 break; 192 break;
188 config->gpios[i].gpio = gpio; 193 config->gpios[i].gpio = gpio;
189 if (proplen > 0) { 194 if (proplen > 0) {
190 of_property_read_u32_index(np, "gpios-states", i, &ret); 195 of_property_read_u32_index(np, "gpios-states",
191 if (ret) 196 i, &ret);
192 config->gpios[i].flags = GPIOF_OUT_INIT_HIGH; 197 if (ret)
198 config->gpios[i].flags =
199 GPIOF_OUT_INIT_HIGH;
200 }
193 } 201 }
194 } 202 }
195 203
@@ -243,17 +251,18 @@ static int gpio_regulator_probe(struct platform_device *pdev)
243 struct regulator_config cfg = { }; 251 struct regulator_config cfg = { };
244 int ptr, ret, state; 252 int ptr, ret, state;
245 253
246 if (np) {
247 config = of_get_gpio_regulator_config(&pdev->dev, np);
248 if (IS_ERR(config))
249 return PTR_ERR(config);
250 }
251
252 drvdata = devm_kzalloc(&pdev->dev, sizeof(struct gpio_regulator_data), 254 drvdata = devm_kzalloc(&pdev->dev, sizeof(struct gpio_regulator_data),
253 GFP_KERNEL); 255 GFP_KERNEL);
254 if (drvdata == NULL) 256 if (drvdata == NULL)
255 return -ENOMEM; 257 return -ENOMEM;
256 258
259 if (np) {
260 config = of_get_gpio_regulator_config(&pdev->dev, np,
261 &drvdata->desc);
262 if (IS_ERR(config))
263 return PTR_ERR(config);
264 }
265
257 drvdata->desc.name = kstrdup(config->supply_name, GFP_KERNEL); 266 drvdata->desc.name = kstrdup(config->supply_name, GFP_KERNEL);
258 if (drvdata->desc.name == NULL) { 267 if (drvdata->desc.name == NULL) {
259 dev_err(&pdev->dev, "Failed to allocate supply name\n"); 268 dev_err(&pdev->dev, "Failed to allocate supply name\n");
@@ -261,13 +270,23 @@ static int gpio_regulator_probe(struct platform_device *pdev)
261 goto err; 270 goto err;
262 } 271 }
263 272
264 drvdata->gpios = kmemdup(config->gpios, 273 if (config->nr_gpios != 0) {
265 config->nr_gpios * sizeof(struct gpio), 274 drvdata->gpios = kmemdup(config->gpios,
266 GFP_KERNEL); 275 config->nr_gpios * sizeof(struct gpio),
267 if (drvdata->gpios == NULL) { 276 GFP_KERNEL);
268 dev_err(&pdev->dev, "Failed to allocate gpio data\n"); 277 if (drvdata->gpios == NULL) {
269 ret = -ENOMEM; 278 dev_err(&pdev->dev, "Failed to allocate gpio data\n");
270 goto err_name; 279 ret = -ENOMEM;
280 goto err_name;
281 }
282
283 drvdata->nr_gpios = config->nr_gpios;
284 ret = gpio_request_array(drvdata->gpios, drvdata->nr_gpios);
285 if (ret) {
286 dev_err(&pdev->dev,
287 "Could not obtain regulator setting GPIOs: %d\n", ret);
288 goto err_memstate;
289 }
271 } 290 }
272 291
273 drvdata->states = kmemdup(config->states, 292 drvdata->states = kmemdup(config->states,
@@ -301,14 +320,6 @@ static int gpio_regulator_probe(struct platform_device *pdev)
301 goto err_memgpio; 320 goto err_memgpio;
302 } 321 }
303 322
304 drvdata->nr_gpios = config->nr_gpios;
305 ret = gpio_request_array(drvdata->gpios, drvdata->nr_gpios);
306 if (ret) {
307 dev_err(&pdev->dev,
308 "Could not obtain regulator setting GPIOs: %d\n", ret);
309 goto err_memstate;
310 }
311
312 /* build initial state from gpio init data. */ 323 /* build initial state from gpio init data. */
313 state = 0; 324 state = 0;
314 for (ptr = 0; ptr < drvdata->nr_gpios; ptr++) { 325 for (ptr = 0; ptr < drvdata->nr_gpios; ptr++) {
@@ -322,8 +333,10 @@ static int gpio_regulator_probe(struct platform_device *pdev)
322 cfg.driver_data = drvdata; 333 cfg.driver_data = drvdata;
323 cfg.of_node = np; 334 cfg.of_node = np;
324 335
325 if (config->enable_gpio >= 0) 336 if (gpio_is_valid(config->enable_gpio)) {
326 cfg.ena_gpio = config->enable_gpio; 337 cfg.ena_gpio = config->enable_gpio;
338 cfg.ena_gpio_initialized = true;
339 }
327 cfg.ena_gpio_invert = !config->enable_high; 340 cfg.ena_gpio_invert = !config->enable_high;
328 if (config->enabled_at_boot) { 341 if (config->enabled_at_boot) {
329 if (config->enable_high) 342 if (config->enable_high)
diff --git a/drivers/regulator/max77686.c b/drivers/regulator/max77686.c
index f69320e1738f..871b96bcd2d0 100644
--- a/drivers/regulator/max77686.c
+++ b/drivers/regulator/max77686.c
@@ -45,6 +45,23 @@
45#define MAX77686_DVS_MINUV 600000 45#define MAX77686_DVS_MINUV 600000
46#define MAX77686_DVS_UVSTEP 12500 46#define MAX77686_DVS_UVSTEP 12500
47 47
48/*
49 * Values used for configuring LDOs and bucks.
50 * Forcing low power mode: LDO1, 3-5, 9, 13, 17-26
51 */
52#define MAX77686_LDO_LOWPOWER 0x1
53/*
54 * On/off controlled by PWRREQ:
55 * - LDO2, 6-8, 10-12, 14-16
56 * - buck[1234]
57 */
58#define MAX77686_OFF_PWRREQ 0x1
59/* Low power mode controlled by PWRREQ: All LDOs */
60#define MAX77686_LDO_LOWPOWER_PWRREQ 0x2
61/* Forcing low power mode: buck[234] */
62#define MAX77686_BUCK_LOWPOWER 0x2
63#define MAX77686_NORMAL 0x3
64
48#define MAX77686_OPMODE_SHIFT 6 65#define MAX77686_OPMODE_SHIFT 6
49#define MAX77686_OPMODE_BUCK234_SHIFT 4 66#define MAX77686_OPMODE_BUCK234_SHIFT 4
50#define MAX77686_OPMODE_MASK 0x3 67#define MAX77686_OPMODE_MASK 0x3
@@ -65,23 +82,36 @@ enum max77686_ramp_rate {
65}; 82};
66 83
67struct max77686_data { 84struct max77686_data {
85 /* Array indexed by regulator id */
68 unsigned int opmode[MAX77686_REGULATORS]; 86 unsigned int opmode[MAX77686_REGULATORS];
69}; 87};
70 88
71/* Some BUCKS supports Normal[ON/OFF] mode during suspend */ 89static unsigned int max77686_get_opmode_shift(int id)
72static int max77686_buck_set_suspend_disable(struct regulator_dev *rdev)
73{ 90{
74 unsigned int val; 91 switch (id) {
92 case MAX77686_BUCK1:
93 case MAX77686_BUCK5 ... MAX77686_BUCK9:
94 return 0;
95 case MAX77686_BUCK2 ... MAX77686_BUCK4:
96 return MAX77686_OPMODE_BUCK234_SHIFT;
97 default:
98 /* all LDOs */
99 return MAX77686_OPMODE_SHIFT;
100 }
101}
102
103/* Some BUCKs and LDOs supports Normal[ON/OFF] mode during suspend */
104static int max77686_set_suspend_disable(struct regulator_dev *rdev)
105{
106 unsigned int val, shift;
75 struct max77686_data *max77686 = rdev_get_drvdata(rdev); 107 struct max77686_data *max77686 = rdev_get_drvdata(rdev);
76 int ret, id = rdev_get_id(rdev); 108 int ret, id = rdev_get_id(rdev);
77 109
78 if (id == MAX77686_BUCK1) 110 shift = max77686_get_opmode_shift(id);
79 val = 0x1; 111 val = MAX77686_OFF_PWRREQ;
80 else
81 val = 0x1 << MAX77686_OPMODE_BUCK234_SHIFT;
82 112
83 ret = regmap_update_bits(rdev->regmap, rdev->desc->enable_reg, 113 ret = regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
84 rdev->desc->enable_mask, val); 114 rdev->desc->enable_mask, val << shift);
85 if (ret) 115 if (ret)
86 return ret; 116 return ret;
87 117
@@ -103,10 +133,10 @@ static int max77686_set_suspend_mode(struct regulator_dev *rdev,
103 133
104 switch (mode) { 134 switch (mode) {
105 case REGULATOR_MODE_IDLE: /* ON in LP Mode */ 135 case REGULATOR_MODE_IDLE: /* ON in LP Mode */
106 val = 0x2 << MAX77686_OPMODE_SHIFT; 136 val = MAX77686_LDO_LOWPOWER_PWRREQ;
107 break; 137 break;
108 case REGULATOR_MODE_NORMAL: /* ON in Normal Mode */ 138 case REGULATOR_MODE_NORMAL: /* ON in Normal Mode */
109 val = 0x3 << MAX77686_OPMODE_SHIFT; 139 val = MAX77686_NORMAL;
110 break; 140 break;
111 default: 141 default:
112 pr_warn("%s: regulator_suspend_mode : 0x%x not supported\n", 142 pr_warn("%s: regulator_suspend_mode : 0x%x not supported\n",
@@ -115,7 +145,8 @@ static int max77686_set_suspend_mode(struct regulator_dev *rdev,
115 } 145 }
116 146
117 ret = regmap_update_bits(rdev->regmap, rdev->desc->enable_reg, 147 ret = regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
118 rdev->desc->enable_mask, val); 148 rdev->desc->enable_mask,
149 val << MAX77686_OPMODE_SHIFT);
119 if (ret) 150 if (ret)
120 return ret; 151 return ret;
121 152
@@ -133,13 +164,13 @@ static int max77686_ldo_set_suspend_mode(struct regulator_dev *rdev,
133 164
134 switch (mode) { 165 switch (mode) {
135 case REGULATOR_MODE_STANDBY: /* switch off */ 166 case REGULATOR_MODE_STANDBY: /* switch off */
136 val = 0x1 << MAX77686_OPMODE_SHIFT; 167 val = MAX77686_OFF_PWRREQ;
137 break; 168 break;
138 case REGULATOR_MODE_IDLE: /* ON in LP Mode */ 169 case REGULATOR_MODE_IDLE: /* ON in LP Mode */
139 val = 0x2 << MAX77686_OPMODE_SHIFT; 170 val = MAX77686_LDO_LOWPOWER_PWRREQ;
140 break; 171 break;
141 case REGULATOR_MODE_NORMAL: /* ON in Normal Mode */ 172 case REGULATOR_MODE_NORMAL: /* ON in Normal Mode */
142 val = 0x3 << MAX77686_OPMODE_SHIFT; 173 val = MAX77686_NORMAL;
143 break; 174 break;
144 default: 175 default:
145 pr_warn("%s: regulator_suspend_mode : 0x%x not supported\n", 176 pr_warn("%s: regulator_suspend_mode : 0x%x not supported\n",
@@ -148,7 +179,8 @@ static int max77686_ldo_set_suspend_mode(struct regulator_dev *rdev,
148 } 179 }
149 180
150 ret = regmap_update_bits(rdev->regmap, rdev->desc->enable_reg, 181 ret = regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
151 rdev->desc->enable_mask, val); 182 rdev->desc->enable_mask,
183 val << MAX77686_OPMODE_SHIFT);
152 if (ret) 184 if (ret)
153 return ret; 185 return ret;
154 186
@@ -159,10 +191,17 @@ static int max77686_ldo_set_suspend_mode(struct regulator_dev *rdev,
159static int max77686_enable(struct regulator_dev *rdev) 191static int max77686_enable(struct regulator_dev *rdev)
160{ 192{
161 struct max77686_data *max77686 = rdev_get_drvdata(rdev); 193 struct max77686_data *max77686 = rdev_get_drvdata(rdev);
194 unsigned int shift;
195 int id = rdev_get_id(rdev);
196
197 shift = max77686_get_opmode_shift(id);
198
199 if (max77686->opmode[id] == MAX77686_OFF_PWRREQ)
200 max77686->opmode[id] = MAX77686_NORMAL;
162 201
163 return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg, 202 return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
164 rdev->desc->enable_mask, 203 rdev->desc->enable_mask,
165 max77686->opmode[rdev_get_id(rdev)]); 204 max77686->opmode[id] << shift);
166} 205}
167 206
168static int max77686_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay) 207static int max77686_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
@@ -212,6 +251,7 @@ static struct regulator_ops max77686_ldo_ops = {
212 .set_voltage_sel = regulator_set_voltage_sel_regmap, 251 .set_voltage_sel = regulator_set_voltage_sel_regmap,
213 .set_voltage_time_sel = regulator_set_voltage_time_sel, 252 .set_voltage_time_sel = regulator_set_voltage_time_sel,
214 .set_suspend_mode = max77686_ldo_set_suspend_mode, 253 .set_suspend_mode = max77686_ldo_set_suspend_mode,
254 .set_suspend_disable = max77686_set_suspend_disable,
215}; 255};
216 256
217static struct regulator_ops max77686_buck1_ops = { 257static struct regulator_ops max77686_buck1_ops = {
@@ -223,7 +263,7 @@ static struct regulator_ops max77686_buck1_ops = {
223 .get_voltage_sel = regulator_get_voltage_sel_regmap, 263 .get_voltage_sel = regulator_get_voltage_sel_regmap,
224 .set_voltage_sel = regulator_set_voltage_sel_regmap, 264 .set_voltage_sel = regulator_set_voltage_sel_regmap,
225 .set_voltage_time_sel = regulator_set_voltage_time_sel, 265 .set_voltage_time_sel = regulator_set_voltage_time_sel,
226 .set_suspend_disable = max77686_buck_set_suspend_disable, 266 .set_suspend_disable = max77686_set_suspend_disable,
227}; 267};
228 268
229static struct regulator_ops max77686_buck_dvs_ops = { 269static struct regulator_ops max77686_buck_dvs_ops = {
@@ -236,11 +276,13 @@ static struct regulator_ops max77686_buck_dvs_ops = {
236 .set_voltage_sel = regulator_set_voltage_sel_regmap, 276 .set_voltage_sel = regulator_set_voltage_sel_regmap,
237 .set_voltage_time_sel = regulator_set_voltage_time_sel, 277 .set_voltage_time_sel = regulator_set_voltage_time_sel,
238 .set_ramp_delay = max77686_set_ramp_delay, 278 .set_ramp_delay = max77686_set_ramp_delay,
239 .set_suspend_disable = max77686_buck_set_suspend_disable, 279 .set_suspend_disable = max77686_set_suspend_disable,
240}; 280};
241 281
242#define regulator_desc_ldo(num) { \ 282#define regulator_desc_ldo(num) { \
243 .name = "LDO"#num, \ 283 .name = "LDO"#num, \
284 .of_match = of_match_ptr("LDO"#num), \
285 .regulators_node = of_match_ptr("voltage-regulators"), \
244 .id = MAX77686_LDO##num, \ 286 .id = MAX77686_LDO##num, \
245 .ops = &max77686_ops, \ 287 .ops = &max77686_ops, \
246 .type = REGULATOR_VOLTAGE, \ 288 .type = REGULATOR_VOLTAGE, \
@@ -257,6 +299,8 @@ static struct regulator_ops max77686_buck_dvs_ops = {
257} 299}
258#define regulator_desc_lpm_ldo(num) { \ 300#define regulator_desc_lpm_ldo(num) { \
259 .name = "LDO"#num, \ 301 .name = "LDO"#num, \
302 .of_match = of_match_ptr("LDO"#num), \
303 .regulators_node = of_match_ptr("voltage-regulators"), \
260 .id = MAX77686_LDO##num, \ 304 .id = MAX77686_LDO##num, \
261 .ops = &max77686_ldo_ops, \ 305 .ops = &max77686_ldo_ops, \
262 .type = REGULATOR_VOLTAGE, \ 306 .type = REGULATOR_VOLTAGE, \
@@ -273,6 +317,8 @@ static struct regulator_ops max77686_buck_dvs_ops = {
273} 317}
274#define regulator_desc_ldo_low(num) { \ 318#define regulator_desc_ldo_low(num) { \
275 .name = "LDO"#num, \ 319 .name = "LDO"#num, \
320 .of_match = of_match_ptr("LDO"#num), \
321 .regulators_node = of_match_ptr("voltage-regulators"), \
276 .id = MAX77686_LDO##num, \ 322 .id = MAX77686_LDO##num, \
277 .ops = &max77686_ldo_ops, \ 323 .ops = &max77686_ldo_ops, \
278 .type = REGULATOR_VOLTAGE, \ 324 .type = REGULATOR_VOLTAGE, \
@@ -289,6 +335,8 @@ static struct regulator_ops max77686_buck_dvs_ops = {
289} 335}
290#define regulator_desc_ldo1_low(num) { \ 336#define regulator_desc_ldo1_low(num) { \
291 .name = "LDO"#num, \ 337 .name = "LDO"#num, \
338 .of_match = of_match_ptr("LDO"#num), \
339 .regulators_node = of_match_ptr("voltage-regulators"), \
292 .id = MAX77686_LDO##num, \ 340 .id = MAX77686_LDO##num, \
293 .ops = &max77686_ops, \ 341 .ops = &max77686_ops, \
294 .type = REGULATOR_VOLTAGE, \ 342 .type = REGULATOR_VOLTAGE, \
@@ -305,6 +353,8 @@ static struct regulator_ops max77686_buck_dvs_ops = {
305} 353}
306#define regulator_desc_buck(num) { \ 354#define regulator_desc_buck(num) { \
307 .name = "BUCK"#num, \ 355 .name = "BUCK"#num, \
356 .of_match = of_match_ptr("BUCK"#num), \
357 .regulators_node = of_match_ptr("voltage-regulators"), \
308 .id = MAX77686_BUCK##num, \ 358 .id = MAX77686_BUCK##num, \
309 .ops = &max77686_ops, \ 359 .ops = &max77686_ops, \
310 .type = REGULATOR_VOLTAGE, \ 360 .type = REGULATOR_VOLTAGE, \
@@ -320,6 +370,8 @@ static struct regulator_ops max77686_buck_dvs_ops = {
320} 370}
321#define regulator_desc_buck1(num) { \ 371#define regulator_desc_buck1(num) { \
322 .name = "BUCK"#num, \ 372 .name = "BUCK"#num, \
373 .of_match = of_match_ptr("BUCK"#num), \
374 .regulators_node = of_match_ptr("voltage-regulators"), \
323 .id = MAX77686_BUCK##num, \ 375 .id = MAX77686_BUCK##num, \
324 .ops = &max77686_buck1_ops, \ 376 .ops = &max77686_buck1_ops, \
325 .type = REGULATOR_VOLTAGE, \ 377 .type = REGULATOR_VOLTAGE, \
@@ -335,6 +387,8 @@ static struct regulator_ops max77686_buck_dvs_ops = {
335} 387}
336#define regulator_desc_buck_dvs(num) { \ 388#define regulator_desc_buck_dvs(num) { \
337 .name = "BUCK"#num, \ 389 .name = "BUCK"#num, \
390 .of_match = of_match_ptr("BUCK"#num), \
391 .regulators_node = of_match_ptr("voltage-regulators"), \
338 .id = MAX77686_BUCK##num, \ 392 .id = MAX77686_BUCK##num, \
339 .ops = &max77686_buck_dvs_ops, \ 393 .ops = &max77686_buck_dvs_ops, \
340 .type = REGULATOR_VOLTAGE, \ 394 .type = REGULATOR_VOLTAGE, \
@@ -350,7 +404,7 @@ static struct regulator_ops max77686_buck_dvs_ops = {
350 << MAX77686_OPMODE_BUCK234_SHIFT, \ 404 << MAX77686_OPMODE_BUCK234_SHIFT, \
351} 405}
352 406
353static struct regulator_desc regulators[] = { 407static const struct regulator_desc regulators[] = {
354 regulator_desc_ldo1_low(1), 408 regulator_desc_ldo1_low(1),
355 regulator_desc_ldo_low(2), 409 regulator_desc_ldo_low(2),
356 regulator_desc_ldo(3), 410 regulator_desc_ldo(3),
@@ -388,103 +442,37 @@ static struct regulator_desc regulators[] = {
388 regulator_desc_buck(9), 442 regulator_desc_buck(9),
389}; 443};
390 444
391#ifdef CONFIG_OF
392static int max77686_pmic_dt_parse_pdata(struct platform_device *pdev,
393 struct max77686_platform_data *pdata)
394{
395 struct max77686_dev *iodev = dev_get_drvdata(pdev->dev.parent);
396 struct device_node *pmic_np, *regulators_np;
397 struct max77686_regulator_data *rdata;
398 struct of_regulator_match rmatch = { };
399 unsigned int i;
400
401 pmic_np = iodev->dev->of_node;
402 regulators_np = of_get_child_by_name(pmic_np, "voltage-regulators");
403 if (!regulators_np) {
404 dev_err(&pdev->dev, "could not find regulators sub-node\n");
405 return -EINVAL;
406 }
407
408 pdata->num_regulators = ARRAY_SIZE(regulators);
409 rdata = devm_kzalloc(&pdev->dev, sizeof(*rdata) *
410 pdata->num_regulators, GFP_KERNEL);
411 if (!rdata) {
412 of_node_put(regulators_np);
413 return -ENOMEM;
414 }
415
416 for (i = 0; i < pdata->num_regulators; i++) {
417 rmatch.name = regulators[i].name;
418 rmatch.init_data = NULL;
419 rmatch.of_node = NULL;
420 of_regulator_match(&pdev->dev, regulators_np, &rmatch, 1);
421 rdata[i].initdata = rmatch.init_data;
422 rdata[i].of_node = rmatch.of_node;
423 }
424
425 pdata->regulators = rdata;
426 of_node_put(regulators_np);
427
428 return 0;
429}
430#else
431static int max77686_pmic_dt_parse_pdata(struct platform_device *pdev,
432 struct max77686_platform_data *pdata)
433{
434 return 0;
435}
436#endif /* CONFIG_OF */
437
438static int max77686_pmic_probe(struct platform_device *pdev) 445static int max77686_pmic_probe(struct platform_device *pdev)
439{ 446{
440 struct max77686_dev *iodev = dev_get_drvdata(pdev->dev.parent); 447 struct max77686_dev *iodev = dev_get_drvdata(pdev->dev.parent);
441 struct max77686_platform_data *pdata = dev_get_platdata(iodev->dev);
442 struct max77686_data *max77686; 448 struct max77686_data *max77686;
443 int i, ret = 0; 449 int i;
444 struct regulator_config config = { }; 450 struct regulator_config config = { };
445 451
446 dev_dbg(&pdev->dev, "%s\n", __func__); 452 dev_dbg(&pdev->dev, "%s\n", __func__);
447 453
448 if (!pdata) {
449 dev_err(&pdev->dev, "no platform data found for regulator\n");
450 return -ENODEV;
451 }
452
453 if (iodev->dev->of_node) {
454 ret = max77686_pmic_dt_parse_pdata(pdev, pdata);
455 if (ret)
456 return ret;
457 }
458
459 if (pdata->num_regulators != MAX77686_REGULATORS) {
460 dev_err(&pdev->dev,
461 "Invalid initial data for regulator's initialiation\n");
462 return -EINVAL;
463 }
464
465 max77686 = devm_kzalloc(&pdev->dev, sizeof(struct max77686_data), 454 max77686 = devm_kzalloc(&pdev->dev, sizeof(struct max77686_data),
466 GFP_KERNEL); 455 GFP_KERNEL);
467 if (!max77686) 456 if (!max77686)
468 return -ENOMEM; 457 return -ENOMEM;
469 458
470 config.dev = &pdev->dev; 459 config.dev = iodev->dev;
471 config.regmap = iodev->regmap; 460 config.regmap = iodev->regmap;
472 config.driver_data = max77686; 461 config.driver_data = max77686;
473 platform_set_drvdata(pdev, max77686); 462 platform_set_drvdata(pdev, max77686);
474 463
475 for (i = 0; i < MAX77686_REGULATORS; i++) { 464 for (i = 0; i < MAX77686_REGULATORS; i++) {
476 struct regulator_dev *rdev; 465 struct regulator_dev *rdev;
466 int id = regulators[i].id;
477 467
478 config.init_data = pdata->regulators[i].initdata; 468 max77686->opmode[id] = MAX77686_NORMAL;
479 config.of_node = pdata->regulators[i].of_node;
480
481 max77686->opmode[i] = regulators[i].enable_mask;
482 rdev = devm_regulator_register(&pdev->dev, 469 rdev = devm_regulator_register(&pdev->dev,
483 &regulators[i], &config); 470 &regulators[i], &config);
484 if (IS_ERR(rdev)) { 471 if (IS_ERR(rdev)) {
472 int ret = PTR_ERR(rdev);
485 dev_err(&pdev->dev, 473 dev_err(&pdev->dev,
486 "regulator init failed for %d\n", i); 474 "regulator init failed for %d: %d\n", i, ret);
487 return PTR_ERR(rdev); 475 return ret;
488 } 476 }
489 } 477 }
490 478
diff --git a/drivers/regulator/max77693.c b/drivers/regulator/max77693.c
index d158f71fa128..7b9755a6c3b5 100644
--- a/drivers/regulator/max77693.c
+++ b/drivers/regulator/max77693.c
@@ -139,7 +139,7 @@ static struct regulator_ops max77693_charger_ops = {
139 .enable_mask = SAFEOUT_CTRL_ENSAFEOUT##_num##_MASK , \ 139 .enable_mask = SAFEOUT_CTRL_ENSAFEOUT##_num##_MASK , \
140} 140}
141 141
142static struct regulator_desc regulators[] = { 142static const struct regulator_desc regulators[] = {
143 regulator_desc_esafeout(1), 143 regulator_desc_esafeout(1),
144 regulator_desc_esafeout(2), 144 regulator_desc_esafeout(2),
145 { 145 {
diff --git a/drivers/regulator/max77802.c b/drivers/regulator/max77802.c
index 45fa240fe243..0766615c60bc 100644
--- a/drivers/regulator/max77802.c
+++ b/drivers/regulator/max77802.c
@@ -33,6 +33,7 @@
33#include <linux/regulator/of_regulator.h> 33#include <linux/regulator/of_regulator.h>
34#include <linux/mfd/max77686.h> 34#include <linux/mfd/max77686.h>
35#include <linux/mfd/max77686-private.h> 35#include <linux/mfd/max77686-private.h>
36#include <dt-bindings/regulator/maxim,max77802.h>
36 37
37/* Default ramp delay in case it is not manually set */ 38/* Default ramp delay in case it is not manually set */
38#define MAX77802_RAMP_DELAY 100000 /* uV/us */ 39#define MAX77802_RAMP_DELAY 100000 /* uV/us */
@@ -49,6 +50,10 @@
49#define MAX77802_RAMP_RATE_MASK_4BIT 0xF0 50#define MAX77802_RAMP_RATE_MASK_4BIT 0xF0
50#define MAX77802_RAMP_RATE_SHIFT_4BIT 4 51#define MAX77802_RAMP_RATE_SHIFT_4BIT 4
51 52
53#define MAX77802_STATUS_OFF 0x0
54#define MAX77802_OFF_PWRREQ 0x1
55#define MAX77802_LP_PWRREQ 0x2
56
52/* MAX77802 has two register formats: 2-bit and 4-bit */ 57/* MAX77802 has two register formats: 2-bit and 4-bit */
53static const unsigned int ramp_table_77802_2bit[] = { 58static const unsigned int ramp_table_77802_2bit[] = {
54 12500, 59 12500,
@@ -65,9 +70,16 @@ static unsigned int ramp_table_77802_4bit[] = {
65}; 70};
66 71
67struct max77802_regulator_prv { 72struct max77802_regulator_prv {
73 /* Array indexed by regulator id */
68 unsigned int opmode[MAX77802_REG_MAX]; 74 unsigned int opmode[MAX77802_REG_MAX];
69}; 75};
70 76
77static inline unsigned int max77802_map_mode(unsigned int mode)
78{
79 return mode == MAX77802_OPMODE_NORMAL ?
80 REGULATOR_MODE_NORMAL : REGULATOR_MODE_STANDBY;
81}
82
71static int max77802_get_opmode_shift(int id) 83static int max77802_get_opmode_shift(int id)
72{ 84{
73 if (id == MAX77802_BUCK1 || (id >= MAX77802_BUCK5 && 85 if (id == MAX77802_BUCK1 || (id >= MAX77802_BUCK5 &&
@@ -83,17 +95,16 @@ static int max77802_get_opmode_shift(int id)
83 return -EINVAL; 95 return -EINVAL;
84} 96}
85 97
86/* 98/**
87 * Some BUCKS supports Normal[ON/OFF] mode during suspend 99 * max77802_set_suspend_disable - Disable the regulator during system suspend
100 * @rdev: regulator to mark as disabled
88 * 101 *
89 * BUCK 1, 6, 2-4, 5, 7-10 (all) 102 * All regulators expect LDO 1, 3, 20 and 21 support OFF by PWRREQ.
90 * 103 * Configure the regulator so the PMIC will turn it OFF during system suspend.
91 * The other mode (0x02) will make PWRREQ switch between normal
92 * and low power.
93 */ 104 */
94static int max77802_buck_set_suspend_disable(struct regulator_dev *rdev) 105static int max77802_set_suspend_disable(struct regulator_dev *rdev)
95{ 106{
96 unsigned int val = MAX77802_OPMODE_STANDBY; 107 unsigned int val = MAX77802_OFF_PWRREQ;
97 struct max77802_regulator_prv *max77802 = rdev_get_drvdata(rdev); 108 struct max77802_regulator_prv *max77802 = rdev_get_drvdata(rdev);
98 int id = rdev_get_id(rdev); 109 int id = rdev_get_id(rdev);
99 int shift = max77802_get_opmode_shift(id); 110 int shift = max77802_get_opmode_shift(id);
@@ -104,14 +115,11 @@ static int max77802_buck_set_suspend_disable(struct regulator_dev *rdev)
104} 115}
105 116
106/* 117/*
107 * Some LDOs supports LPM-ON/OFF/Normal-ON mode during suspend state 118 * Some LDOs support Low Power Mode while the system is running.
108 * (Enable Control Logic1 by PWRREQ)
109 *
110 * LDOs 2, 4-19, 22-35.
111 * 119 *
120 * LDOs 1, 3, 20, 21.
112 */ 121 */
113static int max77802_ldo_set_suspend_mode_logic1(struct regulator_dev *rdev, 122static int max77802_set_mode(struct regulator_dev *rdev, unsigned int mode)
114 unsigned int mode)
115{ 123{
116 struct max77802_regulator_prv *max77802 = rdev_get_drvdata(rdev); 124 struct max77802_regulator_prv *max77802 = rdev_get_drvdata(rdev);
117 int id = rdev_get_id(rdev); 125 int id = rdev_get_id(rdev);
@@ -119,14 +127,11 @@ static int max77802_ldo_set_suspend_mode_logic1(struct regulator_dev *rdev,
119 int shift = max77802_get_opmode_shift(id); 127 int shift = max77802_get_opmode_shift(id);
120 128
121 switch (mode) { 129 switch (mode) {
122 case REGULATOR_MODE_IDLE: /* ON in LP Mode */ 130 case REGULATOR_MODE_STANDBY:
123 val = MAX77802_OPMODE_LP; 131 val = MAX77802_OPMODE_LP; /* ON in Low Power Mode */
124 break; 132 break;
125 case REGULATOR_MODE_NORMAL: /* ON in Normal Mode */ 133 case REGULATOR_MODE_NORMAL:
126 val = MAX77802_OPMODE_NORMAL; 134 val = MAX77802_OPMODE_NORMAL; /* ON in Normal Mode */
127 break;
128 case REGULATOR_MODE_STANDBY: /* ON/OFF by PWRREQ */
129 val = MAX77802_OPMODE_STANDBY;
130 break; 135 break;
131 default: 136 default:
132 dev_warn(&rdev->dev, "%s: regulator mode: 0x%x not supported\n", 137 dev_warn(&rdev->dev, "%s: regulator mode: 0x%x not supported\n",
@@ -139,35 +144,76 @@ static int max77802_ldo_set_suspend_mode_logic1(struct regulator_dev *rdev,
139 rdev->desc->enable_mask, val << shift); 144 rdev->desc->enable_mask, val << shift);
140} 145}
141 146
142/* 147static unsigned max77802_get_mode(struct regulator_dev *rdev)
143 * Mode 1 (Output[ON/OFF] by PWRREQ) is not supported on some LDOs 148{
144 * (Enable Control Logic2 by PWRREQ) 149 struct max77802_regulator_prv *max77802 = rdev_get_drvdata(rdev);
150 int id = rdev_get_id(rdev);
151
152 return max77802_map_mode(max77802->opmode[id]);
153}
154
155/**
156 * max77802_set_suspend_mode - set regulator opmode when the system is suspended
157 * @rdev: regulator to change mode
158 * @mode: operating mode to be set
145 * 159 *
146 * LDOs 1, 20, 21, and 3, 160 * Will set the operating mode for the regulators during system suspend.
161 * This function is valid for the three different enable control logics:
147 * 162 *
163 * Enable Control Logic1 by PWRREQ (BUCK 2-4 and LDOs 2, 4-19, 22-35)
164 * Enable Control Logic2 by PWRREQ (LDOs 1, 20, 21)
165 * Enable Control Logic3 by PWRREQ (LDO 3)
166 *
167 * If setting the regulator mode fails, the function only warns but does
168 * not return an error code to avoid the regulator core to stop setting
169 * the operating mode for the remaining regulators.
148 */ 170 */
149static int max77802_ldo_set_suspend_mode_logic2(struct regulator_dev *rdev, 171static int max77802_set_suspend_mode(struct regulator_dev *rdev,
150 unsigned int mode) 172 unsigned int mode)
151{ 173{
152 struct max77802_regulator_prv *max77802 = rdev_get_drvdata(rdev); 174 struct max77802_regulator_prv *max77802 = rdev_get_drvdata(rdev);
153 int id = rdev_get_id(rdev); 175 int id = rdev_get_id(rdev);
154 unsigned int val; 176 unsigned int val;
155 int shift = max77802_get_opmode_shift(id); 177 int shift = max77802_get_opmode_shift(id);
156 178
179 /*
180 * If the regulator has been disabled for suspend
181 * then is invalid to try setting a suspend mode.
182 */
183 if (max77802->opmode[id] == MAX77802_OFF_PWRREQ) {
184 dev_warn(&rdev->dev, "%s: is disabled, mode: 0x%x not set\n",
185 rdev->desc->name, mode);
186 return 0;
187 }
188
157 switch (mode) { 189 switch (mode) {
158 case REGULATOR_MODE_IDLE: /* ON in LP Mode */ 190 case REGULATOR_MODE_STANDBY:
159 val = MAX77802_OPMODE_LP; 191 /*
160 break; 192 * If the regulator opmode is normal then enable
161 case REGULATOR_MODE_NORMAL: /* ON in Normal Mode */ 193 * ON in Low Power Mode by PWRREQ. If the mode is
162 val = MAX77802_OPMODE_NORMAL; 194 * already Low Power then no action is required.
195 */
196 if (max77802->opmode[id] == MAX77802_OPMODE_NORMAL)
197 val = MAX77802_LP_PWRREQ;
198 else
199 return 0;
163 break; 200 break;
201 case REGULATOR_MODE_NORMAL:
202 /*
203 * If the regulator operating mode is Low Power then
204 * normal is not a valid opmode in suspend. If the
205 * mode is already normal then no action is required.
206 */
207 if (max77802->opmode[id] == MAX77802_OPMODE_LP)
208 dev_warn(&rdev->dev, "%s: in Low Power: 0x%x invalid\n",
209 rdev->desc->name, mode);
210 return 0;
164 default: 211 default:
165 dev_warn(&rdev->dev, "%s: regulator mode: 0x%x not supported\n", 212 dev_warn(&rdev->dev, "%s: regulator mode: 0x%x not supported\n",
166 rdev->desc->name, mode); 213 rdev->desc->name, mode);
167 return -EINVAL; 214 return -EINVAL;
168 } 215 }
169 216
170 max77802->opmode[id] = val;
171 return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg, 217 return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
172 rdev->desc->enable_mask, val << shift); 218 rdev->desc->enable_mask, val << shift);
173} 219}
@@ -178,6 +224,9 @@ static int max77802_enable(struct regulator_dev *rdev)
178 int id = rdev_get_id(rdev); 224 int id = rdev_get_id(rdev);
179 int shift = max77802_get_opmode_shift(id); 225 int shift = max77802_get_opmode_shift(id);
180 226
227 if (max77802->opmode[id] == MAX77802_OFF_PWRREQ)
228 max77802->opmode[id] = MAX77802_OPMODE_NORMAL;
229
181 return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg, 230 return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
182 rdev->desc->enable_mask, 231 rdev->desc->enable_mask,
183 max77802->opmode[id] << shift); 232 max77802->opmode[id] << shift);
@@ -247,7 +296,8 @@ static struct regulator_ops max77802_ldo_ops_logic1 = {
247 .get_voltage_sel = regulator_get_voltage_sel_regmap, 296 .get_voltage_sel = regulator_get_voltage_sel_regmap,
248 .set_voltage_sel = regulator_set_voltage_sel_regmap, 297 .set_voltage_sel = regulator_set_voltage_sel_regmap,
249 .set_voltage_time_sel = regulator_set_voltage_time_sel, 298 .set_voltage_time_sel = regulator_set_voltage_time_sel,
250 .set_suspend_mode = max77802_ldo_set_suspend_mode_logic1, 299 .set_suspend_disable = max77802_set_suspend_disable,
300 .set_suspend_mode = max77802_set_suspend_mode,
251}; 301};
252 302
253/* 303/*
@@ -262,7 +312,9 @@ static struct regulator_ops max77802_ldo_ops_logic2 = {
262 .get_voltage_sel = regulator_get_voltage_sel_regmap, 312 .get_voltage_sel = regulator_get_voltage_sel_regmap,
263 .set_voltage_sel = regulator_set_voltage_sel_regmap, 313 .set_voltage_sel = regulator_set_voltage_sel_regmap,
264 .set_voltage_time_sel = regulator_set_voltage_time_sel, 314 .set_voltage_time_sel = regulator_set_voltage_time_sel,
265 .set_suspend_mode = max77802_ldo_set_suspend_mode_logic2, 315 .set_mode = max77802_set_mode,
316 .get_mode = max77802_get_mode,
317 .set_suspend_mode = max77802_set_suspend_mode,
266}; 318};
267 319
268/* BUCKS 1, 6 */ 320/* BUCKS 1, 6 */
@@ -276,10 +328,25 @@ static struct regulator_ops max77802_buck_16_dvs_ops = {
276 .set_voltage_sel = regulator_set_voltage_sel_regmap, 328 .set_voltage_sel = regulator_set_voltage_sel_regmap,
277 .set_voltage_time_sel = regulator_set_voltage_time_sel, 329 .set_voltage_time_sel = regulator_set_voltage_time_sel,
278 .set_ramp_delay = max77802_set_ramp_delay_4bit, 330 .set_ramp_delay = max77802_set_ramp_delay_4bit,
279 .set_suspend_disable = max77802_buck_set_suspend_disable, 331 .set_suspend_disable = max77802_set_suspend_disable,
280}; 332};
281 333
282/* BUCKs 2-4, 5, 7-10 */ 334/* BUCKs 2-4 */
335static struct regulator_ops max77802_buck_234_ops = {
336 .list_voltage = regulator_list_voltage_linear,
337 .map_voltage = regulator_map_voltage_linear,
338 .is_enabled = regulator_is_enabled_regmap,
339 .enable = max77802_enable,
340 .disable = regulator_disable_regmap,
341 .get_voltage_sel = regulator_get_voltage_sel_regmap,
342 .set_voltage_sel = regulator_set_voltage_sel_regmap,
343 .set_voltage_time_sel = regulator_set_voltage_time_sel,
344 .set_ramp_delay = max77802_set_ramp_delay_2bit,
345 .set_suspend_disable = max77802_set_suspend_disable,
346 .set_suspend_mode = max77802_set_suspend_mode,
347};
348
349/* BUCKs 5, 7-10 */
283static struct regulator_ops max77802_buck_dvs_ops = { 350static struct regulator_ops max77802_buck_dvs_ops = {
284 .list_voltage = regulator_list_voltage_linear, 351 .list_voltage = regulator_list_voltage_linear,
285 .map_voltage = regulator_map_voltage_linear, 352 .map_voltage = regulator_map_voltage_linear,
@@ -290,12 +357,14 @@ static struct regulator_ops max77802_buck_dvs_ops = {
290 .set_voltage_sel = regulator_set_voltage_sel_regmap, 357 .set_voltage_sel = regulator_set_voltage_sel_regmap,
291 .set_voltage_time_sel = regulator_set_voltage_time_sel, 358 .set_voltage_time_sel = regulator_set_voltage_time_sel,
292 .set_ramp_delay = max77802_set_ramp_delay_2bit, 359 .set_ramp_delay = max77802_set_ramp_delay_2bit,
293 .set_suspend_disable = max77802_buck_set_suspend_disable, 360 .set_suspend_disable = max77802_set_suspend_disable,
294}; 361};
295 362
296/* LDOs 3-7, 9-14, 18-26, 28, 29, 32-34 */ 363/* LDOs 3-7, 9-14, 18-26, 28, 29, 32-34 */
297#define regulator_77802_desc_p_ldo(num, supply, log) { \ 364#define regulator_77802_desc_p_ldo(num, supply, log) { \
298 .name = "LDO"#num, \ 365 .name = "LDO"#num, \
366 .of_match = of_match_ptr("LDO"#num), \
367 .regulators_node = of_match_ptr("regulators"), \
299 .id = MAX77802_LDO##num, \ 368 .id = MAX77802_LDO##num, \
300 .supply_name = "inl"#supply, \ 369 .supply_name = "inl"#supply, \
301 .ops = &max77802_ldo_ops_logic##log, \ 370 .ops = &max77802_ldo_ops_logic##log, \
@@ -309,11 +378,14 @@ static struct regulator_ops max77802_buck_dvs_ops = {
309 .vsel_mask = MAX77802_VSEL_MASK, \ 378 .vsel_mask = MAX77802_VSEL_MASK, \
310 .enable_reg = MAX77802_REG_LDO1CTRL1 + num - 1, \ 379 .enable_reg = MAX77802_REG_LDO1CTRL1 + num - 1, \
311 .enable_mask = MAX77802_OPMODE_MASK << MAX77802_OPMODE_SHIFT_LDO, \ 380 .enable_mask = MAX77802_OPMODE_MASK << MAX77802_OPMODE_SHIFT_LDO, \
381 .of_map_mode = max77802_map_mode, \
312} 382}
313 383
314/* LDOs 1, 2, 8, 15, 17, 27, 30, 35 */ 384/* LDOs 1, 2, 8, 15, 17, 27, 30, 35 */
315#define regulator_77802_desc_n_ldo(num, supply, log) { \ 385#define regulator_77802_desc_n_ldo(num, supply, log) { \
316 .name = "LDO"#num, \ 386 .name = "LDO"#num, \
387 .of_match = of_match_ptr("LDO"#num), \
388 .regulators_node = of_match_ptr("regulators"), \
317 .id = MAX77802_LDO##num, \ 389 .id = MAX77802_LDO##num, \
318 .supply_name = "inl"#supply, \ 390 .supply_name = "inl"#supply, \
319 .ops = &max77802_ldo_ops_logic##log, \ 391 .ops = &max77802_ldo_ops_logic##log, \
@@ -327,11 +399,14 @@ static struct regulator_ops max77802_buck_dvs_ops = {
327 .vsel_mask = MAX77802_VSEL_MASK, \ 399 .vsel_mask = MAX77802_VSEL_MASK, \
328 .enable_reg = MAX77802_REG_LDO1CTRL1 + num - 1, \ 400 .enable_reg = MAX77802_REG_LDO1CTRL1 + num - 1, \
329 .enable_mask = MAX77802_OPMODE_MASK << MAX77802_OPMODE_SHIFT_LDO, \ 401 .enable_mask = MAX77802_OPMODE_MASK << MAX77802_OPMODE_SHIFT_LDO, \
402 .of_map_mode = max77802_map_mode, \
330} 403}
331 404
332/* BUCKs 1, 6 */ 405/* BUCKs 1, 6 */
333#define regulator_77802_desc_16_buck(num) { \ 406#define regulator_77802_desc_16_buck(num) { \
334 .name = "BUCK"#num, \ 407 .name = "BUCK"#num, \
408 .of_match = of_match_ptr("BUCK"#num), \
409 .regulators_node = of_match_ptr("regulators"), \
335 .id = MAX77802_BUCK##num, \ 410 .id = MAX77802_BUCK##num, \
336 .supply_name = "inb"#num, \ 411 .supply_name = "inb"#num, \
337 .ops = &max77802_buck_16_dvs_ops, \ 412 .ops = &max77802_buck_16_dvs_ops, \
@@ -345,14 +420,17 @@ static struct regulator_ops max77802_buck_dvs_ops = {
345 .vsel_mask = MAX77802_DVS_VSEL_MASK, \ 420 .vsel_mask = MAX77802_DVS_VSEL_MASK, \
346 .enable_reg = MAX77802_REG_BUCK ## num ## CTRL, \ 421 .enable_reg = MAX77802_REG_BUCK ## num ## CTRL, \
347 .enable_mask = MAX77802_OPMODE_MASK, \ 422 .enable_mask = MAX77802_OPMODE_MASK, \
423 .of_map_mode = max77802_map_mode, \
348} 424}
349 425
350/* BUCKS 2-4 */ 426/* BUCKS 2-4 */
351#define regulator_77802_desc_234_buck(num) { \ 427#define regulator_77802_desc_234_buck(num) { \
352 .name = "BUCK"#num, \ 428 .name = "BUCK"#num, \
429 .of_match = of_match_ptr("BUCK"#num), \
430 .regulators_node = of_match_ptr("regulators"), \
353 .id = MAX77802_BUCK##num, \ 431 .id = MAX77802_BUCK##num, \
354 .supply_name = "inb"#num, \ 432 .supply_name = "inb"#num, \
355 .ops = &max77802_buck_dvs_ops, \ 433 .ops = &max77802_buck_234_ops, \
356 .type = REGULATOR_VOLTAGE, \ 434 .type = REGULATOR_VOLTAGE, \
357 .owner = THIS_MODULE, \ 435 .owner = THIS_MODULE, \
358 .min_uV = 600000, \ 436 .min_uV = 600000, \
@@ -364,11 +442,14 @@ static struct regulator_ops max77802_buck_dvs_ops = {
364 .enable_reg = MAX77802_REG_BUCK ## num ## CTRL1, \ 442 .enable_reg = MAX77802_REG_BUCK ## num ## CTRL1, \
365 .enable_mask = MAX77802_OPMODE_MASK << \ 443 .enable_mask = MAX77802_OPMODE_MASK << \
366 MAX77802_OPMODE_BUCK234_SHIFT, \ 444 MAX77802_OPMODE_BUCK234_SHIFT, \
445 .of_map_mode = max77802_map_mode, \
367} 446}
368 447
369/* BUCK 5 */ 448/* BUCK 5 */
370#define regulator_77802_desc_buck5(num) { \ 449#define regulator_77802_desc_buck5(num) { \
371 .name = "BUCK"#num, \ 450 .name = "BUCK"#num, \
451 .of_match = of_match_ptr("BUCK"#num), \
452 .regulators_node = of_match_ptr("regulators"), \
372 .id = MAX77802_BUCK##num, \ 453 .id = MAX77802_BUCK##num, \
373 .supply_name = "inb"#num, \ 454 .supply_name = "inb"#num, \
374 .ops = &max77802_buck_dvs_ops, \ 455 .ops = &max77802_buck_dvs_ops, \
@@ -382,11 +463,14 @@ static struct regulator_ops max77802_buck_dvs_ops = {
382 .vsel_mask = MAX77802_VSEL_MASK, \ 463 .vsel_mask = MAX77802_VSEL_MASK, \
383 .enable_reg = MAX77802_REG_BUCK5CTRL, \ 464 .enable_reg = MAX77802_REG_BUCK5CTRL, \
384 .enable_mask = MAX77802_OPMODE_MASK, \ 465 .enable_mask = MAX77802_OPMODE_MASK, \
466 .of_map_mode = max77802_map_mode, \
385} 467}
386 468
387/* BUCKs 7-10 */ 469/* BUCKs 7-10 */
388#define regulator_77802_desc_buck7_10(num) { \ 470#define regulator_77802_desc_buck7_10(num) { \
389 .name = "BUCK"#num, \ 471 .name = "BUCK"#num, \
472 .of_match = of_match_ptr("BUCK"#num), \
473 .regulators_node = of_match_ptr("regulators"), \
390 .id = MAX77802_BUCK##num, \ 474 .id = MAX77802_BUCK##num, \
391 .supply_name = "inb"#num, \ 475 .supply_name = "inb"#num, \
392 .ops = &max77802_buck_dvs_ops, \ 476 .ops = &max77802_buck_dvs_ops, \
@@ -400,9 +484,10 @@ static struct regulator_ops max77802_buck_dvs_ops = {
400 .vsel_mask = MAX77802_VSEL_MASK, \ 484 .vsel_mask = MAX77802_VSEL_MASK, \
401 .enable_reg = MAX77802_REG_BUCK7CTRL + (num - 7) * 3, \ 485 .enable_reg = MAX77802_REG_BUCK7CTRL + (num - 7) * 3, \
402 .enable_mask = MAX77802_OPMODE_MASK, \ 486 .enable_mask = MAX77802_OPMODE_MASK, \
487 .of_map_mode = max77802_map_mode, \
403} 488}
404 489
405static struct regulator_desc regulators[] = { 490static const struct regulator_desc regulators[] = {
406 regulator_77802_desc_16_buck(1), 491 regulator_77802_desc_16_buck(1),
407 regulator_77802_desc_234_buck(2), 492 regulator_77802_desc_234_buck(2),
408 regulator_77802_desc_234_buck(3), 493 regulator_77802_desc_234_buck(3),
@@ -447,85 +532,19 @@ static struct regulator_desc regulators[] = {
447 regulator_77802_desc_n_ldo(35, 2, 1), 532 regulator_77802_desc_n_ldo(35, 2, 1),
448}; 533};
449 534
450#ifdef CONFIG_OF
451static int max77802_pmic_dt_parse_pdata(struct platform_device *pdev,
452 struct max77686_platform_data *pdata)
453{
454 struct max77686_dev *iodev = dev_get_drvdata(pdev->dev.parent);
455 struct device_node *pmic_np, *regulators_np;
456 struct max77686_regulator_data *rdata;
457 struct of_regulator_match rmatch = { };
458 unsigned int i;
459
460 pmic_np = iodev->dev->of_node;
461 regulators_np = of_get_child_by_name(pmic_np, "regulators");
462 if (!regulators_np) {
463 dev_err(&pdev->dev, "could not find regulators sub-node\n");
464 return -EINVAL;
465 }
466
467 pdata->num_regulators = ARRAY_SIZE(regulators);
468 rdata = devm_kzalloc(&pdev->dev, sizeof(*rdata) *
469 pdata->num_regulators, GFP_KERNEL);
470 if (!rdata) {
471 of_node_put(regulators_np);
472 return -ENOMEM;
473 }
474
475 for (i = 0; i < pdata->num_regulators; i++) {
476 rmatch.name = regulators[i].name;
477 rmatch.init_data = NULL;
478 rmatch.of_node = NULL;
479 if (of_regulator_match(&pdev->dev, regulators_np, &rmatch,
480 1) != 1) {
481 dev_warn(&pdev->dev, "No matching regulator for '%s'\n",
482 rmatch.name);
483 continue;
484 }
485 rdata[i].initdata = rmatch.init_data;
486 rdata[i].of_node = rmatch.of_node;
487 rdata[i].id = regulators[i].id;
488 }
489
490 pdata->regulators = rdata;
491 of_node_put(regulators_np);
492
493 return 0;
494}
495#else
496static int max77802_pmic_dt_parse_pdata(struct platform_device *pdev,
497 struct max77686_platform_data *pdata)
498{
499 return 0;
500}
501#endif /* CONFIG_OF */
502
503static int max77802_pmic_probe(struct platform_device *pdev) 535static int max77802_pmic_probe(struct platform_device *pdev)
504{ 536{
505 struct max77686_dev *iodev = dev_get_drvdata(pdev->dev.parent); 537 struct max77686_dev *iodev = dev_get_drvdata(pdev->dev.parent);
506 struct max77686_platform_data *pdata = dev_get_platdata(iodev->dev);
507 struct max77802_regulator_prv *max77802; 538 struct max77802_regulator_prv *max77802;
508 int i, ret = 0, val; 539 int i, val;
509 struct regulator_config config = { }; 540 struct regulator_config config = { };
510 541
511 /* This is allocated by the MFD driver */
512 if (!pdata) {
513 dev_err(&pdev->dev, "no platform data found for regulator\n");
514 return -ENODEV;
515 }
516
517 max77802 = devm_kzalloc(&pdev->dev, 542 max77802 = devm_kzalloc(&pdev->dev,
518 sizeof(struct max77802_regulator_prv), 543 sizeof(struct max77802_regulator_prv),
519 GFP_KERNEL); 544 GFP_KERNEL);
520 if (!max77802) 545 if (!max77802)
521 return -ENOMEM; 546 return -ENOMEM;
522 547
523 if (iodev->dev->of_node) {
524 ret = max77802_pmic_dt_parse_pdata(pdev, pdata);
525 if (ret)
526 return ret;
527 }
528
529 config.dev = iodev->dev; 548 config.dev = iodev->dev;
530 config.regmap = iodev->regmap; 549 config.regmap = iodev->regmap;
531 config.driver_data = max77802; 550 config.driver_data = max77802;
@@ -533,21 +552,25 @@ static int max77802_pmic_probe(struct platform_device *pdev)
533 552
534 for (i = 0; i < MAX77802_REG_MAX; i++) { 553 for (i = 0; i < MAX77802_REG_MAX; i++) {
535 struct regulator_dev *rdev; 554 struct regulator_dev *rdev;
536 int id = pdata->regulators[i].id; 555 int id = regulators[i].id;
537 int shift = max77802_get_opmode_shift(id); 556 int shift = max77802_get_opmode_shift(id);
538 557 int ret;
539 config.init_data = pdata->regulators[i].initdata;
540 config.of_node = pdata->regulators[i].of_node;
541 558
542 ret = regmap_read(iodev->regmap, regulators[i].enable_reg, &val); 559 ret = regmap_read(iodev->regmap, regulators[i].enable_reg, &val);
543 val = val >> shift & MAX77802_OPMODE_MASK; 560 if (ret < 0) {
561 dev_warn(&pdev->dev,
562 "cannot read current mode for %d\n", i);
563 val = MAX77802_OPMODE_NORMAL;
564 } else {
565 val = val >> shift & MAX77802_OPMODE_MASK;
566 }
544 567
545 /* 568 /*
546 * If the regulator is disabled and the system warm rebooted, 569 * If the regulator is disabled and the system warm rebooted,
547 * the hardware reports OFF as the regulator operating mode. 570 * the hardware reports OFF as the regulator operating mode.
548 * Default to operating mode NORMAL in that case. 571 * Default to operating mode NORMAL in that case.
549 */ 572 */
550 if (val == MAX77802_OPMODE_OFF) 573 if (val == MAX77802_STATUS_OFF)
551 max77802->opmode[id] = MAX77802_OPMODE_NORMAL; 574 max77802->opmode[id] = MAX77802_OPMODE_NORMAL;
552 else 575 else
553 max77802->opmode[id] = val; 576 max77802->opmode[id] = val;
@@ -555,9 +578,10 @@ static int max77802_pmic_probe(struct platform_device *pdev)
555 rdev = devm_regulator_register(&pdev->dev, 578 rdev = devm_regulator_register(&pdev->dev,
556 &regulators[i], &config); 579 &regulators[i], &config);
557 if (IS_ERR(rdev)) { 580 if (IS_ERR(rdev)) {
581 ret = PTR_ERR(rdev);
558 dev_err(&pdev->dev, 582 dev_err(&pdev->dev,
559 "regulator init failed for %d\n", i); 583 "regulator init failed for %d: %d\n", i, ret);
560 return PTR_ERR(rdev); 584 return ret;
561 } 585 }
562 } 586 }
563 587
diff --git a/drivers/regulator/max8952.c b/drivers/regulator/max8952.c
index f7f9efcfedb7..1af8f4a2ab86 100644
--- a/drivers/regulator/max8952.c
+++ b/drivers/regulator/max8952.c
@@ -174,7 +174,7 @@ static struct max8952_platform_data *max8952_parse_dt(struct device *dev)
174 if (of_property_read_u32(np, "max8952,ramp-speed", &pd->ramp_speed)) 174 if (of_property_read_u32(np, "max8952,ramp-speed", &pd->ramp_speed))
175 dev_warn(dev, "max8952,ramp-speed property not specified, defaulting to 32mV/us\n"); 175 dev_warn(dev, "max8952,ramp-speed property not specified, defaulting to 32mV/us\n");
176 176
177 pd->reg_data = of_get_regulator_init_data(dev, np); 177 pd->reg_data = of_get_regulator_init_data(dev, np, &regulator);
178 if (!pd->reg_data) { 178 if (!pd->reg_data) {
179 dev_err(dev, "Failed to parse regulator init data\n"); 179 dev_err(dev, "Failed to parse regulator init data\n");
180 return NULL; 180 return NULL;
@@ -225,6 +225,8 @@ static int max8952_pmic_probe(struct i2c_client *client,
225 config.of_node = client->dev.of_node; 225 config.of_node = client->dev.of_node;
226 226
227 config.ena_gpio = pdata->gpio_en; 227 config.ena_gpio = pdata->gpio_en;
228 if (client->dev.of_node)
229 config.ena_gpio_initialized = true;
228 if (pdata->reg_data->constraints.boot_on) 230 if (pdata->reg_data->constraints.boot_on)
229 config.ena_gpio_flags |= GPIOF_OUT_INIT_HIGH; 231 config.ena_gpio_flags |= GPIOF_OUT_INIT_HIGH;
230 232
diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
index dbedf1768db0..c3d55c2db593 100644
--- a/drivers/regulator/max8973-regulator.c
+++ b/drivers/regulator/max8973-regulator.c
@@ -458,7 +458,8 @@ static int max8973_probe(struct i2c_client *client,
458 458
459 config.dev = &client->dev; 459 config.dev = &client->dev;
460 config.init_data = pdata ? pdata->reg_init_data : 460 config.init_data = pdata ? pdata->reg_init_data :
461 of_get_regulator_init_data(&client->dev, client->dev.of_node); 461 of_get_regulator_init_data(&client->dev, client->dev.of_node,
462 &max->desc);
462 config.driver_data = max; 463 config.driver_data = max;
463 config.of_node = client->dev.of_node; 464 config.of_node = client->dev.of_node;
464 config.regmap = max->regmap; 465 config.regmap = max->regmap;
diff --git a/drivers/regulator/max8997.c b/drivers/regulator/max8997.c
index 9c31e215a521..726fde1d883e 100644
--- a/drivers/regulator/max8997.c
+++ b/drivers/regulator/max8997.c
@@ -953,7 +953,8 @@ static int max8997_pmic_dt_parse_pdata(struct platform_device *pdev,
953 953
954 rdata->id = i; 954 rdata->id = i;
955 rdata->initdata = of_get_regulator_init_data(&pdev->dev, 955 rdata->initdata = of_get_regulator_init_data(&pdev->dev,
956 reg_np); 956 reg_np,
957 &regulators[i]);
957 rdata->reg_node = reg_np; 958 rdata->reg_node = reg_np;
958 rdata++; 959 rdata++;
959 } 960 }
diff --git a/drivers/regulator/max8998.c b/drivers/regulator/max8998.c
index 961091b46557..59e34a05a4a2 100644
--- a/drivers/regulator/max8998.c
+++ b/drivers/regulator/max8998.c
@@ -686,8 +686,9 @@ static int max8998_pmic_dt_parse_pdata(struct max8998_dev *iodev,
686 continue; 686 continue;
687 687
688 rdata->id = regulators[i].id; 688 rdata->id = regulators[i].id;
689 rdata->initdata = of_get_regulator_init_data( 689 rdata->initdata = of_get_regulator_init_data(iodev->dev,
690 iodev->dev, reg_np); 690 reg_np,
691 &regulators[i]);
691 rdata->reg_node = reg_np; 692 rdata->reg_node = reg_np;
692 ++rdata; 693 ++rdata;
693 } 694 }
diff --git a/drivers/regulator/mc13xxx-regulator-core.c b/drivers/regulator/mc13xxx-regulator-core.c
index afba024953e1..0281c31ae2ed 100644
--- a/drivers/regulator/mc13xxx-regulator-core.c
+++ b/drivers/regulator/mc13xxx-regulator-core.c
@@ -194,7 +194,8 @@ struct mc13xxx_regulator_init_data *mc13xxx_parse_regulators_dt(
194 regulators[i].desc.name)) { 194 regulators[i].desc.name)) {
195 p->id = i; 195 p->id = i;
196 p->init_data = of_get_regulator_init_data( 196 p->init_data = of_get_regulator_init_data(
197 &pdev->dev, child); 197 &pdev->dev, child,
198 &regulators[i].desc);
198 p->node = child; 199 p->node = child;
199 p++; 200 p++;
200 201
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index 5a1d4afa4776..91eaaf010524 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -19,12 +19,20 @@
19 19
20#include "internal.h" 20#include "internal.h"
21 21
22static const char *const regulator_states[PM_SUSPEND_MAX + 1] = {
23 [PM_SUSPEND_MEM] = "regulator-state-mem",
24 [PM_SUSPEND_MAX] = "regulator-state-disk",
25};
26
22static void of_get_regulation_constraints(struct device_node *np, 27static void of_get_regulation_constraints(struct device_node *np,
23 struct regulator_init_data **init_data) 28 struct regulator_init_data **init_data,
29 const struct regulator_desc *desc)
24{ 30{
25 const __be32 *min_uV, *max_uV; 31 const __be32 *min_uV, *max_uV;
26 struct regulation_constraints *constraints = &(*init_data)->constraints; 32 struct regulation_constraints *constraints = &(*init_data)->constraints;
27 int ret; 33 struct regulator_state *suspend_state;
34 struct device_node *suspend_np;
35 int ret, i;
28 u32 pval; 36 u32 pval;
29 37
30 constraints->name = of_get_property(np, "regulator-name", NULL); 38 constraints->name = of_get_property(np, "regulator-name", NULL);
@@ -73,18 +81,84 @@ static void of_get_regulation_constraints(struct device_node *np,
73 ret = of_property_read_u32(np, "regulator-enable-ramp-delay", &pval); 81 ret = of_property_read_u32(np, "regulator-enable-ramp-delay", &pval);
74 if (!ret) 82 if (!ret)
75 constraints->enable_time = pval; 83 constraints->enable_time = pval;
84
85 if (!of_property_read_u32(np, "regulator-initial-mode", &pval)) {
86 if (desc && desc->of_map_mode) {
87 ret = desc->of_map_mode(pval);
88 if (ret == -EINVAL)
89 pr_err("%s: invalid mode %u\n", np->name, pval);
90 else
91 constraints->initial_mode = ret;
92 } else {
93 pr_warn("%s: mapping for mode %d not defined\n",
94 np->name, pval);
95 }
96 }
97
98 for (i = 0; i < ARRAY_SIZE(regulator_states); i++) {
99 switch (i) {
100 case PM_SUSPEND_MEM:
101 suspend_state = &constraints->state_mem;
102 break;
103 case PM_SUSPEND_MAX:
104 suspend_state = &constraints->state_disk;
105 break;
106 case PM_SUSPEND_ON:
107 case PM_SUSPEND_FREEZE:
108 case PM_SUSPEND_STANDBY:
109 default:
110 continue;
111 };
112
113 suspend_np = of_get_child_by_name(np, regulator_states[i]);
114 if (!suspend_np || !suspend_state)
115 continue;
116
117 if (!of_property_read_u32(suspend_np, "regulator-mode",
118 &pval)) {
119 if (desc && desc->of_map_mode) {
120 ret = desc->of_map_mode(pval);
121 if (ret == -EINVAL)
122 pr_err("%s: invalid mode %u\n",
123 np->name, pval);
124 else
125 suspend_state->mode = ret;
126 } else {
127 pr_warn("%s: mapping for mode %d not defined\n",
128 np->name, pval);
129 }
130 }
131
132 if (of_property_read_bool(suspend_np,
133 "regulator-on-in-suspend"))
134 suspend_state->enabled = true;
135 else if (of_property_read_bool(suspend_np,
136 "regulator-off-in-suspend"))
137 suspend_state->disabled = true;
138
139 if (!of_property_read_u32(suspend_np,
140 "regulator-suspend-microvolt", &pval))
141 suspend_state->uV = pval;
142
143 of_node_put(suspend_np);
144 suspend_state = NULL;
145 suspend_np = NULL;
146 }
76} 147}
77 148
78/** 149/**
79 * of_get_regulator_init_data - extract regulator_init_data structure info 150 * of_get_regulator_init_data - extract regulator_init_data structure info
80 * @dev: device requesting for regulator_init_data 151 * @dev: device requesting for regulator_init_data
152 * @node: regulator device node
153 * @desc: regulator description
81 * 154 *
82 * Populates regulator_init_data structure by extracting data from device 155 * Populates regulator_init_data structure by extracting data from device
83 * tree node, returns a pointer to the populated struture or NULL if memory 156 * tree node, returns a pointer to the populated struture or NULL if memory
84 * alloc fails. 157 * alloc fails.
85 */ 158 */
86struct regulator_init_data *of_get_regulator_init_data(struct device *dev, 159struct regulator_init_data *of_get_regulator_init_data(struct device *dev,
87 struct device_node *node) 160 struct device_node *node,
161 const struct regulator_desc *desc)
88{ 162{
89 struct regulator_init_data *init_data; 163 struct regulator_init_data *init_data;
90 164
@@ -95,7 +169,7 @@ struct regulator_init_data *of_get_regulator_init_data(struct device *dev,
95 if (!init_data) 169 if (!init_data)
96 return NULL; /* Out of memory? */ 170 return NULL; /* Out of memory? */
97 171
98 of_get_regulation_constraints(node, &init_data); 172 of_get_regulation_constraints(node, &init_data, desc);
99 return init_data; 173 return init_data;
100} 174}
101EXPORT_SYMBOL_GPL(of_get_regulator_init_data); 175EXPORT_SYMBOL_GPL(of_get_regulator_init_data);
@@ -176,7 +250,8 @@ int of_regulator_match(struct device *dev, struct device_node *node,
176 continue; 250 continue;
177 251
178 match->init_data = 252 match->init_data =
179 of_get_regulator_init_data(dev, child); 253 of_get_regulator_init_data(dev, child,
254 match->desc);
180 if (!match->init_data) { 255 if (!match->init_data) {
181 dev_err(dev, 256 dev_err(dev,
182 "failed to parse DT for regulator %s\n", 257 "failed to parse DT for regulator %s\n",
@@ -224,7 +299,7 @@ struct regulator_init_data *regulator_of_get_init_data(struct device *dev,
224 if (strcmp(desc->of_match, name)) 299 if (strcmp(desc->of_match, name))
225 continue; 300 continue;
226 301
227 init_data = of_get_regulator_init_data(dev, child); 302 init_data = of_get_regulator_init_data(dev, child, desc);
228 if (!init_data) { 303 if (!init_data) {
229 dev_err(dev, 304 dev_err(dev,
230 "failed to parse DT for regulator %s\n", 305 "failed to parse DT for regulator %s\n",
diff --git a/drivers/regulator/pwm-regulator.c b/drivers/regulator/pwm-regulator.c
index d3f55eaea058..91f34ca3a9ac 100644
--- a/drivers/regulator/pwm-regulator.c
+++ b/drivers/regulator/pwm-regulator.c
@@ -149,7 +149,8 @@ static int pwm_regulator_probe(struct platform_device *pdev)
149 return ret; 149 return ret;
150 } 150 }
151 151
152 config.init_data = of_get_regulator_init_data(&pdev->dev, np); 152 config.init_data = of_get_regulator_init_data(&pdev->dev, np,
153 &drvdata->desc);
153 if (!config.init_data) 154 if (!config.init_data)
154 return -ENOMEM; 155 return -ENOMEM;
155 156
diff --git a/drivers/regulator/qcom_rpm-regulator.c b/drivers/regulator/qcom_rpm-regulator.c
index b55cd5b50ebe..183598b111f9 100644
--- a/drivers/regulator/qcom_rpm-regulator.c
+++ b/drivers/regulator/qcom_rpm-regulator.c
@@ -183,6 +183,13 @@ static const struct regulator_linear_range ftsmps_ranges[] = {
183 REGULATOR_LINEAR_RANGE(1500000, 64, 100, 50000), 183 REGULATOR_LINEAR_RANGE(1500000, 64, 100, 50000),
184}; 184};
185 185
186static const struct regulator_linear_range smb208_ranges[] = {
187 REGULATOR_LINEAR_RANGE( 375000, 0, 29, 12500),
188 REGULATOR_LINEAR_RANGE( 750000, 30, 89, 12500),
189 REGULATOR_LINEAR_RANGE(1500000, 90, 153, 25000),
190 REGULATOR_LINEAR_RANGE(3100000, 154, 234, 25000),
191};
192
186static const struct regulator_linear_range ncp_ranges[] = { 193static const struct regulator_linear_range ncp_ranges[] = {
187 REGULATOR_LINEAR_RANGE(1500000, 0, 31, 50000), 194 REGULATOR_LINEAR_RANGE(1500000, 0, 31, 50000),
188}; 195};
@@ -559,6 +566,16 @@ static const struct qcom_rpm_reg pm8921_switch = {
559 .parts = &rpm8960_switch_parts, 566 .parts = &rpm8960_switch_parts,
560}; 567};
561 568
569static const struct qcom_rpm_reg smb208_smps = {
570 .desc.linear_ranges = smb208_ranges,
571 .desc.n_linear_ranges = ARRAY_SIZE(smb208_ranges),
572 .desc.n_voltages = 235,
573 .desc.ops = &uV_ops,
574 .parts = &rpm8960_smps_parts,
575 .supports_force_mode_auto = false,
576 .supports_force_mode_bypass = false,
577};
578
562static const struct of_device_id rpm_of_match[] = { 579static const struct of_device_id rpm_of_match[] = {
563 { .compatible = "qcom,rpm-pm8058-pldo", .data = &pm8058_pldo }, 580 { .compatible = "qcom,rpm-pm8058-pldo", .data = &pm8058_pldo },
564 { .compatible = "qcom,rpm-pm8058-nldo", .data = &pm8058_nldo }, 581 { .compatible = "qcom,rpm-pm8058-nldo", .data = &pm8058_nldo },
@@ -578,6 +595,8 @@ static const struct of_device_id rpm_of_match[] = {
578 { .compatible = "qcom,rpm-pm8921-ftsmps", .data = &pm8921_ftsmps }, 595 { .compatible = "qcom,rpm-pm8921-ftsmps", .data = &pm8921_ftsmps },
579 { .compatible = "qcom,rpm-pm8921-ncp", .data = &pm8921_ncp }, 596 { .compatible = "qcom,rpm-pm8921-ncp", .data = &pm8921_ncp },
580 { .compatible = "qcom,rpm-pm8921-switch", .data = &pm8921_switch }, 597 { .compatible = "qcom,rpm-pm8921-switch", .data = &pm8921_switch },
598
599 { .compatible = "qcom,rpm-smb208", .data = &smb208_smps },
581 { } 600 { }
582}; 601};
583MODULE_DEVICE_TABLE(of, rpm_of_match); 602MODULE_DEVICE_TABLE(of, rpm_of_match);
@@ -643,10 +662,6 @@ static int rpm_reg_probe(struct platform_device *pdev)
643 match = of_match_device(rpm_of_match, &pdev->dev); 662 match = of_match_device(rpm_of_match, &pdev->dev);
644 template = match->data; 663 template = match->data;
645 664
646 initdata = of_get_regulator_init_data(&pdev->dev, pdev->dev.of_node);
647 if (!initdata)
648 return -EINVAL;
649
650 vreg = devm_kmalloc(&pdev->dev, sizeof(*vreg), GFP_KERNEL); 665 vreg = devm_kmalloc(&pdev->dev, sizeof(*vreg), GFP_KERNEL);
651 if (!vreg) { 666 if (!vreg) {
652 dev_err(&pdev->dev, "failed to allocate vreg\n"); 667 dev_err(&pdev->dev, "failed to allocate vreg\n");
@@ -666,6 +681,11 @@ static int rpm_reg_probe(struct platform_device *pdev)
666 return -ENODEV; 681 return -ENODEV;
667 } 682 }
668 683
684 initdata = of_get_regulator_init_data(&pdev->dev, pdev->dev.of_node,
685 &vreg->desc);
686 if (!initdata)
687 return -EINVAL;
688
669 key = "reg"; 689 key = "reg";
670 ret = of_property_read_u32(pdev->dev.of_node, key, &val); 690 ret = of_property_read_u32(pdev->dev.of_node, key, &val);
671 if (ret) { 691 if (ret) {
diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c
index 196a5c8838c4..ea9d05eabd0a 100644
--- a/drivers/regulator/rk808-regulator.c
+++ b/drivers/regulator/rk808-regulator.c
@@ -36,6 +36,12 @@
36#define RK808_RAMP_RATE_6MV_PER_US (2 << RK808_RAMP_RATE_OFFSET) 36#define RK808_RAMP_RATE_6MV_PER_US (2 << RK808_RAMP_RATE_OFFSET)
37#define RK808_RAMP_RATE_10MV_PER_US (3 << RK808_RAMP_RATE_OFFSET) 37#define RK808_RAMP_RATE_10MV_PER_US (3 << RK808_RAMP_RATE_OFFSET)
38 38
39/* Offset from XXX_ON_VSEL to XXX_SLP_VSEL */
40#define RK808_SLP_REG_OFFSET 1
41
42/* Offset from XXX_EN_REG to SLEEP_SET_OFF_XXX */
43#define RK808_SLP_SET_OFF_REG_OFFSET 2
44
39static const int rk808_buck_config_regs[] = { 45static const int rk808_buck_config_regs[] = {
40 RK808_BUCK1_CONFIG_REG, 46 RK808_BUCK1_CONFIG_REG,
41 RK808_BUCK2_CONFIG_REG, 47 RK808_BUCK2_CONFIG_REG,
@@ -91,6 +97,43 @@ static int rk808_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
91 RK808_RAMP_RATE_MASK, ramp_value); 97 RK808_RAMP_RATE_MASK, ramp_value);
92} 98}
93 99
100int rk808_set_suspend_voltage(struct regulator_dev *rdev, int uv)
101{
102 unsigned int reg;
103 int sel = regulator_map_voltage_linear_range(rdev, uv, uv);
104
105 if (sel < 0)
106 return -EINVAL;
107
108 reg = rdev->desc->vsel_reg + RK808_SLP_REG_OFFSET;
109
110 return regmap_update_bits(rdev->regmap, reg,
111 rdev->desc->vsel_mask,
112 sel);
113}
114
115int rk808_set_suspend_enable(struct regulator_dev *rdev)
116{
117 unsigned int reg;
118
119 reg = rdev->desc->enable_reg + RK808_SLP_SET_OFF_REG_OFFSET;
120
121 return regmap_update_bits(rdev->regmap, reg,
122 rdev->desc->enable_mask,
123 0);
124}
125
126int rk808_set_suspend_disable(struct regulator_dev *rdev)
127{
128 unsigned int reg;
129
130 reg = rdev->desc->enable_reg + RK808_SLP_SET_OFF_REG_OFFSET;
131
132 return regmap_update_bits(rdev->regmap, reg,
133 rdev->desc->enable_mask,
134 rdev->desc->enable_mask);
135}
136
94static struct regulator_ops rk808_buck1_2_ops = { 137static struct regulator_ops rk808_buck1_2_ops = {
95 .list_voltage = regulator_list_voltage_linear_range, 138 .list_voltage = regulator_list_voltage_linear_range,
96 .map_voltage = regulator_map_voltage_linear_range, 139 .map_voltage = regulator_map_voltage_linear_range,
@@ -100,6 +143,9 @@ static struct regulator_ops rk808_buck1_2_ops = {
100 .disable = regulator_disable_regmap, 143 .disable = regulator_disable_regmap,
101 .is_enabled = regulator_is_enabled_regmap, 144 .is_enabled = regulator_is_enabled_regmap,
102 .set_ramp_delay = rk808_set_ramp_delay, 145 .set_ramp_delay = rk808_set_ramp_delay,
146 .set_suspend_voltage = rk808_set_suspend_voltage,
147 .set_suspend_enable = rk808_set_suspend_enable,
148 .set_suspend_disable = rk808_set_suspend_disable,
103}; 149};
104 150
105static struct regulator_ops rk808_reg_ops = { 151static struct regulator_ops rk808_reg_ops = {
@@ -110,12 +156,17 @@ static struct regulator_ops rk808_reg_ops = {
110 .enable = regulator_enable_regmap, 156 .enable = regulator_enable_regmap,
111 .disable = regulator_disable_regmap, 157 .disable = regulator_disable_regmap,
112 .is_enabled = regulator_is_enabled_regmap, 158 .is_enabled = regulator_is_enabled_regmap,
159 .set_suspend_voltage = rk808_set_suspend_voltage,
160 .set_suspend_enable = rk808_set_suspend_enable,
161 .set_suspend_disable = rk808_set_suspend_disable,
113}; 162};
114 163
115static struct regulator_ops rk808_switch_ops = { 164static struct regulator_ops rk808_switch_ops = {
116 .enable = regulator_enable_regmap, 165 .enable = regulator_enable_regmap,
117 .disable = regulator_disable_regmap, 166 .disable = regulator_disable_regmap,
118 .is_enabled = regulator_is_enabled_regmap, 167 .is_enabled = regulator_is_enabled_regmap,
168 .set_suspend_enable = rk808_set_suspend_enable,
169 .set_suspend_disable = rk808_set_suspend_disable,
119}; 170};
120 171
121static const struct regulator_desc rk808_reg[] = { 172static const struct regulator_desc rk808_reg[] = {
diff --git a/drivers/regulator/rn5t618-regulator.c b/drivers/regulator/rn5t618-regulator.c
index e58d79aeb393..b85ceb8ff911 100644
--- a/drivers/regulator/rn5t618-regulator.c
+++ b/drivers/regulator/rn5t618-regulator.c
@@ -31,6 +31,8 @@ static struct regulator_ops rn5t618_reg_ops = {
31#define REG(rid, ereg, emask, vreg, vmask, min, max, step) \ 31#define REG(rid, ereg, emask, vreg, vmask, min, max, step) \
32 [RN5T618_##rid] = { \ 32 [RN5T618_##rid] = { \
33 .name = #rid, \ 33 .name = #rid, \
34 .of_match = of_match_ptr(#rid), \
35 .regulators_node = of_match_ptr("regulators"), \
34 .id = RN5T618_##rid, \ 36 .id = RN5T618_##rid, \
35 .type = REGULATOR_VOLTAGE, \ 37 .type = REGULATOR_VOLTAGE, \
36 .owner = THIS_MODULE, \ 38 .owner = THIS_MODULE, \
@@ -60,60 +62,15 @@ static struct regulator_desc rn5t618_regulators[] = {
60 REG(LDORTC2, LDOEN2, BIT(5), LDORTC2DAC, 0x7f, 900000, 3500000, 25000), 62 REG(LDORTC2, LDOEN2, BIT(5), LDORTC2DAC, 0x7f, 900000, 3500000, 25000),
61}; 63};
62 64
63static struct of_regulator_match rn5t618_matches[] = {
64 [RN5T618_DCDC1] = { .name = "DCDC1" },
65 [RN5T618_DCDC2] = { .name = "DCDC2" },
66 [RN5T618_DCDC3] = { .name = "DCDC3" },
67 [RN5T618_LDO1] = { .name = "LDO1" },
68 [RN5T618_LDO2] = { .name = "LDO2" },
69 [RN5T618_LDO3] = { .name = "LDO3" },
70 [RN5T618_LDO4] = { .name = "LDO4" },
71 [RN5T618_LDO5] = { .name = "LDO5" },
72 [RN5T618_LDORTC1] = { .name = "LDORTC1" },
73 [RN5T618_LDORTC2] = { .name = "LDORTC2" },
74};
75
76static int rn5t618_regulator_parse_dt(struct platform_device *pdev)
77{
78 struct device_node *np, *regulators;
79 int ret;
80
81 np = of_node_get(pdev->dev.parent->of_node);
82 if (!np)
83 return 0;
84
85 regulators = of_get_child_by_name(np, "regulators");
86 if (!regulators) {
87 dev_err(&pdev->dev, "regulators node not found\n");
88 return -EINVAL;
89 }
90
91 ret = of_regulator_match(&pdev->dev, regulators, rn5t618_matches,
92 ARRAY_SIZE(rn5t618_matches));
93 of_node_put(regulators);
94 if (ret < 0) {
95 dev_err(&pdev->dev, "error parsing regulator init data: %d\n",
96 ret);
97 }
98
99 return 0;
100}
101
102static int rn5t618_regulator_probe(struct platform_device *pdev) 65static int rn5t618_regulator_probe(struct platform_device *pdev)
103{ 66{
104 struct rn5t618 *rn5t618 = dev_get_drvdata(pdev->dev.parent); 67 struct rn5t618 *rn5t618 = dev_get_drvdata(pdev->dev.parent);
105 struct regulator_config config = { }; 68 struct regulator_config config = { };
106 struct regulator_dev *rdev; 69 struct regulator_dev *rdev;
107 int ret, i; 70 int i;
108
109 ret = rn5t618_regulator_parse_dt(pdev);
110 if (ret)
111 return ret;
112 71
113 for (i = 0; i < RN5T618_REG_NUM; i++) { 72 for (i = 0; i < RN5T618_REG_NUM; i++) {
114 config.dev = &pdev->dev; 73 config.dev = pdev->dev.parent;
115 config.init_data = rn5t618_matches[i].init_data;
116 config.of_node = rn5t618_matches[i].of_node;
117 config.regmap = rn5t618->regmap; 74 config.regmap = rn5t618->regmap;
118 75
119 rdev = devm_regulator_register(&pdev->dev, 76 rdev = devm_regulator_register(&pdev->dev,
diff --git a/drivers/regulator/rt5033-regulator.c b/drivers/regulator/rt5033-regulator.c
new file mode 100644
index 000000000000..870cc49438db
--- /dev/null
+++ b/drivers/regulator/rt5033-regulator.c
@@ -0,0 +1,123 @@
1/*
2 * Regulator driver for the Richtek RT5033
3 *
4 * Copyright (C) 2014 Samsung Electronics, Co., Ltd.
5 * Author: Beomho Seo <beomho.seo@samsung.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published bythe Free Software Foundation.
10 */
11
12#include <linux/module.h>
13#include <linux/platform_device.h>
14#include <linux/regulator/driver.h>
15#include <linux/mfd/rt5033.h>
16#include <linux/mfd/rt5033-private.h>
17#include <linux/regulator/of_regulator.h>
18
19static struct regulator_ops rt5033_safe_ldo_ops = {
20 .is_enabled = regulator_is_enabled_regmap,
21 .enable = regulator_enable_regmap,
22 .disable = regulator_disable_regmap,
23 .list_voltage = regulator_list_voltage_linear,
24};
25
26static struct regulator_ops rt5033_buck_ops = {
27 .is_enabled = regulator_is_enabled_regmap,
28 .enable = regulator_enable_regmap,
29 .disable = regulator_disable_regmap,
30 .list_voltage = regulator_list_voltage_linear,
31 .map_voltage = regulator_map_voltage_linear,
32 .get_voltage_sel = regulator_get_voltage_sel_regmap,
33 .set_voltage_sel = regulator_set_voltage_sel_regmap,
34};
35
36static const struct regulator_desc rt5033_supported_regulators[] = {
37 [RT5033_BUCK] = {
38 .name = "BUCK",
39 .id = RT5033_BUCK,
40 .ops = &rt5033_buck_ops,
41 .type = REGULATOR_VOLTAGE,
42 .owner = THIS_MODULE,
43 .n_voltages = RT5033_REGULATOR_BUCK_VOLTAGE_STEP_NUM,
44 .min_uV = RT5033_REGULATOR_BUCK_VOLTAGE_MIN,
45 .uV_step = RT5033_REGULATOR_BUCK_VOLTAGE_STEP,
46 .enable_reg = RT5033_REG_CTRL,
47 .enable_mask = RT5033_CTRL_EN_BUCK_MASK,
48 .vsel_reg = RT5033_REG_BUCK_CTRL,
49 .vsel_mask = RT5033_BUCK_CTRL_MASK,
50 },
51 [RT5033_LDO] = {
52 .name = "LDO",
53 .id = RT5033_LDO,
54 .ops = &rt5033_buck_ops,
55 .type = REGULATOR_VOLTAGE,
56 .owner = THIS_MODULE,
57 .n_voltages = RT5033_REGULATOR_LDO_VOLTAGE_STEP_NUM,
58 .min_uV = RT5033_REGULATOR_LDO_VOLTAGE_MIN,
59 .uV_step = RT5033_REGULATOR_LDO_VOLTAGE_STEP,
60 .enable_reg = RT5033_REG_CTRL,
61 .enable_mask = RT5033_CTRL_EN_LDO_MASK,
62 .vsel_reg = RT5033_REG_LDO_CTRL,
63 .vsel_mask = RT5033_LDO_CTRL_MASK,
64 },
65 [RT5033_SAFE_LDO] = {
66 .name = "SAFE_LDO",
67 .id = RT5033_SAFE_LDO,
68 .ops = &rt5033_safe_ldo_ops,
69 .type = REGULATOR_VOLTAGE,
70 .owner = THIS_MODULE,
71 .n_voltages = 1,
72 .min_uV = RT5033_REGULATOR_SAFE_LDO_VOLTAGE,
73 .enable_reg = RT5033_REG_CTRL,
74 .enable_mask = RT5033_CTRL_EN_SAFE_LDO_MASK,
75 },
76};
77
78static int rt5033_regulator_probe(struct platform_device *pdev)
79{
80 struct rt5033_dev *rt5033 = dev_get_drvdata(pdev->dev.parent);
81 int ret, i;
82 struct regulator_config config = {};
83
84 config.dev = &pdev->dev;
85 config.driver_data = rt5033;
86
87 for (i = 0; i < ARRAY_SIZE(rt5033_supported_regulators); i++) {
88 struct regulator_dev *regulator;
89
90 config.regmap = rt5033->regmap;
91
92 regulator = devm_regulator_register(&pdev->dev,
93 &rt5033_supported_regulators[i], &config);
94 if (IS_ERR(regulator)) {
95 ret = PTR_ERR(regulator);
96 dev_err(&pdev->dev,
97 "Regulator init failed %d: with error: %d\n",
98 i, ret);
99 return ret;
100 }
101 }
102
103 return 0;
104}
105
106static const struct platform_device_id rt5033_regulator_id[] = {
107 { "rt5033-regulator", },
108 { }
109};
110MODULE_DEVICE_TABLE(platform, rt5033_regulator_id);
111
112static struct platform_driver rt5033_regulator_driver = {
113 .driver = {
114 .name = "rt5033-regulator",
115 },
116 .probe = rt5033_regulator_probe,
117 .id_table = rt5033_regulator_id,
118};
119module_platform_driver(rt5033_regulator_driver);
120
121MODULE_DESCRIPTION("Richtek RT5033 Regulator driver");
122MODULE_AUTHOR("Beomho Seo <beomho.seo@samsung.com>");
123MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/s2mpa01.c b/drivers/regulator/s2mpa01.c
index 7633b9bfbe6e..5db4e12a7e04 100644
--- a/drivers/regulator/s2mpa01.c
+++ b/drivers/regulator/s2mpa01.c
@@ -298,7 +298,7 @@ static struct regulator_ops s2mpa01_buck_ops = {
298 .enable_mask = S2MPA01_ENABLE_MASK \ 298 .enable_mask = S2MPA01_ENABLE_MASK \
299} 299}
300 300
301static struct regulator_desc regulators[] = { 301static const struct regulator_desc regulators[] = {
302 regulator_desc_ldo(1, STEP_25_MV), 302 regulator_desc_ldo(1, STEP_25_MV),
303 regulator_desc_ldo(2, STEP_50_MV), 303 regulator_desc_ldo(2, STEP_50_MV),
304 regulator_desc_ldo(3, STEP_50_MV), 304 regulator_desc_ldo(3, STEP_50_MV),
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
index adab82d5279f..b345cf51225a 100644
--- a/drivers/regulator/s2mps11.c
+++ b/drivers/regulator/s2mps11.c
@@ -30,6 +30,7 @@
30#include <linux/of_gpio.h> 30#include <linux/of_gpio.h>
31#include <linux/mfd/samsung/core.h> 31#include <linux/mfd/samsung/core.h>
32#include <linux/mfd/samsung/s2mps11.h> 32#include <linux/mfd/samsung/s2mps11.h>
33#include <linux/mfd/samsung/s2mps13.h>
33#include <linux/mfd/samsung/s2mps14.h> 34#include <linux/mfd/samsung/s2mps14.h>
34#include <linux/mfd/samsung/s2mpu02.h> 35#include <linux/mfd/samsung/s2mpu02.h>
35 36
@@ -45,10 +46,10 @@ struct s2mps11_info {
45 enum sec_device_type dev_type; 46 enum sec_device_type dev_type;
46 47
47 /* 48 /*
48 * One bit for each S2MPS14/S2MPU02 regulator whether the suspend mode 49 * One bit for each S2MPS13/S2MPS14/S2MPU02 regulator whether
49 * was enabled. 50 * the suspend mode was enabled.
50 */ 51 */
51 unsigned long long s2mps14_suspend_state:35; 52 unsigned long long s2mps14_suspend_state:50;
52 53
53 /* Array of size rdev_num with GPIO-s for external sleep control */ 54 /* Array of size rdev_num with GPIO-s for external sleep control */
54 int *ext_control_gpio; 55 int *ext_control_gpio;
@@ -369,12 +370,101 @@ static const struct regulator_desc s2mps11_regulators[] = {
369 regulator_desc_s2mps11_buck6_10(10, MIN_750_MV, STEP_12_5_MV), 370 regulator_desc_s2mps11_buck6_10(10, MIN_750_MV, STEP_12_5_MV),
370}; 371};
371 372
373static struct regulator_ops s2mps14_reg_ops;
374
375#define regulator_desc_s2mps13_ldo(num, min, step, min_sel) { \
376 .name = "LDO"#num, \
377 .id = S2MPS13_LDO##num, \
378 .ops = &s2mps14_reg_ops, \
379 .type = REGULATOR_VOLTAGE, \
380 .owner = THIS_MODULE, \
381 .min_uV = min, \
382 .uV_step = step, \
383 .linear_min_sel = min_sel, \
384 .n_voltages = S2MPS14_LDO_N_VOLTAGES, \
385 .vsel_reg = S2MPS13_REG_L1CTRL + num - 1, \
386 .vsel_mask = S2MPS14_LDO_VSEL_MASK, \
387 .enable_reg = S2MPS13_REG_L1CTRL + num - 1, \
388 .enable_mask = S2MPS14_ENABLE_MASK \
389}
390
391#define regulator_desc_s2mps13_buck(num, min, step, min_sel) { \
392 .name = "BUCK"#num, \
393 .id = S2MPS13_BUCK##num, \
394 .ops = &s2mps14_reg_ops, \
395 .type = REGULATOR_VOLTAGE, \
396 .owner = THIS_MODULE, \
397 .min_uV = min, \
398 .uV_step = step, \
399 .linear_min_sel = min_sel, \
400 .n_voltages = S2MPS14_BUCK_N_VOLTAGES, \
401 .ramp_delay = S2MPS13_BUCK_RAMP_DELAY, \
402 .vsel_reg = S2MPS13_REG_B1OUT + (num - 1) * 2, \
403 .vsel_mask = S2MPS14_BUCK_VSEL_MASK, \
404 .enable_reg = S2MPS13_REG_B1CTRL + (num - 1) * 2, \
405 .enable_mask = S2MPS14_ENABLE_MASK \
406}
407
408static const struct regulator_desc s2mps13_regulators[] = {
409 regulator_desc_s2mps13_ldo(1, MIN_800_MV, STEP_12_5_MV, 0x00),
410 regulator_desc_s2mps13_ldo(2, MIN_1400_MV, STEP_50_MV, 0x0C),
411 regulator_desc_s2mps13_ldo(3, MIN_1000_MV, STEP_25_MV, 0x08),
412 regulator_desc_s2mps13_ldo(4, MIN_800_MV, STEP_12_5_MV, 0x00),
413 regulator_desc_s2mps13_ldo(5, MIN_800_MV, STEP_12_5_MV, 0x00),
414 regulator_desc_s2mps13_ldo(6, MIN_800_MV, STEP_12_5_MV, 0x00),
415 regulator_desc_s2mps13_ldo(7, MIN_1000_MV, STEP_25_MV, 0x08),
416 regulator_desc_s2mps13_ldo(8, MIN_1000_MV, STEP_25_MV, 0x08),
417 regulator_desc_s2mps13_ldo(9, MIN_1000_MV, STEP_25_MV, 0x08),
418 regulator_desc_s2mps13_ldo(10, MIN_1400_MV, STEP_50_MV, 0x0C),
419 regulator_desc_s2mps13_ldo(11, MIN_800_MV, STEP_25_MV, 0x10),
420 regulator_desc_s2mps13_ldo(12, MIN_800_MV, STEP_25_MV, 0x10),
421 regulator_desc_s2mps13_ldo(13, MIN_800_MV, STEP_25_MV, 0x10),
422 regulator_desc_s2mps13_ldo(14, MIN_800_MV, STEP_12_5_MV, 0x00),
423 regulator_desc_s2mps13_ldo(15, MIN_800_MV, STEP_12_5_MV, 0x00),
424 regulator_desc_s2mps13_ldo(16, MIN_1400_MV, STEP_50_MV, 0x0C),
425 regulator_desc_s2mps13_ldo(17, MIN_1400_MV, STEP_50_MV, 0x0C),
426 regulator_desc_s2mps13_ldo(18, MIN_1000_MV, STEP_25_MV, 0x08),
427 regulator_desc_s2mps13_ldo(19, MIN_1000_MV, STEP_25_MV, 0x08),
428 regulator_desc_s2mps13_ldo(20, MIN_1400_MV, STEP_50_MV, 0x0C),
429 regulator_desc_s2mps13_ldo(21, MIN_1000_MV, STEP_25_MV, 0x08),
430 regulator_desc_s2mps13_ldo(22, MIN_1000_MV, STEP_25_MV, 0x08),
431 regulator_desc_s2mps13_ldo(23, MIN_800_MV, STEP_12_5_MV, 0x00),
432 regulator_desc_s2mps13_ldo(24, MIN_800_MV, STEP_12_5_MV, 0x00),
433 regulator_desc_s2mps13_ldo(25, MIN_1400_MV, STEP_50_MV, 0x0C),
434 regulator_desc_s2mps13_ldo(26, MIN_1400_MV, STEP_50_MV, 0x0C),
435 regulator_desc_s2mps13_ldo(27, MIN_1400_MV, STEP_50_MV, 0x0C),
436 regulator_desc_s2mps13_ldo(28, MIN_1000_MV, STEP_25_MV, 0x08),
437 regulator_desc_s2mps13_ldo(29, MIN_1400_MV, STEP_50_MV, 0x0C),
438 regulator_desc_s2mps13_ldo(30, MIN_1400_MV, STEP_50_MV, 0x0C),
439 regulator_desc_s2mps13_ldo(31, MIN_1000_MV, STEP_25_MV, 0x08),
440 regulator_desc_s2mps13_ldo(32, MIN_1000_MV, STEP_25_MV, 0x08),
441 regulator_desc_s2mps13_ldo(33, MIN_1400_MV, STEP_50_MV, 0x0C),
442 regulator_desc_s2mps13_ldo(34, MIN_1000_MV, STEP_25_MV, 0x08),
443 regulator_desc_s2mps13_ldo(35, MIN_1400_MV, STEP_50_MV, 0x0C),
444 regulator_desc_s2mps13_ldo(36, MIN_800_MV, STEP_12_5_MV, 0x00),
445 regulator_desc_s2mps13_ldo(37, MIN_1000_MV, STEP_25_MV, 0x08),
446 regulator_desc_s2mps13_ldo(38, MIN_1400_MV, STEP_50_MV, 0x0C),
447 regulator_desc_s2mps13_ldo(39, MIN_1000_MV, STEP_25_MV, 0x08),
448 regulator_desc_s2mps13_ldo(40, MIN_1400_MV, STEP_50_MV, 0x0C),
449 regulator_desc_s2mps13_buck(1, MIN_500_MV, STEP_6_25_MV, 0x10),
450 regulator_desc_s2mps13_buck(2, MIN_500_MV, STEP_6_25_MV, 0x10),
451 regulator_desc_s2mps13_buck(3, MIN_500_MV, STEP_6_25_MV, 0x10),
452 regulator_desc_s2mps13_buck(4, MIN_500_MV, STEP_6_25_MV, 0x10),
453 regulator_desc_s2mps13_buck(5, MIN_500_MV, STEP_6_25_MV, 0x10),
454 regulator_desc_s2mps13_buck(6, MIN_500_MV, STEP_6_25_MV, 0x10),
455 regulator_desc_s2mps13_buck(7, MIN_500_MV, STEP_6_25_MV, 0x10),
456 regulator_desc_s2mps13_buck(8, MIN_1000_MV, STEP_12_5_MV, 0x20),
457 regulator_desc_s2mps13_buck(9, MIN_1000_MV, STEP_12_5_MV, 0x20),
458 regulator_desc_s2mps13_buck(10, MIN_500_MV, STEP_6_25_MV, 0x10),
459};
460
372static int s2mps14_regulator_enable(struct regulator_dev *rdev) 461static int s2mps14_regulator_enable(struct regulator_dev *rdev)
373{ 462{
374 struct s2mps11_info *s2mps11 = rdev_get_drvdata(rdev); 463 struct s2mps11_info *s2mps11 = rdev_get_drvdata(rdev);
375 unsigned int val; 464 unsigned int val;
376 465
377 switch (s2mps11->dev_type) { 466 switch (s2mps11->dev_type) {
467 case S2MPS13X:
378 case S2MPS14X: 468 case S2MPS14X:
379 if (s2mps11->s2mps14_suspend_state & (1 << rdev_get_id(rdev))) 469 if (s2mps11->s2mps14_suspend_state & (1 << rdev_get_id(rdev)))
380 val = S2MPS14_ENABLE_SUSPEND; 470 val = S2MPS14_ENABLE_SUSPEND;
@@ -406,6 +496,7 @@ static int s2mps14_regulator_set_suspend_disable(struct regulator_dev *rdev)
406 496
407 /* Below LDO should be always on or does not support suspend mode. */ 497 /* Below LDO should be always on or does not support suspend mode. */
408 switch (s2mps11->dev_type) { 498 switch (s2mps11->dev_type) {
499 case S2MPS13X:
409 case S2MPS14X: 500 case S2MPS14X:
410 switch (rdev_id) { 501 switch (rdev_id) {
411 case S2MPS14_LDO3: 502 case S2MPS14_LDO3:
@@ -831,6 +922,10 @@ static int s2mps11_pmic_probe(struct platform_device *pdev)
831 s2mps11->rdev_num = ARRAY_SIZE(s2mps11_regulators); 922 s2mps11->rdev_num = ARRAY_SIZE(s2mps11_regulators);
832 regulators = s2mps11_regulators; 923 regulators = s2mps11_regulators;
833 break; 924 break;
925 case S2MPS13X:
926 s2mps11->rdev_num = ARRAY_SIZE(s2mps13_regulators);
927 regulators = s2mps13_regulators;
928 break;
834 case S2MPS14X: 929 case S2MPS14X:
835 s2mps11->rdev_num = ARRAY_SIZE(s2mps14_regulators); 930 s2mps11->rdev_num = ARRAY_SIZE(s2mps14_regulators);
836 regulators = s2mps14_regulators; 931 regulators = s2mps14_regulators;
@@ -845,7 +940,7 @@ static int s2mps11_pmic_probe(struct platform_device *pdev)
845 return -EINVAL; 940 return -EINVAL;
846 }; 941 };
847 942
848 s2mps11->ext_control_gpio = devm_kzalloc(&pdev->dev, 943 s2mps11->ext_control_gpio = devm_kmalloc(&pdev->dev,
849 sizeof(*s2mps11->ext_control_gpio) * s2mps11->rdev_num, 944 sizeof(*s2mps11->ext_control_gpio) * s2mps11->rdev_num,
850 GFP_KERNEL); 945 GFP_KERNEL);
851 if (!s2mps11->ext_control_gpio) 946 if (!s2mps11->ext_control_gpio)
@@ -886,6 +981,7 @@ common_reg:
886 config.regmap = iodev->regmap_pmic; 981 config.regmap = iodev->regmap_pmic;
887 config.driver_data = s2mps11; 982 config.driver_data = s2mps11;
888 config.ena_gpio_flags = GPIOF_OUT_INIT_HIGH; 983 config.ena_gpio_flags = GPIOF_OUT_INIT_HIGH;
984 config.ena_gpio_initialized = true;
889 for (i = 0; i < s2mps11->rdev_num; i++) { 985 for (i = 0; i < s2mps11->rdev_num; i++) {
890 struct regulator_dev *regulator; 986 struct regulator_dev *regulator;
891 987
@@ -927,6 +1023,7 @@ out:
927 1023
928static const struct platform_device_id s2mps11_pmic_id[] = { 1024static const struct platform_device_id s2mps11_pmic_id[] = {
929 { "s2mps11-pmic", S2MPS11X}, 1025 { "s2mps11-pmic", S2MPS11X},
1026 { "s2mps13-pmic", S2MPS13X},
930 { "s2mps14-pmic", S2MPS14X}, 1027 { "s2mps14-pmic", S2MPS14X},
931 { "s2mpu02-pmic", S2MPU02}, 1028 { "s2mpu02-pmic", S2MPU02},
932 { }, 1029 { },
diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
index 0ab5cbeeb797..dc1328c0c71c 100644
--- a/drivers/regulator/s5m8767.c
+++ b/drivers/regulator/s5m8767.c
@@ -581,7 +581,8 @@ static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev,
581 581
582 rdata->id = i; 582 rdata->id = i;
583 rdata->initdata = of_get_regulator_init_data( 583 rdata->initdata = of_get_regulator_init_data(
584 &pdev->dev, reg_np); 584 &pdev->dev, reg_np,
585 &regulators[i]);
585 rdata->reg_node = reg_np; 586 rdata->reg_node = reg_np;
586 rdata++; 587 rdata++;
587 rmode->id = i; 588 rmode->id = i;
@@ -950,6 +951,7 @@ static int s5m8767_pmic_probe(struct platform_device *pdev)
950 config.of_node = pdata->regulators[i].reg_node; 951 config.of_node = pdata->regulators[i].reg_node;
951 config.ena_gpio = -EINVAL; 952 config.ena_gpio = -EINVAL;
952 config.ena_gpio_flags = 0; 953 config.ena_gpio_flags = 0;
954 config.ena_gpio_initialized = true;
953 if (gpio_is_valid(pdata->regulators[i].ext_control_gpio)) 955 if (gpio_is_valid(pdata->regulators[i].ext_control_gpio))
954 s5m8767_regulator_config_ext_control(s5m8767, 956 s5m8767_regulator_config_ext_control(s5m8767,
955 &pdata->regulators[i], &config); 957 &pdata->regulators[i], &config);
diff --git a/drivers/regulator/sky81452-regulator.c b/drivers/regulator/sky81452-regulator.c
index 97aff0ccd65f..647860611916 100644
--- a/drivers/regulator/sky81452-regulator.c
+++ b/drivers/regulator/sky81452-regulator.c
@@ -5,9 +5,8 @@
5 * Author : Gyungoh Yoo <jack.yoo@skyworksinc.com> 5 * Author : Gyungoh Yoo <jack.yoo@skyworksinc.com>
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify it 7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the 8 * under the terms of the GNU General Public License version 2
9 * Free Software Foundation; either version 2, or (at your option) any 9 * as published by the Free Software Foundation.
10 * later version.
11 * 10 *
12 * This program is distributed in the hope that it will be useful, but 11 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of 12 * WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -52,6 +51,8 @@ static const struct regulator_linear_range sky81452_reg_ranges[] = {
52 51
53static const struct regulator_desc sky81452_reg = { 52static const struct regulator_desc sky81452_reg = {
54 .name = "LOUT", 53 .name = "LOUT",
54 .of_match = of_match_ptr("lout"),
55 .regulators_node = of_match_ptr("regulator"),
55 .ops = &sky81452_reg_ops, 56 .ops = &sky81452_reg_ops,
56 .type = REGULATOR_VOLTAGE, 57 .type = REGULATOR_VOLTAGE,
57 .owner = THIS_MODULE, 58 .owner = THIS_MODULE,
@@ -64,30 +65,6 @@ static const struct regulator_desc sky81452_reg = {
64 .enable_mask = SKY81452_LEN, 65 .enable_mask = SKY81452_LEN,
65}; 66};
66 67
67#ifdef CONFIG_OF
68static struct regulator_init_data *sky81452_reg_parse_dt(struct device *dev)
69{
70 struct regulator_init_data *init_data;
71 struct device_node *np;
72
73 np = of_get_child_by_name(dev->parent->of_node, "regulator");
74 if (unlikely(!np)) {
75 dev_err(dev, "regulator node not found");
76 return NULL;
77 }
78
79 init_data = of_get_regulator_init_data(dev, np);
80
81 of_node_put(np);
82 return init_data;
83}
84#else
85static struct regulator_init_data *sky81452_reg_parse_dt(struct device *dev)
86{
87 return ERR_PTR(-EINVAL);
88}
89#endif
90
91static int sky81452_reg_probe(struct platform_device *pdev) 68static int sky81452_reg_probe(struct platform_device *pdev)
92{ 69{
93 struct device *dev = &pdev->dev; 70 struct device *dev = &pdev->dev;
@@ -95,20 +72,16 @@ static int sky81452_reg_probe(struct platform_device *pdev)
95 struct regulator_config config = { }; 72 struct regulator_config config = { };
96 struct regulator_dev *rdev; 73 struct regulator_dev *rdev;
97 74
98 if (!init_data) { 75 config.dev = dev->parent;
99 init_data = sky81452_reg_parse_dt(dev);
100 if (IS_ERR(init_data))
101 return PTR_ERR(init_data);
102 }
103
104 config.dev = dev;
105 config.init_data = init_data; 76 config.init_data = init_data;
106 config.of_node = dev->of_node; 77 config.of_node = dev->of_node;
107 config.regmap = dev_get_drvdata(dev->parent); 78 config.regmap = dev_get_drvdata(dev->parent);
108 79
109 rdev = devm_regulator_register(dev, &sky81452_reg, &config); 80 rdev = devm_regulator_register(dev, &sky81452_reg, &config);
110 if (IS_ERR(rdev)) 81 if (IS_ERR(rdev)) {
82 dev_err(dev, "failed to register. err=%ld\n", PTR_ERR(rdev));
111 return PTR_ERR(rdev); 83 return PTR_ERR(rdev);
84 }
112 85
113 platform_set_drvdata(pdev, rdev); 86 platform_set_drvdata(pdev, rdev);
114 87
@@ -126,5 +99,4 @@ module_platform_driver(sky81452_reg_driver);
126 99
127MODULE_DESCRIPTION("Skyworks SKY81452 Regulator driver"); 100MODULE_DESCRIPTION("Skyworks SKY81452 Regulator driver");
128MODULE_AUTHOR("Gyungoh Yoo <jack.yoo@skyworksinc.com>"); 101MODULE_AUTHOR("Gyungoh Yoo <jack.yoo@skyworksinc.com>");
129MODULE_LICENSE("GPL"); 102MODULE_LICENSE("GPL v2");
130MODULE_VERSION("1.0");
diff --git a/drivers/regulator/stw481x-vmmc.c b/drivers/regulator/stw481x-vmmc.c
index a7e152696a02..b4f1696456a7 100644
--- a/drivers/regulator/stw481x-vmmc.c
+++ b/drivers/regulator/stw481x-vmmc.c
@@ -72,7 +72,8 @@ static int stw481x_vmmc_regulator_probe(struct platform_device *pdev)
72 config.regmap = stw481x->map; 72 config.regmap = stw481x->map;
73 config.of_node = pdev->dev.of_node; 73 config.of_node = pdev->dev.of_node;
74 config.init_data = of_get_regulator_init_data(&pdev->dev, 74 config.init_data = of_get_regulator_init_data(&pdev->dev,
75 pdev->dev.of_node); 75 pdev->dev.of_node,
76 &vmmc_regulator);
76 77
77 stw481x->vmmc_regulator = devm_regulator_register(&pdev->dev, 78 stw481x->vmmc_regulator = devm_regulator_register(&pdev->dev,
78 &vmmc_regulator, &config); 79 &vmmc_regulator, &config);
diff --git a/drivers/regulator/ti-abb-regulator.c b/drivers/regulator/ti-abb-regulator.c
index a2dabb575b97..1ef5aba96f17 100644
--- a/drivers/regulator/ti-abb-regulator.c
+++ b/drivers/regulator/ti-abb-regulator.c
@@ -837,7 +837,8 @@ skip_opt:
837 return -EINVAL; 837 return -EINVAL;
838 } 838 }
839 839
840 initdata = of_get_regulator_init_data(dev, pdev->dev.of_node); 840 initdata = of_get_regulator_init_data(dev, pdev->dev.of_node,
841 &abb->rdesc);
841 if (!initdata) { 842 if (!initdata) {
842 dev_err(dev, "%s: Unable to alloc regulator init data\n", 843 dev_err(dev, "%s: Unable to alloc regulator init data\n",
843 __func__); 844 __func__);
diff --git a/drivers/regulator/tps51632-regulator.c b/drivers/regulator/tps51632-regulator.c
index f31f22e3e1bd..c213e37eb69e 100644
--- a/drivers/regulator/tps51632-regulator.c
+++ b/drivers/regulator/tps51632-regulator.c
@@ -221,7 +221,8 @@ static const struct of_device_id tps51632_of_match[] = {
221MODULE_DEVICE_TABLE(of, tps51632_of_match); 221MODULE_DEVICE_TABLE(of, tps51632_of_match);
222 222
223static struct tps51632_regulator_platform_data * 223static struct tps51632_regulator_platform_data *
224 of_get_tps51632_platform_data(struct device *dev) 224 of_get_tps51632_platform_data(struct device *dev,
225 const struct regulator_desc *desc)
225{ 226{
226 struct tps51632_regulator_platform_data *pdata; 227 struct tps51632_regulator_platform_data *pdata;
227 struct device_node *np = dev->of_node; 228 struct device_node *np = dev->of_node;
@@ -230,7 +231,8 @@ static struct tps51632_regulator_platform_data *
230 if (!pdata) 231 if (!pdata)
231 return NULL; 232 return NULL;
232 233
233 pdata->reg_init_data = of_get_regulator_init_data(dev, dev->of_node); 234 pdata->reg_init_data = of_get_regulator_init_data(dev, dev->of_node,
235 desc);
234 if (!pdata->reg_init_data) { 236 if (!pdata->reg_init_data) {
235 dev_err(dev, "Not able to get OF regulator init data\n"); 237 dev_err(dev, "Not able to get OF regulator init data\n");
236 return NULL; 238 return NULL;
@@ -248,7 +250,8 @@ static struct tps51632_regulator_platform_data *
248} 250}
249#else 251#else
250static struct tps51632_regulator_platform_data * 252static struct tps51632_regulator_platform_data *
251 of_get_tps51632_platform_data(struct device *dev) 253 of_get_tps51632_platform_data(struct device *dev,
254 const struct regulator_desc *desc)
252{ 255{
253 return NULL; 256 return NULL;
254} 257}
@@ -273,9 +276,25 @@ static int tps51632_probe(struct i2c_client *client,
273 } 276 }
274 } 277 }
275 278
279 tps = devm_kzalloc(&client->dev, sizeof(*tps), GFP_KERNEL);
280 if (!tps)
281 return -ENOMEM;
282
283 tps->dev = &client->dev;
284 tps->desc.name = client->name;
285 tps->desc.id = 0;
286 tps->desc.ramp_delay = TPS51632_DEFAULT_RAMP_DELAY;
287 tps->desc.min_uV = TPS51632_MIN_VOLTAGE;
288 tps->desc.uV_step = TPS51632_VOLTAGE_STEP_10mV;
289 tps->desc.linear_min_sel = TPS51632_MIN_VSEL;
290 tps->desc.n_voltages = TPS51632_MAX_VSEL + 1;
291 tps->desc.ops = &tps51632_dcdc_ops;
292 tps->desc.type = REGULATOR_VOLTAGE;
293 tps->desc.owner = THIS_MODULE;
294
276 pdata = dev_get_platdata(&client->dev); 295 pdata = dev_get_platdata(&client->dev);
277 if (!pdata && client->dev.of_node) 296 if (!pdata && client->dev.of_node)
278 pdata = of_get_tps51632_platform_data(&client->dev); 297 pdata = of_get_tps51632_platform_data(&client->dev, &tps->desc);
279 if (!pdata) { 298 if (!pdata) {
280 dev_err(&client->dev, "No Platform data\n"); 299 dev_err(&client->dev, "No Platform data\n");
281 return -EINVAL; 300 return -EINVAL;
@@ -296,22 +315,6 @@ static int tps51632_probe(struct i2c_client *client,
296 } 315 }
297 } 316 }
298 317
299 tps = devm_kzalloc(&client->dev, sizeof(*tps), GFP_KERNEL);
300 if (!tps)
301 return -ENOMEM;
302
303 tps->dev = &client->dev;
304 tps->desc.name = client->name;
305 tps->desc.id = 0;
306 tps->desc.ramp_delay = TPS51632_DEFAULT_RAMP_DELAY;
307 tps->desc.min_uV = TPS51632_MIN_VOLTAGE;
308 tps->desc.uV_step = TPS51632_VOLTAGE_STEP_10mV;
309 tps->desc.linear_min_sel = TPS51632_MIN_VSEL;
310 tps->desc.n_voltages = TPS51632_MAX_VSEL + 1;
311 tps->desc.ops = &tps51632_dcdc_ops;
312 tps->desc.type = REGULATOR_VOLTAGE;
313 tps->desc.owner = THIS_MODULE;
314
315 if (pdata->enable_pwm_dvfs) 318 if (pdata->enable_pwm_dvfs)
316 tps->desc.vsel_reg = TPS51632_VOLTAGE_BASE_REG; 319 tps->desc.vsel_reg = TPS51632_VOLTAGE_BASE_REG;
317 else 320 else
diff --git a/drivers/regulator/tps62360-regulator.c b/drivers/regulator/tps62360-regulator.c
index a1672044e519..a1fd626c6c96 100644
--- a/drivers/regulator/tps62360-regulator.c
+++ b/drivers/regulator/tps62360-regulator.c
@@ -293,7 +293,8 @@ static const struct regmap_config tps62360_regmap_config = {
293}; 293};
294 294
295static struct tps62360_regulator_platform_data * 295static struct tps62360_regulator_platform_data *
296 of_get_tps62360_platform_data(struct device *dev) 296 of_get_tps62360_platform_data(struct device *dev,
297 const struct regulator_desc *desc)
297{ 298{
298 struct tps62360_regulator_platform_data *pdata; 299 struct tps62360_regulator_platform_data *pdata;
299 struct device_node *np = dev->of_node; 300 struct device_node *np = dev->of_node;
@@ -302,7 +303,8 @@ static struct tps62360_regulator_platform_data *
302 if (!pdata) 303 if (!pdata)
303 return NULL; 304 return NULL;
304 305
305 pdata->reg_init_data = of_get_regulator_init_data(dev, dev->of_node); 306 pdata->reg_init_data = of_get_regulator_init_data(dev, dev->of_node,
307 desc);
306 if (!pdata->reg_init_data) { 308 if (!pdata->reg_init_data) {
307 dev_err(dev, "Not able to get OF regulator init data\n"); 309 dev_err(dev, "Not able to get OF regulator init data\n");
308 return NULL; 310 return NULL;
@@ -350,6 +352,17 @@ static int tps62360_probe(struct i2c_client *client,
350 352
351 pdata = dev_get_platdata(&client->dev); 353 pdata = dev_get_platdata(&client->dev);
352 354
355 tps = devm_kzalloc(&client->dev, sizeof(*tps), GFP_KERNEL);
356 if (!tps)
357 return -ENOMEM;
358
359 tps->desc.name = client->name;
360 tps->desc.id = 0;
361 tps->desc.ops = &tps62360_dcdc_ops;
362 tps->desc.type = REGULATOR_VOLTAGE;
363 tps->desc.owner = THIS_MODULE;
364 tps->desc.uV_step = 10000;
365
353 if (client->dev.of_node) { 366 if (client->dev.of_node) {
354 const struct of_device_id *match; 367 const struct of_device_id *match;
355 match = of_match_device(of_match_ptr(tps62360_of_match), 368 match = of_match_device(of_match_ptr(tps62360_of_match),
@@ -360,7 +373,8 @@ static int tps62360_probe(struct i2c_client *client,
360 } 373 }
361 chip_id = (int)(long)match->data; 374 chip_id = (int)(long)match->data;
362 if (!pdata) 375 if (!pdata)
363 pdata = of_get_tps62360_platform_data(&client->dev); 376 pdata = of_get_tps62360_platform_data(&client->dev,
377 &tps->desc);
364 } else if (id) { 378 } else if (id) {
365 chip_id = id->driver_data; 379 chip_id = id->driver_data;
366 } else { 380 } else {
@@ -374,10 +388,6 @@ static int tps62360_probe(struct i2c_client *client,
374 return -EIO; 388 return -EIO;
375 } 389 }
376 390
377 tps = devm_kzalloc(&client->dev, sizeof(*tps), GFP_KERNEL);
378 if (!tps)
379 return -ENOMEM;
380
381 tps->en_discharge = pdata->en_discharge; 391 tps->en_discharge = pdata->en_discharge;
382 tps->en_internal_pulldn = pdata->en_internal_pulldn; 392 tps->en_internal_pulldn = pdata->en_internal_pulldn;
383 tps->vsel0_gpio = pdata->vsel0_gpio; 393 tps->vsel0_gpio = pdata->vsel0_gpio;
@@ -401,13 +411,6 @@ static int tps62360_probe(struct i2c_client *client,
401 return -ENODEV; 411 return -ENODEV;
402 } 412 }
403 413
404 tps->desc.name = client->name;
405 tps->desc.id = 0;
406 tps->desc.ops = &tps62360_dcdc_ops;
407 tps->desc.type = REGULATOR_VOLTAGE;
408 tps->desc.owner = THIS_MODULE;
409 tps->desc.uV_step = 10000;
410
411 tps->regmap = devm_regmap_init_i2c(client, &tps62360_regmap_config); 414 tps->regmap = devm_regmap_init_i2c(client, &tps62360_regmap_config);
412 if (IS_ERR(tps->regmap)) { 415 if (IS_ERR(tps->regmap)) {
413 ret = PTR_ERR(tps->regmap); 416 ret = PTR_ERR(tps->regmap);
diff --git a/drivers/regulator/tps65090-regulator.c b/drivers/regulator/tps65090-regulator.c
index d5df1e9ad1da..f1df4423d361 100644
--- a/drivers/regulator/tps65090-regulator.c
+++ b/drivers/regulator/tps65090-regulator.c
@@ -312,7 +312,11 @@ static void tps65090_configure_regulator_config(
312 gpio_flag = GPIOF_OUT_INIT_HIGH; 312 gpio_flag = GPIOF_OUT_INIT_HIGH;
313 313
314 config->ena_gpio = tps_pdata->gpio; 314 config->ena_gpio = tps_pdata->gpio;
315 config->ena_gpio_initialized = true;
315 config->ena_gpio_flags = gpio_flag; 316 config->ena_gpio_flags = gpio_flag;
317 } else {
318 config->ena_gpio = -EINVAL;
319 config->ena_gpio_initialized = false;
316 } 320 }
317} 321}
318 322
diff --git a/drivers/regulator/tps65218-regulator.c b/drivers/regulator/tps65218-regulator.c
index f0a40281b9c1..263cc85d6202 100644
--- a/drivers/regulator/tps65218-regulator.c
+++ b/drivers/regulator/tps65218-regulator.c
@@ -231,7 +231,8 @@ static int tps65218_regulator_probe(struct platform_device *pdev)
231 231
232 template = match->data; 232 template = match->data;
233 id = template->id; 233 id = template->id;
234 init_data = of_get_regulator_init_data(&pdev->dev, pdev->dev.of_node); 234 init_data = of_get_regulator_init_data(&pdev->dev, pdev->dev.of_node,
235 &regulators[id]);
235 236
236 platform_set_drvdata(pdev, tps); 237 platform_set_drvdata(pdev, tps);
237 238
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index 0b4f8660fdb4..dd727bca1983 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -1104,7 +1104,8 @@ static int twlreg_probe(struct platform_device *pdev)
1104 template = match->data; 1104 template = match->data;
1105 id = template->desc.id; 1105 id = template->desc.id;
1106 initdata = of_get_regulator_init_data(&pdev->dev, 1106 initdata = of_get_regulator_init_data(&pdev->dev,
1107 pdev->dev.of_node); 1107 pdev->dev.of_node,
1108 &template->desc);
1108 drvdata = NULL; 1109 drvdata = NULL;
1109 } else { 1110 } else {
1110 id = pdev->id; 1111 id = pdev->id;
diff --git a/drivers/regulator/vexpress.c b/drivers/regulator/vexpress.c
index 02e7267ccf92..5e7c789023a9 100644
--- a/drivers/regulator/vexpress.c
+++ b/drivers/regulator/vexpress.c
@@ -74,7 +74,8 @@ static int vexpress_regulator_probe(struct platform_device *pdev)
74 reg->desc.owner = THIS_MODULE; 74 reg->desc.owner = THIS_MODULE;
75 reg->desc.continuous_voltage_range = true; 75 reg->desc.continuous_voltage_range = true;
76 76
77 init_data = of_get_regulator_init_data(&pdev->dev, pdev->dev.of_node); 77 init_data = of_get_regulator_init_data(&pdev->dev, pdev->dev.of_node,
78 &reg->desc);
78 if (!init_data) 79 if (!init_data)
79 return -EINVAL; 80 return -EINVAL;
80 81
diff --git a/drivers/regulator/wm8994-regulator.c b/drivers/regulator/wm8994-regulator.c
index c24346db8a71..88f5064e412b 100644
--- a/drivers/regulator/wm8994-regulator.c
+++ b/drivers/regulator/wm8994-regulator.c
@@ -145,10 +145,12 @@ static int wm8994_ldo_probe(struct platform_device *pdev)
145 config.driver_data = ldo; 145 config.driver_data = ldo;
146 config.regmap = wm8994->regmap; 146 config.regmap = wm8994->regmap;
147 config.init_data = &ldo->init_data; 147 config.init_data = &ldo->init_data;
148 if (pdata) 148 if (pdata) {
149 config.ena_gpio = pdata->ldo[id].enable; 149 config.ena_gpio = pdata->ldo[id].enable;
150 else if (wm8994->dev->of_node) 150 } else if (wm8994->dev->of_node) {
151 config.ena_gpio = wm8994->pdata.ldo[id].enable; 151 config.ena_gpio = wm8994->pdata.ldo[id].enable;
152 config.ena_gpio_initialized = true;
153 }
152 154
153 /* Use default constraints if none set up */ 155 /* Use default constraints if none set up */
154 if (!pdata || !pdata->ldo[id].init_data || wm8994->dev->of_node) { 156 if (!pdata || !pdata->ldo[id].init_data || wm8994->dev->of_node) {
diff --git a/drivers/remoteproc/omap_remoteproc.c b/drivers/remoteproc/omap_remoteproc.c
index 51689721ea7a..cf92f6e7c5dc 100644
--- a/drivers/remoteproc/omap_remoteproc.c
+++ b/drivers/remoteproc/omap_remoteproc.c
@@ -27,6 +27,7 @@
27#include <linux/platform_device.h> 27#include <linux/platform_device.h>
28#include <linux/dma-mapping.h> 28#include <linux/dma-mapping.h>
29#include <linux/remoteproc.h> 29#include <linux/remoteproc.h>
30#include <linux/mailbox_client.h>
30#include <linux/omap-mailbox.h> 31#include <linux/omap-mailbox.h>
31 32
32#include <linux/platform_data/remoteproc-omap.h> 33#include <linux/platform_data/remoteproc-omap.h>
@@ -36,20 +37,19 @@
36 37
37/** 38/**
38 * struct omap_rproc - omap remote processor state 39 * struct omap_rproc - omap remote processor state
39 * @mbox: omap mailbox handle 40 * @mbox: mailbox channel handle
40 * @nb: notifier block that will be invoked on inbound mailbox messages 41 * @client: mailbox client to request the mailbox channel
41 * @rproc: rproc handle 42 * @rproc: rproc handle
42 */ 43 */
43struct omap_rproc { 44struct omap_rproc {
44 struct omap_mbox *mbox; 45 struct mbox_chan *mbox;
45 struct notifier_block nb; 46 struct mbox_client client;
46 struct rproc *rproc; 47 struct rproc *rproc;
47}; 48};
48 49
49/** 50/**
50 * omap_rproc_mbox_callback() - inbound mailbox message handler 51 * omap_rproc_mbox_callback() - inbound mailbox message handler
51 * @this: notifier block 52 * @client: mailbox client pointer used for requesting the mailbox channel
52 * @index: unused
53 * @data: mailbox payload 53 * @data: mailbox payload
54 * 54 *
55 * This handler is invoked by omap's mailbox driver whenever a mailbox 55 * This handler is invoked by omap's mailbox driver whenever a mailbox
@@ -61,13 +61,13 @@ struct omap_rproc {
61 * that indicates different events. Those values are deliberately very 61 * that indicates different events. Those values are deliberately very
62 * big so they don't coincide with virtqueue indices. 62 * big so they don't coincide with virtqueue indices.
63 */ 63 */
64static int omap_rproc_mbox_callback(struct notifier_block *this, 64static void omap_rproc_mbox_callback(struct mbox_client *client, void *data)
65 unsigned long index, void *data)
66{ 65{
67 mbox_msg_t msg = (mbox_msg_t) data; 66 struct omap_rproc *oproc = container_of(client, struct omap_rproc,
68 struct omap_rproc *oproc = container_of(this, struct omap_rproc, nb); 67 client);
69 struct device *dev = oproc->rproc->dev.parent; 68 struct device *dev = oproc->rproc->dev.parent;
70 const char *name = oproc->rproc->name; 69 const char *name = oproc->rproc->name;
70 u32 msg = (u32)data;
71 71
72 dev_dbg(dev, "mbox msg: 0x%x\n", msg); 72 dev_dbg(dev, "mbox msg: 0x%x\n", msg);
73 73
@@ -84,8 +84,6 @@ static int omap_rproc_mbox_callback(struct notifier_block *this,
84 if (rproc_vq_interrupt(oproc->rproc, msg) == IRQ_NONE) 84 if (rproc_vq_interrupt(oproc->rproc, msg) == IRQ_NONE)
85 dev_dbg(dev, "no message was found in vqid %d\n", msg); 85 dev_dbg(dev, "no message was found in vqid %d\n", msg);
86 } 86 }
87
88 return NOTIFY_DONE;
89} 87}
90 88
91/* kick a virtqueue */ 89/* kick a virtqueue */
@@ -96,8 +94,8 @@ static void omap_rproc_kick(struct rproc *rproc, int vqid)
96 int ret; 94 int ret;
97 95
98 /* send the index of the triggered virtqueue in the mailbox payload */ 96 /* send the index of the triggered virtqueue in the mailbox payload */
99 ret = omap_mbox_msg_send(oproc->mbox, vqid); 97 ret = mbox_send_message(oproc->mbox, (void *)vqid);
100 if (ret) 98 if (ret < 0)
101 dev_err(dev, "omap_mbox_msg_send failed: %d\n", ret); 99 dev_err(dev, "omap_mbox_msg_send failed: %d\n", ret);
102} 100}
103 101
@@ -115,17 +113,22 @@ static int omap_rproc_start(struct rproc *rproc)
115 struct platform_device *pdev = to_platform_device(dev); 113 struct platform_device *pdev = to_platform_device(dev);
116 struct omap_rproc_pdata *pdata = pdev->dev.platform_data; 114 struct omap_rproc_pdata *pdata = pdev->dev.platform_data;
117 int ret; 115 int ret;
116 struct mbox_client *client = &oproc->client;
118 117
119 if (pdata->set_bootaddr) 118 if (pdata->set_bootaddr)
120 pdata->set_bootaddr(rproc->bootaddr); 119 pdata->set_bootaddr(rproc->bootaddr);
121 120
122 oproc->nb.notifier_call = omap_rproc_mbox_callback; 121 client->dev = dev;
122 client->tx_done = NULL;
123 client->rx_callback = omap_rproc_mbox_callback;
124 client->tx_block = false;
125 client->knows_txdone = false;
123 126
124 /* every omap rproc is assigned a mailbox instance for messaging */ 127 oproc->mbox = omap_mbox_request_channel(client, pdata->mbox_name);
125 oproc->mbox = omap_mbox_get(pdata->mbox_name, &oproc->nb);
126 if (IS_ERR(oproc->mbox)) { 128 if (IS_ERR(oproc->mbox)) {
127 ret = PTR_ERR(oproc->mbox); 129 ret = -EBUSY;
128 dev_err(dev, "omap_mbox_get failed: %d\n", ret); 130 dev_err(dev, "mbox_request_channel failed: %ld\n",
131 PTR_ERR(oproc->mbox));
129 return ret; 132 return ret;
130 } 133 }
131 134
@@ -136,9 +139,9 @@ static int omap_rproc_start(struct rproc *rproc)
136 * Note that the reply will _not_ arrive immediately: this message 139 * Note that the reply will _not_ arrive immediately: this message
137 * will wait in the mailbox fifo until the remote processor is booted. 140 * will wait in the mailbox fifo until the remote processor is booted.
138 */ 141 */
139 ret = omap_mbox_msg_send(oproc->mbox, RP_MBOX_ECHO_REQUEST); 142 ret = mbox_send_message(oproc->mbox, (void *)RP_MBOX_ECHO_REQUEST);
140 if (ret) { 143 if (ret < 0) {
141 dev_err(dev, "omap_mbox_get failed: %d\n", ret); 144 dev_err(dev, "mbox_send_message failed: %d\n", ret);
142 goto put_mbox; 145 goto put_mbox;
143 } 146 }
144 147
@@ -151,7 +154,7 @@ static int omap_rproc_start(struct rproc *rproc)
151 return 0; 154 return 0;
152 155
153put_mbox: 156put_mbox:
154 omap_mbox_put(oproc->mbox, &oproc->nb); 157 mbox_free_channel(oproc->mbox);
155 return ret; 158 return ret;
156} 159}
157 160
@@ -168,7 +171,7 @@ static int omap_rproc_stop(struct rproc *rproc)
168 if (ret) 171 if (ret)
169 return ret; 172 return ret;
170 173
171 omap_mbox_put(oproc->mbox, &oproc->nb); 174 mbox_free_channel(oproc->mbox);
172 175
173 return 0; 176 return 0;
174} 177}
diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c
index a34b50690b4e..e1a10232a943 100644
--- a/drivers/remoteproc/remoteproc_virtio.c
+++ b/drivers/remoteproc/remoteproc_virtio.c
@@ -207,7 +207,7 @@ static void rproc_virtio_reset(struct virtio_device *vdev)
207} 207}
208 208
209/* provide the vdev features as retrieved from the firmware */ 209/* provide the vdev features as retrieved from the firmware */
210static u32 rproc_virtio_get_features(struct virtio_device *vdev) 210static u64 rproc_virtio_get_features(struct virtio_device *vdev)
211{ 211{
212 struct rproc_vdev *rvdev = vdev_to_rvdev(vdev); 212 struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
213 struct fw_rsc_vdev *rsc; 213 struct fw_rsc_vdev *rsc;
@@ -217,7 +217,7 @@ static u32 rproc_virtio_get_features(struct virtio_device *vdev)
217 return rsc->dfeatures; 217 return rsc->dfeatures;
218} 218}
219 219
220static void rproc_virtio_finalize_features(struct virtio_device *vdev) 220static int rproc_virtio_finalize_features(struct virtio_device *vdev)
221{ 221{
222 struct rproc_vdev *rvdev = vdev_to_rvdev(vdev); 222 struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
223 struct fw_rsc_vdev *rsc; 223 struct fw_rsc_vdev *rsc;
@@ -227,11 +227,16 @@ static void rproc_virtio_finalize_features(struct virtio_device *vdev)
227 /* Give virtio_ring a chance to accept features */ 227 /* Give virtio_ring a chance to accept features */
228 vring_transport_features(vdev); 228 vring_transport_features(vdev);
229 229
230 /* Make sure we don't have any features > 32 bits! */
231 BUG_ON((u32)vdev->features != vdev->features);
232
230 /* 233 /*
231 * Remember the finalized features of our vdev, and provide it 234 * Remember the finalized features of our vdev, and provide it
232 * to the remote processor once it is powered on. 235 * to the remote processor once it is powered on.
233 */ 236 */
234 rsc->gfeatures = vdev->features[0]; 237 rsc->gfeatures = vdev->features;
238
239 return 0;
235} 240}
236 241
237static void rproc_virtio_get(struct virtio_device *vdev, unsigned offset, 242static void rproc_virtio_get(struct virtio_device *vdev, unsigned offset,
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
index 60fed3d7820b..157d421f755b 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -1,4 +1,5 @@
1obj-$(CONFIG_RESET_CONTROLLER) += core.o 1obj-$(CONFIG_RESET_CONTROLLER) += core.o
2obj-$(CONFIG_ARCH_SOCFPGA) += reset-socfpga.o 2obj-$(CONFIG_ARCH_SOCFPGA) += reset-socfpga.o
3obj-$(CONFIG_ARCH_BERLIN) += reset-berlin.o
3obj-$(CONFIG_ARCH_SUNXI) += reset-sunxi.o 4obj-$(CONFIG_ARCH_SUNXI) += reset-sunxi.o
4obj-$(CONFIG_ARCH_STI) += sti/ 5obj-$(CONFIG_ARCH_STI) += sti/
diff --git a/drivers/reset/core.c b/drivers/reset/core.c
index baeaf82d40d9..7955e00d04d4 100644
--- a/drivers/reset/core.c
+++ b/drivers/reset/core.c
@@ -126,6 +126,21 @@ int reset_control_deassert(struct reset_control *rstc)
126EXPORT_SYMBOL_GPL(reset_control_deassert); 126EXPORT_SYMBOL_GPL(reset_control_deassert);
127 127
128/** 128/**
129 * reset_control_status - returns a negative errno if not supported, a
130 * positive value if the reset line is asserted, or zero if the reset
131 * line is not asserted.
132 * @rstc: reset controller
133 */
134int reset_control_status(struct reset_control *rstc)
135{
136 if (rstc->rcdev->ops->status)
137 return rstc->rcdev->ops->status(rstc->rcdev, rstc->id);
138
139 return -ENOSYS;
140}
141EXPORT_SYMBOL_GPL(reset_control_status);
142
143/**
129 * of_reset_control_get - Lookup and obtain a reference to a reset controller. 144 * of_reset_control_get - Lookup and obtain a reference to a reset controller.
130 * @node: device to be reset by the controller 145 * @node: device to be reset by the controller
131 * @id: reset line name 146 * @id: reset line name
diff --git a/drivers/reset/reset-berlin.c b/drivers/reset/reset-berlin.c
new file mode 100644
index 000000000000..f8b48a13cf0b
--- /dev/null
+++ b/drivers/reset/reset-berlin.c
@@ -0,0 +1,131 @@
1/*
2 * Copyright (C) 2014 Marvell Technology Group Ltd.
3 *
4 * Antoine Tenart <antoine.tenart@free-electrons.com>
5 * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
6 *
7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any
9 * warranty of any kind, whether express or implied.
10 */
11
12#include <linux/delay.h>
13#include <linux/io.h>
14#include <linux/module.h>
15#include <linux/of.h>
16#include <linux/of_address.h>
17#include <linux/platform_device.h>
18#include <linux/reset-controller.h>
19#include <linux/slab.h>
20#include <linux/types.h>
21
22#define BERLIN_MAX_RESETS 32
23
24#define to_berlin_reset_priv(p) \
25 container_of((p), struct berlin_reset_priv, rcdev)
26
27struct berlin_reset_priv {
28 void __iomem *base;
29 unsigned int size;
30 struct reset_controller_dev rcdev;
31};
32
33static int berlin_reset_reset(struct reset_controller_dev *rcdev,
34 unsigned long id)
35{
36 struct berlin_reset_priv *priv = to_berlin_reset_priv(rcdev);
37 int offset = id >> 8;
38 int mask = BIT(id & 0x1f);
39
40 writel(mask, priv->base + offset);
41
42 /* let the reset be effective */
43 udelay(10);
44
45 return 0;
46}
47
48static struct reset_control_ops berlin_reset_ops = {
49 .reset = berlin_reset_reset,
50};
51
52static int berlin_reset_xlate(struct reset_controller_dev *rcdev,
53 const struct of_phandle_args *reset_spec)
54{
55 struct berlin_reset_priv *priv = to_berlin_reset_priv(rcdev);
56 unsigned offset, bit;
57
58 if (WARN_ON(reset_spec->args_count != rcdev->of_reset_n_cells))
59 return -EINVAL;
60
61 offset = reset_spec->args[0];
62 bit = reset_spec->args[1];
63
64 if (offset >= priv->size)
65 return -EINVAL;
66
67 if (bit >= BERLIN_MAX_RESETS)
68 return -EINVAL;
69
70 return (offset << 8) | bit;
71}
72
73static int __berlin_reset_init(struct device_node *np)
74{
75 struct berlin_reset_priv *priv;
76 struct resource res;
77 resource_size_t size;
78 int ret;
79
80 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
81 if (!priv)
82 return -ENOMEM;
83
84 ret = of_address_to_resource(np, 0, &res);
85 if (ret)
86 goto err;
87
88 size = resource_size(&res);
89 priv->base = ioremap(res.start, size);
90 if (!priv->base) {
91 ret = -ENOMEM;
92 goto err;
93 }
94 priv->size = size;
95
96 priv->rcdev.owner = THIS_MODULE;
97 priv->rcdev.ops = &berlin_reset_ops;
98 priv->rcdev.of_node = np;
99 priv->rcdev.of_reset_n_cells = 2;
100 priv->rcdev.of_xlate = berlin_reset_xlate;
101
102 reset_controller_register(&priv->rcdev);
103
104 return 0;
105
106err:
107 kfree(priv);
108 return ret;
109}
110
111static const struct of_device_id berlin_reset_of_match[] __initconst = {
112 { .compatible = "marvell,berlin2-chip-ctrl" },
113 { .compatible = "marvell,berlin2cd-chip-ctrl" },
114 { .compatible = "marvell,berlin2q-chip-ctrl" },
115 { },
116};
117
118static int __init berlin_reset_init(void)
119{
120 struct device_node *np;
121 int ret;
122
123 for_each_matching_node(np, berlin_reset_of_match) {
124 ret = __berlin_reset_init(np);
125 if (ret)
126 return ret;
127 }
128
129 return 0;
130}
131arch_initcall(berlin_reset_init);
diff --git a/drivers/reset/reset-socfpga.c b/drivers/reset/reset-socfpga.c
index 79c32ca84ef1..40582089474a 100644
--- a/drivers/reset/reset-socfpga.c
+++ b/drivers/reset/reset-socfpga.c
@@ -76,9 +76,24 @@ static int socfpga_reset_deassert(struct reset_controller_dev *rcdev,
76 return 0; 76 return 0;
77} 77}
78 78
79static int socfpga_reset_status(struct reset_controller_dev *rcdev,
80 unsigned long id)
81{
82 struct socfpga_reset_data *data = container_of(rcdev,
83 struct socfpga_reset_data, rcdev);
84 int bank = id / BITS_PER_LONG;
85 int offset = id % BITS_PER_LONG;
86 u32 reg;
87
88 reg = readl(data->membase + OFFSET_MODRST + (bank * NR_BANKS));
89
90 return !(reg & BIT(offset));
91}
92
79static struct reset_control_ops socfpga_reset_ops = { 93static struct reset_control_ops socfpga_reset_ops = {
80 .assert = socfpga_reset_assert, 94 .assert = socfpga_reset_assert,
81 .deassert = socfpga_reset_deassert, 95 .deassert = socfpga_reset_deassert,
96 .status = socfpga_reset_status,
82}; 97};
83 98
84static int socfpga_reset_probe(struct platform_device *pdev) 99static int socfpga_reset_probe(struct platform_device *pdev)
diff --git a/drivers/reset/sti/Kconfig b/drivers/reset/sti/Kconfig
index 88d2d0316613..f8c15a37fb35 100644
--- a/drivers/reset/sti/Kconfig
+++ b/drivers/reset/sti/Kconfig
@@ -12,4 +12,8 @@ config STIH416_RESET
12 bool 12 bool
13 select STI_RESET_SYSCFG 13 select STI_RESET_SYSCFG
14 14
15config STIH407_RESET
16 bool
17 select STI_RESET_SYSCFG
18
15endif 19endif
diff --git a/drivers/reset/sti/Makefile b/drivers/reset/sti/Makefile
index be1c97647871..dc85dfbe56a9 100644
--- a/drivers/reset/sti/Makefile
+++ b/drivers/reset/sti/Makefile
@@ -2,3 +2,4 @@ obj-$(CONFIG_STI_RESET_SYSCFG) += reset-syscfg.o
2 2
3obj-$(CONFIG_STIH415_RESET) += reset-stih415.o 3obj-$(CONFIG_STIH415_RESET) += reset-stih415.o
4obj-$(CONFIG_STIH416_RESET) += reset-stih416.o 4obj-$(CONFIG_STIH416_RESET) += reset-stih416.o
5obj-$(CONFIG_STIH407_RESET) += reset-stih407.o
diff --git a/drivers/reset/sti/reset-stih407.c b/drivers/reset/sti/reset-stih407.c
new file mode 100644
index 000000000000..d83db5d72d08
--- /dev/null
+++ b/drivers/reset/sti/reset-stih407.c
@@ -0,0 +1,158 @@
1/*
2 * Copyright (C) 2014 STMicroelectronics (R&D) Limited
3 * Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 */
10#include <linux/module.h>
11#include <linux/of.h>
12#include <linux/of_platform.h>
13#include <linux/platform_device.h>
14#include <dt-bindings/reset-controller/stih407-resets.h>
15#include "reset-syscfg.h"
16
17/* STiH407 Peripheral powerdown definitions. */
18static const char stih407_core[] = "st,stih407-core-syscfg";
19static const char stih407_sbc_reg[] = "st,stih407-sbc-reg-syscfg";
20static const char stih407_lpm[] = "st,stih407-lpm-syscfg";
21
22#define STIH407_PDN_0(_bit) \
23 _SYSCFG_RST_CH(stih407_core, SYSCFG_5000, _bit, SYSSTAT_5500, _bit)
24#define STIH407_PDN_1(_bit) \
25 _SYSCFG_RST_CH(stih407_core, SYSCFG_5001, _bit, SYSSTAT_5501, _bit)
26#define STIH407_PDN_ETH(_bit, _stat) \
27 _SYSCFG_RST_CH(stih407_sbc_reg, SYSCFG_4032, _bit, SYSSTAT_4520, _stat)
28
29/* Powerdown requests control 0 */
30#define SYSCFG_5000 0x0
31#define SYSSTAT_5500 0x7d0
32/* Powerdown requests control 1 (High Speed Links) */
33#define SYSCFG_5001 0x4
34#define SYSSTAT_5501 0x7d4
35
36/* Ethernet powerdown/status/reset */
37#define SYSCFG_4032 0x80
38#define SYSSTAT_4520 0x820
39#define SYSCFG_4002 0x8
40
41static const struct syscfg_reset_channel_data stih407_powerdowns[] = {
42 [STIH407_EMISS_POWERDOWN] = STIH407_PDN_0(1),
43 [STIH407_NAND_POWERDOWN] = STIH407_PDN_0(0),
44 [STIH407_USB3_POWERDOWN] = STIH407_PDN_1(6),
45 [STIH407_USB2_PORT1_POWERDOWN] = STIH407_PDN_1(5),
46 [STIH407_USB2_PORT0_POWERDOWN] = STIH407_PDN_1(4),
47 [STIH407_PCIE1_POWERDOWN] = STIH407_PDN_1(3),
48 [STIH407_PCIE0_POWERDOWN] = STIH407_PDN_1(2),
49 [STIH407_SATA1_POWERDOWN] = STIH407_PDN_1(1),
50 [STIH407_SATA0_POWERDOWN] = STIH407_PDN_1(0),
51 [STIH407_ETH1_POWERDOWN] = STIH407_PDN_ETH(0, 2),
52};
53
54/* Reset Generator control 0/1 */
55#define SYSCFG_5131 0x20c
56#define SYSCFG_5132 0x210
57
58#define LPM_SYSCFG_1 0x4 /* Softreset IRB & SBC UART */
59
60#define STIH407_SRST_CORE(_reg, _bit) \
61 _SYSCFG_RST_CH_NO_ACK(stih407_core, _reg, _bit)
62
63#define STIH407_SRST_SBC(_reg, _bit) \
64 _SYSCFG_RST_CH_NO_ACK(stih407_sbc_reg, _reg, _bit)
65
66#define STIH407_SRST_LPM(_reg, _bit) \
67 _SYSCFG_RST_CH_NO_ACK(stih407_lpm, _reg, _bit)
68
69static const struct syscfg_reset_channel_data stih407_softresets[] = {
70 [STIH407_ETH1_SOFTRESET] = STIH407_SRST_SBC(SYSCFG_4002, 4),
71 [STIH407_MMC1_SOFTRESET] = STIH407_SRST_CORE(SYSCFG_5132, 3),
72 [STIH407_USB2_PORT0_SOFTRESET] = STIH407_SRST_CORE(SYSCFG_5132, 28),
73 [STIH407_USB2_PORT1_SOFTRESET] = STIH407_SRST_CORE(SYSCFG_5132, 29),
74 [STIH407_PICOPHY_SOFTRESET] = STIH407_SRST_CORE(SYSCFG_5132, 30),
75 [STIH407_IRB_SOFTRESET] = STIH407_SRST_LPM(LPM_SYSCFG_1, 6),
76 [STIH407_PCIE0_SOFTRESET] = STIH407_SRST_CORE(SYSCFG_5132, 6),
77 [STIH407_PCIE1_SOFTRESET] = STIH407_SRST_CORE(SYSCFG_5132, 15),
78 [STIH407_SATA0_SOFTRESET] = STIH407_SRST_CORE(SYSCFG_5132, 7),
79 [STIH407_SATA1_SOFTRESET] = STIH407_SRST_CORE(SYSCFG_5132, 16),
80 [STIH407_MIPHY0_SOFTRESET] = STIH407_SRST_CORE(SYSCFG_5132, 4),
81 [STIH407_MIPHY1_SOFTRESET] = STIH407_SRST_CORE(SYSCFG_5132, 13),
82 [STIH407_MIPHY2_SOFTRESET] = STIH407_SRST_CORE(SYSCFG_5132, 22),
83 [STIH407_SATA0_PWR_SOFTRESET] = STIH407_SRST_CORE(SYSCFG_5132, 5),
84 [STIH407_SATA1_PWR_SOFTRESET] = STIH407_SRST_CORE(SYSCFG_5132, 14),
85 [STIH407_DELTA_SOFTRESET] = STIH407_SRST_CORE(SYSCFG_5131, 3),
86 [STIH407_BLITTER_SOFTRESET] = STIH407_SRST_CORE(SYSCFG_5131, 10),
87 [STIH407_HDTVOUT_SOFTRESET] = STIH407_SRST_CORE(SYSCFG_5131, 11),
88 [STIH407_HDQVDP_SOFTRESET] = STIH407_SRST_CORE(SYSCFG_5131, 12),
89 [STIH407_VDP_AUX_SOFTRESET] = STIH407_SRST_CORE(SYSCFG_5131, 14),
90 [STIH407_COMPO_SOFTRESET] = STIH407_SRST_CORE(SYSCFG_5131, 15),
91 [STIH407_HDMI_TX_PHY_SOFTRESET] = STIH407_SRST_CORE(SYSCFG_5131, 21),
92 [STIH407_JPEG_DEC_SOFTRESET] = STIH407_SRST_CORE(SYSCFG_5131, 23),
93 [STIH407_VP8_DEC_SOFTRESET] = STIH407_SRST_CORE(SYSCFG_5131, 24),
94 [STIH407_GPU_SOFTRESET] = STIH407_SRST_CORE(SYSCFG_5131, 30),
95 [STIH407_HVA_SOFTRESET] = STIH407_SRST_CORE(SYSCFG_5132, 0),
96 [STIH407_ERAM_HVA_SOFTRESET] = STIH407_SRST_CORE(SYSCFG_5132, 1),
97 [STIH407_LPM_SOFTRESET] = STIH407_SRST_SBC(SYSCFG_4002, 2),
98 [STIH407_KEYSCAN_SOFTRESET] = STIH407_SRST_LPM(LPM_SYSCFG_1, 8),
99};
100
101/* PicoPHY reset/control */
102#define SYSCFG_5061 0x0f4
103
104static const struct syscfg_reset_channel_data stih407_picophyresets[] = {
105 [STIH407_PICOPHY0_RESET] = STIH407_SRST_CORE(SYSCFG_5061, 5),
106 [STIH407_PICOPHY1_RESET] = STIH407_SRST_CORE(SYSCFG_5061, 6),
107 [STIH407_PICOPHY2_RESET] = STIH407_SRST_CORE(SYSCFG_5061, 7),
108};
109
110static const struct syscfg_reset_controller_data stih407_powerdown_controller = {
111 .wait_for_ack = true,
112 .nr_channels = ARRAY_SIZE(stih407_powerdowns),
113 .channels = stih407_powerdowns,
114};
115
116static const struct syscfg_reset_controller_data stih407_softreset_controller = {
117 .wait_for_ack = false,
118 .active_low = true,
119 .nr_channels = ARRAY_SIZE(stih407_softresets),
120 .channels = stih407_softresets,
121};
122
123static const struct syscfg_reset_controller_data stih407_picophyreset_controller = {
124 .wait_for_ack = false,
125 .nr_channels = ARRAY_SIZE(stih407_picophyresets),
126 .channels = stih407_picophyresets,
127};
128
129static struct of_device_id stih407_reset_match[] = {
130 {
131 .compatible = "st,stih407-powerdown",
132 .data = &stih407_powerdown_controller,
133 },
134 {
135 .compatible = "st,stih407-softreset",
136 .data = &stih407_softreset_controller,
137 },
138 {
139 .compatible = "st,stih407-picophyreset",
140 .data = &stih407_picophyreset_controller,
141 },
142 { /* sentinel */ },
143};
144
145static struct platform_driver stih407_reset_driver = {
146 .probe = syscfg_reset_probe,
147 .driver = {
148 .name = "reset-stih407",
149 .of_match_table = stih407_reset_match,
150 },
151};
152
153static int __init stih407_reset_init(void)
154{
155 return platform_driver_register(&stih407_reset_driver);
156}
157
158arch_initcall(stih407_reset_init);
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 6dd12ddbabc6..4511ddc1ac31 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -192,6 +192,14 @@ config RTC_DRV_DS1374
192 This driver can also be built as a module. If so, the module 192 This driver can also be built as a module. If so, the module
193 will be called rtc-ds1374. 193 will be called rtc-ds1374.
194 194
195config RTC_DRV_DS1374_WDT
196 bool "Dallas/Maxim DS1374 watchdog timer"
197 depends on RTC_DRV_DS1374
198 help
199 If you say Y here you will get support for the
200 watchdog timer in the Dallas Semiconductor DS1374
201 real-time clock chips.
202
195config RTC_DRV_DS1672 203config RTC_DRV_DS1672
196 tristate "Dallas/Maxim DS1672" 204 tristate "Dallas/Maxim DS1672"
197 help 205 help
@@ -1109,36 +1117,42 @@ config RTC_DRV_AT91RM9200
1109 this is powered by the backup power supply. 1117 this is powered by the backup power supply.
1110 1118
1111config RTC_DRV_AT91SAM9 1119config RTC_DRV_AT91SAM9
1112 tristate "AT91SAM9x/AT91CAP9 RTT as RTC" 1120 tristate "AT91SAM9 RTT as RTC"
1113 depends on ARCH_AT91 && !(ARCH_AT91RM9200 || ARCH_AT91X40) 1121 depends on ARCH_AT91
1122 select MFD_SYSCON
1114 help 1123 help
1115 RTC driver for the Atmel AT91SAM9x and AT91CAP9 internal RTT 1124 Some AT91SAM9 SoCs provide an RTT (Real Time Timer) block which
1116 (Real Time Timer). These timers are powered by the backup power 1125 can be used as an RTC thanks to the backup power supply (e.g. a
1117 supply (such as a small coin cell battery), but do not need to 1126 small coin cell battery) which keeps this block and the GPBR
1118 be used as RTCs. 1127 (General Purpose Backup Registers) block powered when the device
1119 1128 is shutdown.
1120 (On AT91SAM9rl and AT91SAM9G45 chips you probably want to use the 1129 Some AT91SAM9 SoCs provide a real RTC block, on those ones you'd
1121 dedicated RTC module and leave the RTT available for other uses.) 1130 probably want to use the real RTC block instead of the "RTT as an
1131 RTC" driver.
1122 1132
1123config RTC_DRV_AT91SAM9_RTT 1133config RTC_DRV_AT91SAM9_RTT
1124 int 1134 int
1125 range 0 1 1135 range 0 1
1126 default 0 1136 default 0
1127 prompt "RTT module Number" if ARCH_AT91SAM9263
1128 depends on RTC_DRV_AT91SAM9 1137 depends on RTC_DRV_AT91SAM9
1129 help 1138 help
1139 This option is only relevant for legacy board support and
1140 won't be used when booting a DT board.
1141
1130 More than one RTT module is available. You can choose which 1142 More than one RTT module is available. You can choose which
1131 one will be used as an RTC. The default of zero is normally 1143 one will be used as an RTC. The default of zero is normally
1132 OK to use, though some systems use that for non-RTC purposes. 1144 OK to use, though some systems use that for non-RTC purposes.
1133 1145
1134config RTC_DRV_AT91SAM9_GPBR 1146config RTC_DRV_AT91SAM9_GPBR
1135 int 1147 int
1136 range 0 3 if !ARCH_AT91SAM9263 1148 range 0 3
1137 range 0 15 if ARCH_AT91SAM9263
1138 default 0 1149 default 0
1139 prompt "Backup Register Number" 1150 prompt "Backup Register Number"
1140 depends on RTC_DRV_AT91SAM9 1151 depends on RTC_DRV_AT91SAM9
1141 help 1152 help
1153 This option is only relevant for legacy board support and
1154 won't be used when booting a DT board.
1155
1142 The RTC driver needs to use one of the General Purpose Backup 1156 The RTC driver needs to use one of the General Purpose Backup
1143 Registers (GPBRs) as well as the RTT. You can choose which one 1157 Registers (GPBRs) as well as the RTT. You can choose which one
1144 will be used. The default of zero is normally OK to use, but 1158 will be used. The default of zero is normally OK to use, but
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index 38e26be705be..472a5adc4642 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -45,14 +45,14 @@ int rtc_hctosys_ret = -ENODEV;
45 * system's wall clock; restore it on resume(). 45 * system's wall clock; restore it on resume().
46 */ 46 */
47 47
48static struct timespec old_rtc, old_system, old_delta; 48static struct timespec64 old_rtc, old_system, old_delta;
49 49
50 50
51static int rtc_suspend(struct device *dev) 51static int rtc_suspend(struct device *dev)
52{ 52{
53 struct rtc_device *rtc = to_rtc_device(dev); 53 struct rtc_device *rtc = to_rtc_device(dev);
54 struct rtc_time tm; 54 struct rtc_time tm;
55 struct timespec delta, delta_delta; 55 struct timespec64 delta, delta_delta;
56 int err; 56 int err;
57 57
58 if (has_persistent_clock()) 58 if (has_persistent_clock())
@@ -68,8 +68,8 @@ static int rtc_suspend(struct device *dev)
68 return 0; 68 return 0;
69 } 69 }
70 70
71 getnstimeofday(&old_system); 71 getnstimeofday64(&old_system);
72 rtc_tm_to_time(&tm, &old_rtc.tv_sec); 72 old_rtc.tv_sec = rtc_tm_to_time64(&tm);
73 73
74 74
75 /* 75 /*
@@ -78,8 +78,8 @@ static int rtc_suspend(struct device *dev)
78 * try to compensate so the difference in system time 78 * try to compensate so the difference in system time
79 * and rtc time stays close to constant. 79 * and rtc time stays close to constant.
80 */ 80 */
81 delta = timespec_sub(old_system, old_rtc); 81 delta = timespec64_sub(old_system, old_rtc);
82 delta_delta = timespec_sub(delta, old_delta); 82 delta_delta = timespec64_sub(delta, old_delta);
83 if (delta_delta.tv_sec < -2 || delta_delta.tv_sec >= 2) { 83 if (delta_delta.tv_sec < -2 || delta_delta.tv_sec >= 2) {
84 /* 84 /*
85 * if delta_delta is too large, assume time correction 85 * if delta_delta is too large, assume time correction
@@ -88,7 +88,7 @@ static int rtc_suspend(struct device *dev)
88 old_delta = delta; 88 old_delta = delta;
89 } else { 89 } else {
90 /* Otherwise try to adjust old_system to compensate */ 90 /* Otherwise try to adjust old_system to compensate */
91 old_system = timespec_sub(old_system, delta_delta); 91 old_system = timespec64_sub(old_system, delta_delta);
92 } 92 }
93 93
94 return 0; 94 return 0;
@@ -98,8 +98,8 @@ static int rtc_resume(struct device *dev)
98{ 98{
99 struct rtc_device *rtc = to_rtc_device(dev); 99 struct rtc_device *rtc = to_rtc_device(dev);
100 struct rtc_time tm; 100 struct rtc_time tm;
101 struct timespec new_system, new_rtc; 101 struct timespec64 new_system, new_rtc;
102 struct timespec sleep_time; 102 struct timespec64 sleep_time;
103 int err; 103 int err;
104 104
105 if (has_persistent_clock()) 105 if (has_persistent_clock())
@@ -110,7 +110,7 @@ static int rtc_resume(struct device *dev)
110 return 0; 110 return 0;
111 111
112 /* snapshot the current rtc and system time at resume */ 112 /* snapshot the current rtc and system time at resume */
113 getnstimeofday(&new_system); 113 getnstimeofday64(&new_system);
114 err = rtc_read_time(rtc, &tm); 114 err = rtc_read_time(rtc, &tm);
115 if (err < 0) { 115 if (err < 0) {
116 pr_debug("%s: fail to read rtc time\n", dev_name(&rtc->dev)); 116 pr_debug("%s: fail to read rtc time\n", dev_name(&rtc->dev));
@@ -121,7 +121,7 @@ static int rtc_resume(struct device *dev)
121 pr_debug("%s: bogus resume time\n", dev_name(&rtc->dev)); 121 pr_debug("%s: bogus resume time\n", dev_name(&rtc->dev));
122 return 0; 122 return 0;
123 } 123 }
124 rtc_tm_to_time(&tm, &new_rtc.tv_sec); 124 new_rtc.tv_sec = rtc_tm_to_time64(&tm);
125 new_rtc.tv_nsec = 0; 125 new_rtc.tv_nsec = 0;
126 126
127 if (new_rtc.tv_sec < old_rtc.tv_sec) { 127 if (new_rtc.tv_sec < old_rtc.tv_sec) {
@@ -130,7 +130,7 @@ static int rtc_resume(struct device *dev)
130 } 130 }
131 131
132 /* calculate the RTC time delta (sleep time)*/ 132 /* calculate the RTC time delta (sleep time)*/
133 sleep_time = timespec_sub(new_rtc, old_rtc); 133 sleep_time = timespec64_sub(new_rtc, old_rtc);
134 134
135 /* 135 /*
136 * Since these RTC suspend/resume handlers are not called 136 * Since these RTC suspend/resume handlers are not called
@@ -139,11 +139,11 @@ static int rtc_resume(struct device *dev)
139 * so subtract kernel run-time between rtc_suspend to rtc_resume 139 * so subtract kernel run-time between rtc_suspend to rtc_resume
140 * to keep things accurate. 140 * to keep things accurate.
141 */ 141 */
142 sleep_time = timespec_sub(sleep_time, 142 sleep_time = timespec64_sub(sleep_time,
143 timespec_sub(new_system, old_system)); 143 timespec64_sub(new_system, old_system));
144 144
145 if (sleep_time.tv_sec >= 0) 145 if (sleep_time.tv_sec >= 0)
146 timekeeping_inject_sleeptime(&sleep_time); 146 timekeeping_inject_sleeptime64(&sleep_time);
147 rtc_hctosys_ret = 0; 147 rtc_hctosys_ret = 0;
148 return 0; 148 return 0;
149} 149}
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 5b2717f5dafa..45bfc28ee3aa 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -30,6 +30,14 @@ static int __rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm)
30 else { 30 else {
31 memset(tm, 0, sizeof(struct rtc_time)); 31 memset(tm, 0, sizeof(struct rtc_time));
32 err = rtc->ops->read_time(rtc->dev.parent, tm); 32 err = rtc->ops->read_time(rtc->dev.parent, tm);
33 if (err < 0) {
34 dev_err(&rtc->dev, "read_time: fail to read\n");
35 return err;
36 }
37
38 err = rtc_valid_tm(tm);
39 if (err < 0)
40 dev_err(&rtc->dev, "read_time: rtc_time isn't valid\n");
33 } 41 }
34 return err; 42 return err;
35} 43}
@@ -891,11 +899,24 @@ again:
891 if (next) { 899 if (next) {
892 struct rtc_wkalrm alarm; 900 struct rtc_wkalrm alarm;
893 int err; 901 int err;
902 int retry = 3;
903
894 alarm.time = rtc_ktime_to_tm(next->expires); 904 alarm.time = rtc_ktime_to_tm(next->expires);
895 alarm.enabled = 1; 905 alarm.enabled = 1;
906reprogram:
896 err = __rtc_set_alarm(rtc, &alarm); 907 err = __rtc_set_alarm(rtc, &alarm);
897 if (err == -ETIME) 908 if (err == -ETIME)
898 goto again; 909 goto again;
910 else if (err) {
911 if (retry-- > 0)
912 goto reprogram;
913
914 timer = container_of(next, struct rtc_timer, node);
915 timerqueue_del(&rtc->timerqueue, &timer->node);
916 timer->enabled = 0;
917 dev_err(&rtc->dev, "__rtc_set_alarm: err=%d\n", err);
918 goto again;
919 }
899 } else 920 } else
900 rtc_alarm_disable(rtc); 921 rtc_alarm_disable(rtc);
901 922
diff --git a/drivers/rtc/rtc-ab8500.c b/drivers/rtc/rtc-ab8500.c
index 727e2f5d14d9..866e0ef5122d 100644
--- a/drivers/rtc/rtc-ab8500.c
+++ b/drivers/rtc/rtc-ab8500.c
@@ -504,6 +504,8 @@ static int ab8500_rtc_probe(struct platform_device *pdev)
504 return err; 504 return err;
505 } 505 }
506 506
507 rtc->uie_unsupported = 1;
508
507 return 0; 509 return 0;
508} 510}
509 511
diff --git a/drivers/rtc/rtc-at91sam9.c b/drivers/rtc/rtc-at91sam9.c
index 596374304532..abac38abd38e 100644
--- a/drivers/rtc/rtc-at91sam9.c
+++ b/drivers/rtc/rtc-at91sam9.c
@@ -21,10 +21,9 @@
21#include <linux/slab.h> 21#include <linux/slab.h>
22#include <linux/platform_data/atmel.h> 22#include <linux/platform_data/atmel.h>
23#include <linux/io.h> 23#include <linux/io.h>
24 24#include <linux/mfd/syscon.h>
25#include <mach/at91_rtt.h> 25#include <linux/regmap.h>
26#include <mach/cpu.h> 26#include <linux/clk.h>
27#include <mach/hardware.h>
28 27
29/* 28/*
30 * This driver uses two configurable hardware resources that live in the 29 * This driver uses two configurable hardware resources that live in the
@@ -47,6 +46,22 @@
47 * registers available, likewise usable for more than "RTC" support. 46 * registers available, likewise usable for more than "RTC" support.
48 */ 47 */
49 48
49#define AT91_RTT_MR 0x00 /* Real-time Mode Register */
50#define AT91_RTT_RTPRES (0xffff << 0) /* Real-time Timer Prescaler Value */
51#define AT91_RTT_ALMIEN (1 << 16) /* Alarm Interrupt Enable */
52#define AT91_RTT_RTTINCIEN (1 << 17) /* Real Time Timer Increment Interrupt Enable */
53#define AT91_RTT_RTTRST (1 << 18) /* Real Time Timer Restart */
54
55#define AT91_RTT_AR 0x04 /* Real-time Alarm Register */
56#define AT91_RTT_ALMV (0xffffffff) /* Alarm Value */
57
58#define AT91_RTT_VR 0x08 /* Real-time Value Register */
59#define AT91_RTT_CRTV (0xffffffff) /* Current Real-time Value */
60
61#define AT91_RTT_SR 0x0c /* Real-time Status Register */
62#define AT91_RTT_ALMS (1 << 0) /* Real-time Alarm Status */
63#define AT91_RTT_RTTINC (1 << 1) /* Real-time Timer Increment */
64
50/* 65/*
51 * We store ALARM_DISABLED in ALMV to record that no alarm is set. 66 * We store ALARM_DISABLED in ALMV to record that no alarm is set.
52 * It's also the reset value for that field. 67 * It's also the reset value for that field.
@@ -58,19 +73,30 @@ struct sam9_rtc {
58 void __iomem *rtt; 73 void __iomem *rtt;
59 struct rtc_device *rtcdev; 74 struct rtc_device *rtcdev;
60 u32 imr; 75 u32 imr;
61 void __iomem *gpbr; 76 struct regmap *gpbr;
77 unsigned int gpbr_offset;
62 int irq; 78 int irq;
79 struct clk *sclk;
63}; 80};
64 81
65#define rtt_readl(rtc, field) \ 82#define rtt_readl(rtc, field) \
66 __raw_readl((rtc)->rtt + AT91_RTT_ ## field) 83 readl((rtc)->rtt + AT91_RTT_ ## field)
67#define rtt_writel(rtc, field, val) \ 84#define rtt_writel(rtc, field, val) \
68 __raw_writel((val), (rtc)->rtt + AT91_RTT_ ## field) 85 writel((val), (rtc)->rtt + AT91_RTT_ ## field)
86
87static inline unsigned int gpbr_readl(struct sam9_rtc *rtc)
88{
89 unsigned int val;
90
91 regmap_read(rtc->gpbr, rtc->gpbr_offset, &val);
69 92
70#define gpbr_readl(rtc) \ 93 return val;
71 __raw_readl((rtc)->gpbr) 94}
72#define gpbr_writel(rtc, val) \ 95
73 __raw_writel((val), (rtc)->gpbr) 96static inline void gpbr_writel(struct sam9_rtc *rtc, unsigned int val)
97{
98 regmap_write(rtc->gpbr, rtc->gpbr_offset, val);
99}
74 100
75/* 101/*
76 * Read current time and date in RTC 102 * Read current time and date in RTC
@@ -287,22 +313,22 @@ static const struct rtc_class_ops at91_rtc_ops = {
287 .alarm_irq_enable = at91_rtc_alarm_irq_enable, 313 .alarm_irq_enable = at91_rtc_alarm_irq_enable,
288}; 314};
289 315
316static struct regmap_config gpbr_regmap_config = {
317 .reg_bits = 32,
318 .val_bits = 32,
319 .reg_stride = 4,
320};
321
290/* 322/*
291 * Initialize and install RTC driver 323 * Initialize and install RTC driver
292 */ 324 */
293static int at91_rtc_probe(struct platform_device *pdev) 325static int at91_rtc_probe(struct platform_device *pdev)
294{ 326{
295 struct resource *r, *r_gpbr; 327 struct resource *r;
296 struct sam9_rtc *rtc; 328 struct sam9_rtc *rtc;
297 int ret, irq; 329 int ret, irq;
298 u32 mr; 330 u32 mr;
299 331 unsigned int sclk_rate;
300 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
301 r_gpbr = platform_get_resource(pdev, IORESOURCE_MEM, 1);
302 if (!r || !r_gpbr) {
303 dev_err(&pdev->dev, "need 2 ressources\n");
304 return -ENODEV;
305 }
306 332
307 irq = platform_get_irq(pdev, 0); 333 irq = platform_get_irq(pdev, 0);
308 if (irq < 0) { 334 if (irq < 0) {
@@ -321,24 +347,66 @@ static int at91_rtc_probe(struct platform_device *pdev)
321 device_init_wakeup(&pdev->dev, 1); 347 device_init_wakeup(&pdev->dev, 1);
322 348
323 platform_set_drvdata(pdev, rtc); 349 platform_set_drvdata(pdev, rtc);
324 rtc->rtt = devm_ioremap(&pdev->dev, r->start, resource_size(r)); 350
325 if (!rtc->rtt) { 351 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
326 dev_err(&pdev->dev, "failed to map registers, aborting.\n"); 352 rtc->rtt = devm_ioremap_resource(&pdev->dev, r);
327 return -ENOMEM; 353 if (IS_ERR(rtc->rtt))
354 return PTR_ERR(rtc->rtt);
355
356 if (!pdev->dev.of_node) {
357 /*
358 * TODO: Remove this code chunk when removing non DT board
359 * support. Remember to remove the gpbr_regmap_config
360 * variable too.
361 */
362 void __iomem *gpbr;
363
364 r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
365 gpbr = devm_ioremap_resource(&pdev->dev, r);
366 if (IS_ERR(gpbr))
367 return PTR_ERR(gpbr);
368
369 rtc->gpbr = regmap_init_mmio(NULL, gpbr,
370 &gpbr_regmap_config);
371 } else {
372 struct of_phandle_args args;
373
374 ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
375 "atmel,rtt-rtc-time-reg", 1, 0,
376 &args);
377 if (ret)
378 return ret;
379
380 rtc->gpbr = syscon_node_to_regmap(args.np);
381 rtc->gpbr_offset = args.args[0];
328 } 382 }
329 383
330 rtc->gpbr = devm_ioremap(&pdev->dev, r_gpbr->start, 384 if (IS_ERR(rtc->gpbr)) {
331 resource_size(r_gpbr)); 385 dev_err(&pdev->dev, "failed to retrieve gpbr regmap, aborting.\n");
332 if (!rtc->gpbr) {
333 dev_err(&pdev->dev, "failed to map gpbr registers, aborting.\n");
334 return -ENOMEM; 386 return -ENOMEM;
335 } 387 }
336 388
389 rtc->sclk = devm_clk_get(&pdev->dev, NULL);
390 if (IS_ERR(rtc->sclk))
391 return PTR_ERR(rtc->sclk);
392
393 sclk_rate = clk_get_rate(rtc->sclk);
394 if (!sclk_rate || sclk_rate > AT91_RTT_RTPRES) {
395 dev_err(&pdev->dev, "Invalid slow clock rate\n");
396 return -EINVAL;
397 }
398
399 ret = clk_prepare_enable(rtc->sclk);
400 if (ret) {
401 dev_err(&pdev->dev, "Could not enable slow clock\n");
402 return ret;
403 }
404
337 mr = rtt_readl(rtc, MR); 405 mr = rtt_readl(rtc, MR);
338 406
339 /* unless RTT is counting at 1 Hz, re-initialize it */ 407 /* unless RTT is counting at 1 Hz, re-initialize it */
340 if ((mr & AT91_RTT_RTPRES) != AT91_SLOW_CLOCK) { 408 if ((mr & AT91_RTT_RTPRES) != sclk_rate) {
341 mr = AT91_RTT_RTTRST | (AT91_SLOW_CLOCK & AT91_RTT_RTPRES); 409 mr = AT91_RTT_RTTRST | (sclk_rate & AT91_RTT_RTPRES);
342 gpbr_writel(rtc, 0); 410 gpbr_writel(rtc, 0);
343 } 411 }
344 412
@@ -383,6 +451,9 @@ static int at91_rtc_remove(struct platform_device *pdev)
383 /* disable all interrupts */ 451 /* disable all interrupts */
384 rtt_writel(rtc, MR, mr & ~(AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN)); 452 rtt_writel(rtc, MR, mr & ~(AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN));
385 453
454 if (!IS_ERR(rtc->sclk))
455 clk_disable_unprepare(rtc->sclk);
456
386 return 0; 457 return 0;
387} 458}
388 459
@@ -440,6 +511,14 @@ static int at91_rtc_resume(struct device *dev)
440 511
441static SIMPLE_DEV_PM_OPS(at91_rtc_pm_ops, at91_rtc_suspend, at91_rtc_resume); 512static SIMPLE_DEV_PM_OPS(at91_rtc_pm_ops, at91_rtc_suspend, at91_rtc_resume);
442 513
514#ifdef CONFIG_OF
515static const struct of_device_id at91_rtc_dt_ids[] = {
516 { .compatible = "atmel,at91sam9260-rtt" },
517 { /* sentinel */ }
518};
519MODULE_DEVICE_TABLE(of, at91_rtc_dt_ids);
520#endif
521
443static struct platform_driver at91_rtc_driver = { 522static struct platform_driver at91_rtc_driver = {
444 .probe = at91_rtc_probe, 523 .probe = at91_rtc_probe,
445 .remove = at91_rtc_remove, 524 .remove = at91_rtc_remove,
@@ -448,6 +527,7 @@ static struct platform_driver at91_rtc_driver = {
448 .name = "rtc-at91sam9", 527 .name = "rtc-at91sam9",
449 .owner = THIS_MODULE, 528 .owner = THIS_MODULE,
450 .pm = &at91_rtc_pm_ops, 529 .pm = &at91_rtc_pm_ops,
530 .of_match_table = of_match_ptr(at91_rtc_dt_ids),
451 }, 531 },
452}; 532};
453 533
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index bb43cf703efc..4ffabb322a9a 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -35,7 +35,7 @@ enum ds_type {
35 ds_1388, 35 ds_1388,
36 ds_3231, 36 ds_3231,
37 m41t00, 37 m41t00,
38 mcp7941x, 38 mcp794xx,
39 rx_8025, 39 rx_8025,
40 last_ds_type /* always last */ 40 last_ds_type /* always last */
41 /* rs5c372 too? different address... */ 41 /* rs5c372 too? different address... */
@@ -46,7 +46,7 @@ enum ds_type {
46#define DS1307_REG_SECS 0x00 /* 00-59 */ 46#define DS1307_REG_SECS 0x00 /* 00-59 */
47# define DS1307_BIT_CH 0x80 47# define DS1307_BIT_CH 0x80
48# define DS1340_BIT_nEOSC 0x80 48# define DS1340_BIT_nEOSC 0x80
49# define MCP7941X_BIT_ST 0x80 49# define MCP794XX_BIT_ST 0x80
50#define DS1307_REG_MIN 0x01 /* 00-59 */ 50#define DS1307_REG_MIN 0x01 /* 00-59 */
51#define DS1307_REG_HOUR 0x02 /* 00-23, or 1-12{am,pm} */ 51#define DS1307_REG_HOUR 0x02 /* 00-23, or 1-12{am,pm} */
52# define DS1307_BIT_12HR 0x40 /* in REG_HOUR */ 52# define DS1307_BIT_12HR 0x40 /* in REG_HOUR */
@@ -54,7 +54,7 @@ enum ds_type {
54# define DS1340_BIT_CENTURY_EN 0x80 /* in REG_HOUR */ 54# define DS1340_BIT_CENTURY_EN 0x80 /* in REG_HOUR */
55# define DS1340_BIT_CENTURY 0x40 /* in REG_HOUR */ 55# define DS1340_BIT_CENTURY 0x40 /* in REG_HOUR */
56#define DS1307_REG_WDAY 0x03 /* 01-07 */ 56#define DS1307_REG_WDAY 0x03 /* 01-07 */
57# define MCP7941X_BIT_VBATEN 0x08 57# define MCP794XX_BIT_VBATEN 0x08
58#define DS1307_REG_MDAY 0x04 /* 01-31 */ 58#define DS1307_REG_MDAY 0x04 /* 01-31 */
59#define DS1307_REG_MONTH 0x05 /* 01-12 */ 59#define DS1307_REG_MONTH 0x05 /* 01-12 */
60# define DS1337_BIT_CENTURY 0x80 /* in REG_MONTH */ 60# define DS1337_BIT_CENTURY 0x80 /* in REG_MONTH */
@@ -159,7 +159,7 @@ static struct chip_desc chips[last_ds_type] = {
159 [ds_3231] = { 159 [ds_3231] = {
160 .alarm = 1, 160 .alarm = 1,
161 }, 161 },
162 [mcp7941x] = { 162 [mcp794xx] = {
163 .alarm = 1, 163 .alarm = 1,
164 /* this is battery backed SRAM */ 164 /* this is battery backed SRAM */
165 .nvram_offset = 0x20, 165 .nvram_offset = 0x20,
@@ -176,7 +176,8 @@ static const struct i2c_device_id ds1307_id[] = {
176 { "ds1340", ds_1340 }, 176 { "ds1340", ds_1340 },
177 { "ds3231", ds_3231 }, 177 { "ds3231", ds_3231 },
178 { "m41t00", m41t00 }, 178 { "m41t00", m41t00 },
179 { "mcp7941x", mcp7941x }, 179 { "mcp7940x", mcp794xx },
180 { "mcp7941x", mcp794xx },
180 { "pt7c4338", ds_1307 }, 181 { "pt7c4338", ds_1307 },
181 { "rx8025", rx_8025 }, 182 { "rx8025", rx_8025 },
182 { } 183 { }
@@ -439,14 +440,14 @@ static int ds1307_set_time(struct device *dev, struct rtc_time *t)
439 buf[DS1307_REG_HOUR] |= DS1340_BIT_CENTURY_EN 440 buf[DS1307_REG_HOUR] |= DS1340_BIT_CENTURY_EN
440 | DS1340_BIT_CENTURY; 441 | DS1340_BIT_CENTURY;
441 break; 442 break;
442 case mcp7941x: 443 case mcp794xx:
443 /* 444 /*
444 * these bits were cleared when preparing the date/time 445 * these bits were cleared when preparing the date/time
445 * values and need to be set again before writing the 446 * values and need to be set again before writing the
446 * buffer out to the device. 447 * buffer out to the device.
447 */ 448 */
448 buf[DS1307_REG_SECS] |= MCP7941X_BIT_ST; 449 buf[DS1307_REG_SECS] |= MCP794XX_BIT_ST;
449 buf[DS1307_REG_WDAY] |= MCP7941X_BIT_VBATEN; 450 buf[DS1307_REG_WDAY] |= MCP794XX_BIT_VBATEN;
450 break; 451 break;
451 default: 452 default:
452 break; 453 break;
@@ -614,26 +615,26 @@ static const struct rtc_class_ops ds13xx_rtc_ops = {
614/*----------------------------------------------------------------------*/ 615/*----------------------------------------------------------------------*/
615 616
616/* 617/*
617 * Alarm support for mcp7941x devices. 618 * Alarm support for mcp794xx devices.
618 */ 619 */
619 620
620#define MCP7941X_REG_CONTROL 0x07 621#define MCP794XX_REG_CONTROL 0x07
621# define MCP7941X_BIT_ALM0_EN 0x10 622# define MCP794XX_BIT_ALM0_EN 0x10
622# define MCP7941X_BIT_ALM1_EN 0x20 623# define MCP794XX_BIT_ALM1_EN 0x20
623#define MCP7941X_REG_ALARM0_BASE 0x0a 624#define MCP794XX_REG_ALARM0_BASE 0x0a
624#define MCP7941X_REG_ALARM0_CTRL 0x0d 625#define MCP794XX_REG_ALARM0_CTRL 0x0d
625#define MCP7941X_REG_ALARM1_BASE 0x11 626#define MCP794XX_REG_ALARM1_BASE 0x11
626#define MCP7941X_REG_ALARM1_CTRL 0x14 627#define MCP794XX_REG_ALARM1_CTRL 0x14
627# define MCP7941X_BIT_ALMX_IF (1 << 3) 628# define MCP794XX_BIT_ALMX_IF (1 << 3)
628# define MCP7941X_BIT_ALMX_C0 (1 << 4) 629# define MCP794XX_BIT_ALMX_C0 (1 << 4)
629# define MCP7941X_BIT_ALMX_C1 (1 << 5) 630# define MCP794XX_BIT_ALMX_C1 (1 << 5)
630# define MCP7941X_BIT_ALMX_C2 (1 << 6) 631# define MCP794XX_BIT_ALMX_C2 (1 << 6)
631# define MCP7941X_BIT_ALMX_POL (1 << 7) 632# define MCP794XX_BIT_ALMX_POL (1 << 7)
632# define MCP7941X_MSK_ALMX_MATCH (MCP7941X_BIT_ALMX_C0 | \ 633# define MCP794XX_MSK_ALMX_MATCH (MCP794XX_BIT_ALMX_C0 | \
633 MCP7941X_BIT_ALMX_C1 | \ 634 MCP794XX_BIT_ALMX_C1 | \
634 MCP7941X_BIT_ALMX_C2) 635 MCP794XX_BIT_ALMX_C2)
635 636
636static void mcp7941x_work(struct work_struct *work) 637static void mcp794xx_work(struct work_struct *work)
637{ 638{
638 struct ds1307 *ds1307 = container_of(work, struct ds1307, work); 639 struct ds1307 *ds1307 = container_of(work, struct ds1307, work);
639 struct i2c_client *client = ds1307->client; 640 struct i2c_client *client = ds1307->client;
@@ -642,22 +643,22 @@ static void mcp7941x_work(struct work_struct *work)
642 mutex_lock(&ds1307->rtc->ops_lock); 643 mutex_lock(&ds1307->rtc->ops_lock);
643 644
644 /* Check and clear alarm 0 interrupt flag. */ 645 /* Check and clear alarm 0 interrupt flag. */
645 reg = i2c_smbus_read_byte_data(client, MCP7941X_REG_ALARM0_CTRL); 646 reg = i2c_smbus_read_byte_data(client, MCP794XX_REG_ALARM0_CTRL);
646 if (reg < 0) 647 if (reg < 0)
647 goto out; 648 goto out;
648 if (!(reg & MCP7941X_BIT_ALMX_IF)) 649 if (!(reg & MCP794XX_BIT_ALMX_IF))
649 goto out; 650 goto out;
650 reg &= ~MCP7941X_BIT_ALMX_IF; 651 reg &= ~MCP794XX_BIT_ALMX_IF;
651 ret = i2c_smbus_write_byte_data(client, MCP7941X_REG_ALARM0_CTRL, reg); 652 ret = i2c_smbus_write_byte_data(client, MCP794XX_REG_ALARM0_CTRL, reg);
652 if (ret < 0) 653 if (ret < 0)
653 goto out; 654 goto out;
654 655
655 /* Disable alarm 0. */ 656 /* Disable alarm 0. */
656 reg = i2c_smbus_read_byte_data(client, MCP7941X_REG_CONTROL); 657 reg = i2c_smbus_read_byte_data(client, MCP794XX_REG_CONTROL);
657 if (reg < 0) 658 if (reg < 0)
658 goto out; 659 goto out;
659 reg &= ~MCP7941X_BIT_ALM0_EN; 660 reg &= ~MCP794XX_BIT_ALM0_EN;
660 ret = i2c_smbus_write_byte_data(client, MCP7941X_REG_CONTROL, reg); 661 ret = i2c_smbus_write_byte_data(client, MCP794XX_REG_CONTROL, reg);
661 if (ret < 0) 662 if (ret < 0)
662 goto out; 663 goto out;
663 664
@@ -669,7 +670,7 @@ out:
669 mutex_unlock(&ds1307->rtc->ops_lock); 670 mutex_unlock(&ds1307->rtc->ops_lock);
670} 671}
671 672
672static int mcp7941x_read_alarm(struct device *dev, struct rtc_wkalrm *t) 673static int mcp794xx_read_alarm(struct device *dev, struct rtc_wkalrm *t)
673{ 674{
674 struct i2c_client *client = to_i2c_client(dev); 675 struct i2c_client *client = to_i2c_client(dev);
675 struct ds1307 *ds1307 = i2c_get_clientdata(client); 676 struct ds1307 *ds1307 = i2c_get_clientdata(client);
@@ -680,11 +681,11 @@ static int mcp7941x_read_alarm(struct device *dev, struct rtc_wkalrm *t)
680 return -EINVAL; 681 return -EINVAL;
681 682
682 /* Read control and alarm 0 registers. */ 683 /* Read control and alarm 0 registers. */
683 ret = ds1307->read_block_data(client, MCP7941X_REG_CONTROL, 10, regs); 684 ret = ds1307->read_block_data(client, MCP794XX_REG_CONTROL, 10, regs);
684 if (ret < 0) 685 if (ret < 0)
685 return ret; 686 return ret;
686 687
687 t->enabled = !!(regs[0] & MCP7941X_BIT_ALM0_EN); 688 t->enabled = !!(regs[0] & MCP794XX_BIT_ALM0_EN);
688 689
689 /* Report alarm 0 time assuming 24-hour and day-of-month modes. */ 690 /* Report alarm 0 time assuming 24-hour and day-of-month modes. */
690 t->time.tm_sec = bcd2bin(ds1307->regs[3] & 0x7f); 691 t->time.tm_sec = bcd2bin(ds1307->regs[3] & 0x7f);
@@ -701,14 +702,14 @@ static int mcp7941x_read_alarm(struct device *dev, struct rtc_wkalrm *t)
701 "enabled=%d polarity=%d irq=%d match=%d\n", __func__, 702 "enabled=%d polarity=%d irq=%d match=%d\n", __func__,
702 t->time.tm_sec, t->time.tm_min, t->time.tm_hour, 703 t->time.tm_sec, t->time.tm_min, t->time.tm_hour,
703 t->time.tm_wday, t->time.tm_mday, t->time.tm_mon, t->enabled, 704 t->time.tm_wday, t->time.tm_mday, t->time.tm_mon, t->enabled,
704 !!(ds1307->regs[6] & MCP7941X_BIT_ALMX_POL), 705 !!(ds1307->regs[6] & MCP794XX_BIT_ALMX_POL),
705 !!(ds1307->regs[6] & MCP7941X_BIT_ALMX_IF), 706 !!(ds1307->regs[6] & MCP794XX_BIT_ALMX_IF),
706 (ds1307->regs[6] & MCP7941X_MSK_ALMX_MATCH) >> 4); 707 (ds1307->regs[6] & MCP794XX_MSK_ALMX_MATCH) >> 4);
707 708
708 return 0; 709 return 0;
709} 710}
710 711
711static int mcp7941x_set_alarm(struct device *dev, struct rtc_wkalrm *t) 712static int mcp794xx_set_alarm(struct device *dev, struct rtc_wkalrm *t)
712{ 713{
713 struct i2c_client *client = to_i2c_client(dev); 714 struct i2c_client *client = to_i2c_client(dev);
714 struct ds1307 *ds1307 = i2c_get_clientdata(client); 715 struct ds1307 *ds1307 = i2c_get_clientdata(client);
@@ -725,7 +726,7 @@ static int mcp7941x_set_alarm(struct device *dev, struct rtc_wkalrm *t)
725 t->enabled, t->pending); 726 t->enabled, t->pending);
726 727
727 /* Read control and alarm 0 registers. */ 728 /* Read control and alarm 0 registers. */
728 ret = ds1307->read_block_data(client, MCP7941X_REG_CONTROL, 10, regs); 729 ret = ds1307->read_block_data(client, MCP794XX_REG_CONTROL, 10, regs);
729 if (ret < 0) 730 if (ret < 0)
730 return ret; 731 return ret;
731 732
@@ -738,23 +739,23 @@ static int mcp7941x_set_alarm(struct device *dev, struct rtc_wkalrm *t)
738 regs[8] = bin2bcd(t->time.tm_mon) + 1; 739 regs[8] = bin2bcd(t->time.tm_mon) + 1;
739 740
740 /* Clear the alarm 0 interrupt flag. */ 741 /* Clear the alarm 0 interrupt flag. */
741 regs[6] &= ~MCP7941X_BIT_ALMX_IF; 742 regs[6] &= ~MCP794XX_BIT_ALMX_IF;
742 /* Set alarm match: second, minute, hour, day, date, month. */ 743 /* Set alarm match: second, minute, hour, day, date, month. */
743 regs[6] |= MCP7941X_MSK_ALMX_MATCH; 744 regs[6] |= MCP794XX_MSK_ALMX_MATCH;
744 745
745 if (t->enabled) 746 if (t->enabled)
746 regs[0] |= MCP7941X_BIT_ALM0_EN; 747 regs[0] |= MCP794XX_BIT_ALM0_EN;
747 else 748 else
748 regs[0] &= ~MCP7941X_BIT_ALM0_EN; 749 regs[0] &= ~MCP794XX_BIT_ALM0_EN;
749 750
750 ret = ds1307->write_block_data(client, MCP7941X_REG_CONTROL, 10, regs); 751 ret = ds1307->write_block_data(client, MCP794XX_REG_CONTROL, 10, regs);
751 if (ret < 0) 752 if (ret < 0)
752 return ret; 753 return ret;
753 754
754 return 0; 755 return 0;
755} 756}
756 757
757static int mcp7941x_alarm_irq_enable(struct device *dev, unsigned int enabled) 758static int mcp794xx_alarm_irq_enable(struct device *dev, unsigned int enabled)
758{ 759{
759 struct i2c_client *client = to_i2c_client(dev); 760 struct i2c_client *client = to_i2c_client(dev);
760 struct ds1307 *ds1307 = i2c_get_clientdata(client); 761 struct ds1307 *ds1307 = i2c_get_clientdata(client);
@@ -763,24 +764,24 @@ static int mcp7941x_alarm_irq_enable(struct device *dev, unsigned int enabled)
763 if (!test_bit(HAS_ALARM, &ds1307->flags)) 764 if (!test_bit(HAS_ALARM, &ds1307->flags))
764 return -EINVAL; 765 return -EINVAL;
765 766
766 reg = i2c_smbus_read_byte_data(client, MCP7941X_REG_CONTROL); 767 reg = i2c_smbus_read_byte_data(client, MCP794XX_REG_CONTROL);
767 if (reg < 0) 768 if (reg < 0)
768 return reg; 769 return reg;
769 770
770 if (enabled) 771 if (enabled)
771 reg |= MCP7941X_BIT_ALM0_EN; 772 reg |= MCP794XX_BIT_ALM0_EN;
772 else 773 else
773 reg &= ~MCP7941X_BIT_ALM0_EN; 774 reg &= ~MCP794XX_BIT_ALM0_EN;
774 775
775 return i2c_smbus_write_byte_data(client, MCP7941X_REG_CONTROL, reg); 776 return i2c_smbus_write_byte_data(client, MCP794XX_REG_CONTROL, reg);
776} 777}
777 778
778static const struct rtc_class_ops mcp7941x_rtc_ops = { 779static const struct rtc_class_ops mcp794xx_rtc_ops = {
779 .read_time = ds1307_get_time, 780 .read_time = ds1307_get_time,
780 .set_time = ds1307_set_time, 781 .set_time = ds1307_set_time,
781 .read_alarm = mcp7941x_read_alarm, 782 .read_alarm = mcp794xx_read_alarm,
782 .set_alarm = mcp7941x_set_alarm, 783 .set_alarm = mcp794xx_set_alarm,
783 .alarm_irq_enable = mcp7941x_alarm_irq_enable, 784 .alarm_irq_enable = mcp794xx_alarm_irq_enable,
784}; 785};
785 786
786/*----------------------------------------------------------------------*/ 787/*----------------------------------------------------------------------*/
@@ -1049,10 +1050,10 @@ static int ds1307_probe(struct i2c_client *client,
1049 case ds_1388: 1050 case ds_1388:
1050 ds1307->offset = 1; /* Seconds starts at 1 */ 1051 ds1307->offset = 1; /* Seconds starts at 1 */
1051 break; 1052 break;
1052 case mcp7941x: 1053 case mcp794xx:
1053 rtc_ops = &mcp7941x_rtc_ops; 1054 rtc_ops = &mcp794xx_rtc_ops;
1054 if (ds1307->client->irq > 0 && chip->alarm) { 1055 if (ds1307->client->irq > 0 && chip->alarm) {
1055 INIT_WORK(&ds1307->work, mcp7941x_work); 1056 INIT_WORK(&ds1307->work, mcp794xx_work);
1056 want_irq = true; 1057 want_irq = true;
1057 } 1058 }
1058 break; 1059 break;
@@ -1117,18 +1118,18 @@ read_rtc:
1117 dev_warn(&client->dev, "SET TIME!\n"); 1118 dev_warn(&client->dev, "SET TIME!\n");
1118 } 1119 }
1119 break; 1120 break;
1120 case mcp7941x: 1121 case mcp794xx:
1121 /* make sure that the backup battery is enabled */ 1122 /* make sure that the backup battery is enabled */
1122 if (!(ds1307->regs[DS1307_REG_WDAY] & MCP7941X_BIT_VBATEN)) { 1123 if (!(ds1307->regs[DS1307_REG_WDAY] & MCP794XX_BIT_VBATEN)) {
1123 i2c_smbus_write_byte_data(client, DS1307_REG_WDAY, 1124 i2c_smbus_write_byte_data(client, DS1307_REG_WDAY,
1124 ds1307->regs[DS1307_REG_WDAY] 1125 ds1307->regs[DS1307_REG_WDAY]
1125 | MCP7941X_BIT_VBATEN); 1126 | MCP794XX_BIT_VBATEN);
1126 } 1127 }
1127 1128
1128 /* clock halted? turn it on, so clock can tick. */ 1129 /* clock halted? turn it on, so clock can tick. */
1129 if (!(tmp & MCP7941X_BIT_ST)) { 1130 if (!(tmp & MCP794XX_BIT_ST)) {
1130 i2c_smbus_write_byte_data(client, DS1307_REG_SECS, 1131 i2c_smbus_write_byte_data(client, DS1307_REG_SECS,
1131 MCP7941X_BIT_ST); 1132 MCP794XX_BIT_ST);
1132 dev_warn(&client->dev, "SET TIME!\n"); 1133 dev_warn(&client->dev, "SET TIME!\n");
1133 goto read_rtc; 1134 goto read_rtc;
1134 } 1135 }
diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c
index 9e6e14fb53d7..8605fde394b2 100644
--- a/drivers/rtc/rtc-ds1374.c
+++ b/drivers/rtc/rtc-ds1374.c
@@ -4,6 +4,7 @@
4 * Based on code by Randy Vinson <rvinson@mvista.com>, 4 * Based on code by Randy Vinson <rvinson@mvista.com>,
5 * which was based on the m41t00.c by Mark Greer <mgreer@mvista.com>. 5 * which was based on the m41t00.c by Mark Greer <mgreer@mvista.com>.
6 * 6 *
7 * Copyright (C) 2014 Rose Technology
7 * Copyright (C) 2006-2007 Freescale Semiconductor 8 * Copyright (C) 2006-2007 Freescale Semiconductor
8 * 9 *
9 * 2005 (c) MontaVista Software, Inc. This file is licensed under 10 * 2005 (c) MontaVista Software, Inc. This file is licensed under
@@ -26,6 +27,13 @@
26#include <linux/workqueue.h> 27#include <linux/workqueue.h>
27#include <linux/slab.h> 28#include <linux/slab.h>
28#include <linux/pm.h> 29#include <linux/pm.h>
30#ifdef CONFIG_RTC_DRV_DS1374_WDT
31#include <linux/fs.h>
32#include <linux/ioctl.h>
33#include <linux/miscdevice.h>
34#include <linux/reboot.h>
35#include <linux/watchdog.h>
36#endif
29 37
30#define DS1374_REG_TOD0 0x00 /* Time of Day */ 38#define DS1374_REG_TOD0 0x00 /* Time of Day */
31#define DS1374_REG_TOD1 0x01 39#define DS1374_REG_TOD1 0x01
@@ -49,6 +57,14 @@ static const struct i2c_device_id ds1374_id[] = {
49}; 57};
50MODULE_DEVICE_TABLE(i2c, ds1374_id); 58MODULE_DEVICE_TABLE(i2c, ds1374_id);
51 59
60#ifdef CONFIG_OF
61static const struct of_device_id ds1374_of_match[] = {
62 { .compatible = "dallas,ds1374" },
63 { }
64};
65MODULE_DEVICE_TABLE(of, ds1374_of_match);
66#endif
67
52struct ds1374 { 68struct ds1374 {
53 struct i2c_client *client; 69 struct i2c_client *client;
54 struct rtc_device *rtc; 70 struct rtc_device *rtc;
@@ -162,6 +178,7 @@ static int ds1374_set_time(struct device *dev, struct rtc_time *time)
162 return ds1374_write_rtc(client, itime, DS1374_REG_TOD0, 4); 178 return ds1374_write_rtc(client, itime, DS1374_REG_TOD0, 4);
163} 179}
164 180
181#ifndef CONFIG_RTC_DRV_DS1374_WDT
165/* The ds1374 has a decrementer for an alarm, rather than a comparator. 182/* The ds1374 has a decrementer for an alarm, rather than a comparator.
166 * If the time of day is changed, then the alarm will need to be 183 * If the time of day is changed, then the alarm will need to be
167 * reset. 184 * reset.
@@ -263,6 +280,7 @@ out:
263 mutex_unlock(&ds1374->mutex); 280 mutex_unlock(&ds1374->mutex);
264 return ret; 281 return ret;
265} 282}
283#endif
266 284
267static irqreturn_t ds1374_irq(int irq, void *dev_id) 285static irqreturn_t ds1374_irq(int irq, void *dev_id)
268{ 286{
@@ -307,6 +325,7 @@ unlock:
307 mutex_unlock(&ds1374->mutex); 325 mutex_unlock(&ds1374->mutex);
308} 326}
309 327
328#ifndef CONFIG_RTC_DRV_DS1374_WDT
310static int ds1374_alarm_irq_enable(struct device *dev, unsigned int enabled) 329static int ds1374_alarm_irq_enable(struct device *dev, unsigned int enabled)
311{ 330{
312 struct i2c_client *client = to_i2c_client(dev); 331 struct i2c_client *client = to_i2c_client(dev);
@@ -331,15 +350,260 @@ out:
331 mutex_unlock(&ds1374->mutex); 350 mutex_unlock(&ds1374->mutex);
332 return ret; 351 return ret;
333} 352}
353#endif
334 354
335static const struct rtc_class_ops ds1374_rtc_ops = { 355static const struct rtc_class_ops ds1374_rtc_ops = {
336 .read_time = ds1374_read_time, 356 .read_time = ds1374_read_time,
337 .set_time = ds1374_set_time, 357 .set_time = ds1374_set_time,
358#ifndef CONFIG_RTC_DRV_DS1374_WDT
338 .read_alarm = ds1374_read_alarm, 359 .read_alarm = ds1374_read_alarm,
339 .set_alarm = ds1374_set_alarm, 360 .set_alarm = ds1374_set_alarm,
340 .alarm_irq_enable = ds1374_alarm_irq_enable, 361 .alarm_irq_enable = ds1374_alarm_irq_enable,
362#endif
363};
364
365#ifdef CONFIG_RTC_DRV_DS1374_WDT
366/*
367 *****************************************************************************
368 *
369 * Watchdog Driver
370 *
371 *****************************************************************************
372 */
373static struct i2c_client *save_client;
374/* Default margin */
375#define WD_TIMO 131762
376
377#define DRV_NAME "DS1374 Watchdog"
378
379static int wdt_margin = WD_TIMO;
380static unsigned long wdt_is_open;
381module_param(wdt_margin, int, 0);
382MODULE_PARM_DESC(wdt_margin, "Watchdog timeout in seconds (default 32s)");
383
384static const struct watchdog_info ds1374_wdt_info = {
385 .identity = "DS1374 WTD",
386 .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING |
387 WDIOF_MAGICCLOSE,
341}; 388};
342 389
390static int ds1374_wdt_settimeout(unsigned int timeout)
391{
392 int ret = -ENOIOCTLCMD;
393 int cr;
394
395 ret = cr = i2c_smbus_read_byte_data(save_client, DS1374_REG_CR);
396 if (ret < 0)
397 goto out;
398
399 /* Disable any existing watchdog/alarm before setting the new one */
400 cr &= ~DS1374_REG_CR_WACE;
401
402 ret = i2c_smbus_write_byte_data(save_client, DS1374_REG_CR, cr);
403 if (ret < 0)
404 goto out;
405
406 /* Set new watchdog time */
407 ret = ds1374_write_rtc(save_client, timeout, DS1374_REG_WDALM0, 3);
408 if (ret) {
409 pr_info("rtc-ds1374 - couldn't set new watchdog time\n");
410 goto out;
411 }
412
413 /* Enable watchdog timer */
414 cr |= DS1374_REG_CR_WACE | DS1374_REG_CR_WDALM;
415 cr &= ~DS1374_REG_CR_AIE;
416
417 ret = i2c_smbus_write_byte_data(save_client, DS1374_REG_CR, cr);
418 if (ret < 0)
419 goto out;
420
421 return 0;
422out:
423 return ret;
424}
425
426
427/*
428 * Reload the watchdog timer. (ie, pat the watchdog)
429 */
430static void ds1374_wdt_ping(void)
431{
432 u32 val;
433 int ret = 0;
434
435 ret = ds1374_read_rtc(save_client, &val, DS1374_REG_WDALM0, 3);
436 if (ret)
437 pr_info("WD TICK FAIL!!!!!!!!!! %i\n", ret);
438}
439
440static void ds1374_wdt_disable(void)
441{
442 int ret = -ENOIOCTLCMD;
443 int cr;
444
445 cr = i2c_smbus_read_byte_data(save_client, DS1374_REG_CR);
446 /* Disable watchdog timer */
447 cr &= ~DS1374_REG_CR_WACE;
448
449 ret = i2c_smbus_write_byte_data(save_client, DS1374_REG_CR, cr);
450}
451
452/*
453 * Watchdog device is opened, and watchdog starts running.
454 */
455static int ds1374_wdt_open(struct inode *inode, struct file *file)
456{
457 struct ds1374 *ds1374 = i2c_get_clientdata(save_client);
458
459 if (MINOR(inode->i_rdev) == WATCHDOG_MINOR) {
460 mutex_lock(&ds1374->mutex);
461 if (test_and_set_bit(0, &wdt_is_open)) {
462 mutex_unlock(&ds1374->mutex);
463 return -EBUSY;
464 }
465 /*
466 * Activate
467 */
468 wdt_is_open = 1;
469 mutex_unlock(&ds1374->mutex);
470 return nonseekable_open(inode, file);
471 }
472 return -ENODEV;
473}
474
475/*
476 * Close the watchdog device.
477 */
478static int ds1374_wdt_release(struct inode *inode, struct file *file)
479{
480 if (MINOR(inode->i_rdev) == WATCHDOG_MINOR)
481 clear_bit(0, &wdt_is_open);
482
483 return 0;
484}
485
486/*
487 * Pat the watchdog whenever device is written to.
488 */
489static ssize_t ds1374_wdt_write(struct file *file, const char __user *data,
490 size_t len, loff_t *ppos)
491{
492 if (len) {
493 ds1374_wdt_ping();
494 return 1;
495 }
496 return 0;
497}
498
499static ssize_t ds1374_wdt_read(struct file *file, char __user *data,
500 size_t len, loff_t *ppos)
501{
502 return 0;
503}
504
505/*
506 * Handle commands from user-space.
507 */
508static long ds1374_wdt_ioctl(struct file *file, unsigned int cmd,
509 unsigned long arg)
510{
511 int new_margin, options;
512
513 switch (cmd) {
514 case WDIOC_GETSUPPORT:
515 return copy_to_user((struct watchdog_info __user *)arg,
516 &ds1374_wdt_info, sizeof(ds1374_wdt_info)) ? -EFAULT : 0;
517
518 case WDIOC_GETSTATUS:
519 case WDIOC_GETBOOTSTATUS:
520 return put_user(0, (int __user *)arg);
521 case WDIOC_KEEPALIVE:
522 ds1374_wdt_ping();
523 return 0;
524 case WDIOC_SETTIMEOUT:
525 if (get_user(new_margin, (int __user *)arg))
526 return -EFAULT;
527
528 if (new_margin < 1 || new_margin > 16777216)
529 return -EINVAL;
530
531 wdt_margin = new_margin;
532 ds1374_wdt_settimeout(new_margin);
533 ds1374_wdt_ping();
534 /* fallthrough */
535 case WDIOC_GETTIMEOUT:
536 return put_user(wdt_margin, (int __user *)arg);
537 case WDIOC_SETOPTIONS:
538 if (copy_from_user(&options, (int __user *)arg, sizeof(int)))
539 return -EFAULT;
540
541 if (options & WDIOS_DISABLECARD) {
542 pr_info("rtc-ds1374: disable watchdog\n");
543 ds1374_wdt_disable();
544 }
545
546 if (options & WDIOS_ENABLECARD) {
547 pr_info("rtc-ds1374: enable watchdog\n");
548 ds1374_wdt_settimeout(wdt_margin);
549 ds1374_wdt_ping();
550 }
551
552 return -EINVAL;
553 }
554 return -ENOTTY;
555}
556
557static long ds1374_wdt_unlocked_ioctl(struct file *file, unsigned int cmd,
558 unsigned long arg)
559{
560 int ret;
561 struct ds1374 *ds1374 = i2c_get_clientdata(save_client);
562
563 mutex_lock(&ds1374->mutex);
564 ret = ds1374_wdt_ioctl(file, cmd, arg);
565 mutex_unlock(&ds1374->mutex);
566
567 return ret;
568}
569
570static int ds1374_wdt_notify_sys(struct notifier_block *this,
571 unsigned long code, void *unused)
572{
573 if (code == SYS_DOWN || code == SYS_HALT)
574 /* Disable Watchdog */
575 ds1374_wdt_disable();
576 return NOTIFY_DONE;
577}
578
579static const struct file_operations ds1374_wdt_fops = {
580 .owner = THIS_MODULE,
581 .read = ds1374_wdt_read,
582 .unlocked_ioctl = ds1374_wdt_unlocked_ioctl,
583 .write = ds1374_wdt_write,
584 .open = ds1374_wdt_open,
585 .release = ds1374_wdt_release,
586 .llseek = no_llseek,
587};
588
589static struct miscdevice ds1374_miscdev = {
590 .minor = WATCHDOG_MINOR,
591 .name = "watchdog",
592 .fops = &ds1374_wdt_fops,
593};
594
595static struct notifier_block ds1374_wdt_notifier = {
596 .notifier_call = ds1374_wdt_notify_sys,
597};
598
599#endif /*CONFIG_RTC_DRV_DS1374_WDT*/
600/*
601 *****************************************************************************
602 *
603 * Driver Interface
604 *
605 *****************************************************************************
606 */
343static int ds1374_probe(struct i2c_client *client, 607static int ds1374_probe(struct i2c_client *client,
344 const struct i2c_device_id *id) 608 const struct i2c_device_id *id)
345{ 609{
@@ -378,12 +642,33 @@ static int ds1374_probe(struct i2c_client *client,
378 return PTR_ERR(ds1374->rtc); 642 return PTR_ERR(ds1374->rtc);
379 } 643 }
380 644
645#ifdef CONFIG_RTC_DRV_DS1374_WDT
646 save_client = client;
647 ret = misc_register(&ds1374_miscdev);
648 if (ret)
649 return ret;
650 ret = register_reboot_notifier(&ds1374_wdt_notifier);
651 if (ret) {
652 misc_deregister(&ds1374_miscdev);
653 return ret;
654 }
655 ds1374_wdt_settimeout(131072);
656#endif
657
381 return 0; 658 return 0;
382} 659}
383 660
384static int ds1374_remove(struct i2c_client *client) 661static int ds1374_remove(struct i2c_client *client)
385{ 662{
386 struct ds1374 *ds1374 = i2c_get_clientdata(client); 663 struct ds1374 *ds1374 = i2c_get_clientdata(client);
664#ifdef CONFIG_RTC_DRV_DS1374_WDT
665 int res;
666
667 res = misc_deregister(&ds1374_miscdev);
668 if (!res)
669 ds1374_miscdev.parent = NULL;
670 unregister_reboot_notifier(&ds1374_wdt_notifier);
671#endif
387 672
388 if (client->irq > 0) { 673 if (client->irq > 0) {
389 mutex_lock(&ds1374->mutex); 674 mutex_lock(&ds1374->mutex);
diff --git a/drivers/rtc/rtc-isl12057.c b/drivers/rtc/rtc-isl12057.c
index 455b601d731d..6e1fcfb5d7e6 100644
--- a/drivers/rtc/rtc-isl12057.c
+++ b/drivers/rtc/rtc-isl12057.c
@@ -41,6 +41,7 @@
41#define ISL12057_REG_RTC_DW 0x03 /* Day of the Week */ 41#define ISL12057_REG_RTC_DW 0x03 /* Day of the Week */
42#define ISL12057_REG_RTC_DT 0x04 /* Date */ 42#define ISL12057_REG_RTC_DT 0x04 /* Date */
43#define ISL12057_REG_RTC_MO 0x05 /* Month */ 43#define ISL12057_REG_RTC_MO 0x05 /* Month */
44#define ISL12057_REG_RTC_MO_CEN BIT(7) /* Century bit */
44#define ISL12057_REG_RTC_YR 0x06 /* Year */ 45#define ISL12057_REG_RTC_YR 0x06 /* Year */
45#define ISL12057_RTC_SEC_LEN 7 46#define ISL12057_RTC_SEC_LEN 7
46 47
@@ -88,7 +89,7 @@ static void isl12057_rtc_regs_to_tm(struct rtc_time *tm, u8 *regs)
88 tm->tm_min = bcd2bin(regs[ISL12057_REG_RTC_MN]); 89 tm->tm_min = bcd2bin(regs[ISL12057_REG_RTC_MN]);
89 90
90 if (regs[ISL12057_REG_RTC_HR] & ISL12057_REG_RTC_HR_MIL) { /* AM/PM */ 91 if (regs[ISL12057_REG_RTC_HR] & ISL12057_REG_RTC_HR_MIL) { /* AM/PM */
91 tm->tm_hour = bcd2bin(regs[ISL12057_REG_RTC_HR] & 0x0f); 92 tm->tm_hour = bcd2bin(regs[ISL12057_REG_RTC_HR] & 0x1f);
92 if (regs[ISL12057_REG_RTC_HR] & ISL12057_REG_RTC_HR_PM) 93 if (regs[ISL12057_REG_RTC_HR] & ISL12057_REG_RTC_HR_PM)
93 tm->tm_hour += 12; 94 tm->tm_hour += 12;
94 } else { /* 24 hour mode */ 95 } else { /* 24 hour mode */
@@ -97,26 +98,37 @@ static void isl12057_rtc_regs_to_tm(struct rtc_time *tm, u8 *regs)
97 98
98 tm->tm_mday = bcd2bin(regs[ISL12057_REG_RTC_DT]); 99 tm->tm_mday = bcd2bin(regs[ISL12057_REG_RTC_DT]);
99 tm->tm_wday = bcd2bin(regs[ISL12057_REG_RTC_DW]) - 1; /* starts at 1 */ 100 tm->tm_wday = bcd2bin(regs[ISL12057_REG_RTC_DW]) - 1; /* starts at 1 */
100 tm->tm_mon = bcd2bin(regs[ISL12057_REG_RTC_MO]) - 1; /* starts at 1 */ 101 tm->tm_mon = bcd2bin(regs[ISL12057_REG_RTC_MO] & 0x1f) - 1; /* ditto */
101 tm->tm_year = bcd2bin(regs[ISL12057_REG_RTC_YR]) + 100; 102 tm->tm_year = bcd2bin(regs[ISL12057_REG_RTC_YR]) + 100;
103
104 /* Check if years register has overflown from 99 to 00 */
105 if (regs[ISL12057_REG_RTC_MO] & ISL12057_REG_RTC_MO_CEN)
106 tm->tm_year += 100;
102} 107}
103 108
104static int isl12057_rtc_tm_to_regs(u8 *regs, struct rtc_time *tm) 109static int isl12057_rtc_tm_to_regs(u8 *regs, struct rtc_time *tm)
105{ 110{
111 u8 century_bit;
112
106 /* 113 /*
107 * The clock has an 8 bit wide bcd-coded register for the year. 114 * The clock has an 8 bit wide bcd-coded register for the year.
115 * It also has a century bit encoded in MO flag which provides
116 * information about overflow of year register from 99 to 00.
108 * tm_year is an offset from 1900 and we are interested in the 117 * tm_year is an offset from 1900 and we are interested in the
109 * 2000-2099 range, so any value less than 100 is invalid. 118 * 2000-2199 range, so any value less than 100 or larger than
119 * 299 is invalid.
110 */ 120 */
111 if (tm->tm_year < 100) 121 if (tm->tm_year < 100 || tm->tm_year > 299)
112 return -EINVAL; 122 return -EINVAL;
113 123
124 century_bit = (tm->tm_year > 199) ? ISL12057_REG_RTC_MO_CEN : 0;
125
114 regs[ISL12057_REG_RTC_SC] = bin2bcd(tm->tm_sec); 126 regs[ISL12057_REG_RTC_SC] = bin2bcd(tm->tm_sec);
115 regs[ISL12057_REG_RTC_MN] = bin2bcd(tm->tm_min); 127 regs[ISL12057_REG_RTC_MN] = bin2bcd(tm->tm_min);
116 regs[ISL12057_REG_RTC_HR] = bin2bcd(tm->tm_hour); /* 24-hour format */ 128 regs[ISL12057_REG_RTC_HR] = bin2bcd(tm->tm_hour); /* 24-hour format */
117 regs[ISL12057_REG_RTC_DT] = bin2bcd(tm->tm_mday); 129 regs[ISL12057_REG_RTC_DT] = bin2bcd(tm->tm_mday);
118 regs[ISL12057_REG_RTC_MO] = bin2bcd(tm->tm_mon + 1); 130 regs[ISL12057_REG_RTC_MO] = bin2bcd(tm->tm_mon + 1) | century_bit;
119 regs[ISL12057_REG_RTC_YR] = bin2bcd(tm->tm_year - 100); 131 regs[ISL12057_REG_RTC_YR] = bin2bcd(tm->tm_year % 100);
120 regs[ISL12057_REG_RTC_DW] = bin2bcd(tm->tm_wday + 1); 132 regs[ISL12057_REG_RTC_DW] = bin2bcd(tm->tm_wday + 1);
121 133
122 return 0; 134 return 0;
@@ -152,17 +164,33 @@ static int isl12057_rtc_read_time(struct device *dev, struct rtc_time *tm)
152{ 164{
153 struct isl12057_rtc_data *data = dev_get_drvdata(dev); 165 struct isl12057_rtc_data *data = dev_get_drvdata(dev);
154 u8 regs[ISL12057_RTC_SEC_LEN]; 166 u8 regs[ISL12057_RTC_SEC_LEN];
167 unsigned int sr;
155 int ret; 168 int ret;
156 169
157 mutex_lock(&data->lock); 170 mutex_lock(&data->lock);
171 ret = regmap_read(data->regmap, ISL12057_REG_SR, &sr);
172 if (ret) {
173 dev_err(dev, "%s: unable to read oscillator status flag (%d)\n",
174 __func__, ret);
175 goto out;
176 } else {
177 if (sr & ISL12057_REG_SR_OSF) {
178 ret = -ENODATA;
179 goto out;
180 }
181 }
182
158 ret = regmap_bulk_read(data->regmap, ISL12057_REG_RTC_SC, regs, 183 ret = regmap_bulk_read(data->regmap, ISL12057_REG_RTC_SC, regs,
159 ISL12057_RTC_SEC_LEN); 184 ISL12057_RTC_SEC_LEN);
185 if (ret)
186 dev_err(dev, "%s: unable to read RTC time section (%d)\n",
187 __func__, ret);
188
189out:
160 mutex_unlock(&data->lock); 190 mutex_unlock(&data->lock);
161 191
162 if (ret) { 192 if (ret)
163 dev_err(dev, "%s: RTC read failed\n", __func__);
164 return ret; 193 return ret;
165 }
166 194
167 isl12057_rtc_regs_to_tm(tm, regs); 195 isl12057_rtc_regs_to_tm(tm, regs);
168 196
@@ -182,10 +210,24 @@ static int isl12057_rtc_set_time(struct device *dev, struct rtc_time *tm)
182 mutex_lock(&data->lock); 210 mutex_lock(&data->lock);
183 ret = regmap_bulk_write(data->regmap, ISL12057_REG_RTC_SC, regs, 211 ret = regmap_bulk_write(data->regmap, ISL12057_REG_RTC_SC, regs,
184 ISL12057_RTC_SEC_LEN); 212 ISL12057_RTC_SEC_LEN);
185 mutex_unlock(&data->lock); 213 if (ret) {
214 dev_err(dev, "%s: unable to write RTC time section (%d)\n",
215 __func__, ret);
216 goto out;
217 }
186 218
187 if (ret) 219 /*
188 dev_err(dev, "%s: RTC write failed\n", __func__); 220 * Now that RTC time has been updated, let's clear oscillator
221 * failure flag, if needed.
222 */
223 ret = regmap_update_bits(data->regmap, ISL12057_REG_SR,
224 ISL12057_REG_SR_OSF, 0);
225 if (ret < 0)
226 dev_err(dev, "%s: unable to clear osc. failure bit (%d)\n",
227 __func__, ret);
228
229out:
230 mutex_unlock(&data->lock);
189 231
190 return ret; 232 return ret;
191} 233}
@@ -203,15 +245,8 @@ static int isl12057_check_rtc_status(struct device *dev, struct regmap *regmap)
203 ret = regmap_update_bits(regmap, ISL12057_REG_INT, 245 ret = regmap_update_bits(regmap, ISL12057_REG_INT,
204 ISL12057_REG_INT_EOSC, 0); 246 ISL12057_REG_INT_EOSC, 0);
205 if (ret < 0) { 247 if (ret < 0) {
206 dev_err(dev, "Unable to enable oscillator\n"); 248 dev_err(dev, "%s: unable to enable oscillator (%d)\n",
207 return ret; 249 __func__, ret);
208 }
209
210 /* Clear oscillator failure bit if needed */
211 ret = regmap_update_bits(regmap, ISL12057_REG_SR,
212 ISL12057_REG_SR_OSF, 0);
213 if (ret < 0) {
214 dev_err(dev, "Unable to clear oscillator failure bit\n");
215 return ret; 250 return ret;
216 } 251 }
217 252
@@ -219,7 +254,8 @@ static int isl12057_check_rtc_status(struct device *dev, struct regmap *regmap)
219 ret = regmap_update_bits(regmap, ISL12057_REG_SR, 254 ret = regmap_update_bits(regmap, ISL12057_REG_SR,
220 ISL12057_REG_SR_A1F, 0); 255 ISL12057_REG_SR_A1F, 0);
221 if (ret < 0) { 256 if (ret < 0) {
222 dev_err(dev, "Unable to clear alarm bit\n"); 257 dev_err(dev, "%s: unable to clear alarm bit (%d)\n",
258 __func__, ret);
223 return ret; 259 return ret;
224 } 260 }
225 261
@@ -253,7 +289,8 @@ static int isl12057_probe(struct i2c_client *client,
253 regmap = devm_regmap_init_i2c(client, &isl12057_rtc_regmap_config); 289 regmap = devm_regmap_init_i2c(client, &isl12057_rtc_regmap_config);
254 if (IS_ERR(regmap)) { 290 if (IS_ERR(regmap)) {
255 ret = PTR_ERR(regmap); 291 ret = PTR_ERR(regmap);
256 dev_err(dev, "regmap allocation failed: %d\n", ret); 292 dev_err(dev, "%s: regmap allocation failed (%d)\n",
293 __func__, ret);
257 return ret; 294 return ret;
258 } 295 }
259 296
diff --git a/drivers/rtc/rtc-lib.c b/drivers/rtc/rtc-lib.c
index c4cf05731118..e6bfb9c42a10 100644
--- a/drivers/rtc/rtc-lib.c
+++ b/drivers/rtc/rtc-lib.c
@@ -45,16 +45,20 @@ int rtc_year_days(unsigned int day, unsigned int month, unsigned int year)
45} 45}
46EXPORT_SYMBOL(rtc_year_days); 46EXPORT_SYMBOL(rtc_year_days);
47 47
48
48/* 49/*
50 * rtc_time_to_tm64 - Converts time64_t to rtc_time.
49 * Convert seconds since 01-01-1970 00:00:00 to Gregorian date. 51 * Convert seconds since 01-01-1970 00:00:00 to Gregorian date.
50 */ 52 */
51void rtc_time_to_tm(unsigned long time, struct rtc_time *tm) 53void rtc_time64_to_tm(time64_t time, struct rtc_time *tm)
52{ 54{
53 unsigned int month, year; 55 unsigned int month, year;
56 unsigned long secs;
54 int days; 57 int days;
55 58
56 days = time / 86400; 59 /* time must be positive */
57 time -= (unsigned int) days * 86400; 60 days = div_s64(time, 86400);
61 secs = time - (unsigned int) days * 86400;
58 62
59 /* day of the week, 1970-01-01 was a Thursday */ 63 /* day of the week, 1970-01-01 was a Thursday */
60 tm->tm_wday = (days + 4) % 7; 64 tm->tm_wday = (days + 4) % 7;
@@ -81,14 +85,14 @@ void rtc_time_to_tm(unsigned long time, struct rtc_time *tm)
81 tm->tm_mon = month; 85 tm->tm_mon = month;
82 tm->tm_mday = days + 1; 86 tm->tm_mday = days + 1;
83 87
84 tm->tm_hour = time / 3600; 88 tm->tm_hour = secs / 3600;
85 time -= tm->tm_hour * 3600; 89 secs -= tm->tm_hour * 3600;
86 tm->tm_min = time / 60; 90 tm->tm_min = secs / 60;
87 tm->tm_sec = time - tm->tm_min * 60; 91 tm->tm_sec = secs - tm->tm_min * 60;
88 92
89 tm->tm_isdst = 0; 93 tm->tm_isdst = 0;
90} 94}
91EXPORT_SYMBOL(rtc_time_to_tm); 95EXPORT_SYMBOL(rtc_time64_to_tm);
92 96
93/* 97/*
94 * Does the rtc_time represent a valid date/time? 98 * Does the rtc_time represent a valid date/time?
@@ -109,24 +113,22 @@ int rtc_valid_tm(struct rtc_time *tm)
109EXPORT_SYMBOL(rtc_valid_tm); 113EXPORT_SYMBOL(rtc_valid_tm);
110 114
111/* 115/*
116 * rtc_tm_to_time64 - Converts rtc_time to time64_t.
112 * Convert Gregorian date to seconds since 01-01-1970 00:00:00. 117 * Convert Gregorian date to seconds since 01-01-1970 00:00:00.
113 */ 118 */
114int rtc_tm_to_time(struct rtc_time *tm, unsigned long *time) 119time64_t rtc_tm_to_time64(struct rtc_time *tm)
115{ 120{
116 *time = mktime(tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday, 121 return mktime64(tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday,
117 tm->tm_hour, tm->tm_min, tm->tm_sec); 122 tm->tm_hour, tm->tm_min, tm->tm_sec);
118 return 0;
119} 123}
120EXPORT_SYMBOL(rtc_tm_to_time); 124EXPORT_SYMBOL(rtc_tm_to_time64);
121 125
122/* 126/*
123 * Convert rtc_time to ktime 127 * Convert rtc_time to ktime
124 */ 128 */
125ktime_t rtc_tm_to_ktime(struct rtc_time tm) 129ktime_t rtc_tm_to_ktime(struct rtc_time tm)
126{ 130{
127 time_t time; 131 return ktime_set(rtc_tm_to_time64(&tm), 0);
128 rtc_tm_to_time(&tm, &time);
129 return ktime_set(time, 0);
130} 132}
131EXPORT_SYMBOL_GPL(rtc_tm_to_ktime); 133EXPORT_SYMBOL_GPL(rtc_tm_to_ktime);
132 134
@@ -135,14 +137,14 @@ EXPORT_SYMBOL_GPL(rtc_tm_to_ktime);
135 */ 137 */
136struct rtc_time rtc_ktime_to_tm(ktime_t kt) 138struct rtc_time rtc_ktime_to_tm(ktime_t kt)
137{ 139{
138 struct timespec ts; 140 struct timespec64 ts;
139 struct rtc_time ret; 141 struct rtc_time ret;
140 142
141 ts = ktime_to_timespec(kt); 143 ts = ktime_to_timespec64(kt);
142 /* Round up any ns */ 144 /* Round up any ns */
143 if (ts.tv_nsec) 145 if (ts.tv_nsec)
144 ts.tv_sec++; 146 ts.tv_sec++;
145 rtc_time_to_tm(ts.tv_sec, &ret); 147 rtc_time64_to_tm(ts.tv_sec, &ret);
146 return ret; 148 return ret;
147} 149}
148EXPORT_SYMBOL_GPL(rtc_ktime_to_tm); 150EXPORT_SYMBOL_GPL(rtc_ktime_to_tm);
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index 21142e6574a9..4f1c6ca97211 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -1,10 +1,11 @@
1/* 1/*
2 * TI OMAP1 Real Time Clock interface for Linux 2 * TI OMAP Real Time Clock interface for Linux
3 * 3 *
4 * Copyright (C) 2003 MontaVista Software, Inc. 4 * Copyright (C) 2003 MontaVista Software, Inc.
5 * Author: George G. Davis <gdavis@mvista.com> or <source@mvista.com> 5 * Author: George G. Davis <gdavis@mvista.com> or <source@mvista.com>
6 * 6 *
7 * Copyright (C) 2006 David Brownell (new RTC framework) 7 * Copyright (C) 2006 David Brownell (new RTC framework)
8 * Copyright (C) 2014 Johan Hovold <johan@kernel.org>
8 * 9 *
9 * This program is free software; you can redistribute it and/or 10 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License 11 * modify it under the terms of the GNU General Public License
@@ -25,7 +26,8 @@
25#include <linux/pm_runtime.h> 26#include <linux/pm_runtime.h>
26#include <linux/io.h> 27#include <linux/io.h>
27 28
28/* The OMAP1 RTC is a year/month/day/hours/minutes/seconds BCD clock 29/*
30 * The OMAP RTC is a year/month/day/hours/minutes/seconds BCD clock
29 * with century-range alarm matching, driven by the 32kHz clock. 31 * with century-range alarm matching, driven by the 32kHz clock.
30 * 32 *
31 * The main user-visible ways it differs from PC RTCs are by omitting 33 * The main user-visible ways it differs from PC RTCs are by omitting
@@ -39,10 +41,6 @@
39 * the SoC). See the BOARD-SPECIFIC CUSTOMIZATION comment. 41 * the SoC). See the BOARD-SPECIFIC CUSTOMIZATION comment.
40 */ 42 */
41 43
42#define DRIVER_NAME "omap_rtc"
43
44#define OMAP_RTC_BASE 0xfffb4800
45
46/* RTC registers */ 44/* RTC registers */
47#define OMAP_RTC_SECONDS_REG 0x00 45#define OMAP_RTC_SECONDS_REG 0x00
48#define OMAP_RTC_MINUTES_REG 0x04 46#define OMAP_RTC_MINUTES_REG 0x04
@@ -72,6 +70,15 @@
72 70
73#define OMAP_RTC_IRQWAKEEN 0x7c 71#define OMAP_RTC_IRQWAKEEN 0x7c
74 72
73#define OMAP_RTC_ALARM2_SECONDS_REG 0x80
74#define OMAP_RTC_ALARM2_MINUTES_REG 0x84
75#define OMAP_RTC_ALARM2_HOURS_REG 0x88
76#define OMAP_RTC_ALARM2_DAYS_REG 0x8c
77#define OMAP_RTC_ALARM2_MONTHS_REG 0x90
78#define OMAP_RTC_ALARM2_YEARS_REG 0x94
79
80#define OMAP_RTC_PMIC_REG 0x98
81
75/* OMAP_RTC_CTRL_REG bit fields: */ 82/* OMAP_RTC_CTRL_REG bit fields: */
76#define OMAP_RTC_CTRL_SPLIT BIT(7) 83#define OMAP_RTC_CTRL_SPLIT BIT(7)
77#define OMAP_RTC_CTRL_DISABLE BIT(6) 84#define OMAP_RTC_CTRL_DISABLE BIT(6)
@@ -84,6 +91,7 @@
84 91
85/* OMAP_RTC_STATUS_REG bit fields: */ 92/* OMAP_RTC_STATUS_REG bit fields: */
86#define OMAP_RTC_STATUS_POWER_UP BIT(7) 93#define OMAP_RTC_STATUS_POWER_UP BIT(7)
94#define OMAP_RTC_STATUS_ALARM2 BIT(7)
87#define OMAP_RTC_STATUS_ALARM BIT(6) 95#define OMAP_RTC_STATUS_ALARM BIT(6)
88#define OMAP_RTC_STATUS_1D_EVENT BIT(5) 96#define OMAP_RTC_STATUS_1D_EVENT BIT(5)
89#define OMAP_RTC_STATUS_1H_EVENT BIT(4) 97#define OMAP_RTC_STATUS_1H_EVENT BIT(4)
@@ -93,6 +101,7 @@
93#define OMAP_RTC_STATUS_BUSY BIT(0) 101#define OMAP_RTC_STATUS_BUSY BIT(0)
94 102
95/* OMAP_RTC_INTERRUPTS_REG bit fields: */ 103/* OMAP_RTC_INTERRUPTS_REG bit fields: */
104#define OMAP_RTC_INTERRUPTS_IT_ALARM2 BIT(4)
96#define OMAP_RTC_INTERRUPTS_IT_ALARM BIT(3) 105#define OMAP_RTC_INTERRUPTS_IT_ALARM BIT(3)
97#define OMAP_RTC_INTERRUPTS_IT_TIMER BIT(2) 106#define OMAP_RTC_INTERRUPTS_IT_TIMER BIT(2)
98 107
@@ -102,61 +111,82 @@
102/* OMAP_RTC_IRQWAKEEN bit fields: */ 111/* OMAP_RTC_IRQWAKEEN bit fields: */
103#define OMAP_RTC_IRQWAKEEN_ALARM_WAKEEN BIT(1) 112#define OMAP_RTC_IRQWAKEEN_ALARM_WAKEEN BIT(1)
104 113
114/* OMAP_RTC_PMIC bit fields: */
115#define OMAP_RTC_PMIC_POWER_EN_EN BIT(16)
116
105/* OMAP_RTC_KICKER values */ 117/* OMAP_RTC_KICKER values */
106#define KICK0_VALUE 0x83e70b13 118#define KICK0_VALUE 0x83e70b13
107#define KICK1_VALUE 0x95a4f1e0 119#define KICK1_VALUE 0x95a4f1e0
108 120
109#define OMAP_RTC_HAS_KICKER BIT(0) 121struct omap_rtc_device_type {
110 122 bool has_32kclk_en;
111/* 123 bool has_kicker;
112 * Few RTC IP revisions has special WAKE-EN Register to enable Wakeup 124 bool has_irqwakeen;
113 * generation for event Alarm. 125 bool has_pmic_mode;
114 */ 126 bool has_power_up_reset;
115#define OMAP_RTC_HAS_IRQWAKEEN BIT(1) 127};
116 128
117/* 129struct omap_rtc {
118 * Some RTC IP revisions (like those in AM335x and DRA7x) need 130 struct rtc_device *rtc;
119 * the 32KHz clock to be explicitly enabled. 131 void __iomem *base;
120 */ 132 int irq_alarm;
121#define OMAP_RTC_HAS_32KCLK_EN BIT(2) 133 int irq_timer;
134 u8 interrupts_reg;
135 bool is_pmic_controller;
136 const struct omap_rtc_device_type *type;
137};
122 138
123static void __iomem *rtc_base; 139static inline u8 rtc_read(struct omap_rtc *rtc, unsigned int reg)
140{
141 return readb(rtc->base + reg);
142}
124 143
125#define rtc_read(addr) readb(rtc_base + (addr)) 144static inline u32 rtc_readl(struct omap_rtc *rtc, unsigned int reg)
126#define rtc_write(val, addr) writeb(val, rtc_base + (addr)) 145{
146 return readl(rtc->base + reg);
147}
127 148
128#define rtc_writel(val, addr) writel(val, rtc_base + (addr)) 149static inline void rtc_write(struct omap_rtc *rtc, unsigned int reg, u8 val)
150{
151 writeb(val, rtc->base + reg);
152}
129 153
154static inline void rtc_writel(struct omap_rtc *rtc, unsigned int reg, u32 val)
155{
156 writel(val, rtc->base + reg);
157}
130 158
131/* we rely on the rtc framework to handle locking (rtc->ops_lock), 159/*
160 * We rely on the rtc framework to handle locking (rtc->ops_lock),
132 * so the only other requirement is that register accesses which 161 * so the only other requirement is that register accesses which
133 * require BUSY to be clear are made with IRQs locally disabled 162 * require BUSY to be clear are made with IRQs locally disabled
134 */ 163 */
135static void rtc_wait_not_busy(void) 164static void rtc_wait_not_busy(struct omap_rtc *rtc)
136{ 165{
137 int count = 0; 166 int count;
138 u8 status; 167 u8 status;
139 168
140 /* BUSY may stay active for 1/32768 second (~30 usec) */ 169 /* BUSY may stay active for 1/32768 second (~30 usec) */
141 for (count = 0; count < 50; count++) { 170 for (count = 0; count < 50; count++) {
142 status = rtc_read(OMAP_RTC_STATUS_REG); 171 status = rtc_read(rtc, OMAP_RTC_STATUS_REG);
143 if ((status & (u8)OMAP_RTC_STATUS_BUSY) == 0) 172 if (!(status & OMAP_RTC_STATUS_BUSY))
144 break; 173 break;
145 udelay(1); 174 udelay(1);
146 } 175 }
147 /* now we have ~15 usec to read/write various registers */ 176 /* now we have ~15 usec to read/write various registers */
148} 177}
149 178
150static irqreturn_t rtc_irq(int irq, void *rtc) 179static irqreturn_t rtc_irq(int irq, void *dev_id)
151{ 180{
152 unsigned long events = 0; 181 struct omap_rtc *rtc = dev_id;
153 u8 irq_data; 182 unsigned long events = 0;
183 u8 irq_data;
154 184
155 irq_data = rtc_read(OMAP_RTC_STATUS_REG); 185 irq_data = rtc_read(rtc, OMAP_RTC_STATUS_REG);
156 186
157 /* alarm irq? */ 187 /* alarm irq? */
158 if (irq_data & OMAP_RTC_STATUS_ALARM) { 188 if (irq_data & OMAP_RTC_STATUS_ALARM) {
159 rtc_write(OMAP_RTC_STATUS_ALARM, OMAP_RTC_STATUS_REG); 189 rtc_write(rtc, OMAP_RTC_STATUS_REG, OMAP_RTC_STATUS_ALARM);
160 events |= RTC_IRQF | RTC_AF; 190 events |= RTC_IRQF | RTC_AF;
161 } 191 }
162 192
@@ -164,23 +194,21 @@ static irqreturn_t rtc_irq(int irq, void *rtc)
164 if (irq_data & OMAP_RTC_STATUS_1S_EVENT) 194 if (irq_data & OMAP_RTC_STATUS_1S_EVENT)
165 events |= RTC_IRQF | RTC_UF; 195 events |= RTC_IRQF | RTC_UF;
166 196
167 rtc_update_irq(rtc, 1, events); 197 rtc_update_irq(rtc->rtc, 1, events);
168 198
169 return IRQ_HANDLED; 199 return IRQ_HANDLED;
170} 200}
171 201
172static int omap_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) 202static int omap_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
173{ 203{
204 struct omap_rtc *rtc = dev_get_drvdata(dev);
174 u8 reg, irqwake_reg = 0; 205 u8 reg, irqwake_reg = 0;
175 struct platform_device *pdev = to_platform_device(dev);
176 const struct platform_device_id *id_entry =
177 platform_get_device_id(pdev);
178 206
179 local_irq_disable(); 207 local_irq_disable();
180 rtc_wait_not_busy(); 208 rtc_wait_not_busy(rtc);
181 reg = rtc_read(OMAP_RTC_INTERRUPTS_REG); 209 reg = rtc_read(rtc, OMAP_RTC_INTERRUPTS_REG);
182 if (id_entry->driver_data & OMAP_RTC_HAS_IRQWAKEEN) 210 if (rtc->type->has_irqwakeen)
183 irqwake_reg = rtc_read(OMAP_RTC_IRQWAKEEN); 211 irqwake_reg = rtc_read(rtc, OMAP_RTC_IRQWAKEEN);
184 212
185 if (enabled) { 213 if (enabled) {
186 reg |= OMAP_RTC_INTERRUPTS_IT_ALARM; 214 reg |= OMAP_RTC_INTERRUPTS_IT_ALARM;
@@ -189,10 +217,10 @@ static int omap_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
189 reg &= ~OMAP_RTC_INTERRUPTS_IT_ALARM; 217 reg &= ~OMAP_RTC_INTERRUPTS_IT_ALARM;
190 irqwake_reg &= ~OMAP_RTC_IRQWAKEEN_ALARM_WAKEEN; 218 irqwake_reg &= ~OMAP_RTC_IRQWAKEEN_ALARM_WAKEEN;
191 } 219 }
192 rtc_wait_not_busy(); 220 rtc_wait_not_busy(rtc);
193 rtc_write(reg, OMAP_RTC_INTERRUPTS_REG); 221 rtc_write(rtc, OMAP_RTC_INTERRUPTS_REG, reg);
194 if (id_entry->driver_data & OMAP_RTC_HAS_IRQWAKEEN) 222 if (rtc->type->has_irqwakeen)
195 rtc_write(irqwake_reg, OMAP_RTC_IRQWAKEEN); 223 rtc_write(rtc, OMAP_RTC_IRQWAKEEN, irqwake_reg);
196 local_irq_enable(); 224 local_irq_enable();
197 225
198 return 0; 226 return 0;
@@ -230,39 +258,47 @@ static void bcd2tm(struct rtc_time *tm)
230 tm->tm_year = bcd2bin(tm->tm_year) + 100; 258 tm->tm_year = bcd2bin(tm->tm_year) + 100;
231} 259}
232 260
261static void omap_rtc_read_time_raw(struct omap_rtc *rtc, struct rtc_time *tm)
262{
263 tm->tm_sec = rtc_read(rtc, OMAP_RTC_SECONDS_REG);
264 tm->tm_min = rtc_read(rtc, OMAP_RTC_MINUTES_REG);
265 tm->tm_hour = rtc_read(rtc, OMAP_RTC_HOURS_REG);
266 tm->tm_mday = rtc_read(rtc, OMAP_RTC_DAYS_REG);
267 tm->tm_mon = rtc_read(rtc, OMAP_RTC_MONTHS_REG);
268 tm->tm_year = rtc_read(rtc, OMAP_RTC_YEARS_REG);
269}
233 270
234static int omap_rtc_read_time(struct device *dev, struct rtc_time *tm) 271static int omap_rtc_read_time(struct device *dev, struct rtc_time *tm)
235{ 272{
273 struct omap_rtc *rtc = dev_get_drvdata(dev);
274
236 /* we don't report wday/yday/isdst ... */ 275 /* we don't report wday/yday/isdst ... */
237 local_irq_disable(); 276 local_irq_disable();
238 rtc_wait_not_busy(); 277 rtc_wait_not_busy(rtc);
239 278 omap_rtc_read_time_raw(rtc, tm);
240 tm->tm_sec = rtc_read(OMAP_RTC_SECONDS_REG);
241 tm->tm_min = rtc_read(OMAP_RTC_MINUTES_REG);
242 tm->tm_hour = rtc_read(OMAP_RTC_HOURS_REG);
243 tm->tm_mday = rtc_read(OMAP_RTC_DAYS_REG);
244 tm->tm_mon = rtc_read(OMAP_RTC_MONTHS_REG);
245 tm->tm_year = rtc_read(OMAP_RTC_YEARS_REG);
246
247 local_irq_enable(); 279 local_irq_enable();
248 280
249 bcd2tm(tm); 281 bcd2tm(tm);
282
250 return 0; 283 return 0;
251} 284}
252 285
253static int omap_rtc_set_time(struct device *dev, struct rtc_time *tm) 286static int omap_rtc_set_time(struct device *dev, struct rtc_time *tm)
254{ 287{
288 struct omap_rtc *rtc = dev_get_drvdata(dev);
289
255 if (tm2bcd(tm) < 0) 290 if (tm2bcd(tm) < 0)
256 return -EINVAL; 291 return -EINVAL;
292
257 local_irq_disable(); 293 local_irq_disable();
258 rtc_wait_not_busy(); 294 rtc_wait_not_busy(rtc);
259 295
260 rtc_write(tm->tm_year, OMAP_RTC_YEARS_REG); 296 rtc_write(rtc, OMAP_RTC_YEARS_REG, tm->tm_year);
261 rtc_write(tm->tm_mon, OMAP_RTC_MONTHS_REG); 297 rtc_write(rtc, OMAP_RTC_MONTHS_REG, tm->tm_mon);
262 rtc_write(tm->tm_mday, OMAP_RTC_DAYS_REG); 298 rtc_write(rtc, OMAP_RTC_DAYS_REG, tm->tm_mday);
263 rtc_write(tm->tm_hour, OMAP_RTC_HOURS_REG); 299 rtc_write(rtc, OMAP_RTC_HOURS_REG, tm->tm_hour);
264 rtc_write(tm->tm_min, OMAP_RTC_MINUTES_REG); 300 rtc_write(rtc, OMAP_RTC_MINUTES_REG, tm->tm_min);
265 rtc_write(tm->tm_sec, OMAP_RTC_SECONDS_REG); 301 rtc_write(rtc, OMAP_RTC_SECONDS_REG, tm->tm_sec);
266 302
267 local_irq_enable(); 303 local_irq_enable();
268 304
@@ -271,48 +307,50 @@ static int omap_rtc_set_time(struct device *dev, struct rtc_time *tm)
271 307
272static int omap_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm) 308static int omap_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
273{ 309{
310 struct omap_rtc *rtc = dev_get_drvdata(dev);
311 u8 interrupts;
312
274 local_irq_disable(); 313 local_irq_disable();
275 rtc_wait_not_busy(); 314 rtc_wait_not_busy(rtc);
276 315
277 alm->time.tm_sec = rtc_read(OMAP_RTC_ALARM_SECONDS_REG); 316 alm->time.tm_sec = rtc_read(rtc, OMAP_RTC_ALARM_SECONDS_REG);
278 alm->time.tm_min = rtc_read(OMAP_RTC_ALARM_MINUTES_REG); 317 alm->time.tm_min = rtc_read(rtc, OMAP_RTC_ALARM_MINUTES_REG);
279 alm->time.tm_hour = rtc_read(OMAP_RTC_ALARM_HOURS_REG); 318 alm->time.tm_hour = rtc_read(rtc, OMAP_RTC_ALARM_HOURS_REG);
280 alm->time.tm_mday = rtc_read(OMAP_RTC_ALARM_DAYS_REG); 319 alm->time.tm_mday = rtc_read(rtc, OMAP_RTC_ALARM_DAYS_REG);
281 alm->time.tm_mon = rtc_read(OMAP_RTC_ALARM_MONTHS_REG); 320 alm->time.tm_mon = rtc_read(rtc, OMAP_RTC_ALARM_MONTHS_REG);
282 alm->time.tm_year = rtc_read(OMAP_RTC_ALARM_YEARS_REG); 321 alm->time.tm_year = rtc_read(rtc, OMAP_RTC_ALARM_YEARS_REG);
283 322
284 local_irq_enable(); 323 local_irq_enable();
285 324
286 bcd2tm(&alm->time); 325 bcd2tm(&alm->time);
287 alm->enabled = !!(rtc_read(OMAP_RTC_INTERRUPTS_REG) 326
288 & OMAP_RTC_INTERRUPTS_IT_ALARM); 327 interrupts = rtc_read(rtc, OMAP_RTC_INTERRUPTS_REG);
328 alm->enabled = !!(interrupts & OMAP_RTC_INTERRUPTS_IT_ALARM);
289 329
290 return 0; 330 return 0;
291} 331}
292 332
293static int omap_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm) 333static int omap_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
294{ 334{
335 struct omap_rtc *rtc = dev_get_drvdata(dev);
295 u8 reg, irqwake_reg = 0; 336 u8 reg, irqwake_reg = 0;
296 struct platform_device *pdev = to_platform_device(dev);
297 const struct platform_device_id *id_entry =
298 platform_get_device_id(pdev);
299 337
300 if (tm2bcd(&alm->time) < 0) 338 if (tm2bcd(&alm->time) < 0)
301 return -EINVAL; 339 return -EINVAL;
302 340
303 local_irq_disable(); 341 local_irq_disable();
304 rtc_wait_not_busy(); 342 rtc_wait_not_busy(rtc);
305 343
306 rtc_write(alm->time.tm_year, OMAP_RTC_ALARM_YEARS_REG); 344 rtc_write(rtc, OMAP_RTC_ALARM_YEARS_REG, alm->time.tm_year);
307 rtc_write(alm->time.tm_mon, OMAP_RTC_ALARM_MONTHS_REG); 345 rtc_write(rtc, OMAP_RTC_ALARM_MONTHS_REG, alm->time.tm_mon);
308 rtc_write(alm->time.tm_mday, OMAP_RTC_ALARM_DAYS_REG); 346 rtc_write(rtc, OMAP_RTC_ALARM_DAYS_REG, alm->time.tm_mday);
309 rtc_write(alm->time.tm_hour, OMAP_RTC_ALARM_HOURS_REG); 347 rtc_write(rtc, OMAP_RTC_ALARM_HOURS_REG, alm->time.tm_hour);
310 rtc_write(alm->time.tm_min, OMAP_RTC_ALARM_MINUTES_REG); 348 rtc_write(rtc, OMAP_RTC_ALARM_MINUTES_REG, alm->time.tm_min);
311 rtc_write(alm->time.tm_sec, OMAP_RTC_ALARM_SECONDS_REG); 349 rtc_write(rtc, OMAP_RTC_ALARM_SECONDS_REG, alm->time.tm_sec);
312 350
313 reg = rtc_read(OMAP_RTC_INTERRUPTS_REG); 351 reg = rtc_read(rtc, OMAP_RTC_INTERRUPTS_REG);
314 if (id_entry->driver_data & OMAP_RTC_HAS_IRQWAKEEN) 352 if (rtc->type->has_irqwakeen)
315 irqwake_reg = rtc_read(OMAP_RTC_IRQWAKEEN); 353 irqwake_reg = rtc_read(rtc, OMAP_RTC_IRQWAKEEN);
316 354
317 if (alm->enabled) { 355 if (alm->enabled) {
318 reg |= OMAP_RTC_INTERRUPTS_IT_ALARM; 356 reg |= OMAP_RTC_INTERRUPTS_IT_ALARM;
@@ -321,15 +359,79 @@ static int omap_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
321 reg &= ~OMAP_RTC_INTERRUPTS_IT_ALARM; 359 reg &= ~OMAP_RTC_INTERRUPTS_IT_ALARM;
322 irqwake_reg &= ~OMAP_RTC_IRQWAKEEN_ALARM_WAKEEN; 360 irqwake_reg &= ~OMAP_RTC_IRQWAKEEN_ALARM_WAKEEN;
323 } 361 }
324 rtc_write(reg, OMAP_RTC_INTERRUPTS_REG); 362 rtc_write(rtc, OMAP_RTC_INTERRUPTS_REG, reg);
325 if (id_entry->driver_data & OMAP_RTC_HAS_IRQWAKEEN) 363 if (rtc->type->has_irqwakeen)
326 rtc_write(irqwake_reg, OMAP_RTC_IRQWAKEEN); 364 rtc_write(rtc, OMAP_RTC_IRQWAKEEN, irqwake_reg);
327 365
328 local_irq_enable(); 366 local_irq_enable();
329 367
330 return 0; 368 return 0;
331} 369}
332 370
371static struct omap_rtc *omap_rtc_power_off_rtc;
372
373/*
374 * omap_rtc_poweroff: RTC-controlled power off
375 *
376 * The RTC can be used to control an external PMIC via the pmic_power_en pin,
377 * which can be configured to transition to OFF on ALARM2 events.
378 *
379 * Notes:
380 * The two-second alarm offset is the shortest offset possible as the alarm
381 * registers must be set before the next timer update and the offset
382 * calculation is too heavy for everything to be done within a single access
383 * period (~15 us).
384 *
385 * Called with local interrupts disabled.
386 */
387static void omap_rtc_power_off(void)
388{
389 struct omap_rtc *rtc = omap_rtc_power_off_rtc;
390 struct rtc_time tm;
391 unsigned long now;
392 u32 val;
393
394 /* enable pmic_power_en control */
395 val = rtc_readl(rtc, OMAP_RTC_PMIC_REG);
396 rtc_writel(rtc, OMAP_RTC_PMIC_REG, val | OMAP_RTC_PMIC_POWER_EN_EN);
397
398 /* set alarm two seconds from now */
399 omap_rtc_read_time_raw(rtc, &tm);
400 bcd2tm(&tm);
401 rtc_tm_to_time(&tm, &now);
402 rtc_time_to_tm(now + 2, &tm);
403
404 if (tm2bcd(&tm) < 0) {
405 dev_err(&rtc->rtc->dev, "power off failed\n");
406 return;
407 }
408
409 rtc_wait_not_busy(rtc);
410
411 rtc_write(rtc, OMAP_RTC_ALARM2_SECONDS_REG, tm.tm_sec);
412 rtc_write(rtc, OMAP_RTC_ALARM2_MINUTES_REG, tm.tm_min);
413 rtc_write(rtc, OMAP_RTC_ALARM2_HOURS_REG, tm.tm_hour);
414 rtc_write(rtc, OMAP_RTC_ALARM2_DAYS_REG, tm.tm_mday);
415 rtc_write(rtc, OMAP_RTC_ALARM2_MONTHS_REG, tm.tm_mon);
416 rtc_write(rtc, OMAP_RTC_ALARM2_YEARS_REG, tm.tm_year);
417
418 /*
419 * enable ALARM2 interrupt
420 *
421 * NOTE: this fails on AM3352 if rtc_write (writeb) is used
422 */
423 val = rtc_read(rtc, OMAP_RTC_INTERRUPTS_REG);
424 rtc_writel(rtc, OMAP_RTC_INTERRUPTS_REG,
425 val | OMAP_RTC_INTERRUPTS_IT_ALARM2);
426
427 /*
428 * Wait for alarm to trigger (within two seconds) and external PMIC to
429 * power off the system. Add a 500 ms margin for external latencies
430 * (e.g. debounce circuits).
431 */
432 mdelay(2500);
433}
434
333static struct rtc_class_ops omap_rtc_ops = { 435static struct rtc_class_ops omap_rtc_ops = {
334 .read_time = omap_rtc_read_time, 436 .read_time = omap_rtc_read_time,
335 .set_time = omap_rtc_set_time, 437 .set_time = omap_rtc_set_time,
@@ -338,137 +440,140 @@ static struct rtc_class_ops omap_rtc_ops = {
338 .alarm_irq_enable = omap_rtc_alarm_irq_enable, 440 .alarm_irq_enable = omap_rtc_alarm_irq_enable,
339}; 441};
340 442
341static int omap_rtc_alarm; 443static const struct omap_rtc_device_type omap_rtc_default_type = {
342static int omap_rtc_timer; 444 .has_power_up_reset = true,
445};
343 446
344#define OMAP_RTC_DATA_AM3352_IDX 1 447static const struct omap_rtc_device_type omap_rtc_am3352_type = {
345#define OMAP_RTC_DATA_DA830_IDX 2 448 .has_32kclk_en = true,
449 .has_kicker = true,
450 .has_irqwakeen = true,
451 .has_pmic_mode = true,
452};
346 453
347static struct platform_device_id omap_rtc_devtype[] = { 454static const struct omap_rtc_device_type omap_rtc_da830_type = {
455 .has_kicker = true,
456};
457
458static const struct platform_device_id omap_rtc_id_table[] = {
348 { 459 {
349 .name = DRIVER_NAME, 460 .name = "omap_rtc",
350 }, 461 .driver_data = (kernel_ulong_t)&omap_rtc_default_type,
351 [OMAP_RTC_DATA_AM3352_IDX] = { 462 }, {
352 .name = "am3352-rtc", 463 .name = "am3352-rtc",
353 .driver_data = OMAP_RTC_HAS_KICKER | OMAP_RTC_HAS_IRQWAKEEN | 464 .driver_data = (kernel_ulong_t)&omap_rtc_am3352_type,
354 OMAP_RTC_HAS_32KCLK_EN, 465 }, {
355 },
356 [OMAP_RTC_DATA_DA830_IDX] = {
357 .name = "da830-rtc", 466 .name = "da830-rtc",
358 .driver_data = OMAP_RTC_HAS_KICKER, 467 .driver_data = (kernel_ulong_t)&omap_rtc_da830_type,
359 }, 468 }, {
360 {}, 469 /* sentinel */
470 }
361}; 471};
362MODULE_DEVICE_TABLE(platform, omap_rtc_devtype); 472MODULE_DEVICE_TABLE(platform, omap_rtc_id_table);
363 473
364static const struct of_device_id omap_rtc_of_match[] = { 474static const struct of_device_id omap_rtc_of_match[] = {
365 { .compatible = "ti,da830-rtc", 475 {
366 .data = &omap_rtc_devtype[OMAP_RTC_DATA_DA830_IDX], 476 .compatible = "ti,am3352-rtc",
367 }, 477 .data = &omap_rtc_am3352_type,
368 { .compatible = "ti,am3352-rtc", 478 }, {
369 .data = &omap_rtc_devtype[OMAP_RTC_DATA_AM3352_IDX], 479 .compatible = "ti,da830-rtc",
370 }, 480 .data = &omap_rtc_da830_type,
371 {}, 481 }, {
482 /* sentinel */
483 }
372}; 484};
373MODULE_DEVICE_TABLE(of, omap_rtc_of_match); 485MODULE_DEVICE_TABLE(of, omap_rtc_of_match);
374 486
375static int __init omap_rtc_probe(struct platform_device *pdev) 487static int __init omap_rtc_probe(struct platform_device *pdev)
376{ 488{
377 struct resource *res; 489 struct omap_rtc *rtc;
378 struct rtc_device *rtc; 490 struct resource *res;
379 u8 reg, new_ctrl; 491 u8 reg, mask, new_ctrl;
380 const struct platform_device_id *id_entry; 492 const struct platform_device_id *id_entry;
381 const struct of_device_id *of_id; 493 const struct of_device_id *of_id;
494 int ret;
382 495
383 of_id = of_match_device(omap_rtc_of_match, &pdev->dev); 496 rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
384 if (of_id) 497 if (!rtc)
385 pdev->id_entry = of_id->data; 498 return -ENOMEM;
386 499
387 id_entry = platform_get_device_id(pdev); 500 of_id = of_match_device(omap_rtc_of_match, &pdev->dev);
388 if (!id_entry) { 501 if (of_id) {
389 dev_err(&pdev->dev, "no matching device entry\n"); 502 rtc->type = of_id->data;
390 return -ENODEV; 503 rtc->is_pmic_controller = rtc->type->has_pmic_mode &&
504 of_property_read_bool(pdev->dev.of_node,
505 "system-power-controller");
506 } else {
507 id_entry = platform_get_device_id(pdev);
508 rtc->type = (void *)id_entry->driver_data;
391 } 509 }
392 510
393 omap_rtc_timer = platform_get_irq(pdev, 0); 511 rtc->irq_timer = platform_get_irq(pdev, 0);
394 if (omap_rtc_timer <= 0) { 512 if (rtc->irq_timer <= 0)
395 pr_debug("%s: no update irq?\n", pdev->name);
396 return -ENOENT; 513 return -ENOENT;
397 }
398 514
399 omap_rtc_alarm = platform_get_irq(pdev, 1); 515 rtc->irq_alarm = platform_get_irq(pdev, 1);
400 if (omap_rtc_alarm <= 0) { 516 if (rtc->irq_alarm <= 0)
401 pr_debug("%s: no alarm irq?\n", pdev->name);
402 return -ENOENT; 517 return -ENOENT;
403 }
404 518
405 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 519 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
406 rtc_base = devm_ioremap_resource(&pdev->dev, res); 520 rtc->base = devm_ioremap_resource(&pdev->dev, res);
407 if (IS_ERR(rtc_base)) 521 if (IS_ERR(rtc->base))
408 return PTR_ERR(rtc_base); 522 return PTR_ERR(rtc->base);
523
524 platform_set_drvdata(pdev, rtc);
409 525
410 /* Enable the clock/module so that we can access the registers */ 526 /* Enable the clock/module so that we can access the registers */
411 pm_runtime_enable(&pdev->dev); 527 pm_runtime_enable(&pdev->dev);
412 pm_runtime_get_sync(&pdev->dev); 528 pm_runtime_get_sync(&pdev->dev);
413 529
414 if (id_entry->driver_data & OMAP_RTC_HAS_KICKER) { 530 if (rtc->type->has_kicker) {
415 rtc_writel(KICK0_VALUE, OMAP_RTC_KICK0_REG); 531 rtc_writel(rtc, OMAP_RTC_KICK0_REG, KICK0_VALUE);
416 rtc_writel(KICK1_VALUE, OMAP_RTC_KICK1_REG); 532 rtc_writel(rtc, OMAP_RTC_KICK1_REG, KICK1_VALUE);
417 }
418
419 rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
420 &omap_rtc_ops, THIS_MODULE);
421 if (IS_ERR(rtc)) {
422 pr_debug("%s: can't register RTC device, err %ld\n",
423 pdev->name, PTR_ERR(rtc));
424 goto fail0;
425 } 533 }
426 platform_set_drvdata(pdev, rtc);
427 534
428 /* clear pending irqs, and set 1/second periodic, 535 /*
429 * which we'll use instead of update irqs 536 * disable interrupts
537 *
538 * NOTE: ALARM2 is not cleared on AM3352 if rtc_write (writeb) is used
430 */ 539 */
431 rtc_write(0, OMAP_RTC_INTERRUPTS_REG); 540 rtc_writel(rtc, OMAP_RTC_INTERRUPTS_REG, 0);
432 541
433 /* enable RTC functional clock */ 542 /* enable RTC functional clock */
434 if (id_entry->driver_data & OMAP_RTC_HAS_32KCLK_EN) 543 if (rtc->type->has_32kclk_en) {
435 rtc_writel(OMAP_RTC_OSC_32KCLK_EN, OMAP_RTC_OSC_REG); 544 reg = rtc_read(rtc, OMAP_RTC_OSC_REG);
545 rtc_writel(rtc, OMAP_RTC_OSC_REG,
546 reg | OMAP_RTC_OSC_32KCLK_EN);
547 }
436 548
437 /* clear old status */ 549 /* clear old status */
438 reg = rtc_read(OMAP_RTC_STATUS_REG); 550 reg = rtc_read(rtc, OMAP_RTC_STATUS_REG);
439 if (reg & (u8) OMAP_RTC_STATUS_POWER_UP) {
440 pr_info("%s: RTC power up reset detected\n",
441 pdev->name);
442 rtc_write(OMAP_RTC_STATUS_POWER_UP, OMAP_RTC_STATUS_REG);
443 }
444 if (reg & (u8) OMAP_RTC_STATUS_ALARM)
445 rtc_write(OMAP_RTC_STATUS_ALARM, OMAP_RTC_STATUS_REG);
446 551
447 /* handle periodic and alarm irqs */ 552 mask = OMAP_RTC_STATUS_ALARM;
448 if (devm_request_irq(&pdev->dev, omap_rtc_timer, rtc_irq, 0, 553
449 dev_name(&rtc->dev), rtc)) { 554 if (rtc->type->has_pmic_mode)
450 pr_debug("%s: RTC timer interrupt IRQ%d already claimed\n", 555 mask |= OMAP_RTC_STATUS_ALARM2;
451 pdev->name, omap_rtc_timer); 556
452 goto fail0; 557 if (rtc->type->has_power_up_reset) {
453 } 558 mask |= OMAP_RTC_STATUS_POWER_UP;
454 if ((omap_rtc_timer != omap_rtc_alarm) && 559 if (reg & OMAP_RTC_STATUS_POWER_UP)
455 (devm_request_irq(&pdev->dev, omap_rtc_alarm, rtc_irq, 0, 560 dev_info(&pdev->dev, "RTC power up reset detected\n");
456 dev_name(&rtc->dev), rtc))) {
457 pr_debug("%s: RTC alarm interrupt IRQ%d already claimed\n",
458 pdev->name, omap_rtc_alarm);
459 goto fail0;
460 } 561 }
461 562
563 if (reg & mask)
564 rtc_write(rtc, OMAP_RTC_STATUS_REG, reg & mask);
565
462 /* On boards with split power, RTC_ON_NOFF won't reset the RTC */ 566 /* On boards with split power, RTC_ON_NOFF won't reset the RTC */
463 reg = rtc_read(OMAP_RTC_CTRL_REG); 567 reg = rtc_read(rtc, OMAP_RTC_CTRL_REG);
464 if (reg & (u8) OMAP_RTC_CTRL_STOP) 568 if (reg & OMAP_RTC_CTRL_STOP)
465 pr_info("%s: already running\n", pdev->name); 569 dev_info(&pdev->dev, "already running\n");
466 570
467 /* force to 24 hour mode */ 571 /* force to 24 hour mode */
468 new_ctrl = reg & (OMAP_RTC_CTRL_SPLIT|OMAP_RTC_CTRL_AUTO_COMP); 572 new_ctrl = reg & (OMAP_RTC_CTRL_SPLIT | OMAP_RTC_CTRL_AUTO_COMP);
469 new_ctrl |= OMAP_RTC_CTRL_STOP; 573 new_ctrl |= OMAP_RTC_CTRL_STOP;
470 574
471 /* BOARD-SPECIFIC CUSTOMIZATION CAN GO HERE: 575 /*
576 * BOARD-SPECIFIC CUSTOMIZATION CAN GO HERE:
472 * 577 *
473 * - Device wake-up capability setting should come through chip 578 * - Device wake-up capability setting should come through chip
474 * init logic. OMAP1 boards should initialize the "wakeup capable" 579 * init logic. OMAP1 boards should initialize the "wakeup capable"
@@ -482,36 +587,70 @@ static int __init omap_rtc_probe(struct platform_device *pdev)
482 * is write-only, and always reads as zero...) 587 * is write-only, and always reads as zero...)
483 */ 588 */
484 589
590 if (new_ctrl & OMAP_RTC_CTRL_SPLIT)
591 dev_info(&pdev->dev, "split power mode\n");
592
593 if (reg != new_ctrl)
594 rtc_write(rtc, OMAP_RTC_CTRL_REG, new_ctrl);
595
485 device_init_wakeup(&pdev->dev, true); 596 device_init_wakeup(&pdev->dev, true);
486 597
487 if (new_ctrl & (u8) OMAP_RTC_CTRL_SPLIT) 598 rtc->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
488 pr_info("%s: split power mode\n", pdev->name); 599 &omap_rtc_ops, THIS_MODULE);
600 if (IS_ERR(rtc->rtc)) {
601 ret = PTR_ERR(rtc->rtc);
602 goto err;
603 }
489 604
490 if (reg != new_ctrl) 605 /* handle periodic and alarm irqs */
491 rtc_write(new_ctrl, OMAP_RTC_CTRL_REG); 606 ret = devm_request_irq(&pdev->dev, rtc->irq_timer, rtc_irq, 0,
607 dev_name(&rtc->rtc->dev), rtc);
608 if (ret)
609 goto err;
610
611 if (rtc->irq_timer != rtc->irq_alarm) {
612 ret = devm_request_irq(&pdev->dev, rtc->irq_alarm, rtc_irq, 0,
613 dev_name(&rtc->rtc->dev), rtc);
614 if (ret)
615 goto err;
616 }
617
618 if (rtc->is_pmic_controller) {
619 if (!pm_power_off) {
620 omap_rtc_power_off_rtc = rtc;
621 pm_power_off = omap_rtc_power_off;
622 }
623 }
492 624
493 return 0; 625 return 0;
494 626
495fail0: 627err:
496 if (id_entry->driver_data & OMAP_RTC_HAS_KICKER) 628 device_init_wakeup(&pdev->dev, false);
497 rtc_writel(0, OMAP_RTC_KICK0_REG); 629 if (rtc->type->has_kicker)
630 rtc_writel(rtc, OMAP_RTC_KICK0_REG, 0);
498 pm_runtime_put_sync(&pdev->dev); 631 pm_runtime_put_sync(&pdev->dev);
499 pm_runtime_disable(&pdev->dev); 632 pm_runtime_disable(&pdev->dev);
500 return -EIO; 633
634 return ret;
501} 635}
502 636
503static int __exit omap_rtc_remove(struct platform_device *pdev) 637static int __exit omap_rtc_remove(struct platform_device *pdev)
504{ 638{
505 const struct platform_device_id *id_entry = 639 struct omap_rtc *rtc = platform_get_drvdata(pdev);
506 platform_get_device_id(pdev); 640
641 if (pm_power_off == omap_rtc_power_off &&
642 omap_rtc_power_off_rtc == rtc) {
643 pm_power_off = NULL;
644 omap_rtc_power_off_rtc = NULL;
645 }
507 646
508 device_init_wakeup(&pdev->dev, 0); 647 device_init_wakeup(&pdev->dev, 0);
509 648
510 /* leave rtc running, but disable irqs */ 649 /* leave rtc running, but disable irqs */
511 rtc_write(0, OMAP_RTC_INTERRUPTS_REG); 650 rtc_write(rtc, OMAP_RTC_INTERRUPTS_REG, 0);
512 651
513 if (id_entry->driver_data & OMAP_RTC_HAS_KICKER) 652 if (rtc->type->has_kicker)
514 rtc_writel(0, OMAP_RTC_KICK0_REG); 653 rtc_writel(rtc, OMAP_RTC_KICK0_REG, 0);
515 654
516 /* Disable the clock/module */ 655 /* Disable the clock/module */
517 pm_runtime_put_sync(&pdev->dev); 656 pm_runtime_put_sync(&pdev->dev);
@@ -521,20 +660,21 @@ static int __exit omap_rtc_remove(struct platform_device *pdev)
521} 660}
522 661
523#ifdef CONFIG_PM_SLEEP 662#ifdef CONFIG_PM_SLEEP
524static u8 irqstat;
525
526static int omap_rtc_suspend(struct device *dev) 663static int omap_rtc_suspend(struct device *dev)
527{ 664{
528 irqstat = rtc_read(OMAP_RTC_INTERRUPTS_REG); 665 struct omap_rtc *rtc = dev_get_drvdata(dev);
529 666
530 /* FIXME the RTC alarm is not currently acting as a wakeup event 667 rtc->interrupts_reg = rtc_read(rtc, OMAP_RTC_INTERRUPTS_REG);
668
669 /*
670 * FIXME: the RTC alarm is not currently acting as a wakeup event
531 * source on some platforms, and in fact this enable() call is just 671 * source on some platforms, and in fact this enable() call is just
532 * saving a flag that's never used... 672 * saving a flag that's never used...
533 */ 673 */
534 if (device_may_wakeup(dev)) 674 if (device_may_wakeup(dev))
535 enable_irq_wake(omap_rtc_alarm); 675 enable_irq_wake(rtc->irq_alarm);
536 else 676 else
537 rtc_write(0, OMAP_RTC_INTERRUPTS_REG); 677 rtc_write(rtc, OMAP_RTC_INTERRUPTS_REG, 0);
538 678
539 /* Disable the clock/module */ 679 /* Disable the clock/module */
540 pm_runtime_put_sync(dev); 680 pm_runtime_put_sync(dev);
@@ -544,13 +684,15 @@ static int omap_rtc_suspend(struct device *dev)
544 684
545static int omap_rtc_resume(struct device *dev) 685static int omap_rtc_resume(struct device *dev)
546{ 686{
687 struct omap_rtc *rtc = dev_get_drvdata(dev);
688
547 /* Enable the clock/module so that we can access the registers */ 689 /* Enable the clock/module so that we can access the registers */
548 pm_runtime_get_sync(dev); 690 pm_runtime_get_sync(dev);
549 691
550 if (device_may_wakeup(dev)) 692 if (device_may_wakeup(dev))
551 disable_irq_wake(omap_rtc_alarm); 693 disable_irq_wake(rtc->irq_alarm);
552 else 694 else
553 rtc_write(irqstat, OMAP_RTC_INTERRUPTS_REG); 695 rtc_write(rtc, OMAP_RTC_INTERRUPTS_REG, rtc->interrupts_reg);
554 696
555 return 0; 697 return 0;
556} 698}
@@ -560,23 +702,32 @@ static SIMPLE_DEV_PM_OPS(omap_rtc_pm_ops, omap_rtc_suspend, omap_rtc_resume);
560 702
561static void omap_rtc_shutdown(struct platform_device *pdev) 703static void omap_rtc_shutdown(struct platform_device *pdev)
562{ 704{
563 rtc_write(0, OMAP_RTC_INTERRUPTS_REG); 705 struct omap_rtc *rtc = platform_get_drvdata(pdev);
706 u8 mask;
707
708 /*
709 * Keep the ALARM interrupt enabled to allow the system to power up on
710 * alarm events.
711 */
712 mask = rtc_read(rtc, OMAP_RTC_INTERRUPTS_REG);
713 mask &= OMAP_RTC_INTERRUPTS_IT_ALARM;
714 rtc_write(rtc, OMAP_RTC_INTERRUPTS_REG, mask);
564} 715}
565 716
566MODULE_ALIAS("platform:omap_rtc");
567static struct platform_driver omap_rtc_driver = { 717static struct platform_driver omap_rtc_driver = {
568 .remove = __exit_p(omap_rtc_remove), 718 .remove = __exit_p(omap_rtc_remove),
569 .shutdown = omap_rtc_shutdown, 719 .shutdown = omap_rtc_shutdown,
570 .driver = { 720 .driver = {
571 .name = DRIVER_NAME, 721 .name = "omap_rtc",
572 .owner = THIS_MODULE, 722 .owner = THIS_MODULE,
573 .pm = &omap_rtc_pm_ops, 723 .pm = &omap_rtc_pm_ops,
574 .of_match_table = omap_rtc_of_match, 724 .of_match_table = omap_rtc_of_match,
575 }, 725 },
576 .id_table = omap_rtc_devtype, 726 .id_table = omap_rtc_id_table,
577}; 727};
578 728
579module_platform_driver_probe(omap_rtc_driver, omap_rtc_probe); 729module_platform_driver_probe(omap_rtc_driver, omap_rtc_probe);
580 730
731MODULE_ALIAS("platform:omap_rtc");
581MODULE_AUTHOR("George G. Davis (and others)"); 732MODULE_AUTHOR("George G. Davis (and others)");
582MODULE_LICENSE("GPL"); 733MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c
index c2ef0a22ee94..96fb32e7d6f8 100644
--- a/drivers/rtc/rtc-pcf8563.c
+++ b/drivers/rtc/rtc-pcf8563.c
@@ -28,6 +28,7 @@
28#define PCF8563_REG_ST2 0x01 28#define PCF8563_REG_ST2 0x01
29#define PCF8563_BIT_AIE (1 << 1) 29#define PCF8563_BIT_AIE (1 << 1)
30#define PCF8563_BIT_AF (1 << 3) 30#define PCF8563_BIT_AF (1 << 3)
31#define PCF8563_BITS_ST2_N (7 << 5)
31 32
32#define PCF8563_REG_SC 0x02 /* datetime */ 33#define PCF8563_REG_SC 0x02 /* datetime */
33#define PCF8563_REG_MN 0x03 34#define PCF8563_REG_MN 0x03
@@ -41,6 +42,13 @@
41 42
42#define PCF8563_REG_CLKO 0x0D /* clock out */ 43#define PCF8563_REG_CLKO 0x0D /* clock out */
43#define PCF8563_REG_TMRC 0x0E /* timer control */ 44#define PCF8563_REG_TMRC 0x0E /* timer control */
45#define PCF8563_TMRC_ENABLE BIT(7)
46#define PCF8563_TMRC_4096 0
47#define PCF8563_TMRC_64 1
48#define PCF8563_TMRC_1 2
49#define PCF8563_TMRC_1_60 3
50#define PCF8563_TMRC_MASK 3
51
44#define PCF8563_REG_TMR 0x0F /* timer */ 52#define PCF8563_REG_TMR 0x0F /* timer */
45 53
46#define PCF8563_SC_LV 0x80 /* low voltage */ 54#define PCF8563_SC_LV 0x80 /* low voltage */
@@ -118,22 +126,21 @@ static int pcf8563_write_block_data(struct i2c_client *client,
118 126
119static int pcf8563_set_alarm_mode(struct i2c_client *client, bool on) 127static int pcf8563_set_alarm_mode(struct i2c_client *client, bool on)
120{ 128{
121 unsigned char buf[2]; 129 unsigned char buf;
122 int err; 130 int err;
123 131
124 err = pcf8563_read_block_data(client, PCF8563_REG_ST2, 1, buf + 1); 132 err = pcf8563_read_block_data(client, PCF8563_REG_ST2, 1, &buf);
125 if (err < 0) 133 if (err < 0)
126 return err; 134 return err;
127 135
128 if (on) 136 if (on)
129 buf[1] |= PCF8563_BIT_AIE; 137 buf |= PCF8563_BIT_AIE;
130 else 138 else
131 buf[1] &= ~PCF8563_BIT_AIE; 139 buf &= ~PCF8563_BIT_AIE;
132 140
133 buf[1] &= ~PCF8563_BIT_AF; 141 buf &= ~(PCF8563_BIT_AF | PCF8563_BITS_ST2_N);
134 buf[0] = PCF8563_REG_ST2;
135 142
136 err = pcf8563_write_block_data(client, PCF8563_REG_ST2, 1, buf + 1); 143 err = pcf8563_write_block_data(client, PCF8563_REG_ST2, 1, &buf);
137 if (err < 0) { 144 if (err < 0) {
138 dev_err(&client->dev, "%s: write error\n", __func__); 145 dev_err(&client->dev, "%s: write error\n", __func__);
139 return -EIO; 146 return -EIO;
@@ -336,8 +343,8 @@ static int pcf8563_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *tm)
336 __func__, buf[0], buf[1], buf[2], buf[3]); 343 __func__, buf[0], buf[1], buf[2], buf[3]);
337 344
338 tm->time.tm_min = bcd2bin(buf[0] & 0x7F); 345 tm->time.tm_min = bcd2bin(buf[0] & 0x7F);
339 tm->time.tm_hour = bcd2bin(buf[1] & 0x7F); 346 tm->time.tm_hour = bcd2bin(buf[1] & 0x3F);
340 tm->time.tm_mday = bcd2bin(buf[2] & 0x1F); 347 tm->time.tm_mday = bcd2bin(buf[2] & 0x3F);
341 tm->time.tm_wday = bcd2bin(buf[3] & 0x7); 348 tm->time.tm_wday = bcd2bin(buf[3] & 0x7);
342 tm->time.tm_mon = -1; 349 tm->time.tm_mon = -1;
343 tm->time.tm_year = -1; 350 tm->time.tm_year = -1;
@@ -361,6 +368,14 @@ static int pcf8563_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *tm)
361 struct i2c_client *client = to_i2c_client(dev); 368 struct i2c_client *client = to_i2c_client(dev);
362 unsigned char buf[4]; 369 unsigned char buf[4];
363 int err; 370 int err;
371 unsigned long alarm_time;
372
373 /* The alarm has no seconds, round up to nearest minute */
374 if (tm->time.tm_sec) {
375 rtc_tm_to_time(&tm->time, &alarm_time);
376 alarm_time += 60-tm->time.tm_sec;
377 rtc_time_to_tm(alarm_time, &tm->time);
378 }
364 379
365 dev_dbg(dev, "%s, min=%d hour=%d wday=%d mday=%d " 380 dev_dbg(dev, "%s, min=%d hour=%d wday=%d mday=%d "
366 "enabled=%d pending=%d\n", __func__, 381 "enabled=%d pending=%d\n", __func__,
@@ -381,6 +396,7 @@ static int pcf8563_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *tm)
381 396
382static int pcf8563_irq_enable(struct device *dev, unsigned int enabled) 397static int pcf8563_irq_enable(struct device *dev, unsigned int enabled)
383{ 398{
399 dev_dbg(dev, "%s: en=%d\n", __func__, enabled);
384 return pcf8563_set_alarm_mode(to_i2c_client(dev), !!enabled); 400 return pcf8563_set_alarm_mode(to_i2c_client(dev), !!enabled);
385} 401}
386 402
@@ -398,6 +414,8 @@ static int pcf8563_probe(struct i2c_client *client,
398{ 414{
399 struct pcf8563 *pcf8563; 415 struct pcf8563 *pcf8563;
400 int err; 416 int err;
417 unsigned char buf;
418 unsigned char alm_pending;
401 419
402 dev_dbg(&client->dev, "%s\n", __func__); 420 dev_dbg(&client->dev, "%s\n", __func__);
403 421
@@ -415,6 +433,22 @@ static int pcf8563_probe(struct i2c_client *client,
415 pcf8563->client = client; 433 pcf8563->client = client;
416 device_set_wakeup_capable(&client->dev, 1); 434 device_set_wakeup_capable(&client->dev, 1);
417 435
436 /* Set timer to lowest frequency to save power (ref Haoyu datasheet) */
437 buf = PCF8563_TMRC_1_60;
438 err = pcf8563_write_block_data(client, PCF8563_REG_TMRC, 1, &buf);
439 if (err < 0) {
440 dev_err(&client->dev, "%s: write error\n", __func__);
441 return err;
442 }
443
444 err = pcf8563_get_alarm_mode(client, NULL, &alm_pending);
445 if (err < 0) {
446 dev_err(&client->dev, "%s: read error\n", __func__);
447 return err;
448 }
449 if (alm_pending)
450 pcf8563_set_alarm_mode(client, 0);
451
418 pcf8563->rtc = devm_rtc_device_register(&client->dev, 452 pcf8563->rtc = devm_rtc_device_register(&client->dev,
419 pcf8563_driver.driver.name, 453 pcf8563_driver.driver.name,
420 &pcf8563_rtc_ops, THIS_MODULE); 454 &pcf8563_rtc_ops, THIS_MODULE);
@@ -435,6 +469,9 @@ static int pcf8563_probe(struct i2c_client *client,
435 469
436 } 470 }
437 471
472 /* the pcf8563 alarm only supports a minute accuracy */
473 pcf8563->rtc->uie_unsupported = 1;
474
438 return 0; 475 return 0;
439} 476}
440 477
diff --git a/drivers/rtc/rtc-sirfsoc.c b/drivers/rtc/rtc-sirfsoc.c
index 76e38007ba90..d2ac6688e5c7 100644
--- a/drivers/rtc/rtc-sirfsoc.c
+++ b/drivers/rtc/rtc-sirfsoc.c
@@ -47,6 +47,7 @@ struct sirfsoc_rtc_drv {
47 unsigned irq_wake; 47 unsigned irq_wake;
48 /* Overflow for every 8 years extra time */ 48 /* Overflow for every 8 years extra time */
49 u32 overflow_rtc; 49 u32 overflow_rtc;
50 spinlock_t lock;
50#ifdef CONFIG_PM 51#ifdef CONFIG_PM
51 u32 saved_counter; 52 u32 saved_counter;
52 u32 saved_overflow_rtc; 53 u32 saved_overflow_rtc;
@@ -61,7 +62,7 @@ static int sirfsoc_rtc_read_alarm(struct device *dev,
61 62
62 rtcdrv = dev_get_drvdata(dev); 63 rtcdrv = dev_get_drvdata(dev);
63 64
64 local_irq_disable(); 65 spin_lock_irq(&rtcdrv->lock);
65 66
66 rtc_count = sirfsoc_rtc_iobrg_readl(rtcdrv->rtc_base + RTC_CN); 67 rtc_count = sirfsoc_rtc_iobrg_readl(rtcdrv->rtc_base + RTC_CN);
67 68
@@ -84,7 +85,8 @@ static int sirfsoc_rtc_read_alarm(struct device *dev,
84 if (sirfsoc_rtc_iobrg_readl( 85 if (sirfsoc_rtc_iobrg_readl(
85 rtcdrv->rtc_base + RTC_STATUS) & SIRFSOC_RTC_AL0E) 86 rtcdrv->rtc_base + RTC_STATUS) & SIRFSOC_RTC_AL0E)
86 alrm->enabled = 1; 87 alrm->enabled = 1;
87 local_irq_enable(); 88
89 spin_unlock_irq(&rtcdrv->lock);
88 90
89 return 0; 91 return 0;
90} 92}
@@ -99,7 +101,7 @@ static int sirfsoc_rtc_set_alarm(struct device *dev,
99 if (alrm->enabled) { 101 if (alrm->enabled) {
100 rtc_tm_to_time(&(alrm->time), &rtc_alarm); 102 rtc_tm_to_time(&(alrm->time), &rtc_alarm);
101 103
102 local_irq_disable(); 104 spin_lock_irq(&rtcdrv->lock);
103 105
104 rtc_status_reg = sirfsoc_rtc_iobrg_readl( 106 rtc_status_reg = sirfsoc_rtc_iobrg_readl(
105 rtcdrv->rtc_base + RTC_STATUS); 107 rtcdrv->rtc_base + RTC_STATUS);
@@ -123,14 +125,15 @@ static int sirfsoc_rtc_set_alarm(struct device *dev,
123 rtc_status_reg |= SIRFSOC_RTC_AL0E; 125 rtc_status_reg |= SIRFSOC_RTC_AL0E;
124 sirfsoc_rtc_iobrg_writel( 126 sirfsoc_rtc_iobrg_writel(
125 rtc_status_reg, rtcdrv->rtc_base + RTC_STATUS); 127 rtc_status_reg, rtcdrv->rtc_base + RTC_STATUS);
126 local_irq_enable(); 128
129 spin_unlock_irq(&rtcdrv->lock);
127 } else { 130 } else {
128 /* 131 /*
129 * if this function was called with enabled=0 132 * if this function was called with enabled=0
130 * then it could mean that the application is 133 * then it could mean that the application is
131 * trying to cancel an ongoing alarm 134 * trying to cancel an ongoing alarm
132 */ 135 */
133 local_irq_disable(); 136 spin_lock_irq(&rtcdrv->lock);
134 137
135 rtc_status_reg = sirfsoc_rtc_iobrg_readl( 138 rtc_status_reg = sirfsoc_rtc_iobrg_readl(
136 rtcdrv->rtc_base + RTC_STATUS); 139 rtcdrv->rtc_base + RTC_STATUS);
@@ -146,7 +149,7 @@ static int sirfsoc_rtc_set_alarm(struct device *dev,
146 rtcdrv->rtc_base + RTC_STATUS); 149 rtcdrv->rtc_base + RTC_STATUS);
147 } 150 }
148 151
149 local_irq_enable(); 152 spin_unlock_irq(&rtcdrv->lock);
150 } 153 }
151 154
152 return 0; 155 return 0;
@@ -209,12 +212,38 @@ static int sirfsoc_rtc_ioctl(struct device *dev, unsigned int cmd,
209 } 212 }
210} 213}
211 214
215static int sirfsoc_rtc_alarm_irq_enable(struct device *dev,
216 unsigned int enabled)
217{
218 unsigned long rtc_status_reg = 0x0;
219 struct sirfsoc_rtc_drv *rtcdrv;
220
221 rtcdrv = dev_get_drvdata(dev);
222
223 spin_lock_irq(&rtcdrv->lock);
224
225 rtc_status_reg = sirfsoc_rtc_iobrg_readl(
226 rtcdrv->rtc_base + RTC_STATUS);
227 if (enabled)
228 rtc_status_reg |= SIRFSOC_RTC_AL0E;
229 else
230 rtc_status_reg &= ~SIRFSOC_RTC_AL0E;
231
232 sirfsoc_rtc_iobrg_writel(rtc_status_reg, rtcdrv->rtc_base + RTC_STATUS);
233
234 spin_unlock_irq(&rtcdrv->lock);
235
236 return 0;
237
238}
239
212static const struct rtc_class_ops sirfsoc_rtc_ops = { 240static const struct rtc_class_ops sirfsoc_rtc_ops = {
213 .read_time = sirfsoc_rtc_read_time, 241 .read_time = sirfsoc_rtc_read_time,
214 .set_time = sirfsoc_rtc_set_time, 242 .set_time = sirfsoc_rtc_set_time,
215 .read_alarm = sirfsoc_rtc_read_alarm, 243 .read_alarm = sirfsoc_rtc_read_alarm,
216 .set_alarm = sirfsoc_rtc_set_alarm, 244 .set_alarm = sirfsoc_rtc_set_alarm,
217 .ioctl = sirfsoc_rtc_ioctl 245 .ioctl = sirfsoc_rtc_ioctl,
246 .alarm_irq_enable = sirfsoc_rtc_alarm_irq_enable
218}; 247};
219 248
220static irqreturn_t sirfsoc_rtc_irq_handler(int irq, void *pdata) 249static irqreturn_t sirfsoc_rtc_irq_handler(int irq, void *pdata)
@@ -223,6 +252,8 @@ static irqreturn_t sirfsoc_rtc_irq_handler(int irq, void *pdata)
223 unsigned long rtc_status_reg = 0x0; 252 unsigned long rtc_status_reg = 0x0;
224 unsigned long events = 0x0; 253 unsigned long events = 0x0;
225 254
255 spin_lock(&rtcdrv->lock);
256
226 rtc_status_reg = sirfsoc_rtc_iobrg_readl(rtcdrv->rtc_base + RTC_STATUS); 257 rtc_status_reg = sirfsoc_rtc_iobrg_readl(rtcdrv->rtc_base + RTC_STATUS);
227 /* this bit will be set ONLY if an alarm was active 258 /* this bit will be set ONLY if an alarm was active
228 * and it expired NOW 259 * and it expired NOW
@@ -240,6 +271,9 @@ static irqreturn_t sirfsoc_rtc_irq_handler(int irq, void *pdata)
240 rtc_status_reg &= ~(SIRFSOC_RTC_AL0E); 271 rtc_status_reg &= ~(SIRFSOC_RTC_AL0E);
241 } 272 }
242 sirfsoc_rtc_iobrg_writel(rtc_status_reg, rtcdrv->rtc_base + RTC_STATUS); 273 sirfsoc_rtc_iobrg_writel(rtc_status_reg, rtcdrv->rtc_base + RTC_STATUS);
274
275 spin_unlock(&rtcdrv->lock);
276
243 /* this should wake up any apps polling/waiting on the read 277 /* this should wake up any apps polling/waiting on the read
244 * after setting the alarm 278 * after setting the alarm
245 */ 279 */
@@ -267,6 +301,8 @@ static int sirfsoc_rtc_probe(struct platform_device *pdev)
267 if (rtcdrv == NULL) 301 if (rtcdrv == NULL)
268 return -ENOMEM; 302 return -ENOMEM;
269 303
304 spin_lock_init(&rtcdrv->lock);
305
270 err = of_property_read_u32(np, "reg", &rtcdrv->rtc_base); 306 err = of_property_read_u32(np, "reg", &rtcdrv->rtc_base);
271 if (err) { 307 if (err) {
272 dev_err(&pdev->dev, "unable to find base address of rtc node in dtb\n"); 308 dev_err(&pdev->dev, "unable to find base address of rtc node in dtb\n");
@@ -286,14 +322,6 @@ static int sirfsoc_rtc_probe(struct platform_device *pdev)
286 rtc_div = ((32768 / RTC_HZ) / 2) - 1; 322 rtc_div = ((32768 / RTC_HZ) / 2) - 1;
287 sirfsoc_rtc_iobrg_writel(rtc_div, rtcdrv->rtc_base + RTC_DIV); 323 sirfsoc_rtc_iobrg_writel(rtc_div, rtcdrv->rtc_base + RTC_DIV);
288 324
289 rtcdrv->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
290 &sirfsoc_rtc_ops, THIS_MODULE);
291 if (IS_ERR(rtcdrv->rtc)) {
292 err = PTR_ERR(rtcdrv->rtc);
293 dev_err(&pdev->dev, "can't register RTC device\n");
294 return err;
295 }
296
297 /* 0x3 -> RTC_CLK */ 325 /* 0x3 -> RTC_CLK */
298 sirfsoc_rtc_iobrg_writel(SIRFSOC_RTC_CLK, 326 sirfsoc_rtc_iobrg_writel(SIRFSOC_RTC_CLK,
299 rtcdrv->rtc_base + RTC_CLOCK_SWITCH); 327 rtcdrv->rtc_base + RTC_CLOCK_SWITCH);
@@ -308,6 +336,14 @@ static int sirfsoc_rtc_probe(struct platform_device *pdev)
308 rtcdrv->overflow_rtc = 336 rtcdrv->overflow_rtc =
309 sirfsoc_rtc_iobrg_readl(rtcdrv->rtc_base + RTC_SW_VALUE); 337 sirfsoc_rtc_iobrg_readl(rtcdrv->rtc_base + RTC_SW_VALUE);
310 338
339 rtcdrv->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
340 &sirfsoc_rtc_ops, THIS_MODULE);
341 if (IS_ERR(rtcdrv->rtc)) {
342 err = PTR_ERR(rtcdrv->rtc);
343 dev_err(&pdev->dev, "can't register RTC device\n");
344 return err;
345 }
346
311 rtcdrv->irq = platform_get_irq(pdev, 0); 347 rtcdrv->irq = platform_get_irq(pdev, 0);
312 err = devm_request_irq( 348 err = devm_request_irq(
313 &pdev->dev, 349 &pdev->dev,
diff --git a/drivers/rtc/rtc-snvs.c b/drivers/rtc/rtc-snvs.c
index fa384fe28988..2cd8ffe5c698 100644
--- a/drivers/rtc/rtc-snvs.c
+++ b/drivers/rtc/rtc-snvs.c
@@ -17,6 +17,7 @@
17#include <linux/of_device.h> 17#include <linux/of_device.h>
18#include <linux/platform_device.h> 18#include <linux/platform_device.h>
19#include <linux/rtc.h> 19#include <linux/rtc.h>
20#include <linux/clk.h>
20 21
21/* These register offsets are relative to LP (Low Power) range */ 22/* These register offsets are relative to LP (Low Power) range */
22#define SNVS_LPCR 0x04 23#define SNVS_LPCR 0x04
@@ -39,6 +40,7 @@ struct snvs_rtc_data {
39 void __iomem *ioaddr; 40 void __iomem *ioaddr;
40 int irq; 41 int irq;
41 spinlock_t lock; 42 spinlock_t lock;
43 struct clk *clk;
42}; 44};
43 45
44static u32 rtc_read_lp_counter(void __iomem *ioaddr) 46static u32 rtc_read_lp_counter(void __iomem *ioaddr)
@@ -260,6 +262,18 @@ static int snvs_rtc_probe(struct platform_device *pdev)
260 if (data->irq < 0) 262 if (data->irq < 0)
261 return data->irq; 263 return data->irq;
262 264
265 data->clk = devm_clk_get(&pdev->dev, "snvs-rtc");
266 if (IS_ERR(data->clk)) {
267 data->clk = NULL;
268 } else {
269 ret = clk_prepare_enable(data->clk);
270 if (ret) {
271 dev_err(&pdev->dev,
272 "Could not prepare or enable the snvs clock\n");
273 return ret;
274 }
275 }
276
263 platform_set_drvdata(pdev, data); 277 platform_set_drvdata(pdev, data);
264 278
265 spin_lock_init(&data->lock); 279 spin_lock_init(&data->lock);
@@ -280,7 +294,7 @@ static int snvs_rtc_probe(struct platform_device *pdev)
280 if (ret) { 294 if (ret) {
281 dev_err(&pdev->dev, "failed to request irq %d: %d\n", 295 dev_err(&pdev->dev, "failed to request irq %d: %d\n",
282 data->irq, ret); 296 data->irq, ret);
283 return ret; 297 goto error_rtc_device_register;
284 } 298 }
285 299
286 data->rtc = devm_rtc_device_register(&pdev->dev, pdev->name, 300 data->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
@@ -288,10 +302,16 @@ static int snvs_rtc_probe(struct platform_device *pdev)
288 if (IS_ERR(data->rtc)) { 302 if (IS_ERR(data->rtc)) {
289 ret = PTR_ERR(data->rtc); 303 ret = PTR_ERR(data->rtc);
290 dev_err(&pdev->dev, "failed to register rtc: %d\n", ret); 304 dev_err(&pdev->dev, "failed to register rtc: %d\n", ret);
291 return ret; 305 goto error_rtc_device_register;
292 } 306 }
293 307
294 return 0; 308 return 0;
309
310error_rtc_device_register:
311 if (data->clk)
312 clk_disable_unprepare(data->clk);
313
314 return ret;
295} 315}
296 316
297#ifdef CONFIG_PM_SLEEP 317#ifdef CONFIG_PM_SLEEP
@@ -302,21 +322,34 @@ static int snvs_rtc_suspend(struct device *dev)
302 if (device_may_wakeup(dev)) 322 if (device_may_wakeup(dev))
303 enable_irq_wake(data->irq); 323 enable_irq_wake(data->irq);
304 324
325 if (data->clk)
326 clk_disable_unprepare(data->clk);
327
305 return 0; 328 return 0;
306} 329}
307 330
308static int snvs_rtc_resume(struct device *dev) 331static int snvs_rtc_resume(struct device *dev)
309{ 332{
310 struct snvs_rtc_data *data = dev_get_drvdata(dev); 333 struct snvs_rtc_data *data = dev_get_drvdata(dev);
334 int ret;
311 335
312 if (device_may_wakeup(dev)) 336 if (device_may_wakeup(dev))
313 disable_irq_wake(data->irq); 337 disable_irq_wake(data->irq);
314 338
339 if (data->clk) {
340 ret = clk_prepare_enable(data->clk);
341 if (ret)
342 return ret;
343 }
344
315 return 0; 345 return 0;
316} 346}
317#endif 347#endif
318 348
319static SIMPLE_DEV_PM_OPS(snvs_rtc_pm_ops, snvs_rtc_suspend, snvs_rtc_resume); 349static const struct dev_pm_ops snvs_rtc_pm_ops = {
350 .suspend_noirq = snvs_rtc_suspend,
351 .resume_noirq = snvs_rtc_resume,
352};
320 353
321static const struct of_device_id snvs_dt_ids[] = { 354static const struct of_device_id snvs_dt_ids[] = {
322 { .compatible = "fsl,sec-v4.0-mon-rtc-lp", }, 355 { .compatible = "fsl,sec-v4.0-mon-rtc-lp", },
diff --git a/drivers/s390/char/hmcdrv_dev.c b/drivers/s390/char/hmcdrv_dev.c
index 0c5176179c17..43cee7fcd01c 100644
--- a/drivers/s390/char/hmcdrv_dev.c
+++ b/drivers/s390/char/hmcdrv_dev.c
@@ -136,8 +136,7 @@ static int hmcdrv_dev_open(struct inode *inode, struct file *fp)
136 if (rc) 136 if (rc)
137 module_put(THIS_MODULE); 137 module_put(THIS_MODULE);
138 138
139 pr_debug("open file '/dev/%s' with return code %d\n", 139 pr_debug("open file '/dev/%pD' with return code %d\n", fp, rc);
140 fp->f_dentry->d_name.name, rc);
141 return rc; 140 return rc;
142} 141}
143 142
@@ -146,7 +145,7 @@ static int hmcdrv_dev_open(struct inode *inode, struct file *fp)
146 */ 145 */
147static int hmcdrv_dev_release(struct inode *inode, struct file *fp) 146static int hmcdrv_dev_release(struct inode *inode, struct file *fp)
148{ 147{
149 pr_debug("closing file '/dev/%s'\n", fp->f_dentry->d_name.name); 148 pr_debug("closing file '/dev/%pD'\n", fp);
150 kfree(fp->private_data); 149 kfree(fp->private_data);
151 fp->private_data = NULL; 150 fp->private_data = NULL;
152 hmcdrv_ftp_shutdown(); 151 hmcdrv_ftp_shutdown();
@@ -231,8 +230,8 @@ static ssize_t hmcdrv_dev_read(struct file *fp, char __user *ubuf,
231 retlen = hmcdrv_dev_transfer((char *) fp->private_data, 230 retlen = hmcdrv_dev_transfer((char *) fp->private_data,
232 *pos, ubuf, len); 231 *pos, ubuf, len);
233 232
234 pr_debug("read from file '/dev/%s' at %lld returns %zd/%zu\n", 233 pr_debug("read from file '/dev/%pD' at %lld returns %zd/%zu\n",
235 fp->f_dentry->d_name.name, (long long) *pos, retlen, len); 234 fp, (long long) *pos, retlen, len);
236 235
237 if (retlen > 0) 236 if (retlen > 0)
238 *pos += retlen; 237 *pos += retlen;
@@ -248,8 +247,8 @@ static ssize_t hmcdrv_dev_write(struct file *fp, const char __user *ubuf,
248{ 247{
249 ssize_t retlen; 248 ssize_t retlen;
250 249
251 pr_debug("writing file '/dev/%s' at pos. %lld with length %zd\n", 250 pr_debug("writing file '/dev/%pD' at pos. %lld with length %zd\n",
252 fp->f_dentry->d_name.name, (long long) *pos, len); 251 fp, (long long) *pos, len);
253 252
254 if (!fp->private_data) { /* first expect a cmd write */ 253 if (!fp->private_data) { /* first expect a cmd write */
255 fp->private_data = kmalloc(len + 1, GFP_KERNEL); 254 fp->private_data = kmalloc(len + 1, GFP_KERNEL);
@@ -272,8 +271,7 @@ static ssize_t hmcdrv_dev_write(struct file *fp, const char __user *ubuf,
272 if (retlen > 0) 271 if (retlen > 0)
273 *pos += retlen; 272 *pos += retlen;
274 273
275 pr_debug("write to file '/dev/%s' returned %zd\n", 274 pr_debug("write to file '/dev/%pD' returned %zd\n", fp, retlen);
276 fp->f_dentry->d_name.name, retlen);
277 275
278 return retlen; 276 return retlen;
279} 277}
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
index 643129070c51..dd65c8b4c7fe 100644
--- a/drivers/s390/kvm/kvm_virtio.c
+++ b/drivers/s390/kvm/kvm_virtio.c
@@ -80,7 +80,7 @@ static unsigned desc_size(const struct kvm_device_desc *desc)
80} 80}
81 81
82/* This gets the device's feature bits. */ 82/* This gets the device's feature bits. */
83static u32 kvm_get_features(struct virtio_device *vdev) 83static u64 kvm_get_features(struct virtio_device *vdev)
84{ 84{
85 unsigned int i; 85 unsigned int i;
86 u32 features = 0; 86 u32 features = 0;
@@ -93,7 +93,7 @@ static u32 kvm_get_features(struct virtio_device *vdev)
93 return features; 93 return features;
94} 94}
95 95
96static void kvm_finalize_features(struct virtio_device *vdev) 96static int kvm_finalize_features(struct virtio_device *vdev)
97{ 97{
98 unsigned int i, bits; 98 unsigned int i, bits;
99 struct kvm_device_desc *desc = to_kvmdev(vdev)->desc; 99 struct kvm_device_desc *desc = to_kvmdev(vdev)->desc;
@@ -103,12 +103,17 @@ static void kvm_finalize_features(struct virtio_device *vdev)
103 /* Give virtio_ring a chance to accept features. */ 103 /* Give virtio_ring a chance to accept features. */
104 vring_transport_features(vdev); 104 vring_transport_features(vdev);
105 105
106 /* Make sure we don't have any features > 32 bits! */
107 BUG_ON((u32)vdev->features != vdev->features);
108
106 memset(out_features, 0, desc->feature_len); 109 memset(out_features, 0, desc->feature_len);
107 bits = min_t(unsigned, desc->feature_len, sizeof(vdev->features)) * 8; 110 bits = min_t(unsigned, desc->feature_len, sizeof(vdev->features)) * 8;
108 for (i = 0; i < bits; i++) { 111 for (i = 0; i < bits; i++) {
109 if (test_bit(i, vdev->features)) 112 if (__virtio_test_bit(vdev, i))
110 out_features[i / 8] |= (1 << (i % 8)); 113 out_features[i / 8] |= (1 << (i % 8));
111 } 114 }
115
116 return 0;
112} 117}
113 118
114/* 119/*
diff --git a/drivers/s390/kvm/virtio_ccw.c b/drivers/s390/kvm/virtio_ccw.c
index bda52f18e967..71d7802aa8b4 100644
--- a/drivers/s390/kvm/virtio_ccw.c
+++ b/drivers/s390/kvm/virtio_ccw.c
@@ -55,6 +55,7 @@ struct virtio_ccw_device {
55 struct ccw_device *cdev; 55 struct ccw_device *cdev;
56 __u32 curr_io; 56 __u32 curr_io;
57 int err; 57 int err;
58 unsigned int revision; /* Transport revision */
58 wait_queue_head_t wait_q; 59 wait_queue_head_t wait_q;
59 spinlock_t lock; 60 spinlock_t lock;
60 struct list_head virtqueues; 61 struct list_head virtqueues;
@@ -67,13 +68,22 @@ struct virtio_ccw_device {
67 void *airq_info; 68 void *airq_info;
68}; 69};
69 70
70struct vq_info_block { 71struct vq_info_block_legacy {
71 __u64 queue; 72 __u64 queue;
72 __u32 align; 73 __u32 align;
73 __u16 index; 74 __u16 index;
74 __u16 num; 75 __u16 num;
75} __packed; 76} __packed;
76 77
78struct vq_info_block {
79 __u64 desc;
80 __u32 res0;
81 __u16 index;
82 __u16 num;
83 __u64 avail;
84 __u64 used;
85} __packed;
86
77struct virtio_feature_desc { 87struct virtio_feature_desc {
78 __u32 features; 88 __u32 features;
79 __u8 index; 89 __u8 index;
@@ -86,11 +96,23 @@ struct virtio_thinint_area {
86 u8 isc; 96 u8 isc;
87} __packed; 97} __packed;
88 98
99struct virtio_rev_info {
100 __u16 revision;
101 __u16 length;
102 __u8 data[];
103};
104
105/* the highest virtio-ccw revision we support */
106#define VIRTIO_CCW_REV_MAX 1
107
89struct virtio_ccw_vq_info { 108struct virtio_ccw_vq_info {
90 struct virtqueue *vq; 109 struct virtqueue *vq;
91 int num; 110 int num;
92 void *queue; 111 void *queue;
93 struct vq_info_block *info_block; 112 union {
113 struct vq_info_block s;
114 struct vq_info_block_legacy l;
115 } *info_block;
94 int bit_nr; 116 int bit_nr;
95 struct list_head node; 117 struct list_head node;
96 long cookie; 118 long cookie;
@@ -122,6 +144,7 @@ static struct airq_info *airq_areas[MAX_AIRQ_AREAS];
122#define CCW_CMD_WRITE_STATUS 0x31 144#define CCW_CMD_WRITE_STATUS 0x31
123#define CCW_CMD_READ_VQ_CONF 0x32 145#define CCW_CMD_READ_VQ_CONF 0x32
124#define CCW_CMD_SET_IND_ADAPTER 0x73 146#define CCW_CMD_SET_IND_ADAPTER 0x73
147#define CCW_CMD_SET_VIRTIO_REV 0x83
125 148
126#define VIRTIO_CCW_DOING_SET_VQ 0x00010000 149#define VIRTIO_CCW_DOING_SET_VQ 0x00010000
127#define VIRTIO_CCW_DOING_RESET 0x00040000 150#define VIRTIO_CCW_DOING_RESET 0x00040000
@@ -134,6 +157,7 @@ static struct airq_info *airq_areas[MAX_AIRQ_AREAS];
134#define VIRTIO_CCW_DOING_READ_VQ_CONF 0x02000000 157#define VIRTIO_CCW_DOING_READ_VQ_CONF 0x02000000
135#define VIRTIO_CCW_DOING_SET_CONF_IND 0x04000000 158#define VIRTIO_CCW_DOING_SET_CONF_IND 0x04000000
136#define VIRTIO_CCW_DOING_SET_IND_ADAPTER 0x08000000 159#define VIRTIO_CCW_DOING_SET_IND_ADAPTER 0x08000000
160#define VIRTIO_CCW_DOING_SET_VIRTIO_REV 0x10000000
137#define VIRTIO_CCW_INTPARM_MASK 0xffff0000 161#define VIRTIO_CCW_INTPARM_MASK 0xffff0000
138 162
139static struct virtio_ccw_device *to_vc_device(struct virtio_device *vdev) 163static struct virtio_ccw_device *to_vc_device(struct virtio_device *vdev)
@@ -399,13 +423,22 @@ static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw)
399 spin_unlock_irqrestore(&vcdev->lock, flags); 423 spin_unlock_irqrestore(&vcdev->lock, flags);
400 424
401 /* Release from host. */ 425 /* Release from host. */
402 info->info_block->queue = 0; 426 if (vcdev->revision == 0) {
403 info->info_block->align = 0; 427 info->info_block->l.queue = 0;
404 info->info_block->index = index; 428 info->info_block->l.align = 0;
405 info->info_block->num = 0; 429 info->info_block->l.index = index;
430 info->info_block->l.num = 0;
431 ccw->count = sizeof(info->info_block->l);
432 } else {
433 info->info_block->s.desc = 0;
434 info->info_block->s.index = index;
435 info->info_block->s.num = 0;
436 info->info_block->s.avail = 0;
437 info->info_block->s.used = 0;
438 ccw->count = sizeof(info->info_block->s);
439 }
406 ccw->cmd_code = CCW_CMD_SET_VQ; 440 ccw->cmd_code = CCW_CMD_SET_VQ;
407 ccw->flags = 0; 441 ccw->flags = 0;
408 ccw->count = sizeof(*info->info_block);
409 ccw->cda = (__u32)(unsigned long)(info->info_block); 442 ccw->cda = (__u32)(unsigned long)(info->info_block);
410 ret = ccw_io_helper(vcdev, ccw, 443 ret = ccw_io_helper(vcdev, ccw,
411 VIRTIO_CCW_DOING_SET_VQ | index); 444 VIRTIO_CCW_DOING_SET_VQ | index);
@@ -488,13 +521,22 @@ static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev,
488 } 521 }
489 522
490 /* Register it with the host. */ 523 /* Register it with the host. */
491 info->info_block->queue = (__u64)info->queue; 524 if (vcdev->revision == 0) {
492 info->info_block->align = KVM_VIRTIO_CCW_RING_ALIGN; 525 info->info_block->l.queue = (__u64)info->queue;
493 info->info_block->index = i; 526 info->info_block->l.align = KVM_VIRTIO_CCW_RING_ALIGN;
494 info->info_block->num = info->num; 527 info->info_block->l.index = i;
528 info->info_block->l.num = info->num;
529 ccw->count = sizeof(info->info_block->l);
530 } else {
531 info->info_block->s.desc = (__u64)info->queue;
532 info->info_block->s.index = i;
533 info->info_block->s.num = info->num;
534 info->info_block->s.avail = (__u64)virtqueue_get_avail(vq);
535 info->info_block->s.used = (__u64)virtqueue_get_used(vq);
536 ccw->count = sizeof(info->info_block->s);
537 }
495 ccw->cmd_code = CCW_CMD_SET_VQ; 538 ccw->cmd_code = CCW_CMD_SET_VQ;
496 ccw->flags = 0; 539 ccw->flags = 0;
497 ccw->count = sizeof(*info->info_block);
498 ccw->cda = (__u32)(unsigned long)(info->info_block); 540 ccw->cda = (__u32)(unsigned long)(info->info_block);
499 err = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_VQ | i); 541 err = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_VQ | i);
500 if (err) { 542 if (err) {
@@ -660,11 +702,12 @@ static void virtio_ccw_reset(struct virtio_device *vdev)
660 kfree(ccw); 702 kfree(ccw);
661} 703}
662 704
663static u32 virtio_ccw_get_features(struct virtio_device *vdev) 705static u64 virtio_ccw_get_features(struct virtio_device *vdev)
664{ 706{
665 struct virtio_ccw_device *vcdev = to_vc_device(vdev); 707 struct virtio_ccw_device *vcdev = to_vc_device(vdev);
666 struct virtio_feature_desc *features; 708 struct virtio_feature_desc *features;
667 int ret, rc; 709 int ret;
710 u64 rc;
668 struct ccw1 *ccw; 711 struct ccw1 *ccw;
669 712
670 ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL); 713 ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
@@ -677,7 +720,6 @@ static u32 virtio_ccw_get_features(struct virtio_device *vdev)
677 goto out_free; 720 goto out_free;
678 } 721 }
679 /* Read the feature bits from the host. */ 722 /* Read the feature bits from the host. */
680 /* TODO: Features > 32 bits */
681 features->index = 0; 723 features->index = 0;
682 ccw->cmd_code = CCW_CMD_READ_FEAT; 724 ccw->cmd_code = CCW_CMD_READ_FEAT;
683 ccw->flags = 0; 725 ccw->flags = 0;
@@ -691,46 +733,79 @@ static u32 virtio_ccw_get_features(struct virtio_device *vdev)
691 733
692 rc = le32_to_cpu(features->features); 734 rc = le32_to_cpu(features->features);
693 735
736 if (vcdev->revision == 0)
737 goto out_free;
738
739 /* Read second half of the feature bits from the host. */
740 features->index = 1;
741 ccw->cmd_code = CCW_CMD_READ_FEAT;
742 ccw->flags = 0;
743 ccw->count = sizeof(*features);
744 ccw->cda = (__u32)(unsigned long)features;
745 ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_FEAT);
746 if (ret == 0)
747 rc |= (u64)le32_to_cpu(features->features) << 32;
748
694out_free: 749out_free:
695 kfree(features); 750 kfree(features);
696 kfree(ccw); 751 kfree(ccw);
697 return rc; 752 return rc;
698} 753}
699 754
700static void virtio_ccw_finalize_features(struct virtio_device *vdev) 755static int virtio_ccw_finalize_features(struct virtio_device *vdev)
701{ 756{
702 struct virtio_ccw_device *vcdev = to_vc_device(vdev); 757 struct virtio_ccw_device *vcdev = to_vc_device(vdev);
703 struct virtio_feature_desc *features; 758 struct virtio_feature_desc *features;
704 int i;
705 struct ccw1 *ccw; 759 struct ccw1 *ccw;
760 int ret;
761
762 if (vcdev->revision >= 1 &&
763 !__virtio_test_bit(vdev, VIRTIO_F_VERSION_1)) {
764 dev_err(&vdev->dev, "virtio: device uses revision 1 "
765 "but does not have VIRTIO_F_VERSION_1\n");
766 return -EINVAL;
767 }
706 768
707 ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL); 769 ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
708 if (!ccw) 770 if (!ccw)
709 return; 771 return -ENOMEM;
710 772
711 features = kzalloc(sizeof(*features), GFP_DMA | GFP_KERNEL); 773 features = kzalloc(sizeof(*features), GFP_DMA | GFP_KERNEL);
712 if (!features) 774 if (!features) {
775 ret = -ENOMEM;
713 goto out_free; 776 goto out_free;
714 777 }
715 /* Give virtio_ring a chance to accept features. */ 778 /* Give virtio_ring a chance to accept features. */
716 vring_transport_features(vdev); 779 vring_transport_features(vdev);
717 780
718 for (i = 0; i < sizeof(*vdev->features) / sizeof(features->features); 781 features->index = 0;
719 i++) { 782 features->features = cpu_to_le32((u32)vdev->features);
720 int highbits = i % 2 ? 32 : 0; 783 /* Write the first half of the feature bits to the host. */
721 features->index = i; 784 ccw->cmd_code = CCW_CMD_WRITE_FEAT;
722 features->features = cpu_to_le32(vdev->features[i / 2] 785 ccw->flags = 0;
723 >> highbits); 786 ccw->count = sizeof(*features);
724 /* Write the feature bits to the host. */ 787 ccw->cda = (__u32)(unsigned long)features;
725 ccw->cmd_code = CCW_CMD_WRITE_FEAT; 788 ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_FEAT);
726 ccw->flags = 0; 789 if (ret)
727 ccw->count = sizeof(*features); 790 goto out_free;
728 ccw->cda = (__u32)(unsigned long)features; 791
729 ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_FEAT); 792 if (vcdev->revision == 0)
730 } 793 goto out_free;
794
795 features->index = 1;
796 features->features = cpu_to_le32(vdev->features >> 32);
797 /* Write the second half of the feature bits to the host. */
798 ccw->cmd_code = CCW_CMD_WRITE_FEAT;
799 ccw->flags = 0;
800 ccw->count = sizeof(*features);
801 ccw->cda = (__u32)(unsigned long)features;
802 ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_FEAT);
803
731out_free: 804out_free:
732 kfree(features); 805 kfree(features);
733 kfree(ccw); 806 kfree(ccw);
807
808 return ret;
734} 809}
735 810
736static void virtio_ccw_get_config(struct virtio_device *vdev, 811static void virtio_ccw_get_config(struct virtio_device *vdev,
@@ -806,7 +881,9 @@ static u8 virtio_ccw_get_status(struct virtio_device *vdev)
806static void virtio_ccw_set_status(struct virtio_device *vdev, u8 status) 881static void virtio_ccw_set_status(struct virtio_device *vdev, u8 status)
807{ 882{
808 struct virtio_ccw_device *vcdev = to_vc_device(vdev); 883 struct virtio_ccw_device *vcdev = to_vc_device(vdev);
884 u8 old_status = *vcdev->status;
809 struct ccw1 *ccw; 885 struct ccw1 *ccw;
886 int ret;
810 887
811 ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL); 888 ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
812 if (!ccw) 889 if (!ccw)
@@ -818,7 +895,10 @@ static void virtio_ccw_set_status(struct virtio_device *vdev, u8 status)
818 ccw->flags = 0; 895 ccw->flags = 0;
819 ccw->count = sizeof(status); 896 ccw->count = sizeof(status);
820 ccw->cda = (__u32)(unsigned long)vcdev->status; 897 ccw->cda = (__u32)(unsigned long)vcdev->status;
821 ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_STATUS); 898 ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_STATUS);
899 /* Write failed? We assume status is unchanged. */
900 if (ret)
901 *vcdev->status = old_status;
822 kfree(ccw); 902 kfree(ccw);
823} 903}
824 904
@@ -919,6 +999,7 @@ static void virtio_ccw_int_handler(struct ccw_device *cdev,
919 case VIRTIO_CCW_DOING_RESET: 999 case VIRTIO_CCW_DOING_RESET:
920 case VIRTIO_CCW_DOING_READ_VQ_CONF: 1000 case VIRTIO_CCW_DOING_READ_VQ_CONF:
921 case VIRTIO_CCW_DOING_SET_IND_ADAPTER: 1001 case VIRTIO_CCW_DOING_SET_IND_ADAPTER:
1002 case VIRTIO_CCW_DOING_SET_VIRTIO_REV:
922 vcdev->curr_io &= ~activity; 1003 vcdev->curr_io &= ~activity;
923 wake_up(&vcdev->wait_q); 1004 wake_up(&vcdev->wait_q);
924 break; 1005 break;
@@ -1034,6 +1115,51 @@ static int virtio_ccw_offline(struct ccw_device *cdev)
1034 return 0; 1115 return 0;
1035} 1116}
1036 1117
1118static int virtio_ccw_set_transport_rev(struct virtio_ccw_device *vcdev)
1119{
1120 struct virtio_rev_info *rev;
1121 struct ccw1 *ccw;
1122 int ret;
1123
1124 ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
1125 if (!ccw)
1126 return -ENOMEM;
1127 rev = kzalloc(sizeof(*rev), GFP_DMA | GFP_KERNEL);
1128 if (!rev) {
1129 kfree(ccw);
1130 return -ENOMEM;
1131 }
1132
1133 /* Set transport revision */
1134 ccw->cmd_code = CCW_CMD_SET_VIRTIO_REV;
1135 ccw->flags = 0;
1136 ccw->count = sizeof(*rev);
1137 ccw->cda = (__u32)(unsigned long)rev;
1138
1139 vcdev->revision = VIRTIO_CCW_REV_MAX;
1140 do {
1141 rev->revision = vcdev->revision;
1142 /* none of our supported revisions carry payload */
1143 rev->length = 0;
1144 ret = ccw_io_helper(vcdev, ccw,
1145 VIRTIO_CCW_DOING_SET_VIRTIO_REV);
1146 if (ret == -EOPNOTSUPP) {
1147 if (vcdev->revision == 0)
1148 /*
1149 * The host device does not support setting
1150 * the revision: let's operate it in legacy
1151 * mode.
1152 */
1153 ret = 0;
1154 else
1155 vcdev->revision--;
1156 }
1157 } while (ret == -EOPNOTSUPP);
1158
1159 kfree(ccw);
1160 kfree(rev);
1161 return ret;
1162}
1037 1163
1038static int virtio_ccw_online(struct ccw_device *cdev) 1164static int virtio_ccw_online(struct ccw_device *cdev)
1039{ 1165{
@@ -1074,6 +1200,15 @@ static int virtio_ccw_online(struct ccw_device *cdev)
1074 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 1200 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
1075 vcdev->vdev.id.vendor = cdev->id.cu_type; 1201 vcdev->vdev.id.vendor = cdev->id.cu_type;
1076 vcdev->vdev.id.device = cdev->id.cu_model; 1202 vcdev->vdev.id.device = cdev->id.cu_model;
1203
1204 if (virtio_device_is_legacy_only(vcdev->vdev.id)) {
1205 vcdev->revision = 0;
1206 } else {
1207 ret = virtio_ccw_set_transport_rev(vcdev);
1208 if (ret)
1209 goto out_free;
1210 }
1211
1077 ret = register_virtio_device(&vcdev->vdev); 1212 ret = register_virtio_device(&vcdev->vdev);
1078 if (ret) { 1213 if (ret) {
1079 dev_warn(&cdev->dev, "Failed to register virtio device: %d\n", 1214 dev_warn(&cdev->dev, "Failed to register virtio device: %d\n",
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 8004b071a9f2..01a73395a017 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -353,9 +353,11 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
353 adapter->ccw_device = ccw_device; 353 adapter->ccw_device = ccw_device;
354 354
355 INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler); 355 INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler);
356 INIT_WORK(&adapter->scan_work, zfcp_fc_scan_ports); 356 INIT_DELAYED_WORK(&adapter->scan_work, zfcp_fc_scan_ports);
357 INIT_WORK(&adapter->ns_up_work, zfcp_fc_sym_name_update); 357 INIT_WORK(&adapter->ns_up_work, zfcp_fc_sym_name_update);
358 358
359 adapter->next_port_scan = jiffies;
360
359 if (zfcp_qdio_setup(adapter)) 361 if (zfcp_qdio_setup(adapter))
360 goto failed; 362 goto failed;
361 363
@@ -420,7 +422,7 @@ void zfcp_adapter_unregister(struct zfcp_adapter *adapter)
420{ 422{
421 struct ccw_device *cdev = adapter->ccw_device; 423 struct ccw_device *cdev = adapter->ccw_device;
422 424
423 cancel_work_sync(&adapter->scan_work); 425 cancel_delayed_work_sync(&adapter->scan_work);
424 cancel_work_sync(&adapter->stat_work); 426 cancel_work_sync(&adapter->stat_work);
425 cancel_work_sync(&adapter->ns_up_work); 427 cancel_work_sync(&adapter->ns_up_work);
426 zfcp_destroy_adapter_work_queue(adapter); 428 zfcp_destroy_adapter_work_queue(adapter);
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index f9879d400d0e..54c7b48fdb46 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -56,8 +56,22 @@ static int zfcp_ccw_activate(struct ccw_device *cdev, int clear, char *tag)
56 zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING); 56 zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING);
57 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, 57 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
58 tag); 58 tag);
59
60 /*
61 * We want to scan ports here, with some random backoff and without
62 * rate limit. Recovery has already scheduled a port scan for us,
63 * but with both random delay and rate limit. Nevertheless we get
64 * what we want here by flushing the scheduled work after sleeping
65 * an equivalent random time.
66 * Let the port scan random delay elapse first. If recovery finishes
67 * up to that point in time, that would be perfect for both recovery
68 * and port scan. If not, i.e. recovery takes ages, there was no
69 * point in waiting a random delay on top of the time consumed by
70 * recovery.
71 */
72 msleep(zfcp_fc_port_scan_backoff());
59 zfcp_erp_wait(adapter); 73 zfcp_erp_wait(adapter);
60 flush_work(&adapter->scan_work); /* ok to call even if nothing queued */ 74 flush_delayed_work(&adapter->scan_work);
61 75
62 zfcp_ccw_adapter_put(adapter); 76 zfcp_ccw_adapter_put(adapter);
63 77
@@ -162,11 +176,19 @@ static int zfcp_ccw_set_online(struct ccw_device *cdev)
162 adapter->req_no = 0; 176 adapter->req_no = 0;
163 177
164 zfcp_ccw_activate(cdev, 0, "ccsonl1"); 178 zfcp_ccw_activate(cdev, 0, "ccsonl1");
165 /* scan for remote ports 179
166 either at the end of any successful adapter recovery 180 /*
167 or only after the adapter recovery for setting a device online */ 181 * We want to scan ports here, always, with some random delay and
182 * without rate limit - basically what zfcp_ccw_activate() has
183 * achieved for us. Not quite! That port scan depended on
184 * !no_auto_port_rescan. So let's cover the no_auto_port_rescan
185 * case here to make sure a port scan is done unconditionally.
186 * Since zfcp_ccw_activate() has waited the desired random time,
187 * we can immediately schedule and flush a port scan for the
188 * remaining cases.
189 */
168 zfcp_fc_inverse_conditional_port_scan(adapter); 190 zfcp_fc_inverse_conditional_port_scan(adapter);
169 flush_work(&adapter->scan_work); /* ok to call even if nothing queued */ 191 flush_delayed_work(&adapter->scan_work);
170 zfcp_ccw_adapter_put(adapter); 192 zfcp_ccw_adapter_put(adapter);
171 return 0; 193 return 0;
172} 194}
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index d91173f326c5..b8e853e53546 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -186,12 +186,13 @@ struct zfcp_adapter {
186 struct fc_host_statistics *fc_stats; 186 struct fc_host_statistics *fc_stats;
187 struct fsf_qtcb_bottom_port *stats_reset_data; 187 struct fsf_qtcb_bottom_port *stats_reset_data;
188 unsigned long stats_reset; 188 unsigned long stats_reset;
189 struct work_struct scan_work; 189 struct delayed_work scan_work;
190 struct work_struct ns_up_work; 190 struct work_struct ns_up_work;
191 struct service_level service_level; 191 struct service_level service_level;
192 struct workqueue_struct *work_queue; 192 struct workqueue_struct *work_queue;
193 struct device_dma_parameters dma_parms; 193 struct device_dma_parameters dma_parms;
194 struct zfcp_fc_events events; 194 struct zfcp_fc_events events;
195 unsigned long next_port_scan;
195}; 196};
196 197
197struct zfcp_port { 198struct zfcp_port {
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index c82fe65c4128..2c5d4567d1da 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -821,11 +821,6 @@ static int zfcp_erp_port_forced_strategy_close(struct zfcp_erp_action *act)
821 return ZFCP_ERP_CONTINUES; 821 return ZFCP_ERP_CONTINUES;
822} 822}
823 823
824static void zfcp_erp_port_strategy_clearstati(struct zfcp_port *port)
825{
826 atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED, &port->status);
827}
828
829static int zfcp_erp_port_forced_strategy(struct zfcp_erp_action *erp_action) 824static int zfcp_erp_port_forced_strategy(struct zfcp_erp_action *erp_action)
830{ 825{
831 struct zfcp_port *port = erp_action->port; 826 struct zfcp_port *port = erp_action->port;
@@ -833,7 +828,6 @@ static int zfcp_erp_port_forced_strategy(struct zfcp_erp_action *erp_action)
833 828
834 switch (erp_action->step) { 829 switch (erp_action->step) {
835 case ZFCP_ERP_STEP_UNINITIALIZED: 830 case ZFCP_ERP_STEP_UNINITIALIZED:
836 zfcp_erp_port_strategy_clearstati(port);
837 if ((status & ZFCP_STATUS_PORT_PHYS_OPEN) && 831 if ((status & ZFCP_STATUS_PORT_PHYS_OPEN) &&
838 (status & ZFCP_STATUS_COMMON_OPEN)) 832 (status & ZFCP_STATUS_COMMON_OPEN))
839 return zfcp_erp_port_forced_strategy_close(erp_action); 833 return zfcp_erp_port_forced_strategy_close(erp_action);
@@ -933,7 +927,6 @@ static int zfcp_erp_port_strategy(struct zfcp_erp_action *erp_action)
933 927
934 switch (erp_action->step) { 928 switch (erp_action->step) {
935 case ZFCP_ERP_STEP_UNINITIALIZED: 929 case ZFCP_ERP_STEP_UNINITIALIZED:
936 zfcp_erp_port_strategy_clearstati(port);
937 if (p_status & ZFCP_STATUS_COMMON_OPEN) 930 if (p_status & ZFCP_STATUS_COMMON_OPEN)
938 return zfcp_erp_port_strategy_close(erp_action); 931 return zfcp_erp_port_strategy_close(erp_action);
939 break; 932 break;
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index a9c570a09b85..5b500652572b 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -85,6 +85,7 @@ extern void zfcp_fc_gs_destroy(struct zfcp_adapter *);
85extern int zfcp_fc_exec_bsg_job(struct fc_bsg_job *); 85extern int zfcp_fc_exec_bsg_job(struct fc_bsg_job *);
86extern int zfcp_fc_timeout_bsg_job(struct fc_bsg_job *); 86extern int zfcp_fc_timeout_bsg_job(struct fc_bsg_job *);
87extern void zfcp_fc_sym_name_update(struct work_struct *); 87extern void zfcp_fc_sym_name_update(struct work_struct *);
88extern unsigned int zfcp_fc_port_scan_backoff(void);
88extern void zfcp_fc_conditional_port_scan(struct zfcp_adapter *); 89extern void zfcp_fc_conditional_port_scan(struct zfcp_adapter *);
89extern void zfcp_fc_inverse_conditional_port_scan(struct zfcp_adapter *); 90extern void zfcp_fc_inverse_conditional_port_scan(struct zfcp_adapter *);
90 91
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index ca28e1c66115..25d49f32ca63 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -12,6 +12,7 @@
12#include <linux/types.h> 12#include <linux/types.h>
13#include <linux/slab.h> 13#include <linux/slab.h>
14#include <linux/utsname.h> 14#include <linux/utsname.h>
15#include <linux/random.h>
15#include <scsi/fc/fc_els.h> 16#include <scsi/fc/fc_els.h>
16#include <scsi/libfc.h> 17#include <scsi/libfc.h>
17#include "zfcp_ext.h" 18#include "zfcp_ext.h"
@@ -31,12 +32,54 @@ module_param_named(no_auto_port_rescan, no_auto_port_rescan, bool, 0600);
31MODULE_PARM_DESC(no_auto_port_rescan, 32MODULE_PARM_DESC(no_auto_port_rescan,
32 "no automatic port_rescan (default off)"); 33 "no automatic port_rescan (default off)");
33 34
35static unsigned int port_scan_backoff = 500;
36module_param(port_scan_backoff, uint, 0600);
37MODULE_PARM_DESC(port_scan_backoff,
38 "upper limit of port scan random backoff in msecs (default 500)");
39
40static unsigned int port_scan_ratelimit = 60000;
41module_param(port_scan_ratelimit, uint, 0600);
42MODULE_PARM_DESC(port_scan_ratelimit,
43 "minimum interval between port scans in msecs (default 60000)");
44
45unsigned int zfcp_fc_port_scan_backoff(void)
46{
47 if (!port_scan_backoff)
48 return 0;
49 return get_random_int() % port_scan_backoff;
50}
51
52static void zfcp_fc_port_scan_time(struct zfcp_adapter *adapter)
53{
54 unsigned long interval = msecs_to_jiffies(port_scan_ratelimit);
55 unsigned long backoff = msecs_to_jiffies(zfcp_fc_port_scan_backoff());
56
57 adapter->next_port_scan = jiffies + interval + backoff;
58}
59
60static void zfcp_fc_port_scan(struct zfcp_adapter *adapter)
61{
62 unsigned long now = jiffies;
63 unsigned long next = adapter->next_port_scan;
64 unsigned long delay = 0, max;
65
66 /* delay only needed within waiting period */
67 if (time_before(now, next)) {
68 delay = next - now;
69 /* paranoia: never ever delay scans longer than specified */
70 max = msecs_to_jiffies(port_scan_ratelimit + port_scan_backoff);
71 delay = min(delay, max);
72 }
73
74 queue_delayed_work(adapter->work_queue, &adapter->scan_work, delay);
75}
76
34void zfcp_fc_conditional_port_scan(struct zfcp_adapter *adapter) 77void zfcp_fc_conditional_port_scan(struct zfcp_adapter *adapter)
35{ 78{
36 if (no_auto_port_rescan) 79 if (no_auto_port_rescan)
37 return; 80 return;
38 81
39 queue_work(adapter->work_queue, &adapter->scan_work); 82 zfcp_fc_port_scan(adapter);
40} 83}
41 84
42void zfcp_fc_inverse_conditional_port_scan(struct zfcp_adapter *adapter) 85void zfcp_fc_inverse_conditional_port_scan(struct zfcp_adapter *adapter)
@@ -44,7 +87,7 @@ void zfcp_fc_inverse_conditional_port_scan(struct zfcp_adapter *adapter)
44 if (!no_auto_port_rescan) 87 if (!no_auto_port_rescan)
45 return; 88 return;
46 89
47 queue_work(adapter->work_queue, &adapter->scan_work); 90 zfcp_fc_port_scan(adapter);
48} 91}
49 92
50/** 93/**
@@ -680,12 +723,15 @@ static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_req *fc_req,
680 */ 723 */
681void zfcp_fc_scan_ports(struct work_struct *work) 724void zfcp_fc_scan_ports(struct work_struct *work)
682{ 725{
683 struct zfcp_adapter *adapter = container_of(work, struct zfcp_adapter, 726 struct delayed_work *dw = to_delayed_work(work);
727 struct zfcp_adapter *adapter = container_of(dw, struct zfcp_adapter,
684 scan_work); 728 scan_work);
685 int ret, i; 729 int ret, i;
686 struct zfcp_fc_req *fc_req; 730 struct zfcp_fc_req *fc_req;
687 int chain, max_entries, buf_num, max_bytes; 731 int chain, max_entries, buf_num, max_bytes;
688 732
733 zfcp_fc_port_scan_time(adapter);
734
689 chain = adapter->adapter_features & FSF_FEATURE_ELS_CT_CHAINED_SBALS; 735 chain = adapter->adapter_features & FSF_FEATURE_ELS_CT_CHAINED_SBALS;
690 buf_num = chain ? ZFCP_FC_GPN_FT_NUM_BUFS : 1; 736 buf_num = chain ? ZFCP_FC_GPN_FT_NUM_BUFS : 1;
691 max_entries = chain ? ZFCP_FC_GPN_FT_MAX_ENT : ZFCP_FC_GPN_FT_ENT_PAGE; 737 max_entries = chain ? ZFCP_FC_GPN_FT_MAX_ENT : ZFCP_FC_GPN_FT_ENT_PAGE;
diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h
index b1d2024ed513..df2b541c8287 100644
--- a/drivers/s390/scsi/zfcp_fc.h
+++ b/drivers/s390/scsi/zfcp_fc.h
@@ -212,8 +212,6 @@ static inline
212void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi, 212void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi,
213 u8 tm_flags) 213 u8 tm_flags)
214{ 214{
215 char tag[2];
216
217 int_to_scsilun(scsi->device->lun, (struct scsi_lun *) &fcp->fc_lun); 215 int_to_scsilun(scsi->device->lun, (struct scsi_lun *) &fcp->fc_lun);
218 216
219 if (unlikely(tm_flags)) { 217 if (unlikely(tm_flags)) {
@@ -221,17 +219,7 @@ void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi,
221 return; 219 return;
222 } 220 }
223 221
224 if (scsi_populate_tag_msg(scsi, tag)) { 222 fcp->fc_pri_ta = FCP_PTA_SIMPLE;
225 switch (tag[0]) {
226 case MSG_ORDERED_TAG:
227 fcp->fc_pri_ta |= FCP_PTA_ORDERED;
228 break;
229 case MSG_SIMPLE_TAG:
230 fcp->fc_pri_ta |= FCP_PTA_SIMPLE;
231 break;
232 };
233 } else
234 fcp->fc_pri_ta = FCP_PTA_SIMPLE;
235 223
236 if (scsi->sc_data_direction == DMA_FROM_DEVICE) 224 if (scsi->sc_data_direction == DMA_FROM_DEVICE)
237 fcp->fc_flags |= FCP_CFL_RDDATA; 225 fcp->fc_flags |= FCP_CFL_RDDATA;
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 0fe8d5d95119..21ec5e2f584c 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -1396,8 +1396,7 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
1396 port->handle = header->port_handle; 1396 port->handle = header->port_handle;
1397 atomic_set_mask(ZFCP_STATUS_COMMON_OPEN | 1397 atomic_set_mask(ZFCP_STATUS_COMMON_OPEN |
1398 ZFCP_STATUS_PORT_PHYS_OPEN, &port->status); 1398 ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
1399 atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED | 1399 atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_BOXED,
1400 ZFCP_STATUS_COMMON_ACCESS_BOXED,
1401 &port->status); 1400 &port->status);
1402 /* check whether D_ID has changed during open */ 1401 /* check whether D_ID has changed during open */
1403 /* 1402 /*
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 7b353647cb90..75f4bfc2b98a 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -32,25 +32,6 @@ static bool allow_lun_scan = 1;
32module_param(allow_lun_scan, bool, 0600); 32module_param(allow_lun_scan, bool, 0600);
33MODULE_PARM_DESC(allow_lun_scan, "For NPIV, scan and attach all storage LUNs"); 33MODULE_PARM_DESC(allow_lun_scan, "For NPIV, scan and attach all storage LUNs");
34 34
35static int zfcp_scsi_change_queue_depth(struct scsi_device *sdev, int depth,
36 int reason)
37{
38 switch (reason) {
39 case SCSI_QDEPTH_DEFAULT:
40 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
41 break;
42 case SCSI_QDEPTH_QFULL:
43 scsi_track_queue_full(sdev, depth);
44 break;
45 case SCSI_QDEPTH_RAMP_UP:
46 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
47 break;
48 default:
49 return -EOPNOTSUPP;
50 }
51 return sdev->queue_depth;
52}
53
54static void zfcp_scsi_slave_destroy(struct scsi_device *sdev) 35static void zfcp_scsi_slave_destroy(struct scsi_device *sdev)
55{ 36{
56 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); 37 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
@@ -66,9 +47,7 @@ static void zfcp_scsi_slave_destroy(struct scsi_device *sdev)
66static int zfcp_scsi_slave_configure(struct scsi_device *sdp) 47static int zfcp_scsi_slave_configure(struct scsi_device *sdp)
67{ 48{
68 if (sdp->tagged_supported) 49 if (sdp->tagged_supported)
69 scsi_adjust_queue_depth(sdp, MSG_SIMPLE_TAG, default_depth); 50 scsi_change_queue_depth(sdp, default_depth);
70 else
71 scsi_adjust_queue_depth(sdp, 0, 1);
72 return 0; 51 return 0;
73} 52}
74 53
@@ -307,7 +286,7 @@ static struct scsi_host_template zfcp_scsi_host_template = {
307 .slave_alloc = zfcp_scsi_slave_alloc, 286 .slave_alloc = zfcp_scsi_slave_alloc,
308 .slave_configure = zfcp_scsi_slave_configure, 287 .slave_configure = zfcp_scsi_slave_configure,
309 .slave_destroy = zfcp_scsi_slave_destroy, 288 .slave_destroy = zfcp_scsi_slave_destroy,
310 .change_queue_depth = zfcp_scsi_change_queue_depth, 289 .change_queue_depth = scsi_change_queue_depth,
311 .proc_name = "zfcp", 290 .proc_name = "zfcp",
312 .can_queue = 4096, 291 .can_queue = 4096,
313 .this_id = -1, 292 .this_id = -1,
@@ -322,6 +301,7 @@ static struct scsi_host_template zfcp_scsi_host_template = {
322 .use_clustering = 1, 301 .use_clustering = 1,
323 .shost_attrs = zfcp_sysfs_shost_attrs, 302 .shost_attrs = zfcp_sysfs_shost_attrs,
324 .sdev_attrs = zfcp_sysfs_sdev_attrs, 303 .sdev_attrs = zfcp_sysfs_sdev_attrs,
304 .track_queue_depth = 1,
325}; 305};
326 306
327/** 307/**
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index 672b57219e11..96a0be13e841 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -73,9 +73,7 @@ ZFCP_DEFINE_ATTR(zfcp_port, port, status, "0x%08x\n",
73ZFCP_DEFINE_ATTR(zfcp_port, port, in_recovery, "%d\n", 73ZFCP_DEFINE_ATTR(zfcp_port, port, in_recovery, "%d\n",
74 (atomic_read(&port->status) & 74 (atomic_read(&port->status) &
75 ZFCP_STATUS_COMMON_ERP_INUSE) != 0); 75 ZFCP_STATUS_COMMON_ERP_INUSE) != 0);
76ZFCP_DEFINE_ATTR(zfcp_port, port, access_denied, "%d\n", 76ZFCP_DEFINE_ATTR_CONST(port, access_denied, "%d\n", 0);
77 (atomic_read(&port->status) &
78 ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0);
79 77
80ZFCP_DEFINE_ATTR(zfcp_unit, unit, status, "0x%08x\n", 78ZFCP_DEFINE_ATTR(zfcp_unit, unit, status, "0x%08x\n",
81 zfcp_unit_sdev_status(unit)); 79 zfcp_unit_sdev_status(unit));
@@ -223,9 +221,13 @@ static ssize_t zfcp_sysfs_port_rescan_store(struct device *dev,
223 if (!adapter) 221 if (!adapter)
224 return -ENODEV; 222 return -ENODEV;
225 223
226 /* sync the user-space- with the kernel-invocation of scan_work */ 224 /*
227 queue_work(adapter->work_queue, &adapter->scan_work); 225 * Users wish is our command: immediately schedule and flush a
228 flush_work(&adapter->scan_work); 226 * worker to conduct a synchronous port scan, that is, neither
227 * a random delay nor a rate limit is applied here.
228 */
229 queue_delayed_work(adapter->work_queue, &adapter->scan_work, 0);
230 flush_delayed_work(&adapter->scan_work);
229 zfcp_ccw_adapter_put(adapter); 231 zfcp_ccw_adapter_put(adapter);
230 232
231 return (ssize_t) count; 233 return (ssize_t) count;
@@ -439,16 +441,15 @@ static ssize_t zfcp_sysfs_scsi_##_name##_show(struct device *dev, \
439{ \ 441{ \
440 struct scsi_device *sdev = to_scsi_device(dev); \ 442 struct scsi_device *sdev = to_scsi_device(dev); \
441 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); \ 443 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); \
442 struct zfcp_port *port = zfcp_sdev->port; \
443 \ 444 \
444 return sprintf(buf, _format, _value); \ 445 return sprintf(buf, _format, _value); \
445} \ 446} \
446static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_scsi_##_name##_show, NULL); 447static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_scsi_##_name##_show, NULL);
447 448
448ZFCP_DEFINE_SCSI_ATTR(hba_id, "%s\n", 449ZFCP_DEFINE_SCSI_ATTR(hba_id, "%s\n",
449 dev_name(&port->adapter->ccw_device->dev)); 450 dev_name(&zfcp_sdev->port->adapter->ccw_device->dev));
450ZFCP_DEFINE_SCSI_ATTR(wwpn, "0x%016llx\n", 451ZFCP_DEFINE_SCSI_ATTR(wwpn, "0x%016llx\n",
451 (unsigned long long) port->wwpn); 452 (unsigned long long) zfcp_sdev->port->wwpn);
452 453
453static ssize_t zfcp_sysfs_scsi_fcp_lun_show(struct device *dev, 454static ssize_t zfcp_sysfs_scsi_fcp_lun_show(struct device *dev,
454 struct device_attribute *attr, 455 struct device_attribute *attr,
@@ -460,6 +461,49 @@ static ssize_t zfcp_sysfs_scsi_fcp_lun_show(struct device *dev,
460} 461}
461static DEVICE_ATTR(fcp_lun, S_IRUGO, zfcp_sysfs_scsi_fcp_lun_show, NULL); 462static DEVICE_ATTR(fcp_lun, S_IRUGO, zfcp_sysfs_scsi_fcp_lun_show, NULL);
462 463
464ZFCP_DEFINE_SCSI_ATTR(zfcp_access_denied, "%d\n",
465 (atomic_read(&zfcp_sdev->status) &
466 ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0);
467
468static ssize_t zfcp_sysfs_scsi_zfcp_failed_show(struct device *dev,
469 struct device_attribute *attr,
470 char *buf)
471{
472 struct scsi_device *sdev = to_scsi_device(dev);
473 unsigned int status = atomic_read(&sdev_to_zfcp(sdev)->status);
474 unsigned int failed = status & ZFCP_STATUS_COMMON_ERP_FAILED ? 1 : 0;
475
476 return sprintf(buf, "%d\n", failed);
477}
478
479static ssize_t zfcp_sysfs_scsi_zfcp_failed_store(struct device *dev,
480 struct device_attribute *attr,
481 const char *buf, size_t count)
482{
483 struct scsi_device *sdev = to_scsi_device(dev);
484 unsigned long val;
485
486 if (kstrtoul(buf, 0, &val) || val != 0)
487 return -EINVAL;
488
489 zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_RUNNING);
490 zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
491 "syufai3");
492 zfcp_erp_wait(sdev_to_zfcp(sdev)->port->adapter);
493
494 return count;
495}
496static DEVICE_ATTR(zfcp_failed, S_IWUSR | S_IRUGO,
497 zfcp_sysfs_scsi_zfcp_failed_show,
498 zfcp_sysfs_scsi_zfcp_failed_store);
499
500ZFCP_DEFINE_SCSI_ATTR(zfcp_in_recovery, "%d\n",
501 (atomic_read(&zfcp_sdev->status) &
502 ZFCP_STATUS_COMMON_ERP_INUSE) != 0);
503
504ZFCP_DEFINE_SCSI_ATTR(zfcp_status, "0x%08x\n",
505 atomic_read(&zfcp_sdev->status));
506
463struct device_attribute *zfcp_sysfs_sdev_attrs[] = { 507struct device_attribute *zfcp_sysfs_sdev_attrs[] = {
464 &dev_attr_fcp_lun, 508 &dev_attr_fcp_lun,
465 &dev_attr_wwpn, 509 &dev_attr_wwpn,
@@ -467,6 +511,10 @@ struct device_attribute *zfcp_sysfs_sdev_attrs[] = {
467 &dev_attr_read_latency, 511 &dev_attr_read_latency,
468 &dev_attr_write_latency, 512 &dev_attr_write_latency,
469 &dev_attr_cmd_latency, 513 &dev_attr_cmd_latency,
514 &dev_attr_zfcp_access_denied,
515 &dev_attr_zfcp_failed,
516 &dev_attr_zfcp_in_recovery,
517 &dev_attr_zfcp_status,
470 NULL 518 NULL
471}; 519};
472 520
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 0a7325361d29..cd4129ff7ae4 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -189,19 +189,6 @@ static ssize_t twa_show_stats(struct device *dev,
189 return len; 189 return len;
190} /* End twa_show_stats() */ 190} /* End twa_show_stats() */
191 191
192/* This function will set a devices queue depth */
193static int twa_change_queue_depth(struct scsi_device *sdev, int queue_depth,
194 int reason)
195{
196 if (reason != SCSI_QDEPTH_DEFAULT)
197 return -EOPNOTSUPP;
198
199 if (queue_depth > TW_Q_LENGTH-2)
200 queue_depth = TW_Q_LENGTH-2;
201 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
202 return queue_depth;
203} /* End twa_change_queue_depth() */
204
205/* Create sysfs 'stats' entry */ 192/* Create sysfs 'stats' entry */
206static struct device_attribute twa_host_stats_attr = { 193static struct device_attribute twa_host_stats_attr = {
207 .attr = { 194 .attr = {
@@ -2016,7 +2003,7 @@ static struct scsi_host_template driver_template = {
2016 .queuecommand = twa_scsi_queue, 2003 .queuecommand = twa_scsi_queue,
2017 .eh_host_reset_handler = twa_scsi_eh_reset, 2004 .eh_host_reset_handler = twa_scsi_eh_reset,
2018 .bios_param = twa_scsi_biosparam, 2005 .bios_param = twa_scsi_biosparam,
2019 .change_queue_depth = twa_change_queue_depth, 2006 .change_queue_depth = scsi_change_queue_depth,
2020 .can_queue = TW_Q_LENGTH-2, 2007 .can_queue = TW_Q_LENGTH-2,
2021 .slave_configure = twa_slave_configure, 2008 .slave_configure = twa_slave_configure,
2022 .this_id = -1, 2009 .this_id = -1,
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index 6da6cec9a651..2361772d5909 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -191,19 +191,6 @@ static ssize_t twl_show_stats(struct device *dev,
191 return len; 191 return len;
192} /* End twl_show_stats() */ 192} /* End twl_show_stats() */
193 193
194/* This function will set a devices queue depth */
195static int twl_change_queue_depth(struct scsi_device *sdev, int queue_depth,
196 int reason)
197{
198 if (reason != SCSI_QDEPTH_DEFAULT)
199 return -EOPNOTSUPP;
200
201 if (queue_depth > TW_Q_LENGTH-2)
202 queue_depth = TW_Q_LENGTH-2;
203 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
204 return queue_depth;
205} /* End twl_change_queue_depth() */
206
207/* stats sysfs attribute initializer */ 194/* stats sysfs attribute initializer */
208static struct device_attribute twl_host_stats_attr = { 195static struct device_attribute twl_host_stats_attr = {
209 .attr = { 196 .attr = {
@@ -1590,7 +1577,7 @@ static struct scsi_host_template driver_template = {
1590 .queuecommand = twl_scsi_queue, 1577 .queuecommand = twl_scsi_queue,
1591 .eh_host_reset_handler = twl_scsi_eh_reset, 1578 .eh_host_reset_handler = twl_scsi_eh_reset,
1592 .bios_param = twl_scsi_biosparam, 1579 .bios_param = twl_scsi_biosparam,
1593 .change_queue_depth = twl_change_queue_depth, 1580 .change_queue_depth = scsi_change_queue_depth,
1594 .can_queue = TW_Q_LENGTH-2, 1581 .can_queue = TW_Q_LENGTH-2,
1595 .slave_configure = twl_slave_configure, 1582 .slave_configure = twl_slave_configure,
1596 .this_id = -1, 1583 .this_id = -1,
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index 752624e6bc00..c75f2048319f 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -523,19 +523,6 @@ static ssize_t tw_show_stats(struct device *dev, struct device_attribute *attr,
523 return len; 523 return len;
524} /* End tw_show_stats() */ 524} /* End tw_show_stats() */
525 525
526/* This function will set a devices queue depth */
527static int tw_change_queue_depth(struct scsi_device *sdev, int queue_depth,
528 int reason)
529{
530 if (reason != SCSI_QDEPTH_DEFAULT)
531 return -EOPNOTSUPP;
532
533 if (queue_depth > TW_Q_LENGTH-2)
534 queue_depth = TW_Q_LENGTH-2;
535 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
536 return queue_depth;
537} /* End tw_change_queue_depth() */
538
539/* Create sysfs 'stats' entry */ 526/* Create sysfs 'stats' entry */
540static struct device_attribute tw_host_stats_attr = { 527static struct device_attribute tw_host_stats_attr = {
541 .attr = { 528 .attr = {
@@ -2270,7 +2257,7 @@ static struct scsi_host_template driver_template = {
2270 .queuecommand = tw_scsi_queue, 2257 .queuecommand = tw_scsi_queue,
2271 .eh_host_reset_handler = tw_scsi_eh_reset, 2258 .eh_host_reset_handler = tw_scsi_eh_reset,
2272 .bios_param = tw_scsi_biosparam, 2259 .bios_param = tw_scsi_biosparam,
2273 .change_queue_depth = tw_change_queue_depth, 2260 .change_queue_depth = scsi_change_queue_depth,
2274 .can_queue = TW_Q_LENGTH-2, 2261 .can_queue = TW_Q_LENGTH-2,
2275 .slave_configure = tw_slave_configure, 2262 .slave_configure = tw_slave_configure,
2276 .this_id = -1, 2263 .this_id = -1,
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index fabd4be2c985..aa915da2a5e5 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -175,7 +175,7 @@ STATIC void NCR_700_chip_reset(struct Scsi_Host *host);
175STATIC int NCR_700_slave_alloc(struct scsi_device *SDpnt); 175STATIC int NCR_700_slave_alloc(struct scsi_device *SDpnt);
176STATIC int NCR_700_slave_configure(struct scsi_device *SDpnt); 176STATIC int NCR_700_slave_configure(struct scsi_device *SDpnt);
177STATIC void NCR_700_slave_destroy(struct scsi_device *SDpnt); 177STATIC void NCR_700_slave_destroy(struct scsi_device *SDpnt);
178static int NCR_700_change_queue_depth(struct scsi_device *SDpnt, int depth, int reason); 178static int NCR_700_change_queue_depth(struct scsi_device *SDpnt, int depth);
179static int NCR_700_change_queue_type(struct scsi_device *SDpnt, int depth); 179static int NCR_700_change_queue_type(struct scsi_device *SDpnt, int depth);
180 180
181STATIC struct device_attribute *NCR_700_dev_attrs[]; 181STATIC struct device_attribute *NCR_700_dev_attrs[];
@@ -327,6 +327,7 @@ NCR_700_detect(struct scsi_host_template *tpnt,
327 tpnt->slave_alloc = NCR_700_slave_alloc; 327 tpnt->slave_alloc = NCR_700_slave_alloc;
328 tpnt->change_queue_depth = NCR_700_change_queue_depth; 328 tpnt->change_queue_depth = NCR_700_change_queue_depth;
329 tpnt->change_queue_type = NCR_700_change_queue_type; 329 tpnt->change_queue_type = NCR_700_change_queue_type;
330 tpnt->use_blk_tags = 1;
330 331
331 if(tpnt->name == NULL) 332 if(tpnt->name == NULL)
332 tpnt->name = "53c700"; 333 tpnt->name = "53c700";
@@ -592,19 +593,14 @@ NCR_700_scsi_done(struct NCR_700_Host_Parameters *hostdata,
592 hostdata->cmd = NULL; 593 hostdata->cmd = NULL;
593 594
594 if(SCp != NULL) { 595 if(SCp != NULL) {
595 struct NCR_700_command_slot *slot = 596 struct NCR_700_command_slot *slot =
596 (struct NCR_700_command_slot *)SCp->host_scribble; 597 (struct NCR_700_command_slot *)SCp->host_scribble;
597 598
598 dma_unmap_single(hostdata->dev, slot->pCmd, 599 dma_unmap_single(hostdata->dev, slot->pCmd,
599 MAX_COMMAND_SIZE, DMA_TO_DEVICE); 600 MAX_COMMAND_SIZE, DMA_TO_DEVICE);
600 if (slot->flags == NCR_700_FLAG_AUTOSENSE) { 601 if (slot->flags == NCR_700_FLAG_AUTOSENSE) {
601 char *cmnd = NCR_700_get_sense_cmnd(SCp->device); 602 char *cmnd = NCR_700_get_sense_cmnd(SCp->device);
602#ifdef NCR_700_DEBUG
603 printk(" ORIGINAL CMD %p RETURNED %d, new return is %d sense is\n",
604 SCp, SCp->cmnd[7], result);
605 scsi_print_sense("53c700", SCp);
606 603
607#endif
608 dma_unmap_single(hostdata->dev, slot->dma_handle, 604 dma_unmap_single(hostdata->dev, slot->dma_handle,
609 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE); 605 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
610 /* restore the old result if the request sense was 606 /* restore the old result if the request sense was
@@ -906,8 +902,10 @@ process_message(struct Scsi_Host *host, struct NCR_700_Host_Parameters *hostdata
906 /* we're done negotiating */ 902 /* we're done negotiating */
907 NCR_700_set_tag_neg_state(SCp->device, NCR_700_FINISHED_TAG_NEGOTIATION); 903 NCR_700_set_tag_neg_state(SCp->device, NCR_700_FINISHED_TAG_NEGOTIATION);
908 hostdata->tag_negotiated &= ~(1<<scmd_id(SCp)); 904 hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
905
909 SCp->device->tagged_supported = 0; 906 SCp->device->tagged_supported = 0;
910 scsi_deactivate_tcq(SCp->device, host->cmd_per_lun); 907 scsi_change_queue_depth(SCp->device, host->cmd_per_lun);
908 scsi_set_tag_type(SCp->device, 0);
911 } else { 909 } else {
912 shost_printk(KERN_WARNING, host, 910 shost_printk(KERN_WARNING, host,
913 "(%d:%d) Unexpected REJECT Message %s\n", 911 "(%d:%d) Unexpected REJECT Message %s\n",
@@ -1432,7 +1430,7 @@ NCR_700_start_command(struct scsi_cmnd *SCp)
1432 if((hostdata->tag_negotiated & (1<<scmd_id(SCp))) 1430 if((hostdata->tag_negotiated & (1<<scmd_id(SCp)))
1433 && (slot->tag != SCSI_NO_TAG && SCp->cmnd[0] != REQUEST_SENSE && 1431 && (slot->tag != SCSI_NO_TAG && SCp->cmnd[0] != REQUEST_SENSE &&
1434 slot->flags != NCR_700_FLAG_AUTOSENSE)) { 1432 slot->flags != NCR_700_FLAG_AUTOSENSE)) {
1435 count += scsi_populate_tag_msg(SCp, &hostdata->msgout[count]); 1433 count += spi_populate_tag_msg(&hostdata->msgout[count], SCp);
1436 } 1434 }
1437 1435
1438 if(hostdata->fast && 1436 if(hostdata->fast &&
@@ -1772,7 +1770,7 @@ NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *)
1772 */ 1770 */
1773 if(NCR_700_get_depth(SCp->device) != 0 1771 if(NCR_700_get_depth(SCp->device) != 0
1774 && (!(hostdata->tag_negotiated & (1<<scmd_id(SCp))) 1772 && (!(hostdata->tag_negotiated & (1<<scmd_id(SCp)))
1775 || !blk_rq_tagged(SCp->request))) { 1773 || !(SCp->flags & SCMD_TAGGED))) {
1776 CDEBUG(KERN_ERR, SCp, "has non zero depth %d\n", 1774 CDEBUG(KERN_ERR, SCp, "has non zero depth %d\n",
1777 NCR_700_get_depth(SCp->device)); 1775 NCR_700_get_depth(SCp->device));
1778 return SCSI_MLQUEUE_DEVICE_BUSY; 1776 return SCSI_MLQUEUE_DEVICE_BUSY;
@@ -1800,7 +1798,7 @@ NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *)
1800 printk("53c700: scsi%d, command ", SCp->device->host->host_no); 1798 printk("53c700: scsi%d, command ", SCp->device->host->host_no);
1801 scsi_print_command(SCp); 1799 scsi_print_command(SCp);
1802#endif 1800#endif
1803 if(blk_rq_tagged(SCp->request) 1801 if ((SCp->flags & SCMD_TAGGED)
1804 && (hostdata->tag_negotiated &(1<<scmd_id(SCp))) == 0 1802 && (hostdata->tag_negotiated &(1<<scmd_id(SCp))) == 0
1805 && NCR_700_get_tag_neg_state(SCp->device) == NCR_700_START_TAG_NEGOTIATION) { 1803 && NCR_700_get_tag_neg_state(SCp->device) == NCR_700_START_TAG_NEGOTIATION) {
1806 scmd_printk(KERN_ERR, SCp, "Enabling Tag Command Queuing\n"); 1804 scmd_printk(KERN_ERR, SCp, "Enabling Tag Command Queuing\n");
@@ -1814,7 +1812,7 @@ NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *)
1814 * 1812 *
1815 * FIXME: This will royally screw up on multiple LUN devices 1813 * FIXME: This will royally screw up on multiple LUN devices
1816 * */ 1814 * */
1817 if(!blk_rq_tagged(SCp->request) 1815 if (!(SCp->flags & SCMD_TAGGED)
1818 && (hostdata->tag_negotiated &(1<<scmd_id(SCp)))) { 1816 && (hostdata->tag_negotiated &(1<<scmd_id(SCp)))) {
1819 scmd_printk(KERN_INFO, SCp, "Disabling Tag Command Queuing\n"); 1817 scmd_printk(KERN_INFO, SCp, "Disabling Tag Command Queuing\n");
1820 hostdata->tag_negotiated &= ~(1<<scmd_id(SCp)); 1818 hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
@@ -1911,9 +1909,7 @@ NCR_700_abort(struct scsi_cmnd * SCp)
1911{ 1909{
1912 struct NCR_700_command_slot *slot; 1910 struct NCR_700_command_slot *slot;
1913 1911
1914 scmd_printk(KERN_INFO, SCp, 1912 scmd_printk(KERN_INFO, SCp, "abort command\n");
1915 "New error handler wants to abort command\n\t");
1916 scsi_print_command(SCp);
1917 1913
1918 slot = (struct NCR_700_command_slot *)SCp->host_scribble; 1914 slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1919 1915
@@ -2056,13 +2052,10 @@ NCR_700_slave_configure(struct scsi_device *SDp)
2056 2052
2057 /* to do here: allocate memory; build a queue_full list */ 2053 /* to do here: allocate memory; build a queue_full list */
2058 if(SDp->tagged_supported) { 2054 if(SDp->tagged_supported) {
2059 scsi_set_tag_type(SDp, MSG_ORDERED_TAG); 2055 scsi_change_queue_depth(SDp, NCR_700_DEFAULT_TAGS);
2060 scsi_activate_tcq(SDp, NCR_700_DEFAULT_TAGS);
2061 NCR_700_set_tag_neg_state(SDp, NCR_700_START_TAG_NEGOTIATION); 2056 NCR_700_set_tag_neg_state(SDp, NCR_700_START_TAG_NEGOTIATION);
2062 } else {
2063 /* initialise to default depth */
2064 scsi_adjust_queue_depth(SDp, 0, SDp->host->cmd_per_lun);
2065 } 2057 }
2058
2066 if(hostdata->fast) { 2059 if(hostdata->fast) {
2067 /* Find the correct offset and period via domain validation */ 2060 /* Find the correct offset and period via domain validation */
2068 if (!spi_initial_dv(SDp->sdev_target)) 2061 if (!spi_initial_dv(SDp->sdev_target))
@@ -2082,16 +2075,11 @@ NCR_700_slave_destroy(struct scsi_device *SDp)
2082} 2075}
2083 2076
2084static int 2077static int
2085NCR_700_change_queue_depth(struct scsi_device *SDp, int depth, int reason) 2078NCR_700_change_queue_depth(struct scsi_device *SDp, int depth)
2086{ 2079{
2087 if (reason != SCSI_QDEPTH_DEFAULT)
2088 return -EOPNOTSUPP;
2089
2090 if (depth > NCR_700_MAX_TAGS) 2080 if (depth > NCR_700_MAX_TAGS)
2091 depth = NCR_700_MAX_TAGS; 2081 depth = NCR_700_MAX_TAGS;
2092 2082 return scsi_change_queue_depth(SDp, depth);
2093 scsi_adjust_queue_depth(SDp, scsi_get_tag_type(SDp), depth);
2094 return depth;
2095} 2083}
2096 2084
2097static int NCR_700_change_queue_type(struct scsi_device *SDp, int tag_type) 2085static int NCR_700_change_queue_type(struct scsi_device *SDp, int tag_type)
@@ -2101,8 +2089,6 @@ static int NCR_700_change_queue_type(struct scsi_device *SDp, int tag_type)
2101 struct NCR_700_Host_Parameters *hostdata = 2089 struct NCR_700_Host_Parameters *hostdata =
2102 (struct NCR_700_Host_Parameters *)SDp->host->hostdata[0]; 2090 (struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
2103 2091
2104 scsi_set_tag_type(SDp, tag_type);
2105
2106 /* We have a global (per target) flag to track whether TCQ is 2092 /* We have a global (per target) flag to track whether TCQ is
2107 * enabled, so we'll be turning it off for the entire target here. 2093 * enabled, so we'll be turning it off for the entire target here.
2108 * our tag algorithm will fail if we mix tagged and untagged commands, 2094 * our tag algorithm will fail if we mix tagged and untagged commands,
@@ -2110,15 +2096,16 @@ static int NCR_700_change_queue_type(struct scsi_device *SDp, int tag_type)
2110 if (change_tag) 2096 if (change_tag)
2111 scsi_target_quiesce(SDp->sdev_target); 2097 scsi_target_quiesce(SDp->sdev_target);
2112 2098
2099 scsi_set_tag_type(SDp, tag_type);
2113 if (!tag_type) { 2100 if (!tag_type) {
2114 /* shift back to the default unqueued number of commands 2101 /* shift back to the default unqueued number of commands
2115 * (the user can still raise this) */ 2102 * (the user can still raise this) */
2116 scsi_deactivate_tcq(SDp, SDp->host->cmd_per_lun); 2103 scsi_change_queue_depth(SDp, SDp->host->cmd_per_lun);
2117 hostdata->tag_negotiated &= ~(1 << sdev_id(SDp)); 2104 hostdata->tag_negotiated &= ~(1 << sdev_id(SDp));
2118 } else { 2105 } else {
2119 /* Here, we cleared the negotiation flag above, so this 2106 /* Here, we cleared the negotiation flag above, so this
2120 * will force the driver to renegotiate */ 2107 * will force the driver to renegotiate */
2121 scsi_activate_tcq(SDp, SDp->queue_depth); 2108 scsi_change_queue_depth(SDp, SDp->queue_depth);
2122 if (change_tag) 2109 if (change_tag)
2123 NCR_700_set_tag_neg_state(SDp, NCR_700_START_TAG_NEGOTIATION); 2110 NCR_700_set_tag_neg_state(SDp, NCR_700_START_TAG_NEGOTIATION);
2124 } 2111 }
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index 64c75143c89a..8d66a6469e29 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -2327,12 +2327,12 @@ static int blogic_slaveconfig(struct scsi_device *dev)
2327 if (qdepth == 0) 2327 if (qdepth == 0)
2328 qdepth = BLOGIC_MAX_AUTO_TAG_DEPTH; 2328 qdepth = BLOGIC_MAX_AUTO_TAG_DEPTH;
2329 adapter->qdepth[tgt_id] = qdepth; 2329 adapter->qdepth[tgt_id] = qdepth;
2330 scsi_adjust_queue_depth(dev, MSG_SIMPLE_TAG, qdepth); 2330 scsi_change_queue_depth(dev, qdepth);
2331 } else { 2331 } else {
2332 adapter->tagq_ok &= ~(1 << tgt_id); 2332 adapter->tagq_ok &= ~(1 << tgt_id);
2333 qdepth = adapter->untag_qdepth; 2333 qdepth = adapter->untag_qdepth;
2334 adapter->qdepth[tgt_id] = qdepth; 2334 adapter->qdepth[tgt_id] = qdepth;
2335 scsi_adjust_queue_depth(dev, 0, qdepth); 2335 scsi_change_queue_depth(dev, qdepth);
2336 } 2336 }
2337 qdepth = 0; 2337 qdepth = 0;
2338 for (tgt_id = 0; tgt_id < adapter->maxdev; tgt_id++) 2338 for (tgt_id = 0; tgt_id < adapter->maxdev; tgt_id++)
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 3a820f61ce65..86cf3d671eb9 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1341,13 +1341,15 @@ config SCSI_DC395x
1341 To compile this driver as a module, choose M here: the 1341 To compile this driver as a module, choose M here: the
1342 module will be called dc395x. 1342 module will be called dc395x.
1343 1343
1344config SCSI_DC390T 1344config SCSI_AM53C974
1345 tristate "Tekram DC390(T) and Am53/79C974 SCSI support" 1345 tristate "Tekram DC390(T) and Am53/79C974 SCSI support (new driver)"
1346 depends on PCI && SCSI 1346 depends on PCI && SCSI
1347 select SCSI_SPI_ATTRS
1347 ---help--- 1348 ---help---
1348 This driver supports PCI SCSI host adapters based on the Am53C974A 1349 This driver supports PCI SCSI host adapters based on the Am53C974A
1349 chip, e.g. Tekram DC390(T), DawiControl 2974 and some onboard 1350 chip, e.g. Tekram DC390(T), DawiControl 2974 and some onboard
1350 PCscsi/PCnet (Am53/79C974) solutions. 1351 PCscsi/PCnet (Am53/79C974) solutions.
1352 This is a new implementation base on the generic esp_scsi driver.
1351 1353
1352 Documentation can be found in <file:Documentation/scsi/tmscsim.txt>. 1354 Documentation can be found in <file:Documentation/scsi/tmscsim.txt>.
1353 1355
@@ -1355,7 +1357,7 @@ config SCSI_DC390T
1355 based on NCR/Symbios chips. Use "NCR53C8XX SCSI support" for those. 1357 based on NCR/Symbios chips. Use "NCR53C8XX SCSI support" for those.
1356 1358
1357 To compile this driver as a module, choose M here: the 1359 To compile this driver as a module, choose M here: the
1358 module will be called tmscsim. 1360 module will be called am53c974.
1359 1361
1360config SCSI_T128 1362config SCSI_T128
1361 tristate "Trantor T128/T128F/T228 SCSI support" 1363 tristate "Trantor T128/T128F/T228 SCSI support"
@@ -1451,6 +1453,14 @@ config SCSI_NSP32
1451 To compile this driver as a module, choose M here: the 1453 To compile this driver as a module, choose M here: the
1452 module will be called nsp32. 1454 module will be called nsp32.
1453 1455
1456config SCSI_WD719X
1457 tristate "Western Digital WD7193/7197/7296 support"
1458 depends on PCI && SCSI
1459 select EEPROM_93CX6
1460 ---help---
1461 This is a driver for Western Digital WD7193, WD7197 and WD7296 PCI
1462 SCSI controllers (based on WD33C296A chip).
1463
1454config SCSI_DEBUG 1464config SCSI_DEBUG
1455 tristate "SCSI debugging host simulator" 1465 tristate "SCSI debugging host simulator"
1456 depends on SCSI 1466 depends on SCSI
@@ -1615,7 +1625,7 @@ config ATARI_SCSI_RESET_BOOT
1615 that leave the devices with SCSI operations partway completed. 1625 that leave the devices with SCSI operations partway completed.
1616 1626
1617config MAC_SCSI 1627config MAC_SCSI
1618 bool "Macintosh NCR5380 SCSI" 1628 tristate "Macintosh NCR5380 SCSI"
1619 depends on MAC && SCSI=y 1629 depends on MAC && SCSI=y
1620 select SCSI_SPI_ATTRS 1630 select SCSI_SPI_ATTRS
1621 help 1631 help
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 59f1ce6df2d6..58158f11ed7b 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -100,7 +100,7 @@ obj-$(CONFIG_SCSI_EATA_PIO) += eata_pio.o
100obj-$(CONFIG_SCSI_7000FASST) += wd7000.o 100obj-$(CONFIG_SCSI_7000FASST) += wd7000.o
101obj-$(CONFIG_SCSI_EATA) += eata.o 101obj-$(CONFIG_SCSI_EATA) += eata.o
102obj-$(CONFIG_SCSI_DC395x) += dc395x.o 102obj-$(CONFIG_SCSI_DC395x) += dc395x.o
103obj-$(CONFIG_SCSI_DC390T) += tmscsim.o 103obj-$(CONFIG_SCSI_AM53C974) += esp_scsi.o am53c974.o
104obj-$(CONFIG_MEGARAID_LEGACY) += megaraid.o 104obj-$(CONFIG_MEGARAID_LEGACY) += megaraid.o
105obj-$(CONFIG_MEGARAID_NEWGEN) += megaraid/ 105obj-$(CONFIG_MEGARAID_NEWGEN) += megaraid/
106obj-$(CONFIG_MEGARAID_SAS) += megaraid/ 106obj-$(CONFIG_MEGARAID_SAS) += megaraid/
@@ -143,6 +143,7 @@ obj-$(CONFIG_SCSI_VIRTIO) += virtio_scsi.o
143obj-$(CONFIG_VMWARE_PVSCSI) += vmw_pvscsi.o 143obj-$(CONFIG_VMWARE_PVSCSI) += vmw_pvscsi.o
144obj-$(CONFIG_XEN_SCSI_FRONTEND) += xen-scsifront.o 144obj-$(CONFIG_XEN_SCSI_FRONTEND) += xen-scsifront.o
145obj-$(CONFIG_HYPERV_STORAGE) += hv_storvsc.o 145obj-$(CONFIG_HYPERV_STORAGE) += hv_storvsc.o
146obj-$(CONFIG_SCSI_WD719X) += wd719x.o
146 147
147obj-$(CONFIG_ARM) += arm/ 148obj-$(CONFIG_ARM) += arm/
148 149
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index 45da3c823322..36244d63def2 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -11,8 +11,6 @@
11 * drew@colorado.edu 11 * drew@colorado.edu
12 * +1 (303) 666-5836 12 * +1 (303) 666-5836
13 * 13 *
14 * DISTRIBUTION RELEASE 6.
15 *
16 * For more information, please consult 14 * For more information, please consult
17 * 15 *
18 * NCR 5380 Family 16 * NCR 5380 Family
@@ -279,7 +277,7 @@ static void do_reset(struct Scsi_Host *host);
279 * Set up the internal fields in the SCSI command. 277 * Set up the internal fields in the SCSI command.
280 */ 278 */
281 279
282static __inline__ void initialize_SCp(Scsi_Cmnd * cmd) 280static inline void initialize_SCp(struct scsi_cmnd *cmd)
283{ 281{
284 /* 282 /*
285 * Initialize the Scsi Pointer field so that all of the commands in the 283 * Initialize the Scsi Pointer field so that all of the commands in the
@@ -574,12 +572,12 @@ static int __init __maybe_unused NCR5380_probe_irq(struct Scsi_Host *instance,
574 int trying_irqs, i, mask; 572 int trying_irqs, i, mask;
575 NCR5380_setup(instance); 573 NCR5380_setup(instance);
576 574
577 for (trying_irqs = i = 0, mask = 1; i < 16; ++i, mask <<= 1) 575 for (trying_irqs = 0, i = 1, mask = 2; i < 16; ++i, mask <<= 1)
578 if ((mask & possible) && (request_irq(i, &probe_intr, 0, "NCR-probe", NULL) == 0)) 576 if ((mask & possible) && (request_irq(i, &probe_intr, 0, "NCR-probe", NULL) == 0))
579 trying_irqs |= mask; 577 trying_irqs |= mask;
580 578
581 timeout = jiffies + (250 * HZ / 1000); 579 timeout = jiffies + (250 * HZ / 1000);
582 probe_irq = SCSI_IRQ_NONE; 580 probe_irq = NO_IRQ;
583 581
584 /* 582 /*
585 * A interrupt is triggered whenever BSY = false, SEL = true 583 * A interrupt is triggered whenever BSY = false, SEL = true
@@ -596,13 +594,13 @@ static int __init __maybe_unused NCR5380_probe_irq(struct Scsi_Host *instance,
596 NCR5380_write(OUTPUT_DATA_REG, hostdata->id_mask); 594 NCR5380_write(OUTPUT_DATA_REG, hostdata->id_mask);
597 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA | ICR_ASSERT_SEL); 595 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA | ICR_ASSERT_SEL);
598 596
599 while (probe_irq == SCSI_IRQ_NONE && time_before(jiffies, timeout)) 597 while (probe_irq == NO_IRQ && time_before(jiffies, timeout))
600 schedule_timeout_uninterruptible(1); 598 schedule_timeout_uninterruptible(1);
601 599
602 NCR5380_write(SELECT_ENABLE_REG, 0); 600 NCR5380_write(SELECT_ENABLE_REG, 0);
603 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); 601 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
604 602
605 for (i = 0, mask = 1; i < 16; ++i, mask <<= 1) 603 for (i = 1, mask = 2; i < 16; ++i, mask <<= 1)
606 if (trying_irqs & mask) 604 if (trying_irqs & mask)
607 free_irq(i, NULL); 605 free_irq(i, NULL);
608 606
@@ -610,50 +608,70 @@ static int __init __maybe_unused NCR5380_probe_irq(struct Scsi_Host *instance,
610} 608}
611 609
612/** 610/**
613 * NCR58380_print_options - show options 611 * NCR58380_info - report driver and host information
614 * @instance: unused for now 612 * @instance: relevant scsi host instance
615 * 613 *
616 * Called by probe code indicating the NCR5380 driver options that 614 * For use as the host template info() handler.
617 * were selected. At some point this will switch to runtime options
618 * read from the adapter in question
619 * 615 *
620 * Locks: none 616 * Locks: none
621 */ 617 */
622 618
623static void __init __maybe_unused 619static const char *NCR5380_info(struct Scsi_Host *instance)
624NCR5380_print_options(struct Scsi_Host *instance)
625{ 620{
626 printk(" generic options" 621 struct NCR5380_hostdata *hostdata = shost_priv(instance);
627#ifdef AUTOPROBE_IRQ 622
628 " AUTOPROBE_IRQ" 623 return hostdata->info;
624}
625
626static void prepare_info(struct Scsi_Host *instance)
627{
628 struct NCR5380_hostdata *hostdata = shost_priv(instance);
629
630 snprintf(hostdata->info, sizeof(hostdata->info),
631 "%s, io_port 0x%lx, n_io_port %d, "
632 "base 0x%lx, irq %d, "
633 "can_queue %d, cmd_per_lun %d, "
634 "sg_tablesize %d, this_id %d, "
635 "flags { %s%s%s}, "
636#if defined(USLEEP_POLL) && defined(USLEEP_WAITLONG)
637 "USLEEP_POLL %d, USLEEP_WAITLONG %d, "
629#endif 638#endif
630#ifdef AUTOSENSE 639 "options { %s} ",
631 " AUTOSENSE" 640 instance->hostt->name, instance->io_port, instance->n_io_port,
641 instance->base, instance->irq,
642 instance->can_queue, instance->cmd_per_lun,
643 instance->sg_tablesize, instance->this_id,
644 hostdata->flags & FLAG_NCR53C400 ? "NCR53C400 " : "",
645 hostdata->flags & FLAG_DTC3181E ? "DTC3181E " : "",
646 hostdata->flags & FLAG_NO_PSEUDO_DMA ? "NO_PSEUDO_DMA " : "",
647#if defined(USLEEP_POLL) && defined(USLEEP_WAITLONG)
648 USLEEP_POLL, USLEEP_WAITLONG,
649#endif
650#ifdef AUTOPROBE_IRQ
651 "AUTOPROBE_IRQ "
632#endif 652#endif
633#ifdef DIFFERENTIAL 653#ifdef DIFFERENTIAL
634 " DIFFERENTIAL" 654 "DIFFERENTIAL "
635#endif 655#endif
636#ifdef REAL_DMA 656#ifdef REAL_DMA
637 " REAL DMA" 657 "REAL_DMA "
638#endif 658#endif
639#ifdef REAL_DMA_POLL 659#ifdef REAL_DMA_POLL
640 " REAL DMA POLL" 660 "REAL_DMA_POLL "
641#endif 661#endif
642#ifdef PARITY 662#ifdef PARITY
643 " PARITY" 663 "PARITY "
644#endif 664#endif
645#ifdef PSEUDO_DMA 665#ifdef PSEUDO_DMA
646 " PSEUDO DMA" 666 "PSEUDO_DMA "
647#endif 667#endif
648#ifdef UNSAFE 668#ifdef UNSAFE
649 " UNSAFE " 669 "UNSAFE "
650#endif 670#endif
651 ); 671#ifdef NCR53C400
652 printk(" USLEEP, USLEEP_POLL=%d USLEEP_SLEEP=%d", USLEEP_POLL, USLEEP_SLEEP); 672 "NCR53C400 "
653 printk(" generic release=%d", NCR5380_PUBLIC_RELEASE); 673#endif
654 if (((struct NCR5380_hostdata *) instance->hostdata)->flags & FLAG_NCR53C400) { 674 "");
655 printk(" ncr53c400 release=%d", NCR53C400_PUBLIC_RELEASE);
656 }
657} 675}
658 676
659/** 677/**
@@ -672,6 +690,7 @@ static void NCR5380_print_status(struct Scsi_Host *instance)
672 NCR5380_dprint_phase(NDEBUG_ANY, instance); 690 NCR5380_dprint_phase(NDEBUG_ANY, instance);
673} 691}
674 692
693#ifdef PSEUDO_DMA
675/******************************************/ 694/******************************************/
676/* 695/*
677 * /proc/scsi/[dtc pas16 t128 generic]/[0-ASC_NUM_BOARD_SUPPORTED] 696 * /proc/scsi/[dtc pas16 t128 generic]/[0-ASC_NUM_BOARD_SUPPORTED]
@@ -689,19 +708,18 @@ static void NCR5380_print_status(struct Scsi_Host *instance)
689static int __maybe_unused NCR5380_write_info(struct Scsi_Host *instance, 708static int __maybe_unused NCR5380_write_info(struct Scsi_Host *instance,
690 char *buffer, int length) 709 char *buffer, int length)
691{ 710{
692#ifdef DTC_PUBLIC_RELEASE 711 struct NCR5380_hostdata *hostdata = shost_priv(instance);
693 dtc_wmaxi = dtc_maxi = 0; 712
694#endif 713 hostdata->spin_max_r = 0;
695#ifdef PAS16_PUBLIC_RELEASE 714 hostdata->spin_max_w = 0;
696 pas_wmaxi = pas_maxi = 0; 715 return 0;
697#endif
698 return (-ENOSYS); /* Currently this is a no-op */
699} 716}
717#endif
700 718
701#undef SPRINTF 719#undef SPRINTF
702#define SPRINTF(args...) seq_printf(m, ## args) 720#define SPRINTF(args...) seq_printf(m, ## args)
703static 721static
704void lprint_Scsi_Cmnd(Scsi_Cmnd * cmd, struct seq_file *m); 722void lprint_Scsi_Cmnd(struct scsi_cmnd *cmd, struct seq_file *m);
705static 723static
706void lprint_command(unsigned char *cmd, struct seq_file *m); 724void lprint_command(unsigned char *cmd, struct seq_file *m);
707static 725static
@@ -711,56 +729,31 @@ static int __maybe_unused NCR5380_show_info(struct seq_file *m,
711 struct Scsi_Host *instance) 729 struct Scsi_Host *instance)
712{ 730{
713 struct NCR5380_hostdata *hostdata; 731 struct NCR5380_hostdata *hostdata;
714 Scsi_Cmnd *ptr; 732 struct scsi_cmnd *ptr;
715 733
716 hostdata = (struct NCR5380_hostdata *) instance->hostdata; 734 hostdata = (struct NCR5380_hostdata *) instance->hostdata;
717 735
718 SPRINTF("NCR5380 core release=%d. ", NCR5380_PUBLIC_RELEASE); 736#ifdef PSEUDO_DMA
719 if (((struct NCR5380_hostdata *) instance->hostdata)->flags & FLAG_NCR53C400) 737 SPRINTF("Highwater I/O busy spin counts: write %d, read %d\n",
720 SPRINTF("ncr53c400 release=%d. ", NCR53C400_PUBLIC_RELEASE); 738 hostdata->spin_max_w, hostdata->spin_max_r);
721#ifdef DTC_PUBLIC_RELEASE
722 SPRINTF("DTC 3180/3280 release %d", DTC_PUBLIC_RELEASE);
723#endif
724#ifdef T128_PUBLIC_RELEASE
725 SPRINTF("T128 release %d", T128_PUBLIC_RELEASE);
726#endif
727#ifdef GENERIC_NCR5380_PUBLIC_RELEASE
728 SPRINTF("Generic5380 release %d", GENERIC_NCR5380_PUBLIC_RELEASE);
729#endif
730#ifdef PAS16_PUBLIC_RELEASE
731 SPRINTF("PAS16 release=%d", PAS16_PUBLIC_RELEASE);
732#endif
733
734 SPRINTF("\nBase Addr: 0x%05lX ", (long) instance->base);
735 SPRINTF("io_port: %04x ", (int) instance->io_port);
736 if (instance->irq == SCSI_IRQ_NONE)
737 SPRINTF("IRQ: None.\n");
738 else
739 SPRINTF("IRQ: %d.\n", instance->irq);
740
741#ifdef DTC_PUBLIC_RELEASE
742 SPRINTF("Highwater I/O busy_spin_counts -- write: %d read: %d\n", dtc_wmaxi, dtc_maxi);
743#endif
744#ifdef PAS16_PUBLIC_RELEASE
745 SPRINTF("Highwater I/O busy_spin_counts -- write: %d read: %d\n", pas_wmaxi, pas_maxi);
746#endif 739#endif
747 spin_lock_irq(instance->host_lock); 740 spin_lock_irq(instance->host_lock);
748 if (!hostdata->connected) 741 if (!hostdata->connected)
749 SPRINTF("scsi%d: no currently connected command\n", instance->host_no); 742 SPRINTF("scsi%d: no currently connected command\n", instance->host_no);
750 else 743 else
751 lprint_Scsi_Cmnd((Scsi_Cmnd *) hostdata->connected, m); 744 lprint_Scsi_Cmnd((struct scsi_cmnd *) hostdata->connected, m);
752 SPRINTF("scsi%d: issue_queue\n", instance->host_no); 745 SPRINTF("scsi%d: issue_queue\n", instance->host_no);
753 for (ptr = (Scsi_Cmnd *) hostdata->issue_queue; ptr; ptr = (Scsi_Cmnd *) ptr->host_scribble) 746 for (ptr = (struct scsi_cmnd *) hostdata->issue_queue; ptr; ptr = (struct scsi_cmnd *) ptr->host_scribble)
754 lprint_Scsi_Cmnd(ptr, m); 747 lprint_Scsi_Cmnd(ptr, m);
755 748
756 SPRINTF("scsi%d: disconnected_queue\n", instance->host_no); 749 SPRINTF("scsi%d: disconnected_queue\n", instance->host_no);
757 for (ptr = (Scsi_Cmnd *) hostdata->disconnected_queue; ptr; ptr = (Scsi_Cmnd *) ptr->host_scribble) 750 for (ptr = (struct scsi_cmnd *) hostdata->disconnected_queue; ptr; ptr = (struct scsi_cmnd *) ptr->host_scribble)
758 lprint_Scsi_Cmnd(ptr, m); 751 lprint_Scsi_Cmnd(ptr, m);
759 spin_unlock_irq(instance->host_lock); 752 spin_unlock_irq(instance->host_lock);
760 return 0; 753 return 0;
761} 754}
762 755
763static void lprint_Scsi_Cmnd(Scsi_Cmnd * cmd, struct seq_file *m) 756static void lprint_Scsi_Cmnd(struct scsi_cmnd *cmd, struct seq_file *m)
764{ 757{
765 SPRINTF("scsi%d : destination target %d, lun %llu\n", cmd->device->host->host_no, cmd->device->id, cmd->device->lun); 758 SPRINTF("scsi%d : destination target %d, lun %llu\n", cmd->device->host->host_no, cmd->device->id, cmd->device->lun);
766 SPRINTF(" command = "); 759 SPRINTF(" command = ");
@@ -836,18 +829,6 @@ static int NCR5380_init(struct Scsi_Host *instance, int flags)
836 829
837 INIT_DELAYED_WORK(&hostdata->coroutine, NCR5380_main); 830 INIT_DELAYED_WORK(&hostdata->coroutine, NCR5380_main);
838 831
839#ifdef NCR5380_STATS
840 for (i = 0; i < 8; ++i) {
841 hostdata->time_read[i] = 0;
842 hostdata->time_write[i] = 0;
843 hostdata->bytes_read[i] = 0;
844 hostdata->bytes_write[i] = 0;
845 }
846 hostdata->timebase = 0;
847 hostdata->pendingw = 0;
848 hostdata->pendingr = 0;
849#endif
850
851 /* The CHECK code seems to break the 53C400. Will check it later maybe */ 832 /* The CHECK code seems to break the 53C400. Will check it later maybe */
852 if (flags & FLAG_NCR53C400) 833 if (flags & FLAG_NCR53C400)
853 hostdata->flags = FLAG_HAS_LAST_BYTE_SENT | flags; 834 hostdata->flags = FLAG_HAS_LAST_BYTE_SENT | flags;
@@ -857,11 +838,7 @@ static int NCR5380_init(struct Scsi_Host *instance, int flags)
857 hostdata->host = instance; 838 hostdata->host = instance;
858 hostdata->time_expires = 0; 839 hostdata->time_expires = 0;
859 840
860#ifndef AUTOSENSE 841 prepare_info(instance);
861 if ((instance->cmd_per_lun > 1) || instance->can_queue > 1)
862 printk(KERN_WARNING "scsi%d : WARNING : support for multiple outstanding commands enabled\n" " without AUTOSENSE option, contingent allegiance conditions may\n"
863 " be incorrectly cleared.\n", instance->host_no);
864#endif /* def AUTOSENSE */
865 842
866 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); 843 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
867 NCR5380_write(MODE_REG, MR_BASE); 844 NCR5380_write(MODE_REG, MR_BASE);
@@ -935,11 +912,11 @@ static void NCR5380_exit(struct Scsi_Host *instance)
935 * Locks: host lock taken by caller 912 * Locks: host lock taken by caller
936 */ 913 */
937 914
938static int NCR5380_queue_command_lck(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *)) 915static int NCR5380_queue_command_lck(struct scsi_cmnd *cmd, void (*done) (struct scsi_cmnd *))
939{ 916{
940 struct Scsi_Host *instance = cmd->device->host; 917 struct Scsi_Host *instance = cmd->device->host;
941 struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *) instance->hostdata; 918 struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *) instance->hostdata;
942 Scsi_Cmnd *tmp; 919 struct scsi_cmnd *tmp;
943 920
944#if (NDEBUG & NDEBUG_NO_WRITE) 921#if (NDEBUG & NDEBUG_NO_WRITE)
945 switch (cmd->cmnd[0]) { 922 switch (cmd->cmnd[0]) {
@@ -952,25 +929,6 @@ static int NCR5380_queue_command_lck(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *)
952 } 929 }
953#endif /* (NDEBUG & NDEBUG_NO_WRITE) */ 930#endif /* (NDEBUG & NDEBUG_NO_WRITE) */
954 931
955#ifdef NCR5380_STATS
956 switch (cmd->cmnd[0]) {
957 case WRITE:
958 case WRITE_6:
959 case WRITE_10:
960 hostdata->time_write[cmd->device->id] -= (jiffies - hostdata->timebase);
961 hostdata->bytes_write[cmd->device->id] += scsi_bufflen(cmd);
962 hostdata->pendingw++;
963 break;
964 case READ:
965 case READ_6:
966 case READ_10:
967 hostdata->time_read[cmd->device->id] -= (jiffies - hostdata->timebase);
968 hostdata->bytes_read[cmd->device->id] += scsi_bufflen(cmd);
969 hostdata->pendingr++;
970 break;
971 }
972#endif
973
974 /* 932 /*
975 * We use the host_scribble field as a pointer to the next command 933 * We use the host_scribble field as a pointer to the next command
976 * in a queue 934 * in a queue
@@ -992,7 +950,7 @@ static int NCR5380_queue_command_lck(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *)
992 cmd->host_scribble = (unsigned char *) hostdata->issue_queue; 950 cmd->host_scribble = (unsigned char *) hostdata->issue_queue;
993 hostdata->issue_queue = cmd; 951 hostdata->issue_queue = cmd;
994 } else { 952 } else {
995 for (tmp = (Scsi_Cmnd *) hostdata->issue_queue; tmp->host_scribble; tmp = (Scsi_Cmnd *) tmp->host_scribble); 953 for (tmp = (struct scsi_cmnd *) hostdata->issue_queue; tmp->host_scribble; tmp = (struct scsi_cmnd *) tmp->host_scribble);
996 LIST(cmd, tmp); 954 LIST(cmd, tmp);
997 tmp->host_scribble = (unsigned char *) cmd; 955 tmp->host_scribble = (unsigned char *) cmd;
998 } 956 }
@@ -1023,7 +981,7 @@ static void NCR5380_main(struct work_struct *work)
1023 struct NCR5380_hostdata *hostdata = 981 struct NCR5380_hostdata *hostdata =
1024 container_of(work, struct NCR5380_hostdata, coroutine.work); 982 container_of(work, struct NCR5380_hostdata, coroutine.work);
1025 struct Scsi_Host *instance = hostdata->host; 983 struct Scsi_Host *instance = hostdata->host;
1026 Scsi_Cmnd *tmp, *prev; 984 struct scsi_cmnd *tmp, *prev;
1027 int done; 985 int done;
1028 986
1029 spin_lock_irq(instance->host_lock); 987 spin_lock_irq(instance->host_lock);
@@ -1036,7 +994,7 @@ static void NCR5380_main(struct work_struct *work)
1036 * Search through the issue_queue for a command destined 994 * Search through the issue_queue for a command destined
1037 * for a target that's not busy. 995 * for a target that's not busy.
1038 */ 996 */
1039 for (tmp = (Scsi_Cmnd *) hostdata->issue_queue, prev = NULL; tmp; prev = tmp, tmp = (Scsi_Cmnd *) tmp->host_scribble) 997 for (tmp = (struct scsi_cmnd *) hostdata->issue_queue, prev = NULL; tmp; prev = tmp, tmp = (struct scsi_cmnd *) tmp->host_scribble)
1040 { 998 {
1041 if (prev != tmp) 999 if (prev != tmp)
1042 dprintk(NDEBUG_LISTS, "MAIN tmp=%p target=%d busy=%d lun=%llu\n", tmp, tmp->device->id, hostdata->busy[tmp->device->id], tmp->device->lun); 1000 dprintk(NDEBUG_LISTS, "MAIN tmp=%p target=%d busy=%d lun=%llu\n", tmp, tmp->device->id, hostdata->busy[tmp->device->id], tmp->device->lun);
@@ -1048,7 +1006,7 @@ static void NCR5380_main(struct work_struct *work)
1048 prev->host_scribble = tmp->host_scribble; 1006 prev->host_scribble = tmp->host_scribble;
1049 } else { 1007 } else {
1050 REMOVE(-1, hostdata->issue_queue, tmp, tmp->host_scribble); 1008 REMOVE(-1, hostdata->issue_queue, tmp, tmp->host_scribble);
1051 hostdata->issue_queue = (Scsi_Cmnd *) tmp->host_scribble; 1009 hostdata->issue_queue = (struct scsi_cmnd *) tmp->host_scribble;
1052 } 1010 }
1053 tmp->host_scribble = NULL; 1011 tmp->host_scribble = NULL;
1054 1012
@@ -1073,14 +1031,14 @@ static void NCR5380_main(struct work_struct *work)
1073 hostdata->selecting = NULL; 1031 hostdata->selecting = NULL;
1074 /* RvC: have to preset this to indicate a new command is being performed */ 1032 /* RvC: have to preset this to indicate a new command is being performed */
1075 1033
1076 if (!NCR5380_select(instance, tmp, 1034 /*
1077 /* 1035 * REQUEST SENSE commands are issued without tagged
1078 * REQUEST SENSE commands are issued without tagged 1036 * queueing, even on SCSI-II devices because the
1079 * queueing, even on SCSI-II devices because the 1037 * contingent allegiance condition exists for the
1080 * contingent allegiance condition exists for the 1038 * entire unit.
1081 * entire unit. 1039 */
1082 */ 1040
1083 (tmp->cmnd[0] == REQUEST_SENSE) ? TAG_NONE : TAG_NEXT)) { 1041 if (!NCR5380_select(instance, tmp)) {
1084 break; 1042 break;
1085 } else { 1043 } else {
1086 LIST(tmp, hostdata->issue_queue); 1044 LIST(tmp, hostdata->issue_queue);
@@ -1095,9 +1053,9 @@ static void NCR5380_main(struct work_struct *work)
1095 /* exited locked */ 1053 /* exited locked */
1096 } /* if (!hostdata->connected) */ 1054 } /* if (!hostdata->connected) */
1097 if (hostdata->selecting) { 1055 if (hostdata->selecting) {
1098 tmp = (Scsi_Cmnd *) hostdata->selecting; 1056 tmp = (struct scsi_cmnd *) hostdata->selecting;
1099 /* Selection will drop and retake the lock */ 1057 /* Selection will drop and retake the lock */
1100 if (!NCR5380_select(instance, tmp, (tmp->cmnd[0] == REQUEST_SENSE) ? TAG_NONE : TAG_NEXT)) { 1058 if (!NCR5380_select(instance, tmp)) {
1101 /* Ok ?? */ 1059 /* Ok ?? */
1102 } else { 1060 } else {
1103 /* RvC: device failed, so we wait a long time 1061 /* RvC: device failed, so we wait a long time
@@ -1216,47 +1174,16 @@ static irqreturn_t NCR5380_intr(int dummy, void *dev_id)
1216 1174
1217#endif 1175#endif
1218 1176
1219/**
1220 * collect_stats - collect stats on a scsi command
1221 * @hostdata: adapter
1222 * @cmd: command being issued
1223 *
1224 * Update the statistical data by parsing the command in question
1225 */
1226
1227static void collect_stats(struct NCR5380_hostdata *hostdata, Scsi_Cmnd * cmd)
1228{
1229#ifdef NCR5380_STATS
1230 switch (cmd->cmnd[0]) {
1231 case WRITE:
1232 case WRITE_6:
1233 case WRITE_10:
1234 hostdata->time_write[scmd_id(cmd)] += (jiffies - hostdata->timebase);
1235 hostdata->pendingw--;
1236 break;
1237 case READ:
1238 case READ_6:
1239 case READ_10:
1240 hostdata->time_read[scmd_id(cmd)] += (jiffies - hostdata->timebase);
1241 hostdata->pendingr--;
1242 break;
1243 }
1244#endif
1245}
1246
1247
1248/* 1177/*
1249 * Function : int NCR5380_select (struct Scsi_Host *instance, Scsi_Cmnd *cmd, 1178 * Function : int NCR5380_select(struct Scsi_Host *instance,
1250 * int tag); 1179 * struct scsi_cmnd *cmd)
1251 * 1180 *
1252 * Purpose : establishes I_T_L or I_T_L_Q nexus for new or existing command, 1181 * Purpose : establishes I_T_L or I_T_L_Q nexus for new or existing command,
1253 * including ARBITRATION, SELECTION, and initial message out for 1182 * including ARBITRATION, SELECTION, and initial message out for
1254 * IDENTIFY and queue messages. 1183 * IDENTIFY and queue messages.
1255 * 1184 *
1256 * Inputs : instance - instantiation of the 5380 driver on which this 1185 * Inputs : instance - instantiation of the 5380 driver on which this
1257 * target lives, cmd - SCSI command to execute, tag - set to TAG_NEXT for 1186 * target lives, cmd - SCSI command to execute.
1258 * new tag, TAG_NONE for untagged queueing, otherwise set to the tag for
1259 * the command that is presently connected.
1260 * 1187 *
1261 * Returns : -1 if selection could not execute for some reason, 1188 * Returns : -1 if selection could not execute for some reason,
1262 * 0 if selection succeeded or failed because the target 1189 * 0 if selection succeeded or failed because the target
@@ -1278,7 +1205,7 @@ static void collect_stats(struct NCR5380_hostdata *hostdata, Scsi_Cmnd * cmd)
1278 * Locks: caller holds hostdata lock in IRQ mode 1205 * Locks: caller holds hostdata lock in IRQ mode
1279 */ 1206 */
1280 1207
1281static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd * cmd, int tag) 1208static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
1282{ 1209{
1283 NCR5380_local_declare(); 1210 NCR5380_local_declare();
1284 struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *) instance->hostdata; 1211 struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *) instance->hostdata;
@@ -1476,7 +1403,6 @@ part2:
1476 return -1; 1403 return -1;
1477 } 1404 }
1478 cmd->result = DID_BAD_TARGET << 16; 1405 cmd->result = DID_BAD_TARGET << 16;
1479 collect_stats(hostdata, cmd);
1480 cmd->scsi_done(cmd); 1406 cmd->scsi_done(cmd);
1481 NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); 1407 NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
1482 dprintk(NDEBUG_SELECTION, "scsi%d : target did not respond within 250ms\n", instance->host_no); 1408 dprintk(NDEBUG_SELECTION, "scsi%d : target did not respond within 250ms\n", instance->host_no);
@@ -1513,7 +1439,7 @@ part2:
1513 } 1439 }
1514 1440
1515 dprintk(NDEBUG_SELECTION, "scsi%d : target %d selected, going into MESSAGE OUT phase.\n", instance->host_no, cmd->device->id); 1441 dprintk(NDEBUG_SELECTION, "scsi%d : target %d selected, going into MESSAGE OUT phase.\n", instance->host_no, cmd->device->id);
1516 tmp[0] = IDENTIFY(((instance->irq == SCSI_IRQ_NONE) ? 0 : 1), cmd->device->lun); 1442 tmp[0] = IDENTIFY(((instance->irq == NO_IRQ) ? 0 : 1), cmd->device->lun);
1517 1443
1518 len = 1; 1444 len = 1;
1519 cmd->tag = 0; 1445 cmd->tag = 0;
@@ -2086,7 +2012,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {
2086#endif 2012#endif
2087 unsigned char *data; 2013 unsigned char *data;
2088 unsigned char phase, tmp, extended_msg[10], old_phase = 0xff; 2014 unsigned char phase, tmp, extended_msg[10], old_phase = 0xff;
2089 Scsi_Cmnd *cmd = (Scsi_Cmnd *) hostdata->connected; 2015 struct scsi_cmnd *cmd = (struct scsi_cmnd *) hostdata->connected;
2090 /* RvC: we need to set the end of the polling time */ 2016 /* RvC: we need to set the end of the polling time */
2091 unsigned long poll_time = jiffies + USLEEP_POLL; 2017 unsigned long poll_time = jiffies + USLEEP_POLL;
2092 2018
@@ -2228,7 +2154,6 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {
2228 cmd->next_link->tag = cmd->tag; 2154 cmd->next_link->tag = cmd->tag;
2229 cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8); 2155 cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
2230 dprintk(NDEBUG_LINKED, "scsi%d : target %d lun %llu linked request done, calling scsi_done().\n", instance->host_no, cmd->device->id, cmd->device->lun); 2156 dprintk(NDEBUG_LINKED, "scsi%d : target %d lun %llu linked request done, calling scsi_done().\n", instance->host_no, cmd->device->id, cmd->device->lun);
2231 collect_stats(hostdata, cmd);
2232 cmd->scsi_done(cmd); 2157 cmd->scsi_done(cmd);
2233 cmd = hostdata->connected; 2158 cmd = hostdata->connected;
2234 break; 2159 break;
@@ -2263,7 +2188,6 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {
2263 else if (status_byte(cmd->SCp.Status) != GOOD) 2188 else if (status_byte(cmd->SCp.Status) != GOOD)
2264 cmd->result = (cmd->result & 0x00ffff) | (DID_ERROR << 16); 2189 cmd->result = (cmd->result & 0x00ffff) | (DID_ERROR << 16);
2265 2190
2266#ifdef AUTOSENSE
2267 if ((cmd->cmnd[0] == REQUEST_SENSE) && 2191 if ((cmd->cmnd[0] == REQUEST_SENSE) &&
2268 hostdata->ses.cmd_len) { 2192 hostdata->ses.cmd_len) {
2269 scsi_eh_restore_cmnd(cmd, &hostdata->ses); 2193 scsi_eh_restore_cmnd(cmd, &hostdata->ses);
@@ -2278,12 +2202,9 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {
2278 LIST(cmd, hostdata->issue_queue); 2202 LIST(cmd, hostdata->issue_queue);
2279 cmd->host_scribble = (unsigned char *) 2203 cmd->host_scribble = (unsigned char *)
2280 hostdata->issue_queue; 2204 hostdata->issue_queue;
2281 hostdata->issue_queue = (Scsi_Cmnd *) cmd; 2205 hostdata->issue_queue = (struct scsi_cmnd *) cmd;
2282 dprintk(NDEBUG_QUEUES, "scsi%d : REQUEST SENSE added to head of issue queue\n", instance->host_no); 2206 dprintk(NDEBUG_QUEUES, "scsi%d : REQUEST SENSE added to head of issue queue\n", instance->host_no);
2283 } else 2207 } else {
2284#endif /* def AUTOSENSE */
2285 {
2286 collect_stats(hostdata, cmd);
2287 cmd->scsi_done(cmd); 2208 cmd->scsi_done(cmd);
2288 } 2209 }
2289 2210
@@ -2430,7 +2351,6 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {
2430 hostdata->busy[cmd->device->id] &= ~(1 << (cmd->device->lun & 0xFF)); 2351 hostdata->busy[cmd->device->id] &= ~(1 << (cmd->device->lun & 0xFF));
2431 hostdata->connected = NULL; 2352 hostdata->connected = NULL;
2432 cmd->result = DID_ERROR << 16; 2353 cmd->result = DID_ERROR << 16;
2433 collect_stats(hostdata, cmd);
2434 cmd->scsi_done(cmd); 2354 cmd->scsi_done(cmd);
2435 NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); 2355 NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
2436 return; 2356 return;
@@ -2479,7 +2399,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {
2479 * Function : void NCR5380_reselect (struct Scsi_Host *instance) 2399 * Function : void NCR5380_reselect (struct Scsi_Host *instance)
2480 * 2400 *
2481 * Purpose : does reselection, initializing the instance->connected 2401 * Purpose : does reselection, initializing the instance->connected
2482 * field to point to the Scsi_Cmnd for which the I_T_L or I_T_L_Q 2402 * field to point to the scsi_cmnd for which the I_T_L or I_T_L_Q
2483 * nexus has been reestablished, 2403 * nexus has been reestablished,
2484 * 2404 *
2485 * Inputs : instance - this instance of the NCR5380. 2405 * Inputs : instance - this instance of the NCR5380.
@@ -2496,7 +2416,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance) {
2496 int len; 2416 int len;
2497 unsigned char msg[3]; 2417 unsigned char msg[3];
2498 unsigned char *data; 2418 unsigned char *data;
2499 Scsi_Cmnd *tmp = NULL, *prev; 2419 struct scsi_cmnd *tmp = NULL, *prev;
2500 int abort = 0; 2420 int abort = 0;
2501 NCR5380_setup(instance); 2421 NCR5380_setup(instance);
2502 2422
@@ -2562,7 +2482,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance) {
2562 */ 2482 */
2563 2483
2564 2484
2565 for (tmp = (Scsi_Cmnd *) hostdata->disconnected_queue, prev = NULL; tmp; prev = tmp, tmp = (Scsi_Cmnd *) tmp->host_scribble) 2485 for (tmp = (struct scsi_cmnd *) hostdata->disconnected_queue, prev = NULL; tmp; prev = tmp, tmp = (struct scsi_cmnd *) tmp->host_scribble)
2566 if ((target_mask == (1 << tmp->device->id)) && (lun == (u8)tmp->device->lun) 2486 if ((target_mask == (1 << tmp->device->id)) && (lun == (u8)tmp->device->lun)
2567 ) { 2487 ) {
2568 if (prev) { 2488 if (prev) {
@@ -2570,7 +2490,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance) {
2570 prev->host_scribble = tmp->host_scribble; 2490 prev->host_scribble = tmp->host_scribble;
2571 } else { 2491 } else {
2572 REMOVE(-1, hostdata->disconnected_queue, tmp, tmp->host_scribble); 2492 REMOVE(-1, hostdata->disconnected_queue, tmp, tmp->host_scribble);
2573 hostdata->disconnected_queue = (Scsi_Cmnd *) tmp->host_scribble; 2493 hostdata->disconnected_queue = (struct scsi_cmnd *) tmp->host_scribble;
2574 } 2494 }
2575 tmp->host_scribble = NULL; 2495 tmp->host_scribble = NULL;
2576 break; 2496 break;
@@ -2601,7 +2521,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance) {
2601 * 2521 *
2602 * Inputs : instance - this instance of the NCR5380. 2522 * Inputs : instance - this instance of the NCR5380.
2603 * 2523 *
2604 * Returns : pointer to the Scsi_Cmnd structure for which the I_T_L 2524 * Returns : pointer to the scsi_cmnd structure for which the I_T_L
2605 * nexus has been reestablished, on failure NULL is returned. 2525 * nexus has been reestablished, on failure NULL is returned.
2606 */ 2526 */
2607 2527
@@ -2643,32 +2563,32 @@ static void NCR5380_dma_complete(NCR5380_instance * instance) {
2643#endif /* def REAL_DMA */ 2563#endif /* def REAL_DMA */
2644 2564
2645/* 2565/*
2646 * Function : int NCR5380_abort (Scsi_Cmnd *cmd) 2566 * Function : int NCR5380_abort (struct scsi_cmnd *cmd)
2647 * 2567 *
2648 * Purpose : abort a command 2568 * Purpose : abort a command
2649 * 2569 *
2650 * Inputs : cmd - the Scsi_Cmnd to abort, code - code to set the 2570 * Inputs : cmd - the scsi_cmnd to abort, code - code to set the
2651 * host byte of the result field to, if zero DID_ABORTED is 2571 * host byte of the result field to, if zero DID_ABORTED is
2652 * used. 2572 * used.
2653 * 2573 *
2654 * Returns : 0 - success, -1 on failure. 2574 * Returns : SUCCESS - success, FAILED on failure.
2655 * 2575 *
2656 * XXX - there is no way to abort the command that is currently 2576 * XXX - there is no way to abort the command that is currently
2657 * connected, you have to wait for it to complete. If this is 2577 * connected, you have to wait for it to complete. If this is
2658 * a problem, we could implement longjmp() / setjmp(), setjmp() 2578 * a problem, we could implement longjmp() / setjmp(), setjmp()
2659 * called where the loop started in NCR5380_main(). 2579 * called where the loop started in NCR5380_main().
2660 * 2580 *
2661 * Locks: host lock taken by caller 2581 * Locks: host lock taken by caller
2662 */ 2582 */
2663 2583
2664static int NCR5380_abort(Scsi_Cmnd * cmd) { 2584static int NCR5380_abort(struct scsi_cmnd *cmd)
2585{
2665 NCR5380_local_declare(); 2586 NCR5380_local_declare();
2666 struct Scsi_Host *instance = cmd->device->host; 2587 struct Scsi_Host *instance = cmd->device->host;
2667 struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *) instance->hostdata; 2588 struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *) instance->hostdata;
2668 Scsi_Cmnd *tmp, **prev; 2589 struct scsi_cmnd *tmp, **prev;
2669 2590
2670 printk(KERN_WARNING "scsi%d : aborting command\n", instance->host_no); 2591 scmd_printk(KERN_WARNING, cmd, "aborting command\n");
2671 scsi_print_command(cmd);
2672 2592
2673 NCR5380_print_status(instance); 2593 NCR5380_print_status(instance);
2674 2594
@@ -2704,7 +2624,7 @@ static int NCR5380_abort(Scsi_Cmnd * cmd) {
2704 * aborted flag and get back into our main loop. 2624 * aborted flag and get back into our main loop.
2705 */ 2625 */
2706 2626
2707 return 0; 2627 return SUCCESS;
2708 } 2628 }
2709#endif 2629#endif
2710 2630
@@ -2714,10 +2634,10 @@ static int NCR5380_abort(Scsi_Cmnd * cmd) {
2714 */ 2634 */
2715 2635
2716 dprintk(NDEBUG_ABORT, "scsi%d : abort going into loop.\n", instance->host_no); 2636 dprintk(NDEBUG_ABORT, "scsi%d : abort going into loop.\n", instance->host_no);
2717 for (prev = (Scsi_Cmnd **) & (hostdata->issue_queue), tmp = (Scsi_Cmnd *) hostdata->issue_queue; tmp; prev = (Scsi_Cmnd **) & (tmp->host_scribble), tmp = (Scsi_Cmnd *) tmp->host_scribble) 2637 for (prev = (struct scsi_cmnd **) &(hostdata->issue_queue), tmp = (struct scsi_cmnd *) hostdata->issue_queue; tmp; prev = (struct scsi_cmnd **) &(tmp->host_scribble), tmp = (struct scsi_cmnd *) tmp->host_scribble)
2718 if (cmd == tmp) { 2638 if (cmd == tmp) {
2719 REMOVE(5, *prev, tmp, tmp->host_scribble); 2639 REMOVE(5, *prev, tmp, tmp->host_scribble);
2720 (*prev) = (Scsi_Cmnd *) tmp->host_scribble; 2640 (*prev) = (struct scsi_cmnd *) tmp->host_scribble;
2721 tmp->host_scribble = NULL; 2641 tmp->host_scribble = NULL;
2722 tmp->result = DID_ABORT << 16; 2642 tmp->result = DID_ABORT << 16;
2723 dprintk(NDEBUG_ABORT, "scsi%d : abort removed command from issue queue.\n", instance->host_no); 2643 dprintk(NDEBUG_ABORT, "scsi%d : abort removed command from issue queue.\n", instance->host_no);
@@ -2770,20 +2690,20 @@ static int NCR5380_abort(Scsi_Cmnd * cmd) {
2770 * it from the disconnected queue. 2690 * it from the disconnected queue.
2771 */ 2691 */
2772 2692
2773 for (tmp = (Scsi_Cmnd *) hostdata->disconnected_queue; tmp; tmp = (Scsi_Cmnd *) tmp->host_scribble) 2693 for (tmp = (struct scsi_cmnd *) hostdata->disconnected_queue; tmp; tmp = (struct scsi_cmnd *) tmp->host_scribble)
2774 if (cmd == tmp) { 2694 if (cmd == tmp) {
2775 dprintk(NDEBUG_ABORT, "scsi%d : aborting disconnected command.\n", instance->host_no); 2695 dprintk(NDEBUG_ABORT, "scsi%d : aborting disconnected command.\n", instance->host_no);
2776 2696
2777 if (NCR5380_select(instance, cmd, (int) cmd->tag)) 2697 if (NCR5380_select(instance, cmd))
2778 return FAILED; 2698 return FAILED;
2779 dprintk(NDEBUG_ABORT, "scsi%d : nexus reestablished.\n", instance->host_no); 2699 dprintk(NDEBUG_ABORT, "scsi%d : nexus reestablished.\n", instance->host_no);
2780 2700
2781 do_abort(instance); 2701 do_abort(instance);
2782 2702
2783 for (prev = (Scsi_Cmnd **) & (hostdata->disconnected_queue), tmp = (Scsi_Cmnd *) hostdata->disconnected_queue; tmp; prev = (Scsi_Cmnd **) & (tmp->host_scribble), tmp = (Scsi_Cmnd *) tmp->host_scribble) 2703 for (prev = (struct scsi_cmnd **) &(hostdata->disconnected_queue), tmp = (struct scsi_cmnd *) hostdata->disconnected_queue; tmp; prev = (struct scsi_cmnd **) &(tmp->host_scribble), tmp = (struct scsi_cmnd *) tmp->host_scribble)
2784 if (cmd == tmp) { 2704 if (cmd == tmp) {
2785 REMOVE(5, *prev, tmp, tmp->host_scribble); 2705 REMOVE(5, *prev, tmp, tmp->host_scribble);
2786 *prev = (Scsi_Cmnd *) tmp->host_scribble; 2706 *prev = (struct scsi_cmnd *) tmp->host_scribble;
2787 tmp->host_scribble = NULL; 2707 tmp->host_scribble = NULL;
2788 tmp->result = DID_ABORT << 16; 2708 tmp->result = DID_ABORT << 16;
2789 tmp->scsi_done(tmp); 2709 tmp->scsi_done(tmp);
@@ -2806,7 +2726,7 @@ static int NCR5380_abort(Scsi_Cmnd * cmd) {
2806 2726
2807 2727
2808/* 2728/*
2809 * Function : int NCR5380_bus_reset (Scsi_Cmnd *cmd) 2729 * Function : int NCR5380_bus_reset (struct scsi_cmnd *cmd)
2810 * 2730 *
2811 * Purpose : reset the SCSI bus. 2731 * Purpose : reset the SCSI bus.
2812 * 2732 *
@@ -2815,7 +2735,7 @@ static int NCR5380_abort(Scsi_Cmnd * cmd) {
2815 * Locks: host lock taken by caller 2735 * Locks: host lock taken by caller
2816 */ 2736 */
2817 2737
2818static int NCR5380_bus_reset(Scsi_Cmnd * cmd) 2738static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
2819{ 2739{
2820 struct Scsi_Host *instance = cmd->device->host; 2740 struct Scsi_Host *instance = cmd->device->host;
2821 2741
diff --git a/drivers/scsi/NCR5380.h b/drivers/scsi/NCR5380.h
index c79ddfa6f53c..162112dd1bf8 100644
--- a/drivers/scsi/NCR5380.h
+++ b/drivers/scsi/NCR5380.h
@@ -7,8 +7,6 @@
7 * drew@colorado.edu 7 * drew@colorado.edu
8 * +1 (303) 666-5836 8 * +1 (303) 666-5836
9 * 9 *
10 * DISTRIBUTION RELEASE 7
11 *
12 * For more information, please consult 10 * For more information, please consult
13 * 11 *
14 * NCR 5380 Family 12 * NCR 5380 Family
@@ -25,13 +23,7 @@
25#define NCR5380_H 23#define NCR5380_H
26 24
27#include <linux/interrupt.h> 25#include <linux/interrupt.h>
28
29#ifdef AUTOSENSE
30#include <scsi/scsi_eh.h> 26#include <scsi/scsi_eh.h>
31#endif
32
33#define NCR5380_PUBLIC_RELEASE 7
34#define NCR53C400_PUBLIC_RELEASE 2
35 27
36#define NDEBUG_ARBITRATION 0x1 28#define NDEBUG_ARBITRATION 0x1
37#define NDEBUG_AUTOSENSE 0x2 29#define NDEBUG_AUTOSENSE 0x2
@@ -224,33 +216,44 @@
224#define DISCONNECT_LONG 2 216#define DISCONNECT_LONG 2
225 217
226/* 218/*
227 * These are "special" values for the tag parameter passed to NCR5380_select. 219 * "Special" value for the (unsigned char) command tag, to indicate
220 * I_T_L nexus instead of I_T_L_Q.
228 */ 221 */
229 222
230#define TAG_NEXT -1 /* Use next free tag */ 223#define TAG_NONE 0xff
231#define TAG_NONE -2 /*
232 * Establish I_T_L nexus instead of I_T_L_Q
233 * even on SCSI-II devices.
234 */
235 224
236/* 225/*
237 * These are "special" values for the irq and dma_channel fields of the 226 * These are "special" values for the irq and dma_channel fields of the
238 * Scsi_Host structure 227 * Scsi_Host structure
239 */ 228 */
240 229
241#define SCSI_IRQ_NONE 255
242#define DMA_NONE 255 230#define DMA_NONE 255
243#define IRQ_AUTO 254 231#define IRQ_AUTO 254
244#define DMA_AUTO 254 232#define DMA_AUTO 254
245#define PORT_AUTO 0xffff /* autoprobe io port for 53c400a */ 233#define PORT_AUTO 0xffff /* autoprobe io port for 53c400a */
246 234
235#ifndef NO_IRQ
236#define NO_IRQ 0
237#endif
238
247#define FLAG_HAS_LAST_BYTE_SENT 1 /* NCR53c81 or better */ 239#define FLAG_HAS_LAST_BYTE_SENT 1 /* NCR53c81 or better */
248#define FLAG_CHECK_LAST_BYTE_SENT 2 /* Only test once */ 240#define FLAG_CHECK_LAST_BYTE_SENT 2 /* Only test once */
249#define FLAG_NCR53C400 4 /* NCR53c400 */ 241#define FLAG_NCR53C400 4 /* NCR53c400 */
250#define FLAG_NO_PSEUDO_DMA 8 /* Inhibit DMA */ 242#define FLAG_NO_PSEUDO_DMA 8 /* Inhibit DMA */
251#define FLAG_DTC3181E 16 /* DTC3181E */ 243#define FLAG_DTC3181E 16 /* DTC3181E */
244#define FLAG_LATE_DMA_SETUP 32 /* Setup NCR before DMA H/W */
245#define FLAG_TAGGED_QUEUING 64 /* as X3T9.2 spelled it */
252 246
253#ifndef ASM 247#ifndef ASM
248
249#ifdef SUPPORT_TAGS
250struct tag_alloc {
251 DECLARE_BITMAP(allocated, MAX_TAGS);
252 int nr_allocated;
253 int queue_size;
254};
255#endif
256
254struct NCR5380_hostdata { 257struct NCR5380_hostdata {
255 NCR5380_implementation_fields; /* implementation specific */ 258 NCR5380_implementation_fields; /* implementation specific */
256 struct Scsi_Host *host; /* Host backpointer */ 259 struct Scsi_Host *host; /* Host backpointer */
@@ -263,9 +266,9 @@ struct NCR5380_hostdata {
263 volatile int dma_len; /* requested length of DMA */ 266 volatile int dma_len; /* requested length of DMA */
264#endif 267#endif
265 volatile unsigned char last_message; /* last message OUT */ 268 volatile unsigned char last_message; /* last message OUT */
266 volatile Scsi_Cmnd *connected; /* currently connected command */ 269 volatile struct scsi_cmnd *connected; /* currently connected command */
267 volatile Scsi_Cmnd *issue_queue; /* waiting to be issued */ 270 volatile struct scsi_cmnd *issue_queue; /* waiting to be issued */
268 volatile Scsi_Cmnd *disconnected_queue; /* waiting for reconnect */ 271 volatile struct scsi_cmnd *disconnected_queue; /* waiting for reconnect */
269 volatile int restart_select; /* we have disconnected, 272 volatile int restart_select; /* we have disconnected,
270 used to restart 273 used to restart
271 NCR5380_select() */ 274 NCR5380_select() */
@@ -273,19 +276,21 @@ struct NCR5380_hostdata {
273 int flags; 276 int flags;
274 unsigned long time_expires; /* in jiffies, set prior to sleeping */ 277 unsigned long time_expires; /* in jiffies, set prior to sleeping */
275 int select_time; /* timer in select for target response */ 278 int select_time; /* timer in select for target response */
276 volatile Scsi_Cmnd *selecting; 279 volatile struct scsi_cmnd *selecting;
277 struct delayed_work coroutine; /* our co-routine */ 280 struct delayed_work coroutine; /* our co-routine */
278#ifdef NCR5380_STATS
279 unsigned timebase; /* Base for time calcs */
280 long time_read[8]; /* time to do reads */
281 long time_write[8]; /* time to do writes */
282 unsigned long bytes_read[8]; /* bytes read */
283 unsigned long bytes_write[8]; /* bytes written */
284 unsigned pendingr;
285 unsigned pendingw;
286#endif
287#ifdef AUTOSENSE
288 struct scsi_eh_save ses; 281 struct scsi_eh_save ses;
282 char info[256];
283 int read_overruns; /* number of bytes to cut from a
284 * transfer to handle chip overruns */
285 int retain_dma_intr;
286 struct work_struct main_task;
287 volatile int main_running;
288#ifdef SUPPORT_TAGS
289 struct tag_alloc TagAlloc[8][8]; /* 8 targets and 8 LUNs */
290#endif
291#ifdef PSEUDO_DMA
292 unsigned spin_max_r;
293 unsigned spin_max_w;
289#endif 294#endif
290}; 295};
291 296
@@ -296,7 +301,8 @@ struct NCR5380_hostdata {
296#endif 301#endif
297 302
298#define dprintk(flg, fmt, ...) \ 303#define dprintk(flg, fmt, ...) \
299 do { if ((NDEBUG) & (flg)) pr_debug(fmt, ## __VA_ARGS__); } while (0) 304 do { if ((NDEBUG) & (flg)) \
305 printk(KERN_DEBUG fmt, ## __VA_ARGS__); } while (0)
300 306
301#if NDEBUG 307#if NDEBUG
302#define NCR5380_dprint(flg, arg) \ 308#define NCR5380_dprint(flg, arg) \
@@ -320,17 +326,9 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance);
320static irqreturn_t NCR5380_intr(int irq, void *dev_id); 326static irqreturn_t NCR5380_intr(int irq, void *dev_id);
321#endif 327#endif
322static void NCR5380_main(struct work_struct *work); 328static void NCR5380_main(struct work_struct *work);
323static void __maybe_unused NCR5380_print_options(struct Scsi_Host *instance); 329static const char *NCR5380_info(struct Scsi_Host *instance);
324static int NCR5380_abort(Scsi_Cmnd * cmd);
325static int NCR5380_bus_reset(Scsi_Cmnd * cmd);
326static int NCR5380_queue_command(struct Scsi_Host *, struct scsi_cmnd *);
327static int __maybe_unused NCR5380_show_info(struct seq_file *,
328 struct Scsi_Host *);
329static int __maybe_unused NCR5380_write_info(struct Scsi_Host *instance,
330 char *buffer, int length);
331
332static void NCR5380_reselect(struct Scsi_Host *instance); 330static void NCR5380_reselect(struct Scsi_Host *instance);
333static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd * cmd, int tag); 331static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd);
334#if defined(PSEUDO_DMA) || defined(REAL_DMA) || defined(REAL_DMA_POLL) 332#if defined(PSEUDO_DMA) || defined(REAL_DMA) || defined(REAL_DMA_POLL)
335static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data); 333static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data);
336#endif 334#endif
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 681434e2dfe9..b32e77db0c48 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -2181,7 +2181,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
2181 (fsa_dev_ptr[cid].sense_data.sense_key == 2181 (fsa_dev_ptr[cid].sense_data.sense_key ==
2182 NOT_READY)) { 2182 NOT_READY)) {
2183 switch (scsicmd->cmnd[0]) { 2183 switch (scsicmd->cmnd[0]) {
2184 case SERVICE_ACTION_IN: 2184 case SERVICE_ACTION_IN_16:
2185 if (!(dev->raw_io_interface) || 2185 if (!(dev->raw_io_interface) ||
2186 !(dev->raw_io_64) || 2186 !(dev->raw_io_64) ||
2187 ((scsicmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16)) 2187 ((scsicmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
@@ -2309,7 +2309,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
2309 scsi_sg_copy_from_buffer(scsicmd, &inq_data, sizeof(inq_data)); 2309 scsi_sg_copy_from_buffer(scsicmd, &inq_data, sizeof(inq_data));
2310 return aac_get_container_name(scsicmd); 2310 return aac_get_container_name(scsicmd);
2311 } 2311 }
2312 case SERVICE_ACTION_IN: 2312 case SERVICE_ACTION_IN_16:
2313 if (!(dev->raw_io_interface) || 2313 if (!(dev->raw_io_interface) ||
2314 !(dev->raw_io_64) || 2314 !(dev->raw_io_64) ||
2315 ((scsicmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16)) 2315 ((scsicmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index a759cb2d4b15..fdcdf9f781bc 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -462,9 +462,9 @@ static int aac_slave_configure(struct scsi_device *sdev)
462 depth = 256; 462 depth = 256;
463 else if (depth < 2) 463 else if (depth < 2)
464 depth = 2; 464 depth = 2;
465 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, depth); 465 scsi_change_queue_depth(sdev, depth);
466 } else 466 } else
467 scsi_adjust_queue_depth(sdev, 0, 1); 467 scsi_change_queue_depth(sdev, 1);
468 468
469 return 0; 469 return 0;
470} 470}
@@ -478,12 +478,8 @@ static int aac_slave_configure(struct scsi_device *sdev)
478 * total capacity and the queue depth supported by the target device. 478 * total capacity and the queue depth supported by the target device.
479 */ 479 */
480 480
481static int aac_change_queue_depth(struct scsi_device *sdev, int depth, 481static int aac_change_queue_depth(struct scsi_device *sdev, int depth)
482 int reason)
483{ 482{
484 if (reason != SCSI_QDEPTH_DEFAULT)
485 return -EOPNOTSUPP;
486
487 if (sdev->tagged_supported && (sdev->type == TYPE_DISK) && 483 if (sdev->tagged_supported && (sdev->type == TYPE_DISK) &&
488 (sdev_channel(sdev) == CONTAINER_CHANNEL)) { 484 (sdev_channel(sdev) == CONTAINER_CHANNEL)) {
489 struct scsi_device * dev; 485 struct scsi_device * dev;
@@ -504,10 +500,10 @@ static int aac_change_queue_depth(struct scsi_device *sdev, int depth,
504 depth = 256; 500 depth = 256;
505 else if (depth < 2) 501 else if (depth < 2)
506 depth = 2; 502 depth = 2;
507 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, depth); 503 return scsi_change_queue_depth(sdev, depth);
508 } else 504 }
509 scsi_adjust_queue_depth(sdev, 0, 1); 505
510 return sdev->queue_depth; 506 return scsi_change_queue_depth(sdev, 1);
511} 507}
512 508
513static ssize_t aac_show_raid_level(struct device *dev, struct device_attribute *attr, char *buf) 509static ssize_t aac_show_raid_level(struct device *dev, struct device_attribute *attr, char *buf)
@@ -555,7 +551,7 @@ static int aac_eh_abort(struct scsi_cmnd* cmd)
555 AAC_DRIVERNAME, 551 AAC_DRIVERNAME,
556 host->host_no, sdev_channel(dev), sdev_id(dev), dev->lun); 552 host->host_no, sdev_channel(dev), sdev_id(dev), dev->lun);
557 switch (cmd->cmnd[0]) { 553 switch (cmd->cmnd[0]) {
558 case SERVICE_ACTION_IN: 554 case SERVICE_ACTION_IN_16:
559 if (!(aac->raw_io_interface) || 555 if (!(aac->raw_io_interface) ||
560 !(aac->raw_io_64) || 556 !(aac->raw_io_64) ||
561 ((cmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16)) 557 ((cmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index 43761c1c46f0..6719a3390ebd 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -7706,7 +7706,7 @@ advansys_narrow_slave_configure(struct scsi_device *sdev, ASC_DVC_VAR *asc_dvc)
7706 asc_dvc->cfg->can_tagged_qng |= tid_bit; 7706 asc_dvc->cfg->can_tagged_qng |= tid_bit;
7707 asc_dvc->use_tagged_qng |= tid_bit; 7707 asc_dvc->use_tagged_qng |= tid_bit;
7708 } 7708 }
7709 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, 7709 scsi_change_queue_depth(sdev,
7710 asc_dvc->max_dvc_qng[sdev->id]); 7710 asc_dvc->max_dvc_qng[sdev->id]);
7711 } 7711 }
7712 } else { 7712 } else {
@@ -7714,7 +7714,6 @@ advansys_narrow_slave_configure(struct scsi_device *sdev, ASC_DVC_VAR *asc_dvc)
7714 asc_dvc->cfg->can_tagged_qng &= ~tid_bit; 7714 asc_dvc->cfg->can_tagged_qng &= ~tid_bit;
7715 asc_dvc->use_tagged_qng &= ~tid_bit; 7715 asc_dvc->use_tagged_qng &= ~tid_bit;
7716 } 7716 }
7717 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
7718 } 7717 }
7719 7718
7720 if ((sdev->lun == 0) && 7719 if ((sdev->lun == 0) &&
@@ -7848,12 +7847,8 @@ advansys_wide_slave_configure(struct scsi_device *sdev, ADV_DVC_VAR *adv_dvc)
7848 } 7847 }
7849 } 7848 }
7850 7849
7851 if ((adv_dvc->tagqng_able & tidmask) && sdev->tagged_supported) { 7850 if ((adv_dvc->tagqng_able & tidmask) && sdev->tagged_supported)
7852 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, 7851 scsi_change_queue_depth(sdev, adv_dvc->max_dvc_qng);
7853 adv_dvc->max_dvc_qng);
7854 } else {
7855 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
7856 }
7857} 7852}
7858 7853
7859/* 7854/*
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index e77b72f78006..2b960b326daf 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -230,7 +230,7 @@
230 * 230 *
231 * 231 *
232 ************************************************************************** 232 **************************************************************************
233 233
234 see Documentation/scsi/aha152x.txt for configuration details 234 see Documentation/scsi/aha152x.txt for configuration details
235 235
236 **************************************************************************/ 236 **************************************************************************/
@@ -279,45 +279,11 @@ static LIST_HEAD(aha152x_host_list);
279#error define AUTOCONF or SETUP0 279#error define AUTOCONF or SETUP0
280#endif 280#endif
281 281
282#if defined(AHA152X_DEBUG)
283#define DEBUG_DEFAULT debug_eh
284
285#define DPRINTK(when,msgs...) \
286 do { if(HOSTDATA(shpnt)->debug & (when)) printk(msgs); } while(0)
287
288#define DO_LOCK(flags) \
289 do { \
290 if(spin_is_locked(&QLOCK)) { \
291 DPRINTK(debug_intr, DEBUG_LEAD "(%s:%d) already locked at %s:%d\n", CMDINFO(CURRENT_SC), __func__, __LINE__, QLOCKER, QLOCKERL); \
292 } \
293 DPRINTK(debug_locking, DEBUG_LEAD "(%s:%d) locking\n", CMDINFO(CURRENT_SC), __func__, __LINE__); \
294 spin_lock_irqsave(&QLOCK,flags); \
295 DPRINTK(debug_locking, DEBUG_LEAD "(%s:%d) locked\n", CMDINFO(CURRENT_SC), __func__, __LINE__); \
296 QLOCKER=__func__; \
297 QLOCKERL=__LINE__; \
298 } while(0)
299
300#define DO_UNLOCK(flags) \
301 do { \
302 DPRINTK(debug_locking, DEBUG_LEAD "(%s:%d) unlocking (locked at %s:%d)\n", CMDINFO(CURRENT_SC), __func__, __LINE__, QLOCKER, QLOCKERL); \
303 spin_unlock_irqrestore(&QLOCK,flags); \
304 DPRINTK(debug_locking, DEBUG_LEAD "(%s:%d) unlocked\n", CMDINFO(CURRENT_SC), __func__, __LINE__); \
305 QLOCKER="(not locked)"; \
306 QLOCKERL=0; \
307 } while(0)
308
309#else
310#define DPRINTK(when,msgs...)
311#define DO_LOCK(flags) spin_lock_irqsave(&QLOCK,flags) 282#define DO_LOCK(flags) spin_lock_irqsave(&QLOCK,flags)
312#define DO_UNLOCK(flags) spin_unlock_irqrestore(&QLOCK,flags) 283#define DO_UNLOCK(flags) spin_unlock_irqrestore(&QLOCK,flags)
313#endif
314 284
315#define LEAD "(scsi%d:%d:%d) " 285#define LEAD "(scsi%d:%d:%d) "
316#define WARN_LEAD KERN_WARNING LEAD
317#define INFO_LEAD KERN_INFO LEAD 286#define INFO_LEAD KERN_INFO LEAD
318#define NOTE_LEAD KERN_NOTICE LEAD
319#define ERR_LEAD KERN_ERR LEAD
320#define DEBUG_LEAD KERN_DEBUG LEAD
321#define CMDINFO(cmd) \ 287#define CMDINFO(cmd) \
322 (cmd) ? ((cmd)->device->host->host_no) : -1, \ 288 (cmd) ? ((cmd)->device->host->host_no) : -1, \
323 (cmd) ? ((cmd)->device->id & 0x0f) : -1, \ 289 (cmd) ? ((cmd)->device->id & 0x0f) : -1, \
@@ -345,10 +311,10 @@ CMD_INC_RESID(struct scsi_cmnd *cmd, int inc)
345 311
346enum { 312enum {
347 not_issued = 0x0001, /* command not yet issued */ 313 not_issued = 0x0001, /* command not yet issued */
348 selecting = 0x0002, /* target is beeing selected */ 314 selecting = 0x0002, /* target is being selected */
349 identified = 0x0004, /* IDENTIFY was sent */ 315 identified = 0x0004, /* IDENTIFY was sent */
350 disconnected = 0x0008, /* target disconnected */ 316 disconnected = 0x0008, /* target disconnected */
351 completed = 0x0010, /* target sent COMMAND COMPLETE */ 317 completed = 0x0010, /* target sent COMMAND COMPLETE */
352 aborted = 0x0020, /* ABORT was sent */ 318 aborted = 0x0020, /* ABORT was sent */
353 resetted = 0x0040, /* BUS DEVICE RESET was sent */ 319 resetted = 0x0040, /* BUS DEVICE RESET was sent */
354 spiordy = 0x0080, /* waiting for SPIORDY to raise */ 320 spiordy = 0x0080, /* waiting for SPIORDY to raise */
@@ -396,7 +362,6 @@ static int exttrans[] = {0, 0};
396module_param_array(exttrans, int, NULL, 0); 362module_param_array(exttrans, int, NULL, 0);
397MODULE_PARM_DESC(exttrans,"use extended translation"); 363MODULE_PARM_DESC(exttrans,"use extended translation");
398 364
399#if !defined(AHA152X_DEBUG)
400static int aha152x[] = {0, 11, 7, 1, 1, 0, DELAY_DEFAULT, 0}; 365static int aha152x[] = {0, 11, 7, 1, 1, 0, DELAY_DEFAULT, 0};
401module_param_array(aha152x, int, NULL, 0); 366module_param_array(aha152x, int, NULL, 0);
402MODULE_PARM_DESC(aha152x, "parameters for first controller"); 367MODULE_PARM_DESC(aha152x, "parameters for first controller");
@@ -404,19 +369,6 @@ MODULE_PARM_DESC(aha152x, "parameters for first controller");
404static int aha152x1[] = {0, 11, 7, 1, 1, 0, DELAY_DEFAULT, 0}; 369static int aha152x1[] = {0, 11, 7, 1, 1, 0, DELAY_DEFAULT, 0};
405module_param_array(aha152x1, int, NULL, 0); 370module_param_array(aha152x1, int, NULL, 0);
406MODULE_PARM_DESC(aha152x1, "parameters for second controller"); 371MODULE_PARM_DESC(aha152x1, "parameters for second controller");
407#else
408static int debug[] = {DEBUG_DEFAULT, DEBUG_DEFAULT};
409module_param_array(debug, int, NULL, 0);
410MODULE_PARM_DESC(debug, "flags for driver debugging");
411
412static int aha152x[] = {0, 11, 7, 1, 1, 1, DELAY_DEFAULT, 0, DEBUG_DEFAULT};
413module_param_array(aha152x, int, NULL, 0);
414MODULE_PARM_DESC(aha152x, "parameters for first controller");
415
416static int aha152x1[] = {0, 11, 7, 1, 1, 1, DELAY_DEFAULT, 0, DEBUG_DEFAULT};
417module_param_array(aha152x1, int, NULL, 0);
418MODULE_PARM_DESC(aha152x1, "parameters for second controller");
419#endif /* !defined(AHA152X_DEBUG) */
420#endif /* MODULE */ 372#endif /* MODULE */
421 373
422#ifdef __ISAPNP__ 374#ifdef __ISAPNP__
@@ -446,7 +398,7 @@ static struct scsi_host_template aha152x_driver_template;
446/* 398/*
447 * internal states of the host 399 * internal states of the host
448 * 400 *
449 */ 401 */
450enum aha152x_state { 402enum aha152x_state {
451 idle=0, 403 idle=0,
452 unknown, 404 unknown,
@@ -485,24 +437,16 @@ struct aha152x_hostdata {
485 spinlock_t lock; 437 spinlock_t lock;
486 /* host lock */ 438 /* host lock */
487 439
488#if defined(AHA152X_DEBUG)
489 const char *locker;
490 /* which function has the lock */
491 int lockerl; /* where did it get it */
492
493 int debug; /* current debugging setting */
494#endif
495
496#if defined(AHA152X_STAT) 440#if defined(AHA152X_STAT)
497 int total_commands; 441 int total_commands;
498 int disconnections; 442 int disconnections;
499 int busfree_without_any_action; 443 int busfree_without_any_action;
500 int busfree_without_old_command; 444 int busfree_without_old_command;
501 int busfree_without_new_command; 445 int busfree_without_new_command;
502 int busfree_without_done_command; 446 int busfree_without_done_command;
503 int busfree_with_check_condition; 447 int busfree_with_check_condition;
504 int count[maxstate]; 448 int count[maxstate];
505 int count_trans[maxstate]; 449 int count_trans[maxstate];
506 unsigned long time[maxstate]; 450 unsigned long time[maxstate];
507#endif 451#endif
508 452
@@ -514,7 +458,7 @@ struct aha152x_hostdata {
514 int delay; /* reset out delay */ 458 int delay; /* reset out delay */
515 int ext_trans; /* extended translation enabled */ 459 int ext_trans; /* extended translation enabled */
516 460
517 int swint; /* software-interrupt was fired during detect() */ 461 int swint; /* software-interrupt was fired during detect() */
518 int service; /* bh needs to be run */ 462 int service; /* bh needs to be run */
519 int in_intr; /* bh is running */ 463 int in_intr; /* bh is running */
520 464
@@ -543,7 +487,7 @@ struct aha152x_hostdata {
543 unsigned char msgi[256]; 487 unsigned char msgi[256];
544 /* received message bytes */ 488 /* received message bytes */
545 489
546 int msgo_i, msgo_len; 490 int msgo_i, msgo_len;
547 /* number of sent bytes and length of current messages */ 491 /* number of sent bytes and length of current messages */
548 unsigned char msgo[256]; 492 unsigned char msgo[256];
549 /* pending messages */ 493 /* pending messages */
@@ -689,7 +633,6 @@ static void aha152x_error(struct Scsi_Host *shpnt, char *msg);
689static void done(struct Scsi_Host *shpnt, int error); 633static void done(struct Scsi_Host *shpnt, int error);
690 634
691/* diagnostics */ 635/* diagnostics */
692static void disp_ports(struct Scsi_Host *shpnt);
693static void show_command(Scsi_Cmnd * ptr); 636static void show_command(Scsi_Cmnd * ptr);
694static void show_queues(struct Scsi_Host *shpnt); 637static void show_queues(struct Scsi_Host *shpnt);
695static void disp_enintr(struct Scsi_Host *shpnt); 638static void disp_enintr(struct Scsi_Host *shpnt);
@@ -812,10 +755,6 @@ struct Scsi_Host *aha152x_probe_one(struct aha152x_setup *setup)
812 DELAY = setup->delay; 755 DELAY = setup->delay;
813 EXT_TRANS = setup->ext_trans; 756 EXT_TRANS = setup->ext_trans;
814 757
815#if defined(AHA152X_DEBUG)
816 HOSTDATA(shpnt)->debug = setup->debug;
817#endif
818
819 SETPORT(SCSIID, setup->scsiid << 4); 758 SETPORT(SCSIID, setup->scsiid << 4);
820 shpnt->this_id = setup->scsiid; 759 shpnt->this_id = setup->scsiid;
821 760
@@ -941,31 +880,24 @@ void aha152x_release(struct Scsi_Host *shpnt)
941 * setup controller to generate interrupts depending 880 * setup controller to generate interrupts depending
942 * on current state (lock has to be acquired) 881 * on current state (lock has to be acquired)
943 * 882 *
944 */ 883 */
945static int setup_expected_interrupts(struct Scsi_Host *shpnt) 884static int setup_expected_interrupts(struct Scsi_Host *shpnt)
946{ 885{
947 if(CURRENT_SC) { 886 if(CURRENT_SC) {
948 CURRENT_SC->SCp.phase |= 1 << 16; 887 CURRENT_SC->SCp.phase |= 1 << 16;
949 888
950 if(CURRENT_SC->SCp.phase & selecting) { 889 if(CURRENT_SC->SCp.phase & selecting) {
951 DPRINTK(debug_intr, DEBUG_LEAD "expecting: (seldo) (seltimo) (seldi)\n", CMDINFO(CURRENT_SC));
952 SETPORT(SSTAT1, SELTO); 890 SETPORT(SSTAT1, SELTO);
953 SETPORT(SIMODE0, ENSELDO | (DISCONNECTED_SC ? ENSELDI : 0)); 891 SETPORT(SIMODE0, ENSELDO | (DISCONNECTED_SC ? ENSELDI : 0));
954 SETPORT(SIMODE1, ENSELTIMO); 892 SETPORT(SIMODE1, ENSELTIMO);
955 } else { 893 } else {
956 DPRINTK(debug_intr, DEBUG_LEAD "expecting: (phase change) (busfree) %s\n", CMDINFO(CURRENT_SC), CURRENT_SC->SCp.phase & spiordy ? "(spiordy)" : "");
957 SETPORT(SIMODE0, (CURRENT_SC->SCp.phase & spiordy) ? ENSPIORDY : 0); 894 SETPORT(SIMODE0, (CURRENT_SC->SCp.phase & spiordy) ? ENSPIORDY : 0);
958 SETPORT(SIMODE1, ENPHASEMIS | ENSCSIRST | ENSCSIPERR | ENBUSFREE); 895 SETPORT(SIMODE1, ENPHASEMIS | ENSCSIRST | ENSCSIPERR | ENBUSFREE);
959 } 896 }
960 } else if(STATE==seldi) { 897 } else if(STATE==seldi) {
961 DPRINTK(debug_intr, DEBUG_LEAD "expecting: (phase change) (identify)\n", CMDINFO(CURRENT_SC));
962 SETPORT(SIMODE0, 0); 898 SETPORT(SIMODE0, 0);
963 SETPORT(SIMODE1, ENPHASEMIS | ENSCSIRST | ENSCSIPERR | ENBUSFREE); 899 SETPORT(SIMODE1, ENPHASEMIS | ENSCSIRST | ENSCSIPERR | ENBUSFREE);
964 } else { 900 } else {
965 DPRINTK(debug_intr, DEBUG_LEAD "expecting: %s %s\n",
966 CMDINFO(CURRENT_SC),
967 DISCONNECTED_SC ? "(reselection)" : "",
968 ISSUE_SC ? "(busfree)" : "");
969 SETPORT(SIMODE0, DISCONNECTED_SC ? ENSELDI : 0); 901 SETPORT(SIMODE0, DISCONNECTED_SC ? ENSELDI : 0);
970 SETPORT(SIMODE1, ENSCSIRST | ( (ISSUE_SC||DONE_SC) ? ENBUSFREE : 0)); 902 SETPORT(SIMODE1, ENSCSIRST | ( (ISSUE_SC||DONE_SC) ? ENBUSFREE : 0));
971 } 903 }
@@ -977,7 +909,7 @@ static int setup_expected_interrupts(struct Scsi_Host *shpnt)
977} 909}
978 910
979 911
980/* 912/*
981 * Queue a command and setup interrupts for a free bus. 913 * Queue a command and setup interrupts for a free bus.
982 */ 914 */
983static int aha152x_internal_queue(Scsi_Cmnd *SCpnt, struct completion *complete, 915static int aha152x_internal_queue(Scsi_Cmnd *SCpnt, struct completion *complete,
@@ -986,15 +918,6 @@ static int aha152x_internal_queue(Scsi_Cmnd *SCpnt, struct completion *complete,
986 struct Scsi_Host *shpnt = SCpnt->device->host; 918 struct Scsi_Host *shpnt = SCpnt->device->host;
987 unsigned long flags; 919 unsigned long flags;
988 920
989#if defined(AHA152X_DEBUG)
990 if (HOSTDATA(shpnt)->debug & debug_queue) {
991 printk(INFO_LEAD "queue: %p; cmd_len=%d pieces=%d size=%u cmnd=",
992 CMDINFO(SCpnt), SCpnt, SCpnt->cmd_len,
993 scsi_sg_count(SCpnt), scsi_bufflen(SCpnt));
994 __scsi_print_command(SCpnt->cmnd);
995 }
996#endif
997
998 SCpnt->scsi_done = done; 921 SCpnt->scsi_done = done;
999 SCpnt->SCp.phase = not_issued | phase; 922 SCpnt->SCp.phase = not_issued | phase;
1000 SCpnt->SCp.Status = 0x1; /* Ilegal status by SCSI standard */ 923 SCpnt->SCp.Status = 0x1; /* Ilegal status by SCSI standard */
@@ -1004,13 +927,13 @@ static int aha152x_internal_queue(Scsi_Cmnd *SCpnt, struct completion *complete,
1004 927
1005 if(SCpnt->SCp.phase & (resetting|check_condition)) { 928 if(SCpnt->SCp.phase & (resetting|check_condition)) {
1006 if (!SCpnt->host_scribble || SCSEM(SCpnt) || SCNEXT(SCpnt)) { 929 if (!SCpnt->host_scribble || SCSEM(SCpnt) || SCNEXT(SCpnt)) {
1007 printk(ERR_LEAD "cannot reuse command\n", CMDINFO(SCpnt)); 930 scmd_printk(KERN_ERR, SCpnt, "cannot reuse command\n");
1008 return FAILED; 931 return FAILED;
1009 } 932 }
1010 } else { 933 } else {
1011 SCpnt->host_scribble = kmalloc(sizeof(struct aha152x_scdata), GFP_ATOMIC); 934 SCpnt->host_scribble = kmalloc(sizeof(struct aha152x_scdata), GFP_ATOMIC);
1012 if(!SCpnt->host_scribble) { 935 if(!SCpnt->host_scribble) {
1013 printk(ERR_LEAD "allocation failed\n", CMDINFO(SCpnt)); 936 scmd_printk(KERN_ERR, SCpnt, "allocation failed\n");
1014 return FAILED; 937 return FAILED;
1015 } 938 }
1016 } 939 }
@@ -1066,15 +989,6 @@ static int aha152x_internal_queue(Scsi_Cmnd *SCpnt, struct completion *complete,
1066 */ 989 */
1067static int aha152x_queue_lck(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *)) 990static int aha152x_queue_lck(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *))
1068{ 991{
1069#if 0
1070 if(*SCpnt->cmnd == REQUEST_SENSE) {
1071 SCpnt->result = 0;
1072 done(SCpnt);
1073
1074 return 0;
1075 }
1076#endif
1077
1078 return aha152x_internal_queue(SCpnt, NULL, 0, done); 992 return aha152x_internal_queue(SCpnt, NULL, 0, done);
1079} 993}
1080 994
@@ -1082,15 +996,10 @@ static DEF_SCSI_QCMD(aha152x_queue)
1082 996
1083 997
1084/* 998/*
1085 *
1086 * 999 *
1087 */ 1000 */
1088static void reset_done(Scsi_Cmnd *SCpnt) 1001static void reset_done(Scsi_Cmnd *SCpnt)
1089{ 1002{
1090#if 0
1091 struct Scsi_Host *shpnt = SCpnt->host;
1092 DPRINTK(debug_eh, INFO_LEAD "reset_done called\n", CMDINFO(SCpnt));
1093#endif
1094 if(SCSEM(SCpnt)) { 1003 if(SCSEM(SCpnt)) {
1095 complete(SCSEM(SCpnt)); 1004 complete(SCSEM(SCpnt));
1096 } else { 1005 } else {
@@ -1108,20 +1017,11 @@ static int aha152x_abort(Scsi_Cmnd *SCpnt)
1108 Scsi_Cmnd *ptr; 1017 Scsi_Cmnd *ptr;
1109 unsigned long flags; 1018 unsigned long flags;
1110 1019
1111#if defined(AHA152X_DEBUG)
1112 if(HOSTDATA(shpnt)->debug & debug_eh) {
1113 printk(DEBUG_LEAD "abort(%p)", CMDINFO(SCpnt), SCpnt);
1114 show_queues(shpnt);
1115 }
1116#endif
1117
1118 DO_LOCK(flags); 1020 DO_LOCK(flags);
1119 1021
1120 ptr=remove_SC(&ISSUE_SC, SCpnt); 1022 ptr=remove_SC(&ISSUE_SC, SCpnt);
1121 1023
1122 if(ptr) { 1024 if(ptr) {
1123 DPRINTK(debug_eh, DEBUG_LEAD "not yet issued - SUCCESS\n", CMDINFO(SCpnt));
1124
1125 HOSTDATA(shpnt)->commands--; 1025 HOSTDATA(shpnt)->commands--;
1126 if (!HOSTDATA(shpnt)->commands) 1026 if (!HOSTDATA(shpnt)->commands)
1127 SETPORT(PORTA, 0); 1027 SETPORT(PORTA, 0);
@@ -1131,7 +1031,7 @@ static int aha152x_abort(Scsi_Cmnd *SCpnt)
1131 SCpnt->host_scribble=NULL; 1031 SCpnt->host_scribble=NULL;
1132 1032
1133 return SUCCESS; 1033 return SUCCESS;
1134 } 1034 }
1135 1035
1136 DO_UNLOCK(flags); 1036 DO_UNLOCK(flags);
1137 1037
@@ -1142,7 +1042,8 @@ static int aha152x_abort(Scsi_Cmnd *SCpnt)
1142 * 1042 *
1143 */ 1043 */
1144 1044
1145 printk(ERR_LEAD "cannot abort running or disconnected command\n", CMDINFO(SCpnt)); 1045 scmd_printk(KERN_ERR, SCpnt,
1046 "cannot abort running or disconnected command\n");
1146 1047
1147 return FAILED; 1048 return FAILED;
1148} 1049}
@@ -1160,15 +1061,8 @@ static int aha152x_device_reset(Scsi_Cmnd * SCpnt)
1160 unsigned long flags; 1061 unsigned long flags;
1161 unsigned long timeleft; 1062 unsigned long timeleft;
1162 1063
1163#if defined(AHA152X_DEBUG)
1164 if(HOSTDATA(shpnt)->debug & debug_eh) {
1165 printk(INFO_LEAD "aha152x_device_reset(%p)", CMDINFO(SCpnt), SCpnt);
1166 show_queues(shpnt);
1167 }
1168#endif
1169
1170 if(CURRENT_SC==SCpnt) { 1064 if(CURRENT_SC==SCpnt) {
1171 printk(ERR_LEAD "cannot reset current device\n", CMDINFO(SCpnt)); 1065 scmd_printk(KERN_ERR, SCpnt, "cannot reset current device\n");
1172 return FAILED; 1066 return FAILED;
1173 } 1067 }
1174 1068
@@ -1208,7 +1102,7 @@ static int aha152x_device_reset(Scsi_Cmnd * SCpnt)
1208 } else if(disconnected) { 1102 } else if(disconnected) {
1209 append_SC(&DISCONNECTED_SC, SCpnt); 1103 append_SC(&DISCONNECTED_SC, SCpnt);
1210 } 1104 }
1211 1105
1212 ret = FAILED; 1106 ret = FAILED;
1213 } 1107 }
1214 1108
@@ -1227,12 +1121,12 @@ static void free_hard_reset_SCs(struct Scsi_Host *shpnt, Scsi_Cmnd **SCs)
1227 if(SCDATA(ptr)) { 1121 if(SCDATA(ptr)) {
1228 next = SCNEXT(ptr); 1122 next = SCNEXT(ptr);
1229 } else { 1123 } else {
1230 printk(DEBUG_LEAD "queue corrupted at %p\n", CMDINFO(ptr), ptr); 1124 scmd_printk(KERN_DEBUG, ptr,
1125 "queue corrupted at %p\n", ptr);
1231 next = NULL; 1126 next = NULL;
1232 } 1127 }
1233 1128
1234 if (!ptr->device->soft_reset) { 1129 if (!ptr->device->soft_reset) {
1235 DPRINTK(debug_eh, DEBUG_LEAD "disconnected command %p removed\n", CMDINFO(ptr), ptr);
1236 remove_SC(SCs, ptr); 1130 remove_SC(SCs, ptr);
1237 HOSTDATA(shpnt)->commands--; 1131 HOSTDATA(shpnt)->commands--;
1238 kfree(ptr->host_scribble); 1132 kfree(ptr->host_scribble);
@@ -1253,25 +1147,14 @@ static int aha152x_bus_reset_host(struct Scsi_Host *shpnt)
1253 1147
1254 DO_LOCK(flags); 1148 DO_LOCK(flags);
1255 1149
1256#if defined(AHA152X_DEBUG)
1257 if(HOSTDATA(shpnt)->debug & debug_eh) {
1258 printk(KERN_DEBUG "scsi%d: bus reset", shpnt->host_no);
1259 show_queues(shpnt);
1260 }
1261#endif
1262
1263 free_hard_reset_SCs(shpnt, &ISSUE_SC); 1150 free_hard_reset_SCs(shpnt, &ISSUE_SC);
1264 free_hard_reset_SCs(shpnt, &DISCONNECTED_SC); 1151 free_hard_reset_SCs(shpnt, &DISCONNECTED_SC);
1265 1152
1266 DPRINTK(debug_eh, KERN_DEBUG "scsi%d: resetting bus\n", shpnt->host_no);
1267
1268 SETPORT(SCSISEQ, SCSIRSTO); 1153 SETPORT(SCSISEQ, SCSIRSTO);
1269 mdelay(256); 1154 mdelay(256);
1270 SETPORT(SCSISEQ, 0); 1155 SETPORT(SCSISEQ, 0);
1271 mdelay(DELAY); 1156 mdelay(DELAY);
1272 1157
1273 DPRINTK(debug_eh, KERN_DEBUG "scsi%d: bus resetted\n", shpnt->host_no);
1274
1275 setup_expected_interrupts(shpnt); 1158 setup_expected_interrupts(shpnt);
1276 if(HOSTDATA(shpnt)->commands==0) 1159 if(HOSTDATA(shpnt)->commands==0)
1277 SETPORT(PORTA, 0); 1160 SETPORT(PORTA, 0);
@@ -1333,11 +1216,7 @@ static void reset_ports(struct Scsi_Host *shpnt)
1333 */ 1216 */
1334int aha152x_host_reset_host(struct Scsi_Host *shpnt) 1217int aha152x_host_reset_host(struct Scsi_Host *shpnt)
1335{ 1218{
1336 DPRINTK(debug_eh, KERN_DEBUG "scsi%d: host reset\n", shpnt->host_no);
1337
1338 aha152x_bus_reset_host(shpnt); 1219 aha152x_bus_reset_host(shpnt);
1339
1340 DPRINTK(debug_eh, KERN_DEBUG "scsi%d: resetting ports\n", shpnt->host_no);
1341 reset_ports(shpnt); 1220 reset_ports(shpnt);
1342 1221
1343 return SUCCESS; 1222 return SUCCESS;
@@ -1345,7 +1224,7 @@ int aha152x_host_reset_host(struct Scsi_Host *shpnt)
1345 1224
1346/* 1225/*
1347 * Reset the host (bus and controller) 1226 * Reset the host (bus and controller)
1348 * 1227 *
1349 */ 1228 */
1350static int aha152x_host_reset(Scsi_Cmnd *SCpnt) 1229static int aha152x_host_reset(Scsi_Cmnd *SCpnt)
1351{ 1230{
@@ -1411,7 +1290,9 @@ static void done(struct Scsi_Host *shpnt, int error)
1411{ 1290{
1412 if (CURRENT_SC) { 1291 if (CURRENT_SC) {
1413 if(DONE_SC) 1292 if(DONE_SC)
1414 printk(ERR_LEAD "there's already a completed command %p - will cause abort\n", CMDINFO(CURRENT_SC), DONE_SC); 1293 scmd_printk(KERN_ERR, CURRENT_SC,
1294 "there's already a completed command %p "
1295 "- will cause abort\n", DONE_SC);
1415 1296
1416 DONE_SC = CURRENT_SC; 1297 DONE_SC = CURRENT_SC;
1417 CURRENT_SC = NULL; 1298 CURRENT_SC = NULL;
@@ -1466,7 +1347,7 @@ static irqreturn_t intr(int irqno, void *dev_id)
1466 return IRQ_NONE; 1347 return IRQ_NONE;
1467 1348
1468 if( TESTLO(DMASTAT, INTSTAT) ) 1349 if( TESTLO(DMASTAT, INTSTAT) )
1469 return IRQ_NONE; 1350 return IRQ_NONE;
1470 1351
1471 /* no more interrupts from the controller, while we're busy. 1352 /* no more interrupts from the controller, while we're busy.
1472 INTEN is restored by the BH handler */ 1353 INTEN is restored by the BH handler */
@@ -1501,7 +1382,7 @@ static void busfree_run(struct Scsi_Host *shpnt)
1501 SETPORT(SXFRCTL0, CH1); 1382 SETPORT(SXFRCTL0, CH1);
1502 1383
1503 SETPORT(SSTAT1, CLRBUSFREE); 1384 SETPORT(SSTAT1, CLRBUSFREE);
1504 1385
1505 if(CURRENT_SC) { 1386 if(CURRENT_SC) {
1506#if defined(AHA152X_STAT) 1387#if defined(AHA152X_STAT)
1507 action++; 1388 action++;
@@ -1513,19 +1394,13 @@ static void busfree_run(struct Scsi_Host *shpnt)
1513 done(shpnt, (CURRENT_SC->SCp.Status & 0xff) | ((CURRENT_SC->SCp.Message & 0xff) << 8) | (DID_OK << 16)); 1394 done(shpnt, (CURRENT_SC->SCp.Status & 0xff) | ((CURRENT_SC->SCp.Message & 0xff) << 8) | (DID_OK << 16));
1514 1395
1515 } else if(CURRENT_SC->SCp.phase & aborted) { 1396 } else if(CURRENT_SC->SCp.phase & aborted) {
1516 DPRINTK(debug_eh, DEBUG_LEAD "ABORT sent\n", CMDINFO(CURRENT_SC));
1517 done(shpnt, (CURRENT_SC->SCp.Status & 0xff) | ((CURRENT_SC->SCp.Message & 0xff) << 8) | (DID_ABORT << 16)); 1397 done(shpnt, (CURRENT_SC->SCp.Status & 0xff) | ((CURRENT_SC->SCp.Message & 0xff) << 8) | (DID_ABORT << 16));
1518 1398
1519 } else if(CURRENT_SC->SCp.phase & resetted) { 1399 } else if(CURRENT_SC->SCp.phase & resetted) {
1520 DPRINTK(debug_eh, DEBUG_LEAD "BUS DEVICE RESET sent\n", CMDINFO(CURRENT_SC));
1521 done(shpnt, (CURRENT_SC->SCp.Status & 0xff) | ((CURRENT_SC->SCp.Message & 0xff) << 8) | (DID_RESET << 16)); 1400 done(shpnt, (CURRENT_SC->SCp.Status & 0xff) | ((CURRENT_SC->SCp.Message & 0xff) << 8) | (DID_RESET << 16));
1522 1401
1523 } else if(CURRENT_SC->SCp.phase & disconnected) { 1402 } else if(CURRENT_SC->SCp.phase & disconnected) {
1524 /* target sent DISCONNECT */ 1403 /* target sent DISCONNECT */
1525 DPRINTK(debug_selection, DEBUG_LEAD "target disconnected at %d/%d\n",
1526 CMDINFO(CURRENT_SC),
1527 scsi_get_resid(CURRENT_SC),
1528 scsi_bufflen(CURRENT_SC));
1529#if defined(AHA152X_STAT) 1404#if defined(AHA152X_STAT)
1530 HOSTDATA(shpnt)->disconnections++; 1405 HOSTDATA(shpnt)->disconnections++;
1531#endif 1406#endif
@@ -1553,13 +1428,6 @@ static void busfree_run(struct Scsi_Host *shpnt)
1553 struct scsi_cmnd *cmd = HOSTDATA(shpnt)->done_SC; 1428 struct scsi_cmnd *cmd = HOSTDATA(shpnt)->done_SC;
1554 struct aha152x_scdata *sc = SCDATA(cmd); 1429 struct aha152x_scdata *sc = SCDATA(cmd);
1555 1430
1556#if 0
1557 if(HOSTDATA(shpnt)->debug & debug_eh) {
1558 printk(ERR_LEAD "received sense: ", CMDINFO(DONE_SC));
1559 scsi_print_sense("bh", DONE_SC);
1560 }
1561#endif
1562
1563 scsi_eh_restore_cmnd(cmd, &sc->ses); 1431 scsi_eh_restore_cmnd(cmd, &sc->ses);
1564 1432
1565 cmd->SCp.Status = SAM_STAT_CHECK_CONDITION; 1433 cmd->SCp.Status = SAM_STAT_CHECK_CONDITION;
@@ -1571,17 +1439,11 @@ static void busfree_run(struct Scsi_Host *shpnt)
1571#if defined(AHA152X_STAT) 1439#if defined(AHA152X_STAT)
1572 HOSTDATA(shpnt)->busfree_with_check_condition++; 1440 HOSTDATA(shpnt)->busfree_with_check_condition++;
1573#endif 1441#endif
1574#if 0
1575 DPRINTK(debug_eh, ERR_LEAD "CHECK CONDITION found\n", CMDINFO(DONE_SC));
1576#endif
1577 1442
1578 if(!(DONE_SC->SCp.phase & not_issued)) { 1443 if(!(DONE_SC->SCp.phase & not_issued)) {
1579 struct aha152x_scdata *sc; 1444 struct aha152x_scdata *sc;
1580 Scsi_Cmnd *ptr = DONE_SC; 1445 Scsi_Cmnd *ptr = DONE_SC;
1581 DONE_SC=NULL; 1446 DONE_SC=NULL;
1582#if 0
1583 DPRINTK(debug_eh, ERR_LEAD "requesting sense\n", CMDINFO(ptr));
1584#endif
1585 1447
1586 sc = SCDATA(ptr); 1448 sc = SCDATA(ptr);
1587 /* It was allocated in aha152x_internal_queue? */ 1449 /* It was allocated in aha152x_internal_queue? */
@@ -1591,19 +1453,10 @@ static void busfree_run(struct Scsi_Host *shpnt)
1591 DO_UNLOCK(flags); 1453 DO_UNLOCK(flags);
1592 aha152x_internal_queue(ptr, NULL, check_condition, ptr->scsi_done); 1454 aha152x_internal_queue(ptr, NULL, check_condition, ptr->scsi_done);
1593 DO_LOCK(flags); 1455 DO_LOCK(flags);
1594#if 0
1595 } else {
1596 DPRINTK(debug_eh, ERR_LEAD "command not issued - CHECK CONDITION ignored\n", CMDINFO(DONE_SC));
1597#endif
1598 } 1456 }
1599 } 1457 }
1600 1458
1601 if(DONE_SC && DONE_SC->scsi_done) { 1459 if(DONE_SC && DONE_SC->scsi_done) {
1602#if defined(AHA152X_DEBUG)
1603 int hostno=DONE_SC->device->host->host_no;
1604 int id=DONE_SC->device->id & 0xf;
1605 int lun=((u8)DONE_SC->device->lun) & 0x7;
1606#endif
1607 Scsi_Cmnd *ptr = DONE_SC; 1460 Scsi_Cmnd *ptr = DONE_SC;
1608 DONE_SC=NULL; 1461 DONE_SC=NULL;
1609 1462
@@ -1618,9 +1471,7 @@ static void busfree_run(struct Scsi_Host *shpnt)
1618 } 1471 }
1619 1472
1620 DO_UNLOCK(flags); 1473 DO_UNLOCK(flags);
1621 DPRINTK(debug_done, DEBUG_LEAD "calling scsi_done(%p)\n", hostno, id, lun, ptr); 1474 ptr->scsi_done(ptr);
1622 ptr->scsi_done(ptr);
1623 DPRINTK(debug_done, DEBUG_LEAD "scsi_done(%p) returned\n", hostno, id, lun, ptr);
1624 DO_LOCK(flags); 1475 DO_LOCK(flags);
1625 } 1476 }
1626 1477
@@ -1640,9 +1491,7 @@ static void busfree_run(struct Scsi_Host *shpnt)
1640#if defined(AHA152X_STAT) 1491#if defined(AHA152X_STAT)
1641 action++; 1492 action++;
1642#endif 1493#endif
1643 CURRENT_SC->SCp.phase |= selecting; 1494 CURRENT_SC->SCp.phase |= selecting;
1644
1645 DPRINTK(debug_selection, DEBUG_LEAD "selecting target\n", CMDINFO(CURRENT_SC));
1646 1495
1647 /* clear selection timeout */ 1496 /* clear selection timeout */
1648 SETPORT(SSTAT1, SELTO); 1497 SETPORT(SSTAT1, SELTO);
@@ -1674,18 +1523,19 @@ static void seldo_run(struct Scsi_Host *shpnt)
1674 SETPORT(SSTAT1, CLRBUSFREE); 1523 SETPORT(SSTAT1, CLRBUSFREE);
1675 SETPORT(SSTAT1, CLRPHASECHG); 1524 SETPORT(SSTAT1, CLRPHASECHG);
1676 1525
1677 CURRENT_SC->SCp.phase &= ~(selecting|not_issued); 1526 CURRENT_SC->SCp.phase &= ~(selecting|not_issued);
1678 1527
1679 SETPORT(SCSISEQ, 0); 1528 SETPORT(SCSISEQ, 0);
1680 1529
1681 if (TESTLO(SSTAT0, SELDO)) { 1530 if (TESTLO(SSTAT0, SELDO)) {
1682 printk(ERR_LEAD "aha152x: passing bus free condition\n", CMDINFO(CURRENT_SC)); 1531 scmd_printk(KERN_ERR, CURRENT_SC,
1532 "aha152x: passing bus free condition\n");
1683 done(shpnt, DID_NO_CONNECT << 16); 1533 done(shpnt, DID_NO_CONNECT << 16);
1684 return; 1534 return;
1685 } 1535 }
1686 1536
1687 SETPORT(SSTAT0, CLRSELDO); 1537 SETPORT(SSTAT0, CLRSELDO);
1688 1538
1689 ADDMSGO(IDENTIFY(RECONNECT, CURRENT_SC->device->lun)); 1539 ADDMSGO(IDENTIFY(RECONNECT, CURRENT_SC->device->lun));
1690 1540
1691 if (CURRENT_SC->SCp.phase & aborting) { 1541 if (CURRENT_SC->SCp.phase & aborting) {
@@ -1693,7 +1543,7 @@ static void seldo_run(struct Scsi_Host *shpnt)
1693 } else if (CURRENT_SC->SCp.phase & resetting) { 1543 } else if (CURRENT_SC->SCp.phase & resetting) {
1694 ADDMSGO(BUS_DEVICE_RESET); 1544 ADDMSGO(BUS_DEVICE_RESET);
1695 } else if (SYNCNEG==0 && SYNCHRONOUS) { 1545 } else if (SYNCNEG==0 && SYNCHRONOUS) {
1696 CURRENT_SC->SCp.phase |= syncneg; 1546 CURRENT_SC->SCp.phase |= syncneg;
1697 MSGOLEN += spi_populate_sync_msg(&MSGO(MSGOLEN), 50, 8); 1547 MSGOLEN += spi_populate_sync_msg(&MSGO(MSGOLEN), 50, 8);
1698 SYNCNEG=1; /* negotiation in progress */ 1548 SYNCNEG=1; /* negotiation in progress */
1699 } 1549 }
@@ -1708,29 +1558,21 @@ static void seldo_run(struct Scsi_Host *shpnt)
1708 */ 1558 */
1709static void selto_run(struct Scsi_Host *shpnt) 1559static void selto_run(struct Scsi_Host *shpnt)
1710{ 1560{
1711 SETPORT(SCSISEQ, 0); 1561 SETPORT(SCSISEQ, 0);
1712 SETPORT(SSTAT1, CLRSELTIMO); 1562 SETPORT(SSTAT1, CLRSELTIMO);
1713 1563
1714 DPRINTK(debug_selection, DEBUG_LEAD "selection timeout\n", CMDINFO(CURRENT_SC)); 1564 if (!CURRENT_SC)
1715
1716 if(!CURRENT_SC) {
1717 DPRINTK(debug_selection, DEBUG_LEAD "!CURRENT_SC\n", CMDINFO(CURRENT_SC));
1718 return; 1565 return;
1719 }
1720 1566
1721 CURRENT_SC->SCp.phase &= ~selecting; 1567 CURRENT_SC->SCp.phase &= ~selecting;
1722 1568
1723 if (CURRENT_SC->SCp.phase & aborted) { 1569 if (CURRENT_SC->SCp.phase & aborted)
1724 DPRINTK(debug_selection, DEBUG_LEAD "aborted\n", CMDINFO(CURRENT_SC));
1725 done(shpnt, DID_ABORT << 16); 1570 done(shpnt, DID_ABORT << 16);
1726 } else if (TESTLO(SSTAT0, SELINGO)) { 1571 else if (TESTLO(SSTAT0, SELINGO))
1727 DPRINTK(debug_selection, DEBUG_LEAD "arbitration not won\n", CMDINFO(CURRENT_SC));
1728 done(shpnt, DID_BUS_BUSY << 16); 1572 done(shpnt, DID_BUS_BUSY << 16);
1729 } else { 1573 else
1730 /* ARBITRATION won, but SELECTION failed */ 1574 /* ARBITRATION won, but SELECTION failed */
1731 DPRINTK(debug_selection, DEBUG_LEAD "selection failed\n", CMDINFO(CURRENT_SC));
1732 done(shpnt, DID_NO_CONNECT << 16); 1575 done(shpnt, DID_NO_CONNECT << 16);
1733 }
1734} 1576}
1735 1577
1736/* 1578/*
@@ -1753,9 +1595,8 @@ static void seldi_run(struct Scsi_Host *shpnt)
1753 1595
1754 if(CURRENT_SC) { 1596 if(CURRENT_SC) {
1755 if(!(CURRENT_SC->SCp.phase & not_issued)) 1597 if(!(CURRENT_SC->SCp.phase & not_issued))
1756 printk(ERR_LEAD "command should not have been issued yet\n", CMDINFO(CURRENT_SC)); 1598 scmd_printk(KERN_ERR, CURRENT_SC,
1757 1599 "command should not have been issued yet\n");
1758 DPRINTK(debug_selection, ERR_LEAD "command requeued - reselection\n", CMDINFO(CURRENT_SC));
1759 1600
1760 DO_LOCK(flags); 1601 DO_LOCK(flags);
1761 append_SC(&ISSUE_SC, CURRENT_SC); 1602 append_SC(&ISSUE_SC, CURRENT_SC);
@@ -1764,17 +1605,16 @@ static void seldi_run(struct Scsi_Host *shpnt)
1764 CURRENT_SC = NULL; 1605 CURRENT_SC = NULL;
1765 } 1606 }
1766 1607
1767 if(!DISCONNECTED_SC) { 1608 if (!DISCONNECTED_SC)
1768 DPRINTK(debug_selection, DEBUG_LEAD "unexpected SELDI ", CMDINFO(CURRENT_SC));
1769 return; 1609 return;
1770 }
1771 1610
1772 RECONN_TARGET=-1; 1611 RECONN_TARGET=-1;
1773 1612
1774 selid = GETPORT(SELID) & ~(1 << shpnt->this_id); 1613 selid = GETPORT(SELID) & ~(1 << shpnt->this_id);
1775 1614
1776 if (selid==0) { 1615 if (selid==0) {
1777 printk("aha152x%d: target id unknown (%02x)\n", HOSTNO, selid); 1616 shost_printk(KERN_INFO, shpnt,
1617 "target id unknown (%02x)\n", selid);
1778 return; 1618 return;
1779 } 1619 }
1780 1620
@@ -1782,8 +1622,8 @@ static void seldi_run(struct Scsi_Host *shpnt)
1782 ; 1622 ;
1783 1623
1784 if(selid & ~(1 << target)) { 1624 if(selid & ~(1 << target)) {
1785 printk("aha152x%d: multiple targets reconnected (%02x)\n", 1625 shost_printk(KERN_INFO, shpnt,
1786 HOSTNO, selid); 1626 "multiple targets reconnected (%02x)\n", selid);
1787 } 1627 }
1788 1628
1789 1629
@@ -1793,7 +1633,6 @@ static void seldi_run(struct Scsi_Host *shpnt)
1793 SETRATE(HOSTDATA(shpnt)->syncrate[target]); 1633 SETRATE(HOSTDATA(shpnt)->syncrate[target]);
1794 1634
1795 RECONN_TARGET=target; 1635 RECONN_TARGET=target;
1796 DPRINTK(debug_selection, DEBUG_LEAD "target %d reselected (%02x).\n", CMDINFO(CURRENT_SC), target, selid);
1797} 1636}
1798 1637
1799/* 1638/*
@@ -1817,31 +1656,24 @@ static void msgi_run(struct Scsi_Host *shpnt)
1817 if(sstat1 & (PHASECHG|PHASEMIS|BUSFREE) || !(sstat1 & REQINIT)) 1656 if(sstat1 & (PHASECHG|PHASEMIS|BUSFREE) || !(sstat1 & REQINIT))
1818 return; 1657 return;
1819 1658
1820 if(TESTLO(SSTAT0,SPIORDY)) { 1659 if (TESTLO(SSTAT0, SPIORDY))
1821 DPRINTK(debug_msgi, DEBUG_LEAD "!SPIORDY\n", CMDINFO(CURRENT_SC));
1822 return; 1660 return;
1823 }
1824 1661
1825 ADDMSGI(GETPORT(SCSIDAT)); 1662 ADDMSGI(GETPORT(SCSIDAT));
1826 1663
1827#if defined(AHA152X_DEBUG)
1828 if (HOSTDATA(shpnt)->debug & debug_msgi) {
1829 printk(INFO_LEAD "inbound message %02x ", CMDINFO(CURRENT_SC), MSGI(0));
1830 spi_print_msg(&MSGI(0));
1831 printk("\n");
1832 }
1833#endif
1834
1835 if(!CURRENT_SC) { 1664 if(!CURRENT_SC) {
1836 if(LASTSTATE!=seldi) { 1665 if(LASTSTATE!=seldi) {
1837 printk(KERN_ERR "aha152x%d: message in w/o current command not after reselection\n", HOSTNO); 1666 shost_printk(KERN_ERR, shpnt,
1667 "message in w/o current command"
1668 " not after reselection\n");
1838 } 1669 }
1839 1670
1840 /* 1671 /*
1841 * Handle reselection 1672 * Handle reselection
1842 */ 1673 */
1843 if(!(MSGI(0) & IDENTIFY_BASE)) { 1674 if(!(MSGI(0) & IDENTIFY_BASE)) {
1844 printk(KERN_ERR "aha152x%d: target didn't identify after reselection\n", HOSTNO); 1675 shost_printk(KERN_ERR, shpnt,
1676 "target didn't identify after reselection\n");
1845 continue; 1677 continue;
1846 } 1678 }
1847 1679
@@ -1849,12 +1681,13 @@ static void msgi_run(struct Scsi_Host *shpnt)
1849 1681
1850 if (!CURRENT_SC) { 1682 if (!CURRENT_SC) {
1851 show_queues(shpnt); 1683 show_queues(shpnt);
1852 printk(KERN_ERR "aha152x%d: no disconnected command for target %d/%d\n", HOSTNO, RECONN_TARGET, MSGI(0) & 0x3f); 1684 shost_printk(KERN_ERR, shpnt,
1685 "no disconnected command"
1686 " for target %d/%d\n",
1687 RECONN_TARGET, MSGI(0) & 0x3f);
1853 continue; 1688 continue;
1854 } 1689 }
1855 1690
1856 DPRINTK(debug_msgi, DEBUG_LEAD "target reconnected\n", CMDINFO(CURRENT_SC));
1857
1858 CURRENT_SC->SCp.Message = MSGI(0); 1691 CURRENT_SC->SCp.Message = MSGI(0);
1859 CURRENT_SC->SCp.phase &= ~disconnected; 1692 CURRENT_SC->SCp.phase &= ~disconnected;
1860 1693
@@ -1862,31 +1695,32 @@ static void msgi_run(struct Scsi_Host *shpnt)
1862 1695
1863 /* next message if any */ 1696 /* next message if any */
1864 continue; 1697 continue;
1865 } 1698 }
1866 1699
1867 CURRENT_SC->SCp.Message = MSGI(0); 1700 CURRENT_SC->SCp.Message = MSGI(0);
1868 1701
1869 switch (MSGI(0)) { 1702 switch (MSGI(0)) {
1870 case DISCONNECT: 1703 case DISCONNECT:
1871 if (!RECONNECT) 1704 if (!RECONNECT)
1872 printk(WARN_LEAD "target was not allowed to disconnect\n", CMDINFO(CURRENT_SC)); 1705 scmd_printk(KERN_WARNING, CURRENT_SC,
1706 "target was not allowed to disconnect\n");
1873 1707
1874 CURRENT_SC->SCp.phase |= disconnected; 1708 CURRENT_SC->SCp.phase |= disconnected;
1875 break; 1709 break;
1876 1710
1877 case COMMAND_COMPLETE: 1711 case COMMAND_COMPLETE:
1878 if(CURRENT_SC->SCp.phase & completed)
1879 DPRINTK(debug_msgi, DEBUG_LEAD "again COMMAND COMPLETE\n", CMDINFO(CURRENT_SC));
1880
1881 CURRENT_SC->SCp.phase |= completed; 1712 CURRENT_SC->SCp.phase |= completed;
1882 break; 1713 break;
1883 1714
1884 case MESSAGE_REJECT: 1715 case MESSAGE_REJECT:
1885 if (SYNCNEG==1) { 1716 if (SYNCNEG==1) {
1886 printk(INFO_LEAD "Synchronous Data Transfer Request was rejected\n", CMDINFO(CURRENT_SC)); 1717 scmd_printk(KERN_INFO, CURRENT_SC,
1718 "Synchronous Data Transfer Request"
1719 " was rejected\n");
1887 SYNCNEG=2; /* negotiation completed */ 1720 SYNCNEG=2; /* negotiation completed */
1888 } else 1721 } else
1889 printk(INFO_LEAD "inbound message (MESSAGE REJECT)\n", CMDINFO(CURRENT_SC)); 1722 scmd_printk(KERN_INFO, CURRENT_SC,
1723 "inbound message (MESSAGE REJECT)\n");
1890 break; 1724 break;
1891 1725
1892 case SAVE_POINTERS: 1726 case SAVE_POINTERS:
@@ -1907,7 +1741,8 @@ static void msgi_run(struct Scsi_Host *shpnt)
1907 long ticks; 1741 long ticks;
1908 1742
1909 if (MSGI(1) != 3) { 1743 if (MSGI(1) != 3) {
1910 printk(ERR_LEAD "SDTR message length!=3\n", CMDINFO(CURRENT_SC)); 1744 scmd_printk(KERN_ERR, CURRENT_SC,
1745 "SDTR message length!=3\n");
1911 break; 1746 break;
1912 } 1747 }
1913 1748
@@ -1924,10 +1759,12 @@ static void msgi_run(struct Scsi_Host *shpnt)
1924 /* negotiation in progress */ 1759 /* negotiation in progress */
1925 if (ticks > 9 || MSGI(4) < 1 || MSGI(4) > 8) { 1760 if (ticks > 9 || MSGI(4) < 1 || MSGI(4) > 8) {
1926 ADDMSGO(MESSAGE_REJECT); 1761 ADDMSGO(MESSAGE_REJECT);
1927 printk(INFO_LEAD "received Synchronous Data Transfer Request invalid - rejected\n", CMDINFO(CURRENT_SC)); 1762 scmd_printk(KERN_INFO,
1763 CURRENT_SC,
1764 "received Synchronous Data Transfer Request invalid - rejected\n");
1928 break; 1765 break;
1929 } 1766 }
1930 1767
1931 SYNCRATE |= ((ticks - 2) << 4) + MSGI(4); 1768 SYNCRATE |= ((ticks - 2) << 4) + MSGI(4);
1932 } else if (ticks <= 9 && MSGI(4) >= 1) { 1769 } else if (ticks <= 9 && MSGI(4) >= 1) {
1933 ADDMSGO(EXTENDED_MESSAGE); 1770 ADDMSGO(EXTENDED_MESSAGE);
@@ -1947,11 +1784,14 @@ static void msgi_run(struct Scsi_Host *shpnt)
1947 SYNCRATE |= ((ticks - 2) << 4) + MSGI(4); 1784 SYNCRATE |= ((ticks - 2) << 4) + MSGI(4);
1948 } else { 1785 } else {
1949 /* requested SDTR is too slow, do it asynchronously */ 1786 /* requested SDTR is too slow, do it asynchronously */
1950 printk(INFO_LEAD "Synchronous Data Transfer Request too slow - Rejecting\n", CMDINFO(CURRENT_SC)); 1787 scmd_printk(KERN_INFO,
1788 CURRENT_SC,
1789 "Synchronous Data Transfer Request too slow - Rejecting\n");
1951 ADDMSGO(MESSAGE_REJECT); 1790 ADDMSGO(MESSAGE_REJECT);
1952 } 1791 }
1953 1792
1954 SYNCNEG=2; /* negotiation completed */ 1793 /* negotiation completed */
1794 SYNCNEG=2;
1955 SETRATE(SYNCRATE); 1795 SETRATE(SYNCRATE);
1956 } 1796 }
1957 break; 1797 break;
@@ -1985,12 +1825,12 @@ static void msgi_run(struct Scsi_Host *shpnt)
1985static void msgi_end(struct Scsi_Host *shpnt) 1825static void msgi_end(struct Scsi_Host *shpnt)
1986{ 1826{
1987 if(MSGILEN>0) 1827 if(MSGILEN>0)
1988 printk(WARN_LEAD "target left before message completed (%d)\n", CMDINFO(CURRENT_SC), MSGILEN); 1828 scmd_printk(KERN_WARNING, CURRENT_SC,
1829 "target left before message completed (%d)\n",
1830 MSGILEN);
1989 1831
1990 if (MSGOLEN > 0 && !(GETPORT(SSTAT1) & BUSFREE)) { 1832 if (MSGOLEN > 0 && !(GETPORT(SSTAT1) & BUSFREE))
1991 DPRINTK(debug_msgi, DEBUG_LEAD "msgo pending\n", CMDINFO(CURRENT_SC));
1992 SETPORT(SCSISIG, P_MSGI | SIG_ATNO); 1833 SETPORT(SCSISIG, P_MSGI | SIG_ATNO);
1993 }
1994} 1834}
1995 1835
1996/* 1836/*
@@ -2003,21 +1843,12 @@ static void msgo_init(struct Scsi_Host *shpnt)
2003 if((CURRENT_SC->SCp.phase & syncneg) && SYNCNEG==2 && SYNCRATE==0) { 1843 if((CURRENT_SC->SCp.phase & syncneg) && SYNCNEG==2 && SYNCRATE==0) {
2004 ADDMSGO(IDENTIFY(RECONNECT, CURRENT_SC->device->lun)); 1844 ADDMSGO(IDENTIFY(RECONNECT, CURRENT_SC->device->lun));
2005 } else { 1845 } else {
2006 printk(INFO_LEAD "unexpected MESSAGE OUT phase; rejecting\n", CMDINFO(CURRENT_SC)); 1846 scmd_printk(KERN_INFO, CURRENT_SC,
1847 "unexpected MESSAGE OUT phase; rejecting\n");
2007 ADDMSGO(MESSAGE_REJECT); 1848 ADDMSGO(MESSAGE_REJECT);
2008 } 1849 }
2009 } 1850 }
2010 1851
2011#if defined(AHA152X_DEBUG)
2012 if(HOSTDATA(shpnt)->debug & debug_msgo) {
2013 int i;
2014
2015 printk(DEBUG_LEAD "messages( ", CMDINFO(CURRENT_SC));
2016 for (i=0; i<MSGOLEN; i+=spi_print_msg(&MSGO(i)), printk(" "))
2017 ;
2018 printk(")\n");
2019 }
2020#endif
2021} 1852}
2022 1853
2023/* 1854/*
@@ -2026,16 +1857,9 @@ static void msgo_init(struct Scsi_Host *shpnt)
2026 */ 1857 */
2027static void msgo_run(struct Scsi_Host *shpnt) 1858static void msgo_run(struct Scsi_Host *shpnt)
2028{ 1859{
2029 if(MSGO_I==MSGOLEN)
2030 DPRINTK(debug_msgo, DEBUG_LEAD "messages all sent (%d/%d)\n", CMDINFO(CURRENT_SC), MSGO_I, MSGOLEN);
2031
2032 while(MSGO_I<MSGOLEN) { 1860 while(MSGO_I<MSGOLEN) {
2033 DPRINTK(debug_msgo, DEBUG_LEAD "message byte %02x (%d/%d)\n", CMDINFO(CURRENT_SC), MSGO(MSGO_I), MSGO_I, MSGOLEN); 1861 if (TESTLO(SSTAT0, SPIORDY))
2034
2035 if(TESTLO(SSTAT0, SPIORDY)) {
2036 DPRINTK(debug_msgo, DEBUG_LEAD "!SPIORDY\n", CMDINFO(CURRENT_SC));
2037 return; 1862 return;
2038 }
2039 1863
2040 if (MSGO_I==MSGOLEN-1) { 1864 if (MSGO_I==MSGOLEN-1) {
2041 /* Leave MESSAGE OUT after transfer */ 1865 /* Leave MESSAGE OUT after transfer */
@@ -2059,36 +1883,33 @@ static void msgo_run(struct Scsi_Host *shpnt)
2059static void msgo_end(struct Scsi_Host *shpnt) 1883static void msgo_end(struct Scsi_Host *shpnt)
2060{ 1884{
2061 if(MSGO_I<MSGOLEN) { 1885 if(MSGO_I<MSGOLEN) {
2062 printk(ERR_LEAD "message sent incompletely (%d/%d)\n", CMDINFO(CURRENT_SC), MSGO_I, MSGOLEN); 1886 scmd_printk(KERN_ERR, CURRENT_SC,
1887 "message sent incompletely (%d/%d)\n",
1888 MSGO_I, MSGOLEN);
2063 if(SYNCNEG==1) { 1889 if(SYNCNEG==1) {
2064 printk(INFO_LEAD "Synchronous Data Transfer Request was rejected\n", CMDINFO(CURRENT_SC)); 1890 scmd_printk(KERN_INFO, CURRENT_SC,
1891 "Synchronous Data Transfer Request was rejected\n");
2065 SYNCNEG=2; 1892 SYNCNEG=2;
2066 } 1893 }
2067 } 1894 }
2068 1895
2069 MSGO_I = 0; 1896 MSGO_I = 0;
2070 MSGOLEN = 0; 1897 MSGOLEN = 0;
2071} 1898}
2072 1899
2073/* 1900/*
2074 * command phase 1901 * command phase
2075 * 1902 *
2076 */ 1903 */
2077static void cmd_init(struct Scsi_Host *shpnt) 1904static void cmd_init(struct Scsi_Host *shpnt)
2078{ 1905{
2079 if (CURRENT_SC->SCp.sent_command) { 1906 if (CURRENT_SC->SCp.sent_command) {
2080 printk(ERR_LEAD "command already sent\n", CMDINFO(CURRENT_SC)); 1907 scmd_printk(KERN_ERR, CURRENT_SC,
1908 "command already sent\n");
2081 done(shpnt, DID_ERROR << 16); 1909 done(shpnt, DID_ERROR << 16);
2082 return; 1910 return;
2083 } 1911 }
2084 1912
2085#if defined(AHA152X_DEBUG)
2086 if (HOSTDATA(shpnt)->debug & debug_cmd) {
2087 printk(DEBUG_LEAD "cmd_init: ", CMDINFO(CURRENT_SC));
2088 __scsi_print_command(CURRENT_SC->cmnd);
2089 }
2090#endif
2091
2092 CMD_I=0; 1913 CMD_I=0;
2093} 1914}
2094 1915
@@ -2098,18 +1919,9 @@ static void cmd_init(struct Scsi_Host *shpnt)
2098 */ 1919 */
2099static void cmd_run(struct Scsi_Host *shpnt) 1920static void cmd_run(struct Scsi_Host *shpnt)
2100{ 1921{
2101 if(CMD_I==CURRENT_SC->cmd_len) {
2102 DPRINTK(debug_cmd, DEBUG_LEAD "command already completely sent (%d/%d)", CMDINFO(CURRENT_SC), CMD_I, CURRENT_SC->cmd_len);
2103 disp_ports(shpnt);
2104 }
2105
2106 while(CMD_I<CURRENT_SC->cmd_len) { 1922 while(CMD_I<CURRENT_SC->cmd_len) {
2107 DPRINTK(debug_cmd, DEBUG_LEAD "command byte %02x (%d/%d)\n", CMDINFO(CURRENT_SC), CURRENT_SC->cmnd[CMD_I], CMD_I, CURRENT_SC->cmd_len); 1923 if (TESTLO(SSTAT0, SPIORDY))
2108
2109 if(TESTLO(SSTAT0, SPIORDY)) {
2110 DPRINTK(debug_cmd, DEBUG_LEAD "!SPIORDY\n", CMDINFO(CURRENT_SC));
2111 return; 1924 return;
2112 }
2113 1925
2114 SETPORT(SCSIDAT, CURRENT_SC->cmnd[CMD_I++]); 1926 SETPORT(SCSIDAT, CURRENT_SC->cmnd[CMD_I++]);
2115 } 1927 }
@@ -2118,7 +1930,9 @@ static void cmd_run(struct Scsi_Host *shpnt)
2118static void cmd_end(struct Scsi_Host *shpnt) 1930static void cmd_end(struct Scsi_Host *shpnt)
2119{ 1931{
2120 if(CMD_I<CURRENT_SC->cmd_len) 1932 if(CMD_I<CURRENT_SC->cmd_len)
2121 printk(ERR_LEAD "command sent incompletely (%d/%d)\n", CMDINFO(CURRENT_SC), CMD_I, CURRENT_SC->cmd_len); 1933 scmd_printk(KERN_ERR, CURRENT_SC,
1934 "command sent incompletely (%d/%d)\n",
1935 CMD_I, CURRENT_SC->cmd_len);
2122 else 1936 else
2123 CURRENT_SC->SCp.sent_command++; 1937 CURRENT_SC->SCp.sent_command++;
2124} 1938}
@@ -2129,20 +1943,11 @@ static void cmd_end(struct Scsi_Host *shpnt)
2129 */ 1943 */
2130static void status_run(struct Scsi_Host *shpnt) 1944static void status_run(struct Scsi_Host *shpnt)
2131{ 1945{
2132 if(TESTLO(SSTAT0,SPIORDY)) { 1946 if (TESTLO(SSTAT0, SPIORDY))
2133 DPRINTK(debug_status, DEBUG_LEAD "!SPIORDY\n", CMDINFO(CURRENT_SC));
2134 return; 1947 return;
2135 }
2136 1948
2137 CURRENT_SC->SCp.Status = GETPORT(SCSIDAT); 1949 CURRENT_SC->SCp.Status = GETPORT(SCSIDAT);
2138 1950
2139#if defined(AHA152X_DEBUG)
2140 if (HOSTDATA(shpnt)->debug & debug_status) {
2141 printk(DEBUG_LEAD "inbound status %02x ", CMDINFO(CURRENT_SC), CURRENT_SC->SCp.Status);
2142 scsi_print_status(CURRENT_SC->SCp.Status);
2143 printk("\n");
2144 }
2145#endif
2146} 1951}
2147 1952
2148/* 1953/*
@@ -2161,10 +1966,6 @@ static void datai_init(struct Scsi_Host *shpnt)
2161 SETPORT(SIMODE1, ENSCSIPERR | ENSCSIRST | ENPHASEMIS | ENBUSFREE); 1966 SETPORT(SIMODE1, ENSCSIPERR | ENSCSIRST | ENPHASEMIS | ENBUSFREE);
2162 1967
2163 DATA_LEN=0; 1968 DATA_LEN=0;
2164 DPRINTK(debug_datai,
2165 DEBUG_LEAD "datai_init: request_bufflen=%d resid=%d\n",
2166 CMDINFO(CURRENT_SC), scsi_bufflen(CURRENT_SC),
2167 scsi_get_resid(CURRENT_SC));
2168} 1969}
2169 1970
2170static void datai_run(struct Scsi_Host *shpnt) 1971static void datai_run(struct Scsi_Host *shpnt)
@@ -2186,8 +1987,7 @@ static void datai_run(struct Scsi_Host *shpnt)
2186 barrier(); 1987 barrier();
2187 1988
2188 if(TESTLO(DMASTAT, DFIFOFULL|INTSTAT)) { 1989 if(TESTLO(DMASTAT, DFIFOFULL|INTSTAT)) {
2189 printk(ERR_LEAD "datai timeout", CMDINFO(CURRENT_SC)); 1990 scmd_printk(KERN_ERR, CURRENT_SC, "datai timeout\n");
2190 disp_ports(shpnt);
2191 break; 1991 break;
2192 } 1992 }
2193 1993
@@ -2199,8 +1999,8 @@ static void datai_run(struct Scsi_Host *shpnt)
2199 barrier(); 1999 barrier();
2200 2000
2201 if(TESTLO(SSTAT2, SEMPTY)) { 2001 if(TESTLO(SSTAT2, SEMPTY)) {
2202 printk(ERR_LEAD "datai sempty timeout", CMDINFO(CURRENT_SC)); 2002 scmd_printk(KERN_ERR, CURRENT_SC,
2203 disp_ports(shpnt); 2003 "datai sempty timeout");
2204 break; 2004 break;
2205 } 2005 }
2206 2006
@@ -2209,48 +2009,49 @@ static void datai_run(struct Scsi_Host *shpnt)
2209 2009
2210 if(CURRENT_SC->SCp.this_residual>0) { 2010 if(CURRENT_SC->SCp.this_residual>0) {
2211 while(fifodata>0 && CURRENT_SC->SCp.this_residual>0) { 2011 while(fifodata>0 && CURRENT_SC->SCp.this_residual>0) {
2212 data_count = fifodata>CURRENT_SC->SCp.this_residual ? 2012 data_count = fifodata > CURRENT_SC->SCp.this_residual ?
2213 CURRENT_SC->SCp.this_residual : 2013 CURRENT_SC->SCp.this_residual :
2214 fifodata; 2014 fifodata;
2215 fifodata -= data_count; 2015 fifodata -= data_count;
2216 2016
2217 if(data_count & 1) { 2017 if (data_count & 1) {
2218 DPRINTK(debug_datai, DEBUG_LEAD "8bit\n", CMDINFO(CURRENT_SC)); 2018 SETPORT(DMACNTRL0, ENDMA|_8BIT);
2219 SETPORT(DMACNTRL0, ENDMA|_8BIT); 2019 *CURRENT_SC->SCp.ptr++ = GETPORT(DATAPORT);
2220 *CURRENT_SC->SCp.ptr++ = GETPORT(DATAPORT); 2020 CURRENT_SC->SCp.this_residual--;
2221 CURRENT_SC->SCp.this_residual--; 2021 DATA_LEN++;
2222 DATA_LEN++; 2022 SETPORT(DMACNTRL0, ENDMA);
2223 SETPORT(DMACNTRL0, ENDMA); 2023 }
2224 } 2024
2225 2025 if (data_count > 1) {
2226 if(data_count > 1) { 2026 data_count >>= 1;
2227 DPRINTK(debug_datai, DEBUG_LEAD "16bit(%d)\n", CMDINFO(CURRENT_SC), data_count); 2027 insw(DATAPORT, CURRENT_SC->SCp.ptr, data_count);
2228 data_count >>= 1; 2028 CURRENT_SC->SCp.ptr += 2 * data_count;
2229 insw(DATAPORT, CURRENT_SC->SCp.ptr, data_count); 2029 CURRENT_SC->SCp.this_residual -= 2 * data_count;
2230 CURRENT_SC->SCp.ptr += 2 * data_count; 2030 DATA_LEN += 2 * data_count;
2231 CURRENT_SC->SCp.this_residual -= 2 * data_count; 2031 }
2232 DATA_LEN += 2 * data_count; 2032
2233 } 2033 if (CURRENT_SC->SCp.this_residual == 0 &&
2234 2034 CURRENT_SC->SCp.buffers_residual > 0) {
2235 if(CURRENT_SC->SCp.this_residual==0 && CURRENT_SC->SCp.buffers_residual>0) { 2035 /* advance to next buffer */
2236 /* advance to next buffer */ 2036 CURRENT_SC->SCp.buffers_residual--;
2237 CURRENT_SC->SCp.buffers_residual--; 2037 CURRENT_SC->SCp.buffer++;
2238 CURRENT_SC->SCp.buffer++; 2038 CURRENT_SC->SCp.ptr = SG_ADDRESS(CURRENT_SC->SCp.buffer);
2239 CURRENT_SC->SCp.ptr = SG_ADDRESS(CURRENT_SC->SCp.buffer); 2039 CURRENT_SC->SCp.this_residual = CURRENT_SC->SCp.buffer->length;
2240 CURRENT_SC->SCp.this_residual = CURRENT_SC->SCp.buffer->length; 2040 }
2241 } 2041 }
2242 } 2042 } else if (fifodata > 0) {
2243 } else if(fifodata>0) { 2043 scmd_printk(KERN_ERR, CURRENT_SC,
2244 printk(ERR_LEAD "no buffers left for %d(%d) bytes (data overrun!?)\n", CMDINFO(CURRENT_SC), fifodata, GETPORT(FIFOSTAT)); 2044 "no buffers left for %d(%d) bytes"
2245 SETPORT(DMACNTRL0, ENDMA|_8BIT); 2045 " (data overrun!?)\n",
2046 fifodata, GETPORT(FIFOSTAT));
2047 SETPORT(DMACNTRL0, ENDMA|_8BIT);
2246 while(fifodata>0) { 2048 while(fifodata>0) {
2247 int data; 2049 int data;
2248 data=GETPORT(DATAPORT); 2050 data=GETPORT(DATAPORT);
2249 DPRINTK(debug_datai, DEBUG_LEAD "data=%02x\n", CMDINFO(CURRENT_SC), data);
2250 fifodata--; 2051 fifodata--;
2251 DATA_LEN++; 2052 DATA_LEN++;
2252 } 2053 }
2253 SETPORT(DMACNTRL0, ENDMA|_8BIT); 2054 SETPORT(DMACNTRL0, ENDMA|_8BIT);
2254 } 2055 }
2255 } 2056 }
2256 2057
@@ -2258,19 +2059,20 @@ static void datai_run(struct Scsi_Host *shpnt)
2258 TESTLO(DMASTAT, DFIFOEMP) || 2059 TESTLO(DMASTAT, DFIFOEMP) ||
2259 TESTLO(SSTAT2, SEMPTY) || 2060 TESTLO(SSTAT2, SEMPTY) ||
2260 GETPORT(FIFOSTAT)>0) { 2061 GETPORT(FIFOSTAT)>0) {
2261 /* 2062 /*
2262 * something went wrong, if there's something left in the fifos 2063 * something went wrong, if there's something left in the fifos
2263 * or the phase didn't change 2064 * or the phase didn't change
2264 */ 2065 */
2265 printk(ERR_LEAD "fifos should be empty and phase should have changed\n", CMDINFO(CURRENT_SC)); 2066 scmd_printk(KERN_ERR, CURRENT_SC,
2266 disp_ports(shpnt); 2067 "fifos should be empty and phase should have changed\n");
2267 } 2068 }
2268 2069
2269 if(DATA_LEN!=GETSTCNT()) { 2070 if(DATA_LEN!=GETSTCNT()) {
2270 printk(ERR_LEAD 2071 scmd_printk(KERN_ERR, CURRENT_SC,
2271 "manual transfer count differs from automatic (count=%d;stcnt=%d;diff=%d;fifostat=%d)", 2072 "manual transfer count differs from automatic "
2272 CMDINFO(CURRENT_SC), DATA_LEN, GETSTCNT(), GETSTCNT()-DATA_LEN, GETPORT(FIFOSTAT)); 2073 "(count=%d;stcnt=%d;diff=%d;fifostat=%d)",
2273 disp_ports(shpnt); 2074 DATA_LEN, GETSTCNT(), GETSTCNT()-DATA_LEN,
2075 GETPORT(FIFOSTAT));
2274 mdelay(10000); 2076 mdelay(10000);
2275 } 2077 }
2276} 2078}
@@ -2279,11 +2081,6 @@ static void datai_end(struct Scsi_Host *shpnt)
2279{ 2081{
2280 CMD_INC_RESID(CURRENT_SC, -GETSTCNT()); 2082 CMD_INC_RESID(CURRENT_SC, -GETSTCNT());
2281 2083
2282 DPRINTK(debug_datai,
2283 DEBUG_LEAD "datai_end: request_bufflen=%d resid=%d stcnt=%d\n",
2284 CMDINFO(CURRENT_SC), scsi_bufflen(CURRENT_SC),
2285 scsi_get_resid(CURRENT_SC), GETSTCNT());
2286
2287 SETPORT(SXFRCTL0, CH1|CLRSTCNT); 2084 SETPORT(SXFRCTL0, CH1|CLRSTCNT);
2288 SETPORT(DMACNTRL0, 0); 2085 SETPORT(DMACNTRL0, 0);
2289} 2086}
@@ -2304,11 +2101,6 @@ static void datao_init(struct Scsi_Host *shpnt)
2304 SETPORT(SIMODE1, ENSCSIPERR | ENSCSIRST | ENPHASEMIS | ENBUSFREE ); 2101 SETPORT(SIMODE1, ENSCSIPERR | ENSCSIRST | ENPHASEMIS | ENBUSFREE );
2305 2102
2306 DATA_LEN = scsi_get_resid(CURRENT_SC); 2103 DATA_LEN = scsi_get_resid(CURRENT_SC);
2307
2308 DPRINTK(debug_datao,
2309 DEBUG_LEAD "datao_init: request_bufflen=%d; resid=%d\n",
2310 CMDINFO(CURRENT_SC), scsi_bufflen(CURRENT_SC),
2311 scsi_get_resid(CURRENT_SC));
2312} 2104}
2313 2105
2314static void datao_run(struct Scsi_Host *shpnt) 2106static void datao_run(struct Scsi_Host *shpnt)
@@ -2323,8 +2115,9 @@ static void datao_run(struct Scsi_Host *shpnt)
2323 data_count=CURRENT_SC->SCp.this_residual; 2115 data_count=CURRENT_SC->SCp.this_residual;
2324 2116
2325 if(TESTLO(DMASTAT, DFIFOEMP)) { 2117 if(TESTLO(DMASTAT, DFIFOEMP)) {
2326 printk(ERR_LEAD "datao fifo not empty (%d)", CMDINFO(CURRENT_SC), GETPORT(FIFOSTAT)); 2118 scmd_printk(KERN_ERR, CURRENT_SC,
2327 disp_ports(shpnt); 2119 "datao fifo not empty (%d)",
2120 GETPORT(FIFOSTAT));
2328 break; 2121 break;
2329 } 2122 }
2330 2123
@@ -2342,7 +2135,7 @@ static void datao_run(struct Scsi_Host *shpnt)
2342 CURRENT_SC->SCp.ptr += 2 * data_count; 2135 CURRENT_SC->SCp.ptr += 2 * data_count;
2343 CURRENT_SC->SCp.this_residual -= 2 * data_count; 2136 CURRENT_SC->SCp.this_residual -= 2 * data_count;
2344 CMD_INC_RESID(CURRENT_SC, -2 * data_count); 2137 CMD_INC_RESID(CURRENT_SC, -2 * data_count);
2345 } 2138 }
2346 2139
2347 if(CURRENT_SC->SCp.this_residual==0 && CURRENT_SC->SCp.buffers_residual>0) { 2140 if(CURRENT_SC->SCp.this_residual==0 && CURRENT_SC->SCp.buffers_residual>0) {
2348 /* advance to next buffer */ 2141 /* advance to next buffer */
@@ -2357,8 +2150,7 @@ static void datao_run(struct Scsi_Host *shpnt)
2357 barrier(); 2150 barrier();
2358 2151
2359 if(TESTLO(DMASTAT, DFIFOEMP|INTSTAT)) { 2152 if(TESTLO(DMASTAT, DFIFOEMP|INTSTAT)) {
2360 printk(ERR_LEAD "dataout timeout", CMDINFO(CURRENT_SC)); 2153 scmd_printk(KERN_ERR, CURRENT_SC, "dataout timeout\n");
2361 disp_ports(shpnt);
2362 break; 2154 break;
2363 } 2155 }
2364 } 2156 }
@@ -2368,35 +2160,23 @@ static void datao_end(struct Scsi_Host *shpnt)
2368{ 2160{
2369 if(TESTLO(DMASTAT, DFIFOEMP)) { 2161 if(TESTLO(DMASTAT, DFIFOEMP)) {
2370 int data_count = (DATA_LEN - scsi_get_resid(CURRENT_SC)) - 2162 int data_count = (DATA_LEN - scsi_get_resid(CURRENT_SC)) -
2371 GETSTCNT(); 2163 GETSTCNT();
2372
2373 DPRINTK(debug_datao, DEBUG_LEAD "datao: %d bytes to resend (%d written, %d transferred)\n",
2374 CMDINFO(CURRENT_SC),
2375 data_count,
2376 DATA_LEN - scsi_get_resid(CURRENT_SC),
2377 GETSTCNT());
2378 2164
2379 CMD_INC_RESID(CURRENT_SC, data_count); 2165 CMD_INC_RESID(CURRENT_SC, data_count);
2380 2166
2381 data_count -= CURRENT_SC->SCp.ptr - 2167 data_count -= CURRENT_SC->SCp.ptr -
2382 SG_ADDRESS(CURRENT_SC->SCp.buffer); 2168 SG_ADDRESS(CURRENT_SC->SCp.buffer);
2383 while(data_count>0) { 2169 while(data_count>0) {
2384 CURRENT_SC->SCp.buffer--; 2170 CURRENT_SC->SCp.buffer--;
2385 CURRENT_SC->SCp.buffers_residual++; 2171 CURRENT_SC->SCp.buffers_residual++;
2386 data_count -= CURRENT_SC->SCp.buffer->length; 2172 data_count -= CURRENT_SC->SCp.buffer->length;
2387 } 2173 }
2388 CURRENT_SC->SCp.ptr = SG_ADDRESS(CURRENT_SC->SCp.buffer) - 2174 CURRENT_SC->SCp.ptr = SG_ADDRESS(CURRENT_SC->SCp.buffer) -
2389 data_count; 2175 data_count;
2390 CURRENT_SC->SCp.this_residual = CURRENT_SC->SCp.buffer->length + 2176 CURRENT_SC->SCp.this_residual = CURRENT_SC->SCp.buffer->length +
2391 data_count; 2177 data_count;
2392 } 2178 }
2393 2179
2394 DPRINTK(debug_datao, DEBUG_LEAD "datao_end: request_bufflen=%d; resid=%d; stcnt=%d\n",
2395 CMDINFO(CURRENT_SC),
2396 scsi_bufflen(CURRENT_SC),
2397 scsi_get_resid(CURRENT_SC),
2398 GETSTCNT());
2399
2400 SETPORT(SXFRCTL0, CH1|CLRCH1|CLRSTCNT); 2180 SETPORT(SXFRCTL0, CH1|CLRCH1|CLRSTCNT);
2401 SETPORT(SXFRCTL0, CH1); 2181 SETPORT(SXFRCTL0, CH1);
2402 2182
@@ -2420,7 +2200,7 @@ static int update_state(struct Scsi_Host *shpnt)
2420 STATE=rsti; 2200 STATE=rsti;
2421 SETPORT(SCSISEQ,0); 2201 SETPORT(SCSISEQ,0);
2422 SETPORT(SSTAT1,SCSIRSTI); 2202 SETPORT(SSTAT1,SCSIRSTI);
2423 } else if(stat0 & SELDI && PREVSTATE==busfree) { 2203 } else if (stat0 & SELDI && PREVSTATE == busfree) {
2424 STATE=seldi; 2204 STATE=seldi;
2425 } else if(stat0 & SELDO && CURRENT_SC && (CURRENT_SC->SCp.phase & selecting)) { 2205 } else if(stat0 & SELDO && CURRENT_SC && (CURRENT_SC->SCp.phase & selecting)) {
2426 STATE=seldo; 2206 STATE=seldo;
@@ -2445,8 +2225,7 @@ static int update_state(struct Scsi_Host *shpnt)
2445 } 2225 }
2446 2226
2447 if((stat0 & SELDI) && STATE!=seldi && !dataphase) { 2227 if((stat0 & SELDI) && STATE!=seldi && !dataphase) {
2448 printk(INFO_LEAD "reselection missed?", CMDINFO(CURRENT_SC)); 2228 scmd_printk(KERN_INFO, CURRENT_SC, "reselection missed?");
2449 disp_ports(shpnt);
2450 } 2229 }
2451 2230
2452 if(STATE!=PREVSTATE) { 2231 if(STATE!=PREVSTATE) {
@@ -2464,7 +2243,7 @@ static int update_state(struct Scsi_Host *shpnt)
2464 */ 2243 */
2465static void parerr_run(struct Scsi_Host *shpnt) 2244static void parerr_run(struct Scsi_Host *shpnt)
2466{ 2245{
2467 printk(ERR_LEAD "parity error\n", CMDINFO(CURRENT_SC)); 2246 scmd_printk(KERN_ERR, CURRENT_SC, "parity error\n");
2468 done(shpnt, DID_PARITY << 16); 2247 done(shpnt, DID_PARITY << 16);
2469} 2248}
2470 2249
@@ -2476,8 +2255,8 @@ static void rsti_run(struct Scsi_Host *shpnt)
2476{ 2255{
2477 Scsi_Cmnd *ptr; 2256 Scsi_Cmnd *ptr;
2478 2257
2479 printk(KERN_NOTICE "aha152x%d: scsi reset in\n", HOSTNO); 2258 shost_printk(KERN_NOTICE, shpnt, "scsi reset in\n");
2480 2259
2481 ptr=DISCONNECTED_SC; 2260 ptr=DISCONNECTED_SC;
2482 while(ptr) { 2261 while(ptr) {
2483 Scsi_Cmnd *next = SCNEXT(ptr); 2262 Scsi_Cmnd *next = SCNEXT(ptr);
@@ -2539,8 +2318,6 @@ static void is_complete(struct Scsi_Host *shpnt)
2539 2318
2540 dataphase=update_state(shpnt); 2319 dataphase=update_state(shpnt);
2541 2320
2542 DPRINTK(debug_phases, LEAD "start %s %s(%s)\n", CMDINFO(CURRENT_SC), states[STATE].name, states[PREVSTATE].name, states[LASTSTATE].name);
2543
2544 /* 2321 /*
2545 * end previous state 2322 * end previous state
2546 * 2323 *
@@ -2567,9 +2344,9 @@ static void is_complete(struct Scsi_Host *shpnt)
2567 if(dataphase) { 2344 if(dataphase) {
2568 SETPORT(SSTAT0, REQINIT); 2345 SETPORT(SSTAT0, REQINIT);
2569 SETPORT(SCSISIG, GETPORT(SCSISIG) & P_MASK); 2346 SETPORT(SCSISIG, GETPORT(SCSISIG) & P_MASK);
2570 SETPORT(SSTAT1, PHASECHG); 2347 SETPORT(SSTAT1, PHASECHG);
2571 } 2348 }
2572 2349
2573 /* 2350 /*
2574 * enable SPIO mode if previous didn't use it 2351 * enable SPIO mode if previous didn't use it
2575 * and this one does 2352 * and this one does
@@ -2581,14 +2358,14 @@ static void is_complete(struct Scsi_Host *shpnt)
2581 if(CURRENT_SC) 2358 if(CURRENT_SC)
2582 CURRENT_SC->SCp.phase |= spiordy; 2359 CURRENT_SC->SCp.phase |= spiordy;
2583 } 2360 }
2584 2361
2585 /* 2362 /*
2586 * initialize for new state 2363 * initialize for new state
2587 * 2364 *
2588 */ 2365 */
2589 if(PREVSTATE!=STATE && states[STATE].init) 2366 if(PREVSTATE!=STATE && states[STATE].init)
2590 states[STATE].init(shpnt); 2367 states[STATE].init(shpnt);
2591 2368
2592 /* 2369 /*
2593 * handle current state 2370 * handle current state
2594 * 2371 *
@@ -2596,8 +2373,9 @@ static void is_complete(struct Scsi_Host *shpnt)
2596 if(states[STATE].run) 2373 if(states[STATE].run)
2597 states[STATE].run(shpnt); 2374 states[STATE].run(shpnt);
2598 else 2375 else
2599 printk(ERR_LEAD "unexpected state (%x)\n", CMDINFO(CURRENT_SC), STATE); 2376 scmd_printk(KERN_ERR, CURRENT_SC,
2600 2377 "unexpected state (%x)\n", STATE);
2378
2601 /* 2379 /*
2602 * setup controller to interrupt on 2380 * setup controller to interrupt on
2603 * the next expected condition and 2381 * the next expected condition and
@@ -2613,7 +2391,6 @@ static void is_complete(struct Scsi_Host *shpnt)
2613 HOSTDATA(shpnt)->time[STATE] += jiffies-start; 2391 HOSTDATA(shpnt)->time[STATE] += jiffies-start;
2614#endif 2392#endif
2615 2393
2616 DPRINTK(debug_phases, LEAD "end %s %s(%s)\n", CMDINFO(CURRENT_SC), states[STATE].name, states[PREVSTATE].name, states[LASTSTATE].name);
2617 } while(pending); 2394 } while(pending);
2618 2395
2619 /* 2396 /*
@@ -2626,289 +2403,42 @@ static void is_complete(struct Scsi_Host *shpnt)
2626} 2403}
2627 2404
2628 2405
2629/* 2406/*
2630 * Dump the current driver status and panic 2407 * Dump the current driver status and panic
2631 */ 2408 */
2632static void aha152x_error(struct Scsi_Host *shpnt, char *msg) 2409static void aha152x_error(struct Scsi_Host *shpnt, char *msg)
2633{ 2410{
2634 printk(KERN_EMERG "\naha152x%d: %s\n", HOSTNO, msg); 2411 shost_printk(KERN_EMERG, shpnt, "%s\n", msg);
2635 show_queues(shpnt); 2412 show_queues(shpnt);
2636 panic("aha152x panic\n"); 2413 panic("aha152x panic\n");
2637} 2414}
2638 2415
2639/* 2416/*
2640 * Display registers of AIC-6260
2641 */
2642static void disp_ports(struct Scsi_Host *shpnt)
2643{
2644#if defined(AHA152X_DEBUG)
2645 int s;
2646
2647 printk("\n%s: %s(%s) ",
2648 CURRENT_SC ? "busy" : "waiting",
2649 states[STATE].name,
2650 states[PREVSTATE].name);
2651
2652 s = GETPORT(SCSISEQ);
2653 printk("SCSISEQ( ");
2654 if (s & TEMODEO)
2655 printk("TARGET MODE ");
2656 if (s & ENSELO)
2657 printk("SELO ");
2658 if (s & ENSELI)
2659 printk("SELI ");
2660 if (s & ENRESELI)
2661 printk("RESELI ");
2662 if (s & ENAUTOATNO)
2663 printk("AUTOATNO ");
2664 if (s & ENAUTOATNI)
2665 printk("AUTOATNI ");
2666 if (s & ENAUTOATNP)
2667 printk("AUTOATNP ");
2668 if (s & SCSIRSTO)
2669 printk("SCSIRSTO ");
2670 printk(");");
2671
2672 printk(" SCSISIG(");
2673 s = GETPORT(SCSISIG);
2674 switch (s & P_MASK) {
2675 case P_DATAO:
2676 printk("DATA OUT");
2677 break;
2678 case P_DATAI:
2679 printk("DATA IN");
2680 break;
2681 case P_CMD:
2682 printk("COMMAND");
2683 break;
2684 case P_STATUS:
2685 printk("STATUS");
2686 break;
2687 case P_MSGO:
2688 printk("MESSAGE OUT");
2689 break;
2690 case P_MSGI:
2691 printk("MESSAGE IN");
2692 break;
2693 default:
2694 printk("*invalid*");
2695 break;
2696 }
2697
2698 printk("); ");
2699
2700 printk("INTSTAT (%s); ", TESTHI(DMASTAT, INTSTAT) ? "hi" : "lo");
2701
2702 printk("SSTAT( ");
2703 s = GETPORT(SSTAT0);
2704 if (s & TARGET)
2705 printk("TARGET ");
2706 if (s & SELDO)
2707 printk("SELDO ");
2708 if (s & SELDI)
2709 printk("SELDI ");
2710 if (s & SELINGO)
2711 printk("SELINGO ");
2712 if (s & SWRAP)
2713 printk("SWRAP ");
2714 if (s & SDONE)
2715 printk("SDONE ");
2716 if (s & SPIORDY)
2717 printk("SPIORDY ");
2718 if (s & DMADONE)
2719 printk("DMADONE ");
2720
2721 s = GETPORT(SSTAT1);
2722 if (s & SELTO)
2723 printk("SELTO ");
2724 if (s & ATNTARG)
2725 printk("ATNTARG ");
2726 if (s & SCSIRSTI)
2727 printk("SCSIRSTI ");
2728 if (s & PHASEMIS)
2729 printk("PHASEMIS ");
2730 if (s & BUSFREE)
2731 printk("BUSFREE ");
2732 if (s & SCSIPERR)
2733 printk("SCSIPERR ");
2734 if (s & PHASECHG)
2735 printk("PHASECHG ");
2736 if (s & REQINIT)
2737 printk("REQINIT ");
2738 printk("); ");
2739
2740
2741 printk("SSTAT( ");
2742
2743 s = GETPORT(SSTAT0) & GETPORT(SIMODE0);
2744
2745 if (s & TARGET)
2746 printk("TARGET ");
2747 if (s & SELDO)
2748 printk("SELDO ");
2749 if (s & SELDI)
2750 printk("SELDI ");
2751 if (s & SELINGO)
2752 printk("SELINGO ");
2753 if (s & SWRAP)
2754 printk("SWRAP ");
2755 if (s & SDONE)
2756 printk("SDONE ");
2757 if (s & SPIORDY)
2758 printk("SPIORDY ");
2759 if (s & DMADONE)
2760 printk("DMADONE ");
2761
2762 s = GETPORT(SSTAT1) & GETPORT(SIMODE1);
2763
2764 if (s & SELTO)
2765 printk("SELTO ");
2766 if (s & ATNTARG)
2767 printk("ATNTARG ");
2768 if (s & SCSIRSTI)
2769 printk("SCSIRSTI ");
2770 if (s & PHASEMIS)
2771 printk("PHASEMIS ");
2772 if (s & BUSFREE)
2773 printk("BUSFREE ");
2774 if (s & SCSIPERR)
2775 printk("SCSIPERR ");
2776 if (s & PHASECHG)
2777 printk("PHASECHG ");
2778 if (s & REQINIT)
2779 printk("REQINIT ");
2780 printk("); ");
2781
2782 printk("SXFRCTL0( ");
2783
2784 s = GETPORT(SXFRCTL0);
2785 if (s & SCSIEN)
2786 printk("SCSIEN ");
2787 if (s & DMAEN)
2788 printk("DMAEN ");
2789 if (s & CH1)
2790 printk("CH1 ");
2791 if (s & CLRSTCNT)
2792 printk("CLRSTCNT ");
2793 if (s & SPIOEN)
2794 printk("SPIOEN ");
2795 if (s & CLRCH1)
2796 printk("CLRCH1 ");
2797 printk("); ");
2798
2799 printk("SIGNAL( ");
2800
2801 s = GETPORT(SCSISIG);
2802 if (s & SIG_ATNI)
2803 printk("ATNI ");
2804 if (s & SIG_SELI)
2805 printk("SELI ");
2806 if (s & SIG_BSYI)
2807 printk("BSYI ");
2808 if (s & SIG_REQI)
2809 printk("REQI ");
2810 if (s & SIG_ACKI)
2811 printk("ACKI ");
2812 printk("); ");
2813
2814 printk("SELID (%02x), ", GETPORT(SELID));
2815
2816 printk("STCNT (%d), ", GETSTCNT());
2817
2818 printk("SSTAT2( ");
2819
2820 s = GETPORT(SSTAT2);
2821 if (s & SOFFSET)
2822 printk("SOFFSET ");
2823 if (s & SEMPTY)
2824 printk("SEMPTY ");
2825 if (s & SFULL)
2826 printk("SFULL ");
2827 printk("); SFCNT (%d); ", s & (SFULL | SFCNT));
2828
2829 s = GETPORT(SSTAT3);
2830 printk("SCSICNT (%d), OFFCNT(%d), ", (s & 0xf0) >> 4, s & 0x0f);
2831
2832 printk("SSTAT4( ");
2833 s = GETPORT(SSTAT4);
2834 if (s & SYNCERR)
2835 printk("SYNCERR ");
2836 if (s & FWERR)
2837 printk("FWERR ");
2838 if (s & FRERR)
2839 printk("FRERR ");
2840 printk("); ");
2841
2842 printk("DMACNTRL0( ");
2843 s = GETPORT(DMACNTRL0);
2844 printk("%s ", s & _8BIT ? "8BIT" : "16BIT");
2845 printk("%s ", s & DMA ? "DMA" : "PIO");
2846 printk("%s ", s & WRITE_READ ? "WRITE" : "READ");
2847 if (s & ENDMA)
2848 printk("ENDMA ");
2849 if (s & INTEN)
2850 printk("INTEN ");
2851 if (s & RSTFIFO)
2852 printk("RSTFIFO ");
2853 if (s & SWINT)
2854 printk("SWINT ");
2855 printk("); ");
2856
2857 printk("DMASTAT( ");
2858 s = GETPORT(DMASTAT);
2859 if (s & ATDONE)
2860 printk("ATDONE ");
2861 if (s & WORDRDY)
2862 printk("WORDRDY ");
2863 if (s & DFIFOFULL)
2864 printk("DFIFOFULL ");
2865 if (s & DFIFOEMP)
2866 printk("DFIFOEMP ");
2867 printk(")\n");
2868#endif
2869}
2870
2871/*
2872 * display enabled interrupts 2417 * display enabled interrupts
2873 */ 2418 */
2874static void disp_enintr(struct Scsi_Host *shpnt) 2419static void disp_enintr(struct Scsi_Host *shpnt)
2875{ 2420{
2876 int s; 2421 int s0, s1;
2877 2422
2878 printk(KERN_DEBUG "enabled interrupts ( "); 2423 s0 = GETPORT(SIMODE0);
2879 2424 s1 = GETPORT(SIMODE1);
2880 s = GETPORT(SIMODE0); 2425
2881 if (s & ENSELDO) 2426 shost_printk(KERN_DEBUG, shpnt,
2882 printk("ENSELDO "); 2427 "enabled interrupts (%s%s%s%s%s%s%s%s%s%s%s%s%s%s)\n",
2883 if (s & ENSELDI) 2428 (s0 & ENSELDO) ? "ENSELDO " : "",
2884 printk("ENSELDI "); 2429 (s0 & ENSELDI) ? "ENSELDI " : "",
2885 if (s & ENSELINGO) 2430 (s0 & ENSELINGO) ? "ENSELINGO " : "",
2886 printk("ENSELINGO "); 2431 (s0 & ENSWRAP) ? "ENSWRAP " : "",
2887 if (s & ENSWRAP) 2432 (s0 & ENSDONE) ? "ENSDONE " : "",
2888 printk("ENSWRAP "); 2433 (s0 & ENSPIORDY) ? "ENSPIORDY " : "",
2889 if (s & ENSDONE) 2434 (s0 & ENDMADONE) ? "ENDMADONE " : "",
2890 printk("ENSDONE "); 2435 (s1 & ENSELTIMO) ? "ENSELTIMO " : "",
2891 if (s & ENSPIORDY) 2436 (s1 & ENATNTARG) ? "ENATNTARG " : "",
2892 printk("ENSPIORDY "); 2437 (s1 & ENPHASEMIS) ? "ENPHASEMIS " : "",
2893 if (s & ENDMADONE) 2438 (s1 & ENBUSFREE) ? "ENBUSFREE " : "",
2894 printk("ENDMADONE "); 2439 (s1 & ENSCSIPERR) ? "ENSCSIPERR " : "",
2895 2440 (s1 & ENPHASECHG) ? "ENPHASECHG " : "",
2896 s = GETPORT(SIMODE1); 2441 (s1 & ENREQINIT) ? "ENREQINIT " : "");
2897 if (s & ENSELTIMO)
2898 printk("ENSELTIMO ");
2899 if (s & ENATNTARG)
2900 printk("ENATNTARG ");
2901 if (s & ENPHASEMIS)
2902 printk("ENPHASEMIS ");
2903 if (s & ENBUSFREE)
2904 printk("ENBUSFREE ");
2905 if (s & ENSCSIPERR)
2906 printk("ENSCSIPERR ");
2907 if (s & ENPHASECHG)
2908 printk("ENPHASECHG ");
2909 if (s & ENREQINIT)
2910 printk("ENREQINIT ");
2911 printk(")\n");
2912} 2442}
2913 2443
2914/* 2444/*
@@ -2916,36 +2446,21 @@ static void disp_enintr(struct Scsi_Host *shpnt)
2916 */ 2446 */
2917static void show_command(Scsi_Cmnd *ptr) 2447static void show_command(Scsi_Cmnd *ptr)
2918{ 2448{
2919 scmd_printk(KERN_DEBUG, ptr, "%p: cmnd=(", ptr); 2449 scsi_print_command(ptr);
2920 2450 scmd_printk(KERN_DEBUG, ptr,
2921 __scsi_print_command(ptr->cmnd); 2451 "request_bufflen=%d; resid=%d; "
2922 2452 "phase |%s%s%s%s%s%s%s%s%s; next=0x%p",
2923 printk(KERN_DEBUG "); request_bufflen=%d; resid=%d; phase |", 2453 scsi_bufflen(ptr), scsi_get_resid(ptr),
2924 scsi_bufflen(ptr), scsi_get_resid(ptr)); 2454 (ptr->SCp.phase & not_issued) ? "not issued|" : "",
2925 2455 (ptr->SCp.phase & selecting) ? "selecting|" : "",
2926 if (ptr->SCp.phase & not_issued) 2456 (ptr->SCp.phase & identified) ? "identified|" : "",
2927 printk("not issued|"); 2457 (ptr->SCp.phase & disconnected) ? "disconnected|" : "",
2928 if (ptr->SCp.phase & selecting) 2458 (ptr->SCp.phase & completed) ? "completed|" : "",
2929 printk("selecting|"); 2459 (ptr->SCp.phase & spiordy) ? "spiordy|" : "",
2930 if (ptr->SCp.phase & identified) 2460 (ptr->SCp.phase & syncneg) ? "syncneg|" : "",
2931 printk("identified|"); 2461 (ptr->SCp.phase & aborted) ? "aborted|" : "",
2932 if (ptr->SCp.phase & disconnected) 2462 (ptr->SCp.phase & resetted) ? "resetted|" : "",
2933 printk("disconnected|"); 2463 (SCDATA(ptr)) ? SCNEXT(ptr) : NULL);
2934 if (ptr->SCp.phase & completed)
2935 printk("completed|");
2936 if (ptr->SCp.phase & spiordy)
2937 printk("spiordy|");
2938 if (ptr->SCp.phase & syncneg)
2939 printk("syncneg|");
2940 if (ptr->SCp.phase & aborted)
2941 printk("aborted|");
2942 if (ptr->SCp.phase & resetted)
2943 printk("resetted|");
2944 if( SCDATA(ptr) ) {
2945 printk("; next=0x%p\n", SCNEXT(ptr));
2946 } else {
2947 printk("; next=(host scribble NULL)\n");
2948 }
2949} 2464}
2950 2465
2951/* 2466/*
@@ -2972,7 +2487,6 @@ static void show_queues(struct Scsi_Host *shpnt)
2972 for (ptr = DISCONNECTED_SC; ptr; ptr = SCDATA(ptr) ? SCNEXT(ptr) : NULL) 2487 for (ptr = DISCONNECTED_SC; ptr; ptr = SCDATA(ptr) ? SCNEXT(ptr) : NULL)
2973 show_command(ptr); 2488 show_command(ptr);
2974 2489
2975 disp_ports(shpnt);
2976 disp_enintr(shpnt); 2490 disp_enintr(shpnt);
2977} 2491}
2978 2492
@@ -3276,15 +2790,6 @@ static int aha152x_set_info(struct Scsi_Host *shpnt, char *buffer, int length)
3276 if(!shpnt || !buffer || length<8 || strncmp("aha152x ", buffer, 8)!=0) 2790 if(!shpnt || !buffer || length<8 || strncmp("aha152x ", buffer, 8)!=0)
3277 return -EINVAL; 2791 return -EINVAL;
3278 2792
3279#if defined(AHA152X_DEBUG)
3280 if(length>14 && strncmp("debug ", buffer+8, 6)==0) {
3281 int debug = HOSTDATA(shpnt)->debug;
3282
3283 HOSTDATA(shpnt)->debug = simple_strtoul(buffer+14, NULL, 0);
3284
3285 printk(KERN_INFO "aha152x%d: debugging options set to 0x%04x (were 0x%04x)\n", HOSTNO, HOSTDATA(shpnt)->debug, debug);
3286 } else
3287#endif
3288#if defined(AHA152X_STAT) 2793#if defined(AHA152X_STAT)
3289 if(length>13 && strncmp("reset", buffer+8, 5)==0) { 2794 if(length>13 && strncmp("reset", buffer+8, 5)==0) {
3290 int i; 2795 int i;
@@ -3302,7 +2807,7 @@ static int aha152x_set_info(struct Scsi_Host *shpnt, char *buffer, int length)
3302 HOSTDATA(shpnt)->time[i]=0; 2807 HOSTDATA(shpnt)->time[i]=0;
3303 } 2808 }
3304 2809
3305 printk(KERN_INFO "aha152x%d: stats reseted.\n", HOSTNO); 2810 shost_printk(KERN_INFO, shpnt, "aha152x: stats reset.\n");
3306 2811
3307 } else 2812 } else
3308#endif 2813#endif
@@ -3343,29 +2848,6 @@ static int aha152x_show_info(struct seq_file *m, struct Scsi_Host *shpnt)
3343 (((HOSTDATA(shpnt)->syncrate[i] & 0x70) >> 4) + 2) * 50, 2848 (((HOSTDATA(shpnt)->syncrate[i] & 0x70) >> 4) + 2) * 50,
3344 HOSTDATA(shpnt)->syncrate[i] & 0x0f); 2849 HOSTDATA(shpnt)->syncrate[i] & 0x0f);
3345 } 2850 }
3346#if defined(AHA152X_DEBUG)
3347#define PDEBUG(flags,txt) \
3348 if(HOSTDATA(shpnt)->debug & flags) SPRINTF("(%s) ", txt);
3349
3350 SPRINTF("enabled debugging options: ");
3351
3352 PDEBUG(debug_procinfo, "procinfo");
3353 PDEBUG(debug_queue, "queue");
3354 PDEBUG(debug_intr, "interrupt");
3355 PDEBUG(debug_selection, "selection");
3356 PDEBUG(debug_msgo, "message out");
3357 PDEBUG(debug_msgi, "message in");
3358 PDEBUG(debug_status, "status");
3359 PDEBUG(debug_cmd, "command");
3360 PDEBUG(debug_datai, "data in");
3361 PDEBUG(debug_datao, "data out");
3362 PDEBUG(debug_eh, "eh");
3363 PDEBUG(debug_locking, "locks");
3364 PDEBUG(debug_phases, "phases");
3365
3366 SPRINTF("\n");
3367#endif
3368
3369 SPRINTF("\nqueue status:\n"); 2851 SPRINTF("\nqueue status:\n");
3370 DO_LOCK(flags); 2852 DO_LOCK(flags);
3371 if (ISSUE_SC) { 2853 if (ISSUE_SC) {
@@ -3393,8 +2875,8 @@ static int aha152x_show_info(struct seq_file *m, struct Scsi_Host *shpnt)
3393 2875
3394#if defined(AHA152X_STAT) 2876#if defined(AHA152X_STAT)
3395 SPRINTF("statistics:\n" 2877 SPRINTF("statistics:\n"
3396 "total commands: %d\n" 2878 "total commands: %d\n"
3397 "disconnections: %d\n" 2879 "disconnections: %d\n"
3398 "busfree with check condition: %d\n" 2880 "busfree with check condition: %d\n"
3399 "busfree without old command: %d\n" 2881 "busfree without old command: %d\n"
3400 "busfree without new command: %d\n" 2882 "busfree without new command: %d\n"
@@ -3413,7 +2895,7 @@ static int aha152x_show_info(struct seq_file *m, struct Scsi_Host *shpnt)
3413 HOSTDATA(shpnt)->busfree_without_any_action); 2895 HOSTDATA(shpnt)->busfree_without_any_action);
3414 for(i=0; i<maxstate; i++) { 2896 for(i=0; i<maxstate; i++) {
3415 SPRINTF("%-10s %-12d %-12d %-12ld\n", 2897 SPRINTF("%-10s %-12d %-12d %-12ld\n",
3416 states[i].name, 2898 states[i].name,
3417 HOSTDATA(shpnt)->count_trans[i], 2899 HOSTDATA(shpnt)->count_trans[i],
3418 HOSTDATA(shpnt)->count[i], 2900 HOSTDATA(shpnt)->count[i],
3419 HOSTDATA(shpnt)->time[i]); 2901 HOSTDATA(shpnt)->time[i]);
@@ -3671,25 +3153,19 @@ static int __init aha152x_init(void)
3671 setup[setup_count].synchronous = aha152x[5]; 3153 setup[setup_count].synchronous = aha152x[5];
3672 setup[setup_count].delay = aha152x[6]; 3154 setup[setup_count].delay = aha152x[6];
3673 setup[setup_count].ext_trans = aha152x[7]; 3155 setup[setup_count].ext_trans = aha152x[7];
3674#if defined(AHA152X_DEBUG) 3156 } else if (io[0] != 0 || irq[0] != 0) {
3675 setup[setup_count].debug = aha152x[8];
3676#endif
3677 } else if(io[0]!=0 || irq[0]!=0) {
3678 if(io[0]!=0) setup[setup_count].io_port = io[0]; 3157 if(io[0]!=0) setup[setup_count].io_port = io[0];
3679 if(irq[0]!=0) setup[setup_count].irq = irq[0]; 3158 if(irq[0]!=0) setup[setup_count].irq = irq[0];
3680 3159
3681 setup[setup_count].scsiid = scsiid[0]; 3160 setup[setup_count].scsiid = scsiid[0];
3682 setup[setup_count].reconnect = reconnect[0]; 3161 setup[setup_count].reconnect = reconnect[0];
3683 setup[setup_count].parity = parity[0]; 3162 setup[setup_count].parity = parity[0];
3684 setup[setup_count].synchronous = sync[0]; 3163 setup[setup_count].synchronous = sync[0];
3685 setup[setup_count].delay = delay[0]; 3164 setup[setup_count].delay = delay[0];
3686 setup[setup_count].ext_trans = exttrans[0]; 3165 setup[setup_count].ext_trans = exttrans[0];
3687#if defined(AHA152X_DEBUG)
3688 setup[setup_count].debug = debug[0];
3689#endif
3690 } 3166 }
3691 3167
3692 if (checksetup(&setup[setup_count])) 3168 if (checksetup(&setup[setup_count]))
3693 setup_count++; 3169 setup_count++;
3694 else 3170 else
3695 printk(KERN_ERR "aha152x: invalid module params io=0x%x, irq=%d,scsiid=%d,reconnect=%d,parity=%d,sync=%d,delay=%d,exttrans=%d\n", 3171 printk(KERN_ERR "aha152x: invalid module params io=0x%x, irq=%d,scsiid=%d,reconnect=%d,parity=%d,sync=%d,delay=%d,exttrans=%d\n",
@@ -3714,22 +3190,16 @@ static int __init aha152x_init(void)
3714 setup[setup_count].synchronous = aha152x1[5]; 3190 setup[setup_count].synchronous = aha152x1[5];
3715 setup[setup_count].delay = aha152x1[6]; 3191 setup[setup_count].delay = aha152x1[6];
3716 setup[setup_count].ext_trans = aha152x1[7]; 3192 setup[setup_count].ext_trans = aha152x1[7];
3717#if defined(AHA152X_DEBUG) 3193 } else if (io[1] != 0 || irq[1] != 0) {
3718 setup[setup_count].debug = aha152x1[8];
3719#endif
3720 } else if(io[1]!=0 || irq[1]!=0) {
3721 if(io[1]!=0) setup[setup_count].io_port = io[1]; 3194 if(io[1]!=0) setup[setup_count].io_port = io[1];
3722 if(irq[1]!=0) setup[setup_count].irq = irq[1]; 3195 if(irq[1]!=0) setup[setup_count].irq = irq[1];
3723 3196
3724 setup[setup_count].scsiid = scsiid[1]; 3197 setup[setup_count].scsiid = scsiid[1];
3725 setup[setup_count].reconnect = reconnect[1]; 3198 setup[setup_count].reconnect = reconnect[1];
3726 setup[setup_count].parity = parity[1]; 3199 setup[setup_count].parity = parity[1];
3727 setup[setup_count].synchronous = sync[1]; 3200 setup[setup_count].synchronous = sync[1];
3728 setup[setup_count].delay = delay[1]; 3201 setup[setup_count].delay = delay[1];
3729 setup[setup_count].ext_trans = exttrans[1]; 3202 setup[setup_count].ext_trans = exttrans[1];
3730#if defined(AHA152X_DEBUG)
3731 setup[setup_count].debug = debug[1];
3732#endif
3733 } 3203 }
3734 if (checksetup(&setup[setup_count])) 3204 if (checksetup(&setup[setup_count]))
3735 setup_count++; 3205 setup_count++;
@@ -3776,9 +3246,6 @@ static int __init aha152x_init(void)
3776 setup[setup_count].synchronous = 1; 3246 setup[setup_count].synchronous = 1;
3777 setup[setup_count].delay = DELAY_DEFAULT; 3247 setup[setup_count].delay = DELAY_DEFAULT;
3778 setup[setup_count].ext_trans = 0; 3248 setup[setup_count].ext_trans = 0;
3779#if defined(AHA152X_DEBUG)
3780 setup[setup_count].debug = DEBUG_DEFAULT;
3781#endif
3782#if defined(__ISAPNP__) 3249#if defined(__ISAPNP__)
3783 pnpdev[setup_count] = dev; 3250 pnpdev[setup_count] = dev;
3784#endif 3251#endif
@@ -3847,9 +3314,6 @@ static int __init aha152x_init(void)
3847 setup[setup_count].synchronous = conf.cf_syncneg; 3314 setup[setup_count].synchronous = conf.cf_syncneg;
3848 setup[setup_count].delay = DELAY_DEFAULT; 3315 setup[setup_count].delay = DELAY_DEFAULT;
3849 setup[setup_count].ext_trans = 0; 3316 setup[setup_count].ext_trans = 0;
3850#if defined(AHA152X_DEBUG)
3851 setup[setup_count].debug = DEBUG_DEFAULT;
3852#endif
3853 setup_count++; 3317 setup_count++;
3854 3318
3855 } 3319 }
@@ -3903,11 +3367,8 @@ module_exit(aha152x_exit);
3903#if !defined(MODULE) 3367#if !defined(MODULE)
3904static int __init aha152x_setup(char *str) 3368static int __init aha152x_setup(char *str)
3905{ 3369{
3906#if defined(AHA152X_DEBUG)
3907 int ints[11];
3908#else
3909 int ints[10]; 3370 int ints[10];
3910#endif 3371
3911 get_options(str, ARRAY_SIZE(ints), ints); 3372 get_options(str, ARRAY_SIZE(ints), ints);
3912 3373
3913 if(setup_count>=ARRAY_SIZE(setup)) { 3374 if(setup_count>=ARRAY_SIZE(setup)) {
@@ -3924,16 +3385,9 @@ static int __init aha152x_setup(char *str)
3924 setup[setup_count].synchronous = ints[0] >= 6 ? ints[6] : 1; 3385 setup[setup_count].synchronous = ints[0] >= 6 ? ints[6] : 1;
3925 setup[setup_count].delay = ints[0] >= 7 ? ints[7] : DELAY_DEFAULT; 3386 setup[setup_count].delay = ints[0] >= 7 ? ints[7] : DELAY_DEFAULT;
3926 setup[setup_count].ext_trans = ints[0] >= 8 ? ints[8] : 0; 3387 setup[setup_count].ext_trans = ints[0] >= 8 ? ints[8] : 0;
3927#if defined(AHA152X_DEBUG)
3928 setup[setup_count].debug = ints[0] >= 9 ? ints[9] : DEBUG_DEFAULT;
3929 if (ints[0] > 9) {
3930 printk(KERN_NOTICE "aha152x: usage: aha152x=<IOBASE>[,<IRQ>[,<SCSI ID>"
3931 "[,<RECONNECT>[,<PARITY>[,<SYNCHRONOUS>[,<DELAY>[,<EXT_TRANS>[,<DEBUG>]]]]]]]]\n");
3932#else
3933 if (ints[0] > 8) { /*}*/ 3388 if (ints[0] > 8) { /*}*/
3934 printk(KERN_NOTICE "aha152x: usage: aha152x=<IOBASE>[,<IRQ>[,<SCSI ID>" 3389 printk(KERN_NOTICE "aha152x: usage: aha152x=<IOBASE>[,<IRQ>[,<SCSI ID>"
3935 "[,<RECONNECT>[,<PARITY>[,<SYNCHRONOUS>[,<DELAY>[,<EXT_TRANS>]]]]]]]\n"); 3390 "[,<RECONNECT>[,<PARITY>[,<SYNCHRONOUS>[,<DELAY>[,<EXT_TRANS>]]]]]]]\n");
3936#endif
3937 } else { 3391 } else {
3938 setup_count++; 3392 setup_count++;
3939 return 0; 3393 return 0;
diff --git a/drivers/scsi/aha1740.c b/drivers/scsi/aha1740.c
index 5f3101797c93..31ace4bef8fe 100644
--- a/drivers/scsi/aha1740.c
+++ b/drivers/scsi/aha1740.c
@@ -531,7 +531,7 @@ static int aha1740_eh_abort_handler (Scsi_Cmnd *dummy)
531 * quiet as possible... 531 * quiet as possible...
532 */ 532 */
533 533
534 return 0; 534 return SUCCESS;
535} 535}
536 536
537static struct scsi_host_template aha1740_template = { 537static struct scsi_host_template aha1740_template = {
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index ed333669a7dc..d5c7b193d8d3 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -925,6 +925,7 @@ struct scsi_host_template aic79xx_driver_template = {
925 .slave_configure = ahd_linux_slave_configure, 925 .slave_configure = ahd_linux_slave_configure,
926 .target_alloc = ahd_linux_target_alloc, 926 .target_alloc = ahd_linux_target_alloc,
927 .target_destroy = ahd_linux_target_destroy, 927 .target_destroy = ahd_linux_target_destroy,
928 .use_blk_tags = 1,
928}; 929};
929 930
930/******************************** Bus DMA *************************************/ 931/******************************** Bus DMA *************************************/
@@ -1468,12 +1469,9 @@ ahd_platform_set_tags(struct ahd_softc *ahd, struct scsi_device *sdev,
1468 1469
1469 switch ((dev->flags & (AHD_DEV_Q_BASIC|AHD_DEV_Q_TAGGED))) { 1470 switch ((dev->flags & (AHD_DEV_Q_BASIC|AHD_DEV_Q_TAGGED))) {
1470 case AHD_DEV_Q_BASIC: 1471 case AHD_DEV_Q_BASIC:
1471 scsi_set_tag_type(sdev, MSG_SIMPLE_TASK);
1472 scsi_activate_tcq(sdev, dev->openings + dev->active);
1473 break;
1474 case AHD_DEV_Q_TAGGED: 1472 case AHD_DEV_Q_TAGGED:
1475 scsi_set_tag_type(sdev, MSG_ORDERED_TASK); 1473 scsi_change_queue_depth(sdev,
1476 scsi_activate_tcq(sdev, dev->openings + dev->active); 1474 dev->openings + dev->active);
1477 break; 1475 break;
1478 default: 1476 default:
1479 /* 1477 /*
@@ -1482,7 +1480,7 @@ ahd_platform_set_tags(struct ahd_softc *ahd, struct scsi_device *sdev,
1482 * serially on the controller/device. This should 1480 * serially on the controller/device. This should
1483 * remove some latency. 1481 * remove some latency.
1484 */ 1482 */
1485 scsi_deactivate_tcq(sdev, 1); 1483 scsi_change_queue_depth(sdev, 1);
1486 break; 1484 break;
1487 } 1485 }
1488} 1486}
@@ -1619,15 +1617,6 @@ ahd_linux_run_command(struct ahd_softc *ahd, struct ahd_linux_device *dev,
1619 } 1617 }
1620 1618
1621 if ((dev->flags & (AHD_DEV_Q_TAGGED|AHD_DEV_Q_BASIC)) != 0) { 1619 if ((dev->flags & (AHD_DEV_Q_TAGGED|AHD_DEV_Q_BASIC)) != 0) {
1622 int msg_bytes;
1623 uint8_t tag_msgs[2];
1624
1625 msg_bytes = scsi_populate_tag_msg(cmd, tag_msgs);
1626 if (msg_bytes && tag_msgs[0] != MSG_SIMPLE_TASK) {
1627 hscb->control |= tag_msgs[0];
1628 if (tag_msgs[0] == MSG_ORDERED_TASK)
1629 dev->commands_since_idle_or_otag = 0;
1630 } else
1631 if (dev->commands_since_idle_or_otag == AHD_OTAG_THRESH 1620 if (dev->commands_since_idle_or_otag == AHD_OTAG_THRESH
1632 && (dev->flags & AHD_DEV_Q_TAGGED) != 0) { 1621 && (dev->flags & AHD_DEV_Q_TAGGED) != 0) {
1633 hscb->control |= MSG_ORDERED_TASK; 1622 hscb->control |= MSG_ORDERED_TASK;
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index d2c9bf39033d..88360116dbcb 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -812,6 +812,7 @@ struct scsi_host_template aic7xxx_driver_template = {
812 .slave_configure = ahc_linux_slave_configure, 812 .slave_configure = ahc_linux_slave_configure,
813 .target_alloc = ahc_linux_target_alloc, 813 .target_alloc = ahc_linux_target_alloc,
814 .target_destroy = ahc_linux_target_destroy, 814 .target_destroy = ahc_linux_target_destroy,
815 .use_blk_tags = 1,
815}; 816};
816 817
817/**************************** Tasklet Handler *********************************/ 818/**************************** Tasklet Handler *********************************/
@@ -1334,13 +1335,9 @@ ahc_platform_set_tags(struct ahc_softc *ahc, struct scsi_device *sdev,
1334 } 1335 }
1335 switch ((dev->flags & (AHC_DEV_Q_BASIC|AHC_DEV_Q_TAGGED))) { 1336 switch ((dev->flags & (AHC_DEV_Q_BASIC|AHC_DEV_Q_TAGGED))) {
1336 case AHC_DEV_Q_BASIC: 1337 case AHC_DEV_Q_BASIC:
1337 scsi_set_tag_type(sdev, MSG_SIMPLE_TAG);
1338 scsi_activate_tcq(sdev, dev->openings + dev->active);
1339 break;
1340 case AHC_DEV_Q_TAGGED: 1338 case AHC_DEV_Q_TAGGED:
1341 scsi_set_tag_type(sdev, MSG_ORDERED_TAG); 1339 scsi_change_queue_depth(sdev,
1342 scsi_activate_tcq(sdev, dev->openings + dev->active); 1340 dev->openings + dev->active);
1343 break;
1344 default: 1341 default:
1345 /* 1342 /*
1346 * We allow the OS to queue 2 untagged transactions to 1343 * We allow the OS to queue 2 untagged transactions to
@@ -1348,7 +1345,7 @@ ahc_platform_set_tags(struct ahc_softc *ahc, struct scsi_device *sdev,
1348 * serially on the controller/device. This should 1345 * serially on the controller/device. This should
1349 * remove some latency. 1346 * remove some latency.
1350 */ 1347 */
1351 scsi_deactivate_tcq(sdev, 2); 1348 scsi_change_queue_depth(sdev, 2);
1352 break; 1349 break;
1353 } 1350 }
1354} 1351}
@@ -1447,7 +1444,7 @@ ahc_linux_run_command(struct ahc_softc *ahc, struct ahc_linux_device *dev,
1447 * we are storing a full busy target *lun* 1444 * we are storing a full busy target *lun*
1448 * table in SCB space. 1445 * table in SCB space.
1449 */ 1446 */
1450 if (!blk_rq_tagged(cmd->request) 1447 if (!(cmd->flags & SCMD_TAGGED)
1451 && (ahc->features & AHC_SCB_BTT) == 0) { 1448 && (ahc->features & AHC_SCB_BTT) == 0) {
1452 int target_offset; 1449 int target_offset;
1453 1450
@@ -1501,15 +1498,7 @@ ahc_linux_run_command(struct ahc_softc *ahc, struct ahc_linux_device *dev,
1501 } 1498 }
1502 1499
1503 if ((dev->flags & (AHC_DEV_Q_TAGGED|AHC_DEV_Q_BASIC)) != 0) { 1500 if ((dev->flags & (AHC_DEV_Q_TAGGED|AHC_DEV_Q_BASIC)) != 0) {
1504 int msg_bytes; 1501 if (dev->commands_since_idle_or_otag == AHC_OTAG_THRESH
1505 uint8_t tag_msgs[2];
1506
1507 msg_bytes = scsi_populate_tag_msg(cmd, tag_msgs);
1508 if (msg_bytes && tag_msgs[0] != MSG_SIMPLE_TASK) {
1509 hscb->control |= tag_msgs[0];
1510 if (tag_msgs[0] == MSG_ORDERED_TASK)
1511 dev->commands_since_idle_or_otag = 0;
1512 } else if (dev->commands_since_idle_or_otag == AHC_OTAG_THRESH
1513 && (dev->flags & AHC_DEV_Q_TAGGED) != 0) { 1502 && (dev->flags & AHC_DEV_Q_TAGGED) != 0) {
1514 hscb->control |= MSG_ORDERED_TASK; 1503 hscb->control |= MSG_ORDERED_TASK;
1515 dev->commands_since_idle_or_otag = 0; 1504 dev->commands_since_idle_or_otag = 0;
diff --git a/drivers/scsi/aic94xx/aic94xx.h b/drivers/scsi/aic94xx/aic94xx.h
index 66cda669b417..26d4ad9ede2e 100644
--- a/drivers/scsi/aic94xx/aic94xx.h
+++ b/drivers/scsi/aic94xx/aic94xx.h
@@ -78,7 +78,7 @@ void asd_dev_gone(struct domain_device *dev);
78 78
79void asd_invalidate_edb(struct asd_ascb *ascb, int edb_id); 79void asd_invalidate_edb(struct asd_ascb *ascb, int edb_id);
80 80
81int asd_execute_task(struct sas_task *, int num, gfp_t gfp_flags); 81int asd_execute_task(struct sas_task *task, gfp_t gfp_flags);
82 82
83void asd_set_dmamode(struct domain_device *dev); 83void asd_set_dmamode(struct domain_device *dev);
84 84
diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.c b/drivers/scsi/aic94xx/aic94xx_hwi.c
index 4df867e07b20..9f636a34d595 100644
--- a/drivers/scsi/aic94xx/aic94xx_hwi.c
+++ b/drivers/scsi/aic94xx/aic94xx_hwi.c
@@ -1200,8 +1200,7 @@ static void asd_start_scb_timers(struct list_head *list)
1200 * Case A: we can send the whole batch at once. Increment "pending" 1200 * Case A: we can send the whole batch at once. Increment "pending"
1201 * in the beginning of this function, when it is checked, in order to 1201 * in the beginning of this function, when it is checked, in order to
1202 * eliminate races when this function is called by multiple processes. 1202 * eliminate races when this function is called by multiple processes.
1203 * Case B: should never happen if the managing layer considers 1203 * Case B: should never happen.
1204 * lldd_queue_size.
1205 */ 1204 */
1206int asd_post_ascb_list(struct asd_ha_struct *asd_ha, struct asd_ascb *ascb, 1205int asd_post_ascb_list(struct asd_ha_struct *asd_ha, struct asd_ascb *ascb,
1207 int num) 1206 int num)
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index c56741fc4b99..14fc018436c2 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -49,14 +49,6 @@ MODULE_PARM_DESC(use_msi, "\n"
49 "\tEnable(1) or disable(0) using PCI MSI.\n" 49 "\tEnable(1) or disable(0) using PCI MSI.\n"
50 "\tDefault: 0"); 50 "\tDefault: 0");
51 51
52static int lldd_max_execute_num = 0;
53module_param_named(collector, lldd_max_execute_num, int, S_IRUGO);
54MODULE_PARM_DESC(collector, "\n"
55 "\tIf greater than one, tells the SAS Layer to run in Task Collector\n"
56 "\tMode. If 1 or 0, tells the SAS Layer to run in Direct Mode.\n"
57 "\tThe aic94xx SAS LLDD supports both modes.\n"
58 "\tDefault: 0 (Direct Mode).\n");
59
60static struct scsi_transport_template *aic94xx_transport_template; 52static struct scsi_transport_template *aic94xx_transport_template;
61static int asd_scan_finished(struct Scsi_Host *, unsigned long); 53static int asd_scan_finished(struct Scsi_Host *, unsigned long);
62static void asd_scan_start(struct Scsi_Host *); 54static void asd_scan_start(struct Scsi_Host *);
@@ -83,6 +75,8 @@ static struct scsi_host_template aic94xx_sht = {
83 .eh_bus_reset_handler = sas_eh_bus_reset_handler, 75 .eh_bus_reset_handler = sas_eh_bus_reset_handler,
84 .target_destroy = sas_target_destroy, 76 .target_destroy = sas_target_destroy,
85 .ioctl = sas_ioctl, 77 .ioctl = sas_ioctl,
78 .use_blk_tags = 1,
79 .track_queue_depth = 1,
86}; 80};
87 81
88static int asd_map_memio(struct asd_ha_struct *asd_ha) 82static int asd_map_memio(struct asd_ha_struct *asd_ha)
@@ -709,9 +703,6 @@ static int asd_register_sas_ha(struct asd_ha_struct *asd_ha)
709 asd_ha->sas_ha.sas_port= sas_ports; 703 asd_ha->sas_ha.sas_port= sas_ports;
710 asd_ha->sas_ha.num_phys= ASD_MAX_PHYS; 704 asd_ha->sas_ha.num_phys= ASD_MAX_PHYS;
711 705
712 asd_ha->sas_ha.lldd_queue_size = asd_ha->seq.can_queue;
713 asd_ha->sas_ha.lldd_max_execute_num = lldd_max_execute_num;
714
715 return sas_register_ha(&asd_ha->sas_ha); 706 return sas_register_ha(&asd_ha->sas_ha);
716} 707}
717 708
diff --git a/drivers/scsi/aic94xx/aic94xx_task.c b/drivers/scsi/aic94xx/aic94xx_task.c
index 59b86e260ce9..5ff1ce7ba1f4 100644
--- a/drivers/scsi/aic94xx/aic94xx_task.c
+++ b/drivers/scsi/aic94xx/aic94xx_task.c
@@ -543,8 +543,7 @@ static int asd_can_queue(struct asd_ha_struct *asd_ha, int num)
543 return res; 543 return res;
544} 544}
545 545
546int asd_execute_task(struct sas_task *task, const int num, 546int asd_execute_task(struct sas_task *task, gfp_t gfp_flags)
547 gfp_t gfp_flags)
548{ 547{
549 int res = 0; 548 int res = 0;
550 LIST_HEAD(alist); 549 LIST_HEAD(alist);
@@ -553,11 +552,11 @@ int asd_execute_task(struct sas_task *task, const int num,
553 struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha; 552 struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha;
554 unsigned long flags; 553 unsigned long flags;
555 554
556 res = asd_can_queue(asd_ha, num); 555 res = asd_can_queue(asd_ha, 1);
557 if (res) 556 if (res)
558 return res; 557 return res;
559 558
560 res = num; 559 res = 1;
561 ascb = asd_ascb_alloc_list(asd_ha, &res, gfp_flags); 560 ascb = asd_ascb_alloc_list(asd_ha, &res, gfp_flags);
562 if (res) { 561 if (res) {
563 res = -ENOMEM; 562 res = -ENOMEM;
@@ -568,7 +567,7 @@ int asd_execute_task(struct sas_task *task, const int num,
568 list_for_each_entry(a, &alist, list) { 567 list_for_each_entry(a, &alist, list) {
569 a->uldd_task = t; 568 a->uldd_task = t;
570 t->lldd_task = a; 569 t->lldd_task = a;
571 t = list_entry(t->list.next, struct sas_task, list); 570 break;
572 } 571 }
573 list_for_each_entry(a, &alist, list) { 572 list_for_each_entry(a, &alist, list) {
574 t = a->uldd_task; 573 t = a->uldd_task;
@@ -601,7 +600,7 @@ int asd_execute_task(struct sas_task *task, const int num,
601 } 600 }
602 list_del_init(&alist); 601 list_del_init(&alist);
603 602
604 res = asd_post_ascb_list(asd_ha, ascb, num); 603 res = asd_post_ascb_list(asd_ha, ascb, 1);
605 if (unlikely(res)) { 604 if (unlikely(res)) {
606 a = NULL; 605 a = NULL;
607 __list_add(&alist, ascb->list.prev, &ascb->list); 606 __list_add(&alist, ascb->list.prev, &ascb->list);
@@ -639,6 +638,6 @@ out_err_unmap:
639out_err: 638out_err:
640 if (ascb) 639 if (ascb)
641 asd_ascb_free_list(ascb); 640 asd_ascb_free_list(ascb);
642 asd_can_dequeue(asd_ha, num); 641 asd_can_dequeue(asd_ha, 1);
643 return res; 642 return res;
644} 643}
diff --git a/drivers/scsi/am53c974.c b/drivers/scsi/am53c974.c
new file mode 100644
index 000000000000..aa3e2c7cd83c
--- /dev/null
+++ b/drivers/scsi/am53c974.c
@@ -0,0 +1,586 @@
1/*
2 * AMD am53c974 driver.
3 * Copyright (c) 2014 Hannes Reinecke, SUSE Linux GmbH
4 */
5
6#include <linux/kernel.h>
7#include <linux/module.h>
8#include <linux/init.h>
9#include <linux/delay.h>
10#include <linux/pci.h>
11#include <linux/interrupt.h>
12
13#include <scsi/scsi_host.h>
14
15#include "esp_scsi.h"
16
17#define DRV_MODULE_NAME "am53c974"
18#define DRV_MODULE_VERSION "1.00"
19
20static bool am53c974_debug;
21static bool am53c974_fenab = true;
22
23#define esp_dma_log(f, a...) \
24 do { \
25 if (am53c974_debug) \
26 shost_printk(KERN_DEBUG, esp->host, f, ##a); \
27 } while (0)
28
29#define ESP_DMA_CMD 0x10
30#define ESP_DMA_STC 0x11
31#define ESP_DMA_SPA 0x12
32#define ESP_DMA_WBC 0x13
33#define ESP_DMA_WAC 0x14
34#define ESP_DMA_STATUS 0x15
35#define ESP_DMA_SMDLA 0x16
36#define ESP_DMA_WMAC 0x17
37
38#define ESP_DMA_CMD_IDLE 0x00
39#define ESP_DMA_CMD_BLAST 0x01
40#define ESP_DMA_CMD_ABORT 0x02
41#define ESP_DMA_CMD_START 0x03
42#define ESP_DMA_CMD_MASK 0x03
43#define ESP_DMA_CMD_DIAG 0x04
44#define ESP_DMA_CMD_MDL 0x10
45#define ESP_DMA_CMD_INTE_P 0x20
46#define ESP_DMA_CMD_INTE_D 0x40
47#define ESP_DMA_CMD_DIR 0x80
48
49#define ESP_DMA_STAT_PWDN 0x01
50#define ESP_DMA_STAT_ERROR 0x02
51#define ESP_DMA_STAT_ABORT 0x04
52#define ESP_DMA_STAT_DONE 0x08
53#define ESP_DMA_STAT_SCSIINT 0x10
54#define ESP_DMA_STAT_BCMPLT 0x20
55
56/* EEPROM is accessed with 16-bit values */
57#define DC390_EEPROM_READ 0x80
58#define DC390_EEPROM_LEN 0x40
59
60/*
61 * DC390 EEPROM
62 *
63 * 8 * 4 bytes of per-device options
64 * followed by HBA specific options
65 */
66
67/* Per-device options */
68#define DC390_EE_MODE1 0x00
69#define DC390_EE_SPEED 0x01
70
71/* HBA-specific options */
72#define DC390_EE_ADAPT_SCSI_ID 0x40
73#define DC390_EE_MODE2 0x41
74#define DC390_EE_DELAY 0x42
75#define DC390_EE_TAG_CMD_NUM 0x43
76
77#define DC390_EE_MODE1_PARITY_CHK 0x01
78#define DC390_EE_MODE1_SYNC_NEGO 0x02
79#define DC390_EE_MODE1_EN_DISC 0x04
80#define DC390_EE_MODE1_SEND_START 0x08
81#define DC390_EE_MODE1_TCQ 0x10
82
83#define DC390_EE_MODE2_MORE_2DRV 0x01
84#define DC390_EE_MODE2_GREATER_1G 0x02
85#define DC390_EE_MODE2_RST_SCSI_BUS 0x04
86#define DC390_EE_MODE2_ACTIVE_NEGATION 0x08
87#define DC390_EE_MODE2_NO_SEEK 0x10
88#define DC390_EE_MODE2_LUN_CHECK 0x20
89
90struct pci_esp_priv {
91 struct esp *esp;
92 u8 dma_status;
93};
94
95static void pci_esp_dma_drain(struct esp *esp);
96
97static inline struct pci_esp_priv *pci_esp_get_priv(struct esp *esp)
98{
99 struct pci_dev *pdev = esp->dev;
100
101 return pci_get_drvdata(pdev);
102}
103
104static void pci_esp_write8(struct esp *esp, u8 val, unsigned long reg)
105{
106 iowrite8(val, esp->regs + (reg * 4UL));
107}
108
109static u8 pci_esp_read8(struct esp *esp, unsigned long reg)
110{
111 return ioread8(esp->regs + (reg * 4UL));
112}
113
114static void pci_esp_write32(struct esp *esp, u32 val, unsigned long reg)
115{
116 return iowrite32(val, esp->regs + (reg * 4UL));
117}
118
119static dma_addr_t pci_esp_map_single(struct esp *esp, void *buf,
120 size_t sz, int dir)
121{
122 return pci_map_single(esp->dev, buf, sz, dir);
123}
124
125static int pci_esp_map_sg(struct esp *esp, struct scatterlist *sg,
126 int num_sg, int dir)
127{
128 return pci_map_sg(esp->dev, sg, num_sg, dir);
129}
130
131static void pci_esp_unmap_single(struct esp *esp, dma_addr_t addr,
132 size_t sz, int dir)
133{
134 pci_unmap_single(esp->dev, addr, sz, dir);
135}
136
137static void pci_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
138 int num_sg, int dir)
139{
140 pci_unmap_sg(esp->dev, sg, num_sg, dir);
141}
142
143static int pci_esp_irq_pending(struct esp *esp)
144{
145 struct pci_esp_priv *pep = pci_esp_get_priv(esp);
146
147 pep->dma_status = pci_esp_read8(esp, ESP_DMA_STATUS);
148 esp_dma_log("dma intr dreg[%02x]\n", pep->dma_status);
149
150 if (pep->dma_status & (ESP_DMA_STAT_ERROR |
151 ESP_DMA_STAT_ABORT |
152 ESP_DMA_STAT_DONE |
153 ESP_DMA_STAT_SCSIINT))
154 return 1;
155
156 return 0;
157}
158
159static void pci_esp_reset_dma(struct esp *esp)
160{
161 /* Nothing to do ? */
162}
163
164static void pci_esp_dma_drain(struct esp *esp)
165{
166 u8 resid;
167 int lim = 1000;
168
169
170 if ((esp->sreg & ESP_STAT_PMASK) == ESP_DOP ||
171 (esp->sreg & ESP_STAT_PMASK) == ESP_DIP)
172 /* Data-In or Data-Out, nothing to be done */
173 return;
174
175 while (--lim > 0) {
176 resid = pci_esp_read8(esp, ESP_FFLAGS) & ESP_FF_FBYTES;
177 if (resid <= 1)
178 break;
179 cpu_relax();
180 }
181 if (resid > 1) {
182 /* FIFO not cleared */
183 shost_printk(KERN_INFO, esp->host,
184 "FIFO not cleared, %d bytes left\n",
185 resid);
186 }
187
188 /*
 189 * When there is a residual, BCMPLT will never be set
190 * (obviously). But we still have to issue the BLAST
 191 * command, otherwise the data will not be transferred.
192 * But we'll never know when the BLAST operation is
193 * finished. So check for some time and give up eventually.
194 */
195 lim = 1000;
196 pci_esp_write8(esp, ESP_DMA_CMD_DIR | ESP_DMA_CMD_BLAST, ESP_DMA_CMD);
197 while (pci_esp_read8(esp, ESP_DMA_STATUS) & ESP_DMA_STAT_BCMPLT) {
198 if (--lim == 0)
199 break;
200 cpu_relax();
201 }
202 pci_esp_write8(esp, ESP_DMA_CMD_DIR | ESP_DMA_CMD_IDLE, ESP_DMA_CMD);
203 esp_dma_log("DMA blast done (%d tries, %d bytes left)\n", lim, resid);
204 /* BLAST residual handling is currently untested */
205 if (WARN_ON_ONCE(resid == 1)) {
206 struct esp_cmd_entry *ent = esp->active_cmd;
207
208 ent->flags |= ESP_CMD_FLAG_RESIDUAL;
209 }
210}
211
212static void pci_esp_dma_invalidate(struct esp *esp)
213{
214 struct pci_esp_priv *pep = pci_esp_get_priv(esp);
215
216 esp_dma_log("invalidate DMA\n");
217
218 pci_esp_write8(esp, ESP_DMA_CMD_IDLE, ESP_DMA_CMD);
219 pep->dma_status = 0;
220}
221
222static int pci_esp_dma_error(struct esp *esp)
223{
224 struct pci_esp_priv *pep = pci_esp_get_priv(esp);
225
226 if (pep->dma_status & ESP_DMA_STAT_ERROR) {
227 u8 dma_cmd = pci_esp_read8(esp, ESP_DMA_CMD);
228
229 if ((dma_cmd & ESP_DMA_CMD_MASK) == ESP_DMA_CMD_START)
230 pci_esp_write8(esp, ESP_DMA_CMD_ABORT, ESP_DMA_CMD);
231
232 return 1;
233 }
234 if (pep->dma_status & ESP_DMA_STAT_ABORT) {
235 pci_esp_write8(esp, ESP_DMA_CMD_IDLE, ESP_DMA_CMD);
236 pep->dma_status = pci_esp_read8(esp, ESP_DMA_CMD);
237 return 1;
238 }
239 return 0;
240}
241
242static void pci_esp_send_dma_cmd(struct esp *esp, u32 addr, u32 esp_count,
243 u32 dma_count, int write, u8 cmd)
244{
245 struct pci_esp_priv *pep = pci_esp_get_priv(esp);
246 u32 val = 0;
247
248 BUG_ON(!(cmd & ESP_CMD_DMA));
249
250 pep->dma_status = 0;
251
252 /* Set DMA engine to IDLE */
253 if (write)
254 /* DMA write direction logic is inverted */
255 val |= ESP_DMA_CMD_DIR;
256 pci_esp_write8(esp, ESP_DMA_CMD_IDLE | val, ESP_DMA_CMD);
257
258 pci_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
259 pci_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
260 if (esp->config2 & ESP_CONFIG2_FENAB)
261 pci_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);
262
263 pci_esp_write32(esp, esp_count, ESP_DMA_STC);
264 pci_esp_write32(esp, addr, ESP_DMA_SPA);
265
266 esp_dma_log("start dma addr[%x] count[%d:%d]\n",
267 addr, esp_count, dma_count);
268
269 scsi_esp_cmd(esp, cmd);
270 /* Send DMA Start command */
271 pci_esp_write8(esp, ESP_DMA_CMD_START | val, ESP_DMA_CMD);
272}
273
274static u32 pci_esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len)
275{
276 int dma_limit = 16;
277 u32 base, end;
278
279 /*
280 * If CONFIG2_FENAB is set we can
281 * handle up to 24 bit addresses
282 */
283 if (esp->config2 & ESP_CONFIG2_FENAB)
284 dma_limit = 24;
285
286 if (dma_len > (1U << dma_limit))
287 dma_len = (1U << dma_limit);
288
289 /*
290 * Prevent crossing a 24-bit address boundary.
291 */
292 base = dma_addr & ((1U << 24) - 1U);
293 end = base + dma_len;
294 if (end > (1U << 24))
295 end = (1U <<24);
296 dma_len = end - base;
297
298 return dma_len;
299}
300
301static const struct esp_driver_ops pci_esp_ops = {
302 .esp_write8 = pci_esp_write8,
303 .esp_read8 = pci_esp_read8,
304 .map_single = pci_esp_map_single,
305 .map_sg = pci_esp_map_sg,
306 .unmap_single = pci_esp_unmap_single,
307 .unmap_sg = pci_esp_unmap_sg,
308 .irq_pending = pci_esp_irq_pending,
309 .reset_dma = pci_esp_reset_dma,
310 .dma_drain = pci_esp_dma_drain,
311 .dma_invalidate = pci_esp_dma_invalidate,
312 .send_dma_cmd = pci_esp_send_dma_cmd,
313 .dma_error = pci_esp_dma_error,
314 .dma_length_limit = pci_esp_dma_length_limit,
315};
316
317/*
318 * Read DC-390 eeprom
319 */
320static void dc390_eeprom_prepare_read(struct pci_dev *pdev, u8 cmd)
321{
322 u8 carry_flag = 1, j = 0x80, bval;
323 int i;
324
325 for (i = 0; i < 9; i++) {
326 if (carry_flag) {
327 pci_write_config_byte(pdev, 0x80, 0x40);
328 bval = 0xc0;
329 } else
330 bval = 0x80;
331
332 udelay(160);
333 pci_write_config_byte(pdev, 0x80, bval);
334 udelay(160);
335 pci_write_config_byte(pdev, 0x80, 0);
336 udelay(160);
337
338 carry_flag = (cmd & j) ? 1 : 0;
339 j >>= 1;
340 }
341}
342
343static u16 dc390_eeprom_get_data(struct pci_dev *pdev)
344{
345 int i;
346 u16 wval = 0;
347 u8 bval;
348
349 for (i = 0; i < 16; i++) {
350 wval <<= 1;
351
352 pci_write_config_byte(pdev, 0x80, 0x80);
353 udelay(160);
354 pci_write_config_byte(pdev, 0x80, 0x40);
355 udelay(160);
356 pci_read_config_byte(pdev, 0x00, &bval);
357
358 if (bval == 0x22)
359 wval |= 1;
360 }
361
362 return wval;
363}
364
365static void dc390_read_eeprom(struct pci_dev *pdev, u16 *ptr)
366{
367 u8 cmd = DC390_EEPROM_READ, i;
368
369 for (i = 0; i < DC390_EEPROM_LEN; i++) {
370 pci_write_config_byte(pdev, 0xc0, 0);
371 udelay(160);
372
373 dc390_eeprom_prepare_read(pdev, cmd++);
374 *ptr++ = dc390_eeprom_get_data(pdev);
375
376 pci_write_config_byte(pdev, 0x80, 0);
377 pci_write_config_byte(pdev, 0x80, 0);
378 udelay(160);
379 }
380}
381
382static void dc390_check_eeprom(struct esp *esp)
383{
384 u8 EEbuf[128];
385 u16 *ptr = (u16 *)EEbuf, wval = 0;
386 int i;
387
388 dc390_read_eeprom((struct pci_dev *)esp->dev, ptr);
389
390 for (i = 0; i < DC390_EEPROM_LEN; i++, ptr++)
391 wval += *ptr;
392
393 /* no Tekram EEprom found */
394 if (wval != 0x1234) {
395 struct pci_dev *pdev = esp->dev;
396 dev_printk(KERN_INFO, &pdev->dev,
397 "No valid Tekram EEprom found\n");
398 return;
399 }
400 esp->scsi_id = EEbuf[DC390_EE_ADAPT_SCSI_ID];
401 esp->num_tags = 2 << EEbuf[DC390_EE_TAG_CMD_NUM];
402 if (EEbuf[DC390_EE_MODE2] & DC390_EE_MODE2_ACTIVE_NEGATION)
403 esp->config4 |= ESP_CONFIG4_RADE | ESP_CONFIG4_RAE;
404}
405
406static int pci_esp_probe_one(struct pci_dev *pdev,
407 const struct pci_device_id *id)
408{
409 struct scsi_host_template *hostt = &scsi_esp_template;
410 int err = -ENODEV;
411 struct Scsi_Host *shost;
412 struct esp *esp;
413 struct pci_esp_priv *pep;
414
415 if (pci_enable_device(pdev)) {
416 dev_printk(KERN_INFO, &pdev->dev, "cannot enable device\n");
417 return -ENODEV;
418 }
419
420 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
421 dev_printk(KERN_INFO, &pdev->dev,
422 "failed to set 32bit DMA mask\n");
423 goto fail_disable_device;
424 }
425
426 shost = scsi_host_alloc(hostt, sizeof(struct esp));
427 if (!shost) {
428 dev_printk(KERN_INFO, &pdev->dev,
429 "failed to allocate scsi host\n");
430 err = -ENOMEM;
431 goto fail_disable_device;
432 }
433
434 pep = kzalloc(sizeof(struct pci_esp_priv), GFP_KERNEL);
435 if (!pep) {
436 dev_printk(KERN_INFO, &pdev->dev,
437 "failed to allocate esp_priv\n");
438 err = -ENOMEM;
439 goto fail_host_alloc;
440 }
441
442 esp = shost_priv(shost);
443 esp->host = shost;
444 esp->dev = pdev;
445 esp->ops = &pci_esp_ops;
446 /*
447 * The am53c974 HBA has a design flaw of generating
448 * spurious DMA completion interrupts when using
449 * DMA for command submission.
450 */
451 esp->flags |= ESP_FLAG_USE_FIFO;
452 /*
453 * Enable CONFIG2_FENAB to allow for large DMA transfers
454 */
455 if (am53c974_fenab)
456 esp->config2 |= ESP_CONFIG2_FENAB;
457
458 pep->esp = esp;
459
460 if (pci_request_regions(pdev, DRV_MODULE_NAME)) {
461 dev_printk(KERN_ERR, &pdev->dev,
462 "pci memory selection failed\n");
463 goto fail_priv_alloc;
464 }
465
466 esp->regs = pci_iomap(pdev, 0, pci_resource_len(pdev, 0));
467 if (!esp->regs) {
468 dev_printk(KERN_ERR, &pdev->dev, "pci I/O map failed\n");
469 err = -EINVAL;
470 goto fail_release_regions;
471 }
472 esp->dma_regs = esp->regs;
473
474 pci_set_master(pdev);
475
476 esp->command_block = pci_alloc_consistent(pdev, 16,
477 &esp->command_block_dma);
478 if (!esp->command_block) {
479 dev_printk(KERN_ERR, &pdev->dev,
480 "failed to allocate command block\n");
481 err = -ENOMEM;
482 goto fail_unmap_regs;
483 }
484
485 err = request_irq(pdev->irq, scsi_esp_intr, IRQF_SHARED,
486 DRV_MODULE_NAME, esp);
487 if (err < 0) {
488 dev_printk(KERN_ERR, &pdev->dev, "failed to register IRQ\n");
489 goto fail_unmap_command_block;
490 }
491
492 esp->scsi_id = 7;
493 dc390_check_eeprom(esp);
494
495 shost->this_id = esp->scsi_id;
496 shost->max_id = 8;
497 shost->irq = pdev->irq;
498 shost->io_port = pci_resource_start(pdev, 0);
499 shost->n_io_port = pci_resource_len(pdev, 0);
500 shost->unique_id = shost->io_port;
501 esp->scsi_id_mask = (1 << esp->scsi_id);
502 /* Assume 40MHz clock */
503 esp->cfreq = 40000000;
504
505 pci_set_drvdata(pdev, pep);
506
507 err = scsi_esp_register(esp, &pdev->dev);
508 if (err)
509 goto fail_free_irq;
510
511 return 0;
512
513fail_free_irq:
514 free_irq(pdev->irq, esp);
515fail_unmap_command_block:
516 pci_free_consistent(pdev, 16, esp->command_block,
517 esp->command_block_dma);
518fail_unmap_regs:
519 pci_iounmap(pdev, esp->regs);
520fail_release_regions:
521 pci_release_regions(pdev);
522fail_priv_alloc:
523 kfree(pep);
524fail_host_alloc:
525 scsi_host_put(shost);
526fail_disable_device:
527 pci_disable_device(pdev);
528
529 return err;
530}
531
532static void pci_esp_remove_one(struct pci_dev *pdev)
533{
534 struct pci_esp_priv *pep = pci_get_drvdata(pdev);
535 struct esp *esp = pep->esp;
536
537 scsi_esp_unregister(esp);
538 free_irq(pdev->irq, esp);
539 pci_free_consistent(pdev, 16, esp->command_block,
540 esp->command_block_dma);
541 pci_iounmap(pdev, esp->regs);
542 pci_release_regions(pdev);
543 pci_disable_device(pdev);
544 kfree(pep);
545
546 scsi_host_put(esp->host);
547}
548
549static struct pci_device_id am53c974_pci_tbl[] = {
550 { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_SCSI,
551 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
552 { }
553};
554MODULE_DEVICE_TABLE(pci, am53c974_pci_tbl);
555
556static struct pci_driver am53c974_driver = {
557 .name = DRV_MODULE_NAME,
558 .id_table = am53c974_pci_tbl,
559 .probe = pci_esp_probe_one,
560 .remove = pci_esp_remove_one,
561};
562
563static int __init am53c974_module_init(void)
564{
565 return pci_register_driver(&am53c974_driver);
566}
567
568static void __exit am53c974_module_exit(void)
569{
570 pci_unregister_driver(&am53c974_driver);
571}
572
573MODULE_DESCRIPTION("AM53C974 SCSI driver");
574MODULE_AUTHOR("Hannes Reinecke <hare@suse.de>");
575MODULE_LICENSE("GPL");
576MODULE_VERSION(DRV_MODULE_VERSION);
577MODULE_ALIAS("tmscsim");
578
579module_param(am53c974_debug, bool, 0644);
580MODULE_PARM_DESC(am53c974_debug, "Enable debugging");
581
582module_param(am53c974_fenab, bool, 0444);
583MODULE_PARM_DESC(am53c974_fenab, "Enable 24-bit DMA transfer sizes");
584
585module_init(am53c974_module_init);
586module_exit(am53c974_module_exit);
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 0b44fb5ee485..914c39f9f388 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -114,16 +114,11 @@ static void arcmsr_hardware_reset(struct AdapterControlBlock *acb);
114static const char *arcmsr_info(struct Scsi_Host *); 114static const char *arcmsr_info(struct Scsi_Host *);
115static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb); 115static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb);
116static void arcmsr_free_irq(struct pci_dev *, struct AdapterControlBlock *); 116static void arcmsr_free_irq(struct pci_dev *, struct AdapterControlBlock *);
117static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev, 117static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev, int queue_depth)
118 int queue_depth, int reason)
119{ 118{
120 if (reason != SCSI_QDEPTH_DEFAULT)
121 return -EOPNOTSUPP;
122
123 if (queue_depth > ARCMSR_MAX_CMD_PERLUN) 119 if (queue_depth > ARCMSR_MAX_CMD_PERLUN)
124 queue_depth = ARCMSR_MAX_CMD_PERLUN; 120 queue_depth = ARCMSR_MAX_CMD_PERLUN;
125 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth); 121 return scsi_change_queue_depth(sdev, queue_depth);
126 return queue_depth;
127} 122}
128 123
129static struct scsi_host_template arcmsr_scsi_host_template = { 124static struct scsi_host_template arcmsr_scsi_host_template = {
diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c
index d89b9b4deb3c..deaaf84989cd 100644
--- a/drivers/scsi/arm/acornscsi.c
+++ b/drivers/scsi/arm/acornscsi.c
@@ -850,13 +850,13 @@ static void acornscsi_done(AS_Host *host, struct scsi_cmnd **SCpntp,
850 break; 850 break;
851 851
852 default: 852 default:
853 printk(KERN_ERR "scsi%d.H: incomplete data transfer detected: result=%08X command=", 853 scmd_printk(KERN_ERR, SCpnt,
854 host->host->host_no, SCpnt->result); 854 "incomplete data transfer detected: "
855 __scsi_print_command(SCpnt->cmnd); 855 "result=%08X", SCpnt->result);
856 scsi_print_command(SCpnt);
856 acornscsi_dumpdma(host, "done"); 857 acornscsi_dumpdma(host, "done");
857 acornscsi_dumplog(host, SCpnt->device->id); 858 acornscsi_dumplog(host, SCpnt->device->id);
858 SCpnt->result &= 0xffff; 859 set_host_byte(SCpnt, DID_ERROR);
859 SCpnt->result |= DID_ERROR << 16;
860 } 860 }
861 } 861 }
862 } 862 }
diff --git a/drivers/scsi/arm/cumana_1.c b/drivers/scsi/arm/cumana_1.c
index 8ef810a4476e..d28d6c0f18c0 100644
--- a/drivers/scsi/arm/cumana_1.c
+++ b/drivers/scsi/arm/cumana_1.c
@@ -13,16 +13,12 @@
13#include <asm/ecard.h> 13#include <asm/ecard.h>
14#include <asm/io.h> 14#include <asm/io.h>
15 15
16#include "../scsi.h"
17#include <scsi/scsi_host.h> 16#include <scsi/scsi_host.h>
18 17
19#include <scsi/scsicam.h> 18#include <scsi/scsicam.h>
20 19
21#define AUTOSENSE
22#define PSEUDO_DMA 20#define PSEUDO_DMA
23 21
24#define CUMANASCSI_PUBLIC_RELEASE 1
25
26#define priv(host) ((struct NCR5380_hostdata *)(host)->hostdata) 22#define priv(host) ((struct NCR5380_hostdata *)(host)->hostdata)
27#define NCR5380_local_declare() struct Scsi_Host *_instance 23#define NCR5380_local_declare() struct Scsi_Host *_instance
28#define NCR5380_setup(instance) _instance = instance 24#define NCR5380_setup(instance) _instance = instance
@@ -30,6 +26,7 @@
30#define NCR5380_write(reg, value) cumanascsi_write(_instance, reg, value) 26#define NCR5380_write(reg, value) cumanascsi_write(_instance, reg, value)
31#define NCR5380_intr cumanascsi_intr 27#define NCR5380_intr cumanascsi_intr
32#define NCR5380_queue_command cumanascsi_queue_command 28#define NCR5380_queue_command cumanascsi_queue_command
29#define NCR5380_info cumanascsi_info
33 30
34#define NCR5380_implementation_fields \ 31#define NCR5380_implementation_fields \
35 unsigned ctrl; \ 32 unsigned ctrl; \
@@ -42,11 +39,6 @@ void cumanascsi_setup(char *str, int *ints)
42{ 39{
43} 40}
44 41
45const char *cumanascsi_info(struct Scsi_Host *spnt)
46{
47 return "";
48}
49
50#define CTRL 0x16fc 42#define CTRL 0x16fc
51#define STAT 0x2004 43#define STAT 0x2004
52#define L(v) (((v)<<16)|((v) & 0x0000ffff)) 44#define L(v) (((v)<<16)|((v) & 0x0000ffff))
@@ -267,14 +259,6 @@ static int cumanascsi1_probe(struct expansion_card *ec,
267 goto out_unmap; 259 goto out_unmap;
268 } 260 }
269 261
270 printk("scsi%d: at port 0x%08lx irq %d",
271 host->host_no, host->io_port, host->irq);
272 printk(" options CAN_QUEUE=%d CMD_PER_LUN=%d release=%d",
273 host->can_queue, host->cmd_per_lun, CUMANASCSI_PUBLIC_RELEASE);
274 printk("\nscsi%d:", host->host_no);
275 NCR5380_print_options(host);
276 printk("\n");
277
278 ret = scsi_add_host(host, &ec->dev); 262 ret = scsi_add_host(host, &ec->dev);
279 if (ret) 263 if (ret)
280 goto out_free_irq; 264 goto out_free_irq;
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c
index 71cfb1e504c4..e64c3af7c1a0 100644
--- a/drivers/scsi/arm/fas216.c
+++ b/drivers/scsi/arm/fas216.c
@@ -308,8 +308,7 @@ static void fas216_log_command(FAS216_Info *info, int level,
308 fas216_do_log(info, '0' + SCpnt->device->id, fmt, args); 308 fas216_do_log(info, '0' + SCpnt->device->id, fmt, args);
309 va_end(args); 309 va_end(args);
310 310
311 printk(" CDB: "); 311 scsi_print_command(SCpnt);
312 __scsi_print_command(SCpnt->cmnd);
313} 312}
314 313
315static void 314static void
@@ -2079,14 +2078,12 @@ fas216_std_done(FAS216_Info *info, struct scsi_cmnd *SCpnt, unsigned int result)
2079 break; 2078 break;
2080 2079
2081 default: 2080 default:
2082 printk(KERN_ERR "scsi%d.%c: incomplete data transfer " 2081 scmd_printk(KERN_ERR, SCpnt,
2083 "detected: res=%08X ptr=%p len=%X CDB: ", 2082 "incomplete data transfer detected: res=%08X ptr=%p len=%X\n",
2084 info->host->host_no, '0' + SCpnt->device->id, 2083 SCpnt->result, info->scsi.SCp.ptr,
2085 SCpnt->result, info->scsi.SCp.ptr, 2084 info->scsi.SCp.this_residual);
2086 info->scsi.SCp.this_residual); 2085 scsi_print_command(SCpnt);
2087 __scsi_print_command(SCpnt->cmnd); 2086 set_host_byte(SCpnt, DID_ERROR);
2088 SCpnt->result &= ~(255 << 16);
2089 SCpnt->result |= DID_BAD_TARGET << 16;
2090 goto request_sense; 2087 goto request_sense;
2091 } 2088 }
2092 } 2089 }
@@ -2158,12 +2155,11 @@ static void fas216_done(FAS216_Info *info, unsigned int result)
2158 * to transfer, we should not have a valid pointer. 2155 * to transfer, we should not have a valid pointer.
2159 */ 2156 */
2160 if (info->scsi.SCp.ptr && info->scsi.SCp.this_residual == 0) { 2157 if (info->scsi.SCp.ptr && info->scsi.SCp.this_residual == 0) {
2161 printk("scsi%d.%c: zero bytes left to transfer, but " 2158 scmd_printk(KERN_INFO, SCpnt,
2162 "buffer pointer still valid: ptr=%p len=%08x CDB: ", 2159 "zero bytes left to transfer, but buffer pointer still valid: ptr=%p len=%08x\n",
2163 info->host->host_no, '0' + SCpnt->device->id, 2160 info->scsi.SCp.ptr, info->scsi.SCp.this_residual);
2164 info->scsi.SCp.ptr, info->scsi.SCp.this_residual);
2165 info->scsi.SCp.ptr = NULL; 2161 info->scsi.SCp.ptr = NULL;
2166 __scsi_print_command(SCpnt->cmnd); 2162 scsi_print_command(SCpnt);
2167 } 2163 }
2168 2164
2169 /* 2165 /*
@@ -2427,14 +2423,11 @@ int fas216_eh_abort(struct scsi_cmnd *SCpnt)
2427 2423
2428 info->stats.aborts += 1; 2424 info->stats.aborts += 1;
2429 2425
2430 printk(KERN_WARNING "scsi%d: abort command ", info->host->host_no); 2426 scmd_printk(KERN_WARNING, SCpnt, "abort command\n");
2431 __scsi_print_command(SCpnt->cmnd);
2432 2427
2433 print_debug_list(); 2428 print_debug_list();
2434 fas216_dumpstate(info); 2429 fas216_dumpstate(info);
2435 2430
2436 printk(KERN_WARNING "scsi%d: abort %p ", info->host->host_no, SCpnt);
2437
2438 switch (fas216_find_command(info, SCpnt)) { 2431 switch (fas216_find_command(info, SCpnt)) {
2439 /* 2432 /*
2440 * We found the command, and cleared it out. Either 2433 * We found the command, and cleared it out. Either
@@ -2442,7 +2435,7 @@ int fas216_eh_abort(struct scsi_cmnd *SCpnt)
2442 * target, or the busylun bit is not set. 2435 * target, or the busylun bit is not set.
2443 */ 2436 */
2444 case res_success: 2437 case res_success:
2445 printk("success\n"); 2438 scmd_printk(KERN_WARNING, SCpnt, "abort %p success\n", SCpnt);
2446 result = SUCCESS; 2439 result = SUCCESS;
2447 break; 2440 break;
2448 2441
@@ -2452,14 +2445,13 @@ int fas216_eh_abort(struct scsi_cmnd *SCpnt)
2452 * if the bus is free. 2445 * if the bus is free.
2453 */ 2446 */
2454 case res_hw_abort: 2447 case res_hw_abort:
2455
2456 2448
2457 /* 2449 /*
2458 * We are unable to abort the command for some reason. 2450 * We are unable to abort the command for some reason.
2459 */ 2451 */
2460 default: 2452 default:
2461 case res_failed: 2453 case res_failed:
2462 printk("failed\n"); 2454 scmd_printk(KERN_WARNING, SCpnt, "abort %p failed\n", SCpnt);
2463 break; 2455 break;
2464 } 2456 }
2465 2457
@@ -2664,8 +2656,7 @@ int fas216_eh_host_reset(struct scsi_cmnd *SCpnt)
2664 2656
2665 fas216_checkmagic(info); 2657 fas216_checkmagic(info);
2666 2658
2667 printk("scsi%d.%c: %s: resetting host\n", 2659 fas216_log(info, LOG_ERROR, "resetting host");
2668 info->host->host_no, '0' + SCpnt->device->id, __func__);
2669 2660
2670 /* 2661 /*
2671 * Reset the SCSI chip. 2662 * Reset the SCSI chip.
diff --git a/drivers/scsi/arm/oak.c b/drivers/scsi/arm/oak.c
index 188e734c7ff0..7c6fa1479c9c 100644
--- a/drivers/scsi/arm/oak.c
+++ b/drivers/scsi/arm/oak.c
@@ -14,13 +14,9 @@
14#include <asm/ecard.h> 14#include <asm/ecard.h>
15#include <asm/io.h> 15#include <asm/io.h>
16 16
17#include "../scsi.h"
18#include <scsi/scsi_host.h> 17#include <scsi/scsi_host.h>
19 18
20#define AUTOSENSE
21/*#define PSEUDO_DMA*/ 19/*#define PSEUDO_DMA*/
22
23#define OAKSCSI_PUBLIC_RELEASE 1
24#define DONT_USE_INTR 20#define DONT_USE_INTR
25 21
26#define priv(host) ((struct NCR5380_hostdata *)(host)->hostdata) 22#define priv(host) ((struct NCR5380_hostdata *)(host)->hostdata)
@@ -29,10 +25,9 @@
29 25
30#define NCR5380_read(reg) readb(_base + ((reg) << 2)) 26#define NCR5380_read(reg) readb(_base + ((reg) << 2))
31#define NCR5380_write(reg, value) writeb(value, _base + ((reg) << 2)) 27#define NCR5380_write(reg, value) writeb(value, _base + ((reg) << 2))
32#define NCR5380_intr oakscsi_intr
33#define NCR5380_queue_command oakscsi_queue_command 28#define NCR5380_queue_command oakscsi_queue_command
29#define NCR5380_info oakscsi_info
34#define NCR5380_show_info oakscsi_show_info 30#define NCR5380_show_info oakscsi_show_info
35#define NCR5380_write_info oakscsi_write_info
36 31
37#define NCR5380_implementation_fields \ 32#define NCR5380_implementation_fields \
38 void __iomem *base 33 void __iomem *base
@@ -42,11 +37,6 @@
42#undef START_DMA_INITIATOR_RECEIVE_REG 37#undef START_DMA_INITIATOR_RECEIVE_REG
43#define START_DMA_INITIATOR_RECEIVE_REG (128 + 7) 38#define START_DMA_INITIATOR_RECEIVE_REG (128 + 7)
44 39
45const char * oakscsi_info (struct Scsi_Host *spnt)
46{
47 return "";
48}
49
50#define STAT ((128 + 16) << 2) 40#define STAT ((128 + 16) << 2)
51#define DATA ((128 + 8) << 2) 41#define DATA ((128 + 8) << 2)
52 42
@@ -114,7 +104,6 @@ printk("reading %p len %d\n", addr, len);
114static struct scsi_host_template oakscsi_template = { 104static struct scsi_host_template oakscsi_template = {
115 .module = THIS_MODULE, 105 .module = THIS_MODULE,
116 .show_info = oakscsi_show_info, 106 .show_info = oakscsi_show_info,
117 .write_info = oakscsi_write_info,
118 .name = "Oak 16-bit SCSI", 107 .name = "Oak 16-bit SCSI",
119 .info = oakscsi_info, 108 .info = oakscsi_info,
120 .queuecommand = oakscsi_queue_command, 109 .queuecommand = oakscsi_queue_command,
@@ -150,19 +139,11 @@ static int oakscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
150 goto unreg; 139 goto unreg;
151 } 140 }
152 141
153 host->irq = IRQ_NONE; 142 host->irq = NO_IRQ;
154 host->n_io_port = 255; 143 host->n_io_port = 255;
155 144
156 NCR5380_init(host, 0); 145 NCR5380_init(host, 0);
157 146
158 printk("scsi%d: at port 0x%08lx irqs disabled",
159 host->host_no, host->io_port);
160 printk(" options CAN_QUEUE=%d CMD_PER_LUN=%d release=%d",
161 host->can_queue, host->cmd_per_lun, OAKSCSI_PUBLIC_RELEASE);
162 printk("\nscsi%d:", host->host_no);
163 NCR5380_print_options(host);
164 printk("\n");
165
166 ret = scsi_add_host(host, &ec->dev); 147 ret = scsi_add_host(host, &ec->dev);
167 if (ret) 148 if (ret)
168 goto out_unmap; 149 goto out_unmap;
diff --git a/drivers/scsi/atari_NCR5380.c b/drivers/scsi/atari_NCR5380.c
index 79e6f045c2a9..6daed6b386d4 100644
--- a/drivers/scsi/atari_NCR5380.c
+++ b/drivers/scsi/atari_NCR5380.c
@@ -11,8 +11,6 @@
11 * drew@colorado.edu 11 * drew@colorado.edu
12 * +1 (303) 666-5836 12 * +1 (303) 666-5836
13 * 13 *
14 * DISTRIBUTION RELEASE 6.
15 *
16 * For more information, please consult 14 * For more information, please consult
17 * 15 *
18 * NCR 5380 Family 16 * NCR 5380 Family
@@ -73,6 +71,9 @@
73 * 1. Test linked command handling code after Eric is ready with 71 * 1. Test linked command handling code after Eric is ready with
74 * the high level code. 72 * the high level code.
75 */ 73 */
74
75/* Adapted for the sun3 by Sam Creasey. */
76
76#include <scsi/scsi_dbg.h> 77#include <scsi/scsi_dbg.h>
77#include <scsi/scsi_transport_spi.h> 78#include <scsi/scsi_transport_spi.h>
78 79
@@ -103,27 +104,7 @@
103 104
104/* 105/*
105 * Design 106 * Design
106 * Issues :
107 *
108 * The other Linux SCSI drivers were written when Linux was Intel PC-only,
109 * and specifically for each board rather than each chip. This makes their
110 * adaptation to platforms like the Mac (Some of which use NCR5380's)
111 * more difficult than it has to be.
112 * 107 *
113 * Also, many of the SCSI drivers were written before the command queuing
114 * routines were implemented, meaning their implementations of queued
115 * commands were hacked on rather than designed in from the start.
116 *
117 * When I designed the Linux SCSI drivers I figured that
118 * while having two different SCSI boards in a system might be useful
119 * for debugging things, two of the same type wouldn't be used.
120 * Well, I was wrong and a number of users have mailed me about running
121 * multiple high-performance SCSI boards in a server.
122 *
123 * Finally, when I get questions from users, I have no idea what
124 * revision of my driver they are running.
125 *
126 * This driver attempts to address these problems :
127 * This is a generic 5380 driver. To use it on a different platform, 108 * This is a generic 5380 driver. To use it on a different platform,
128 * one simply writes appropriate system specific macros (ie, data 109 * one simply writes appropriate system specific macros (ie, data
129 * transfer - some PC's will use the I/O bus, 68K's must use 110 * transfer - some PC's will use the I/O bus, 68K's must use
@@ -138,17 +119,6 @@
138 * allowing multiple commands to propagate all the way to a SCSI-II device 119 * allowing multiple commands to propagate all the way to a SCSI-II device
139 * while a command is already executing. 120 * while a command is already executing.
140 * 121 *
141 * To solve the multiple-boards-in-the-same-system problem,
142 * there is a separate instance structure for each instance
143 * of a 5380 in the system. So, multiple NCR5380 drivers will
144 * be able to coexist with appropriate changes to the high level
145 * SCSI code.
146 *
147 * A NCR5380_PUBLIC_REVISION macro is provided, with the release
148 * number (updated for each public release) printed by the
149 * NCR5380_print_options command, which should be called from the
150 * wrapper detect function, so that I know what release of the driver
151 * users are using.
152 * 122 *
153 * Issues specific to the NCR5380 : 123 * Issues specific to the NCR5380 :
154 * 124 *
@@ -173,19 +143,17 @@
173 * Architecture : 143 * Architecture :
174 * 144 *
175 * At the heart of the design is a coroutine, NCR5380_main, 145 * At the heart of the design is a coroutine, NCR5380_main,
176 * which is started when not running by the interrupt handler, 146 * which is started from a workqueue for each NCR5380 host in the
177 * timer, and queue command function. It attempts to establish 147 * system. It attempts to establish I_T_L or I_T_L_Q nexuses by
178 * I_T_L or I_T_L_Q nexuses by removing the commands from the 148 * removing the commands from the issue queue and calling
179 * issue queue and calling NCR5380_select() if a nexus 149 * NCR5380_select() if a nexus is not established.
180 * is not established.
181 * 150 *
182 * Once a nexus is established, the NCR5380_information_transfer() 151 * Once a nexus is established, the NCR5380_information_transfer()
183 * phase goes through the various phases as instructed by the target. 152 * phase goes through the various phases as instructed by the target.
184 * if the target goes into MSG IN and sends a DISCONNECT message, 153 * if the target goes into MSG IN and sends a DISCONNECT message,
185 * the command structure is placed into the per instance disconnected 154 * the command structure is placed into the per instance disconnected
186 * queue, and NCR5380_main tries to find more work. If USLEEP 155 * queue, and NCR5380_main tries to find more work. If the target is
187 * was defined, and the target is idle for too long, the system 156 * idle for too long, the system will try to sleep.
188 * will try to sleep.
189 * 157 *
190 * If a command has disconnected, eventually an interrupt will trigger, 158 * If a command has disconnected, eventually an interrupt will trigger,
191 * calling NCR5380_intr() which will in turn call NCR5380_reselect 159 * calling NCR5380_intr() which will in turn call NCR5380_reselect
@@ -211,6 +179,9 @@
211 * AUTOSENSE - if defined, REQUEST SENSE will be performed automatically 179 * AUTOSENSE - if defined, REQUEST SENSE will be performed automatically
212 * for commands that return with a CHECK CONDITION status. 180 * for commands that return with a CHECK CONDITION status.
213 * 181 *
182 * DIFFERENTIAL - if defined, NCR53c81 chips will use external differential
183 * transceivers.
184 *
214 * LINKED - if defined, linked commands are supported. 185 * LINKED - if defined, linked commands are supported.
215 * 186 *
216 * REAL_DMA - if defined, REAL DMA is used during the data transfer phases. 187 * REAL_DMA - if defined, REAL DMA is used during the data transfer phases.
@@ -223,6 +194,9 @@
223 * 194 *
224 * NCR5380_write(register, value) - write to the specific register 195 * NCR5380_write(register, value) - write to the specific register
225 * 196 *
197 * NCR5380_implementation_fields - additional fields needed for this
198 * specific implementation of the NCR5380
199 *
226 * Either real DMA *or* pseudo DMA may be implemented 200 * Either real DMA *or* pseudo DMA may be implemented
227 * REAL functions : 201 * REAL functions :
228 * NCR5380_REAL_DMA should be defined if real DMA is to be used. 202 * NCR5380_REAL_DMA should be defined if real DMA is to be used.
@@ -241,40 +215,21 @@
241 * NCR5380_pwrite(instance, src, count) 215 * NCR5380_pwrite(instance, src, count)
242 * NCR5380_pread(instance, dst, count); 216 * NCR5380_pread(instance, dst, count);
243 * 217 *
244 * If nothing specific to this implementation needs doing (ie, with external
245 * hardware), you must also define
246 *
247 * NCR5380_queue_command
248 * NCR5380_reset
249 * NCR5380_abort
250 * NCR5380_proc_info
251 *
252 * to be the global entry points into the specific driver, ie
253 * #define NCR5380_queue_command t128_queue_command.
254 *
255 * If this is not done, the routines will be defined as static functions
256 * with the NCR5380* names and the user must provide a globally
257 * accessible wrapper function.
258 *
259 * The generic driver is initialized by calling NCR5380_init(instance), 218 * The generic driver is initialized by calling NCR5380_init(instance),
260 * after setting the appropriate host specific fields and ID. If the 219 * after setting the appropriate host specific fields and ID. If the
261 * driver wishes to autoprobe for an IRQ line, the NCR5380_probe_irq(instance, 220 * driver wishes to autoprobe for an IRQ line, the NCR5380_probe_irq(instance,
262 * possible) function may be used. Before the specific driver initialization 221 * possible) function may be used.
263 * code finishes, NCR5380_print_options should be called.
264 */ 222 */
265 223
266static struct Scsi_Host *first_instance = NULL;
267static struct scsi_host_template *the_template = NULL;
268
269/* Macros ease life... :-) */ 224/* Macros ease life... :-) */
270#define SETUP_HOSTDATA(in) \ 225#define SETUP_HOSTDATA(in) \
271 struct NCR5380_hostdata *hostdata = \ 226 struct NCR5380_hostdata *hostdata = \
272 (struct NCR5380_hostdata *)(in)->hostdata 227 (struct NCR5380_hostdata *)(in)->hostdata
273#define HOSTDATA(in) ((struct NCR5380_hostdata *)(in)->hostdata) 228#define HOSTDATA(in) ((struct NCR5380_hostdata *)(in)->hostdata)
274 229
275#define NEXT(cmd) ((Scsi_Cmnd *)(cmd)->host_scribble) 230#define NEXT(cmd) ((struct scsi_cmnd *)(cmd)->host_scribble)
276#define SET_NEXT(cmd,next) ((cmd)->host_scribble = (void *)(next)) 231#define SET_NEXT(cmd,next) ((cmd)->host_scribble = (void *)(next))
277#define NEXTADDR(cmd) ((Scsi_Cmnd **)&(cmd)->host_scribble) 232#define NEXTADDR(cmd) ((struct scsi_cmnd **)&(cmd)->host_scribble)
278 233
279#define HOSTNO instance->host_no 234#define HOSTNO instance->host_no
280#define H_NO(cmd) (cmd)->device->host->host_no 235#define H_NO(cmd) (cmd)->device->host->host_no
@@ -316,30 +271,17 @@ static struct scsi_host_template *the_template = NULL;
316 * important: the tag bit must be cleared before 'nr_allocated' is decreased. 271 * important: the tag bit must be cleared before 'nr_allocated' is decreased.
317 */ 272 */
318 273
319/* -1 for TAG_NONE is not possible with unsigned char cmd->tag */ 274static void __init init_tags(struct NCR5380_hostdata *hostdata)
320#undef TAG_NONE
321#define TAG_NONE 0xff
322
323typedef struct {
324 DECLARE_BITMAP(allocated, MAX_TAGS);
325 int nr_allocated;
326 int queue_size;
327} TAG_ALLOC;
328
329static TAG_ALLOC TagAlloc[8][8]; /* 8 targets and 8 LUNs */
330
331
332static void __init init_tags(void)
333{ 275{
334 int target, lun; 276 int target, lun;
335 TAG_ALLOC *ta; 277 struct tag_alloc *ta;
336 278
337 if (!setup_use_tagged_queuing) 279 if (!(hostdata->flags & FLAG_TAGGED_QUEUING))
338 return; 280 return;
339 281
340 for (target = 0; target < 8; ++target) { 282 for (target = 0; target < 8; ++target) {
341 for (lun = 0; lun < 8; ++lun) { 283 for (lun = 0; lun < 8; ++lun) {
342 ta = &TagAlloc[target][lun]; 284 ta = &hostdata->TagAlloc[target][lun];
343 bitmap_zero(ta->allocated, MAX_TAGS); 285 bitmap_zero(ta->allocated, MAX_TAGS);
344 ta->nr_allocated = 0; 286 ta->nr_allocated = 0;
345 /* At the beginning, assume the maximum queue size we could 287 /* At the beginning, assume the maximum queue size we could
@@ -359,7 +301,7 @@ static void __init init_tags(void)
359 * conditions. 301 * conditions.
360 */ 302 */
361 303
362static int is_lun_busy(Scsi_Cmnd *cmd, int should_be_tagged) 304static int is_lun_busy(struct scsi_cmnd *cmd, int should_be_tagged)
363{ 305{
364 u8 lun = cmd->device->lun; 306 u8 lun = cmd->device->lun;
365 SETUP_HOSTDATA(cmd->device->host); 307 SETUP_HOSTDATA(cmd->device->host);
@@ -367,10 +309,11 @@ static int is_lun_busy(Scsi_Cmnd *cmd, int should_be_tagged)
367 if (hostdata->busy[cmd->device->id] & (1 << lun)) 309 if (hostdata->busy[cmd->device->id] & (1 << lun))
368 return 1; 310 return 1;
369 if (!should_be_tagged || 311 if (!should_be_tagged ||
370 !setup_use_tagged_queuing || !cmd->device->tagged_supported) 312 !(hostdata->flags & FLAG_TAGGED_QUEUING) ||
313 !cmd->device->tagged_supported)
371 return 0; 314 return 0;
372 if (TagAlloc[cmd->device->id][lun].nr_allocated >= 315 if (hostdata->TagAlloc[scmd_id(cmd)][lun].nr_allocated >=
373 TagAlloc[cmd->device->id][lun].queue_size) { 316 hostdata->TagAlloc[scmd_id(cmd)][lun].queue_size) {
374 dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d: no free tags\n", 317 dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d: no free tags\n",
375 H_NO(cmd), cmd->device->id, lun); 318 H_NO(cmd), cmd->device->id, lun);
376 return 1; 319 return 1;
@@ -384,7 +327,7 @@ static int is_lun_busy(Scsi_Cmnd *cmd, int should_be_tagged)
384 * untagged. 327 * untagged.
385 */ 328 */
386 329
387static void cmd_get_tag(Scsi_Cmnd *cmd, int should_be_tagged) 330static void cmd_get_tag(struct scsi_cmnd *cmd, int should_be_tagged)
388{ 331{
389 u8 lun = cmd->device->lun; 332 u8 lun = cmd->device->lun;
390 SETUP_HOSTDATA(cmd->device->host); 333 SETUP_HOSTDATA(cmd->device->host);
@@ -393,13 +336,14 @@ static void cmd_get_tag(Scsi_Cmnd *cmd, int should_be_tagged)
393 * an untagged command. 336 * an untagged command.
394 */ 337 */
395 if (!should_be_tagged || 338 if (!should_be_tagged ||
396 !setup_use_tagged_queuing || !cmd->device->tagged_supported) { 339 !(hostdata->flags & FLAG_TAGGED_QUEUING) ||
340 !cmd->device->tagged_supported) {
397 cmd->tag = TAG_NONE; 341 cmd->tag = TAG_NONE;
398 hostdata->busy[cmd->device->id] |= (1 << lun); 342 hostdata->busy[cmd->device->id] |= (1 << lun);
399 dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d now allocated by untagged " 343 dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d now allocated by untagged "
400 "command\n", H_NO(cmd), cmd->device->id, lun); 344 "command\n", H_NO(cmd), cmd->device->id, lun);
401 } else { 345 } else {
402 TAG_ALLOC *ta = &TagAlloc[cmd->device->id][lun]; 346 struct tag_alloc *ta = &hostdata->TagAlloc[scmd_id(cmd)][lun];
403 347
404 cmd->tag = find_first_zero_bit(ta->allocated, MAX_TAGS); 348 cmd->tag = find_first_zero_bit(ta->allocated, MAX_TAGS);
405 set_bit(cmd->tag, ta->allocated); 349 set_bit(cmd->tag, ta->allocated);
@@ -416,7 +360,7 @@ static void cmd_get_tag(Scsi_Cmnd *cmd, int should_be_tagged)
416 * unlock the LUN. 360 * unlock the LUN.
417 */ 361 */
418 362
419static void cmd_free_tag(Scsi_Cmnd *cmd) 363static void cmd_free_tag(struct scsi_cmnd *cmd)
420{ 364{
421 u8 lun = cmd->device->lun; 365 u8 lun = cmd->device->lun;
422 SETUP_HOSTDATA(cmd->device->host); 366 SETUP_HOSTDATA(cmd->device->host);
@@ -429,7 +373,7 @@ static void cmd_free_tag(Scsi_Cmnd *cmd)
429 printk(KERN_NOTICE "scsi%d: trying to free bad tag %d!\n", 373 printk(KERN_NOTICE "scsi%d: trying to free bad tag %d!\n",
430 H_NO(cmd), cmd->tag); 374 H_NO(cmd), cmd->tag);
431 } else { 375 } else {
432 TAG_ALLOC *ta = &TagAlloc[cmd->device->id][lun]; 376 struct tag_alloc *ta = &hostdata->TagAlloc[scmd_id(cmd)][lun];
433 clear_bit(cmd->tag, ta->allocated); 377 clear_bit(cmd->tag, ta->allocated);
434 ta->nr_allocated--; 378 ta->nr_allocated--;
435 dprintk(NDEBUG_TAGS, "scsi%d: freed tag %d for target %d lun %d\n", 379 dprintk(NDEBUG_TAGS, "scsi%d: freed tag %d for target %d lun %d\n",
@@ -438,17 +382,17 @@ static void cmd_free_tag(Scsi_Cmnd *cmd)
438} 382}
439 383
440 384
441static void free_all_tags(void) 385static void free_all_tags(struct NCR5380_hostdata *hostdata)
442{ 386{
443 int target, lun; 387 int target, lun;
444 TAG_ALLOC *ta; 388 struct tag_alloc *ta;
445 389
446 if (!setup_use_tagged_queuing) 390 if (!(hostdata->flags & FLAG_TAGGED_QUEUING))
447 return; 391 return;
448 392
449 for (target = 0; target < 8; ++target) { 393 for (target = 0; target < 8; ++target) {
450 for (lun = 0; lun < 8; ++lun) { 394 for (lun = 0; lun < 8; ++lun) {
451 ta = &TagAlloc[target][lun]; 395 ta = &hostdata->TagAlloc[target][lun];
452 bitmap_zero(ta->allocated, MAX_TAGS); 396 bitmap_zero(ta->allocated, MAX_TAGS);
453 ta->nr_allocated = 0; 397 ta->nr_allocated = 0;
454 } 398 }
@@ -459,19 +403,20 @@ static void free_all_tags(void)
459 403
460 404
461/* 405/*
462 * Function: void merge_contiguous_buffers( Scsi_Cmnd *cmd ) 406 * Function: void merge_contiguous_buffers( struct scsi_cmnd *cmd )
463 * 407 *
464 * Purpose: Try to merge several scatter-gather requests into one DMA 408 * Purpose: Try to merge several scatter-gather requests into one DMA
465 * transfer. This is possible if the scatter buffers lie on 409 * transfer. This is possible if the scatter buffers lie on
466 * physical contiguous addresses. 410 * physical contiguous addresses.
467 * 411 *
468 * Parameters: Scsi_Cmnd *cmd 412 * Parameters: struct scsi_cmnd *cmd
469 * The command to work on. The first scatter buffer's data are 413 * The command to work on. The first scatter buffer's data are
470 * assumed to be already transferred into ptr/this_residual. 414 * assumed to be already transferred into ptr/this_residual.
471 */ 415 */
472 416
473static void merge_contiguous_buffers(Scsi_Cmnd *cmd) 417static void merge_contiguous_buffers(struct scsi_cmnd *cmd)
474{ 418{
419#if !defined(CONFIG_SUN3)
475 unsigned long endaddr; 420 unsigned long endaddr;
476#if (NDEBUG & NDEBUG_MERGING) 421#if (NDEBUG & NDEBUG_MERGING)
477 unsigned long oldlen = cmd->SCp.this_residual; 422 unsigned long oldlen = cmd->SCp.this_residual;
@@ -496,18 +441,17 @@ static void merge_contiguous_buffers(Scsi_Cmnd *cmd)
496 dprintk(NDEBUG_MERGING, "merged %d buffers from %p, new length %08x\n", 441 dprintk(NDEBUG_MERGING, "merged %d buffers from %p, new length %08x\n",
497 cnt, cmd->SCp.ptr, cmd->SCp.this_residual); 442 cnt, cmd->SCp.ptr, cmd->SCp.this_residual);
498#endif 443#endif
444#endif /* !defined(CONFIG_SUN3) */
499} 445}
500 446
501/* 447/**
502 * Function : void initialize_SCp(Scsi_Cmnd *cmd) 448 * initialize_SCp - init the scsi pointer field
449 * @cmd: command block to set up
503 * 450 *
504 * Purpose : initialize the saved data pointers for cmd to point to the 451 * Set up the internal fields in the SCSI command.
505 * start of the buffer.
506 *
507 * Inputs : cmd - Scsi_Cmnd structure to have pointers reset.
508 */ 452 */
509 453
510static inline void initialize_SCp(Scsi_Cmnd *cmd) 454static inline void initialize_SCp(struct scsi_cmnd *cmd)
511{ 455{
512 /* 456 /*
513 * Initialize the Scsi Pointer field so that all of the commands in the 457 * Initialize the Scsi Pointer field so that all of the commands in the
@@ -557,12 +501,11 @@ static struct {
557 {0, NULL} 501 {0, NULL}
558}; 502};
559 503
560/* 504/**
561 * Function : void NCR5380_print(struct Scsi_Host *instance) 505 * NCR5380_print - print scsi bus signals
506 * @instance: adapter state to dump
562 * 507 *
563 * Purpose : print the SCSI bus signals for debugging purposes 508 * Print the SCSI bus signals for debugging purposes
564 *
565 * Input : instance - which NCR5380
566 */ 509 */
567 510
568static void NCR5380_print(struct Scsi_Host *instance) 511static void NCR5380_print(struct Scsi_Host *instance)
@@ -605,12 +548,13 @@ static struct {
605 {PHASE_UNKNOWN, "UNKNOWN"} 548 {PHASE_UNKNOWN, "UNKNOWN"}
606}; 549};
607 550
608/* 551/**
609 * Function : void NCR5380_print_phase(struct Scsi_Host *instance) 552 * NCR5380_print_phase - show SCSI phase
553 * @instance: adapter to dump
610 * 554 *
611 * Purpose : print the current SCSI phase for debugging purposes 555 * Print the current SCSI phase for debugging purposes
612 * 556 *
613 * Input : instance - which NCR5380 557 * Locks: none
614 */ 558 */
615 559
616static void NCR5380_print_phase(struct Scsi_Host *instance) 560static void NCR5380_print_phase(struct Scsi_Host *instance)
@@ -648,71 +592,75 @@ static void NCR5380_print_phase(struct Scsi_Host *instance)
648#include <linux/workqueue.h> 592#include <linux/workqueue.h>
649#include <linux/interrupt.h> 593#include <linux/interrupt.h>
650 594
651static volatile int main_running; 595static inline void queue_main(struct NCR5380_hostdata *hostdata)
652static DECLARE_WORK(NCR5380_tqueue, NCR5380_main);
653
654static inline void queue_main(void)
655{ 596{
656 if (!main_running) { 597 if (!hostdata->main_running) {
657 /* If in interrupt and NCR5380_main() not already running, 598 /* If in interrupt and NCR5380_main() not already running,
658 queue it on the 'immediate' task queue, to be processed 599 queue it on the 'immediate' task queue, to be processed
659 immediately after the current interrupt processing has 600 immediately after the current interrupt processing has
660 finished. */ 601 finished. */
661 schedule_work(&NCR5380_tqueue); 602 schedule_work(&hostdata->main_task);
662 } 603 }
663 /* else: nothing to do: the running NCR5380_main() will pick up 604 /* else: nothing to do: the running NCR5380_main() will pick up
664 any newly queued command. */ 605 any newly queued command. */
665} 606}
666 607
667 608/**
668static inline void NCR5380_all_init(void) 609 * NCR58380_info - report driver and host information
669{ 610 * @instance: relevant scsi host instance
670 static int done = 0;
671 if (!done) {
672 dprintk(NDEBUG_INIT, "scsi : NCR5380_all_init()\n");
673 done = 1;
674 }
675}
676
677
678/*
679 * Function : void NCR58380_print_options (struct Scsi_Host *instance)
680 * 611 *
681 * Purpose : called by probe code indicating the NCR5380 driver 612 * For use as the host template info() handler.
682 * options that were selected.
683 * 613 *
684 * Inputs : instance, pointer to this instance. Unused. 614 * Locks: none
685 */ 615 */
686 616
687static void __init NCR5380_print_options(struct Scsi_Host *instance) 617static const char *NCR5380_info(struct Scsi_Host *instance)
618{
619 struct NCR5380_hostdata *hostdata = shost_priv(instance);
620
621 return hostdata->info;
622}
623
624static void prepare_info(struct Scsi_Host *instance)
688{ 625{
689 printk(" generic options" 626 struct NCR5380_hostdata *hostdata = shost_priv(instance);
690#ifdef AUTOSENSE 627
691 " AUTOSENSE" 628 snprintf(hostdata->info, sizeof(hostdata->info),
629 "%s, io_port 0x%lx, n_io_port %d, "
630 "base 0x%lx, irq %d, "
631 "can_queue %d, cmd_per_lun %d, "
632 "sg_tablesize %d, this_id %d, "
633 "flags { %s}, "
634 "options { %s} ",
635 instance->hostt->name, instance->io_port, instance->n_io_port,
636 instance->base, instance->irq,
637 instance->can_queue, instance->cmd_per_lun,
638 instance->sg_tablesize, instance->this_id,
639 hostdata->flags & FLAG_TAGGED_QUEUING ? "TAGGED_QUEUING " : "",
640#ifdef DIFFERENTIAL
641 "DIFFERENTIAL "
692#endif 642#endif
693#ifdef REAL_DMA 643#ifdef REAL_DMA
694 " REAL DMA" 644 "REAL_DMA "
695#endif 645#endif
696#ifdef PARITY 646#ifdef PARITY
697 " PARITY" 647 "PARITY "
698#endif 648#endif
699#ifdef SUPPORT_TAGS 649#ifdef SUPPORT_TAGS
700 " SCSI-2 TAGGED QUEUING" 650 "SUPPORT_TAGS "
701#endif 651#endif
702 ); 652 "");
703 printk(" generic release=%d", NCR5380_PUBLIC_RELEASE);
704} 653}
705 654
706/* 655/**
707 * Function : void NCR5380_print_status (struct Scsi_Host *instance) 656 * NCR5380_print_status - dump controller info
657 * @instance: controller to dump
708 * 658 *
709 * Purpose : print commands in the various queues, called from 659 * Print commands in the various queues, called from NCR5380_abort
710 * NCR5380_abort and NCR5380_debug to aid debugging. 660 * to aid debugging.
711 *
712 * Inputs : instance, pointer to this instance.
713 */ 661 */
714 662
715static void lprint_Scsi_Cmnd(Scsi_Cmnd *cmd) 663static void lprint_Scsi_Cmnd(struct scsi_cmnd *cmd)
716{ 664{
717 int i, s; 665 int i, s;
718 unsigned char *command; 666 unsigned char *command;
@@ -729,7 +677,7 @@ static void lprint_Scsi_Cmnd(Scsi_Cmnd *cmd)
729static void NCR5380_print_status(struct Scsi_Host *instance) 677static void NCR5380_print_status(struct Scsi_Host *instance)
730{ 678{
731 struct NCR5380_hostdata *hostdata; 679 struct NCR5380_hostdata *hostdata;
732 Scsi_Cmnd *ptr; 680 struct scsi_cmnd *ptr;
733 unsigned long flags; 681 unsigned long flags;
734 682
735 NCR5380_dprint(NDEBUG_ANY, instance); 683 NCR5380_dprint(NDEBUG_ANY, instance);
@@ -737,20 +685,19 @@ static void NCR5380_print_status(struct Scsi_Host *instance)
737 685
738 hostdata = (struct NCR5380_hostdata *)instance->hostdata; 686 hostdata = (struct NCR5380_hostdata *)instance->hostdata;
739 687
740 printk("\nNCR5380 core release=%d.\n", NCR5380_PUBLIC_RELEASE);
741 local_irq_save(flags); 688 local_irq_save(flags);
742 printk("NCR5380: coroutine is%s running.\n", 689 printk("NCR5380: coroutine is%s running.\n",
743 main_running ? "" : "n't"); 690 hostdata->main_running ? "" : "n't");
744 if (!hostdata->connected) 691 if (!hostdata->connected)
745 printk("scsi%d: no currently connected command\n", HOSTNO); 692 printk("scsi%d: no currently connected command\n", HOSTNO);
746 else 693 else
747 lprint_Scsi_Cmnd((Scsi_Cmnd *) hostdata->connected); 694 lprint_Scsi_Cmnd((struct scsi_cmnd *) hostdata->connected);
748 printk("scsi%d: issue_queue\n", HOSTNO); 695 printk("scsi%d: issue_queue\n", HOSTNO);
749 for (ptr = (Scsi_Cmnd *)hostdata->issue_queue; ptr; ptr = NEXT(ptr)) 696 for (ptr = (struct scsi_cmnd *)hostdata->issue_queue; ptr; ptr = NEXT(ptr))
750 lprint_Scsi_Cmnd(ptr); 697 lprint_Scsi_Cmnd(ptr);
751 698
752 printk("scsi%d: disconnected_queue\n", HOSTNO); 699 printk("scsi%d: disconnected_queue\n", HOSTNO);
753 for (ptr = (Scsi_Cmnd *) hostdata->disconnected_queue; ptr; 700 for (ptr = (struct scsi_cmnd *) hostdata->disconnected_queue; ptr;
754 ptr = NEXT(ptr)) 701 ptr = NEXT(ptr))
755 lprint_Scsi_Cmnd(ptr); 702 lprint_Scsi_Cmnd(ptr);
756 703
@@ -758,7 +705,7 @@ static void NCR5380_print_status(struct Scsi_Host *instance)
758 printk("\n"); 705 printk("\n");
759} 706}
760 707
761static void show_Scsi_Cmnd(Scsi_Cmnd *cmd, struct seq_file *m) 708static void show_Scsi_Cmnd(struct scsi_cmnd *cmd, struct seq_file *m)
762{ 709{
763 int i, s; 710 int i, s;
764 unsigned char *command; 711 unsigned char *command;
@@ -772,28 +719,28 @@ static void show_Scsi_Cmnd(Scsi_Cmnd *cmd, struct seq_file *m)
772 seq_printf(m, "\n"); 719 seq_printf(m, "\n");
773} 720}
774 721
775static int NCR5380_show_info(struct seq_file *m, struct Scsi_Host *instance) 722static int __maybe_unused NCR5380_show_info(struct seq_file *m,
723 struct Scsi_Host *instance)
776{ 724{
777 struct NCR5380_hostdata *hostdata; 725 struct NCR5380_hostdata *hostdata;
778 Scsi_Cmnd *ptr; 726 struct scsi_cmnd *ptr;
779 unsigned long flags; 727 unsigned long flags;
780 728
781 hostdata = (struct NCR5380_hostdata *)instance->hostdata; 729 hostdata = (struct NCR5380_hostdata *)instance->hostdata;
782 730
783 seq_printf(m, "NCR5380 core release=%d.\n", NCR5380_PUBLIC_RELEASE);
784 local_irq_save(flags); 731 local_irq_save(flags);
785 seq_printf(m, "NCR5380: coroutine is%s running.\n", 732 seq_printf(m, "NCR5380: coroutine is%s running.\n",
786 main_running ? "" : "n't"); 733 hostdata->main_running ? "" : "n't");
787 if (!hostdata->connected) 734 if (!hostdata->connected)
788 seq_printf(m, "scsi%d: no currently connected command\n", HOSTNO); 735 seq_printf(m, "scsi%d: no currently connected command\n", HOSTNO);
789 else 736 else
790 show_Scsi_Cmnd((Scsi_Cmnd *) hostdata->connected, m); 737 show_Scsi_Cmnd((struct scsi_cmnd *) hostdata->connected, m);
791 seq_printf(m, "scsi%d: issue_queue\n", HOSTNO); 738 seq_printf(m, "scsi%d: issue_queue\n", HOSTNO);
792 for (ptr = (Scsi_Cmnd *)hostdata->issue_queue; ptr; ptr = NEXT(ptr)) 739 for (ptr = (struct scsi_cmnd *)hostdata->issue_queue; ptr; ptr = NEXT(ptr))
793 show_Scsi_Cmnd(ptr, m); 740 show_Scsi_Cmnd(ptr, m);
794 741
795 seq_printf(m, "scsi%d: disconnected_queue\n", HOSTNO); 742 seq_printf(m, "scsi%d: disconnected_queue\n", HOSTNO);
796 for (ptr = (Scsi_Cmnd *) hostdata->disconnected_queue; ptr; 743 for (ptr = (struct scsi_cmnd *) hostdata->disconnected_queue; ptr;
797 ptr = NEXT(ptr)) 744 ptr = NEXT(ptr))
798 show_Scsi_Cmnd(ptr, m); 745 show_Scsi_Cmnd(ptr, m);
799 746
@@ -801,16 +748,18 @@ static int NCR5380_show_info(struct seq_file *m, struct Scsi_Host *instance)
801 return 0; 748 return 0;
802} 749}
803 750
804/* 751/**
805 * Function : void NCR5380_init (struct Scsi_Host *instance) 752 * NCR5380_init - initialise an NCR5380
753 * @instance: adapter to configure
754 * @flags: control flags
806 * 755 *
807 * Purpose : initializes *instance and corresponding 5380 chip. 756 * Initializes *instance and corresponding 5380 chip,
808 * 757 * with flags OR'd into the initial flags value.
809 * Inputs : instance - instantiation of the 5380 driver.
810 * 758 *
811 * Notes : I assume that the host, hostno, and id bits have been 759 * Notes : I assume that the host, hostno, and id bits have been
812 * set correctly. I don't care about the irq and other fields. 760 * set correctly. I don't care about the irq and other fields.
813 * 761 *
762 * Returns 0 for success
814 */ 763 */
815 764
816static int __init NCR5380_init(struct Scsi_Host *instance, int flags) 765static int __init NCR5380_init(struct Scsi_Host *instance, int flags)
@@ -818,8 +767,7 @@ static int __init NCR5380_init(struct Scsi_Host *instance, int flags)
818 int i; 767 int i;
819 SETUP_HOSTDATA(instance); 768 SETUP_HOSTDATA(instance);
820 769
821 NCR5380_all_init(); 770 hostdata->host = instance;
822
823 hostdata->aborted = 0; 771 hostdata->aborted = 0;
824 hostdata->id_mask = 1 << instance->this_id; 772 hostdata->id_mask = 1 << instance->this_id;
825 hostdata->id_higher_mask = 0; 773 hostdata->id_higher_mask = 0;
@@ -829,7 +777,7 @@ static int __init NCR5380_init(struct Scsi_Host *instance, int flags)
829 for (i = 0; i < 8; ++i) 777 for (i = 0; i < 8; ++i)
830 hostdata->busy[i] = 0; 778 hostdata->busy[i] = 0;
831#ifdef SUPPORT_TAGS 779#ifdef SUPPORT_TAGS
832 init_tags(); 780 init_tags(hostdata);
833#endif 781#endif
834#if defined (REAL_DMA) 782#if defined (REAL_DMA)
835 hostdata->dma_len = 0; 783 hostdata->dma_len = 0;
@@ -838,19 +786,11 @@ static int __init NCR5380_init(struct Scsi_Host *instance, int flags)
838 hostdata->connected = NULL; 786 hostdata->connected = NULL;
839 hostdata->issue_queue = NULL; 787 hostdata->issue_queue = NULL;
840 hostdata->disconnected_queue = NULL; 788 hostdata->disconnected_queue = NULL;
841 hostdata->flags = FLAG_CHECK_LAST_BYTE_SENT; 789 hostdata->flags = flags;
842 790
843 if (!the_template) { 791 INIT_WORK(&hostdata->main_task, NCR5380_main);
844 the_template = instance->hostt;
845 first_instance = instance;
846 }
847 792
848#ifndef AUTOSENSE 793 prepare_info(instance);
849 if ((instance->cmd_per_lun > 1) || (instance->can_queue > 1))
850 printk("scsi%d: WARNING : support for multiple outstanding commands enabled\n"
851 " without AUTOSENSE option, contingent allegiance conditions may\n"
852 " be incorrectly cleared.\n", HOSTNO);
853#endif /* def AUTOSENSE */
854 794
855 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); 795 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
856 NCR5380_write(MODE_REG, MR_BASE); 796 NCR5380_write(MODE_REG, MR_BASE);
@@ -860,33 +800,35 @@ static int __init NCR5380_init(struct Scsi_Host *instance, int flags)
860 return 0; 800 return 0;
861} 801}
862 802
803/**
804 * NCR5380_exit - remove an NCR5380
805 * @instance: adapter to remove
806 *
807 * Assumes that no more work can be queued (e.g. by NCR5380_intr).
808 */
809
863static void NCR5380_exit(struct Scsi_Host *instance) 810static void NCR5380_exit(struct Scsi_Host *instance)
864{ 811{
865 /* Empty, as we didn't schedule any delayed work */ 812 struct NCR5380_hostdata *hostdata = shost_priv(instance);
813
814 cancel_work_sync(&hostdata->main_task);
866} 815}
867 816
868/* 817/**
869 * Function : int NCR5380_queue_command (Scsi_Cmnd *cmd, 818 * NCR5380_queue_command - queue a command
870 * void (*done)(Scsi_Cmnd *)) 819 * @instance: the relevant SCSI adapter
871 * 820 * @cmd: SCSI command
872 * Purpose : enqueues a SCSI command
873 *
874 * Inputs : cmd - SCSI command, done - function called on completion, with
875 * a pointer to the command descriptor.
876 *
877 * Returns : 0
878 *
879 * Side effects :
880 * cmd is added to the per instance issue_queue, with minor
881 * twiddling done to the host specific fields of cmd. If the
882 * main coroutine is not running, it is restarted.
883 * 821 *
822 * cmd is added to the per instance issue_queue, with minor
823 * twiddling done to the host specific fields of cmd. If the
824 * main coroutine is not running, it is restarted.
884 */ 825 */
885 826
886static int NCR5380_queue_command_lck(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *)) 827static int NCR5380_queue_command(struct Scsi_Host *instance,
828 struct scsi_cmnd *cmd)
887{ 829{
888 SETUP_HOSTDATA(cmd->device->host); 830 struct NCR5380_hostdata *hostdata = shost_priv(instance);
889 Scsi_Cmnd *tmp; 831 struct scsi_cmnd *tmp;
890 unsigned long flags; 832 unsigned long flags;
891 833
892#if (NDEBUG & NDEBUG_NO_WRITE) 834#if (NDEBUG & NDEBUG_NO_WRITE)
@@ -896,47 +838,17 @@ static int NCR5380_queue_command_lck(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *))
896 printk(KERN_NOTICE "scsi%d: WRITE attempted with NO_WRITE debugging flag set\n", 838 printk(KERN_NOTICE "scsi%d: WRITE attempted with NO_WRITE debugging flag set\n",
897 H_NO(cmd)); 839 H_NO(cmd));
898 cmd->result = (DID_ERROR << 16); 840 cmd->result = (DID_ERROR << 16);
899 done(cmd); 841 cmd->scsi_done(cmd);
900 return 0; 842 return 0;
901 } 843 }
902#endif /* (NDEBUG & NDEBUG_NO_WRITE) */ 844#endif /* (NDEBUG & NDEBUG_NO_WRITE) */
903 845
904#ifdef NCR5380_STATS
905# if 0
906 if (!hostdata->connected && !hostdata->issue_queue &&
907 !hostdata->disconnected_queue) {
908 hostdata->timebase = jiffies;
909 }
910# endif
911# ifdef NCR5380_STAT_LIMIT
912 if (scsi_bufflen(cmd) > NCR5380_STAT_LIMIT)
913# endif
914 switch (cmd->cmnd[0]) {
915 case WRITE:
916 case WRITE_6:
917 case WRITE_10:
918 hostdata->time_write[cmd->device->id] -= (jiffies - hostdata->timebase);
919 hostdata->bytes_write[cmd->device->id] += scsi_bufflen(cmd);
920 hostdata->pendingw++;
921 break;
922 case READ:
923 case READ_6:
924 case READ_10:
925 hostdata->time_read[cmd->device->id] -= (jiffies - hostdata->timebase);
926 hostdata->bytes_read[cmd->device->id] += scsi_bufflen(cmd);
927 hostdata->pendingr++;
928 break;
929 }
930#endif
931
932 /* 846 /*
933 * We use the host_scribble field as a pointer to the next command 847 * We use the host_scribble field as a pointer to the next command
934 * in a queue 848 * in a queue
935 */ 849 */
936 850
937 SET_NEXT(cmd, NULL); 851 SET_NEXT(cmd, NULL);
938 cmd->scsi_done = done;
939
940 cmd->result = 0; 852 cmd->result = 0;
941 853
942 /* 854 /*
@@ -946,7 +858,6 @@ static int NCR5380_queue_command_lck(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *))
946 * sense data is only guaranteed to be valid while the condition exists. 858 * sense data is only guaranteed to be valid while the condition exists.
947 */ 859 */
948 860
949 local_irq_save(flags);
950 /* ++guenther: now that the issue queue is being set up, we can lock ST-DMA. 861 /* ++guenther: now that the issue queue is being set up, we can lock ST-DMA.
951 * Otherwise a running NCR5380_main may steal the lock. 862 * Otherwise a running NCR5380_main may steal the lock.
952 * Lock before actually inserting due to fairness reasons explained in 863 * Lock before actually inserting due to fairness reasons explained in
@@ -959,17 +870,24 @@ static int NCR5380_queue_command_lck(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *))
959 * because also a timer int can trigger an abort or reset, which would 870 * because also a timer int can trigger an abort or reset, which would
960 * alter queues and touch the lock. 871 * alter queues and touch the lock.
961 */ 872 */
962 if (!IS_A_TT()) { 873 if (!NCR5380_acquire_dma_irq(instance))
963 /* perhaps stop command timer here */ 874 return SCSI_MLQUEUE_HOST_BUSY;
964 falcon_get_lock(); 875
965 /* perhaps restart command timer here */ 876 local_irq_save(flags);
966 } 877
878 /*
879 * Insert the cmd into the issue queue. Note that REQUEST SENSE
880 * commands are added to the head of the queue since any command will
881 * clear the contingent allegiance condition that exists and the
882 * sense data is only guaranteed to be valid while the condition exists.
883 */
884
967 if (!(hostdata->issue_queue) || (cmd->cmnd[0] == REQUEST_SENSE)) { 885 if (!(hostdata->issue_queue) || (cmd->cmnd[0] == REQUEST_SENSE)) {
968 LIST(cmd, hostdata->issue_queue); 886 LIST(cmd, hostdata->issue_queue);
969 SET_NEXT(cmd, hostdata->issue_queue); 887 SET_NEXT(cmd, hostdata->issue_queue);
970 hostdata->issue_queue = cmd; 888 hostdata->issue_queue = cmd;
971 } else { 889 } else {
972 for (tmp = (Scsi_Cmnd *)hostdata->issue_queue; 890 for (tmp = (struct scsi_cmnd *)hostdata->issue_queue;
973 NEXT(tmp); tmp = NEXT(tmp)) 891 NEXT(tmp); tmp = NEXT(tmp))
974 ; 892 ;
975 LIST(cmd, tmp); 893 LIST(cmd, tmp);
@@ -987,32 +905,42 @@ static int NCR5380_queue_command_lck(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *))
987 * If we're not in an interrupt, we can call NCR5380_main() 905 * If we're not in an interrupt, we can call NCR5380_main()
988 * unconditionally, because it cannot be already running. 906 * unconditionally, because it cannot be already running.
989 */ 907 */
990 if (in_interrupt() || ((flags >> 8) & 7) >= 6) 908 if (in_interrupt() || irqs_disabled())
991 queue_main(); 909 queue_main(hostdata);
992 else 910 else
993 NCR5380_main(NULL); 911 NCR5380_main(&hostdata->main_task);
994 return 0; 912 return 0;
995} 913}
996 914
997static DEF_SCSI_QCMD(NCR5380_queue_command) 915static inline void maybe_release_dma_irq(struct Scsi_Host *instance)
916{
917 struct NCR5380_hostdata *hostdata = shost_priv(instance);
918
919 /* Caller does the locking needed to set & test these data atomically */
920 if (!hostdata->disconnected_queue &&
921 !hostdata->issue_queue &&
922 !hostdata->connected &&
923 !hostdata->retain_dma_intr)
924 NCR5380_release_dma_irq(instance);
925}
998 926
999/* 927/**
1000 * Function : NCR5380_main (void) 928 * NCR5380_main - NCR state machines
1001 * 929 *
1002 * Purpose : NCR5380_main is a coroutine that runs as long as more work can 930 * NCR5380_main is a coroutine that runs as long as more work can
1003 * be done on the NCR5380 host adapters in a system. Both 931 * be done on the NCR5380 host adapters in a system. Both
1004 * NCR5380_queue_command() and NCR5380_intr() will try to start it 932 * NCR5380_queue_command() and NCR5380_intr() will try to start it
1005 * in case it is not running. 933 * in case it is not running.
1006 * 934 *
1007 * NOTE : NCR5380_main exits with interrupts *disabled*, the caller should 935 * Locks: called as its own thread with no locks held.
1008 * reenable them. This prevents reentrancy and kernel stack overflow.
1009 */ 936 */
1010 937
1011static void NCR5380_main(struct work_struct *work) 938static void NCR5380_main(struct work_struct *work)
1012{ 939{
1013 Scsi_Cmnd *tmp, *prev; 940 struct NCR5380_hostdata *hostdata =
1014 struct Scsi_Host *instance = first_instance; 941 container_of(work, struct NCR5380_hostdata, main_task);
1015 struct NCR5380_hostdata *hostdata = HOSTDATA(instance); 942 struct Scsi_Host *instance = hostdata->host;
943 struct scsi_cmnd *tmp, *prev;
1016 int done; 944 int done;
1017 unsigned long flags; 945 unsigned long flags;
1018 946
@@ -1037,9 +965,9 @@ static void NCR5380_main(struct work_struct *work)
1037 'main_running' is set here, and queues/executes main via the 965 'main_running' is set here, and queues/executes main via the
1038 task queue, it doesn't do any harm, just this instance of main 966 task queue, it doesn't do any harm, just this instance of main
1039 won't find any work left to do. */ 967 won't find any work left to do. */
1040 if (main_running) 968 if (hostdata->main_running)
1041 return; 969 return;
1042 main_running = 1; 970 hostdata->main_running = 1;
1043 971
1044 local_save_flags(flags); 972 local_save_flags(flags);
1045 do { 973 do {
@@ -1053,7 +981,7 @@ static void NCR5380_main(struct work_struct *work)
1053 * for a target that's not busy. 981 * for a target that's not busy.
1054 */ 982 */
1055#if (NDEBUG & NDEBUG_LISTS) 983#if (NDEBUG & NDEBUG_LISTS)
1056 for (tmp = (Scsi_Cmnd *) hostdata->issue_queue, prev = NULL; 984 for (tmp = (struct scsi_cmnd *) hostdata->issue_queue, prev = NULL;
1057 tmp && (tmp != prev); prev = tmp, tmp = NEXT(tmp)) 985 tmp && (tmp != prev); prev = tmp, tmp = NEXT(tmp))
1058 ; 986 ;
1059 /*printk("%p ", tmp);*/ 987 /*printk("%p ", tmp);*/
@@ -1061,16 +989,14 @@ static void NCR5380_main(struct work_struct *work)
1061 printk(" LOOP\n"); 989 printk(" LOOP\n");
1062 /* else printk("\n"); */ 990 /* else printk("\n"); */
1063#endif 991#endif
1064 for (tmp = (Scsi_Cmnd *) hostdata->issue_queue, 992 for (tmp = (struct scsi_cmnd *) hostdata->issue_queue,
1065 prev = NULL; tmp; prev = tmp, tmp = NEXT(tmp)) { 993 prev = NULL; tmp; prev = tmp, tmp = NEXT(tmp)) {
1066 u8 lun = tmp->device->lun; 994 u8 lun = tmp->device->lun;
1067 995
1068#if (NDEBUG & NDEBUG_LISTS) 996 dprintk(NDEBUG_LISTS,
1069 if (prev != tmp) 997 "MAIN tmp=%p target=%d busy=%d lun=%d\n",
1070 printk("MAIN tmp=%p target=%d busy=%d lun=%llu\n", 998 tmp, scmd_id(tmp), hostdata->busy[scmd_id(tmp)],
1071 tmp, tmp->device->id, hostdata->busy[tmp->device->id], 999 lun);
1072 lun);
1073#endif
1074 /* When we find one, remove it from the issue queue. */ 1000 /* When we find one, remove it from the issue queue. */
1075 /* ++guenther: possible race with Falcon locking */ 1001 /* ++guenther: possible race with Falcon locking */
1076 if ( 1002 if (
@@ -1090,7 +1016,7 @@ static void NCR5380_main(struct work_struct *work)
1090 hostdata->issue_queue = NEXT(tmp); 1016 hostdata->issue_queue = NEXT(tmp);
1091 } 1017 }
1092 SET_NEXT(tmp, NULL); 1018 SET_NEXT(tmp, NULL);
1093 falcon_dont_release++; 1019 hostdata->retain_dma_intr++;
1094 1020
1095 /* reenable interrupts after finding one */ 1021 /* reenable interrupts after finding one */
1096 local_irq_restore(flags); 1022 local_irq_restore(flags);
@@ -1117,12 +1043,12 @@ static void NCR5380_main(struct work_struct *work)
1117#ifdef SUPPORT_TAGS 1043#ifdef SUPPORT_TAGS
1118 cmd_get_tag(tmp, tmp->cmnd[0] != REQUEST_SENSE); 1044 cmd_get_tag(tmp, tmp->cmnd[0] != REQUEST_SENSE);
1119#endif 1045#endif
1120 if (!NCR5380_select(instance, tmp, 1046 if (!NCR5380_select(instance, tmp)) {
1121 (tmp->cmnd[0] == REQUEST_SENSE) ? TAG_NONE : 1047 local_irq_disable();
1122 TAG_NEXT)) { 1048 hostdata->retain_dma_intr--;
1123 falcon_dont_release--;
1124 /* release if target did not response! */ 1049 /* release if target did not response! */
1125 falcon_release_lock_if_possible(hostdata); 1050 maybe_release_dma_irq(instance);
1051 local_irq_restore(flags);
1126 break; 1052 break;
1127 } else { 1053 } else {
1128 local_irq_disable(); 1054 local_irq_disable();
@@ -1132,7 +1058,7 @@ static void NCR5380_main(struct work_struct *work)
1132#ifdef SUPPORT_TAGS 1058#ifdef SUPPORT_TAGS
1133 cmd_free_tag(tmp); 1059 cmd_free_tag(tmp);
1134#endif 1060#endif
1135 falcon_dont_release--; 1061 hostdata->retain_dma_intr--;
1136 local_irq_restore(flags); 1062 local_irq_restore(flags);
1137 dprintk(NDEBUG_MAIN, "scsi%d: main(): select() failed, " 1063 dprintk(NDEBUG_MAIN, "scsi%d: main(): select() failed, "
1138 "returned to issue_queue\n", HOSTNO); 1064 "returned to issue_queue\n", HOSTNO);
@@ -1160,7 +1086,7 @@ static void NCR5380_main(struct work_struct *work)
1160 /* Better allow ints _after_ 'main_running' has been cleared, else 1086 /* Better allow ints _after_ 'main_running' has been cleared, else
1161 an interrupt could believe we'll pick up the work it left for 1087 an interrupt could believe we'll pick up the work it left for
1162 us, but we won't see it anymore here... */ 1088 us, but we won't see it anymore here... */
1163 main_running = 0; 1089 hostdata->main_running = 0;
1164 local_irq_restore(flags); 1090 local_irq_restore(flags);
1165} 1091}
1166 1092
@@ -1179,9 +1105,11 @@ static void NCR5380_main(struct work_struct *work)
1179static void NCR5380_dma_complete(struct Scsi_Host *instance) 1105static void NCR5380_dma_complete(struct Scsi_Host *instance)
1180{ 1106{
1181 SETUP_HOSTDATA(instance); 1107 SETUP_HOSTDATA(instance);
1182 int transfered, saved_data = 0, overrun = 0, cnt, toPIO; 1108 int transferred;
1183 unsigned char **data, p; 1109 unsigned char **data;
1184 volatile int *count; 1110 volatile int *count;
1111 int saved_data = 0, overrun = 0;
1112 unsigned char p;
1185 1113
1186 if (!hostdata->connected) { 1114 if (!hostdata->connected) {
1187 printk(KERN_WARNING "scsi%d: received end of DMA interrupt with " 1115 printk(KERN_WARNING "scsi%d: received end of DMA interrupt with "
@@ -1189,7 +1117,7 @@ static void NCR5380_dma_complete(struct Scsi_Host *instance)
1189 return; 1117 return;
1190 } 1118 }
1191 1119
1192 if (atari_read_overruns) { 1120 if (hostdata->read_overruns) {
1193 p = hostdata->connected->SCp.phase; 1121 p = hostdata->connected->SCp.phase;
1194 if (p & SR_IO) { 1122 if (p & SR_IO) {
1195 udelay(10); 1123 udelay(10);
@@ -1207,21 +1135,41 @@ static void NCR5380_dma_complete(struct Scsi_Host *instance)
1207 HOSTNO, NCR5380_read(BUS_AND_STATUS_REG), 1135 HOSTNO, NCR5380_read(BUS_AND_STATUS_REG),
1208 NCR5380_read(STATUS_REG)); 1136 NCR5380_read(STATUS_REG));
1209 1137
1138#if defined(CONFIG_SUN3)
1139 if ((sun3scsi_dma_finish(rq_data_dir(hostdata->connected->request)))) {
1140 pr_err("scsi%d: overrun in UDC counter -- not prepared to deal with this!\n",
1141 instance->host_no);
1142 BUG();
1143 }
1144
1145 /* make sure we're not stuck in a data phase */
1146 if ((NCR5380_read(BUS_AND_STATUS_REG) & (BASR_PHASE_MATCH | BASR_ACK)) ==
1147 (BASR_PHASE_MATCH | BASR_ACK)) {
1148 pr_err("scsi%d: BASR %02x\n", instance->host_no,
1149 NCR5380_read(BUS_AND_STATUS_REG));
1150 pr_err("scsi%d: bus stuck in data phase -- probably a single byte overrun!\n",
1151 instance->host_no);
1152 BUG();
1153 }
1154#endif
1155
1210 (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG); 1156 (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
1211 NCR5380_write(MODE_REG, MR_BASE); 1157 NCR5380_write(MODE_REG, MR_BASE);
1212 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); 1158 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
1213 1159
1214 transfered = hostdata->dma_len - NCR5380_dma_residual(instance); 1160 transferred = hostdata->dma_len - NCR5380_dma_residual(instance);
1215 hostdata->dma_len = 0; 1161 hostdata->dma_len = 0;
1216 1162
1217 data = (unsigned char **)&hostdata->connected->SCp.ptr; 1163 data = (unsigned char **)&hostdata->connected->SCp.ptr;
1218 count = &hostdata->connected->SCp.this_residual; 1164 count = &hostdata->connected->SCp.this_residual;
1219 *data += transfered; 1165 *data += transferred;
1220 *count -= transfered; 1166 *count -= transferred;
1167
1168 if (hostdata->read_overruns) {
1169 int cnt, toPIO;
1221 1170
1222 if (atari_read_overruns) {
1223 if ((NCR5380_read(STATUS_REG) & PHASE_MASK) == p && (p & SR_IO)) { 1171 if ((NCR5380_read(STATUS_REG) & PHASE_MASK) == p && (p & SR_IO)) {
1224 cnt = toPIO = atari_read_overruns; 1172 cnt = toPIO = hostdata->read_overruns;
1225 if (overrun) { 1173 if (overrun) {
1226 dprintk(NDEBUG_DMA, "Got an input overrun, using saved byte\n"); 1174 dprintk(NDEBUG_DMA, "Got an input overrun, using saved byte\n");
1227 *(*data)++ = saved_data; 1175 *(*data)++ = saved_data;
@@ -1238,20 +1186,19 @@ static void NCR5380_dma_complete(struct Scsi_Host *instance)
1238#endif /* REAL_DMA */ 1186#endif /* REAL_DMA */
1239 1187
1240 1188
1241/* 1189/**
1242 * Function : void NCR5380_intr (int irq) 1190 * NCR5380_intr - generic NCR5380 irq handler
1243 * 1191 * @irq: interrupt number
1244 * Purpose : handle interrupts, reestablishing I_T_L or I_T_L_Q nexuses 1192 * @dev_id: device info
1245 * from the disconnected queue, and restarting NCR5380_main()
1246 * as required.
1247 *
1248 * Inputs : int irq, irq that caused this interrupt.
1249 * 1193 *
1194 * Handle interrupts, reestablishing I_T_L or I_T_L_Q nexuses
1195 * from the disconnected queue, and restarting NCR5380_main()
1196 * as required.
1250 */ 1197 */
1251 1198
1252static irqreturn_t NCR5380_intr(int irq, void *dev_id) 1199static irqreturn_t NCR5380_intr(int irq, void *dev_id)
1253{ 1200{
1254 struct Scsi_Host *instance = first_instance; 1201 struct Scsi_Host *instance = dev_id;
1255 int done = 1, handled = 0; 1202 int done = 1, handled = 0;
1256 unsigned char basr; 1203 unsigned char basr;
1257 1204
@@ -1265,7 +1212,6 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id)
1265 NCR5380_dprint(NDEBUG_INTR, instance); 1212 NCR5380_dprint(NDEBUG_INTR, instance);
1266 if ((NCR5380_read(STATUS_REG) & (SR_SEL|SR_IO)) == (SR_SEL|SR_IO)) { 1213 if ((NCR5380_read(STATUS_REG) & (SR_SEL|SR_IO)) == (SR_SEL|SR_IO)) {
1267 done = 0; 1214 done = 0;
1268 ENABLE_IRQ();
1269 dprintk(NDEBUG_INTR, "scsi%d: SEL interrupt\n", HOSTNO); 1215 dprintk(NDEBUG_INTR, "scsi%d: SEL interrupt\n", HOSTNO);
1270 NCR5380_reselect(instance); 1216 NCR5380_reselect(instance);
1271 (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG); 1217 (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
@@ -1295,17 +1241,19 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id)
1295 dprintk(NDEBUG_INTR, "scsi%d: PHASE MISM or EOP interrupt\n", HOSTNO); 1241 dprintk(NDEBUG_INTR, "scsi%d: PHASE MISM or EOP interrupt\n", HOSTNO);
1296 NCR5380_dma_complete( instance ); 1242 NCR5380_dma_complete( instance );
1297 done = 0; 1243 done = 0;
1298 ENABLE_IRQ();
1299 } else 1244 } else
1300#endif /* REAL_DMA */ 1245#endif /* REAL_DMA */
1301 { 1246 {
1302/* MS: Ignore unknown phase mismatch interrupts (caused by EOP interrupt) */ 1247/* MS: Ignore unknown phase mismatch interrupts (caused by EOP interrupt) */
1303 if (basr & BASR_PHASE_MATCH) 1248 if (basr & BASR_PHASE_MATCH)
1304 printk(KERN_NOTICE "scsi%d: unknown interrupt, " 1249 dprintk(NDEBUG_INTR, "scsi%d: unknown interrupt, "
1305 "BASR 0x%x, MR 0x%x, SR 0x%x\n", 1250 "BASR 0x%x, MR 0x%x, SR 0x%x\n",
1306 HOSTNO, basr, NCR5380_read(MODE_REG), 1251 HOSTNO, basr, NCR5380_read(MODE_REG),
1307 NCR5380_read(STATUS_REG)); 1252 NCR5380_read(STATUS_REG));
1308 (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG); 1253 (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
1254#ifdef SUN3_SCSI_VME
1255 dregs->csr |= CSR_DMA_ENABLE;
1256#endif
1309 } 1257 }
1310 } /* if !(SELECTION || PARITY) */ 1258 } /* if !(SELECTION || PARITY) */
1311 handled = 1; 1259 handled = 1;
@@ -1314,53 +1262,29 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id)
1314 "BASR 0x%X, MR 0x%X, SR 0x%x\n", HOSTNO, basr, 1262 "BASR 0x%X, MR 0x%X, SR 0x%x\n", HOSTNO, basr,
1315 NCR5380_read(MODE_REG), NCR5380_read(STATUS_REG)); 1263 NCR5380_read(MODE_REG), NCR5380_read(STATUS_REG));
1316 (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG); 1264 (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
1265#ifdef SUN3_SCSI_VME
1266 dregs->csr |= CSR_DMA_ENABLE;
1267#endif
1317 } 1268 }
1318 1269
1319 if (!done) { 1270 if (!done) {
1320 dprintk(NDEBUG_INTR, "scsi%d: in int routine, calling main\n", HOSTNO); 1271 dprintk(NDEBUG_INTR, "scsi%d: in int routine, calling main\n", HOSTNO);
1321 /* Put a call to NCR5380_main() on the queue... */ 1272 /* Put a call to NCR5380_main() on the queue... */
1322 queue_main(); 1273 queue_main(shost_priv(instance));
1323 } 1274 }
1324 return IRQ_RETVAL(handled); 1275 return IRQ_RETVAL(handled);
1325} 1276}
1326 1277
1327#ifdef NCR5380_STATS
1328static void collect_stats(struct NCR5380_hostdata* hostdata, Scsi_Cmnd *cmd)
1329{
1330# ifdef NCR5380_STAT_LIMIT
1331 if (scsi_bufflen(cmd) > NCR5380_STAT_LIMIT)
1332# endif
1333 switch (cmd->cmnd[0]) {
1334 case WRITE:
1335 case WRITE_6:
1336 case WRITE_10:
1337 hostdata->time_write[cmd->device->id] += (jiffies - hostdata->timebase);
1338 /*hostdata->bytes_write[cmd->device->id] += scsi_bufflen(cmd);*/
1339 hostdata->pendingw--;
1340 break;
1341 case READ:
1342 case READ_6:
1343 case READ_10:
1344 hostdata->time_read[cmd->device->id] += (jiffies - hostdata->timebase);
1345 /*hostdata->bytes_read[cmd->device->id] += scsi_bufflen(cmd);*/
1346 hostdata->pendingr--;
1347 break;
1348 }
1349}
1350#endif
1351
1352/* 1278/*
1353 * Function : int NCR5380_select (struct Scsi_Host *instance, Scsi_Cmnd *cmd, 1279 * Function : int NCR5380_select(struct Scsi_Host *instance,
1354 * int tag); 1280 * struct scsi_cmnd *cmd)
1355 * 1281 *
1356 * Purpose : establishes I_T_L or I_T_L_Q nexus for new or existing command, 1282 * Purpose : establishes I_T_L or I_T_L_Q nexus for new or existing command,
1357 * including ARBITRATION, SELECTION, and initial message out for 1283 * including ARBITRATION, SELECTION, and initial message out for
1358 * IDENTIFY and queue messages. 1284 * IDENTIFY and queue messages.
1359 * 1285 *
1360 * Inputs : instance - instantiation of the 5380 driver on which this 1286 * Inputs : instance - instantiation of the 5380 driver on which this
1361 * target lives, cmd - SCSI command to execute, tag - set to TAG_NEXT for 1287 * target lives, cmd - SCSI command to execute.
1362 * new tag, TAG_NONE for untagged queueing, otherwise set to the tag for
1363 * the command that is presently connected.
1364 * 1288 *
1365 * Returns : -1 if selection could not execute for some reason, 1289 * Returns : -1 if selection could not execute for some reason,
1366 * 0 if selection succeeded or failed because the target 1290 * 0 if selection succeeded or failed because the target
@@ -1380,7 +1304,7 @@ static void collect_stats(struct NCR5380_hostdata* hostdata, Scsi_Cmnd *cmd)
1380 * cmd->result host byte set to DID_BAD_TARGET. 1304 * cmd->result host byte set to DID_BAD_TARGET.
1381 */ 1305 */
1382 1306
1383static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag) 1307static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
1384{ 1308{
1385 SETUP_HOSTDATA(instance); 1309 SETUP_HOSTDATA(instance);
1386 unsigned char tmp[3], phase; 1310 unsigned char tmp[3], phase;
@@ -1562,7 +1486,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag)
1562 * selection. 1486 * selection.
1563 */ 1487 */
1564 1488
1565 timeout = jiffies + 25; 1489 timeout = jiffies + (250 * HZ / 1000);
1566 1490
1567 /* 1491 /*
1568 * XXX very interesting - we're seeing a bounce where the BSY we 1492 * XXX very interesting - we're seeing a bounce where the BSY we
@@ -1616,9 +1540,6 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag)
1616 return -1; 1540 return -1;
1617 } 1541 }
1618 cmd->result = DID_BAD_TARGET << 16; 1542 cmd->result = DID_BAD_TARGET << 16;
1619#ifdef NCR5380_STATS
1620 collect_stats(hostdata, cmd);
1621#endif
1622#ifdef SUPPORT_TAGS 1543#ifdef SUPPORT_TAGS
1623 cmd_free_tag(cmd); 1544 cmd_free_tag(cmd);
1624#endif 1545#endif
@@ -1676,6 +1597,9 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag)
1676#ifndef SUPPORT_TAGS 1597#ifndef SUPPORT_TAGS
1677 hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun); 1598 hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun);
1678#endif 1599#endif
1600#ifdef SUN3_SCSI_VME
1601 dregs->csr |= CSR_INTR;
1602#endif
1679 1603
1680 initialize_SCp(cmd); 1604 initialize_SCp(cmd);
1681 1605
@@ -1826,7 +1750,7 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance,
1826 * Returns : 0 on success, -1 on failure. 1750 * Returns : 0 on success, -1 on failure.
1827 */ 1751 */
1828 1752
1829static int do_abort(struct Scsi_Host *host) 1753static int do_abort(struct Scsi_Host *instance)
1830{ 1754{
1831 unsigned char tmp, *msgptr, phase; 1755 unsigned char tmp, *msgptr, phase;
1832 int len; 1756 int len;
@@ -1861,7 +1785,7 @@ static int do_abort(struct Scsi_Host *host)
1861 msgptr = &tmp; 1785 msgptr = &tmp;
1862 len = 1; 1786 len = 1;
1863 phase = PHASE_MSGOUT; 1787 phase = PHASE_MSGOUT;
1864 NCR5380_transfer_pio(host, &phase, &len, &msgptr); 1788 NCR5380_transfer_pio(instance, &phase, &len, &msgptr);
1865 1789
1866 /* 1790 /*
1867 * If we got here, and the command completed successfully, 1791 * If we got here, and the command completed successfully,
@@ -1899,17 +1823,62 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance,
1899 SETUP_HOSTDATA(instance); 1823 SETUP_HOSTDATA(instance);
1900 register int c = *count; 1824 register int c = *count;
1901 register unsigned char p = *phase; 1825 register unsigned char p = *phase;
1826 unsigned long flags;
1827
1828#if defined(CONFIG_SUN3)
1829 /* sanity check */
1830 if (!sun3_dma_setup_done) {
1831 pr_err("scsi%d: transfer_dma without setup!\n",
1832 instance->host_no);
1833 BUG();
1834 }
1835 hostdata->dma_len = c;
1836
1837 dprintk(NDEBUG_DMA, "scsi%d: initializing DMA for %s, %d bytes %s %p\n",
1838 instance->host_no, (p & SR_IO) ? "reading" : "writing",
1839 c, (p & SR_IO) ? "to" : "from", *data);
1840
1841 /* netbsd turns off ints here, why not be safe and do it too */
1842 local_irq_save(flags);
1843
1844 /* send start chain */
1845 sun3scsi_dma_start(c, *data);
1846
1847 if (p & SR_IO) {
1848 NCR5380_write(TARGET_COMMAND_REG, 1);
1849 NCR5380_read(RESET_PARITY_INTERRUPT_REG);
1850 NCR5380_write(INITIATOR_COMMAND_REG, 0);
1851 NCR5380_write(MODE_REG,
1852 (NCR5380_read(MODE_REG) | MR_DMA_MODE | MR_ENABLE_EOP_INTR));
1853 NCR5380_write(START_DMA_INITIATOR_RECEIVE_REG, 0);
1854 } else {
1855 NCR5380_write(TARGET_COMMAND_REG, 0);
1856 NCR5380_read(RESET_PARITY_INTERRUPT_REG);
1857 NCR5380_write(INITIATOR_COMMAND_REG, ICR_ASSERT_DATA);
1858 NCR5380_write(MODE_REG,
1859 (NCR5380_read(MODE_REG) | MR_DMA_MODE | MR_ENABLE_EOP_INTR));
1860 NCR5380_write(START_DMA_SEND_REG, 0);
1861 }
1862
1863#ifdef SUN3_SCSI_VME
1864 dregs->csr |= CSR_DMA_ENABLE;
1865#endif
1866
1867 local_irq_restore(flags);
1868
1869 sun3_dma_active = 1;
1870
1871#else /* !defined(CONFIG_SUN3) */
1902 register unsigned char *d = *data; 1872 register unsigned char *d = *data;
1903 unsigned char tmp; 1873 unsigned char tmp;
1904 unsigned long flags;
1905 1874
1906 if ((tmp = (NCR5380_read(STATUS_REG) & PHASE_MASK)) != p) { 1875 if ((tmp = (NCR5380_read(STATUS_REG) & PHASE_MASK)) != p) {
1907 *phase = tmp; 1876 *phase = tmp;
1908 return -1; 1877 return -1;
1909 } 1878 }
1910 1879
1911 if (atari_read_overruns && (p & SR_IO)) 1880 if (hostdata->read_overruns && (p & SR_IO))
1912 c -= atari_read_overruns; 1881 c -= hostdata->read_overruns;
1913 1882
1914 dprintk(NDEBUG_DMA, "scsi%d: initializing DMA for %s, %d bytes %s %p\n", 1883 dprintk(NDEBUG_DMA, "scsi%d: initializing DMA for %s, %d bytes %s %p\n",
1915 HOSTNO, (p & SR_IO) ? "reading" : "writing", 1884 HOSTNO, (p & SR_IO) ? "reading" : "writing",
@@ -1921,7 +1890,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance,
1921 NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE | MR_ENABLE_EOP_INTR | MR_MONITOR_BSY); 1890 NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE | MR_ENABLE_EOP_INTR | MR_MONITOR_BSY);
1922#endif /* def REAL_DMA */ 1891#endif /* def REAL_DMA */
1923 1892
1924 if (IS_A_TT()) { 1893 if (!(hostdata->flags & FLAG_LATE_DMA_SETUP)) {
1925 /* On the Medusa, it is a must to initialize the DMA before 1894 /* On the Medusa, it is a must to initialize the DMA before
1926 * starting the NCR. This is also the cleaner way for the TT. 1895 * starting the NCR. This is also the cleaner way for the TT.
1927 */ 1896 */
@@ -1939,7 +1908,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance,
1939 NCR5380_write(START_DMA_SEND_REG, 0); 1908 NCR5380_write(START_DMA_SEND_REG, 0);
1940 } 1909 }
1941 1910
1942 if (!IS_A_TT()) { 1911 if (hostdata->flags & FLAG_LATE_DMA_SETUP) {
1943 /* On the Falcon, the DMA setup must be done after the last */ 1912 /* On the Falcon, the DMA setup must be done after the last */
1944 /* NCR access, else the DMA setup gets trashed! 1913 /* NCR access, else the DMA setup gets trashed!
1945 */ 1914 */
@@ -1949,6 +1918,8 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance,
1949 NCR5380_dma_write_setup(instance, d, c); 1918 NCR5380_dma_write_setup(instance, d, c);
1950 local_irq_restore(flags); 1919 local_irq_restore(flags);
1951 } 1920 }
1921#endif /* !defined(CONFIG_SUN3) */
1922
1952 return 0; 1923 return 0;
1953} 1924}
1954#endif /* defined(REAL_DMA) */ 1925#endif /* defined(REAL_DMA) */
@@ -1982,7 +1953,11 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
1982#endif 1953#endif
1983 unsigned char *data; 1954 unsigned char *data;
1984 unsigned char phase, tmp, extended_msg[10], old_phase = 0xff; 1955 unsigned char phase, tmp, extended_msg[10], old_phase = 0xff;
1985 Scsi_Cmnd *cmd = (Scsi_Cmnd *) hostdata->connected; 1956 struct scsi_cmnd *cmd = (struct scsi_cmnd *) hostdata->connected;
1957
1958#ifdef SUN3_SCSI_VME
1959 dregs->csr |= CSR_INTR;
1960#endif
1986 1961
1987 while (1) { 1962 while (1) {
1988 tmp = NCR5380_read(STATUS_REG); 1963 tmp = NCR5380_read(STATUS_REG);
@@ -1993,6 +1968,33 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
1993 old_phase = phase; 1968 old_phase = phase;
1994 NCR5380_dprint_phase(NDEBUG_INFORMATION, instance); 1969 NCR5380_dprint_phase(NDEBUG_INFORMATION, instance);
1995 } 1970 }
1971#if defined(CONFIG_SUN3)
1972 if (phase == PHASE_CMDOUT) {
1973#if defined(REAL_DMA)
1974 void *d;
1975 unsigned long count;
1976
1977 if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) {
1978 count = cmd->SCp.buffer->length;
1979 d = sg_virt(cmd->SCp.buffer);
1980 } else {
1981 count = cmd->SCp.this_residual;
1982 d = cmd->SCp.ptr;
1983 }
1984 /* this command setup for dma yet? */
1985 if ((count >= DMA_MIN_SIZE) && (sun3_dma_setup_done != cmd)) {
1986 if (cmd->request->cmd_type == REQ_TYPE_FS) {
1987 sun3scsi_dma_setup(d, count,
1988 rq_data_dir(cmd->request));
1989 sun3_dma_setup_done = cmd;
1990 }
1991 }
1992#endif
1993#ifdef SUN3_SCSI_VME
1994 dregs->csr |= CSR_INTR;
1995#endif
1996 }
1997#endif /* CONFIG_SUN3 */
1996 1998
1997 if (sink && (phase != PHASE_MSGOUT)) { 1999 if (sink && (phase != PHASE_MSGOUT)) {
1998 NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp)); 2000 NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp));
@@ -2054,8 +2056,11 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
2054 */ 2056 */
2055 2057
2056#if defined(REAL_DMA) 2058#if defined(REAL_DMA)
2057 if (!cmd->device->borken && 2059 if (
2058 (transfersize = NCR5380_dma_xfer_len(instance,cmd,phase)) > 31) { 2060#if !defined(CONFIG_SUN3)
2061 !cmd->device->borken &&
2062#endif
2063 (transfersize = NCR5380_dma_xfer_len(instance, cmd, phase)) >= DMA_MIN_SIZE) {
2059 len = transfersize; 2064 len = transfersize;
2060 cmd->SCp.phase = phase; 2065 cmd->SCp.phase = phase;
2061 if (NCR5380_transfer_dma(instance, &phase, 2066 if (NCR5380_transfer_dma(instance, &phase,
@@ -2064,9 +2069,8 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
2064 * If the watchdog timer fires, all future 2069 * If the watchdog timer fires, all future
2065 * accesses to this device will use the 2070 * accesses to this device will use the
2066 * polled-IO. */ 2071 * polled-IO. */
2067 printk(KERN_NOTICE "scsi%d: switching target %d " 2072 scmd_printk(KERN_INFO, cmd,
2068 "lun %llu to slow handshake\n", HOSTNO, 2073 "switching to slow handshake\n");
2069 cmd->device->id, cmd->device->lun);
2070 cmd->device->borken = 1; 2074 cmd->device->borken = 1;
2071 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | 2075 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
2072 ICR_ASSERT_ATN); 2076 ICR_ASSERT_ATN);
@@ -2092,6 +2096,11 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
2092 NCR5380_transfer_pio(instance, &phase, 2096 NCR5380_transfer_pio(instance, &phase,
2093 (int *)&cmd->SCp.this_residual, 2097 (int *)&cmd->SCp.this_residual,
2094 (unsigned char **)&cmd->SCp.ptr); 2098 (unsigned char **)&cmd->SCp.ptr);
2099#if defined(CONFIG_SUN3) && defined(REAL_DMA)
2100 /* if we had intended to dma that command clear it */
2101 if (sun3_dma_setup_done == cmd)
2102 sun3_dma_setup_done = NULL;
2103#endif
2095 break; 2104 break;
2096 case PHASE_MSGIN: 2105 case PHASE_MSGIN:
2097 len = 1; 2106 len = 1;
@@ -2145,9 +2154,6 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
2145 dprintk(NDEBUG_LINKED, "scsi%d: target %d lun %llu linked request " 2154 dprintk(NDEBUG_LINKED, "scsi%d: target %d lun %llu linked request "
2146 "done, calling scsi_done().\n", 2155 "done, calling scsi_done().\n",
2147 HOSTNO, cmd->device->id, cmd->device->lun); 2156 HOSTNO, cmd->device->id, cmd->device->lun);
2148#ifdef NCR5380_STATS
2149 collect_stats(hostdata, cmd);
2150#endif
2151 cmd->scsi_done(cmd); 2157 cmd->scsi_done(cmd);
2152 cmd = hostdata->connected; 2158 cmd = hostdata->connected;
2153 break; 2159 break;
@@ -2156,11 +2162,12 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
2156 case COMMAND_COMPLETE: 2162 case COMMAND_COMPLETE:
2157 /* Accept message by clearing ACK */ 2163 /* Accept message by clearing ACK */
2158 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); 2164 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
2159 /* ++guenther: possible race with Falcon locking */
2160 falcon_dont_release++;
2161 hostdata->connected = NULL;
2162 dprintk(NDEBUG_QUEUES, "scsi%d: command for target %d, lun %llu " 2165 dprintk(NDEBUG_QUEUES, "scsi%d: command for target %d, lun %llu "
2163 "completed\n", HOSTNO, cmd->device->id, cmd->device->lun); 2166 "completed\n", HOSTNO, cmd->device->id, cmd->device->lun);
2167
2168 local_irq_save(flags);
2169 hostdata->retain_dma_intr++;
2170 hostdata->connected = NULL;
2164#ifdef SUPPORT_TAGS 2171#ifdef SUPPORT_TAGS
2165 cmd_free_tag(cmd); 2172 cmd_free_tag(cmd);
2166 if (status_byte(cmd->SCp.Status) == QUEUE_FULL) { 2173 if (status_byte(cmd->SCp.Status) == QUEUE_FULL) {
@@ -2172,7 +2179,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
2172 */ 2179 */
2173 /* ++Andreas: the mid level code knows about 2180 /* ++Andreas: the mid level code knows about
2174 QUEUE_FULL now. */ 2181 QUEUE_FULL now. */
2175 TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun]; 2182 struct tag_alloc *ta = &hostdata->TagAlloc[scmd_id(cmd)][cmd->device->lun];
2176 dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %llu returned " 2183 dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %llu returned "
2177 "QUEUE_FULL after %d commands\n", 2184 "QUEUE_FULL after %d commands\n",
2178 HOSTNO, cmd->device->id, cmd->device->lun, 2185 HOSTNO, cmd->device->id, cmd->device->lun,
@@ -2207,7 +2214,6 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
2207 else if (status_byte(cmd->SCp.Status) != GOOD) 2214 else if (status_byte(cmd->SCp.Status) != GOOD)
2208 cmd->result = (cmd->result & 0x00ffff) | (DID_ERROR << 16); 2215 cmd->result = (cmd->result & 0x00ffff) | (DID_ERROR << 16);
2209 2216
2210#ifdef AUTOSENSE
2211 if ((cmd->cmnd[0] == REQUEST_SENSE) && 2217 if ((cmd->cmnd[0] == REQUEST_SENSE) &&
2212 hostdata->ses.cmd_len) { 2218 hostdata->ses.cmd_len) {
2213 scsi_eh_restore_cmnd(cmd, &hostdata->ses); 2219 scsi_eh_restore_cmnd(cmd, &hostdata->ses);
@@ -2220,22 +2226,17 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
2220 2226
2221 dprintk(NDEBUG_AUTOSENSE, "scsi%d: performing request sense\n", HOSTNO); 2227 dprintk(NDEBUG_AUTOSENSE, "scsi%d: performing request sense\n", HOSTNO);
2222 2228
2223 local_irq_save(flags);
2224 LIST(cmd,hostdata->issue_queue); 2229 LIST(cmd,hostdata->issue_queue);
2225 SET_NEXT(cmd, hostdata->issue_queue); 2230 SET_NEXT(cmd, hostdata->issue_queue);
2226 hostdata->issue_queue = (Scsi_Cmnd *) cmd; 2231 hostdata->issue_queue = (struct scsi_cmnd *) cmd;
2227 local_irq_restore(flags);
2228 dprintk(NDEBUG_QUEUES, "scsi%d: REQUEST SENSE added to head of " 2232 dprintk(NDEBUG_QUEUES, "scsi%d: REQUEST SENSE added to head of "
2229 "issue queue\n", H_NO(cmd)); 2233 "issue queue\n", H_NO(cmd));
2230 } else 2234 } else {
2231#endif /* def AUTOSENSE */
2232 {
2233#ifdef NCR5380_STATS
2234 collect_stats(hostdata, cmd);
2235#endif
2236 cmd->scsi_done(cmd); 2235 cmd->scsi_done(cmd);
2237 } 2236 }
2238 2237
2238 local_irq_restore(flags);
2239
2239 NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); 2240 NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
2240 /* 2241 /*
2241 * Restore phase bits to 0 so an interrupted selection, 2242 * Restore phase bits to 0 so an interrupted selection,
@@ -2246,12 +2247,14 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
2246 while ((NCR5380_read(STATUS_REG) & SR_BSY) && !hostdata->connected) 2247 while ((NCR5380_read(STATUS_REG) & SR_BSY) && !hostdata->connected)
2247 barrier(); 2248 barrier();
2248 2249
2249 falcon_dont_release--; 2250 local_irq_save(flags);
2251 hostdata->retain_dma_intr--;
2250 /* ++roman: For Falcon SCSI, release the lock on the 2252 /* ++roman: For Falcon SCSI, release the lock on the
2251 * ST-DMA here if no other commands are waiting on the 2253 * ST-DMA here if no other commands are waiting on the
2252 * disconnected queue. 2254 * disconnected queue.
2253 */ 2255 */
2254 falcon_release_lock_if_possible(hostdata); 2256 maybe_release_dma_irq(instance);
2257 local_irq_restore(flags);
2255 return; 2258 return;
2256 case MESSAGE_REJECT: 2259 case MESSAGE_REJECT:
2257 /* Accept message by clearing ACK */ 2260 /* Accept message by clearing ACK */
@@ -2303,6 +2306,9 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
2303 /* Wait for bus free to avoid nasty timeouts */ 2306 /* Wait for bus free to avoid nasty timeouts */
2304 while ((NCR5380_read(STATUS_REG) & SR_BSY) && !hostdata->connected) 2307 while ((NCR5380_read(STATUS_REG) & SR_BSY) && !hostdata->connected)
2305 barrier(); 2308 barrier();
2309#ifdef SUN3_SCSI_VME
2310 dregs->csr |= CSR_DMA_ENABLE;
2311#endif
2306 return; 2312 return;
2307 /* 2313 /*
2308 * The SCSI data pointer is *IMPLICITLY* saved on a disconnect 2314 * The SCSI data pointer is *IMPLICITLY* saved on a disconnect
@@ -2384,20 +2390,18 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
2384 */ 2390 */
2385 default: 2391 default:
2386 if (!tmp) { 2392 if (!tmp) {
2387 printk(KERN_DEBUG "scsi%d: rejecting message ", HOSTNO); 2393 printk(KERN_INFO "scsi%d: rejecting message ",
2394 instance->host_no);
2388 spi_print_msg(extended_msg); 2395 spi_print_msg(extended_msg);
2389 printk("\n"); 2396 printk("\n");
2390 } else if (tmp != EXTENDED_MESSAGE) 2397 } else if (tmp != EXTENDED_MESSAGE)
2391 printk(KERN_DEBUG "scsi%d: rejecting unknown " 2398 scmd_printk(KERN_INFO, cmd,
2392 "message %02x from target %d, lun %llu\n", 2399 "rejecting unknown message %02x\n",
2393 HOSTNO, tmp, cmd->device->id, cmd->device->lun); 2400 tmp);
2394 else 2401 else
2395 printk(KERN_DEBUG "scsi%d: rejecting unknown " 2402 scmd_printk(KERN_INFO, cmd,
2396 "extended message " 2403 "rejecting unknown extended message code %02x, length %d\n",
2397 "code %02x, length %d from target %d, lun %llu\n", 2404 extended_msg[1], extended_msg[0]);
2398 HOSTNO, extended_msg[1], extended_msg[0],
2399 cmd->device->id, cmd->device->lun);
2400
2401 2405
2402 msgout = MESSAGE_REJECT; 2406 msgout = MESSAGE_REJECT;
2403 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); 2407 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
@@ -2410,6 +2414,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
2410 hostdata->last_message = msgout; 2414 hostdata->last_message = msgout;
2411 NCR5380_transfer_pio(instance, &phase, &len, &data); 2415 NCR5380_transfer_pio(instance, &phase, &len, &data);
2412 if (msgout == ABORT) { 2416 if (msgout == ABORT) {
2417 local_irq_save(flags);
2413#ifdef SUPPORT_TAGS 2418#ifdef SUPPORT_TAGS
2414 cmd_free_tag(cmd); 2419 cmd_free_tag(cmd);
2415#else 2420#else
@@ -2417,12 +2422,10 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
2417#endif 2422#endif
2418 hostdata->connected = NULL; 2423 hostdata->connected = NULL;
2419 cmd->result = DID_ERROR << 16; 2424 cmd->result = DID_ERROR << 16;
2420#ifdef NCR5380_STATS
2421 collect_stats(hostdata, cmd);
2422#endif
2423 cmd->scsi_done(cmd);
2424 NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); 2425 NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
2425 falcon_release_lock_if_possible(hostdata); 2426 maybe_release_dma_irq(instance);
2427 local_irq_restore(flags);
2428 cmd->scsi_done(cmd);
2426 return; 2429 return;
2427 } 2430 }
2428 msgout = NOP; 2431 msgout = NOP;
@@ -2455,7 +2458,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
2455 * Function : void NCR5380_reselect (struct Scsi_Host *instance) 2458 * Function : void NCR5380_reselect (struct Scsi_Host *instance)
2456 * 2459 *
2457 * Purpose : does reselection, initializing the instance->connected 2460 * Purpose : does reselection, initializing the instance->connected
2458 * field to point to the Scsi_Cmnd for which the I_T_L or I_T_L_Q 2461 * field to point to the scsi_cmnd for which the I_T_L or I_T_L_Q
2459 * nexus has been reestablished, 2462 * nexus has been reestablished,
2460 * 2463 *
2461 * Inputs : instance - this instance of the NCR5380. 2464 * Inputs : instance - this instance of the NCR5380.
@@ -2463,19 +2466,21 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
2463 */ 2466 */
2464 2467
2465 2468
2469/* it might eventually prove necessary to do a dma setup on
2470 reselection, but it doesn't seem to be needed now -- sam */
2471
2466static void NCR5380_reselect(struct Scsi_Host *instance) 2472static void NCR5380_reselect(struct Scsi_Host *instance)
2467{ 2473{
2468 SETUP_HOSTDATA(instance); 2474 SETUP_HOSTDATA(instance);
2469 unsigned char target_mask; 2475 unsigned char target_mask;
2470 unsigned char lun, phase; 2476 unsigned char lun;
2471 int len;
2472#ifdef SUPPORT_TAGS 2477#ifdef SUPPORT_TAGS
2473 unsigned char tag; 2478 unsigned char tag;
2474#endif 2479#endif
2475 unsigned char msg[3]; 2480 unsigned char msg[3];
2476 unsigned char *data; 2481 int __maybe_unused len;
2477 Scsi_Cmnd *tmp = NULL, *prev; 2482 unsigned char __maybe_unused *data, __maybe_unused phase;
2478/* unsigned long flags; */ 2483 struct scsi_cmnd *tmp = NULL, *prev;
2479 2484
2480 /* 2485 /*
2481 * Disable arbitration, etc. since the host adapter obviously 2486 * Disable arbitration, etc. since the host adapter obviously
@@ -2511,10 +2516,18 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
2511 while (!(NCR5380_read(STATUS_REG) & SR_REQ)) 2516 while (!(NCR5380_read(STATUS_REG) & SR_REQ))
2512 ; 2517 ;
2513 2518
2519#if defined(CONFIG_SUN3) && defined(REAL_DMA)
2520 /* acknowledge toggle to MSGIN */
2521 NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(PHASE_MSGIN));
2522
2523 /* peek at the byte without really hitting the bus */
2524 msg[0] = NCR5380_read(CURRENT_SCSI_DATA_REG);
2525#else
2514 len = 1; 2526 len = 1;
2515 data = msg; 2527 data = msg;
2516 phase = PHASE_MSGIN; 2528 phase = PHASE_MSGIN;
2517 NCR5380_transfer_pio(instance, &phase, &len, &data); 2529 NCR5380_transfer_pio(instance, &phase, &len, &data);
2530#endif
2518 2531
2519 if (!(msg[0] & 0x80)) { 2532 if (!(msg[0] & 0x80)) {
2520 printk(KERN_DEBUG "scsi%d: expecting IDENTIFY message, got ", HOSTNO); 2533 printk(KERN_DEBUG "scsi%d: expecting IDENTIFY message, got ", HOSTNO);
@@ -2524,13 +2537,13 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
2524 } 2537 }
2525 lun = (msg[0] & 0x07); 2538 lun = (msg[0] & 0x07);
2526 2539
2527#ifdef SUPPORT_TAGS 2540#if defined(SUPPORT_TAGS) && !defined(CONFIG_SUN3)
2528 /* If the phase is still MSGIN, the target wants to send some more 2541 /* If the phase is still MSGIN, the target wants to send some more
2529 * messages. In case it supports tagged queuing, this is probably a 2542 * messages. In case it supports tagged queuing, this is probably a
2530 * SIMPLE_QUEUE_TAG for the I_T_L_Q nexus. 2543 * SIMPLE_QUEUE_TAG for the I_T_L_Q nexus.
2531 */ 2544 */
2532 tag = TAG_NONE; 2545 tag = TAG_NONE;
2533 if (phase == PHASE_MSGIN && setup_use_tagged_queuing) { 2546 if (phase == PHASE_MSGIN && (hostdata->flags & FLAG_TAGGED_QUEUING)) {
2534 /* Accept previous IDENTIFY message by clearing ACK */ 2547 /* Accept previous IDENTIFY message by clearing ACK */
2535 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); 2548 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
2536 len = 2; 2549 len = 2;
@@ -2548,15 +2561,13 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
2548 * just reestablished, and remove it from the disconnected queue. 2561 * just reestablished, and remove it from the disconnected queue.
2549 */ 2562 */
2550 2563
2551 for (tmp = (Scsi_Cmnd *) hostdata->disconnected_queue, prev = NULL; 2564 for (tmp = (struct scsi_cmnd *) hostdata->disconnected_queue, prev = NULL;
2552 tmp; prev = tmp, tmp = NEXT(tmp)) { 2565 tmp; prev = tmp, tmp = NEXT(tmp)) {
2553 if ((target_mask == (1 << tmp->device->id)) && (lun == tmp->device->lun) 2566 if ((target_mask == (1 << tmp->device->id)) && (lun == tmp->device->lun)
2554#ifdef SUPPORT_TAGS 2567#ifdef SUPPORT_TAGS
2555 && (tag == tmp->tag) 2568 && (tag == tmp->tag)
2556#endif 2569#endif
2557 ) { 2570 ) {
2558 /* ++guenther: prevent race with falcon_release_lock */
2559 falcon_dont_release++;
2560 if (prev) { 2571 if (prev) {
2561 REMOVE(prev, NEXT(prev), tmp, NEXT(tmp)); 2572 REMOVE(prev, NEXT(prev), tmp, NEXT(tmp));
2562 SET_NEXT(prev, NEXT(tmp)); 2573 SET_NEXT(prev, NEXT(tmp));
@@ -2588,26 +2599,67 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
2588 return; 2599 return;
2589 } 2600 }
2590 2601
2602#if defined(CONFIG_SUN3) && defined(REAL_DMA)
2603 /* engage dma setup for the command we just saw */
2604 {
2605 void *d;
2606 unsigned long count;
2607
2608 if (!tmp->SCp.this_residual && tmp->SCp.buffers_residual) {
2609 count = tmp->SCp.buffer->length;
2610 d = sg_virt(tmp->SCp.buffer);
2611 } else {
2612 count = tmp->SCp.this_residual;
2613 d = tmp->SCp.ptr;
2614 }
2615 /* setup this command for dma if not already */
2616 if ((count >= DMA_MIN_SIZE) && (sun3_dma_setup_done != tmp)) {
2617 sun3scsi_dma_setup(d, count, rq_data_dir(tmp->request));
2618 sun3_dma_setup_done = tmp;
2619 }
2620 }
2621
2622 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK);
2623#endif
2624
2591 /* Accept message by clearing ACK */ 2625 /* Accept message by clearing ACK */
2592 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); 2626 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
2593 2627
2628#if defined(SUPPORT_TAGS) && defined(CONFIG_SUN3)
2629 /* If the phase is still MSGIN, the target wants to send some more
2630 * messages. In case it supports tagged queuing, this is probably a
2631 * SIMPLE_QUEUE_TAG for the I_T_L_Q nexus.
2632 */
2633 tag = TAG_NONE;
2634 if (phase == PHASE_MSGIN && setup_use_tagged_queuing) {
2635 /* Accept previous IDENTIFY message by clearing ACK */
2636 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
2637 len = 2;
2638 data = msg + 1;
2639 if (!NCR5380_transfer_pio(instance, &phase, &len, &data) &&
2640 msg[1] == SIMPLE_QUEUE_TAG)
2641 tag = msg[2];
2642 dprintk(NDEBUG_TAGS, "scsi%d: target mask %02x, lun %d sent tag %d at reselection\n"
2643 HOSTNO, target_mask, lun, tag);
2644 }
2645#endif
2646
2594 hostdata->connected = tmp; 2647 hostdata->connected = tmp;
2595 dprintk(NDEBUG_RESELECTION, "scsi%d: nexus established, target = %d, lun = %llu, tag = %d\n", 2648 dprintk(NDEBUG_RESELECTION, "scsi%d: nexus established, target = %d, lun = %llu, tag = %d\n",
2596 HOSTNO, tmp->device->id, tmp->device->lun, tmp->tag); 2649 HOSTNO, tmp->device->id, tmp->device->lun, tmp->tag);
2597 falcon_dont_release--;
2598} 2650}
2599 2651
2600 2652
2601/* 2653/*
2602 * Function : int NCR5380_abort (Scsi_Cmnd *cmd) 2654 * Function : int NCR5380_abort (struct scsi_cmnd *cmd)
2603 * 2655 *
2604 * Purpose : abort a command 2656 * Purpose : abort a command
2605 * 2657 *
2606 * Inputs : cmd - the Scsi_Cmnd to abort, code - code to set the 2658 * Inputs : cmd - the scsi_cmnd to abort, code - code to set the
2607 * host byte of the result field to, if zero DID_ABORTED is 2659 * host byte of the result field to, if zero DID_ABORTED is
2608 * used. 2660 * used.
2609 * 2661 *
2610 * Returns : 0 - success, -1 on failure. 2662 * Returns : SUCCESS - success, FAILED on failure.
2611 * 2663 *
2612 * XXX - there is no way to abort the command that is currently 2664 * XXX - there is no way to abort the command that is currently
2613 * connected, you have to wait for it to complete. If this is 2665 * connected, you have to wait for it to complete. If this is
@@ -2616,24 +2668,19 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
2616 */ 2668 */
2617 2669
2618static 2670static
2619int NCR5380_abort(Scsi_Cmnd *cmd) 2671int NCR5380_abort(struct scsi_cmnd *cmd)
2620{ 2672{
2621 struct Scsi_Host *instance = cmd->device->host; 2673 struct Scsi_Host *instance = cmd->device->host;
2622 SETUP_HOSTDATA(instance); 2674 SETUP_HOSTDATA(instance);
2623 Scsi_Cmnd *tmp, **prev; 2675 struct scsi_cmnd *tmp, **prev;
2624 unsigned long flags; 2676 unsigned long flags;
2625 2677
2626 printk(KERN_NOTICE "scsi%d: aborting command\n", HOSTNO); 2678 scmd_printk(KERN_NOTICE, cmd, "aborting command\n");
2627 scsi_print_command(cmd);
2628 2679
2629 NCR5380_print_status(instance); 2680 NCR5380_print_status(instance);
2630 2681
2631 local_irq_save(flags); 2682 local_irq_save(flags);
2632 2683
2633 if (!IS_A_TT() && !falcon_got_lock)
2634 printk(KERN_ERR "scsi%d: !!BINGO!! Falcon has no lock in NCR5380_abort\n",
2635 HOSTNO);
2636
2637 dprintk(NDEBUG_ABORT, "scsi%d: abort called basr 0x%02x, sr 0x%02x\n", HOSTNO, 2684 dprintk(NDEBUG_ABORT, "scsi%d: abort called basr 0x%02x, sr 0x%02x\n", HOSTNO,
2638 NCR5380_read(BUS_AND_STATUS_REG), 2685 NCR5380_read(BUS_AND_STATUS_REG),
2639 NCR5380_read(STATUS_REG)); 2686 NCR5380_read(STATUS_REG));
@@ -2674,12 +2721,12 @@ int NCR5380_abort(Scsi_Cmnd *cmd)
2674#else 2721#else
2675 hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); 2722 hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
2676#endif 2723#endif
2724 maybe_release_dma_irq(instance);
2677 local_irq_restore(flags); 2725 local_irq_restore(flags);
2678 cmd->scsi_done(cmd); 2726 cmd->scsi_done(cmd);
2679 falcon_release_lock_if_possible(hostdata);
2680 return SUCCESS; 2727 return SUCCESS;
2681 } else { 2728 } else {
2682/* local_irq_restore(flags); */ 2729 local_irq_restore(flags);
2683 printk("scsi%d: abort of connected command failed!\n", HOSTNO); 2730 printk("scsi%d: abort of connected command failed!\n", HOSTNO);
2684 return FAILED; 2731 return FAILED;
2685 } 2732 }
@@ -2690,21 +2737,21 @@ int NCR5380_abort(Scsi_Cmnd *cmd)
2690 * Case 2 : If the command hasn't been issued yet, we simply remove it 2737 * Case 2 : If the command hasn't been issued yet, we simply remove it
2691 * from the issue queue. 2738 * from the issue queue.
2692 */ 2739 */
2693 for (prev = (Scsi_Cmnd **)&(hostdata->issue_queue), 2740 for (prev = (struct scsi_cmnd **)&(hostdata->issue_queue),
2694 tmp = (Scsi_Cmnd *)hostdata->issue_queue; 2741 tmp = (struct scsi_cmnd *)hostdata->issue_queue;
2695 tmp; prev = NEXTADDR(tmp), tmp = NEXT(tmp)) { 2742 tmp; prev = NEXTADDR(tmp), tmp = NEXT(tmp)) {
2696 if (cmd == tmp) { 2743 if (cmd == tmp) {
2697 REMOVE(5, *prev, tmp, NEXT(tmp)); 2744 REMOVE(5, *prev, tmp, NEXT(tmp));
2698 (*prev) = NEXT(tmp); 2745 (*prev) = NEXT(tmp);
2699 SET_NEXT(tmp, NULL); 2746 SET_NEXT(tmp, NULL);
2700 tmp->result = DID_ABORT << 16; 2747 tmp->result = DID_ABORT << 16;
2748 maybe_release_dma_irq(instance);
2701 local_irq_restore(flags); 2749 local_irq_restore(flags);
2702 dprintk(NDEBUG_ABORT, "scsi%d: abort removed command from issue queue.\n", 2750 dprintk(NDEBUG_ABORT, "scsi%d: abort removed command from issue queue.\n",
2703 HOSTNO); 2751 HOSTNO);
2704 /* Tagged queuing note: no tag to free here, hasn't been assigned 2752 /* Tagged queuing note: no tag to free here, hasn't been assigned
2705 * yet... */ 2753 * yet... */
2706 tmp->scsi_done(tmp); 2754 tmp->scsi_done(tmp);
2707 falcon_release_lock_if_possible(hostdata);
2708 return SUCCESS; 2755 return SUCCESS;
2709 } 2756 }
2710 } 2757 }
@@ -2751,13 +2798,13 @@ int NCR5380_abort(Scsi_Cmnd *cmd)
2751 * it from the disconnected queue. 2798 * it from the disconnected queue.
2752 */ 2799 */
2753 2800
2754 for (tmp = (Scsi_Cmnd *) hostdata->disconnected_queue; tmp; 2801 for (tmp = (struct scsi_cmnd *) hostdata->disconnected_queue; tmp;
2755 tmp = NEXT(tmp)) { 2802 tmp = NEXT(tmp)) {
2756 if (cmd == tmp) { 2803 if (cmd == tmp) {
2757 local_irq_restore(flags); 2804 local_irq_restore(flags);
2758 dprintk(NDEBUG_ABORT, "scsi%d: aborting disconnected command.\n", HOSTNO); 2805 dprintk(NDEBUG_ABORT, "scsi%d: aborting disconnected command.\n", HOSTNO);
2759 2806
2760 if (NCR5380_select(instance, cmd, (int)cmd->tag)) 2807 if (NCR5380_select(instance, cmd))
2761 return FAILED; 2808 return FAILED;
2762 2809
2763 dprintk(NDEBUG_ABORT, "scsi%d: nexus reestablished.\n", HOSTNO); 2810 dprintk(NDEBUG_ABORT, "scsi%d: nexus reestablished.\n", HOSTNO);
@@ -2765,8 +2812,8 @@ int NCR5380_abort(Scsi_Cmnd *cmd)
2765 do_abort(instance); 2812 do_abort(instance);
2766 2813
2767 local_irq_save(flags); 2814 local_irq_save(flags);
2768 for (prev = (Scsi_Cmnd **)&(hostdata->disconnected_queue), 2815 for (prev = (struct scsi_cmnd **)&(hostdata->disconnected_queue),
2769 tmp = (Scsi_Cmnd *)hostdata->disconnected_queue; 2816 tmp = (struct scsi_cmnd *)hostdata->disconnected_queue;
2770 tmp; prev = NEXTADDR(tmp), tmp = NEXT(tmp)) { 2817 tmp; prev = NEXTADDR(tmp), tmp = NEXT(tmp)) {
2771 if (cmd == tmp) { 2818 if (cmd == tmp) {
2772 REMOVE(5, *prev, tmp, NEXT(tmp)); 2819 REMOVE(5, *prev, tmp, NEXT(tmp));
@@ -2782,15 +2829,22 @@ int NCR5380_abort(Scsi_Cmnd *cmd)
2782#else 2829#else
2783 hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); 2830 hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
2784#endif 2831#endif
2832 maybe_release_dma_irq(instance);
2785 local_irq_restore(flags); 2833 local_irq_restore(flags);
2786 tmp->scsi_done(tmp); 2834 tmp->scsi_done(tmp);
2787 falcon_release_lock_if_possible(hostdata);
2788 return SUCCESS; 2835 return SUCCESS;
2789 } 2836 }
2790 } 2837 }
2791 } 2838 }
2792 } 2839 }
2793 2840
2841 /* Maybe it is sufficient just to release the ST-DMA lock... (if
2842 * possible at all) At least, we should check if the lock could be
2843 * released after the abort, in case it is kept due to some bug.
2844 */
2845 maybe_release_dma_irq(instance);
2846 local_irq_restore(flags);
2847
2794 /* 2848 /*
2795 * Case 5 : If we reached this point, the command was not found in any of 2849 * Case 5 : If we reached this point, the command was not found in any of
2796 * the queues. 2850 * the queues.
@@ -2801,21 +2855,14 @@ int NCR5380_abort(Scsi_Cmnd *cmd)
2801 * broke. 2855 * broke.
2802 */ 2856 */
2803 2857
2804 local_irq_restore(flags);
2805 printk(KERN_INFO "scsi%d: warning : SCSI command probably completed successfully before abortion\n", HOSTNO); 2858 printk(KERN_INFO "scsi%d: warning : SCSI command probably completed successfully before abortion\n", HOSTNO);
2806 2859
2807 /* Maybe it is sufficient just to release the ST-DMA lock... (if
2808 * possible at all) At least, we should check if the lock could be
2809 * released after the abort, in case it is kept due to some bug.
2810 */
2811 falcon_release_lock_if_possible(hostdata);
2812
2813 return FAILED; 2860 return FAILED;
2814} 2861}
2815 2862
2816 2863
2817/* 2864/*
2818 * Function : int NCR5380_reset (Scsi_Cmnd *cmd) 2865 * Function : int NCR5380_reset (struct scsi_cmnd *cmd)
2819 * 2866 *
2820 * Purpose : reset the SCSI bus. 2867 * Purpose : reset the SCSI bus.
2821 * 2868 *
@@ -2823,20 +2870,14 @@ int NCR5380_abort(Scsi_Cmnd *cmd)
2823 * 2870 *
2824 */ 2871 */
2825 2872
2826static int NCR5380_bus_reset(Scsi_Cmnd *cmd) 2873static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
2827{ 2874{
2828 SETUP_HOSTDATA(cmd->device->host); 2875 struct Scsi_Host *instance = cmd->device->host;
2876 struct NCR5380_hostdata *hostdata = shost_priv(instance);
2829 int i; 2877 int i;
2830 unsigned long flags; 2878 unsigned long flags;
2831#if defined(RESET_RUN_DONE)
2832 Scsi_Cmnd *connected, *disconnected_queue;
2833#endif
2834
2835 if (!IS_A_TT() && !falcon_got_lock)
2836 printk(KERN_ERR "scsi%d: !!BINGO!! Falcon has no lock in NCR5380_reset\n",
2837 H_NO(cmd));
2838 2879
2839 NCR5380_print_status(cmd->device->host); 2880 NCR5380_print_status(instance);
2840 2881
2841 /* get in phase */ 2882 /* get in phase */
2842 NCR5380_write(TARGET_COMMAND_REG, 2883 NCR5380_write(TARGET_COMMAND_REG,
@@ -2853,89 +2894,6 @@ static int NCR5380_bus_reset(Scsi_Cmnd *cmd)
2853 * through anymore ... */ 2894 * through anymore ... */
2854 (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG); 2895 (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
2855 2896
2856 /* MSch 20140115 - looking at the generic NCR5380 driver, all of this
2857 * should go.
2858 * Catch-22: if we don't clear all queues, the SCSI driver lock will
2859 * not be reset by atari_scsi_reset()!
2860 */
2861
2862#if defined(RESET_RUN_DONE)
2863 /* XXX Should now be done by midlevel code, but it's broken XXX */
2864 /* XXX see below XXX */
2865
2866 /* MSch: old-style reset: actually abort all command processing here */
2867
2868 /* After the reset, there are no more connected or disconnected commands
2869 * and no busy units; to avoid problems with re-inserting the commands
2870 * into the issue_queue (via scsi_done()), the aborted commands are
2871 * remembered in local variables first.
2872 */
2873 local_irq_save(flags);
2874 connected = (Scsi_Cmnd *)hostdata->connected;
2875 hostdata->connected = NULL;
2876 disconnected_queue = (Scsi_Cmnd *)hostdata->disconnected_queue;
2877 hostdata->disconnected_queue = NULL;
2878#ifdef SUPPORT_TAGS
2879 free_all_tags();
2880#endif
2881 for (i = 0; i < 8; ++i)
2882 hostdata->busy[i] = 0;
2883#ifdef REAL_DMA
2884 hostdata->dma_len = 0;
2885#endif
2886 local_irq_restore(flags);
2887
2888 /* In order to tell the mid-level code which commands were aborted,
2889 * set the command status to DID_RESET and call scsi_done() !!!
2890 * This ultimately aborts processing of these commands in the mid-level.
2891 */
2892
2893 if ((cmd = connected)) {
2894 dprintk(NDEBUG_ABORT, "scsi%d: reset aborted a connected command\n", H_NO(cmd));
2895 cmd->result = (cmd->result & 0xffff) | (DID_RESET << 16);
2896 cmd->scsi_done(cmd);
2897 }
2898
2899 for (i = 0; (cmd = disconnected_queue); ++i) {
2900 disconnected_queue = NEXT(cmd);
2901 SET_NEXT(cmd, NULL);
2902 cmd->result = (cmd->result & 0xffff) | (DID_RESET << 16);
2903 cmd->scsi_done(cmd);
2904 }
2905 if (i > 0)
2906 dprintk(NDEBUG_ABORT, "scsi: reset aborted %d disconnected command(s)\n", i);
2907
2908 /* The Falcon lock should be released after a reset...
2909 */
2910 /* ++guenther: moved to atari_scsi_reset(), to prevent a race between
2911 * unlocking and enabling dma interrupt.
2912 */
2913/* falcon_release_lock_if_possible( hostdata );*/
2914
2915 /* since all commands have been explicitly terminated, we need to tell
2916 * the midlevel code that the reset was SUCCESSFUL, and there is no
2917 * need to 'wake up' the commands by a request_sense
2918 */
2919 return SUCCESS;
2920#else /* 1 */
2921
2922 /* MSch: new-style reset handling: let the mid-level do what it can */
2923
2924 /* ++guenther: MID-LEVEL IS STILL BROKEN.
2925 * Mid-level is supposed to requeue all commands that were active on the
2926 * various low-level queues. In fact it does this, but that's not enough
2927 * because all these commands are subject to timeout. And if a timeout
2928 * happens for any removed command, *_abort() is called but all queues
2929 * are now empty. Abort then gives up the falcon lock, which is fatal,
2930 * since the mid-level will queue more commands and must have the lock
2931 * (it's all happening inside timer interrupt handler!!).
2932 * Even worse, abort will return NOT_RUNNING for all those commands not
2933 * on any queue, so they won't be retried ...
2934 *
2935 * Conclusion: either scsi.c disables timeout for all resetted commands
2936 * immediately, or we lose! As of linux-2.0.20 it doesn't.
2937 */
2938
2939 /* After the reset, there are no more connected or disconnected commands 2897 /* After the reset, there are no more connected or disconnected commands
2940 * and no busy units; so clear the low-level status here to avoid 2898 * and no busy units; so clear the low-level status here to avoid
2941 * conflicts when the mid-level code tries to wake up the affected 2899 * conflicts when the mid-level code tries to wake up the affected
@@ -2954,16 +2912,16 @@ static int NCR5380_bus_reset(Scsi_Cmnd *cmd)
2954 hostdata->connected = NULL; 2912 hostdata->connected = NULL;
2955 hostdata->disconnected_queue = NULL; 2913 hostdata->disconnected_queue = NULL;
2956#ifdef SUPPORT_TAGS 2914#ifdef SUPPORT_TAGS
2957 free_all_tags(); 2915 free_all_tags(hostdata);
2958#endif 2916#endif
2959 for (i = 0; i < 8; ++i) 2917 for (i = 0; i < 8; ++i)
2960 hostdata->busy[i] = 0; 2918 hostdata->busy[i] = 0;
2961#ifdef REAL_DMA 2919#ifdef REAL_DMA
2962 hostdata->dma_len = 0; 2920 hostdata->dma_len = 0;
2963#endif 2921#endif
2922
2923 maybe_release_dma_irq(instance);
2964 local_irq_restore(flags); 2924 local_irq_restore(flags);
2965 2925
2966 /* we did no complete reset of all commands, so a wakeup is required */
2967 return SUCCESS; 2926 return SUCCESS;
2968#endif /* 1 */
2969} 2927}
diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c
index b522134528d6..d1c37a386947 100644
--- a/drivers/scsi/atari_scsi.c
+++ b/drivers/scsi/atari_scsi.c
@@ -64,45 +64,57 @@
64/**************************************************************************/ 64/**************************************************************************/
65 65
66 66
67
68#include <linux/module.h> 67#include <linux/module.h>
69
70#define AUTOSENSE
71/* For the Atari version, use only polled IO or REAL_DMA */
72#define REAL_DMA
73/* Support tagged queuing? (on devices that are able to... :-) */
74#define SUPPORT_TAGS
75#define MAX_TAGS 32
76
77#include <linux/types.h> 68#include <linux/types.h>
78#include <linux/stddef.h>
79#include <linux/ctype.h>
80#include <linux/delay.h> 69#include <linux/delay.h>
81#include <linux/mm.h>
82#include <linux/blkdev.h> 70#include <linux/blkdev.h>
83#include <linux/interrupt.h> 71#include <linux/interrupt.h>
84#include <linux/init.h> 72#include <linux/init.h>
85#include <linux/nvram.h> 73#include <linux/nvram.h>
86#include <linux/bitops.h> 74#include <linux/bitops.h>
87#include <linux/wait.h> 75#include <linux/wait.h>
76#include <linux/platform_device.h>
88 77
89#include <asm/setup.h> 78#include <asm/setup.h>
90#include <asm/atarihw.h> 79#include <asm/atarihw.h>
91#include <asm/atariints.h> 80#include <asm/atariints.h>
92#include <asm/page.h>
93#include <asm/pgtable.h>
94#include <asm/irq.h>
95#include <asm/traps.h>
96
97#include "scsi.h"
98#include <scsi/scsi_host.h>
99#include "atari_scsi.h"
100#include "NCR5380.h"
101#include <asm/atari_stdma.h> 81#include <asm/atari_stdma.h>
102#include <asm/atari_stram.h> 82#include <asm/atari_stram.h>
103#include <asm/io.h> 83#include <asm/io.h>
104 84
105#include <linux/stat.h> 85#include <scsi/scsi_host.h>
86
87/* Definitions for the core NCR5380 driver. */
88
89#define REAL_DMA
90#define SUPPORT_TAGS
91#define MAX_TAGS 32
92#define DMA_MIN_SIZE 32
93
94#define NCR5380_implementation_fields /* none */
95
96#define NCR5380_read(reg) atari_scsi_reg_read(reg)
97#define NCR5380_write(reg, value) atari_scsi_reg_write(reg, value)
98
99#define NCR5380_queue_command atari_scsi_queue_command
100#define NCR5380_abort atari_scsi_abort
101#define NCR5380_show_info atari_scsi_show_info
102#define NCR5380_info atari_scsi_info
103
104#define NCR5380_dma_read_setup(instance, data, count) \
105 atari_scsi_dma_setup(instance, data, count, 0)
106#define NCR5380_dma_write_setup(instance, data, count) \
107 atari_scsi_dma_setup(instance, data, count, 1)
108#define NCR5380_dma_residual(instance) \
109 atari_scsi_dma_residual(instance)
110#define NCR5380_dma_xfer_len(instance, cmd, phase) \
111 atari_dma_xfer_len(cmd->SCp.this_residual, cmd, !((phase) & SR_IO))
112
113#define NCR5380_acquire_dma_irq(instance) falcon_get_lock(instance)
114#define NCR5380_release_dma_irq(instance) falcon_release_lock()
115
116#include "NCR5380.h"
117
106 118
107#define IS_A_TT() ATARIHW_PRESENT(TT_SCSI) 119#define IS_A_TT() ATARIHW_PRESENT(TT_SCSI)
108 120
@@ -149,23 +161,6 @@ static inline unsigned long SCSI_DMA_GETADR(void)
149 return adr; 161 return adr;
150} 162}
151 163
152static inline void ENABLE_IRQ(void)
153{
154 if (IS_A_TT())
155 atari_enable_irq(IRQ_TT_MFP_SCSI);
156 else
157 atari_enable_irq(IRQ_MFP_FSCSI);
158}
159
160static inline void DISABLE_IRQ(void)
161{
162 if (IS_A_TT())
163 atari_disable_irq(IRQ_TT_MFP_SCSI);
164 else
165 atari_disable_irq(IRQ_MFP_FSCSI);
166}
167
168
169#define HOSTDATA_DMALEN (((struct NCR5380_hostdata *) \ 164#define HOSTDATA_DMALEN (((struct NCR5380_hostdata *) \
170 (atari_scsi_host->hostdata))->dma_len) 165 (atari_scsi_host->hostdata))->dma_len)
171 166
@@ -178,30 +173,9 @@ static inline void DISABLE_IRQ(void)
178#define AFTER_RESET_DELAY (5*HZ/2) 173#define AFTER_RESET_DELAY (5*HZ/2)
179#endif 174#endif
180 175
181/***************************** Prototypes *****************************/
182
183#ifdef REAL_DMA 176#ifdef REAL_DMA
184static int scsi_dma_is_ignored_buserr(unsigned char dma_stat);
185static void atari_scsi_fetch_restbytes(void); 177static void atari_scsi_fetch_restbytes(void);
186static long atari_scsi_dma_residual(struct Scsi_Host *instance);
187static int falcon_classify_cmd(Scsi_Cmnd *cmd);
188static unsigned long atari_dma_xfer_len(unsigned long wanted_len,
189 Scsi_Cmnd *cmd, int write_flag);
190#endif
191static irqreturn_t scsi_tt_intr(int irq, void *dummy);
192static irqreturn_t scsi_falcon_intr(int irq, void *dummy);
193static void falcon_release_lock_if_possible(struct NCR5380_hostdata *hostdata);
194static void falcon_get_lock(void);
195#ifdef CONFIG_ATARI_SCSI_RESET_BOOT
196static void atari_scsi_reset_boot(void);
197#endif 178#endif
198static unsigned char atari_scsi_tt_reg_read(unsigned char reg);
199static void atari_scsi_tt_reg_write(unsigned char reg, unsigned char value);
200static unsigned char atari_scsi_falcon_reg_read(unsigned char reg);
201static void atari_scsi_falcon_reg_write(unsigned char reg, unsigned char value);
202
203/************************* End of Prototypes **************************/
204
205 179
206static struct Scsi_Host *atari_scsi_host; 180static struct Scsi_Host *atari_scsi_host;
207static unsigned char (*atari_scsi_reg_read)(unsigned char reg); 181static unsigned char (*atari_scsi_reg_read)(unsigned char reg);
@@ -226,8 +200,6 @@ static char *atari_dma_orig_addr;
226/* mask for address bits that can't be used with the ST-DMA */ 200/* mask for address bits that can't be used with the ST-DMA */
227static unsigned long atari_dma_stram_mask; 201static unsigned long atari_dma_stram_mask;
228#define STRAM_ADDR(a) (((a) & atari_dma_stram_mask) == 0) 202#define STRAM_ADDR(a) (((a) & atari_dma_stram_mask) == 0)
229/* number of bytes to cut from a transfer to handle NCR overruns */
230static int atari_read_overruns;
231#endif 203#endif
232 204
233static int setup_can_queue = -1; 205static int setup_can_queue = -1;
@@ -386,10 +358,6 @@ static irqreturn_t scsi_tt_intr(int irq, void *dummy)
386 358
387 NCR5380_intr(irq, dummy); 359 NCR5380_intr(irq, dummy);
388 360
389#if 0
390 /* To be sure the int is not masked */
391 atari_enable_irq(IRQ_TT_MFP_SCSI);
392#endif
393 return IRQ_HANDLED; 361 return IRQ_HANDLED;
394} 362}
395 363
@@ -480,257 +448,35 @@ static void atari_scsi_fetch_restbytes(void)
480#endif /* REAL_DMA */ 448#endif /* REAL_DMA */
481 449
482 450
483static int falcon_got_lock = 0;
484static DECLARE_WAIT_QUEUE_HEAD(falcon_fairness_wait);
485static int falcon_trying_lock = 0;
486static DECLARE_WAIT_QUEUE_HEAD(falcon_try_wait);
487static int falcon_dont_release = 0;
488
489/* This function releases the lock on the DMA chip if there is no 451/* This function releases the lock on the DMA chip if there is no
490 * connected command and the disconnected queue is empty. On 452 * connected command and the disconnected queue is empty.
491 * releasing, instances of falcon_get_lock are awoken, that put
492 * themselves to sleep for fairness. They can now try to get the lock
493 * again (but others waiting longer more probably will win).
494 */ 453 */
495 454
496static void falcon_release_lock_if_possible(struct NCR5380_hostdata *hostdata) 455static void falcon_release_lock(void)
497{ 456{
498 unsigned long flags;
499
500 if (IS_A_TT()) 457 if (IS_A_TT())
501 return; 458 return;
502 459
503 local_irq_save(flags); 460 if (stdma_is_locked_by(scsi_falcon_intr))
504
505 if (falcon_got_lock && !hostdata->disconnected_queue &&
506 !hostdata->issue_queue && !hostdata->connected) {
507
508 if (falcon_dont_release) {
509#if 0
510 printk("WARNING: Lock release not allowed. Ignored\n");
511#endif
512 local_irq_restore(flags);
513 return;
514 }
515 falcon_got_lock = 0;
516 stdma_release(); 461 stdma_release();
517 wake_up(&falcon_fairness_wait);
518 }
519
520 local_irq_restore(flags);
521} 462}
522 463
523/* This function manages the locking of the ST-DMA. 464/* This function manages the locking of the ST-DMA.
524 * If the DMA isn't locked already for SCSI, it tries to lock it by 465 * If the DMA isn't locked already for SCSI, it tries to lock it by
525 * calling stdma_lock(). But if the DMA is locked by the SCSI code and 466 * calling stdma_lock(). But if the DMA is locked by the SCSI code and
526 * there are other drivers waiting for the chip, we do not issue the 467 * there are other drivers waiting for the chip, we do not issue the
527 * command immediately but wait on 'falcon_fairness_queue'. We will be 468 * command immediately but tell the SCSI mid-layer to defer.
528 * waked up when the DMA is unlocked by some SCSI interrupt. After that
529 * we try to get the lock again.
530 * But we must be prepared that more than one instance of
531 * falcon_get_lock() is waiting on the fairness queue. They should not
532 * try all at once to call stdma_lock(), one is enough! For that, the
533 * first one sets 'falcon_trying_lock', others that see that variable
534 * set wait on the queue 'falcon_try_wait'.
535 * Complicated, complicated.... Sigh...
536 */ 469 */
537 470
538static void falcon_get_lock(void) 471static int falcon_get_lock(struct Scsi_Host *instance)
539{ 472{
540 unsigned long flags;
541
542 if (IS_A_TT()) 473 if (IS_A_TT())
543 return; 474 return 1;
544
545 local_irq_save(flags);
546
547 wait_event_cmd(falcon_fairness_wait,
548 in_interrupt() || !falcon_got_lock || !stdma_others_waiting(),
549 local_irq_restore(flags),
550 local_irq_save(flags));
551
552 while (!falcon_got_lock) {
553 if (in_irq())
554 panic("Falcon SCSI hasn't ST-DMA lock in interrupt");
555 if (!falcon_trying_lock) {
556 falcon_trying_lock = 1;
557 stdma_lock(scsi_falcon_intr, NULL);
558 falcon_got_lock = 1;
559 falcon_trying_lock = 0;
560 wake_up(&falcon_try_wait);
561 } else {
562 wait_event_cmd(falcon_try_wait,
563 falcon_got_lock && !falcon_trying_lock,
564 local_irq_restore(flags),
565 local_irq_save(flags));
566 }
567 }
568
569 local_irq_restore(flags);
570 if (!falcon_got_lock)
571 panic("Falcon SCSI: someone stole the lock :-(\n");
572}
573
574
575static int __init atari_scsi_detect(struct scsi_host_template *host)
576{
577 static int called = 0;
578 struct Scsi_Host *instance;
579
580 if (!MACH_IS_ATARI ||
581 (!ATARIHW_PRESENT(ST_SCSI) && !ATARIHW_PRESENT(TT_SCSI)) ||
582 called)
583 return 0;
584
585 host->proc_name = "Atari";
586
587 atari_scsi_reg_read = IS_A_TT() ? atari_scsi_tt_reg_read :
588 atari_scsi_falcon_reg_read;
589 atari_scsi_reg_write = IS_A_TT() ? atari_scsi_tt_reg_write :
590 atari_scsi_falcon_reg_write;
591
592 /* setup variables */
593 host->can_queue =
594 (setup_can_queue > 0) ? setup_can_queue :
595 IS_A_TT() ? ATARI_TT_CAN_QUEUE : ATARI_FALCON_CAN_QUEUE;
596 host->cmd_per_lun =
597 (setup_cmd_per_lun > 0) ? setup_cmd_per_lun :
598 IS_A_TT() ? ATARI_TT_CMD_PER_LUN : ATARI_FALCON_CMD_PER_LUN;
599 /* Force sg_tablesize to 0 on a Falcon! */
600 host->sg_tablesize =
601 !IS_A_TT() ? ATARI_FALCON_SG_TABLESIZE :
602 (setup_sg_tablesize >= 0) ? setup_sg_tablesize : ATARI_TT_SG_TABLESIZE;
603
604 if (setup_hostid >= 0)
605 host->this_id = setup_hostid;
606 else {
607 /* use 7 as default */
608 host->this_id = 7;
609 /* Test if a host id is set in the NVRam */
610 if (ATARIHW_PRESENT(TT_CLK) && nvram_check_checksum()) {
611 unsigned char b = nvram_read_byte( 14 );
612 /* Arbitration enabled? (for TOS) If yes, use configured host ID */
613 if (b & 0x80)
614 host->this_id = b & 7;
615 }
616 }
617 475
618#ifdef SUPPORT_TAGS 476 if (in_interrupt())
619 if (setup_use_tagged_queuing < 0) 477 return stdma_try_lock(scsi_falcon_intr, instance);
620 setup_use_tagged_queuing = DEFAULT_USE_TAGGED_QUEUING;
621#endif
622#ifdef REAL_DMA
623 /* If running on a Falcon and if there's TT-Ram (i.e., more than one
624 * memory block, since there's always ST-Ram in a Falcon), then allocate a
625 * STRAM_BUFFER_SIZE byte dribble buffer for transfers from/to alternative
626 * Ram.
627 */
628 if (MACH_IS_ATARI && ATARIHW_PRESENT(ST_SCSI) &&
629 !ATARIHW_PRESENT(EXTD_DMA) && m68k_num_memory > 1) {
630 atari_dma_buffer = atari_stram_alloc(STRAM_BUFFER_SIZE, "SCSI");
631 if (!atari_dma_buffer) {
632 printk(KERN_ERR "atari_scsi_detect: can't allocate ST-RAM "
633 "double buffer\n");
634 return 0;
635 }
636 atari_dma_phys_buffer = atari_stram_to_phys(atari_dma_buffer);
637 atari_dma_orig_addr = 0;
638 }
639#endif
640 instance = scsi_register(host, sizeof(struct NCR5380_hostdata));
641 if (instance == NULL) {
642 atari_stram_free(atari_dma_buffer);
643 atari_dma_buffer = 0;
644 return 0;
645 }
646 atari_scsi_host = instance;
647 /*
648 * Set irq to 0, to avoid that the mid-level code disables our interrupt
649 * during queue_command calls. This is completely unnecessary, and even
650 * worse causes bad problems on the Falcon, where the int is shared with
651 * IDE and floppy!
652 */
653 instance->irq = 0;
654
655#ifdef CONFIG_ATARI_SCSI_RESET_BOOT
656 atari_scsi_reset_boot();
657#endif
658 NCR5380_init(instance, 0);
659
660 if (IS_A_TT()) {
661
662 /* This int is actually "pseudo-slow", i.e. it acts like a slow
663 * interrupt after having cleared the pending flag for the DMA
664 * interrupt. */
665 if (request_irq(IRQ_TT_MFP_SCSI, scsi_tt_intr, IRQ_TYPE_SLOW,
666 "SCSI NCR5380", instance)) {
667 printk(KERN_ERR "atari_scsi_detect: cannot allocate irq %d, aborting",IRQ_TT_MFP_SCSI);
668 scsi_unregister(atari_scsi_host);
669 atari_stram_free(atari_dma_buffer);
670 atari_dma_buffer = 0;
671 return 0;
672 }
673 tt_mfp.active_edge |= 0x80; /* SCSI int on L->H */
674#ifdef REAL_DMA
675 tt_scsi_dma.dma_ctrl = 0;
676 atari_dma_residual = 0;
677
678 if (MACH_IS_MEDUSA) {
679 /* While the read overruns (described by Drew Eckhardt in
680 * NCR5380.c) never happened on TTs, they do in fact on the Medusa
681 * (This was the cause why SCSI didn't work right for so long
682 * there.) Since handling the overruns slows down a bit, I turned
683 * the #ifdef's into a runtime condition.
684 *
685 * In principle it should be sufficient to do max. 1 byte with
686 * PIO, but there is another problem on the Medusa with the DMA
687 * rest data register. So 'atari_read_overruns' is currently set
688 * to 4 to avoid having transfers that aren't a multiple of 4. If
689 * the rest data bug is fixed, this can be lowered to 1.
690 */
691 atari_read_overruns = 4;
692 }
693#endif /*REAL_DMA*/
694 } else { /* ! IS_A_TT */
695
696 /* Nothing to do for the interrupt: the ST-DMA is initialized
697 * already by atari_init_INTS()
698 */
699
700#ifdef REAL_DMA
701 atari_dma_residual = 0;
702 atari_dma_active = 0;
703 atari_dma_stram_mask = (ATARIHW_PRESENT(EXTD_DMA) ? 0x00000000
704 : 0xff000000);
705#endif
706 }
707 478
708 printk(KERN_INFO "scsi%d: options CAN_QUEUE=%d CMD_PER_LUN=%d SCAT-GAT=%d " 479 stdma_lock(scsi_falcon_intr, instance);
709#ifdef SUPPORT_TAGS
710 "TAGGED-QUEUING=%s "
711#endif
712 "HOSTID=%d",
713 instance->host_no, instance->hostt->can_queue,
714 instance->hostt->cmd_per_lun,
715 instance->hostt->sg_tablesize,
716#ifdef SUPPORT_TAGS
717 setup_use_tagged_queuing ? "yes" : "no",
718#endif
719 instance->hostt->this_id );
720 NCR5380_print_options(instance);
721 printk("\n");
722
723 called = 1;
724 return 1;
725}
726
727static int atari_scsi_release(struct Scsi_Host *sh)
728{
729 if (IS_A_TT())
730 free_irq(IRQ_TT_MFP_SCSI, sh);
731 if (atari_dma_buffer)
732 atari_stram_free(atari_dma_buffer);
733 NCR5380_exit(sh);
734 return 1; 480 return 1;
735} 481}
736 482
@@ -739,7 +485,7 @@ static int __init atari_scsi_setup(char *str)
739{ 485{
740 /* Format of atascsi parameter is: 486 /* Format of atascsi parameter is:
741 * atascsi=<can_queue>,<cmd_per_lun>,<sg_tablesize>,<hostid>,<use_tags> 487 * atascsi=<can_queue>,<cmd_per_lun>,<sg_tablesize>,<hostid>,<use_tags>
742 * Defaults depend on TT or Falcon, hostid determined at run time. 488 * Defaults depend on TT or Falcon, determined at run time.
743 * Negative values mean don't change. 489 * Negative values mean don't change.
744 */ 490 */
745 int ints[6]; 491 int ints[6];
@@ -750,36 +496,17 @@ static int __init atari_scsi_setup(char *str)
750 printk("atari_scsi_setup: no arguments!\n"); 496 printk("atari_scsi_setup: no arguments!\n");
751 return 0; 497 return 0;
752 } 498 }
753 499 if (ints[0] >= 1)
754 if (ints[0] >= 1) { 500 setup_can_queue = ints[1];
755 if (ints[1] > 0) 501 if (ints[0] >= 2)
756 /* no limits on this, just > 0 */ 502 setup_cmd_per_lun = ints[2];
757 setup_can_queue = ints[1]; 503 if (ints[0] >= 3)
758 } 504 setup_sg_tablesize = ints[3];
759 if (ints[0] >= 2) { 505 if (ints[0] >= 4)
760 if (ints[2] > 0) 506 setup_hostid = ints[4];
761 setup_cmd_per_lun = ints[2];
762 }
763 if (ints[0] >= 3) {
764 if (ints[3] >= 0) {
765 setup_sg_tablesize = ints[3];
766 /* Must be <= SG_ALL (255) */
767 if (setup_sg_tablesize > SG_ALL)
768 setup_sg_tablesize = SG_ALL;
769 }
770 }
771 if (ints[0] >= 4) {
772 /* Must be between 0 and 7 */
773 if (ints[4] >= 0 && ints[4] <= 7)
774 setup_hostid = ints[4];
775 else if (ints[4] > 7)
776 printk("atari_scsi_setup: invalid host ID %d !\n", ints[4]);
777 }
778#ifdef SUPPORT_TAGS 507#ifdef SUPPORT_TAGS
779 if (ints[0] >= 5) { 508 if (ints[0] >= 5)
780 if (ints[5] >= 0) 509 setup_use_tagged_queuing = ints[5];
781 setup_use_tagged_queuing = !!ints[5];
782 }
783#endif 510#endif
784 511
785 return 1; 512 return 1;
@@ -788,45 +515,6 @@ static int __init atari_scsi_setup(char *str)
788__setup("atascsi=", atari_scsi_setup); 515__setup("atascsi=", atari_scsi_setup);
789#endif /* !MODULE */ 516#endif /* !MODULE */
790 517
791static int atari_scsi_bus_reset(Scsi_Cmnd *cmd)
792{
793 int rv;
794 struct NCR5380_hostdata *hostdata =
795 (struct NCR5380_hostdata *)cmd->device->host->hostdata;
796
797 /* For doing the reset, SCSI interrupts must be disabled first,
798 * since the 5380 raises its IRQ line while _RST is active and we
799 * can't disable interrupts completely, since we need the timer.
800 */
801 /* And abort a maybe active DMA transfer */
802 if (IS_A_TT()) {
803 atari_turnoff_irq(IRQ_TT_MFP_SCSI);
804#ifdef REAL_DMA
805 tt_scsi_dma.dma_ctrl = 0;
806#endif /* REAL_DMA */
807 } else {
808 atari_turnoff_irq(IRQ_MFP_FSCSI);
809#ifdef REAL_DMA
810 st_dma.dma_mode_status = 0x90;
811 atari_dma_active = 0;
812 atari_dma_orig_addr = NULL;
813#endif /* REAL_DMA */
814 }
815
816 rv = NCR5380_bus_reset(cmd);
817
818 /* Re-enable ints */
819 if (IS_A_TT()) {
820 atari_turnon_irq(IRQ_TT_MFP_SCSI);
821 } else {
822 atari_turnon_irq(IRQ_MFP_FSCSI);
823 }
824 if (rv == SUCCESS)
825 falcon_release_lock_if_possible(hostdata);
826
827 return rv;
828}
829
830 518
831#ifdef CONFIG_ATARI_SCSI_RESET_BOOT 519#ifdef CONFIG_ATARI_SCSI_RESET_BOOT
832static void __init atari_scsi_reset_boot(void) 520static void __init atari_scsi_reset_boot(void)
@@ -860,15 +548,6 @@ static void __init atari_scsi_reset_boot(void)
860} 548}
861#endif 549#endif
862 550
863
864static const char *atari_scsi_info(struct Scsi_Host *host)
865{
866 /* atari_scsi_detect() is verbose enough... */
867 static const char string[] = "Atari native SCSI";
868 return string;
869}
870
871
872#if defined(REAL_DMA) 551#if defined(REAL_DMA)
873 552
874static unsigned long atari_scsi_dma_setup(struct Scsi_Host *instance, 553static unsigned long atari_scsi_dma_setup(struct Scsi_Host *instance,
@@ -949,7 +628,7 @@ static long atari_scsi_dma_residual(struct Scsi_Host *instance)
949#define CMD_SURELY_BYTE_MODE 1 628#define CMD_SURELY_BYTE_MODE 1
950#define CMD_MODE_UNKNOWN 2 629#define CMD_MODE_UNKNOWN 2
951 630
952static int falcon_classify_cmd(Scsi_Cmnd *cmd) 631static int falcon_classify_cmd(struct scsi_cmnd *cmd)
953{ 632{
954 unsigned char opcode = cmd->cmnd[0]; 633 unsigned char opcode = cmd->cmnd[0];
955 634
@@ -981,7 +660,7 @@ static int falcon_classify_cmd(Scsi_Cmnd *cmd)
981 */ 660 */
982 661
983static unsigned long atari_dma_xfer_len(unsigned long wanted_len, 662static unsigned long atari_dma_xfer_len(unsigned long wanted_len,
984 Scsi_Cmnd *cmd, int write_flag) 663 struct scsi_cmnd *cmd, int write_flag)
985{ 664{
986 unsigned long possible_len, limit; 665 unsigned long possible_len, limit;
987 666
@@ -1099,23 +778,247 @@ static void atari_scsi_falcon_reg_write(unsigned char reg, unsigned char value)
1099 778
1100#include "atari_NCR5380.c" 779#include "atari_NCR5380.c"
1101 780
1102static struct scsi_host_template driver_template = { 781static int atari_scsi_bus_reset(struct scsi_cmnd *cmd)
782{
783 int rv;
784 unsigned long flags;
785
786 local_irq_save(flags);
787
788#ifdef REAL_DMA
789 /* Abort a maybe active DMA transfer */
790 if (IS_A_TT()) {
791 tt_scsi_dma.dma_ctrl = 0;
792 } else {
793 st_dma.dma_mode_status = 0x90;
794 atari_dma_active = 0;
795 atari_dma_orig_addr = NULL;
796 }
797#endif
798
799 rv = NCR5380_bus_reset(cmd);
800
801 /* The 5380 raises its IRQ line while _RST is active but the ST DMA
802 * "lock" has been released so this interrupt may end up handled by
803 * floppy or IDE driver (if one of them holds the lock). The NCR5380
804 * interrupt flag has been cleared already.
805 */
806
807 local_irq_restore(flags);
808
809 return rv;
810}
811
812#define DRV_MODULE_NAME "atari_scsi"
813#define PFX DRV_MODULE_NAME ": "
814
815static struct scsi_host_template atari_scsi_template = {
816 .module = THIS_MODULE,
817 .proc_name = DRV_MODULE_NAME,
1103 .show_info = atari_scsi_show_info, 818 .show_info = atari_scsi_show_info,
1104 .name = "Atari native SCSI", 819 .name = "Atari native SCSI",
1105 .detect = atari_scsi_detect,
1106 .release = atari_scsi_release,
1107 .info = atari_scsi_info, 820 .info = atari_scsi_info,
1108 .queuecommand = atari_scsi_queue_command, 821 .queuecommand = atari_scsi_queue_command,
1109 .eh_abort_handler = atari_scsi_abort, 822 .eh_abort_handler = atari_scsi_abort,
1110 .eh_bus_reset_handler = atari_scsi_bus_reset, 823 .eh_bus_reset_handler = atari_scsi_bus_reset,
1111 .can_queue = 0, /* initialized at run-time */ 824 .this_id = 7,
1112 .this_id = 0, /* initialized at run-time */
1113 .sg_tablesize = 0, /* initialized at run-time */
1114 .cmd_per_lun = 0, /* initialized at run-time */
1115 .use_clustering = DISABLE_CLUSTERING 825 .use_clustering = DISABLE_CLUSTERING
1116}; 826};
1117 827
828static int __init atari_scsi_probe(struct platform_device *pdev)
829{
830 struct Scsi_Host *instance;
831 int error;
832 struct resource *irq;
833 int host_flags = 0;
834
835 irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
836 if (!irq)
837 return -ENODEV;
838
839 if (ATARIHW_PRESENT(TT_SCSI)) {
840 atari_scsi_reg_read = atari_scsi_tt_reg_read;
841 atari_scsi_reg_write = atari_scsi_tt_reg_write;
842 } else {
843 atari_scsi_reg_read = atari_scsi_falcon_reg_read;
844 atari_scsi_reg_write = atari_scsi_falcon_reg_write;
845 }
846
847 /* The values for CMD_PER_LUN and CAN_QUEUE are somehow arbitrary.
848 * Higher values should work, too; try it!
849 * (But cmd_per_lun costs memory!)
850 *
851 * But there seems to be a bug somewhere that requires CAN_QUEUE to be
852 * 2*CMD_PER_LUN. At least on a TT, no spurious timeouts seen since
853 * changed CMD_PER_LUN...
854 *
855 * Note: The Falcon currently uses 8/1 setting due to unsolved problems
856 * with cmd_per_lun != 1
857 */
858 if (ATARIHW_PRESENT(TT_SCSI)) {
859 atari_scsi_template.can_queue = 16;
860 atari_scsi_template.cmd_per_lun = 8;
861 atari_scsi_template.sg_tablesize = SG_ALL;
862 } else {
863 atari_scsi_template.can_queue = 8;
864 atari_scsi_template.cmd_per_lun = 1;
865 atari_scsi_template.sg_tablesize = SG_NONE;
866 }
867
868 if (setup_can_queue > 0)
869 atari_scsi_template.can_queue = setup_can_queue;
870
871 if (setup_cmd_per_lun > 0)
872 atari_scsi_template.cmd_per_lun = setup_cmd_per_lun;
873
874 /* Leave sg_tablesize at 0 on a Falcon! */
875 if (ATARIHW_PRESENT(TT_SCSI) && setup_sg_tablesize >= 0)
876 atari_scsi_template.sg_tablesize = setup_sg_tablesize;
877
878 if (setup_hostid >= 0) {
879 atari_scsi_template.this_id = setup_hostid & 7;
880 } else {
881 /* Test if a host id is set in the NVRam */
882 if (ATARIHW_PRESENT(TT_CLK) && nvram_check_checksum()) {
883 unsigned char b = nvram_read_byte(14);
884
885 /* Arbitration enabled? (for TOS)
886 * If yes, use configured host ID
887 */
888 if (b & 0x80)
889 atari_scsi_template.this_id = b & 7;
890 }
891 }
892
893
894#ifdef REAL_DMA
895 /* If running on a Falcon and if there's TT-Ram (i.e., more than one
896 * memory block, since there's always ST-Ram in a Falcon), then
897 * allocate a STRAM_BUFFER_SIZE byte dribble buffer for transfers
898 * from/to alternative Ram.
899 */
900 if (ATARIHW_PRESENT(ST_SCSI) && !ATARIHW_PRESENT(EXTD_DMA) &&
901 m68k_num_memory > 1) {
902 atari_dma_buffer = atari_stram_alloc(STRAM_BUFFER_SIZE, "SCSI");
903 if (!atari_dma_buffer) {
904 pr_err(PFX "can't allocate ST-RAM double buffer\n");
905 return -ENOMEM;
906 }
907 atari_dma_phys_buffer = atari_stram_to_phys(atari_dma_buffer);
908 atari_dma_orig_addr = 0;
909 }
910#endif
911
912 instance = scsi_host_alloc(&atari_scsi_template,
913 sizeof(struct NCR5380_hostdata));
914 if (!instance) {
915 error = -ENOMEM;
916 goto fail_alloc;
917 }
918 atari_scsi_host = instance;
919
920#ifdef CONFIG_ATARI_SCSI_RESET_BOOT
921 atari_scsi_reset_boot();
922#endif
923
924 instance->irq = irq->start;
925
926 host_flags |= IS_A_TT() ? 0 : FLAG_LATE_DMA_SETUP;
927
928#ifdef SUPPORT_TAGS
929 host_flags |= setup_use_tagged_queuing > 0 ? FLAG_TAGGED_QUEUING : 0;
930#endif
931
932 NCR5380_init(instance, host_flags);
933
934 if (IS_A_TT()) {
935 error = request_irq(instance->irq, scsi_tt_intr, 0,
936 "NCR5380", instance);
937 if (error) {
938 pr_err(PFX "request irq %d failed, aborting\n",
939 instance->irq);
940 goto fail_irq;
941 }
942 tt_mfp.active_edge |= 0x80; /* SCSI int on L->H */
943#ifdef REAL_DMA
944 tt_scsi_dma.dma_ctrl = 0;
945 atari_dma_residual = 0;
946
947 /* While the read overruns (described by Drew Eckhardt in
948 * NCR5380.c) never happened on TTs, they do in fact on the
949 * Medusa (This was the cause why SCSI didn't work right for
950 * so long there.) Since handling the overruns slows down
951 * a bit, I turned the #ifdef's into a runtime condition.
952 *
953 * In principle it should be sufficient to do max. 1 byte with
954 * PIO, but there is another problem on the Medusa with the DMA
955 * rest data register. So read_overruns is currently set
956 * to 4 to avoid having transfers that aren't a multiple of 4.
957 * If the rest data bug is fixed, this can be lowered to 1.
958 */
959 if (MACH_IS_MEDUSA) {
960 struct NCR5380_hostdata *hostdata =
961 shost_priv(instance);
962
963 hostdata->read_overruns = 4;
964 }
965#endif
966 } else {
967 /* Nothing to do for the interrupt: the ST-DMA is initialized
968 * already.
969 */
970#ifdef REAL_DMA
971 atari_dma_residual = 0;
972 atari_dma_active = 0;
973 atari_dma_stram_mask = (ATARIHW_PRESENT(EXTD_DMA) ? 0x00000000
974 : 0xff000000);
975#endif
976 }
977
978 error = scsi_add_host(instance, NULL);
979 if (error)
980 goto fail_host;
981
982 platform_set_drvdata(pdev, instance);
983
984 scsi_scan_host(instance);
985 return 0;
986
987fail_host:
988 if (IS_A_TT())
989 free_irq(instance->irq, instance);
990fail_irq:
991 NCR5380_exit(instance);
992 scsi_host_put(instance);
993fail_alloc:
994 if (atari_dma_buffer)
995 atari_stram_free(atari_dma_buffer);
996 return error;
997}
998
999static int __exit atari_scsi_remove(struct platform_device *pdev)
1000{
1001 struct Scsi_Host *instance = platform_get_drvdata(pdev);
1002
1003 scsi_remove_host(instance);
1004 if (IS_A_TT())
1005 free_irq(instance->irq, instance);
1006 NCR5380_exit(instance);
1007 scsi_host_put(instance);
1008 if (atari_dma_buffer)
1009 atari_stram_free(atari_dma_buffer);
1010 return 0;
1011}
1012
1013static struct platform_driver atari_scsi_driver = {
1014 .remove = __exit_p(atari_scsi_remove),
1015 .driver = {
1016 .name = DRV_MODULE_NAME,
1017 .owner = THIS_MODULE,
1018 },
1019};
1118 1020
1119#include "scsi_module.c" 1021module_platform_driver_probe(atari_scsi_driver, atari_scsi_probe);
1120 1022
1023MODULE_ALIAS("platform:" DRV_MODULE_NAME);
1121MODULE_LICENSE("GPL"); 1024MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/atari_scsi.h b/drivers/scsi/atari_scsi.h
deleted file mode 100644
index 3299d91d7336..000000000000
--- a/drivers/scsi/atari_scsi.h
+++ /dev/null
@@ -1,60 +0,0 @@
1/*
2 * atari_scsi.h -- Header file for the Atari native SCSI driver
3 *
4 * Copyright 1994 Roman Hodek <Roman.Hodek@informatik.uni-erlangen.de>
5 *
6 * (Loosely based on the work of Robert De Vries' team)
7 *
8 * This file is subject to the terms and conditions of the GNU General Public
9 * License. See the file COPYING in the main directory of this archive
10 * for more details.
11 *
12 */
13
14
15#ifndef ATARI_SCSI_H
16#define ATARI_SCSI_H
17
18/* (I_HAVE_OVERRUNS stuff removed) */
19
20#ifndef ASM
21/* The values for CMD_PER_LUN and CAN_QUEUE are somehow arbitrary. Higher
22 * values should work, too; try it! (but cmd_per_lun costs memory!) */
23
24/* But there seems to be a bug somewhere that requires CAN_QUEUE to be
25 * 2*CMD_PER_LUN. At least on a TT, no spurious timeouts seen since
26 * changed CMD_PER_LUN... */
27
28/* Note: The Falcon currently uses 8/1 setting due to unsolved problems with
29 * cmd_per_lun != 1 */
30
31#define ATARI_TT_CAN_QUEUE 16
32#define ATARI_TT_CMD_PER_LUN 8
33#define ATARI_TT_SG_TABLESIZE SG_ALL
34
35#define ATARI_FALCON_CAN_QUEUE 8
36#define ATARI_FALCON_CMD_PER_LUN 1
37#define ATARI_FALCON_SG_TABLESIZE SG_NONE
38
39#define DEFAULT_USE_TAGGED_QUEUING 0
40
41
42#define NCR5380_implementation_fields /* none */
43
44#define NCR5380_read(reg) atari_scsi_reg_read( reg )
45#define NCR5380_write(reg, value) atari_scsi_reg_write( reg, value )
46
47#define NCR5380_intr atari_scsi_intr
48#define NCR5380_queue_command atari_scsi_queue_command
49#define NCR5380_abort atari_scsi_abort
50#define NCR5380_show_info atari_scsi_show_info
51#define NCR5380_dma_read_setup(inst,d,c) atari_scsi_dma_setup (inst, d, c, 0)
52#define NCR5380_dma_write_setup(inst,d,c) atari_scsi_dma_setup (inst, d, c, 1)
53#define NCR5380_dma_residual(inst) atari_scsi_dma_residual( inst )
54#define NCR5380_dma_xfer_len(i,cmd,phase) \
55 atari_dma_xfer_len(cmd->SCp.this_residual,cmd,((phase) & SR_IO) ? 0 : 1)
56
57#endif /* ndef ASM */
58#endif /* ATARI_SCSI_H */
59
60
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 30d74a06b993..f3193406776c 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -556,7 +556,7 @@ static struct scsi_host_template beiscsi_sht = {
556 .name = "Emulex 10Gbe open-iscsi Initiator Driver", 556 .name = "Emulex 10Gbe open-iscsi Initiator Driver",
557 .proc_name = DRV_NAME, 557 .proc_name = DRV_NAME,
558 .queuecommand = iscsi_queuecommand, 558 .queuecommand = iscsi_queuecommand,
559 .change_queue_depth = iscsi_change_queue_depth, 559 .change_queue_depth = scsi_change_queue_depth,
560 .slave_configure = beiscsi_slave_configure, 560 .slave_configure = beiscsi_slave_configure,
561 .target_alloc = iscsi_target_alloc, 561 .target_alloc = iscsi_target_alloc,
562 .eh_abort_handler = beiscsi_eh_abort, 562 .eh_abort_handler = beiscsi_eh_abort,
@@ -570,7 +570,7 @@ static struct scsi_host_template beiscsi_sht = {
570 .cmd_per_lun = BEISCSI_CMD_PER_LUN, 570 .cmd_per_lun = BEISCSI_CMD_PER_LUN,
571 .use_clustering = ENABLE_CLUSTERING, 571 .use_clustering = ENABLE_CLUSTERING,
572 .vendor_id = SCSI_NL_VID_TYPE_PCI | BE_VENDOR_ID, 572 .vendor_id = SCSI_NL_VID_TYPE_PCI | BE_VENDOR_ID,
573 573 .track_queue_depth = 1,
574}; 574};
575 575
576static struct scsi_transport_template *beiscsi_scsi_transport; 576static struct scsi_transport_template *beiscsi_scsi_transport;
diff --git a/drivers/scsi/bfa/bfad_debugfs.c b/drivers/scsi/bfa/bfad_debugfs.c
index 8e83d0474fe7..74a307c0a240 100644
--- a/drivers/scsi/bfa/bfad_debugfs.c
+++ b/drivers/scsi/bfa/bfad_debugfs.c
@@ -260,18 +260,9 @@ bfad_debugfs_write_regrd(struct file *file, const char __user *buf,
260 unsigned long flags; 260 unsigned long flags;
261 void *kern_buf; 261 void *kern_buf;
262 262
263 kern_buf = kzalloc(nbytes, GFP_KERNEL); 263 kern_buf = memdup_user(buf, nbytes);
264 264 if (IS_ERR(kern_buf))
265 if (!kern_buf) { 265 return PTR_ERR(kern_buf);
266 printk(KERN_INFO "bfad[%d]: Failed to allocate buffer\n",
267 bfad->inst_no);
268 return -ENOMEM;
269 }
270
271 if (copy_from_user(kern_buf, (void __user *)buf, nbytes)) {
272 kfree(kern_buf);
273 return -ENOMEM;
274 }
275 266
276 rc = sscanf(kern_buf, "%x:%x", &addr, &len); 267 rc = sscanf(kern_buf, "%x:%x", &addr, &len);
277 if (rc < 2) { 268 if (rc < 2) {
@@ -336,18 +327,9 @@ bfad_debugfs_write_regwr(struct file *file, const char __user *buf,
336 unsigned long flags; 327 unsigned long flags;
337 void *kern_buf; 328 void *kern_buf;
338 329
339 kern_buf = kzalloc(nbytes, GFP_KERNEL); 330 kern_buf = memdup_user(buf, nbytes);
340 331 if (IS_ERR(kern_buf))
341 if (!kern_buf) { 332 return PTR_ERR(kern_buf);
342 printk(KERN_INFO "bfad[%d]: Failed to allocate buffer\n",
343 bfad->inst_no);
344 return -ENOMEM;
345 }
346
347 if (copy_from_user(kern_buf, (void __user *)buf, nbytes)) {
348 kfree(kern_buf);
349 return -ENOMEM;
350 }
351 333
352 rc = sscanf(kern_buf, "%x:%x", &addr, &val); 334 rc = sscanf(kern_buf, "%x:%x", &addr, &val);
353 if (rc < 2) { 335 if (rc < 2) {
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index f067332bf763..7223b0006740 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -776,11 +776,7 @@ bfad_thread_workq(struct bfad_s *bfad)
776static int 776static int
777bfad_im_slave_configure(struct scsi_device *sdev) 777bfad_im_slave_configure(struct scsi_device *sdev)
778{ 778{
779 if (sdev->tagged_supported) 779 scsi_change_queue_depth(sdev, bfa_lun_queue_depth);
780 scsi_activate_tcq(sdev, bfa_lun_queue_depth);
781 else
782 scsi_deactivate_tcq(sdev, bfa_lun_queue_depth);
783
784 return 0; 780 return 0;
785} 781}
786 782
@@ -804,6 +800,7 @@ struct scsi_host_template bfad_im_scsi_host_template = {
804 .shost_attrs = bfad_im_host_attrs, 800 .shost_attrs = bfad_im_host_attrs,
805 .max_sectors = BFAD_MAX_SECTORS, 801 .max_sectors = BFAD_MAX_SECTORS,
806 .vendor_id = BFA_PCI_VENDOR_ID_BROCADE, 802 .vendor_id = BFA_PCI_VENDOR_ID_BROCADE,
803 .use_blk_tags = 1,
807}; 804};
808 805
809struct scsi_host_template bfad_im_vport_template = { 806struct scsi_host_template bfad_im_vport_template = {
@@ -825,6 +822,7 @@ struct scsi_host_template bfad_im_vport_template = {
825 .use_clustering = ENABLE_CLUSTERING, 822 .use_clustering = ENABLE_CLUSTERING,
826 .shost_attrs = bfad_im_vport_attrs, 823 .shost_attrs = bfad_im_vport_attrs,
827 .max_sectors = BFAD_MAX_SECTORS, 824 .max_sectors = BFAD_MAX_SECTORS,
825 .use_blk_tags = 1,
828}; 826};
829 827
830bfa_status_t 828bfa_status_t
@@ -868,14 +866,8 @@ bfad_ramp_up_qdepth(struct bfad_itnim_s *itnim, struct scsi_device *sdev)
868 if (bfa_lun_queue_depth > tmp_sdev->queue_depth) { 866 if (bfa_lun_queue_depth > tmp_sdev->queue_depth) {
869 if (tmp_sdev->id != sdev->id) 867 if (tmp_sdev->id != sdev->id)
870 continue; 868 continue;
871 if (tmp_sdev->ordered_tags) 869 scsi_change_queue_depth(tmp_sdev,
872 scsi_adjust_queue_depth(tmp_sdev, 870 tmp_sdev->queue_depth + 1);
873 MSG_ORDERED_TAG,
874 tmp_sdev->queue_depth + 1);
875 else
876 scsi_adjust_queue_depth(tmp_sdev,
877 MSG_SIMPLE_TAG,
878 tmp_sdev->queue_depth + 1);
879 871
880 itnim->last_ramp_up_time = jiffies; 872 itnim->last_ramp_up_time = jiffies;
881 } 873 }
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 79e5c94107a9..e861f286b42e 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -412,6 +412,7 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
412 struct fc_frame_header *fh; 412 struct fc_frame_header *fh;
413 struct fcoe_rcv_info *fr; 413 struct fcoe_rcv_info *fr;
414 struct fcoe_percpu_s *bg; 414 struct fcoe_percpu_s *bg;
415 struct sk_buff *tmp_skb;
415 unsigned short oxid; 416 unsigned short oxid;
416 417
417 interface = container_of(ptype, struct bnx2fc_interface, 418 interface = container_of(ptype, struct bnx2fc_interface,
@@ -424,6 +425,12 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
424 goto err; 425 goto err;
425 } 426 }
426 427
428 tmp_skb = skb_share_check(skb, GFP_ATOMIC);
429 if (!tmp_skb)
430 goto err;
431
432 skb = tmp_skb;
433
427 if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) { 434 if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) {
428 printk(KERN_ERR PFX "bnx2fc_rcv: Wrong FC type frame\n"); 435 printk(KERN_ERR PFX "bnx2fc_rcv: Wrong FC type frame\n");
429 goto err; 436 goto err;
@@ -1081,7 +1088,7 @@ static int bnx2fc_vport_create(struct fc_vport *vport, bool disabled)
1081 mutex_unlock(&bnx2fc_dev_lock); 1088 mutex_unlock(&bnx2fc_dev_lock);
1082 rtnl_unlock(); 1089 rtnl_unlock();
1083 1090
1084 if (IS_ERR(vn_port)) { 1091 if (!vn_port) {
1085 printk(KERN_ERR PFX "bnx2fc_vport_create (%s) failed\n", 1092 printk(KERN_ERR PFX "bnx2fc_vport_create (%s) failed\n",
1086 netdev->name); 1093 netdev->name);
1087 return -EIO; 1094 return -EIO;
@@ -2195,6 +2202,7 @@ static int _bnx2fc_create(struct net_device *netdev,
2195 interface = bnx2fc_interface_create(hba, netdev, fip_mode); 2202 interface = bnx2fc_interface_create(hba, netdev, fip_mode);
2196 if (!interface) { 2203 if (!interface) {
2197 printk(KERN_ERR PFX "bnx2fc_interface_create failed\n"); 2204 printk(KERN_ERR PFX "bnx2fc_interface_create failed\n");
2205 rc = -ENOMEM;
2198 goto ifput_err; 2206 goto ifput_err;
2199 } 2207 }
2200 2208
@@ -2783,13 +2791,15 @@ static struct scsi_host_template bnx2fc_shost_template = {
2783 .eh_target_reset_handler = bnx2fc_eh_target_reset, /* tgt reset */ 2791 .eh_target_reset_handler = bnx2fc_eh_target_reset, /* tgt reset */
2784 .eh_host_reset_handler = fc_eh_host_reset, 2792 .eh_host_reset_handler = fc_eh_host_reset,
2785 .slave_alloc = fc_slave_alloc, 2793 .slave_alloc = fc_slave_alloc,
2786 .change_queue_depth = fc_change_queue_depth, 2794 .change_queue_depth = scsi_change_queue_depth,
2787 .change_queue_type = fc_change_queue_type, 2795 .change_queue_type = scsi_change_queue_type,
2788 .this_id = -1, 2796 .this_id = -1,
2789 .cmd_per_lun = 3, 2797 .cmd_per_lun = 3,
2790 .use_clustering = ENABLE_CLUSTERING, 2798 .use_clustering = ENABLE_CLUSTERING,
2791 .sg_tablesize = BNX2FC_MAX_BDS_PER_CMD, 2799 .sg_tablesize = BNX2FC_MAX_BDS_PER_CMD,
2792 .max_sectors = 1024, 2800 .max_sectors = 1024,
2801 .use_blk_tags = 1,
2802 .track_queue_depth = 1,
2793}; 2803};
2794 2804
2795static struct libfc_function_template bnx2fc_libfc_fcn_templ = { 2805static struct libfc_function_template bnx2fc_libfc_fcn_templ = {
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 5b99844ef6bf..4b56858c1df2 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -1725,7 +1725,6 @@ void bnx2fc_build_fcp_cmnd(struct bnx2fc_cmd *io_req,
1725 struct fcp_cmnd *fcp_cmnd) 1725 struct fcp_cmnd *fcp_cmnd)
1726{ 1726{
1727 struct scsi_cmnd *sc_cmd = io_req->sc_cmd; 1727 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
1728 char tag[2];
1729 1728
1730 memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd)); 1729 memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
1731 1730
@@ -1739,21 +1738,10 @@ void bnx2fc_build_fcp_cmnd(struct bnx2fc_cmd *io_req,
1739 fcp_cmnd->fc_tm_flags = io_req->mp_req.tm_flags; 1738 fcp_cmnd->fc_tm_flags = io_req->mp_req.tm_flags;
1740 fcp_cmnd->fc_flags = io_req->io_req_flags; 1739 fcp_cmnd->fc_flags = io_req->io_req_flags;
1741 1740
1742 if (scsi_populate_tag_msg(sc_cmd, tag)) { 1741 if (sc_cmd->flags & SCMD_TAGGED)
1743 switch (tag[0]) { 1742 fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
1744 case HEAD_OF_QUEUE_TAG: 1743 else
1745 fcp_cmnd->fc_pri_ta = FCP_PTA_HEADQ;
1746 break;
1747 case ORDERED_QUEUE_TAG:
1748 fcp_cmnd->fc_pri_ta = FCP_PTA_ORDERED;
1749 break;
1750 default:
1751 fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
1752 break;
1753 }
1754 } else {
1755 fcp_cmnd->fc_pri_ta = 0; 1744 fcp_cmnd->fc_pri_ta = 0;
1756 }
1757} 1745}
1758 1746
1759static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req, 1747static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index 7a36388822aa..e53078d03309 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -2259,7 +2259,7 @@ static struct scsi_host_template bnx2i_host_template = {
2259 .eh_abort_handler = iscsi_eh_abort, 2259 .eh_abort_handler = iscsi_eh_abort,
2260 .eh_device_reset_handler = iscsi_eh_device_reset, 2260 .eh_device_reset_handler = iscsi_eh_device_reset,
2261 .eh_target_reset_handler = iscsi_eh_recover_target, 2261 .eh_target_reset_handler = iscsi_eh_recover_target,
2262 .change_queue_depth = iscsi_change_queue_depth, 2262 .change_queue_depth = scsi_change_queue_depth,
2263 .target_alloc = iscsi_target_alloc, 2263 .target_alloc = iscsi_target_alloc,
2264 .can_queue = 2048, 2264 .can_queue = 2048,
2265 .max_sectors = 127, 2265 .max_sectors = 127,
@@ -2268,6 +2268,7 @@ static struct scsi_host_template bnx2i_host_template = {
2268 .use_clustering = ENABLE_CLUSTERING, 2268 .use_clustering = ENABLE_CLUSTERING,
2269 .sg_tablesize = ISCSI_MAX_BDS_PER_CMD, 2269 .sg_tablesize = ISCSI_MAX_BDS_PER_CMD,
2270 .shost_attrs = bnx2i_dev_attributes, 2270 .shost_attrs = bnx2i_dev_attributes,
2271 .track_queue_depth = 1,
2271}; 2272};
2272 2273
2273struct iscsi_transport bnx2i_iscsi_transport = { 2274struct iscsi_transport bnx2i_iscsi_transport = {
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
index ef5ae0d03616..6bac8a746ee2 100644
--- a/drivers/scsi/ch.c
+++ b/drivers/scsi/ch.c
@@ -85,8 +85,7 @@ static const char * vendor_labels[CH_TYPES-4] = {
85// module_param_string_array(vendor_labels, NULL, 0444); 85// module_param_string_array(vendor_labels, NULL, 0444);
86 86
87#define ch_printk(prefix, ch, fmt, a...) \ 87#define ch_printk(prefix, ch, fmt, a...) \
88 sdev_printk(prefix, (ch)->device, "[%s] " fmt, \ 88 sdev_prefix_printk(prefix, (ch)->device, (ch)->name, fmt, ##a)
89 (ch)->name, ##a)
90 89
91#define DPRINTK(fmt, arg...) \ 90#define DPRINTK(fmt, arg...) \
92do { \ 91do { \
@@ -183,7 +182,7 @@ static int ch_find_errno(struct scsi_sense_hdr *sshdr)
183} 182}
184 183
185static int 184static int
186ch_do_scsi(scsi_changer *ch, unsigned char *cmd, 185ch_do_scsi(scsi_changer *ch, unsigned char *cmd, int cmd_len,
187 void *buffer, unsigned buflength, 186 void *buffer, unsigned buflength,
188 enum dma_data_direction direction) 187 enum dma_data_direction direction)
189{ 188{
@@ -197,7 +196,7 @@ ch_do_scsi(scsi_changer *ch, unsigned char *cmd,
197 errno = 0; 196 errno = 0;
198 if (debug) { 197 if (debug) {
199 DPRINTK("command: "); 198 DPRINTK("command: ");
200 __scsi_print_command(cmd); 199 __scsi_print_command(cmd, cmd_len);
201 } 200 }
202 201
203 result = scsi_execute_req(ch->device, cmd, direction, buffer, 202 result = scsi_execute_req(ch->device, cmd, direction, buffer,
@@ -207,7 +206,7 @@ ch_do_scsi(scsi_changer *ch, unsigned char *cmd,
207 DPRINTK("result: 0x%x\n",result); 206 DPRINTK("result: 0x%x\n",result);
208 if (driver_byte(result) & DRIVER_SENSE) { 207 if (driver_byte(result) & DRIVER_SENSE) {
209 if (debug) 208 if (debug)
210 scsi_print_sense_hdr(ch->name, &sshdr); 209 scsi_print_sense_hdr(ch->device, ch->name, &sshdr);
211 errno = ch_find_errno(&sshdr); 210 errno = ch_find_errno(&sshdr);
212 211
213 switch(sshdr.sense_key) { 212 switch(sshdr.sense_key) {
@@ -258,7 +257,8 @@ ch_read_element_status(scsi_changer *ch, u_int elem, char *data)
258 cmd[3] = elem & 0xff; 257 cmd[3] = elem & 0xff;
259 cmd[5] = 1; 258 cmd[5] = 1;
260 cmd[9] = 255; 259 cmd[9] = 255;
261 if (0 == (result = ch_do_scsi(ch, cmd, buffer, 256, DMA_FROM_DEVICE))) { 260 if (0 == (result = ch_do_scsi(ch, cmd, 12,
261 buffer, 256, DMA_FROM_DEVICE))) {
262 if (((buffer[16] << 8) | buffer[17]) != elem) { 262 if (((buffer[16] << 8) | buffer[17]) != elem) {
263 DPRINTK("asked for element 0x%02x, got 0x%02x\n", 263 DPRINTK("asked for element 0x%02x, got 0x%02x\n",
264 elem,(buffer[16] << 8) | buffer[17]); 264 elem,(buffer[16] << 8) | buffer[17]);
@@ -288,7 +288,7 @@ ch_init_elem(scsi_changer *ch)
288 memset(cmd,0,sizeof(cmd)); 288 memset(cmd,0,sizeof(cmd));
289 cmd[0] = INITIALIZE_ELEMENT_STATUS; 289 cmd[0] = INITIALIZE_ELEMENT_STATUS;
290 cmd[1] = (ch->device->lun & 0x7) << 5; 290 cmd[1] = (ch->device->lun & 0x7) << 5;
291 err = ch_do_scsi(ch, cmd, NULL, 0, DMA_NONE); 291 err = ch_do_scsi(ch, cmd, 6, NULL, 0, DMA_NONE);
292 VPRINTK(KERN_INFO, "... finished\n"); 292 VPRINTK(KERN_INFO, "... finished\n");
293 return err; 293 return err;
294} 294}
@@ -310,10 +310,10 @@ ch_readconfig(scsi_changer *ch)
310 cmd[1] = (ch->device->lun & 0x7) << 5; 310 cmd[1] = (ch->device->lun & 0x7) << 5;
311 cmd[2] = 0x1d; 311 cmd[2] = 0x1d;
312 cmd[4] = 255; 312 cmd[4] = 255;
313 result = ch_do_scsi(ch, cmd, buffer, 255, DMA_FROM_DEVICE); 313 result = ch_do_scsi(ch, cmd, 10, buffer, 255, DMA_FROM_DEVICE);
314 if (0 != result) { 314 if (0 != result) {
315 cmd[1] |= (1<<3); 315 cmd[1] |= (1<<3);
316 result = ch_do_scsi(ch, cmd, buffer, 255, DMA_FROM_DEVICE); 316 result = ch_do_scsi(ch, cmd, 10, buffer, 255, DMA_FROM_DEVICE);
317 } 317 }
318 if (0 == result) { 318 if (0 == result) {
319 ch->firsts[CHET_MT] = 319 ch->firsts[CHET_MT] =
@@ -438,7 +438,7 @@ ch_position(scsi_changer *ch, u_int trans, u_int elem, int rotate)
438 cmd[4] = (elem >> 8) & 0xff; 438 cmd[4] = (elem >> 8) & 0xff;
439 cmd[5] = elem & 0xff; 439 cmd[5] = elem & 0xff;
440 cmd[8] = rotate ? 1 : 0; 440 cmd[8] = rotate ? 1 : 0;
441 return ch_do_scsi(ch, cmd, NULL, 0, DMA_NONE); 441 return ch_do_scsi(ch, cmd, 10, NULL, 0, DMA_NONE);
442} 442}
443 443
444static int 444static int
@@ -459,7 +459,7 @@ ch_move(scsi_changer *ch, u_int trans, u_int src, u_int dest, int rotate)
459 cmd[6] = (dest >> 8) & 0xff; 459 cmd[6] = (dest >> 8) & 0xff;
460 cmd[7] = dest & 0xff; 460 cmd[7] = dest & 0xff;
461 cmd[10] = rotate ? 1 : 0; 461 cmd[10] = rotate ? 1 : 0;
462 return ch_do_scsi(ch, cmd, NULL,0, DMA_NONE); 462 return ch_do_scsi(ch, cmd, 12, NULL,0, DMA_NONE);
463} 463}
464 464
465static int 465static int
@@ -485,7 +485,7 @@ ch_exchange(scsi_changer *ch, u_int trans, u_int src,
485 cmd[9] = dest2 & 0xff; 485 cmd[9] = dest2 & 0xff;
486 cmd[10] = (rotate1 ? 1 : 0) | (rotate2 ? 2 : 0); 486 cmd[10] = (rotate1 ? 1 : 0) | (rotate2 ? 2 : 0);
487 487
488 return ch_do_scsi(ch, cmd, NULL,0, DMA_NONE); 488 return ch_do_scsi(ch, cmd, 12, NULL, 0, DMA_NONE);
489} 489}
490 490
491static void 491static void
@@ -535,7 +535,7 @@ ch_set_voltag(scsi_changer *ch, u_int elem,
535 memcpy(buffer,tag,32); 535 memcpy(buffer,tag,32);
536 ch_check_voltag(buffer); 536 ch_check_voltag(buffer);
537 537
538 result = ch_do_scsi(ch, cmd, buffer, 256, DMA_TO_DEVICE); 538 result = ch_do_scsi(ch, cmd, 12, buffer, 256, DMA_TO_DEVICE);
539 kfree(buffer); 539 kfree(buffer);
540 return result; 540 return result;
541} 541}
@@ -616,6 +616,11 @@ static long ch_ioctl(struct file *file,
616 int retval; 616 int retval;
617 void __user *argp = (void __user *)arg; 617 void __user *argp = (void __user *)arg;
618 618
619 retval = scsi_ioctl_block_when_processing_errors(ch->device, cmd,
620 file->f_flags & O_NDELAY);
621 if (retval)
622 return retval;
623
619 switch (cmd) { 624 switch (cmd) {
620 case CHIOGPARAMS: 625 case CHIOGPARAMS:
621 { 626 {
@@ -766,7 +771,8 @@ static long ch_ioctl(struct file *file,
766 ch_cmd[5] = 1; 771 ch_cmd[5] = 1;
767 ch_cmd[9] = 255; 772 ch_cmd[9] = 255;
768 773
769 result = ch_do_scsi(ch, ch_cmd, buffer, 256, DMA_FROM_DEVICE); 774 result = ch_do_scsi(ch, ch_cmd, 12,
775 buffer, 256, DMA_FROM_DEVICE);
770 if (!result) { 776 if (!result) {
771 cge.cge_status = buffer[18]; 777 cge.cge_status = buffer[18];
772 cge.cge_flags = 0; 778 cge.cge_flags = 0;
@@ -966,9 +972,9 @@ static int ch_remove(struct device *dev)
966} 972}
967 973
968static struct scsi_driver ch_template = { 974static struct scsi_driver ch_template = {
969 .owner = THIS_MODULE,
970 .gendrv = { 975 .gendrv = {
971 .name = "ch", 976 .name = "ch",
977 .owner = THIS_MODULE,
972 .probe = ch_probe, 978 .probe = ch_probe,
973 .remove = ch_remove, 979 .remove = ch_remove,
974 }, 980 },
diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c
index d35a5d6c8d7c..e2068a2621c4 100644
--- a/drivers/scsi/constants.c
+++ b/drivers/scsi/constants.c
@@ -21,15 +21,21 @@
21 21
22 22
23/* Commands with service actions that change the command name */ 23/* Commands with service actions that change the command name */
24#define SERVICE_ACTION_IN_12 0xab
25#define SERVICE_ACTION_OUT_12 0xa9
26#define SERVICE_ACTION_BIDIRECTIONAL 0x9d
27#define SERVICE_ACTION_IN_16 0x9e
28#define SERVICE_ACTION_OUT_16 0x9f
29#define THIRD_PARTY_COPY_OUT 0x83 24#define THIRD_PARTY_COPY_OUT 0x83
30#define THIRD_PARTY_COPY_IN 0x84 25#define THIRD_PARTY_COPY_IN 0x84
31 26
27#define VENDOR_SPECIFIC_CDB 0xc0
32 28
29struct sa_name_list {
30 int opcode;
31 const struct value_name_pair *arr;
32 int arr_sz;
33};
34
35struct value_name_pair {
36 int value;
37 const char * name;
38};
33 39
34#ifdef CONFIG_SCSI_CONSTANTS 40#ifdef CONFIG_SCSI_CONSTANTS
35static const char * cdb_byte0_names[] = { 41static const char * cdb_byte0_names[] = {
@@ -102,11 +108,6 @@ static const char * cdb_byte0_names[] = {
102 "Volume set (out), Send DVD structure", 108 "Volume set (out), Send DVD structure",
103}; 109};
104 110
105struct value_name_pair {
106 int value;
107 const char * name;
108};
109
110static const struct value_name_pair maint_in_arr[] = { 111static const struct value_name_pair maint_in_arr[] = {
111 {0x5, "Report identifying information"}, 112 {0x5, "Report identifying information"},
112 {0xa, "Report target port groups"}, 113 {0xa, "Report target port groups"},
@@ -244,170 +245,119 @@ static const struct value_name_pair variable_length_arr[] = {
244}; 245};
245#define VARIABLE_LENGTH_SZ ARRAY_SIZE(variable_length_arr) 246#define VARIABLE_LENGTH_SZ ARRAY_SIZE(variable_length_arr)
246 247
247static const char * get_sa_name(const struct value_name_pair * arr, 248static struct sa_name_list sa_names_arr[] = {
248 int arr_sz, int service_action) 249 {VARIABLE_LENGTH_CMD, variable_length_arr, VARIABLE_LENGTH_SZ},
249{ 250 {MAINTENANCE_IN, maint_in_arr, MAINT_IN_SZ},
250 int k; 251 {MAINTENANCE_OUT, maint_out_arr, MAINT_OUT_SZ},
252 {PERSISTENT_RESERVE_IN, pr_in_arr, PR_IN_SZ},
253 {PERSISTENT_RESERVE_OUT, pr_out_arr, PR_OUT_SZ},
254 {SERVICE_ACTION_IN_12, serv_in12_arr, SERV_IN12_SZ},
255 {SERVICE_ACTION_OUT_12, serv_out12_arr, SERV_OUT12_SZ},
256 {SERVICE_ACTION_BIDIRECTIONAL, serv_bidi_arr, SERV_BIDI_SZ},
257 {SERVICE_ACTION_IN_16, serv_in16_arr, SERV_IN16_SZ},
258 {SERVICE_ACTION_OUT_16, serv_out16_arr, SERV_OUT16_SZ},
259 {THIRD_PARTY_COPY_IN, tpc_in_arr, TPC_IN_SZ},
260 {THIRD_PARTY_COPY_OUT, tpc_out_arr, TPC_OUT_SZ},
261 {0, NULL, 0},
262};
251 263
252 for (k = 0; k < arr_sz; ++k, ++arr) { 264#else /* ifndef CONFIG_SCSI_CONSTANTS */
253 if (service_action == arr->value) 265static const char *cdb_byte0_names[0];
254 break; 266
255 } 267static struct sa_name_list sa_names_arr[] = {
256 return (k < arr_sz) ? arr->name : NULL; 268 {VARIABLE_LENGTH_CMD, NULL, 0},
257} 269 {MAINTENANCE_IN, NULL, 0},
270 {MAINTENANCE_OUT, NULL, 0},
271 {PERSISTENT_RESERVE_IN, NULL, 0},
272 {PERSISTENT_RESERVE_OUT, NULL, 0},
273 {SERVICE_ACTION_IN_12, NULL, 0},
274 {SERVICE_ACTION_OUT_12, NULL, 0},
275 {SERVICE_ACTION_BIDIRECTIONAL, NULL, 0},
276 {SERVICE_ACTION_IN_16, NULL, 0},
277 {SERVICE_ACTION_OUT_16, NULL, 0},
278 {THIRD_PARTY_COPY_IN, NULL, 0},
279 {THIRD_PARTY_COPY_OUT, NULL, 0},
280 {0, NULL, 0},
281};
282#endif /* CONFIG_SCSI_CONSTANTS */
258 283
259/* attempt to guess cdb length if cdb_len==0 . No trailing linefeed. */ 284static bool scsi_opcode_sa_name(int opcode, int service_action,
260static void print_opcode_name(unsigned char * cdbp, int cdb_len) 285 const char **cdb_name, const char **sa_name)
261{ 286{
262 int sa, len, cdb0; 287 struct sa_name_list *sa_name_ptr;
263 int fin_name = 0; 288 const struct value_name_pair *arr = NULL;
264 const char * name; 289 int arr_sz, k;
265 290
266 cdb0 = cdbp[0]; 291 *cdb_name = NULL;
267 switch(cdb0) { 292 if (opcode >= VENDOR_SPECIFIC_CDB)
268 case VARIABLE_LENGTH_CMD: 293 return false;
269 len = scsi_varlen_cdb_length(cdbp);
270 if (len < 10) {
271 printk("short variable length command, "
272 "len=%d ext_len=%d", len, cdb_len);
273 break;
274 }
275 sa = (cdbp[8] << 8) + cdbp[9];
276 name = get_sa_name(variable_length_arr, VARIABLE_LENGTH_SZ,
277 sa);
278 if (name)
279 printk("%s", name);
280 else
281 printk("cdb[0]=0x%x, sa=0x%x", cdb0, sa);
282 294
283 if ((cdb_len > 0) && (len != cdb_len)) 295 if (opcode < ARRAY_SIZE(cdb_byte0_names))
284 printk(", in_cdb_len=%d, ext_len=%d", len, cdb_len); 296 *cdb_name = cdb_byte0_names[opcode];
285 297
286 break; 298 for (sa_name_ptr = sa_names_arr; sa_name_ptr->arr; ++sa_name_ptr) {
287 case MAINTENANCE_IN: 299 if (sa_name_ptr->opcode == opcode) {
288 sa = cdbp[1] & 0x1f; 300 arr = sa_name_ptr->arr;
289 name = get_sa_name(maint_in_arr, MAINT_IN_SZ, sa); 301 arr_sz = sa_name_ptr->arr_sz;
290 fin_name = 1; 302 break;
291 break; 303 }
292 case MAINTENANCE_OUT:
293 sa = cdbp[1] & 0x1f;
294 name = get_sa_name(maint_out_arr, MAINT_OUT_SZ, sa);
295 fin_name = 1;
296 break;
297 case PERSISTENT_RESERVE_IN:
298 sa = cdbp[1] & 0x1f;
299 name = get_sa_name(pr_in_arr, PR_IN_SZ, sa);
300 fin_name = 1;
301 break;
302 case PERSISTENT_RESERVE_OUT:
303 sa = cdbp[1] & 0x1f;
304 name = get_sa_name(pr_out_arr, PR_OUT_SZ, sa);
305 fin_name = 1;
306 break;
307 case SERVICE_ACTION_IN_12:
308 sa = cdbp[1] & 0x1f;
309 name = get_sa_name(serv_in12_arr, SERV_IN12_SZ, sa);
310 fin_name = 1;
311 break;
312 case SERVICE_ACTION_OUT_12:
313 sa = cdbp[1] & 0x1f;
314 name = get_sa_name(serv_out12_arr, SERV_OUT12_SZ, sa);
315 fin_name = 1;
316 break;
317 case SERVICE_ACTION_BIDIRECTIONAL:
318 sa = cdbp[1] & 0x1f;
319 name = get_sa_name(serv_bidi_arr, SERV_BIDI_SZ, sa);
320 fin_name = 1;
321 break;
322 case SERVICE_ACTION_IN_16:
323 sa = cdbp[1] & 0x1f;
324 name = get_sa_name(serv_in16_arr, SERV_IN16_SZ, sa);
325 fin_name = 1;
326 break;
327 case SERVICE_ACTION_OUT_16:
328 sa = cdbp[1] & 0x1f;
329 name = get_sa_name(serv_out16_arr, SERV_OUT16_SZ, sa);
330 fin_name = 1;
331 break;
332 case THIRD_PARTY_COPY_IN:
333 sa = cdbp[1] & 0x1f;
334 name = get_sa_name(tpc_in_arr, TPC_IN_SZ, sa);
335 fin_name = 1;
336 break;
337 case THIRD_PARTY_COPY_OUT:
338 sa = cdbp[1] & 0x1f;
339 name = get_sa_name(tpc_out_arr, TPC_OUT_SZ, sa);
340 fin_name = 1;
341 break;
342 default:
343 if (cdb0 < 0xc0) {
344 name = cdb_byte0_names[cdb0];
345 if (name)
346 printk("%s", name);
347 else
348 printk("cdb[0]=0x%x (reserved)", cdb0);
349 } else
350 printk("cdb[0]=0x%x (vendor)", cdb0);
351 break;
352 } 304 }
353 if (fin_name) { 305 if (!arr)
354 if (name) 306 return false;
355 printk("%s", name); 307
356 else 308 for (k = 0; k < arr_sz; ++k, ++arr) {
357 printk("cdb[0]=0x%x, sa=0x%x", cdb0, sa); 309 if (service_action == arr->value)
310 break;
358 } 311 }
359} 312 if (k < arr_sz)
313 *sa_name = arr->name;
360 314
361#else /* ifndef CONFIG_SCSI_CONSTANTS */ 315 return true;
316}
362 317
363static void print_opcode_name(unsigned char * cdbp, int cdb_len) 318static void print_opcode_name(const unsigned char *cdbp, size_t cdb_len)
364{ 319{
365 int sa, len, cdb0; 320 int sa, cdb0;
321 const char *cdb_name = NULL, *sa_name = NULL;
366 322
367 cdb0 = cdbp[0]; 323 cdb0 = cdbp[0];
368 switch(cdb0) { 324 if (cdb0 == VARIABLE_LENGTH_CMD) {
369 case VARIABLE_LENGTH_CMD: 325 if (cdb_len < 10) {
370 len = scsi_varlen_cdb_length(cdbp); 326 printk("short variable length command, len=%zu",
371 if (len < 10) { 327 cdb_len);
372 printk("short opcode=0x%x command, len=%d " 328 return;
373 "ext_len=%d", cdb0, len, cdb_len);
374 break;
375 } 329 }
376 sa = (cdbp[8] << 8) + cdbp[9]; 330 sa = (cdbp[8] << 8) + cdbp[9];
377 printk("cdb[0]=0x%x, sa=0x%x", cdb0, sa); 331 } else
378 if (len != cdb_len)
379 printk(", in_cdb_len=%d, ext_len=%d", len, cdb_len);
380 break;
381 case MAINTENANCE_IN:
382 case MAINTENANCE_OUT:
383 case PERSISTENT_RESERVE_IN:
384 case PERSISTENT_RESERVE_OUT:
385 case SERVICE_ACTION_IN_12:
386 case SERVICE_ACTION_OUT_12:
387 case SERVICE_ACTION_BIDIRECTIONAL:
388 case SERVICE_ACTION_IN_16:
389 case SERVICE_ACTION_OUT_16:
390 case THIRD_PARTY_COPY_IN:
391 case THIRD_PARTY_COPY_OUT:
392 sa = cdbp[1] & 0x1f; 332 sa = cdbp[1] & 0x1f;
393 printk("cdb[0]=0x%x, sa=0x%x", cdb0, sa); 333
394 break; 334 if (!scsi_opcode_sa_name(cdb0, sa, &cdb_name, &sa_name)) {
395 default: 335 if (cdb_name)
396 if (cdb0 < 0xc0) 336 printk("%s", cdb_name);
337 else if (cdb0 >= VENDOR_SPECIFIC_CDB)
338 printk("cdb[0]=0x%x (vendor)", cdb0);
339 else if (cdb0 >= 0x60 && cdb0 < 0x7e)
340 printk("cdb[0]=0x%x (reserved)", cdb0);
341 else
397 printk("cdb[0]=0x%x", cdb0); 342 printk("cdb[0]=0x%x", cdb0);
343 } else {
344 if (sa_name)
345 printk("%s", sa_name);
346 else if (cdb_name)
347 printk("%s, sa=0x%x", cdb_name, sa);
398 else 348 else
399 printk("cdb[0]=0x%x (vendor)", cdb0); 349 printk("cdb[0]=0x%x, sa=0x%x", cdb0, sa);
400 break;
401 } 350 }
402} 351}
403#endif
404 352
405void __scsi_print_command(unsigned char *cdb) 353void __scsi_print_command(const unsigned char *cdb, size_t cdb_len)
406{ 354{
407 int k, len; 355 int k, len;
408 356
409 print_opcode_name(cdb, 0); 357 print_opcode_name(cdb, cdb_len);
410 len = scsi_command_size(cdb); 358 len = scsi_command_size(cdb);
359 if (cdb_len < len)
360 len = cdb_len;
411 /* print out all bytes in cdb */ 361 /* print out all bytes in cdb */
412 for (k = 0; k < len; ++k) 362 for (k = 0; k < len; ++k)
413 printk(" %02x", cdb[k]); 363 printk(" %02x", cdb[k]);
@@ -433,41 +383,6 @@ void scsi_print_command(struct scsi_cmnd *cmd)
433} 383}
434EXPORT_SYMBOL(scsi_print_command); 384EXPORT_SYMBOL(scsi_print_command);
435 385
436/**
437 * scsi_print_status - print scsi status description
438 * @scsi_status: scsi status value
439 *
440 * If the status is recognized, the description is printed.
441 * Otherwise "Unknown status" is output. No trailing space.
442 * If CONFIG_SCSI_CONSTANTS is not set, then print status in hex
443 * (e.g. "0x2" for Check Condition).
444 **/
445void
446scsi_print_status(unsigned char scsi_status) {
447#ifdef CONFIG_SCSI_CONSTANTS
448 const char * ccp;
449
450 switch (scsi_status) {
451 case 0: ccp = "Good"; break;
452 case 0x2: ccp = "Check Condition"; break;
453 case 0x4: ccp = "Condition Met"; break;
454 case 0x8: ccp = "Busy"; break;
455 case 0x10: ccp = "Intermediate"; break;
456 case 0x14: ccp = "Intermediate-Condition Met"; break;
457 case 0x18: ccp = "Reservation Conflict"; break;
458 case 0x22: ccp = "Command Terminated"; break; /* obsolete */
459 case 0x28: ccp = "Task set Full"; break; /* was: Queue Full */
460 case 0x30: ccp = "ACA Active"; break;
461 case 0x40: ccp = "Task Aborted"; break;
462 default: ccp = "Unknown status";
463 }
464 printk(KERN_INFO "%s", ccp);
465#else
466 printk(KERN_INFO "0x%0x", scsi_status);
467#endif
468}
469EXPORT_SYMBOL(scsi_print_status);
470
471#ifdef CONFIG_SCSI_CONSTANTS 386#ifdef CONFIG_SCSI_CONSTANTS
472 387
473struct error_info { 388struct error_info {
@@ -1292,18 +1207,19 @@ static const struct error_info additional[] =
1292 1207
1293struct error_info2 { 1208struct error_info2 {
1294 unsigned char code1, code2_min, code2_max; 1209 unsigned char code1, code2_min, code2_max;
1210 const char * str;
1295 const char * fmt; 1211 const char * fmt;
1296}; 1212};
1297 1213
1298static const struct error_info2 additional2[] = 1214static const struct error_info2 additional2[] =
1299{ 1215{
1300 {0x40, 0x00, 0x7f, "Ram failure (%x)"}, 1216 {0x40, 0x00, 0x7f, "Ram failure", ""},
1301 {0x40, 0x80, 0xff, "Diagnostic failure on component (%x)"}, 1217 {0x40, 0x80, 0xff, "Diagnostic failure on component", ""},
1302 {0x41, 0x00, 0xff, "Data path failure (%x)"}, 1218 {0x41, 0x00, 0xff, "Data path failure", ""},
1303 {0x42, 0x00, 0xff, "Power-on or self-test failure (%x)"}, 1219 {0x42, 0x00, 0xff, "Power-on or self-test failure", ""},
1304 {0x4D, 0x00, 0xff, "Tagged overlapped commands (task tag %x)"}, 1220 {0x4D, 0x00, 0xff, "Tagged overlapped commands", "task tag "},
1305 {0x70, 0x00, 0xff, "Decompression exception short algorithm id of %x"}, 1221 {0x70, 0x00, 0xff, "Decompression exception", "short algorithm id of "},
1306 {0, 0, 0, NULL} 1222 {0, 0, 0, NULL, NULL}
1307}; 1223};
1308 1224
1309/* description of the sense key values */ 1225/* description of the sense key values */
@@ -1349,69 +1265,79 @@ EXPORT_SYMBOL(scsi_sense_key_string);
1349 * This string may contain a "%x" and should be printed with ascq as arg. 1265 * This string may contain a "%x" and should be printed with ascq as arg.
1350 */ 1266 */
1351const char * 1267const char *
1352scsi_extd_sense_format(unsigned char asc, unsigned char ascq) { 1268scsi_extd_sense_format(unsigned char asc, unsigned char ascq, const char **fmt)
1269{
1353#ifdef CONFIG_SCSI_CONSTANTS 1270#ifdef CONFIG_SCSI_CONSTANTS
1354 int i; 1271 int i;
1355 unsigned short code = ((asc << 8) | ascq); 1272 unsigned short code = ((asc << 8) | ascq);
1356 1273
1274 *fmt = NULL;
1357 for (i = 0; additional[i].text; i++) 1275 for (i = 0; additional[i].text; i++)
1358 if (additional[i].code12 == code) 1276 if (additional[i].code12 == code)
1359 return additional[i].text; 1277 return additional[i].text;
1360 for (i = 0; additional2[i].fmt; i++) { 1278 for (i = 0; additional2[i].fmt; i++) {
1361 if (additional2[i].code1 == asc && 1279 if (additional2[i].code1 == asc &&
1362 ascq >= additional2[i].code2_min && 1280 ascq >= additional2[i].code2_min &&
1363 ascq <= additional2[i].code2_max) 1281 ascq <= additional2[i].code2_max) {
1364 return additional2[i].fmt; 1282 *fmt = additional2[i].fmt;
1283 return additional2[i].str;
1284 }
1365 } 1285 }
1286#else
1287 *fmt = NULL;
1366#endif 1288#endif
1367 return NULL; 1289 return NULL;
1368} 1290}
1369EXPORT_SYMBOL(scsi_extd_sense_format); 1291EXPORT_SYMBOL(scsi_extd_sense_format);
1370 1292
1371void 1293void
1372scsi_show_extd_sense(unsigned char asc, unsigned char ascq) 1294scsi_show_extd_sense(const struct scsi_device *sdev, const char *name,
1295 unsigned char asc, unsigned char ascq)
1373{ 1296{
1374 const char *extd_sense_fmt = scsi_extd_sense_format(asc, ascq); 1297 const char *extd_sense_fmt = NULL;
1375 1298 const char *extd_sense_str = scsi_extd_sense_format(asc, ascq,
1376 if (extd_sense_fmt) { 1299 &extd_sense_fmt);
1377 if (strstr(extd_sense_fmt, "%x")) { 1300
1378 printk("Add. Sense: "); 1301 if (extd_sense_str) {
1379 printk(extd_sense_fmt, ascq); 1302 if (extd_sense_fmt)
1380 } else 1303 sdev_prefix_printk(KERN_INFO, sdev, name,
1381 printk("Add. Sense: %s", extd_sense_fmt); 1304 "Add. Sense: %s (%s%x)",
1382 } else { 1305 extd_sense_str, extd_sense_fmt,
1383 if (asc >= 0x80) 1306 ascq);
1384 printk("<<vendor>> ASC=0x%x ASCQ=0x%x", asc,
1385 ascq);
1386 if (ascq >= 0x80)
1387 printk("ASC=0x%x <<vendor>> ASCQ=0x%x", asc,
1388 ascq);
1389 else 1307 else
1390 printk("ASC=0x%x ASCQ=0x%x", asc, ascq); 1308 sdev_prefix_printk(KERN_INFO, sdev, name,
1391 } 1309 "Add. Sense: %s", extd_sense_str);
1392 1310
1393 printk("\n"); 1311 } else {
1312 sdev_prefix_printk(KERN_INFO, sdev, name,
1313 "%sASC=0x%x %sASCQ=0x%x\n",
1314 asc >= 0x80 ? "<<vendor>> " : "", asc,
1315 ascq >= 0x80 ? "<<vendor>> " : "", ascq);
1316 }
1394} 1317}
1395EXPORT_SYMBOL(scsi_show_extd_sense); 1318EXPORT_SYMBOL(scsi_show_extd_sense);
1396 1319
1397void 1320void
1398scsi_show_sense_hdr(struct scsi_sense_hdr *sshdr) 1321scsi_show_sense_hdr(const struct scsi_device *sdev, const char *name,
1322 const struct scsi_sense_hdr *sshdr)
1399{ 1323{
1400 const char *sense_txt; 1324 const char *sense_txt;
1401 1325
1402 sense_txt = scsi_sense_key_string(sshdr->sense_key); 1326 sense_txt = scsi_sense_key_string(sshdr->sense_key);
1403 if (sense_txt) 1327 if (sense_txt)
1404 printk("Sense Key : %s ", sense_txt); 1328 sdev_prefix_printk(KERN_INFO, sdev, name,
1329 "Sense Key : %s [%s]%s\n", sense_txt,
1330 scsi_sense_is_deferred(sshdr) ?
1331 "deferred" : "current",
1332 sshdr->response_code >= 0x72 ?
1333 " [descriptor]" : "");
1405 else 1334 else
1406 printk("Sense Key : 0x%x ", sshdr->sense_key); 1335 sdev_prefix_printk(KERN_INFO, sdev, name,
1407 1336 "Sense Key : 0x%x [%s]%s", sshdr->sense_key,
1408 printk("%s", scsi_sense_is_deferred(sshdr) ? "[deferred] " : 1337 scsi_sense_is_deferred(sshdr) ?
1409 "[current] "); 1338 "deferred" : "current",
1410 1339 sshdr->response_code >= 0x72 ?
1411 if (sshdr->response_code >= 0x72) 1340 " [descriptor]" : "");
1412 printk("[descriptor]");
1413
1414 printk("\n");
1415} 1341}
1416EXPORT_SYMBOL(scsi_show_sense_hdr); 1342EXPORT_SYMBOL(scsi_show_sense_hdr);
1417 1343
@@ -1419,141 +1345,55 @@ EXPORT_SYMBOL(scsi_show_sense_hdr);
1419 * Print normalized SCSI sense header with a prefix. 1345 * Print normalized SCSI sense header with a prefix.
1420 */ 1346 */
1421void 1347void
1422scsi_print_sense_hdr(const char *name, struct scsi_sense_hdr *sshdr) 1348scsi_print_sense_hdr(const struct scsi_device *sdev, const char *name,
1349 const struct scsi_sense_hdr *sshdr)
1423{ 1350{
1424 printk(KERN_INFO "%s: ", name); 1351 scsi_show_sense_hdr(sdev, name, sshdr);
1425 scsi_show_sense_hdr(sshdr); 1352 scsi_show_extd_sense(sdev, name, sshdr->asc, sshdr->ascq);
1426 printk(KERN_INFO "%s: ", name);
1427 scsi_show_extd_sense(sshdr->asc, sshdr->ascq);
1428} 1353}
1429EXPORT_SYMBOL(scsi_print_sense_hdr); 1354EXPORT_SYMBOL(scsi_print_sense_hdr);
1430 1355
1431/*
1432 * Print normalized SCSI sense header with device information and a prefix.
1433 */
1434void
1435scsi_cmd_print_sense_hdr(struct scsi_cmnd *scmd, const char *desc,
1436 struct scsi_sense_hdr *sshdr)
1437{
1438 scmd_printk(KERN_INFO, scmd, "%s: ", desc);
1439 scsi_show_sense_hdr(sshdr);
1440 scmd_printk(KERN_INFO, scmd, "%s: ", desc);
1441 scsi_show_extd_sense(sshdr->asc, sshdr->ascq);
1442}
1443EXPORT_SYMBOL(scsi_cmd_print_sense_hdr);
1444
1445static void 1356static void
1446scsi_decode_sense_buffer(const unsigned char *sense_buffer, int sense_len, 1357scsi_dump_sense_buffer(const unsigned char *sense_buffer, int sense_len)
1447 struct scsi_sense_hdr *sshdr)
1448{ 1358{
1449 int k, num, res; 1359 int k, num;
1450 1360
1451 res = scsi_normalize_sense(sense_buffer, sense_len, sshdr); 1361 num = (sense_len < 32) ? sense_len : 32;
1452 if (0 == res) { 1362 printk("Unrecognized sense data (in hex):");
1453 /* this may be SCSI-1 sense data */ 1363 for (k = 0; k < num; ++k) {
1454 num = (sense_len < 32) ? sense_len : 32; 1364 if (0 == (k % 16)) {
1455 printk("Unrecognized sense data (in hex):"); 1365 printk("\n");
1456 for (k = 0; k < num; ++k) { 1366 printk(KERN_INFO " ");
1457 if (0 == (k % 16)) {
1458 printk("\n");
1459 printk(KERN_INFO " ");
1460 }
1461 printk("%02x ", sense_buffer[k]);
1462 } 1367 }
1463 printk("\n"); 1368 printk("%02x ", sense_buffer[k]);
1464 return;
1465 } 1369 }
1466} 1370 printk("\n");
1467 1371 return;
1468static void
1469scsi_decode_sense_extras(const unsigned char *sense_buffer, int sense_len,
1470 struct scsi_sense_hdr *sshdr)
1471{
1472 int k, num, res;
1473
1474 if (sshdr->response_code < 0x72)
1475 {
1476 /* only decode extras for "fixed" format now */
1477 char buff[80];
1478 int blen, fixed_valid;
1479 unsigned int info;
1480
1481 fixed_valid = sense_buffer[0] & 0x80;
1482 info = ((sense_buffer[3] << 24) | (sense_buffer[4] << 16) |
1483 (sense_buffer[5] << 8) | sense_buffer[6]);
1484 res = 0;
1485 memset(buff, 0, sizeof(buff));
1486 blen = sizeof(buff) - 1;
1487 if (fixed_valid)
1488 res += snprintf(buff + res, blen - res,
1489 "Info fld=0x%x", info);
1490 if (sense_buffer[2] & 0x80) {
1491 /* current command has read a filemark */
1492 if (res > 0)
1493 res += snprintf(buff + res, blen - res, ", ");
1494 res += snprintf(buff + res, blen - res, "FMK");
1495 }
1496 if (sense_buffer[2] & 0x40) {
1497 /* end-of-medium condition exists */
1498 if (res > 0)
1499 res += snprintf(buff + res, blen - res, ", ");
1500 res += snprintf(buff + res, blen - res, "EOM");
1501 }
1502 if (sense_buffer[2] & 0x20) {
1503 /* incorrect block length requested */
1504 if (res > 0)
1505 res += snprintf(buff + res, blen - res, ", ");
1506 res += snprintf(buff + res, blen - res, "ILI");
1507 }
1508 if (res > 0)
1509 printk("%s\n", buff);
1510 } else if (sshdr->additional_length > 0) {
1511 /* descriptor format with sense descriptors */
1512 num = 8 + sshdr->additional_length;
1513 num = (sense_len < num) ? sense_len : num;
1514 printk("Descriptor sense data with sense descriptors "
1515 "(in hex):");
1516 for (k = 0; k < num; ++k) {
1517 if (0 == (k % 16)) {
1518 printk("\n");
1519 printk(KERN_INFO " ");
1520 }
1521 printk("%02x ", sense_buffer[k]);
1522 }
1523
1524 printk("\n");
1525 }
1526
1527} 1372}
1528 1373
1529/* Normalize and print sense buffer with name prefix */ 1374/* Normalize and print sense buffer with name prefix */
1530void __scsi_print_sense(const char *name, const unsigned char *sense_buffer, 1375void __scsi_print_sense(const struct scsi_device *sdev, const char *name,
1531 int sense_len) 1376 const unsigned char *sense_buffer, int sense_len)
1532{ 1377{
1533 struct scsi_sense_hdr sshdr; 1378 struct scsi_sense_hdr sshdr;
1534 1379
1535 printk(KERN_INFO "%s: ", name); 1380 if (!scsi_normalize_sense(sense_buffer, sense_len, &sshdr)) {
1536 scsi_decode_sense_buffer(sense_buffer, sense_len, &sshdr); 1381 scsi_dump_sense_buffer(sense_buffer, sense_len);
1537 scsi_show_sense_hdr(&sshdr); 1382 return;
1538 scsi_decode_sense_extras(sense_buffer, sense_len, &sshdr); 1383 }
1539 printk(KERN_INFO "%s: ", name); 1384 scsi_show_sense_hdr(sdev, name, &sshdr);
1540 scsi_show_extd_sense(sshdr.asc, sshdr.ascq); 1385 scsi_show_extd_sense(sdev, name, sshdr.asc, sshdr.ascq);
1541} 1386}
1542EXPORT_SYMBOL(__scsi_print_sense); 1387EXPORT_SYMBOL(__scsi_print_sense);
1543 1388
1544/* Normalize and print sense buffer in SCSI command */ 1389/* Normalize and print sense buffer in SCSI command */
1545void scsi_print_sense(char *name, struct scsi_cmnd *cmd) 1390void scsi_print_sense(const struct scsi_cmnd *cmd)
1546{ 1391{
1547 struct scsi_sense_hdr sshdr; 1392 struct gendisk *disk = cmd->request->rq_disk;
1393 const char *disk_name = disk ? disk->disk_name : NULL;
1548 1394
1549 scmd_printk(KERN_INFO, cmd, " "); 1395 __scsi_print_sense(cmd->device, disk_name, cmd->sense_buffer,
1550 scsi_decode_sense_buffer(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE, 1396 SCSI_SENSE_BUFFERSIZE);
1551 &sshdr);
1552 scsi_show_sense_hdr(&sshdr);
1553 scsi_decode_sense_extras(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
1554 &sshdr);
1555 scmd_printk(KERN_INFO, cmd, " ");
1556 scsi_show_extd_sense(sshdr.asc, sshdr.ascq);
1557} 1397}
1558EXPORT_SYMBOL(scsi_print_sense); 1398EXPORT_SYMBOL(scsi_print_sense);
1559 1399
@@ -1565,38 +1405,87 @@ static const char * const hostbyte_table[]={
1565"DID_PASSTHROUGH", "DID_SOFT_ERROR", "DID_IMM_RETRY", "DID_REQUEUE", 1405"DID_PASSTHROUGH", "DID_SOFT_ERROR", "DID_IMM_RETRY", "DID_REQUEUE",
1566"DID_TRANSPORT_DISRUPTED", "DID_TRANSPORT_FAILFAST", "DID_TARGET_FAILURE", 1406"DID_TRANSPORT_DISRUPTED", "DID_TRANSPORT_FAILFAST", "DID_TARGET_FAILURE",
1567"DID_NEXUS_FAILURE" }; 1407"DID_NEXUS_FAILURE" };
1568#define NUM_HOSTBYTE_STRS ARRAY_SIZE(hostbyte_table)
1569 1408
1570static const char * const driverbyte_table[]={ 1409static const char * const driverbyte_table[]={
1571"DRIVER_OK", "DRIVER_BUSY", "DRIVER_SOFT", "DRIVER_MEDIA", "DRIVER_ERROR", 1410"DRIVER_OK", "DRIVER_BUSY", "DRIVER_SOFT", "DRIVER_MEDIA", "DRIVER_ERROR",
1572"DRIVER_INVALID", "DRIVER_TIMEOUT", "DRIVER_HARD", "DRIVER_SENSE"}; 1411"DRIVER_INVALID", "DRIVER_TIMEOUT", "DRIVER_HARD", "DRIVER_SENSE"};
1573#define NUM_DRIVERBYTE_STRS ARRAY_SIZE(driverbyte_table)
1574 1412
1575void scsi_show_result(int result) 1413#endif
1414
1415const char *scsi_hostbyte_string(int result)
1576{ 1416{
1417 const char *hb_string = NULL;
1418#ifdef CONFIG_SCSI_CONSTANTS
1577 int hb = host_byte(result); 1419 int hb = host_byte(result);
1578 int db = driver_byte(result);
1579 1420
1580 printk("Result: hostbyte=%s driverbyte=%s\n", 1421 if (hb < ARRAY_SIZE(hostbyte_table))
1581 (hb < NUM_HOSTBYTE_STRS ? hostbyte_table[hb] : "invalid"), 1422 hb_string = hostbyte_table[hb];
1582 (db < NUM_DRIVERBYTE_STRS ? driverbyte_table[db] : "invalid")); 1423#endif
1424 return hb_string;
1583} 1425}
1426EXPORT_SYMBOL(scsi_hostbyte_string);
1584 1427
1585#else 1428const char *scsi_driverbyte_string(int result)
1586
1587void scsi_show_result(int result)
1588{ 1429{
1589 printk("Result: hostbyte=0x%02x driverbyte=0x%02x\n", 1430 const char *db_string = NULL;
1590 host_byte(result), driver_byte(result)); 1431#ifdef CONFIG_SCSI_CONSTANTS
1432 int db = driver_byte(result);
1433
1434 if (db < ARRAY_SIZE(driverbyte_table))
1435 db_string = driverbyte_table[db];
1436#endif
1437 return db_string;
1591} 1438}
1439EXPORT_SYMBOL(scsi_driverbyte_string);
1592 1440
1441#ifdef CONFIG_SCSI_CONSTANTS
1442#define scsi_mlreturn_name(result) { result, #result }
1443static const struct value_name_pair scsi_mlreturn_arr[] = {
1444 scsi_mlreturn_name(NEEDS_RETRY),
1445 scsi_mlreturn_name(SUCCESS),
1446 scsi_mlreturn_name(FAILED),
1447 scsi_mlreturn_name(QUEUED),
1448 scsi_mlreturn_name(SOFT_ERROR),
1449 scsi_mlreturn_name(ADD_TO_MLQUEUE),
1450 scsi_mlreturn_name(TIMEOUT_ERROR),
1451 scsi_mlreturn_name(SCSI_RETURN_NOT_HANDLED),
1452 scsi_mlreturn_name(FAST_IO_FAIL)
1453};
1593#endif 1454#endif
1594EXPORT_SYMBOL(scsi_show_result);
1595 1455
1456const char *scsi_mlreturn_string(int result)
1457{
1458#ifdef CONFIG_SCSI_CONSTANTS
1459 const struct value_name_pair *arr = scsi_mlreturn_arr;
1460 int k;
1596 1461
1597void scsi_print_result(struct scsi_cmnd *cmd) 1462 for (k = 0; k < ARRAY_SIZE(scsi_mlreturn_arr); ++k, ++arr) {
1463 if (result == arr->value)
1464 return arr->name;
1465 }
1466#endif
1467 return NULL;
1468}
1469EXPORT_SYMBOL(scsi_mlreturn_string);
1470
1471void scsi_print_result(struct scsi_cmnd *cmd, const char *msg, int disposition)
1598{ 1472{
1599 scmd_printk(KERN_INFO, cmd, " "); 1473 const char *mlret_string = scsi_mlreturn_string(disposition);
1600 scsi_show_result(cmd->result); 1474 const char *hb_string = scsi_hostbyte_string(cmd->result);
1475 const char *db_string = scsi_driverbyte_string(cmd->result);
1476
1477 if (hb_string || db_string)
1478 scmd_printk(KERN_INFO, cmd,
1479 "%s%s Result: hostbyte=%s driverbyte=%s",
1480 msg ? msg : "",
1481 mlret_string ? mlret_string : "UNKNOWN",
1482 hb_string ? hb_string : "invalid",
1483 db_string ? db_string : "invalid");
1484 else
1485 scmd_printk(KERN_INFO, cmd,
1486 "%s%s Result: hostbyte=0x%02x driverbyte=0x%02x",
1487 msg ? msg : "",
1488 mlret_string ? mlret_string : "UNKNOWN",
1489 host_byte(cmd->result), driver_byte(cmd->result));
1601} 1490}
1602EXPORT_SYMBOL(scsi_print_result); 1491EXPORT_SYMBOL(scsi_print_result);
diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c
index 86103c8475d8..4d0b6ce55f20 100644
--- a/drivers/scsi/csiostor/csio_scsi.c
+++ b/drivers/scsi/csiostor/csio_scsi.c
@@ -152,28 +152,6 @@ csio_scsi_itnexus_loss_error(uint16_t error)
152 return 0; 152 return 0;
153} 153}
154 154
155static inline void
156csio_scsi_tag(struct scsi_cmnd *scmnd, uint8_t *tag, uint8_t hq,
157 uint8_t oq, uint8_t sq)
158{
159 char stag[2];
160
161 if (scsi_populate_tag_msg(scmnd, stag)) {
162 switch (stag[0]) {
163 case HEAD_OF_QUEUE_TAG:
164 *tag = hq;
165 break;
166 case ORDERED_QUEUE_TAG:
167 *tag = oq;
168 break;
169 default:
170 *tag = sq;
171 break;
172 }
173 } else
174 *tag = 0;
175}
176
177/* 155/*
178 * csio_scsi_fcp_cmnd - Frame the SCSI FCP command paylod. 156 * csio_scsi_fcp_cmnd - Frame the SCSI FCP command paylod.
179 * @req: IO req structure. 157 * @req: IO req structure.
@@ -192,11 +170,12 @@ csio_scsi_fcp_cmnd(struct csio_ioreq *req, void *addr)
192 int_to_scsilun(scmnd->device->lun, &fcp_cmnd->fc_lun); 170 int_to_scsilun(scmnd->device->lun, &fcp_cmnd->fc_lun);
193 fcp_cmnd->fc_tm_flags = 0; 171 fcp_cmnd->fc_tm_flags = 0;
194 fcp_cmnd->fc_cmdref = 0; 172 fcp_cmnd->fc_cmdref = 0;
195 fcp_cmnd->fc_pri_ta = 0;
196 173
197 memcpy(fcp_cmnd->fc_cdb, scmnd->cmnd, 16); 174 memcpy(fcp_cmnd->fc_cdb, scmnd->cmnd, 16);
198 csio_scsi_tag(scmnd, &fcp_cmnd->fc_pri_ta, 175 if (scmnd->flags & SCMD_TAGGED)
199 FCP_PTA_HEADQ, FCP_PTA_ORDERED, FCP_PTA_SIMPLE); 176 fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
177 else
178 fcp_cmnd->fc_pri_ta = 0;
200 fcp_cmnd->fc_dl = cpu_to_be32(scsi_bufflen(scmnd)); 179 fcp_cmnd->fc_dl = cpu_to_be32(scsi_bufflen(scmnd));
201 180
202 if (req->nsge) 181 if (req->nsge)
@@ -2262,11 +2241,7 @@ csio_slave_alloc(struct scsi_device *sdev)
2262static int 2241static int
2263csio_slave_configure(struct scsi_device *sdev) 2242csio_slave_configure(struct scsi_device *sdev)
2264{ 2243{
2265 if (sdev->tagged_supported) 2244 scsi_change_queue_depth(sdev, csio_lun_qdepth);
2266 scsi_activate_tcq(sdev, csio_lun_qdepth);
2267 else
2268 scsi_deactivate_tcq(sdev, csio_lun_qdepth);
2269
2270 return 0; 2245 return 0;
2271} 2246}
2272 2247
@@ -2311,6 +2286,7 @@ struct scsi_host_template csio_fcoe_shost_template = {
2311 .use_clustering = ENABLE_CLUSTERING, 2286 .use_clustering = ENABLE_CLUSTERING,
2312 .shost_attrs = csio_fcoe_lport_attrs, 2287 .shost_attrs = csio_fcoe_lport_attrs,
2313 .max_sectors = CSIO_MAX_SECTOR_SIZE, 2288 .max_sectors = CSIO_MAX_SECTOR_SIZE,
2289 .use_blk_tags = 1,
2314}; 2290};
2315 2291
2316struct scsi_host_template csio_fcoe_shost_vport_template = { 2292struct scsi_host_template csio_fcoe_shost_vport_template = {
@@ -2330,6 +2306,7 @@ struct scsi_host_template csio_fcoe_shost_vport_template = {
2330 .use_clustering = ENABLE_CLUSTERING, 2306 .use_clustering = ENABLE_CLUSTERING,
2331 .shost_attrs = csio_fcoe_vport_attrs, 2307 .shost_attrs = csio_fcoe_vport_attrs,
2332 .max_sectors = CSIO_MAX_SECTOR_SIZE, 2308 .max_sectors = CSIO_MAX_SECTOR_SIZE,
2309 .use_blk_tags = 1,
2333}; 2310};
2334 2311
2335/* 2312/*
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
index 49692a1ac44a..3db4c63978c5 100644
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
@@ -86,7 +86,7 @@ static struct scsi_host_template cxgb3i_host_template = {
86 .proc_name = DRV_MODULE_NAME, 86 .proc_name = DRV_MODULE_NAME,
87 .can_queue = CXGB3I_SCSI_HOST_QDEPTH, 87 .can_queue = CXGB3I_SCSI_HOST_QDEPTH,
88 .queuecommand = iscsi_queuecommand, 88 .queuecommand = iscsi_queuecommand,
89 .change_queue_depth = iscsi_change_queue_depth, 89 .change_queue_depth = scsi_change_queue_depth,
90 .sg_tablesize = SG_ALL, 90 .sg_tablesize = SG_ALL,
91 .max_sectors = 0xFFFF, 91 .max_sectors = 0xFFFF,
92 .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN, 92 .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN,
@@ -96,6 +96,7 @@ static struct scsi_host_template cxgb3i_host_template = {
96 .target_alloc = iscsi_target_alloc, 96 .target_alloc = iscsi_target_alloc,
97 .use_clustering = DISABLE_CLUSTERING, 97 .use_clustering = DISABLE_CLUSTERING,
98 .this_id = -1, 98 .this_id = -1,
99 .track_queue_depth = 1,
99}; 100};
100 101
101static struct iscsi_transport cxgb3i_iscsi_transport = { 102static struct iscsi_transport cxgb3i_iscsi_transport = {
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 15081257cfc8..e6c3f55d9d36 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -89,7 +89,7 @@ static struct scsi_host_template cxgb4i_host_template = {
89 .proc_name = DRV_MODULE_NAME, 89 .proc_name = DRV_MODULE_NAME,
90 .can_queue = CXGB4I_SCSI_HOST_QDEPTH, 90 .can_queue = CXGB4I_SCSI_HOST_QDEPTH,
91 .queuecommand = iscsi_queuecommand, 91 .queuecommand = iscsi_queuecommand,
92 .change_queue_depth = iscsi_change_queue_depth, 92 .change_queue_depth = scsi_change_queue_depth,
93 .sg_tablesize = SG_ALL, 93 .sg_tablesize = SG_ALL,
94 .max_sectors = 0xFFFF, 94 .max_sectors = 0xFFFF,
95 .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN, 95 .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN,
@@ -99,6 +99,7 @@ static struct scsi_host_template cxgb4i_host_template = {
99 .target_alloc = iscsi_target_alloc, 99 .target_alloc = iscsi_target_alloc,
100 .use_clustering = DISABLE_CLUSTERING, 100 .use_clustering = DISABLE_CLUSTERING,
101 .this_id = -1, 101 .this_id = -1,
102 .track_queue_depth = 1,
102}; 103};
103 104
104static struct iscsi_transport cxgb4i_iscsi_transport = { 105static struct iscsi_transport cxgb4i_iscsi_transport = {
diff --git a/drivers/scsi/device_handler/scsi_dh.c b/drivers/scsi/device_handler/scsi_dh.c
index 33e422e75835..1dba62c5cf6a 100644
--- a/drivers/scsi/device_handler/scsi_dh.c
+++ b/drivers/scsi/device_handler/scsi_dh.c
@@ -98,27 +98,51 @@ device_handler_match(struct scsi_device_handler *scsi_dh,
98static int scsi_dh_handler_attach(struct scsi_device *sdev, 98static int scsi_dh_handler_attach(struct scsi_device *sdev,
99 struct scsi_device_handler *scsi_dh) 99 struct scsi_device_handler *scsi_dh)
100{ 100{
101 int err = 0; 101 struct scsi_dh_data *d;
102 102
103 if (sdev->scsi_dh_data) { 103 if (sdev->scsi_dh_data) {
104 if (sdev->scsi_dh_data->scsi_dh != scsi_dh) 104 if (sdev->scsi_dh_data->scsi_dh != scsi_dh)
105 err = -EBUSY; 105 return -EBUSY;
106 else 106
107 kref_get(&sdev->scsi_dh_data->kref); 107 kref_get(&sdev->scsi_dh_data->kref);
108 } else if (scsi_dh->attach) { 108 return 0;
109 err = scsi_dh->attach(sdev);
110 if (!err) {
111 kref_init(&sdev->scsi_dh_data->kref);
112 sdev->scsi_dh_data->sdev = sdev;
113 }
114 } 109 }
115 return err; 110
111 if (!try_module_get(scsi_dh->module))
112 return -EINVAL;
113
114 d = scsi_dh->attach(sdev);
115 if (IS_ERR(d)) {
116 sdev_printk(KERN_ERR, sdev, "%s: Attach failed (%ld)\n",
117 scsi_dh->name, PTR_ERR(d));
118 module_put(scsi_dh->module);
119 return PTR_ERR(d);
120 }
121
122 d->scsi_dh = scsi_dh;
123 kref_init(&d->kref);
124 d->sdev = sdev;
125
126 spin_lock_irq(sdev->request_queue->queue_lock);
127 sdev->scsi_dh_data = d;
128 spin_unlock_irq(sdev->request_queue->queue_lock);
129 return 0;
116} 130}
117 131
118static void __detach_handler (struct kref *kref) 132static void __detach_handler (struct kref *kref)
119{ 133{
120 struct scsi_dh_data *scsi_dh_data = container_of(kref, struct scsi_dh_data, kref); 134 struct scsi_dh_data *scsi_dh_data =
121 scsi_dh_data->scsi_dh->detach(scsi_dh_data->sdev); 135 container_of(kref, struct scsi_dh_data, kref);
136 struct scsi_device_handler *scsi_dh = scsi_dh_data->scsi_dh;
137 struct scsi_device *sdev = scsi_dh_data->sdev;
138
139 spin_lock_irq(sdev->request_queue->queue_lock);
140 sdev->scsi_dh_data = NULL;
141 spin_unlock_irq(sdev->request_queue->queue_lock);
142
143 scsi_dh->detach(sdev);
144 sdev_printk(KERN_NOTICE, sdev, "%s: Detached\n", scsi_dh->name);
145 module_put(scsi_dh->module);
122} 146}
123 147
124/* 148/*
@@ -141,7 +165,7 @@ static void scsi_dh_handler_detach(struct scsi_device *sdev,
141 if (!scsi_dh) 165 if (!scsi_dh)
142 scsi_dh = sdev->scsi_dh_data->scsi_dh; 166 scsi_dh = sdev->scsi_dh_data->scsi_dh;
143 167
144 if (scsi_dh && scsi_dh->detach) 168 if (scsi_dh)
145 kref_put(&sdev->scsi_dh_data->kref, __detach_handler); 169 kref_put(&sdev->scsi_dh_data->kref, __detach_handler);
146} 170}
147 171
@@ -330,6 +354,9 @@ int scsi_register_device_handler(struct scsi_device_handler *scsi_dh)
330 if (get_device_handler(scsi_dh->name)) 354 if (get_device_handler(scsi_dh->name))
331 return -EBUSY; 355 return -EBUSY;
332 356
357 if (!scsi_dh->attach || !scsi_dh->detach)
358 return -EINVAL;
359
333 spin_lock(&list_lock); 360 spin_lock(&list_lock);
334 list_add(&scsi_dh->list, &scsi_dh_list); 361 list_add(&scsi_dh->list, &scsi_dh_list);
335 spin_unlock(&list_lock); 362 spin_unlock(&list_lock);
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index fd78bdc53528..854b568b9931 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -62,6 +62,7 @@
62#define ALUA_OPTIMIZE_STPG 1 62#define ALUA_OPTIMIZE_STPG 1
63 63
64struct alua_dh_data { 64struct alua_dh_data {
65 struct scsi_dh_data dh_data;
65 int group_id; 66 int group_id;
66 int rel_port; 67 int rel_port;
67 int tpgs; 68 int tpgs;
@@ -87,9 +88,7 @@ static int alua_check_sense(struct scsi_device *, struct scsi_sense_hdr *);
87 88
88static inline struct alua_dh_data *get_alua_data(struct scsi_device *sdev) 89static inline struct alua_dh_data *get_alua_data(struct scsi_device *sdev)
89{ 90{
90 struct scsi_dh_data *scsi_dh_data = sdev->scsi_dh_data; 91 return container_of(sdev->scsi_dh_data, struct alua_dh_data, dh_data);
91 BUG_ON(scsi_dh_data == NULL);
92 return ((struct alua_dh_data *) scsi_dh_data->buf);
93} 92}
94 93
95static int realloc_buffer(struct alua_dh_data *h, unsigned len) 94static int realloc_buffer(struct alua_dh_data *h, unsigned len)
@@ -825,42 +824,18 @@ static bool alua_match(struct scsi_device *sdev)
825 return (scsi_device_tpgs(sdev) != 0); 824 return (scsi_device_tpgs(sdev) != 0);
826} 825}
827 826
828static int alua_bus_attach(struct scsi_device *sdev);
829static void alua_bus_detach(struct scsi_device *sdev);
830
831static struct scsi_device_handler alua_dh = {
832 .name = ALUA_DH_NAME,
833 .module = THIS_MODULE,
834 .attach = alua_bus_attach,
835 .detach = alua_bus_detach,
836 .prep_fn = alua_prep_fn,
837 .check_sense = alua_check_sense,
838 .activate = alua_activate,
839 .set_params = alua_set_params,
840 .match = alua_match,
841};
842
843/* 827/*
844 * alua_bus_attach - Attach device handler 828 * alua_bus_attach - Attach device handler
845 * @sdev: device to be attached to 829 * @sdev: device to be attached to
846 */ 830 */
847static int alua_bus_attach(struct scsi_device *sdev) 831static struct scsi_dh_data *alua_bus_attach(struct scsi_device *sdev)
848{ 832{
849 struct scsi_dh_data *scsi_dh_data;
850 struct alua_dh_data *h; 833 struct alua_dh_data *h;
851 unsigned long flags; 834 int err;
852 int err = SCSI_DH_OK;
853
854 scsi_dh_data = kzalloc(sizeof(*scsi_dh_data)
855 + sizeof(*h) , GFP_KERNEL);
856 if (!scsi_dh_data) {
857 sdev_printk(KERN_ERR, sdev, "%s: Attach failed\n",
858 ALUA_DH_NAME);
859 return -ENOMEM;
860 }
861 835
862 scsi_dh_data->scsi_dh = &alua_dh; 836 h = kzalloc(sizeof(*h) , GFP_KERNEL);
863 h = (struct alua_dh_data *) scsi_dh_data->buf; 837 if (!h)
838 return ERR_PTR(-ENOMEM);
864 h->tpgs = TPGS_MODE_UNINITIALIZED; 839 h->tpgs = TPGS_MODE_UNINITIALIZED;
865 h->state = TPGS_STATE_OPTIMIZED; 840 h->state = TPGS_STATE_OPTIMIZED;
866 h->group_id = -1; 841 h->group_id = -1;
@@ -870,23 +845,14 @@ static int alua_bus_attach(struct scsi_device *sdev)
870 h->sdev = sdev; 845 h->sdev = sdev;
871 846
872 err = alua_initialize(sdev, h); 847 err = alua_initialize(sdev, h);
873 if ((err != SCSI_DH_OK) && (err != SCSI_DH_DEV_OFFLINED)) 848 if (err != SCSI_DH_OK && err != SCSI_DH_DEV_OFFLINED)
874 goto failed;
875
876 if (!try_module_get(THIS_MODULE))
877 goto failed; 849 goto failed;
878 850
879 spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
880 sdev->scsi_dh_data = scsi_dh_data;
881 spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
882 sdev_printk(KERN_NOTICE, sdev, "%s: Attached\n", ALUA_DH_NAME); 851 sdev_printk(KERN_NOTICE, sdev, "%s: Attached\n", ALUA_DH_NAME);
883 852 return &h->dh_data;
884 return 0;
885
886failed: 853failed:
887 kfree(scsi_dh_data); 854 kfree(h);
888 sdev_printk(KERN_ERR, sdev, "%s: not attached\n", ALUA_DH_NAME); 855 return ERR_PTR(-EINVAL);
889 return -EINVAL;
890} 856}
891 857
892/* 858/*
@@ -895,23 +861,25 @@ failed:
895 */ 861 */
896static void alua_bus_detach(struct scsi_device *sdev) 862static void alua_bus_detach(struct scsi_device *sdev)
897{ 863{
898 struct scsi_dh_data *scsi_dh_data; 864 struct alua_dh_data *h = get_alua_data(sdev);
899 struct alua_dh_data *h;
900 unsigned long flags;
901
902 spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
903 scsi_dh_data = sdev->scsi_dh_data;
904 sdev->scsi_dh_data = NULL;
905 spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
906 865
907 h = (struct alua_dh_data *) scsi_dh_data->buf;
908 if (h->buff && h->inq != h->buff) 866 if (h->buff && h->inq != h->buff)
909 kfree(h->buff); 867 kfree(h->buff);
910 kfree(scsi_dh_data); 868 kfree(h);
911 module_put(THIS_MODULE);
912 sdev_printk(KERN_NOTICE, sdev, "%s: Detached\n", ALUA_DH_NAME);
913} 869}
914 870
871static struct scsi_device_handler alua_dh = {
872 .name = ALUA_DH_NAME,
873 .module = THIS_MODULE,
874 .attach = alua_bus_attach,
875 .detach = alua_bus_detach,
876 .prep_fn = alua_prep_fn,
877 .check_sense = alua_check_sense,
878 .activate = alua_activate,
879 .set_params = alua_set_params,
880 .match = alua_match,
881};
882
915static int __init alua_init(void) 883static int __init alua_init(void)
916{ 884{
917 int r; 885 int r;
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index 84765384c47c..6ed1caadbc6a 100644
--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -72,6 +72,7 @@ static const char * lun_state[] =
72}; 72};
73 73
74struct clariion_dh_data { 74struct clariion_dh_data {
75 struct scsi_dh_data dh_data;
75 /* 76 /*
76 * Flags: 77 * Flags:
77 * CLARIION_SHORT_TRESPASS 78 * CLARIION_SHORT_TRESPASS
@@ -116,9 +117,8 @@ struct clariion_dh_data {
116static inline struct clariion_dh_data 117static inline struct clariion_dh_data
117 *get_clariion_data(struct scsi_device *sdev) 118 *get_clariion_data(struct scsi_device *sdev)
118{ 119{
119 struct scsi_dh_data *scsi_dh_data = sdev->scsi_dh_data; 120 return container_of(sdev->scsi_dh_data, struct clariion_dh_data,
120 BUG_ON(scsi_dh_data == NULL); 121 dh_data);
121 return ((struct clariion_dh_data *) scsi_dh_data->buf);
122} 122}
123 123
124/* 124/*
@@ -622,7 +622,10 @@ done:
622 return result; 622 return result;
623} 623}
624 624
625static const struct scsi_dh_devlist clariion_dev_list[] = { 625static const struct {
626 char *vendor;
627 char *model;
628} clariion_dev_list[] = {
626 {"DGC", "RAID"}, 629 {"DGC", "RAID"},
627 {"DGC", "DISK"}, 630 {"DGC", "DISK"},
628 {"DGC", "VRAID"}, 631 {"DGC", "VRAID"},
@@ -647,39 +650,14 @@ static bool clariion_match(struct scsi_device *sdev)
647 return false; 650 return false;
648} 651}
649 652
650static int clariion_bus_attach(struct scsi_device *sdev); 653static struct scsi_dh_data *clariion_bus_attach(struct scsi_device *sdev)
651static void clariion_bus_detach(struct scsi_device *sdev);
652
653static struct scsi_device_handler clariion_dh = {
654 .name = CLARIION_NAME,
655 .module = THIS_MODULE,
656 .devlist = clariion_dev_list,
657 .attach = clariion_bus_attach,
658 .detach = clariion_bus_detach,
659 .check_sense = clariion_check_sense,
660 .activate = clariion_activate,
661 .prep_fn = clariion_prep_fn,
662 .set_params = clariion_set_params,
663 .match = clariion_match,
664};
665
666static int clariion_bus_attach(struct scsi_device *sdev)
667{ 654{
668 struct scsi_dh_data *scsi_dh_data;
669 struct clariion_dh_data *h; 655 struct clariion_dh_data *h;
670 unsigned long flags;
671 int err; 656 int err;
672 657
673 scsi_dh_data = kzalloc(sizeof(*scsi_dh_data) 658 h = kzalloc(sizeof(*h) , GFP_KERNEL);
674 + sizeof(*h) , GFP_KERNEL); 659 if (!h)
675 if (!scsi_dh_data) { 660 return ERR_PTR(-ENOMEM);
676 sdev_printk(KERN_ERR, sdev, "%s: Attach failed\n",
677 CLARIION_NAME);
678 return -ENOMEM;
679 }
680
681 scsi_dh_data->scsi_dh = &clariion_dh;
682 h = (struct clariion_dh_data *) scsi_dh_data->buf;
683 h->lun_state = CLARIION_LUN_UNINITIALIZED; 661 h->lun_state = CLARIION_LUN_UNINITIALIZED;
684 h->default_sp = CLARIION_UNBOUND_LU; 662 h->default_sp = CLARIION_UNBOUND_LU;
685 h->current_sp = CLARIION_UNBOUND_LU; 663 h->current_sp = CLARIION_UNBOUND_LU;
@@ -692,45 +670,37 @@ static int clariion_bus_attach(struct scsi_device *sdev)
692 if (err != SCSI_DH_OK) 670 if (err != SCSI_DH_OK)
693 goto failed; 671 goto failed;
694 672
695 if (!try_module_get(THIS_MODULE))
696 goto failed;
697
698 spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
699 sdev->scsi_dh_data = scsi_dh_data;
700 spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
701
702 sdev_printk(KERN_INFO, sdev, 673 sdev_printk(KERN_INFO, sdev,
703 "%s: connected to SP %c Port %d (%s, default SP %c)\n", 674 "%s: connected to SP %c Port %d (%s, default SP %c)\n",
704 CLARIION_NAME, h->current_sp + 'A', 675 CLARIION_NAME, h->current_sp + 'A',
705 h->port, lun_state[h->lun_state], 676 h->port, lun_state[h->lun_state],
706 h->default_sp + 'A'); 677 h->default_sp + 'A');
707 678 return &h->dh_data;
708 return 0;
709 679
710failed: 680failed:
711 kfree(scsi_dh_data); 681 kfree(h);
712 sdev_printk(KERN_ERR, sdev, "%s: not attached\n", 682 return ERR_PTR(-EINVAL);
713 CLARIION_NAME);
714 return -EINVAL;
715} 683}
716 684
717static void clariion_bus_detach(struct scsi_device *sdev) 685static void clariion_bus_detach(struct scsi_device *sdev)
718{ 686{
719 struct scsi_dh_data *scsi_dh_data; 687 struct clariion_dh_data *h = get_clariion_data(sdev);
720 unsigned long flags;
721
722 spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
723 scsi_dh_data = sdev->scsi_dh_data;
724 sdev->scsi_dh_data = NULL;
725 spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
726
727 sdev_printk(KERN_NOTICE, sdev, "%s: Detached\n",
728 CLARIION_NAME);
729 688
730 kfree(scsi_dh_data); 689 kfree(h);
731 module_put(THIS_MODULE);
732} 690}
733 691
692static struct scsi_device_handler clariion_dh = {
693 .name = CLARIION_NAME,
694 .module = THIS_MODULE,
695 .attach = clariion_bus_attach,
696 .detach = clariion_bus_detach,
697 .check_sense = clariion_check_sense,
698 .activate = clariion_activate,
699 .prep_fn = clariion_prep_fn,
700 .set_params = clariion_set_params,
701 .match = clariion_match,
702};
703
734static int __init clariion_init(void) 704static int __init clariion_init(void)
735{ 705{
736 int r; 706 int r;
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
index 4ee2759f5299..485d99544a15 100644
--- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c
+++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
@@ -38,6 +38,7 @@
38#define HP_SW_PATH_PASSIVE 1 38#define HP_SW_PATH_PASSIVE 1
39 39
40struct hp_sw_dh_data { 40struct hp_sw_dh_data {
41 struct scsi_dh_data dh_data;
41 unsigned char sense[SCSI_SENSE_BUFFERSIZE]; 42 unsigned char sense[SCSI_SENSE_BUFFERSIZE];
42 int path_state; 43 int path_state;
43 int retries; 44 int retries;
@@ -51,9 +52,7 @@ static int hp_sw_start_stop(struct hp_sw_dh_data *);
51 52
52static inline struct hp_sw_dh_data *get_hp_sw_data(struct scsi_device *sdev) 53static inline struct hp_sw_dh_data *get_hp_sw_data(struct scsi_device *sdev)
53{ 54{
54 struct scsi_dh_data *scsi_dh_data = sdev->scsi_dh_data; 55 return container_of(sdev->scsi_dh_data, struct hp_sw_dh_data, dh_data);
55 BUG_ON(scsi_dh_data == NULL);
56 return ((struct hp_sw_dh_data *) scsi_dh_data->buf);
57} 56}
58 57
59/* 58/*
@@ -312,7 +311,10 @@ static int hp_sw_activate(struct scsi_device *sdev,
312 return 0; 311 return 0;
313} 312}
314 313
315static const struct scsi_dh_devlist hp_sw_dh_data_list[] = { 314static const struct {
315 char *vendor;
316 char *model;
317} hp_sw_dh_data_list[] = {
316 {"COMPAQ", "MSA1000 VOLUME"}, 318 {"COMPAQ", "MSA1000 VOLUME"},
317 {"COMPAQ", "HSV110"}, 319 {"COMPAQ", "HSV110"},
318 {"HP", "HSV100"}, 320 {"HP", "HSV100"},
@@ -338,37 +340,14 @@ static bool hp_sw_match(struct scsi_device *sdev)
338 return false; 340 return false;
339} 341}
340 342
341static int hp_sw_bus_attach(struct scsi_device *sdev); 343static struct scsi_dh_data *hp_sw_bus_attach(struct scsi_device *sdev)
342static void hp_sw_bus_detach(struct scsi_device *sdev);
343
344static struct scsi_device_handler hp_sw_dh = {
345 .name = HP_SW_NAME,
346 .module = THIS_MODULE,
347 .devlist = hp_sw_dh_data_list,
348 .attach = hp_sw_bus_attach,
349 .detach = hp_sw_bus_detach,
350 .activate = hp_sw_activate,
351 .prep_fn = hp_sw_prep_fn,
352 .match = hp_sw_match,
353};
354
355static int hp_sw_bus_attach(struct scsi_device *sdev)
356{ 344{
357 struct scsi_dh_data *scsi_dh_data;
358 struct hp_sw_dh_data *h; 345 struct hp_sw_dh_data *h;
359 unsigned long flags;
360 int ret; 346 int ret;
361 347
362 scsi_dh_data = kzalloc(sizeof(*scsi_dh_data) 348 h = kzalloc(sizeof(*h), GFP_KERNEL);
363 + sizeof(*h) , GFP_KERNEL); 349 if (!h)
364 if (!scsi_dh_data) { 350 return ERR_PTR(-ENOMEM);
365 sdev_printk(KERN_ERR, sdev, "%s: Attach Failed\n",
366 HP_SW_NAME);
367 return 0;
368 }
369
370 scsi_dh_data->scsi_dh = &hp_sw_dh;
371 h = (struct hp_sw_dh_data *) scsi_dh_data->buf;
372 h->path_state = HP_SW_PATH_UNINITIALIZED; 351 h->path_state = HP_SW_PATH_UNINITIALIZED;
373 h->retries = HP_SW_RETRIES; 352 h->retries = HP_SW_RETRIES;
374 h->sdev = sdev; 353 h->sdev = sdev;
@@ -377,42 +356,32 @@ static int hp_sw_bus_attach(struct scsi_device *sdev)
377 if (ret != SCSI_DH_OK || h->path_state == HP_SW_PATH_UNINITIALIZED) 356 if (ret != SCSI_DH_OK || h->path_state == HP_SW_PATH_UNINITIALIZED)
378 goto failed; 357 goto failed;
379 358
380 if (!try_module_get(THIS_MODULE))
381 goto failed;
382
383 spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
384 sdev->scsi_dh_data = scsi_dh_data;
385 spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
386
387 sdev_printk(KERN_INFO, sdev, "%s: attached to %s path\n", 359 sdev_printk(KERN_INFO, sdev, "%s: attached to %s path\n",
388 HP_SW_NAME, h->path_state == HP_SW_PATH_ACTIVE? 360 HP_SW_NAME, h->path_state == HP_SW_PATH_ACTIVE?
389 "active":"passive"); 361 "active":"passive");
390 362 return &h->dh_data;
391 return 0;
392
393failed: 363failed:
394 kfree(scsi_dh_data); 364 kfree(h);
395 sdev_printk(KERN_ERR, sdev, "%s: not attached\n", 365 return ERR_PTR(-EINVAL);
396 HP_SW_NAME);
397 return -EINVAL;
398} 366}
399 367
400static void hp_sw_bus_detach( struct scsi_device *sdev ) 368static void hp_sw_bus_detach( struct scsi_device *sdev )
401{ 369{
402 struct scsi_dh_data *scsi_dh_data; 370 struct hp_sw_dh_data *h = get_hp_sw_data(sdev);
403 unsigned long flags;
404
405 spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
406 scsi_dh_data = sdev->scsi_dh_data;
407 sdev->scsi_dh_data = NULL;
408 spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
409 module_put(THIS_MODULE);
410
411 sdev_printk(KERN_NOTICE, sdev, "%s: Detached\n", HP_SW_NAME);
412 371
413 kfree(scsi_dh_data); 372 kfree(h);
414} 373}
415 374
375static struct scsi_device_handler hp_sw_dh = {
376 .name = HP_SW_NAME,
377 .module = THIS_MODULE,
378 .attach = hp_sw_bus_attach,
379 .detach = hp_sw_bus_detach,
380 .activate = hp_sw_activate,
381 .prep_fn = hp_sw_prep_fn,
382 .match = hp_sw_match,
383};
384
416static int __init hp_sw_init(void) 385static int __init hp_sw_init(void)
417{ 386{
418 return scsi_register_device_handler(&hp_sw_dh); 387 return scsi_register_device_handler(&hp_sw_dh);
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 1b5bc9293e37..b46ace3d4bf0 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -181,6 +181,7 @@ struct c2_inquiry {
181}; 181};
182 182
183struct rdac_dh_data { 183struct rdac_dh_data {
184 struct scsi_dh_data dh_data;
184 struct rdac_controller *ctlr; 185 struct rdac_controller *ctlr;
185#define UNINITIALIZED_LUN (1 << 8) 186#define UNINITIALIZED_LUN (1 << 8)
186 unsigned lun; 187 unsigned lun;
@@ -261,9 +262,7 @@ do { \
261 262
262static inline struct rdac_dh_data *get_rdac_data(struct scsi_device *sdev) 263static inline struct rdac_dh_data *get_rdac_data(struct scsi_device *sdev)
263{ 264{
264 struct scsi_dh_data *scsi_dh_data = sdev->scsi_dh_data; 265 return container_of(sdev->scsi_dh_data, struct rdac_dh_data, dh_data);
265 BUG_ON(scsi_dh_data == NULL);
266 return ((struct rdac_dh_data *) scsi_dh_data->buf);
267} 266}
268 267
269static struct request *get_rdac_req(struct scsi_device *sdev, 268static struct request *get_rdac_req(struct scsi_device *sdev,
@@ -779,7 +778,10 @@ static int rdac_check_sense(struct scsi_device *sdev,
779 return SCSI_RETURN_NOT_HANDLED; 778 return SCSI_RETURN_NOT_HANDLED;
780} 779}
781 780
782static const struct scsi_dh_devlist rdac_dev_list[] = { 781static const struct {
782 char *vendor;
783 char *model;
784} rdac_dev_list[] = {
783 {"IBM", "1722"}, 785 {"IBM", "1722"},
784 {"IBM", "1724"}, 786 {"IBM", "1724"},
785 {"IBM", "1726"}, 787 {"IBM", "1726"},
@@ -825,40 +827,16 @@ static bool rdac_match(struct scsi_device *sdev)
825 return false; 827 return false;
826} 828}
827 829
828static int rdac_bus_attach(struct scsi_device *sdev); 830static struct scsi_dh_data *rdac_bus_attach(struct scsi_device *sdev)
829static void rdac_bus_detach(struct scsi_device *sdev);
830
831static struct scsi_device_handler rdac_dh = {
832 .name = RDAC_NAME,
833 .module = THIS_MODULE,
834 .devlist = rdac_dev_list,
835 .prep_fn = rdac_prep_fn,
836 .check_sense = rdac_check_sense,
837 .attach = rdac_bus_attach,
838 .detach = rdac_bus_detach,
839 .activate = rdac_activate,
840 .match = rdac_match,
841};
842
843static int rdac_bus_attach(struct scsi_device *sdev)
844{ 831{
845 struct scsi_dh_data *scsi_dh_data;
846 struct rdac_dh_data *h; 832 struct rdac_dh_data *h;
847 unsigned long flags;
848 int err; 833 int err;
849 char array_name[ARRAY_LABEL_LEN]; 834 char array_name[ARRAY_LABEL_LEN];
850 char array_id[UNIQUE_ID_LEN]; 835 char array_id[UNIQUE_ID_LEN];
851 836
852 scsi_dh_data = kzalloc(sizeof(*scsi_dh_data) 837 h = kzalloc(sizeof(*h) , GFP_KERNEL);
853 + sizeof(*h) , GFP_KERNEL); 838 if (!h)
854 if (!scsi_dh_data) { 839 return ERR_PTR(-ENOMEM);
855 sdev_printk(KERN_ERR, sdev, "%s: Attach failed\n",
856 RDAC_NAME);
857 return -ENOMEM;
858 }
859
860 scsi_dh_data->scsi_dh = &rdac_dh;
861 h = (struct rdac_dh_data *) scsi_dh_data->buf;
862 h->lun = UNINITIALIZED_LUN; 840 h->lun = UNINITIALIZED_LUN;
863 h->state = RDAC_STATE_ACTIVE; 841 h->state = RDAC_STATE_ACTIVE;
864 842
@@ -878,19 +856,12 @@ static int rdac_bus_attach(struct scsi_device *sdev)
878 if (err != SCSI_DH_OK) 856 if (err != SCSI_DH_OK)
879 goto clean_ctlr; 857 goto clean_ctlr;
880 858
881 if (!try_module_get(THIS_MODULE))
882 goto clean_ctlr;
883
884 spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
885 sdev->scsi_dh_data = scsi_dh_data;
886 spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
887
888 sdev_printk(KERN_NOTICE, sdev, 859 sdev_printk(KERN_NOTICE, sdev,
889 "%s: LUN %d (%s) (%s)\n", 860 "%s: LUN %d (%s) (%s)\n",
890 RDAC_NAME, h->lun, mode[(int)h->mode], 861 RDAC_NAME, h->lun, mode[(int)h->mode],
891 lun_state[(int)h->lun_state]); 862 lun_state[(int)h->lun_state]);
892 863
893 return 0; 864 return &h->dh_data;
894 865
895clean_ctlr: 866clean_ctlr:
896 spin_lock(&list_lock); 867 spin_lock(&list_lock);
@@ -898,37 +869,34 @@ clean_ctlr:
898 spin_unlock(&list_lock); 869 spin_unlock(&list_lock);
899 870
900failed: 871failed:
901 kfree(scsi_dh_data); 872 kfree(h);
902 sdev_printk(KERN_ERR, sdev, "%s: not attached\n", 873 return ERR_PTR(-EINVAL);
903 RDAC_NAME);
904 return -EINVAL;
905} 874}
906 875
907static void rdac_bus_detach( struct scsi_device *sdev ) 876static void rdac_bus_detach( struct scsi_device *sdev )
908{ 877{
909 struct scsi_dh_data *scsi_dh_data; 878 struct rdac_dh_data *h = get_rdac_data(sdev);
910 struct rdac_dh_data *h;
911 unsigned long flags;
912 879
913 scsi_dh_data = sdev->scsi_dh_data;
914 h = (struct rdac_dh_data *) scsi_dh_data->buf;
915 if (h->ctlr && h->ctlr->ms_queued) 880 if (h->ctlr && h->ctlr->ms_queued)
916 flush_workqueue(kmpath_rdacd); 881 flush_workqueue(kmpath_rdacd);
917 882
918 spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
919 sdev->scsi_dh_data = NULL;
920 spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
921
922 spin_lock(&list_lock); 883 spin_lock(&list_lock);
923 if (h->ctlr) 884 if (h->ctlr)
924 kref_put(&h->ctlr->kref, release_controller); 885 kref_put(&h->ctlr->kref, release_controller);
925 spin_unlock(&list_lock); 886 spin_unlock(&list_lock);
926 kfree(scsi_dh_data); 887 kfree(h);
927 module_put(THIS_MODULE);
928 sdev_printk(KERN_NOTICE, sdev, "%s: Detached\n", RDAC_NAME);
929} 888}
930 889
931 890static struct scsi_device_handler rdac_dh = {
891 .name = RDAC_NAME,
892 .module = THIS_MODULE,
893 .prep_fn = rdac_prep_fn,
894 .check_sense = rdac_check_sense,
895 .attach = rdac_bus_attach,
896 .detach = rdac_bus_detach,
897 .activate = rdac_activate,
898 .match = rdac_match,
899};
932 900
933static int __init rdac_init(void) 901static int __init rdac_init(void)
934{ 902{
diff --git a/drivers/scsi/dmx3191d.c b/drivers/scsi/dmx3191d.c
index 4b0dd8c56707..3e088125a8be 100644
--- a/drivers/scsi/dmx3191d.c
+++ b/drivers/scsi/dmx3191d.c
@@ -33,20 +33,20 @@
33/* 33/*
34 * Definitions for the generic 5380 driver. 34 * Definitions for the generic 5380 driver.
35 */ 35 */
36#define AUTOSENSE 36
37#define DONT_USE_INTR
37 38
38#define NCR5380_read(reg) inb(port + reg) 39#define NCR5380_read(reg) inb(port + reg)
39#define NCR5380_write(reg, value) outb(value, port + reg) 40#define NCR5380_write(reg, value) outb(value, port + reg)
40 41
41#define NCR5380_implementation_fields unsigned int port 42#define NCR5380_implementation_fields /* none */
42#define NCR5380_local_declare() NCR5380_implementation_fields 43#define NCR5380_local_declare() unsigned int port
43#define NCR5380_setup(instance) port = instance->io_port 44#define NCR5380_setup(instance) port = instance->io_port
44 45
45/* 46/*
46 * Includes needed for NCR5380.[ch] (XXX: Move them to NCR5380.h) 47 * Includes needed for NCR5380.[ch] (XXX: Move them to NCR5380.h)
47 */ 48 */
48#include <linux/delay.h> 49#include <linux/delay.h>
49#include "scsi.h"
50 50
51#include "NCR5380.h" 51#include "NCR5380.h"
52#include "NCR5380.c" 52#include "NCR5380.c"
@@ -58,6 +58,7 @@
58static struct scsi_host_template dmx3191d_driver_template = { 58static struct scsi_host_template dmx3191d_driver_template = {
59 .proc_name = DMX3191D_DRIVER_NAME, 59 .proc_name = DMX3191D_DRIVER_NAME,
60 .name = "Domex DMX3191D", 60 .name = "Domex DMX3191D",
61 .info = NCR5380_info,
61 .queuecommand = NCR5380_queue_command, 62 .queuecommand = NCR5380_queue_command,
62 .eh_abort_handler = NCR5380_abort, 63 .eh_abort_handler = NCR5380_abort,
63 .eh_bus_reset_handler = NCR5380_bus_reset, 64 .eh_bus_reset_handler = NCR5380_bus_reset,
@@ -90,31 +91,23 @@ static int dmx3191d_probe_one(struct pci_dev *pdev,
90 if (!shost) 91 if (!shost)
91 goto out_release_region; 92 goto out_release_region;
92 shost->io_port = io; 93 shost->io_port = io;
93 shost->irq = pdev->irq;
94 94
95 NCR5380_init(shost, FLAG_NO_PSEUDO_DMA | FLAG_DTC3181E); 95 /* This card does not seem to raise an interrupt on pdev->irq.
96 * Steam-powered SCSI controllers run without an IRQ anyway.
97 */
98 shost->irq = NO_IRQ;
96 99
97 if (request_irq(pdev->irq, NCR5380_intr, IRQF_SHARED, 100 NCR5380_init(shost, FLAG_NO_PSEUDO_DMA | FLAG_DTC3181E);
98 DMX3191D_DRIVER_NAME, shost)) {
99 /*
100 * Steam powered scsi controllers run without an IRQ anyway
101 */
102 printk(KERN_WARNING "dmx3191: IRQ %d not available - "
103 "switching to polled mode.\n", pdev->irq);
104 shost->irq = SCSI_IRQ_NONE;
105 }
106 101
107 pci_set_drvdata(pdev, shost); 102 pci_set_drvdata(pdev, shost);
108 103
109 error = scsi_add_host(shost, &pdev->dev); 104 error = scsi_add_host(shost, &pdev->dev);
110 if (error) 105 if (error)
111 goto out_free_irq; 106 goto out_release_region;
112 107
113 scsi_scan_host(shost); 108 scsi_scan_host(shost);
114 return 0; 109 return 0;
115 110
116 out_free_irq:
117 free_irq(shost->irq, shost);
118 out_release_region: 111 out_release_region:
119 release_region(io, DMX3191D_REGION_LEN); 112 release_region(io, DMX3191D_REGION_LEN);
120 out_disable_device: 113 out_disable_device:
@@ -131,8 +124,6 @@ static void dmx3191d_remove_one(struct pci_dev *pdev)
131 124
132 NCR5380_exit(shost); 125 NCR5380_exit(shost);
133 126
134 if (shost->irq != SCSI_IRQ_NONE)
135 free_irq(shost->irq, shost);
136 release_region(shost->io_port, DMX3191D_REGION_LEN); 127 release_region(shost->io_port, DMX3191D_REGION_LEN);
137 pci_disable_device(pdev); 128 pci_disable_device(pdev);
138 129
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index 072f0ec2851e..0bf976936a10 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -415,10 +415,8 @@ static int adpt_slave_configure(struct scsi_device * device)
415 pHba = (adpt_hba *) host->hostdata[0]; 415 pHba = (adpt_hba *) host->hostdata[0];
416 416
417 if (host->can_queue && device->tagged_supported) { 417 if (host->can_queue && device->tagged_supported) {
418 scsi_adjust_queue_depth(device, MSG_SIMPLE_TAG, 418 scsi_change_queue_depth(device,
419 host->can_queue - 1); 419 host->can_queue - 1);
420 } else {
421 scsi_adjust_queue_depth(device, 0, 1);
422 } 420 }
423 return 0; 421 return 0;
424} 422}
diff --git a/drivers/scsi/dtc.c b/drivers/scsi/dtc.c
index 0a667fe05006..4c74c7ba2dff 100644
--- a/drivers/scsi/dtc.c
+++ b/drivers/scsi/dtc.c
@@ -1,5 +1,4 @@
1 1
2#define AUTOSENSE
3#define PSEUDO_DMA 2#define PSEUDO_DMA
4#define DONT_USE_INTR 3#define DONT_USE_INTR
5#define UNSAFE /* Leave interrupts enabled during pseudo-dma I/O */ 4#define UNSAFE /* Leave interrupts enabled during pseudo-dma I/O */
@@ -18,29 +17,9 @@
18 * (Unix and Linux consulting and custom programming) 17 * (Unix and Linux consulting and custom programming)
19 * drew@colorado.edu 18 * drew@colorado.edu
20 * +1 (303) 440-4894 19 * +1 (303) 440-4894
21 * 20 */
22 * DISTRIBUTION RELEASE 1.
23 *
24 * For more information, please consult
25 *
26 * NCR 5380 Family
27 * SCSI Protocol Controller
28 * Databook
29*/
30 21
31/* 22/*
32 * Options :
33 * AUTOSENSE - if defined, REQUEST SENSE will be performed automatically
34 * for commands that return with a CHECK CONDITION status.
35 *
36 * PSEUDO_DMA - enables PSEUDO-DMA hardware, should give a 3-4X performance
37 * increase compared to polled I/O.
38 *
39 * PARITY - enable parity checking. Not supported.
40 *
41 * UNSAFE - leave interrupts enabled during pseudo-DMA transfers.
42 * You probably want this.
43 *
44 * The card is detected and initialized in one of several ways : 23 * The card is detected and initialized in one of several ways :
45 * 1. Autoprobe (default) - since the board is memory mapped, 24 * 1. Autoprobe (default) - since the board is memory mapped,
46 * a BIOS signature is scanned for to locate the registers. 25 * a BIOS signature is scanned for to locate the registers.
@@ -79,15 +58,11 @@
79#include <linux/init.h> 58#include <linux/init.h>
80#include <linux/interrupt.h> 59#include <linux/interrupt.h>
81#include <linux/io.h> 60#include <linux/io.h>
82#include "scsi.h"
83#include <scsi/scsi_host.h> 61#include <scsi/scsi_host.h>
84#include "dtc.h" 62#include "dtc.h"
85#define AUTOPROBE_IRQ 63#define AUTOPROBE_IRQ
86#include "NCR5380.h" 64#include "NCR5380.h"
87 65
88
89#define DTC_PUBLIC_RELEASE 2
90
91/* 66/*
92 * The DTC3180 & 3280 boards are memory mapped. 67 * The DTC3180 & 3280 boards are memory mapped.
93 * 68 *
@@ -173,10 +148,13 @@ static const struct signature {
173 * 148 *
174 */ 149 */
175 150
176static void __init dtc_setup(char *str, int *ints) 151static int __init dtc_setup(char *str)
177{ 152{
178 static int commandline_current = 0; 153 static int commandline_current = 0;
179 int i; 154 int i;
155 int ints[10];
156
157 get_options(str, ARRAY_SIZE(ints), ints);
180 if (ints[0] != 2) 158 if (ints[0] != 2)
181 printk("dtc_setup: usage dtc=address,irq\n"); 159 printk("dtc_setup: usage dtc=address,irq\n");
182 else if (commandline_current < NO_OVERRIDES) { 160 else if (commandline_current < NO_OVERRIDES) {
@@ -189,7 +167,10 @@ static void __init dtc_setup(char *str, int *ints)
189 } 167 }
190 ++commandline_current; 168 ++commandline_current;
191 } 169 }
170 return 1;
192} 171}
172
173__setup("dtc=", dtc_setup);
193#endif 174#endif
194 175
195/* 176/*
@@ -213,10 +194,6 @@ static int __init dtc_detect(struct scsi_host_template * tpnt)
213 void __iomem *base; 194 void __iomem *base;
214 int sig, count; 195 int sig, count;
215 196
216 tpnt->proc_name = "dtc3x80";
217 tpnt->show_info = dtc_show_info;
218 tpnt->write_info = dtc_write_info;
219
220 for (count = 0; current_override < NO_OVERRIDES; ++current_override) { 197 for (count = 0; current_override < NO_OVERRIDES; ++current_override) {
221 addr = 0; 198 addr = 0;
222 base = NULL; 199 base = NULL;
@@ -271,38 +248,33 @@ found:
271 else 248 else
272 instance->irq = NCR5380_probe_irq(instance, DTC_IRQS); 249 instance->irq = NCR5380_probe_irq(instance, DTC_IRQS);
273 250
251 /* Compatibility with documented NCR5380 kernel parameters */
252 if (instance->irq == 255)
253 instance->irq = NO_IRQ;
254
274#ifndef DONT_USE_INTR 255#ifndef DONT_USE_INTR
275 /* With interrupts enabled, it will sometimes hang when doing heavy 256 /* With interrupts enabled, it will sometimes hang when doing heavy
276 * reads. So better not enable them until I finger it out. */ 257 * reads. So better not enable them until I finger it out. */
277 if (instance->irq != SCSI_IRQ_NONE) 258 if (instance->irq != NO_IRQ)
278 if (request_irq(instance->irq, dtc_intr, 0, 259 if (request_irq(instance->irq, dtc_intr, 0,
279 "dtc", instance)) { 260 "dtc", instance)) {
280 printk(KERN_ERR "scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq); 261 printk(KERN_ERR "scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq);
281 instance->irq = SCSI_IRQ_NONE; 262 instance->irq = NO_IRQ;
282 } 263 }
283 264
284 if (instance->irq == SCSI_IRQ_NONE) { 265 if (instance->irq == NO_IRQ) {
285 printk(KERN_WARNING "scsi%d : interrupts not enabled. for better interactive performance,\n", instance->host_no); 266 printk(KERN_WARNING "scsi%d : interrupts not enabled. for better interactive performance,\n", instance->host_no);
286 printk(KERN_WARNING "scsi%d : please jumper the board for a free IRQ.\n", instance->host_no); 267 printk(KERN_WARNING "scsi%d : please jumper the board for a free IRQ.\n", instance->host_no);
287 } 268 }
288#else 269#else
289 if (instance->irq != SCSI_IRQ_NONE) 270 if (instance->irq != NO_IRQ)
290 printk(KERN_WARNING "scsi%d : interrupts not used. Might as well not jumper it.\n", instance->host_no); 271 printk(KERN_WARNING "scsi%d : interrupts not used. Might as well not jumper it.\n", instance->host_no);
291 instance->irq = SCSI_IRQ_NONE; 272 instance->irq = NO_IRQ;
292#endif 273#endif
293#if defined(DTCDEBUG) && (DTCDEBUG & DTCDEBUG_INIT) 274#if defined(DTCDEBUG) && (DTCDEBUG & DTCDEBUG_INIT)
294 printk("scsi%d : irq = %d\n", instance->host_no, instance->irq); 275 printk("scsi%d : irq = %d\n", instance->host_no, instance->irq);
295#endif 276#endif
296 277
297 printk(KERN_INFO "scsi%d : at 0x%05X", instance->host_no, (int) instance->base);
298 if (instance->irq == SCSI_IRQ_NONE)
299 printk(" interrupts disabled");
300 else
301 printk(" irq %d", instance->irq);
302 printk(" options CAN_QUEUE=%d CMD_PER_LUN=%d release=%d", CAN_QUEUE, CMD_PER_LUN, DTC_PUBLIC_RELEASE);
303 NCR5380_print_options(instance);
304 printk("\n");
305
306 ++current_override; 278 ++current_override;
307 ++count; 279 ++count;
308 } 280 }
@@ -354,20 +326,18 @@ static int dtc_biosparam(struct scsi_device *sdev, struct block_device *dev,
354 * timeout. 326 * timeout.
355*/ 327*/
356 328
357static int dtc_maxi = 0;
358static int dtc_wmaxi = 0;
359
360static inline int NCR5380_pread(struct Scsi_Host *instance, unsigned char *dst, int len) 329static inline int NCR5380_pread(struct Scsi_Host *instance, unsigned char *dst, int len)
361{ 330{
362 unsigned char *d = dst; 331 unsigned char *d = dst;
363 int i; /* For counting time spent in the poll-loop */ 332 int i; /* For counting time spent in the poll-loop */
333 struct NCR5380_hostdata *hostdata = shost_priv(instance);
364 NCR5380_local_declare(); 334 NCR5380_local_declare();
365 NCR5380_setup(instance); 335 NCR5380_setup(instance);
366 336
367 i = 0; 337 i = 0;
368 NCR5380_read(RESET_PARITY_INTERRUPT_REG); 338 NCR5380_read(RESET_PARITY_INTERRUPT_REG);
369 NCR5380_write(MODE_REG, MR_ENABLE_EOP_INTR | MR_DMA_MODE); 339 NCR5380_write(MODE_REG, MR_ENABLE_EOP_INTR | MR_DMA_MODE);
370 if (instance->irq == SCSI_IRQ_NONE) 340 if (instance->irq == NO_IRQ)
371 NCR5380_write(DTC_CONTROL_REG, CSR_DIR_READ); 341 NCR5380_write(DTC_CONTROL_REG, CSR_DIR_READ);
372 else 342 else
373 NCR5380_write(DTC_CONTROL_REG, CSR_DIR_READ | CSR_INT_BASE); 343 NCR5380_write(DTC_CONTROL_REG, CSR_DIR_READ | CSR_INT_BASE);
@@ -391,8 +361,8 @@ static inline int NCR5380_pread(struct Scsi_Host *instance, unsigned char *dst,
391 NCR5380_write(MODE_REG, 0); /* Clear the operating mode */ 361 NCR5380_write(MODE_REG, 0); /* Clear the operating mode */
392 rtrc(0); 362 rtrc(0);
393 NCR5380_read(RESET_PARITY_INTERRUPT_REG); 363 NCR5380_read(RESET_PARITY_INTERRUPT_REG);
394 if (i > dtc_maxi) 364 if (i > hostdata->spin_max_r)
395 dtc_maxi = i; 365 hostdata->spin_max_r = i;
396 return (0); 366 return (0);
397} 367}
398 368
@@ -412,13 +382,14 @@ static inline int NCR5380_pread(struct Scsi_Host *instance, unsigned char *dst,
412static inline int NCR5380_pwrite(struct Scsi_Host *instance, unsigned char *src, int len) 382static inline int NCR5380_pwrite(struct Scsi_Host *instance, unsigned char *src, int len)
413{ 383{
414 int i; 384 int i;
385 struct NCR5380_hostdata *hostdata = shost_priv(instance);
415 NCR5380_local_declare(); 386 NCR5380_local_declare();
416 NCR5380_setup(instance); 387 NCR5380_setup(instance);
417 388
418 NCR5380_read(RESET_PARITY_INTERRUPT_REG); 389 NCR5380_read(RESET_PARITY_INTERRUPT_REG);
419 NCR5380_write(MODE_REG, MR_ENABLE_EOP_INTR | MR_DMA_MODE); 390 NCR5380_write(MODE_REG, MR_ENABLE_EOP_INTR | MR_DMA_MODE);
420 /* set direction (write) */ 391 /* set direction (write) */
421 if (instance->irq == SCSI_IRQ_NONE) 392 if (instance->irq == NO_IRQ)
422 NCR5380_write(DTC_CONTROL_REG, 0); 393 NCR5380_write(DTC_CONTROL_REG, 0);
423 else 394 else
424 NCR5380_write(DTC_CONTROL_REG, CSR_5380_INTR); 395 NCR5380_write(DTC_CONTROL_REG, CSR_5380_INTR);
@@ -444,8 +415,8 @@ static inline int NCR5380_pwrite(struct Scsi_Host *instance, unsigned char *src,
444 /* Check for parity error here. fixme. */ 415 /* Check for parity error here. fixme. */
445 NCR5380_write(MODE_REG, 0); /* Clear the operating mode */ 416 NCR5380_write(MODE_REG, 0); /* Clear the operating mode */
446 rtrc(0); 417 rtrc(0);
447 if (i > dtc_wmaxi) 418 if (i > hostdata->spin_max_w)
448 dtc_wmaxi = i; 419 hostdata->spin_max_w = i;
449 return (0); 420 return (0);
450} 421}
451 422
@@ -457,7 +428,7 @@ static int dtc_release(struct Scsi_Host *shost)
457{ 428{
458 NCR5380_local_declare(); 429 NCR5380_local_declare();
459 NCR5380_setup(shost); 430 NCR5380_setup(shost);
460 if (shost->irq) 431 if (shost->irq != NO_IRQ)
461 free_irq(shost->irq, shost); 432 free_irq(shost->irq, shost);
462 NCR5380_exit(shost); 433 NCR5380_exit(shost);
463 if (shost->io_port && shost->n_io_port) 434 if (shost->io_port && shost->n_io_port)
@@ -471,6 +442,10 @@ static struct scsi_host_template driver_template = {
471 .name = "DTC 3180/3280 ", 442 .name = "DTC 3180/3280 ",
472 .detect = dtc_detect, 443 .detect = dtc_detect,
473 .release = dtc_release, 444 .release = dtc_release,
445 .proc_name = "dtc3x80",
446 .show_info = dtc_show_info,
447 .write_info = dtc_write_info,
448 .info = dtc_info,
474 .queuecommand = dtc_queue_command, 449 .queuecommand = dtc_queue_command,
475 .eh_abort_handler = dtc_abort, 450 .eh_abort_handler = dtc_abort,
476 .eh_bus_reset_handler = dtc_bus_reset, 451 .eh_bus_reset_handler = dtc_bus_reset,
diff --git a/drivers/scsi/dtc.h b/drivers/scsi/dtc.h
index 92d7cfc3f4fc..78a2332e9064 100644
--- a/drivers/scsi/dtc.h
+++ b/drivers/scsi/dtc.h
@@ -5,24 +5,6 @@
5 * (Unix and Linux consulting and custom programming) 5 * (Unix and Linux consulting and custom programming)
6 * drew@colorado.edu 6 * drew@colorado.edu
7 * +1 (303) 440-4894 7 * +1 (303) 440-4894
8 *
9 * DISTRIBUTION RELEASE 2.
10 *
11 * For more information, please consult
12 *
13 *
14 *
15 * and
16 *
17 * NCR 5380 Family
18 * SCSI Protocol Controller
19 * Databook
20 *
21 * NCR Microelectronics
22 * 1635 Aeroplaza Drive
23 * Colorado Springs, CO 80916
24 * 1+ (719) 578-3400
25 * 1+ (800) 334-5454
26 */ 8 */
27 9
28#ifndef DTC3280_H 10#ifndef DTC3280_H
@@ -32,13 +14,6 @@
32#define DTCDEBUG_INIT 0x1 14#define DTCDEBUG_INIT 0x1
33#define DTCDEBUG_TRANSFER 0x2 15#define DTCDEBUG_TRANSFER 0x2
34 16
35static int dtc_abort(Scsi_Cmnd *);
36static int dtc_biosparam(struct scsi_device *, struct block_device *,
37 sector_t, int*);
38static int dtc_detect(struct scsi_host_template *);
39static int dtc_queue_command(struct Scsi_Host *, struct scsi_cmnd *);
40static int dtc_bus_reset(Scsi_Cmnd *);
41
42#ifndef CMD_PER_LUN 17#ifndef CMD_PER_LUN
43#define CMD_PER_LUN 2 18#define CMD_PER_LUN 2
44#endif 19#endif
@@ -88,6 +63,7 @@ static int dtc_bus_reset(Scsi_Cmnd *);
88#define NCR5380_queue_command dtc_queue_command 63#define NCR5380_queue_command dtc_queue_command
89#define NCR5380_abort dtc_abort 64#define NCR5380_abort dtc_abort
90#define NCR5380_bus_reset dtc_bus_reset 65#define NCR5380_bus_reset dtc_bus_reset
66#define NCR5380_info dtc_info
91#define NCR5380_show_info dtc_show_info 67#define NCR5380_show_info dtc_show_info
92#define NCR5380_write_info dtc_write_info 68#define NCR5380_write_info dtc_write_info
93 69
diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
index 943ad3a19661..227dd2c2ec2f 100644
--- a/drivers/scsi/eata.c
+++ b/drivers/scsi/eata.c
@@ -946,20 +946,18 @@ static int eata2x_slave_configure(struct scsi_device *dev)
946 946
947 if (TLDEV(dev->type) && dev->tagged_supported) { 947 if (TLDEV(dev->type) && dev->tagged_supported) {
948 if (tag_mode == TAG_SIMPLE) { 948 if (tag_mode == TAG_SIMPLE) {
949 scsi_adjust_queue_depth(dev, MSG_SIMPLE_TAG, tqd);
950 tag_suffix = ", simple tags"; 949 tag_suffix = ", simple tags";
951 } else if (tag_mode == TAG_ORDERED) { 950 } else if (tag_mode == TAG_ORDERED) {
952 scsi_adjust_queue_depth(dev, MSG_ORDERED_TAG, tqd);
953 tag_suffix = ", ordered tags"; 951 tag_suffix = ", ordered tags";
954 } else { 952 } else {
955 scsi_adjust_queue_depth(dev, 0, tqd);
956 tag_suffix = ", no tags"; 953 tag_suffix = ", no tags";
957 } 954 }
955 scsi_change_queue_depth(dev, tqd);
958 } else if (TLDEV(dev->type) && linked_comm) { 956 } else if (TLDEV(dev->type) && linked_comm) {
959 scsi_adjust_queue_depth(dev, 0, tqd); 957 scsi_change_queue_depth(dev, tqd);
960 tag_suffix = ", untagged"; 958 tag_suffix = ", untagged";
961 } else { 959 } else {
962 scsi_adjust_queue_depth(dev, 0, utqd); 960 scsi_change_queue_depth(dev, utqd);
963 tag_suffix = ""; 961 tag_suffix = "";
964 } 962 }
965 963
diff --git a/drivers/scsi/esas2r/esas2r.h b/drivers/scsi/esas2r/esas2r.h
index 3fd305d6b67d..b6030e3edd01 100644
--- a/drivers/scsi/esas2r/esas2r.h
+++ b/drivers/scsi/esas2r/esas2r.h
@@ -972,11 +972,6 @@ u8 handle_hba_ioctl(struct esas2r_adapter *a,
972 struct atto_ioctl *ioctl_hba); 972 struct atto_ioctl *ioctl_hba);
973int esas2r_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd); 973int esas2r_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd);
974int esas2r_show_info(struct seq_file *m, struct Scsi_Host *sh); 974int esas2r_show_info(struct seq_file *m, struct Scsi_Host *sh);
975int esas2r_slave_alloc(struct scsi_device *dev);
976int esas2r_slave_configure(struct scsi_device *dev);
977void esas2r_slave_destroy(struct scsi_device *dev);
978int esas2r_change_queue_depth(struct scsi_device *dev, int depth, int reason);
979int esas2r_change_queue_type(struct scsi_device *dev, int type);
980long esas2r_proc_ioctl(struct file *fp, unsigned int cmd, unsigned long arg); 975long esas2r_proc_ioctl(struct file *fp, unsigned int cmd, unsigned long arg);
981 976
982/* SCSI error handler (eh) functions */ 977/* SCSI error handler (eh) functions */
diff --git a/drivers/scsi/esas2r/esas2r_ioctl.c b/drivers/scsi/esas2r/esas2r_ioctl.c
index d89a0277a8e1..baf913047b48 100644
--- a/drivers/scsi/esas2r/esas2r_ioctl.c
+++ b/drivers/scsi/esas2r/esas2r_ioctl.c
@@ -117,9 +117,8 @@ static void do_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi)
117 117
118 rq = esas2r_alloc_request(a); 118 rq = esas2r_alloc_request(a);
119 if (rq == NULL) { 119 if (rq == NULL) {
120 up(&a->fm_api_semaphore);
121 fi->status = FI_STAT_BUSY; 120 fi->status = FI_STAT_BUSY;
122 return; 121 goto free_sem;
123 } 122 }
124 123
125 if (fi == &a->firmware.header) { 124 if (fi == &a->firmware.header) {
@@ -135,7 +134,7 @@ static void do_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi)
135 if (a->firmware.header_buff == NULL) { 134 if (a->firmware.header_buff == NULL) {
136 esas2r_debug("failed to allocate header buffer!"); 135 esas2r_debug("failed to allocate header buffer!");
137 fi->status = FI_STAT_BUSY; 136 fi->status = FI_STAT_BUSY;
138 return; 137 goto free_req;
139 } 138 }
140 139
141 memcpy(a->firmware.header_buff, fi, 140 memcpy(a->firmware.header_buff, fi,
@@ -171,9 +170,10 @@ all_done:
171 a->firmware.header_buff, 170 a->firmware.header_buff,
172 (dma_addr_t)a->firmware.header_buff_phys); 171 (dma_addr_t)a->firmware.header_buff_phys);
173 } 172 }
174 173free_req:
175 up(&a->fm_api_semaphore);
176 esas2r_free_request(a, (struct esas2r_request *)rq); 174 esas2r_free_request(a, (struct esas2r_request *)rq);
175free_sem:
176 up(&a->fm_api_semaphore);
177 return; 177 return;
178 178
179} 179}
@@ -1420,9 +1420,10 @@ int esas2r_ioctl_handler(void *hostdata, int cmd, void __user *arg)
1420 1420
1421 rq = esas2r_alloc_request(a); 1421 rq = esas2r_alloc_request(a);
1422 if (rq == NULL) { 1422 if (rq == NULL) {
1423 up(&a->nvram_semaphore); 1423 kfree(ioctl);
1424 ioctl->data.prw.code = 0; 1424 esas2r_log(ESAS2R_LOG_WARN,
1425 break; 1425 "could not allocate an internal request");
1426 return -ENOMEM;
1426 } 1427 }
1427 1428
1428 code = esas2r_write_params(a, rq, 1429 code = esas2r_write_params(a, rq,
@@ -1523,9 +1524,12 @@ ioctl_done:
1523 case -EINVAL: 1524 case -EINVAL:
1524 ioctl->header.return_code = IOCTL_INVALID_PARAM; 1525 ioctl->header.return_code = IOCTL_INVALID_PARAM;
1525 break; 1526 break;
1527
1528 default:
1529 ioctl->header.return_code = IOCTL_GENERAL_ERROR;
1530 break;
1526 } 1531 }
1527 1532
1528 ioctl->header.return_code = IOCTL_GENERAL_ERROR;
1529 } 1533 }
1530 1534
1531 /* Always copy the buffer back, if only to pick up the status */ 1535 /* Always copy the buffer back, if only to pick up the status */
diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c
index 6504a195c874..593ff8a63c70 100644
--- a/drivers/scsi/esas2r/esas2r_main.c
+++ b/drivers/scsi/esas2r/esas2r_main.c
@@ -254,12 +254,10 @@ static struct scsi_host_template driver_template = {
254 .use_clustering = ENABLE_CLUSTERING, 254 .use_clustering = ENABLE_CLUSTERING,
255 .emulated = 0, 255 .emulated = 0,
256 .proc_name = ESAS2R_DRVR_NAME, 256 .proc_name = ESAS2R_DRVR_NAME,
257 .slave_configure = esas2r_slave_configure, 257 .change_queue_depth = scsi_change_queue_depth,
258 .slave_alloc = esas2r_slave_alloc, 258 .change_queue_type = scsi_change_queue_type,
259 .slave_destroy = esas2r_slave_destroy,
260 .change_queue_depth = esas2r_change_queue_depth,
261 .change_queue_type = esas2r_change_queue_type,
262 .max_sectors = 0xFFFF, 259 .max_sectors = 0xFFFF,
260 .use_blk_tags = 1,
263}; 261};
264 262
265int sgl_page_size = 512; 263int sgl_page_size = 512;
@@ -1057,7 +1055,7 @@ int esas2r_eh_abort(struct scsi_cmnd *cmd)
1057 1055
1058 cmd->scsi_done(cmd); 1056 cmd->scsi_done(cmd);
1059 1057
1060 return 0; 1058 return SUCCESS;
1061 } 1059 }
1062 1060
1063 spin_lock_irqsave(&a->queue_lock, flags); 1061 spin_lock_irqsave(&a->queue_lock, flags);
@@ -1259,60 +1257,6 @@ int esas2r_target_reset(struct scsi_cmnd *cmd)
1259 return esas2r_dev_targ_reset(cmd, true); 1257 return esas2r_dev_targ_reset(cmd, true);
1260} 1258}
1261 1259
1262int esas2r_change_queue_depth(struct scsi_device *dev, int depth, int reason)
1263{
1264 esas2r_log(ESAS2R_LOG_INFO, "change_queue_depth %p, %d", dev, depth);
1265
1266 scsi_adjust_queue_depth(dev, scsi_get_tag_type(dev), depth);
1267
1268 return dev->queue_depth;
1269}
1270
1271int esas2r_change_queue_type(struct scsi_device *dev, int type)
1272{
1273 esas2r_log(ESAS2R_LOG_INFO, "change_queue_type %p, %d", dev, type);
1274
1275 if (dev->tagged_supported) {
1276 scsi_set_tag_type(dev, type);
1277
1278 if (type)
1279 scsi_activate_tcq(dev, dev->queue_depth);
1280 else
1281 scsi_deactivate_tcq(dev, dev->queue_depth);
1282 } else {
1283 type = 0;
1284 }
1285
1286 return type;
1287}
1288
1289int esas2r_slave_alloc(struct scsi_device *dev)
1290{
1291 return 0;
1292}
1293
1294int esas2r_slave_configure(struct scsi_device *dev)
1295{
1296 esas2r_log_dev(ESAS2R_LOG_INFO, &(dev->sdev_gendev),
1297 "esas2r_slave_configure()");
1298
1299 if (dev->tagged_supported) {
1300 scsi_set_tag_type(dev, MSG_SIMPLE_TAG);
1301 scsi_activate_tcq(dev, cmd_per_lun);
1302 } else {
1303 scsi_set_tag_type(dev, 0);
1304 scsi_deactivate_tcq(dev, cmd_per_lun);
1305 }
1306
1307 return 0;
1308}
1309
1310void esas2r_slave_destroy(struct scsi_device *dev)
1311{
1312 esas2r_log_dev(ESAS2R_LOG_INFO, &(dev->sdev_gendev),
1313 "esas2r_slave_destroy()");
1314}
1315
1316void esas2r_log_request_failure(struct esas2r_adapter *a, 1260void esas2r_log_request_failure(struct esas2r_adapter *a,
1317 struct esas2r_request *rq) 1261 struct esas2r_request *rq)
1318{ 1262{
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
index 55548dc5cec3..ce5bd52fe692 100644
--- a/drivers/scsi/esp_scsi.c
+++ b/drivers/scsi/esp_scsi.c
@@ -49,55 +49,67 @@ static u32 esp_debug;
49#define ESP_DEBUG_DATADONE 0x00000100 49#define ESP_DEBUG_DATADONE 0x00000100
50#define ESP_DEBUG_RECONNECT 0x00000200 50#define ESP_DEBUG_RECONNECT 0x00000200
51#define ESP_DEBUG_AUTOSENSE 0x00000400 51#define ESP_DEBUG_AUTOSENSE 0x00000400
52#define ESP_DEBUG_EVENT 0x00000800
53#define ESP_DEBUG_COMMAND 0x00001000
52 54
53#define esp_log_intr(f, a...) \ 55#define esp_log_intr(f, a...) \
54do { if (esp_debug & ESP_DEBUG_INTR) \ 56do { if (esp_debug & ESP_DEBUG_INTR) \
55 printk(f, ## a); \ 57 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
56} while (0) 58} while (0)
57 59
58#define esp_log_reset(f, a...) \ 60#define esp_log_reset(f, a...) \
59do { if (esp_debug & ESP_DEBUG_RESET) \ 61do { if (esp_debug & ESP_DEBUG_RESET) \
60 printk(f, ## a); \ 62 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
61} while (0) 63} while (0)
62 64
63#define esp_log_msgin(f, a...) \ 65#define esp_log_msgin(f, a...) \
64do { if (esp_debug & ESP_DEBUG_MSGIN) \ 66do { if (esp_debug & ESP_DEBUG_MSGIN) \
65 printk(f, ## a); \ 67 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
66} while (0) 68} while (0)
67 69
68#define esp_log_msgout(f, a...) \ 70#define esp_log_msgout(f, a...) \
69do { if (esp_debug & ESP_DEBUG_MSGOUT) \ 71do { if (esp_debug & ESP_DEBUG_MSGOUT) \
70 printk(f, ## a); \ 72 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
71} while (0) 73} while (0)
72 74
73#define esp_log_cmddone(f, a...) \ 75#define esp_log_cmddone(f, a...) \
74do { if (esp_debug & ESP_DEBUG_CMDDONE) \ 76do { if (esp_debug & ESP_DEBUG_CMDDONE) \
75 printk(f, ## a); \ 77 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
76} while (0) 78} while (0)
77 79
78#define esp_log_disconnect(f, a...) \ 80#define esp_log_disconnect(f, a...) \
79do { if (esp_debug & ESP_DEBUG_DISCONNECT) \ 81do { if (esp_debug & ESP_DEBUG_DISCONNECT) \
80 printk(f, ## a); \ 82 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
81} while (0) 83} while (0)
82 84
83#define esp_log_datastart(f, a...) \ 85#define esp_log_datastart(f, a...) \
84do { if (esp_debug & ESP_DEBUG_DATASTART) \ 86do { if (esp_debug & ESP_DEBUG_DATASTART) \
85 printk(f, ## a); \ 87 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
86} while (0) 88} while (0)
87 89
88#define esp_log_datadone(f, a...) \ 90#define esp_log_datadone(f, a...) \
89do { if (esp_debug & ESP_DEBUG_DATADONE) \ 91do { if (esp_debug & ESP_DEBUG_DATADONE) \
90 printk(f, ## a); \ 92 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
91} while (0) 93} while (0)
92 94
93#define esp_log_reconnect(f, a...) \ 95#define esp_log_reconnect(f, a...) \
94do { if (esp_debug & ESP_DEBUG_RECONNECT) \ 96do { if (esp_debug & ESP_DEBUG_RECONNECT) \
95 printk(f, ## a); \ 97 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
96} while (0) 98} while (0)
97 99
98#define esp_log_autosense(f, a...) \ 100#define esp_log_autosense(f, a...) \
99do { if (esp_debug & ESP_DEBUG_AUTOSENSE) \ 101do { if (esp_debug & ESP_DEBUG_AUTOSENSE) \
100 printk(f, ## a); \ 102 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
103} while (0)
104
105#define esp_log_event(f, a...) \
106do { if (esp_debug & ESP_DEBUG_EVENT) \
107 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
108} while (0)
109
110#define esp_log_command(f, a...) \
111do { if (esp_debug & ESP_DEBUG_COMMAND) \
112 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
101} while (0) 113} while (0)
102 114
103#define esp_read8(REG) esp->ops->esp_read8(esp, REG) 115#define esp_read8(REG) esp->ops->esp_read8(esp, REG)
@@ -126,10 +138,29 @@ void scsi_esp_cmd(struct esp *esp, u8 val)
126 138
127 esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1); 139 esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
128 140
141 esp_log_command("cmd[%02x]\n", val);
129 esp_write8(val, ESP_CMD); 142 esp_write8(val, ESP_CMD);
130} 143}
131EXPORT_SYMBOL(scsi_esp_cmd); 144EXPORT_SYMBOL(scsi_esp_cmd);
132 145
146static void esp_send_dma_cmd(struct esp *esp, int len, int max_len, int cmd)
147{
148 if (esp->flags & ESP_FLAG_USE_FIFO) {
149 int i;
150
151 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
152 for (i = 0; i < len; i++)
153 esp_write8(esp->command_block[i], ESP_FDATA);
154 scsi_esp_cmd(esp, cmd);
155 } else {
156 if (esp->rev == FASHME)
157 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
158 cmd |= ESP_CMD_DMA;
159 esp->ops->send_dma_cmd(esp, esp->command_block_dma,
160 len, max_len, 0, cmd);
161 }
162}
163
133static void esp_event(struct esp *esp, u8 val) 164static void esp_event(struct esp *esp, u8 val)
134{ 165{
135 struct esp_event_ent *p; 166 struct esp_event_ent *p;
@@ -150,19 +181,17 @@ static void esp_dump_cmd_log(struct esp *esp)
150 int idx = esp->esp_event_cur; 181 int idx = esp->esp_event_cur;
151 int stop = idx; 182 int stop = idx;
152 183
153 printk(KERN_INFO PFX "esp%d: Dumping command log\n", 184 shost_printk(KERN_INFO, esp->host, "Dumping command log\n");
154 esp->host->unique_id);
155 do { 185 do {
156 struct esp_event_ent *p = &esp->esp_event_log[idx]; 186 struct esp_event_ent *p = &esp->esp_event_log[idx];
157 187
158 printk(KERN_INFO PFX "esp%d: ent[%d] %s ", 188 shost_printk(KERN_INFO, esp->host,
159 esp->host->unique_id, idx, 189 "ent[%d] %s val[%02x] sreg[%02x] seqreg[%02x] "
160 p->type == ESP_EVENT_TYPE_CMD ? "CMD" : "EVENT"); 190 "sreg2[%02x] ireg[%02x] ss[%02x] event[%02x]\n",
161 191 idx,
162 printk("val[%02x] sreg[%02x] seqreg[%02x] " 192 p->type == ESP_EVENT_TYPE_CMD ? "CMD" : "EVENT",
163 "sreg2[%02x] ireg[%02x] ss[%02x] event[%02x]\n", 193 p->val, p->sreg, p->seqreg,
164 p->val, p->sreg, p->seqreg, 194 p->sreg2, p->ireg, p->select_state, p->event);
165 p->sreg2, p->ireg, p->select_state, p->event);
166 195
167 idx = (idx + 1) & (ESP_EVENT_LOG_SZ - 1); 196 idx = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
168 } while (idx != stop); 197 } while (idx != stop);
@@ -176,9 +205,8 @@ static void esp_flush_fifo(struct esp *esp)
176 205
177 while (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES) { 206 while (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES) {
178 if (--lim == 0) { 207 if (--lim == 0) {
179 printk(KERN_ALERT PFX "esp%d: ESP_FF_BYTES " 208 shost_printk(KERN_ALERT, esp->host,
180 "will not clear!\n", 209 "ESP_FF_BYTES will not clear!\n");
181 esp->host->unique_id);
182 break; 210 break;
183 } 211 }
184 udelay(1); 212 udelay(1);
@@ -240,6 +268,19 @@ static void esp_reset_esp(struct esp *esp)
240 } else { 268 } else {
241 esp->min_period = ((5 * esp->ccycle) / 1000); 269 esp->min_period = ((5 * esp->ccycle) / 1000);
242 } 270 }
271 if (esp->rev == FAS236) {
272 /*
273 * The AM53c974 chip returns the same ID as FAS236;
274 * try to configure glitch eater.
275 */
276 u8 config4 = ESP_CONFIG4_GE1;
277 esp_write8(config4, ESP_CFG4);
278 config4 = esp_read8(ESP_CFG4);
279 if (config4 & ESP_CONFIG4_GE1) {
280 esp->rev = PCSCSI;
281 esp_write8(esp->config4, ESP_CFG4);
282 }
283 }
243 esp->max_period = (esp->max_period + 3)>>2; 284 esp->max_period = (esp->max_period + 3)>>2;
244 esp->min_period = (esp->min_period + 3)>>2; 285 esp->min_period = (esp->min_period + 3)>>2;
245 286
@@ -265,7 +306,8 @@ static void esp_reset_esp(struct esp *esp)
265 /* fallthrough... */ 306 /* fallthrough... */
266 307
267 case FAS236: 308 case FAS236:
268 /* Fast 236 or HME */ 309 case PCSCSI:
310 /* Fast 236, AM53c974 or HME */
269 esp_write8(esp->config2, ESP_CFG2); 311 esp_write8(esp->config2, ESP_CFG2);
270 if (esp->rev == FASHME) { 312 if (esp->rev == FASHME) {
271 u8 cfg3 = esp->target[0].esp_config3; 313 u8 cfg3 = esp->target[0].esp_config3;
@@ -383,12 +425,11 @@ static void esp_advance_dma(struct esp *esp, struct esp_cmd_entry *ent,
383 p->cur_residue -= len; 425 p->cur_residue -= len;
384 p->tot_residue -= len; 426 p->tot_residue -= len;
385 if (p->cur_residue < 0 || p->tot_residue < 0) { 427 if (p->cur_residue < 0 || p->tot_residue < 0) {
386 printk(KERN_ERR PFX "esp%d: Data transfer overflow.\n", 428 shost_printk(KERN_ERR, esp->host,
387 esp->host->unique_id); 429 "Data transfer overflow.\n");
388 printk(KERN_ERR PFX "esp%d: cur_residue[%d] tot_residue[%d] " 430 shost_printk(KERN_ERR, esp->host,
389 "len[%u]\n", 431 "cur_residue[%d] tot_residue[%d] len[%u]\n",
390 esp->host->unique_id, 432 p->cur_residue, p->tot_residue, len);
391 p->cur_residue, p->tot_residue, len);
392 p->cur_residue = 0; 433 p->cur_residue = 0;
393 p->tot_residue = 0; 434 p->tot_residue = 0;
394 } 435 }
@@ -604,9 +645,8 @@ static void esp_autosense(struct esp *esp, struct esp_cmd_entry *ent)
604 645
605 646
606 if (!ent->sense_ptr) { 647 if (!ent->sense_ptr) {
607 esp_log_autosense("esp%d: Doing auto-sense for " 648 esp_log_autosense("Doing auto-sense for tgt[%d] lun[%d]\n",
608 "tgt[%d] lun[%d]\n", 649 tgt, lun);
609 esp->host->unique_id, tgt, lun);
610 650
611 ent->sense_ptr = cmd->sense_buffer; 651 ent->sense_ptr = cmd->sense_buffer;
612 ent->sense_dma = esp->ops->map_single(esp, 652 ent->sense_dma = esp->ops->map_single(esp,
@@ -642,10 +682,7 @@ static void esp_autosense(struct esp *esp, struct esp_cmd_entry *ent)
642 682
643 val = (p - esp->command_block); 683 val = (p - esp->command_block);
644 684
645 if (esp->rev == FASHME) 685 esp_send_dma_cmd(esp, val, 16, ESP_CMD_SELA);
646 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
647 esp->ops->send_dma_cmd(esp, esp->command_block_dma,
648 val, 16, 0, ESP_CMD_DMA | ESP_CMD_SELA);
649} 686}
650 687
651static struct esp_cmd_entry *find_and_prep_issuable_command(struct esp *esp) 688static struct esp_cmd_entry *find_and_prep_issuable_command(struct esp *esp)
@@ -663,7 +700,7 @@ static struct esp_cmd_entry *find_and_prep_issuable_command(struct esp *esp)
663 return ent; 700 return ent;
664 } 701 }
665 702
666 if (!scsi_populate_tag_msg(cmd, &ent->tag[0])) { 703 if (!spi_populate_tag_msg(&ent->tag[0], cmd)) {
667 ent->tag[0] = 0; 704 ent->tag[0] = 0;
668 ent->tag[1] = 0; 705 ent->tag[1] = 0;
669 } 706 }
@@ -781,12 +818,12 @@ build_identify:
781 } 818 }
782 819
783 if (!(esp->flags & ESP_FLAG_DOING_SLOWCMD)) { 820 if (!(esp->flags & ESP_FLAG_DOING_SLOWCMD)) {
784 start_cmd = ESP_CMD_DMA | ESP_CMD_SELA; 821 start_cmd = ESP_CMD_SELA;
785 if (ent->tag[0]) { 822 if (ent->tag[0]) {
786 *p++ = ent->tag[0]; 823 *p++ = ent->tag[0];
787 *p++ = ent->tag[1]; 824 *p++ = ent->tag[1];
788 825
789 start_cmd = ESP_CMD_DMA | ESP_CMD_SA3; 826 start_cmd = ESP_CMD_SA3;
790 } 827 }
791 828
792 for (i = 0; i < cmd->cmd_len; i++) 829 for (i = 0; i < cmd->cmd_len; i++)
@@ -806,7 +843,7 @@ build_identify:
806 esp->msg_out_len += 2; 843 esp->msg_out_len += 2;
807 } 844 }
808 845
809 start_cmd = ESP_CMD_DMA | ESP_CMD_SELAS; 846 start_cmd = ESP_CMD_SELAS;
810 esp->select_state = ESP_SELECT_MSGOUT; 847 esp->select_state = ESP_SELECT_MSGOUT;
811 } 848 }
812 val = tgt; 849 val = tgt;
@@ -826,10 +863,7 @@ build_identify:
826 printk("]\n"); 863 printk("]\n");
827 } 864 }
828 865
829 if (esp->rev == FASHME) 866 esp_send_dma_cmd(esp, val, 16, start_cmd);
830 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
831 esp->ops->send_dma_cmd(esp, esp->command_block_dma,
832 val, 16, 0, start_cmd);
833} 867}
834 868
835static struct esp_cmd_entry *esp_get_ent(struct esp *esp) 869static struct esp_cmd_entry *esp_get_ent(struct esp *esp)
@@ -953,8 +987,8 @@ static int esp_check_gross_error(struct esp *esp)
953 * - DMA programmed with wrong direction 987 * - DMA programmed with wrong direction
954 * - improper phase change 988 * - improper phase change
955 */ 989 */
956 printk(KERN_ERR PFX "esp%d: Gross error sreg[%02x]\n", 990 shost_printk(KERN_ERR, esp->host,
957 esp->host->unique_id, esp->sreg); 991 "Gross error sreg[%02x]\n", esp->sreg);
958 /* XXX Reset the chip. XXX */ 992 /* XXX Reset the chip. XXX */
959 return 1; 993 return 1;
960 } 994 }
@@ -974,7 +1008,6 @@ static int esp_check_spur_intr(struct esp *esp)
974 1008
975 default: 1009 default:
976 if (!(esp->sreg & ESP_STAT_INTR)) { 1010 if (!(esp->sreg & ESP_STAT_INTR)) {
977 esp->ireg = esp_read8(ESP_INTRPT);
978 if (esp->ireg & ESP_INTR_SR) 1011 if (esp->ireg & ESP_INTR_SR)
979 return 1; 1012 return 1;
980 1013
@@ -982,14 +1015,13 @@ static int esp_check_spur_intr(struct esp *esp)
982 * ESP is not, the only possibility is a DMA error. 1015 * ESP is not, the only possibility is a DMA error.
983 */ 1016 */
984 if (!esp->ops->dma_error(esp)) { 1017 if (!esp->ops->dma_error(esp)) {
985 printk(KERN_ERR PFX "esp%d: Spurious irq, " 1018 shost_printk(KERN_ERR, esp->host,
986 "sreg=%02x.\n", 1019 "Spurious irq, sreg=%02x.\n",
987 esp->host->unique_id, esp->sreg); 1020 esp->sreg);
988 return -1; 1021 return -1;
989 } 1022 }
990 1023
991 printk(KERN_ERR PFX "esp%d: DMA error\n", 1024 shost_printk(KERN_ERR, esp->host, "DMA error\n");
992 esp->host->unique_id);
993 1025
994 /* XXX Reset the chip. XXX */ 1026 /* XXX Reset the chip. XXX */
995 return -1; 1027 return -1;
@@ -1002,7 +1034,7 @@ static int esp_check_spur_intr(struct esp *esp)
1002 1034
1003static void esp_schedule_reset(struct esp *esp) 1035static void esp_schedule_reset(struct esp *esp)
1004{ 1036{
1005 esp_log_reset("ESP: esp_schedule_reset() from %pf\n", 1037 esp_log_reset("esp_schedule_reset() from %pf\n",
1006 __builtin_return_address(0)); 1038 __builtin_return_address(0));
1007 esp->flags |= ESP_FLAG_RESETTING; 1039 esp->flags |= ESP_FLAG_RESETTING;
1008 esp_event(esp, ESP_EVENT_RESET); 1040 esp_event(esp, ESP_EVENT_RESET);
@@ -1019,20 +1051,20 @@ static struct esp_cmd_entry *esp_reconnect_with_tag(struct esp *esp,
1019 int i; 1051 int i;
1020 1052
1021 if (!lp->num_tagged) { 1053 if (!lp->num_tagged) {
1022 printk(KERN_ERR PFX "esp%d: Reconnect w/num_tagged==0\n", 1054 shost_printk(KERN_ERR, esp->host,
1023 esp->host->unique_id); 1055 "Reconnect w/num_tagged==0\n");
1024 return NULL; 1056 return NULL;
1025 } 1057 }
1026 1058
1027 esp_log_reconnect("ESP: reconnect tag, "); 1059 esp_log_reconnect("reconnect tag, ");
1028 1060
1029 for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) { 1061 for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
1030 if (esp->ops->irq_pending(esp)) 1062 if (esp->ops->irq_pending(esp))
1031 break; 1063 break;
1032 } 1064 }
1033 if (i == ESP_QUICKIRQ_LIMIT) { 1065 if (i == ESP_QUICKIRQ_LIMIT) {
1034 printk(KERN_ERR PFX "esp%d: Reconnect IRQ1 timeout\n", 1066 shost_printk(KERN_ERR, esp->host,
1035 esp->host->unique_id); 1067 "Reconnect IRQ1 timeout\n");
1036 return NULL; 1068 return NULL;
1037 } 1069 }
1038 1070
@@ -1043,14 +1075,14 @@ static struct esp_cmd_entry *esp_reconnect_with_tag(struct esp *esp,
1043 i, esp->ireg, esp->sreg); 1075 i, esp->ireg, esp->sreg);
1044 1076
1045 if (esp->ireg & ESP_INTR_DC) { 1077 if (esp->ireg & ESP_INTR_DC) {
1046 printk(KERN_ERR PFX "esp%d: Reconnect, got disconnect.\n", 1078 shost_printk(KERN_ERR, esp->host,
1047 esp->host->unique_id); 1079 "Reconnect, got disconnect.\n");
1048 return NULL; 1080 return NULL;
1049 } 1081 }
1050 1082
1051 if ((esp->sreg & ESP_STAT_PMASK) != ESP_MIP) { 1083 if ((esp->sreg & ESP_STAT_PMASK) != ESP_MIP) {
1052 printk(KERN_ERR PFX "esp%d: Reconnect, not MIP sreg[%02x].\n", 1084 shost_printk(KERN_ERR, esp->host,
1053 esp->host->unique_id, esp->sreg); 1085 "Reconnect, not MIP sreg[%02x].\n", esp->sreg);
1054 return NULL; 1086 return NULL;
1055 } 1087 }
1056 1088
@@ -1073,8 +1105,7 @@ static struct esp_cmd_entry *esp_reconnect_with_tag(struct esp *esp,
1073 udelay(1); 1105 udelay(1);
1074 } 1106 }
1075 if (i == ESP_RESELECT_TAG_LIMIT) { 1107 if (i == ESP_RESELECT_TAG_LIMIT) {
1076 printk(KERN_ERR PFX "esp%d: Reconnect IRQ2 timeout\n", 1108 shost_printk(KERN_ERR, esp->host, "Reconnect IRQ2 timeout\n");
1077 esp->host->unique_id);
1078 return NULL; 1109 return NULL;
1079 } 1110 }
1080 esp->ops->dma_drain(esp); 1111 esp->ops->dma_drain(esp);
@@ -1087,17 +1118,17 @@ static struct esp_cmd_entry *esp_reconnect_with_tag(struct esp *esp,
1087 1118
1088 if (esp->command_block[0] < SIMPLE_QUEUE_TAG || 1119 if (esp->command_block[0] < SIMPLE_QUEUE_TAG ||
1089 esp->command_block[0] > ORDERED_QUEUE_TAG) { 1120 esp->command_block[0] > ORDERED_QUEUE_TAG) {
1090 printk(KERN_ERR PFX "esp%d: Reconnect, bad tag " 1121 shost_printk(KERN_ERR, esp->host,
1091 "type %02x.\n", 1122 "Reconnect, bad tag type %02x.\n",
1092 esp->host->unique_id, esp->command_block[0]); 1123 esp->command_block[0]);
1093 return NULL; 1124 return NULL;
1094 } 1125 }
1095 1126
1096 ent = lp->tagged_cmds[esp->command_block[1]]; 1127 ent = lp->tagged_cmds[esp->command_block[1]];
1097 if (!ent) { 1128 if (!ent) {
1098 printk(KERN_ERR PFX "esp%d: Reconnect, no entry for " 1129 shost_printk(KERN_ERR, esp->host,
1099 "tag %02x.\n", 1130 "Reconnect, no entry for tag %02x.\n",
1100 esp->host->unique_id, esp->command_block[1]); 1131 esp->command_block[1]);
1101 return NULL; 1132 return NULL;
1102 } 1133 }
1103 1134
@@ -1163,9 +1194,9 @@ static int esp_reconnect(struct esp *esp)
1163 tp = &esp->target[target]; 1194 tp = &esp->target[target];
1164 dev = __scsi_device_lookup_by_target(tp->starget, lun); 1195 dev = __scsi_device_lookup_by_target(tp->starget, lun);
1165 if (!dev) { 1196 if (!dev) {
1166 printk(KERN_ERR PFX "esp%d: Reconnect, no lp " 1197 shost_printk(KERN_ERR, esp->host,
1167 "tgt[%u] lun[%u]\n", 1198 "Reconnect, no lp tgt[%u] lun[%u]\n",
1168 esp->host->unique_id, target, lun); 1199 target, lun);
1169 goto do_reset; 1200 goto do_reset;
1170 } 1201 }
1171 lp = dev->hostdata; 1202 lp = dev->hostdata;
@@ -1291,8 +1322,8 @@ static int esp_finish_select(struct esp *esp)
1291 return 0; 1322 return 0;
1292 } 1323 }
1293 1324
1294 printk("ESP: Unexpected selection completion ireg[%x].\n", 1325 shost_printk(KERN_INFO, esp->host,
1295 esp->ireg); 1326 "Unexpected selection completion ireg[%x]\n", esp->ireg);
1296 esp_schedule_reset(esp); 1327 esp_schedule_reset(esp);
1297 return 0; 1328 return 0;
1298} 1329}
@@ -1312,11 +1343,42 @@ static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent,
1312 (((unsigned int)esp_read8(ESP_TCMED)) << 8)); 1343 (((unsigned int)esp_read8(ESP_TCMED)) << 8));
1313 if (esp->rev == FASHME) 1344 if (esp->rev == FASHME)
1314 ecount |= ((unsigned int)esp_read8(FAS_RLO)) << 16; 1345 ecount |= ((unsigned int)esp_read8(FAS_RLO)) << 16;
1346 if (esp->rev == PCSCSI && (esp->config2 & ESP_CONFIG2_FENAB))
1347 ecount |= ((unsigned int)esp_read8(ESP_TCHI)) << 16;
1315 } 1348 }
1316 1349
1317 bytes_sent = esp->data_dma_len; 1350 bytes_sent = esp->data_dma_len;
1318 bytes_sent -= ecount; 1351 bytes_sent -= ecount;
1319 1352
1353 /*
1354 * The am53c974 has a DMA 'pecularity'. The doc states:
1355 * In some odd byte conditions, one residual byte will
1356 * be left in the SCSI FIFO, and the FIFO Flags will
1357 * never count to '0 '. When this happens, the residual
1358 * byte should be retrieved via PIO following completion
1359 * of the BLAST operation.
1360 */
1361 if (fifo_cnt == 1 && ent->flags & ESP_CMD_FLAG_RESIDUAL) {
1362 size_t count = 1;
1363 size_t offset = bytes_sent;
1364 u8 bval = esp_read8(ESP_FDATA);
1365
1366 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE)
1367 ent->sense_ptr[bytes_sent] = bval;
1368 else {
1369 struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
1370 u8 *ptr;
1371
1372 ptr = scsi_kmap_atomic_sg(p->cur_sg, p->u.num_sg,
1373 &offset, &count);
1374 if (likely(ptr)) {
1375 *(ptr + offset) = bval;
1376 scsi_kunmap_atomic_sg(ptr);
1377 }
1378 }
1379 bytes_sent += fifo_cnt;
1380 ent->flags &= ~ESP_CMD_FLAG_RESIDUAL;
1381 }
1320 if (!(ent->flags & ESP_CMD_FLAG_WRITE)) 1382 if (!(ent->flags & ESP_CMD_FLAG_WRITE))
1321 bytes_sent -= fifo_cnt; 1383 bytes_sent -= fifo_cnt;
1322 1384
@@ -1556,8 +1618,8 @@ static void esp_msgin_extended(struct esp *esp)
1556 return; 1618 return;
1557 } 1619 }
1558 1620
1559 printk("ESP: Unexpected extended msg type %x\n", 1621 shost_printk(KERN_INFO, esp->host,
1560 esp->msg_in[2]); 1622 "Unexpected extended msg type %x\n", esp->msg_in[2]);
1561 1623
1562 esp->msg_out[0] = ABORT_TASK_SET; 1624 esp->msg_out[0] = ABORT_TASK_SET;
1563 esp->msg_out_len = 1; 1625 esp->msg_out_len = 1;
@@ -1574,7 +1636,8 @@ static int esp_msgin_process(struct esp *esp)
1574 1636
1575 if (msg0 & 0x80) { 1637 if (msg0 & 0x80) {
1576 /* Identify */ 1638 /* Identify */
1577 printk("ESP: Unexpected msgin identify\n"); 1639 shost_printk(KERN_INFO, esp->host,
1640 "Unexpected msgin identify\n");
1578 return 0; 1641 return 0;
1579 } 1642 }
1580 1643
@@ -1640,10 +1703,12 @@ static int esp_msgin_process(struct esp *esp)
1640 1703
1641static int esp_process_event(struct esp *esp) 1704static int esp_process_event(struct esp *esp)
1642{ 1705{
1643 int write; 1706 int write, i;
1644 1707
1645again: 1708again:
1646 write = 0; 1709 write = 0;
1710 esp_log_event("process event %d phase %x\n",
1711 esp->event, esp->sreg & ESP_STAT_PMASK);
1647 switch (esp->event) { 1712 switch (esp->event) {
1648 case ESP_EVENT_CHECK_PHASE: 1713 case ESP_EVENT_CHECK_PHASE:
1649 switch (esp->sreg & ESP_STAT_PMASK) { 1714 switch (esp->sreg & ESP_STAT_PMASK) {
@@ -1673,8 +1738,9 @@ again:
1673 break; 1738 break;
1674 1739
1675 default: 1740 default:
1676 printk("ESP: Unexpected phase, sreg=%02x\n", 1741 shost_printk(KERN_INFO, esp->host,
1677 esp->sreg); 1742 "Unexpected phase, sreg=%02x\n",
1743 esp->sreg);
1678 esp_schedule_reset(esp); 1744 esp_schedule_reset(esp);
1679 return 0; 1745 return 0;
1680 } 1746 }
@@ -1708,18 +1774,17 @@ again:
1708 esp->data_dma_len = dma_len; 1774 esp->data_dma_len = dma_len;
1709 1775
1710 if (!dma_len) { 1776 if (!dma_len) {
1711 printk(KERN_ERR PFX "esp%d: DMA length is zero!\n", 1777 shost_printk(KERN_ERR, esp->host,
1712 esp->host->unique_id); 1778 "DMA length is zero!\n");
1713 printk(KERN_ERR PFX "esp%d: cur adr[%08llx] len[%08x]\n", 1779 shost_printk(KERN_ERR, esp->host,
1714 esp->host->unique_id, 1780 "cur adr[%08llx] len[%08x]\n",
1715 (unsigned long long)esp_cur_dma_addr(ent, cmd), 1781 (unsigned long long)esp_cur_dma_addr(ent, cmd),
1716 esp_cur_dma_len(ent, cmd)); 1782 esp_cur_dma_len(ent, cmd));
1717 esp_schedule_reset(esp); 1783 esp_schedule_reset(esp);
1718 return 0; 1784 return 0;
1719 } 1785 }
1720 1786
1721 esp_log_datastart("ESP: start data addr[%08llx] len[%u] " 1787 esp_log_datastart("start data addr[%08llx] len[%u] write(%d)\n",
1722 "write(%d)\n",
1723 (unsigned long long)dma_addr, dma_len, write); 1788 (unsigned long long)dma_addr, dma_len, write);
1724 1789
1725 esp->ops->send_dma_cmd(esp, dma_addr, dma_len, dma_len, 1790 esp->ops->send_dma_cmd(esp, dma_addr, dma_len, dma_len,
@@ -1733,7 +1798,8 @@ again:
1733 int bytes_sent; 1798 int bytes_sent;
1734 1799
1735 if (esp->ops->dma_error(esp)) { 1800 if (esp->ops->dma_error(esp)) {
1736 printk("ESP: data done, DMA error, resetting\n"); 1801 shost_printk(KERN_INFO, esp->host,
1802 "data done, DMA error, resetting\n");
1737 esp_schedule_reset(esp); 1803 esp_schedule_reset(esp);
1738 return 0; 1804 return 0;
1739 } 1805 }
@@ -1749,14 +1815,15 @@ again:
1749 /* We should always see exactly a bus-service 1815 /* We should always see exactly a bus-service
1750 * interrupt at the end of a successful transfer. 1816 * interrupt at the end of a successful transfer.
1751 */ 1817 */
1752 printk("ESP: data done, not BSERV, resetting\n"); 1818 shost_printk(KERN_INFO, esp->host,
1819 "data done, not BSERV, resetting\n");
1753 esp_schedule_reset(esp); 1820 esp_schedule_reset(esp);
1754 return 0; 1821 return 0;
1755 } 1822 }
1756 1823
1757 bytes_sent = esp_data_bytes_sent(esp, ent, cmd); 1824 bytes_sent = esp_data_bytes_sent(esp, ent, cmd);
1758 1825
1759 esp_log_datadone("ESP: data done flgs[%x] sent[%d]\n", 1826 esp_log_datadone("data done flgs[%x] sent[%d]\n",
1760 ent->flags, bytes_sent); 1827 ent->flags, bytes_sent);
1761 1828
1762 if (bytes_sent < 0) { 1829 if (bytes_sent < 0) {
@@ -1785,8 +1852,9 @@ again:
1785 } 1852 }
1786 1853
1787 if (ent->message != COMMAND_COMPLETE) { 1854 if (ent->message != COMMAND_COMPLETE) {
1788 printk("ESP: Unexpected message %x in status\n", 1855 shost_printk(KERN_INFO, esp->host,
1789 ent->message); 1856 "Unexpected message %x in status\n",
1857 ent->message);
1790 esp_schedule_reset(esp); 1858 esp_schedule_reset(esp);
1791 return 0; 1859 return 0;
1792 } 1860 }
@@ -1804,8 +1872,7 @@ again:
1804 scsi_esp_cmd(esp, ESP_CMD_ESEL); 1872 scsi_esp_cmd(esp, ESP_CMD_ESEL);
1805 1873
1806 if (ent->message == COMMAND_COMPLETE) { 1874 if (ent->message == COMMAND_COMPLETE) {
1807 esp_log_cmddone("ESP: Command done status[%x] " 1875 esp_log_cmddone("Command done status[%x] message[%x]\n",
1808 "message[%x]\n",
1809 ent->status, ent->message); 1876 ent->status, ent->message);
1810 if (ent->status == SAM_STAT_TASK_SET_FULL) 1877 if (ent->status == SAM_STAT_TASK_SET_FULL)
1811 esp_event_queue_full(esp, ent); 1878 esp_event_queue_full(esp, ent);
@@ -1821,16 +1888,16 @@ again:
1821 DID_OK)); 1888 DID_OK));
1822 } 1889 }
1823 } else if (ent->message == DISCONNECT) { 1890 } else if (ent->message == DISCONNECT) {
1824 esp_log_disconnect("ESP: Disconnecting tgt[%d] " 1891 esp_log_disconnect("Disconnecting tgt[%d] tag[%x:%x]\n",
1825 "tag[%x:%x]\n",
1826 cmd->device->id, 1892 cmd->device->id,
1827 ent->tag[0], ent->tag[1]); 1893 ent->tag[0], ent->tag[1]);
1828 1894
1829 esp->active_cmd = NULL; 1895 esp->active_cmd = NULL;
1830 esp_maybe_execute_command(esp); 1896 esp_maybe_execute_command(esp);
1831 } else { 1897 } else {
1832 printk("ESP: Unexpected message %x in freebus\n", 1898 shost_printk(KERN_INFO, esp->host,
1833 ent->message); 1899 "Unexpected message %x in freebus\n",
1900 ent->message);
1834 esp_schedule_reset(esp); 1901 esp_schedule_reset(esp);
1835 return 0; 1902 return 0;
1836 } 1903 }
@@ -1862,6 +1929,10 @@ again:
1862 if (esp->msg_out_len == 1) { 1929 if (esp->msg_out_len == 1) {
1863 esp_write8(esp->msg_out[0], ESP_FDATA); 1930 esp_write8(esp->msg_out[0], ESP_FDATA);
1864 scsi_esp_cmd(esp, ESP_CMD_TI); 1931 scsi_esp_cmd(esp, ESP_CMD_TI);
1932 } else if (esp->flags & ESP_FLAG_USE_FIFO) {
1933 for (i = 0; i < esp->msg_out_len; i++)
1934 esp_write8(esp->msg_out[i], ESP_FDATA);
1935 scsi_esp_cmd(esp, ESP_CMD_TI);
1865 } else { 1936 } else {
1866 /* Use DMA. */ 1937 /* Use DMA. */
1867 memcpy(esp->command_block, 1938 memcpy(esp->command_block,
@@ -1917,7 +1988,7 @@ again:
1917 val = esp_read8(ESP_FDATA); 1988 val = esp_read8(ESP_FDATA);
1918 esp->msg_in[esp->msg_in_len++] = val; 1989 esp->msg_in[esp->msg_in_len++] = val;
1919 1990
1920 esp_log_msgin("ESP: Got msgin byte %x\n", val); 1991 esp_log_msgin("Got msgin byte %x\n", val);
1921 1992
1922 if (!esp_msgin_process(esp)) 1993 if (!esp_msgin_process(esp))
1923 esp->msg_in_len = 0; 1994 esp->msg_in_len = 0;
@@ -1930,7 +2001,8 @@ again:
1930 if (esp->event != ESP_EVENT_FREE_BUS) 2001 if (esp->event != ESP_EVENT_FREE_BUS)
1931 esp_event(esp, ESP_EVENT_CHECK_PHASE); 2002 esp_event(esp, ESP_EVENT_CHECK_PHASE);
1932 } else { 2003 } else {
1933 printk("ESP: MSGIN neither BSERV not FDON, resetting"); 2004 shost_printk(KERN_INFO, esp->host,
2005 "MSGIN neither BSERV not FDON, resetting");
1934 esp_schedule_reset(esp); 2006 esp_schedule_reset(esp);
1935 return 0; 2007 return 0;
1936 } 2008 }
@@ -1938,11 +2010,7 @@ again:
1938 case ESP_EVENT_CMD_START: 2010 case ESP_EVENT_CMD_START:
1939 memcpy(esp->command_block, esp->cmd_bytes_ptr, 2011 memcpy(esp->command_block, esp->cmd_bytes_ptr,
1940 esp->cmd_bytes_left); 2012 esp->cmd_bytes_left);
1941 if (esp->rev == FASHME) 2013 esp_send_dma_cmd(esp, esp->cmd_bytes_left, 16, ESP_CMD_TI);
1942 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1943 esp->ops->send_dma_cmd(esp, esp->command_block_dma,
1944 esp->cmd_bytes_left, 16, 0,
1945 ESP_CMD_DMA | ESP_CMD_TI);
1946 esp_event(esp, ESP_EVENT_CMD_DONE); 2014 esp_event(esp, ESP_EVENT_CMD_DONE);
1947 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK; 2015 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1948 break; 2016 break;
@@ -1961,8 +2029,8 @@ again:
1961 break; 2029 break;
1962 2030
1963 default: 2031 default:
1964 printk("ESP: Unexpected event %x, resetting\n", 2032 shost_printk(KERN_INFO, esp->host,
1965 esp->event); 2033 "Unexpected event %x, resetting\n", esp->event);
1966 esp_schedule_reset(esp); 2034 esp_schedule_reset(esp);
1967 return 0; 2035 return 0;
1968 break; 2036 break;
@@ -2044,7 +2112,12 @@ static void __esp_interrupt(struct esp *esp)
2044 int finish_reset, intr_done; 2112 int finish_reset, intr_done;
2045 u8 phase; 2113 u8 phase;
2046 2114
2115 /*
2116 * Once INTRPT is read STATUS and SSTEP are cleared.
2117 */
2047 esp->sreg = esp_read8(ESP_STATUS); 2118 esp->sreg = esp_read8(ESP_STATUS);
2119 esp->seqreg = esp_read8(ESP_SSTEP);
2120 esp->ireg = esp_read8(ESP_INTRPT);
2048 2121
2049 if (esp->flags & ESP_FLAG_RESETTING) { 2122 if (esp->flags & ESP_FLAG_RESETTING) {
2050 finish_reset = 1; 2123 finish_reset = 1;
@@ -2057,8 +2130,6 @@ static void __esp_interrupt(struct esp *esp)
2057 return; 2130 return;
2058 } 2131 }
2059 2132
2060 esp->ireg = esp_read8(ESP_INTRPT);
2061
2062 if (esp->ireg & ESP_INTR_SR) 2133 if (esp->ireg & ESP_INTR_SR)
2063 finish_reset = 1; 2134 finish_reset = 1;
2064 2135
@@ -2085,14 +2156,15 @@ static void __esp_interrupt(struct esp *esp)
2085 } 2156 }
2086 } 2157 }
2087 2158
2088 esp_log_intr("ESP: intr sreg[%02x] seqreg[%02x] " 2159 esp_log_intr("intr sreg[%02x] seqreg[%02x] "
2089 "sreg2[%02x] ireg[%02x]\n", 2160 "sreg2[%02x] ireg[%02x]\n",
2090 esp->sreg, esp->seqreg, esp->sreg2, esp->ireg); 2161 esp->sreg, esp->seqreg, esp->sreg2, esp->ireg);
2091 2162
2092 intr_done = 0; 2163 intr_done = 0;
2093 2164
2094 if (esp->ireg & (ESP_INTR_S | ESP_INTR_SATN | ESP_INTR_IC)) { 2165 if (esp->ireg & (ESP_INTR_S | ESP_INTR_SATN | ESP_INTR_IC)) {
2095 printk("ESP: unexpected IREG %02x\n", esp->ireg); 2166 shost_printk(KERN_INFO, esp->host,
2167 "unexpected IREG %02x\n", esp->ireg);
2096 if (esp->ireg & ESP_INTR_IC) 2168 if (esp->ireg & ESP_INTR_IC)
2097 esp_dump_cmd_log(esp); 2169 esp_dump_cmd_log(esp);
2098 2170
@@ -2149,46 +2221,50 @@ static void esp_get_revision(struct esp *esp)
2149 u8 val; 2221 u8 val;
2150 2222
2151 esp->config1 = (ESP_CONFIG1_PENABLE | (esp->scsi_id & 7)); 2223 esp->config1 = (ESP_CONFIG1_PENABLE | (esp->scsi_id & 7));
2152 esp->config2 = (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY); 2224 if (esp->config2 == 0) {
2225 esp->config2 = (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY);
2226 esp_write8(esp->config2, ESP_CFG2);
2227
2228 val = esp_read8(ESP_CFG2);
2229 val &= ~ESP_CONFIG2_MAGIC;
2230
2231 esp->config2 = 0;
2232 if (val != (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY)) {
2233 /*
2234 * If what we write to cfg2 does not come back,
2235 * cfg2 is not implemented.
2236 * Therefore this must be a plain esp100.
2237 */
2238 esp->rev = ESP100;
2239 return;
2240 }
2241 }
2242
2243 esp_set_all_config3(esp, 5);
2244 esp->prev_cfg3 = 5;
2153 esp_write8(esp->config2, ESP_CFG2); 2245 esp_write8(esp->config2, ESP_CFG2);
2246 esp_write8(0, ESP_CFG3);
2247 esp_write8(esp->prev_cfg3, ESP_CFG3);
2154 2248
2155 val = esp_read8(ESP_CFG2); 2249 val = esp_read8(ESP_CFG3);
2156 val &= ~ESP_CONFIG2_MAGIC; 2250 if (val != 5) {
2157 if (val != (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY)) { 2251 /* The cfg2 register is implemented, however
2158 /* If what we write to cfg2 does not come back, cfg2 is not 2252 * cfg3 is not, must be esp100a.
2159 * implemented, therefore this must be a plain esp100.
2160 */ 2253 */
2161 esp->rev = ESP100; 2254 esp->rev = ESP100A;
2162 } else { 2255 } else {
2163 esp->config2 = 0; 2256 esp_set_all_config3(esp, 0);
2164 esp_set_all_config3(esp, 5); 2257 esp->prev_cfg3 = 0;
2165 esp->prev_cfg3 = 5;
2166 esp_write8(esp->config2, ESP_CFG2);
2167 esp_write8(0, ESP_CFG3);
2168 esp_write8(esp->prev_cfg3, ESP_CFG3); 2258 esp_write8(esp->prev_cfg3, ESP_CFG3);
2169 2259
2170 val = esp_read8(ESP_CFG3); 2260 /* All of cfg{1,2,3} implemented, must be one of
2171 if (val != 5) { 2261 * the fas variants, figure out which one.
2172 /* The cfg2 register is implemented, however 2262 */
2173 * cfg3 is not, must be esp100a. 2263 if (esp->cfact == 0 || esp->cfact > ESP_CCF_F5) {
2174 */ 2264 esp->rev = FAST;
2175 esp->rev = ESP100A; 2265 esp->sync_defp = SYNC_DEFP_FAST;
2176 } else { 2266 } else {
2177 esp_set_all_config3(esp, 0); 2267 esp->rev = ESP236;
2178 esp->prev_cfg3 = 0;
2179 esp_write8(esp->prev_cfg3, ESP_CFG3);
2180
2181 /* All of cfg{1,2,3} implemented, must be one of
2182 * the fas variants, figure out which one.
2183 */
2184 if (esp->cfact == 0 || esp->cfact > ESP_CCF_F5) {
2185 esp->rev = FAST;
2186 esp->sync_defp = SYNC_DEFP_FAST;
2187 } else {
2188 esp->rev = ESP236;
2189 }
2190 esp->config2 = 0;
2191 esp_write8(esp->config2, ESP_CFG2);
2192 } 2268 }
2193 } 2269 }
2194} 2270}
@@ -2308,6 +2384,7 @@ static const char *esp_chip_names[] = {
2308 "FAS100A", 2384 "FAS100A",
2309 "FAST", 2385 "FAST",
2310 "FASHME", 2386 "FASHME",
2387 "AM53C974",
2311}; 2388};
2312 2389
2313static struct scsi_transport_template *esp_transport_template; 2390static struct scsi_transport_template *esp_transport_template;
@@ -2317,6 +2394,10 @@ int scsi_esp_register(struct esp *esp, struct device *dev)
2317 static int instance; 2394 static int instance;
2318 int err; 2395 int err;
2319 2396
2397 if (!esp->num_tags)
2398 esp->num_tags = ESP_DEFAULT_TAGS;
2399 else if (esp->num_tags >= ESP_MAX_TAG)
2400 esp->num_tags = ESP_MAX_TAG - 1;
2320 esp->host->transportt = esp_transport_template; 2401 esp->host->transportt = esp_transport_template;
2321 esp->host->max_lun = ESP_MAX_LUN; 2402 esp->host->max_lun = ESP_MAX_LUN;
2322 esp->host->cmd_per_lun = 2; 2403 esp->host->cmd_per_lun = 2;
@@ -2330,12 +2411,13 @@ int scsi_esp_register(struct esp *esp, struct device *dev)
2330 2411
2331 esp_bootup_reset(esp); 2412 esp_bootup_reset(esp);
2332 2413
2333 printk(KERN_INFO PFX "esp%u, regs[%1p:%1p] irq[%u]\n", 2414 dev_printk(KERN_INFO, dev, "esp%u: regs[%1p:%1p] irq[%u]\n",
2334 esp->host->unique_id, esp->regs, esp->dma_regs, 2415 esp->host->unique_id, esp->regs, esp->dma_regs,
2335 esp->host->irq); 2416 esp->host->irq);
2336 printk(KERN_INFO PFX "esp%u is a %s, %u MHz (ccf=%u), SCSI ID %u\n", 2417 dev_printk(KERN_INFO, dev,
2337 esp->host->unique_id, esp_chip_names[esp->rev], 2418 "esp%u: is a %s, %u MHz (ccf=%u), SCSI ID %u\n",
2338 esp->cfreq / 1000000, esp->cfact, esp->scsi_id); 2419 esp->host->unique_id, esp_chip_names[esp->rev],
2420 esp->cfreq / 1000000, esp->cfact, esp->scsi_id);
2339 2421
2340 /* Let the SCSI bus reset settle. */ 2422 /* Let the SCSI bus reset settle. */
2341 ssleep(esp_bus_reset_settle); 2423 ssleep(esp_bus_reset_settle);
@@ -2402,28 +2484,10 @@ static int esp_slave_configure(struct scsi_device *dev)
2402{ 2484{
2403 struct esp *esp = shost_priv(dev->host); 2485 struct esp *esp = shost_priv(dev->host);
2404 struct esp_target_data *tp = &esp->target[dev->id]; 2486 struct esp_target_data *tp = &esp->target[dev->id];
2405 int goal_tags, queue_depth;
2406
2407 goal_tags = 0;
2408 2487
2409 if (dev->tagged_supported) { 2488 if (dev->tagged_supported)
2410 /* XXX make this configurable somehow XXX */ 2489 scsi_change_queue_depth(dev, esp->num_tags);
2411 goal_tags = ESP_DEFAULT_TAGS;
2412
2413 if (goal_tags > ESP_MAX_TAG)
2414 goal_tags = ESP_MAX_TAG;
2415 }
2416 2490
2417 queue_depth = goal_tags;
2418 if (queue_depth < dev->host->cmd_per_lun)
2419 queue_depth = dev->host->cmd_per_lun;
2420
2421 if (goal_tags) {
2422 scsi_set_tag_type(dev, MSG_ORDERED_TAG);
2423 scsi_activate_tcq(dev, queue_depth);
2424 } else {
2425 scsi_deactivate_tcq(dev, queue_depth);
2426 }
2427 tp->flags |= ESP_TGT_DISCONNECT; 2491 tp->flags |= ESP_TGT_DISCONNECT;
2428 2492
2429 if (!spi_initial_dv(dev->sdev_target)) 2493 if (!spi_initial_dv(dev->sdev_target))
@@ -2451,19 +2515,20 @@ static int esp_eh_abort_handler(struct scsi_cmnd *cmd)
2451 * XXX much for the final driver. 2515 * XXX much for the final driver.
2452 */ 2516 */
2453 spin_lock_irqsave(esp->host->host_lock, flags); 2517 spin_lock_irqsave(esp->host->host_lock, flags);
2454 printk(KERN_ERR PFX "esp%d: Aborting command [%p:%02x]\n", 2518 shost_printk(KERN_ERR, esp->host, "Aborting command [%p:%02x]\n",
2455 esp->host->unique_id, cmd, cmd->cmnd[0]); 2519 cmd, cmd->cmnd[0]);
2456 ent = esp->active_cmd; 2520 ent = esp->active_cmd;
2457 if (ent) 2521 if (ent)
2458 printk(KERN_ERR PFX "esp%d: Current command [%p:%02x]\n", 2522 shost_printk(KERN_ERR, esp->host,
2459 esp->host->unique_id, ent->cmd, ent->cmd->cmnd[0]); 2523 "Current command [%p:%02x]\n",
2524 ent->cmd, ent->cmd->cmnd[0]);
2460 list_for_each_entry(ent, &esp->queued_cmds, list) { 2525 list_for_each_entry(ent, &esp->queued_cmds, list) {
2461 printk(KERN_ERR PFX "esp%d: Queued command [%p:%02x]\n", 2526 shost_printk(KERN_ERR, esp->host, "Queued command [%p:%02x]\n",
2462 esp->host->unique_id, ent->cmd, ent->cmd->cmnd[0]); 2527 ent->cmd, ent->cmd->cmnd[0]);
2463 } 2528 }
2464 list_for_each_entry(ent, &esp->active_cmds, list) { 2529 list_for_each_entry(ent, &esp->active_cmds, list) {
2465 printk(KERN_ERR PFX "esp%d: Active command [%p:%02x]\n", 2530 shost_printk(KERN_ERR, esp->host, " Active command [%p:%02x]\n",
2466 esp->host->unique_id, ent->cmd, ent->cmd->cmnd[0]); 2531 ent->cmd, ent->cmd->cmnd[0]);
2467 } 2532 }
2468 esp_dump_cmd_log(esp); 2533 esp_dump_cmd_log(esp);
2469 spin_unlock_irqrestore(esp->host->host_lock, flags); 2534 spin_unlock_irqrestore(esp->host->host_lock, flags);
@@ -2631,6 +2696,7 @@ struct scsi_host_template scsi_esp_template = {
2631 .use_clustering = ENABLE_CLUSTERING, 2696 .use_clustering = ENABLE_CLUSTERING,
2632 .max_sectors = 0xffff, 2697 .max_sectors = 0xffff,
2633 .skip_settle_delay = 1, 2698 .skip_settle_delay = 1,
2699 .use_blk_tags = 1,
2634}; 2700};
2635EXPORT_SYMBOL(scsi_esp_template); 2701EXPORT_SYMBOL(scsi_esp_template);
2636 2702
diff --git a/drivers/scsi/esp_scsi.h b/drivers/scsi/esp_scsi.h
index cd68805e8d78..84dcbe4a6268 100644
--- a/drivers/scsi/esp_scsi.h
+++ b/drivers/scsi/esp_scsi.h
@@ -1,4 +1,4 @@
1/* esp_scsi.h: Defines and structures for the ESP drier. 1/* esp_scsi.h: Defines and structures for the ESP driver.
2 * 2 *
3 * Copyright (C) 2007 David S. Miller (davem@davemloft.net) 3 * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
4 */ 4 */
@@ -25,6 +25,7 @@
25#define ESP_CTEST 0x0aUL /* wo Chip test register 0x28 */ 25#define ESP_CTEST 0x0aUL /* wo Chip test register 0x28 */
26#define ESP_CFG2 0x0bUL /* rw Second cfg register 0x2c */ 26#define ESP_CFG2 0x0bUL /* rw Second cfg register 0x2c */
27#define ESP_CFG3 0x0cUL /* rw Third cfg register 0x30 */ 27#define ESP_CFG3 0x0cUL /* rw Third cfg register 0x30 */
28#define ESP_CFG4 0x0dUL /* rw Fourth cfg register 0x34 */
28#define ESP_TCHI 0x0eUL /* rw High bits transf count 0x38 */ 29#define ESP_TCHI 0x0eUL /* rw High bits transf count 0x38 */
29#define ESP_UID ESP_TCHI /* ro Unique ID code 0x38 */ 30#define ESP_UID ESP_TCHI /* ro Unique ID code 0x38 */
30#define FAS_RLO ESP_TCHI /* rw HME extended counter 0x38 */ 31#define FAS_RLO ESP_TCHI /* rw HME extended counter 0x38 */
@@ -76,6 +77,18 @@
76#define ESP_CONFIG3_IMS 0x80 /* ID msg chk'ng (esp/fas236) */ 77#define ESP_CONFIG3_IMS 0x80 /* ID msg chk'ng (esp/fas236) */
77#define ESP_CONFIG3_OBPUSH 0x80 /* Push odd-byte to dma (hme) */ 78#define ESP_CONFIG3_OBPUSH 0x80 /* Push odd-byte to dma (hme) */
78 79
80/* ESP config register 4 read-write, found only on am53c974 chips */
81#define ESP_CONFIG4_RADE 0x04 /* Active negation */
82#define ESP_CONFIG4_RAE 0x08 /* Active negation on REQ and ACK */
83#define ESP_CONFIG4_PWD 0x20 /* Reduced power feature */
84#define ESP_CONFIG4_GE0 0x40 /* Glitch eater bit 0 */
85#define ESP_CONFIG4_GE1 0x80 /* Glitch eater bit 1 */
86
87#define ESP_CONFIG_GE_12NS (0)
88#define ESP_CONFIG_GE_25NS (ESP_CONFIG_GE1)
89#define ESP_CONFIG_GE_35NS (ESP_CONFIG_GE0)
90#define ESP_CONFIG_GE_0NS (ESP_CONFIG_GE0 | ESP_CONFIG_GE1)
91
79/* ESP command register read-write */ 92/* ESP command register read-write */
80/* Group 1 commands: These may be sent at any point in time to the ESP 93/* Group 1 commands: These may be sent at any point in time to the ESP
81 * chip. None of them can generate interrupts 'cept 94 * chip. None of them can generate interrupts 'cept
@@ -254,6 +267,7 @@ enum esp_rev {
254 FAS100A = 0x04, 267 FAS100A = 0x04,
255 FAST = 0x05, 268 FAST = 0x05,
256 FASHME = 0x06, 269 FASHME = 0x06,
270 PCSCSI = 0x07, /* AM53c974 */
257}; 271};
258 272
259struct esp_cmd_entry { 273struct esp_cmd_entry {
@@ -269,6 +283,7 @@ struct esp_cmd_entry {
269#define ESP_CMD_FLAG_WRITE 0x01 /* DMA is a write */ 283#define ESP_CMD_FLAG_WRITE 0x01 /* DMA is a write */
270#define ESP_CMD_FLAG_ABORT 0x02 /* being aborted */ 284#define ESP_CMD_FLAG_ABORT 0x02 /* being aborted */
271#define ESP_CMD_FLAG_AUTOSENSE 0x04 /* Doing automatic REQUEST_SENSE */ 285#define ESP_CMD_FLAG_AUTOSENSE 0x04 /* Doing automatic REQUEST_SENSE */
286#define ESP_CMD_FLAG_RESIDUAL 0x08 /* AM53c974 BLAST residual */
272 287
273 u8 tag[2]; 288 u8 tag[2];
274 u8 orig_tag[2]; 289 u8 orig_tag[2];
@@ -283,7 +298,6 @@ struct esp_cmd_entry {
283 struct completion *eh_done; 298 struct completion *eh_done;
284}; 299};
285 300
286/* XXX make this configurable somehow XXX */
287#define ESP_DEFAULT_TAGS 16 301#define ESP_DEFAULT_TAGS 16
288 302
289#define ESP_MAX_TARGET 16 303#define ESP_MAX_TARGET 16
@@ -445,7 +459,7 @@ struct esp {
445 u8 prev_soff; 459 u8 prev_soff;
446 u8 prev_stp; 460 u8 prev_stp;
447 u8 prev_cfg3; 461 u8 prev_cfg3;
448 u8 __pad; 462 u8 num_tags;
449 463
450 struct list_head esp_cmd_pool; 464 struct list_head esp_cmd_pool;
451 465
@@ -466,6 +480,7 @@ struct esp {
466 u8 bursts; 480 u8 bursts;
467 u8 config1; 481 u8 config1;
468 u8 config2; 482 u8 config2;
483 u8 config4;
469 484
470 u8 scsi_id; 485 u8 scsi_id;
471 u32 scsi_id_mask; 486 u32 scsi_id_mask;
@@ -479,6 +494,7 @@ struct esp {
479#define ESP_FLAG_WIDE_CAPABLE 0x00000008 494#define ESP_FLAG_WIDE_CAPABLE 0x00000008
480#define ESP_FLAG_QUICKIRQ_CHECK 0x00000010 495#define ESP_FLAG_QUICKIRQ_CHECK 0x00000010
481#define ESP_FLAG_DISABLE_SYNC 0x00000020 496#define ESP_FLAG_DISABLE_SYNC 0x00000020
497#define ESP_FLAG_USE_FIFO 0x00000040
482 498
483 u8 select_state; 499 u8 select_state;
484#define ESP_SELECT_NONE 0x00 /* Not selecting */ 500#define ESP_SELECT_NONE 0x00 /* Not selecting */
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 4a8ac7d8c76b..308a016fdaea 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -280,14 +280,16 @@ static struct scsi_host_template fcoe_shost_template = {
280 .eh_device_reset_handler = fc_eh_device_reset, 280 .eh_device_reset_handler = fc_eh_device_reset,
281 .eh_host_reset_handler = fc_eh_host_reset, 281 .eh_host_reset_handler = fc_eh_host_reset,
282 .slave_alloc = fc_slave_alloc, 282 .slave_alloc = fc_slave_alloc,
283 .change_queue_depth = fc_change_queue_depth, 283 .change_queue_depth = scsi_change_queue_depth,
284 .change_queue_type = fc_change_queue_type, 284 .change_queue_type = scsi_change_queue_type,
285 .this_id = -1, 285 .this_id = -1,
286 .cmd_per_lun = 3, 286 .cmd_per_lun = 3,
287 .can_queue = FCOE_MAX_OUTSTANDING_COMMANDS, 287 .can_queue = FCOE_MAX_OUTSTANDING_COMMANDS,
288 .use_clustering = ENABLE_CLUSTERING, 288 .use_clustering = ENABLE_CLUSTERING,
289 .sg_tablesize = SG_ALL, 289 .sg_tablesize = SG_ALL,
290 .max_sectors = 0xffff, 290 .max_sectors = 0xffff,
291 .use_blk_tags = 1,
292 .track_queue_depth = 1,
291}; 293};
292 294
293/** 295/**
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index bf8d34c26f13..3b73b96619e2 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -39,7 +39,7 @@
39 39
40#define DRV_NAME "fnic" 40#define DRV_NAME "fnic"
41#define DRV_DESCRIPTION "Cisco FCoE HBA Driver" 41#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
42#define DRV_VERSION "1.6.0.11" 42#define DRV_VERSION "1.6.0.16"
43#define PFX DRV_NAME ": " 43#define PFX DRV_NAME ": "
44#define DFX DRV_NAME "%d: " 44#define DFX DRV_NAME "%d: "
45 45
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
index f3984b48f8e9..bf0bbd42efb5 100644
--- a/drivers/scsi/fnic/fnic_fcs.c
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -135,6 +135,11 @@ void fnic_handle_link(struct work_struct *work)
135 fnic->lport->host->host_no, FNIC_FC_LE, 135 fnic->lport->host->host_no, FNIC_FC_LE,
136 "Link Status: UP_DOWN", 136 "Link Status: UP_DOWN",
137 strlen("Link Status: UP_DOWN")); 137 strlen("Link Status: UP_DOWN"));
138 if (fnic->config.flags & VFCF_FIP_CAPABLE) {
139 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
140 "deleting fip-timer during link-down\n");
141 del_timer_sync(&fnic->fip_timer);
142 }
138 fcoe_ctlr_link_down(&fnic->ctlr); 143 fcoe_ctlr_link_down(&fnic->ctlr);
139 } 144 }
140 145
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index 8c56fdc3a456..0c1f8177b5b7 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -95,12 +95,10 @@ static int fnic_slave_alloc(struct scsi_device *sdev)
95{ 95{
96 struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); 96 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
97 97
98 sdev->tagged_supported = 1;
99
100 if (!rport || fc_remote_port_chkready(rport)) 98 if (!rport || fc_remote_port_chkready(rport))
101 return -ENXIO; 99 return -ENXIO;
102 100
103 scsi_activate_tcq(sdev, fnic_max_qdepth); 101 scsi_change_queue_depth(sdev, fnic_max_qdepth);
104 return 0; 102 return 0;
105} 103}
106 104
@@ -112,8 +110,8 @@ static struct scsi_host_template fnic_host_template = {
112 .eh_device_reset_handler = fnic_device_reset, 110 .eh_device_reset_handler = fnic_device_reset,
113 .eh_host_reset_handler = fnic_host_reset, 111 .eh_host_reset_handler = fnic_host_reset,
114 .slave_alloc = fnic_slave_alloc, 112 .slave_alloc = fnic_slave_alloc,
115 .change_queue_depth = fc_change_queue_depth, 113 .change_queue_depth = scsi_change_queue_depth,
116 .change_queue_type = fc_change_queue_type, 114 .change_queue_type = scsi_change_queue_type,
117 .this_id = -1, 115 .this_id = -1,
118 .cmd_per_lun = 3, 116 .cmd_per_lun = 3,
119 .can_queue = FNIC_DFLT_IO_REQ, 117 .can_queue = FNIC_DFLT_IO_REQ,
@@ -121,6 +119,8 @@ static struct scsi_host_template fnic_host_template = {
121 .sg_tablesize = FNIC_MAX_SG_DESC_CNT, 119 .sg_tablesize = FNIC_MAX_SG_DESC_CNT,
122 .max_sectors = 0xffff, 120 .max_sectors = 0xffff,
123 .shost_attrs = fnic_attrs, 121 .shost_attrs = fnic_attrs,
122 .use_blk_tags = 1,
123 .track_queue_depth = 1,
124}; 124};
125 125
126static void 126static void
@@ -438,21 +438,30 @@ static int fnic_dev_wait(struct vnic_dev *vdev,
438 unsigned long time; 438 unsigned long time;
439 int done; 439 int done;
440 int err; 440 int err;
441 int count;
442
443 count = 0;
441 444
442 err = start(vdev, arg); 445 err = start(vdev, arg);
443 if (err) 446 if (err)
444 return err; 447 return err;
445 448
446 /* Wait for func to complete...2 seconds max */ 449 /* Wait for func to complete.
450 * Sometime schedule_timeout_uninterruptible take long time
451 * to wake up so we do not retry as we are only waiting for
452 * 2 seconds in while loop. By adding count, we make sure
453 * we try atleast three times before returning -ETIMEDOUT
454 */
447 time = jiffies + (HZ * 2); 455 time = jiffies + (HZ * 2);
448 do { 456 do {
449 err = finished(vdev, &done); 457 err = finished(vdev, &done);
458 count++;
450 if (err) 459 if (err)
451 return err; 460 return err;
452 if (done) 461 if (done)
453 return 0; 462 return 0;
454 schedule_timeout_uninterruptible(HZ / 10); 463 schedule_timeout_uninterruptible(HZ / 10);
455 } while (time_after(time, jiffies)); 464 } while (time_after(time, jiffies) || (count < 3));
456 465
457 return -ETIMEDOUT; 466 return -ETIMEDOUT;
458} 467}
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 961bdf5d31cd..2097de42a147 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -325,13 +325,11 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
325 struct fc_rport_libfc_priv *rp = rport->dd_data; 325 struct fc_rport_libfc_priv *rp = rport->dd_data;
326 struct host_sg_desc *desc; 326 struct host_sg_desc *desc;
327 struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats; 327 struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
328 u8 pri_tag = 0;
329 unsigned int i; 328 unsigned int i;
330 unsigned long intr_flags; 329 unsigned long intr_flags;
331 int flags; 330 int flags;
332 u8 exch_flags; 331 u8 exch_flags;
333 struct scsi_lun fc_lun; 332 struct scsi_lun fc_lun;
334 char msg[2];
335 333
336 if (sg_count) { 334 if (sg_count) {
337 /* For each SGE, create a device desc entry */ 335 /* For each SGE, create a device desc entry */
@@ -357,12 +355,6 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
357 355
358 int_to_scsilun(sc->device->lun, &fc_lun); 356 int_to_scsilun(sc->device->lun, &fc_lun);
359 357
360 pri_tag = FCPIO_ICMND_PTA_SIMPLE;
361 msg[0] = MSG_SIMPLE_TAG;
362 scsi_populate_tag_msg(sc, msg);
363 if (msg[0] == MSG_ORDERED_TAG)
364 pri_tag = FCPIO_ICMND_PTA_ORDERED;
365
366 /* Enqueue the descriptor in the Copy WQ */ 358 /* Enqueue the descriptor in the Copy WQ */
367 spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags); 359 spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags);
368 360
@@ -394,7 +386,8 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
394 io_req->sgl_list_pa, 386 io_req->sgl_list_pa,
395 io_req->sense_buf_pa, 387 io_req->sense_buf_pa,
396 0, /* scsi cmd ref, always 0 */ 388 0, /* scsi cmd ref, always 0 */
397 pri_tag, /* scsi pri and tag */ 389 FCPIO_ICMND_PTA_SIMPLE,
390 /* scsi pri and tag */
398 flags, /* command flags */ 391 flags, /* command flags */
399 sc->cmnd, sc->cmd_len, 392 sc->cmnd, sc->cmd_len,
400 scsi_bufflen(sc), 393 scsi_bufflen(sc),
@@ -428,8 +421,10 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
428 int ret; 421 int ret;
429 u64 cmd_trace; 422 u64 cmd_trace;
430 int sg_count = 0; 423 int sg_count = 0;
431 unsigned long flags; 424 unsigned long flags = 0;
432 unsigned long ptr; 425 unsigned long ptr;
426 struct fc_rport_priv *rdata;
427 spinlock_t *io_lock = NULL;
433 428
434 if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED))) 429 if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED)))
435 return SCSI_MLQUEUE_HOST_BUSY; 430 return SCSI_MLQUEUE_HOST_BUSY;
@@ -443,6 +438,16 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
443 return 0; 438 return 0;
444 } 439 }
445 440
441 rdata = lp->tt.rport_lookup(lp, rport->port_id);
442 if (!rdata || (rdata->rp_state == RPORT_ST_DELETE)) {
443 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
444 "returning IO as rport is removed\n");
445 atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
446 sc->result = DID_NO_CONNECT;
447 done(sc);
448 return 0;
449 }
450
446 if (lp->state != LPORT_ST_READY || !(lp->link_up)) 451 if (lp->state != LPORT_ST_READY || !(lp->link_up))
447 return SCSI_MLQUEUE_HOST_BUSY; 452 return SCSI_MLQUEUE_HOST_BUSY;
448 453
@@ -505,6 +510,13 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
505 } 510 }
506 } 511 }
507 512
513 /*
514 * Will acquire lock defore setting to IO initialized.
515 */
516
517 io_lock = fnic_io_lock_hash(fnic, sc);
518 spin_lock_irqsave(io_lock, flags);
519
508 /* initialize rest of io_req */ 520 /* initialize rest of io_req */
509 io_req->port_id = rport->port_id; 521 io_req->port_id = rport->port_id;
510 io_req->start_time = jiffies; 522 io_req->start_time = jiffies;
@@ -521,11 +533,9 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
521 * In case another thread cancelled the request, 533 * In case another thread cancelled the request,
522 * refetch the pointer under the lock. 534 * refetch the pointer under the lock.
523 */ 535 */
524 spinlock_t *io_lock = fnic_io_lock_hash(fnic, sc);
525 FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no, 536 FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
526 sc->request->tag, sc, 0, 0, 0, 537 sc->request->tag, sc, 0, 0, 0,
527 (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc))); 538 (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
528 spin_lock_irqsave(io_lock, flags);
529 io_req = (struct fnic_io_req *)CMD_SP(sc); 539 io_req = (struct fnic_io_req *)CMD_SP(sc);
530 CMD_SP(sc) = NULL; 540 CMD_SP(sc) = NULL;
531 CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE; 541 CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
@@ -534,6 +544,10 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
534 fnic_release_ioreq_buf(fnic, io_req, sc); 544 fnic_release_ioreq_buf(fnic, io_req, sc);
535 mempool_free(io_req, fnic->io_req_pool); 545 mempool_free(io_req, fnic->io_req_pool);
536 } 546 }
547 atomic_dec(&fnic->in_flight);
548 /* acquire host lock before returning to SCSI */
549 spin_lock(lp->host->host_lock);
550 return ret;
537 } else { 551 } else {
538 atomic64_inc(&fnic_stats->io_stats.active_ios); 552 atomic64_inc(&fnic_stats->io_stats.active_ios);
539 atomic64_inc(&fnic_stats->io_stats.num_ios); 553 atomic64_inc(&fnic_stats->io_stats.num_ios);
@@ -555,6 +569,11 @@ out:
555 sc->request->tag, sc, io_req, 569 sc->request->tag, sc, io_req,
556 sg_count, cmd_trace, 570 sg_count, cmd_trace,
557 (((u64)CMD_FLAGS(sc) >> 32) | CMD_STATE(sc))); 571 (((u64)CMD_FLAGS(sc) >> 32) | CMD_STATE(sc)));
572
573 /* if only we issued IO, will we have the io lock */
574 if (CMD_FLAGS(sc) & FNIC_IO_INITIALIZED)
575 spin_unlock_irqrestore(io_lock, flags);
576
558 atomic_dec(&fnic->in_flight); 577 atomic_dec(&fnic->in_flight);
559 /* acquire host lock before returning to SCSI */ 578 /* acquire host lock before returning to SCSI */
560 spin_lock(lp->host->host_lock); 579 spin_lock(lp->host->host_lock);
diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c
index acf1f95cb5c5..65a9bde26974 100644
--- a/drivers/scsi/fnic/fnic_trace.c
+++ b/drivers/scsi/fnic/fnic_trace.c
@@ -624,12 +624,12 @@ int fnic_fc_trace_set_data(u32 host_no, u8 frame_type,
624 if (frame_type == FNIC_FC_RECV) { 624 if (frame_type == FNIC_FC_RECV) {
625 eth_fcoe_hdr_len = sizeof(struct ethhdr) + 625 eth_fcoe_hdr_len = sizeof(struct ethhdr) +
626 sizeof(struct fcoe_hdr); 626 sizeof(struct fcoe_hdr);
627 fc_trc_frame_len = fc_trc_frame_len + eth_fcoe_hdr_len;
628 memset((char *)fc_trace, 0xff, eth_fcoe_hdr_len); 627 memset((char *)fc_trace, 0xff, eth_fcoe_hdr_len);
629 /* Copy the rest of data frame */ 628 /* Copy the rest of data frame */
630 memcpy((char *)(fc_trace + eth_fcoe_hdr_len), (void *)frame, 629 memcpy((char *)(fc_trace + eth_fcoe_hdr_len), (void *)frame,
631 min_t(u8, fc_trc_frame_len, 630 min_t(u8, fc_trc_frame_len,
632 (u8)(FC_TRC_SIZE_BYTES - FC_TRC_HEADER_SIZE))); 631 (u8)(FC_TRC_SIZE_BYTES - FC_TRC_HEADER_SIZE
632 - eth_fcoe_hdr_len)));
633 } else { 633 } else {
634 memcpy((char *)fc_trace, (void *)frame, 634 memcpy((char *)fc_trace, (void *)frame,
635 min_t(u8, fc_trc_frame_len, 635 min_t(u8, fc_trc_frame_len,
diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c
index b331272e93bc..f35792f7051c 100644
--- a/drivers/scsi/g_NCR5380.c
+++ b/drivers/scsi/g_NCR5380.c
@@ -18,20 +18,6 @@
18 * 18 *
19 * Added ISAPNP support for DTC436 adapters, 19 * Added ISAPNP support for DTC436 adapters,
20 * Thomas Sailer, sailer@ife.ee.ethz.ch 20 * Thomas Sailer, sailer@ife.ee.ethz.ch
21 *
22 * ALPHA RELEASE 1.
23 *
24 * For more information, please consult
25 *
26 * NCR 5380 Family
27 * SCSI Protocol Controller
28 * Databook
29 *
30 * NCR Microelectronics
31 * 1635 Aeroplaza Drive
32 * Colorado Springs, CO 80916
33 * 1+ (719) 578-3400
34 * 1+ (800) 334-5454
35 */ 21 */
36 22
37/* 23/*
@@ -40,14 +26,6 @@
40 */ 26 */
41 27
42/* 28/*
43 * Options :
44 *
45 * PARITY - enable parity checking. Not supported.
46 *
47 * SCSI2 - enable support for SCSI-II tagged queueing. Untested.
48 *
49 * USLEEP - enable support for devices that don't disconnect. Untested.
50 *
51 * The card is detected and initialized in one of several ways : 29 * The card is detected and initialized in one of several ways :
52 * 1. With command line overrides - NCR5380=port,irq may be 30 * 1. With command line overrides - NCR5380=port,irq may be
53 * used on the LILO command line to override the defaults. 31 * used on the LILO command line to override the defaults.
@@ -79,27 +57,21 @@
79 */ 57 */
80 58
81/* settings for DTC3181E card with only Mustek scanner attached */ 59/* settings for DTC3181E card with only Mustek scanner attached */
82#define USLEEP
83#define USLEEP_POLL 1 60#define USLEEP_POLL 1
84#define USLEEP_SLEEP 20 61#define USLEEP_SLEEP 20
85#define USLEEP_WAITLONG 500 62#define USLEEP_WAITLONG 500
86 63
87#define AUTOPROBE_IRQ 64#define AUTOPROBE_IRQ
88#define AUTOSENSE
89
90 65
91#ifdef CONFIG_SCSI_GENERIC_NCR53C400 66#ifdef CONFIG_SCSI_GENERIC_NCR53C400
92#define NCR53C400_PSEUDO_DMA 1 67#define NCR53C400_PSEUDO_DMA 1
93#define PSEUDO_DMA 68#define PSEUDO_DMA
94#define NCR53C400 69#define NCR53C400
95#define NCR5380_STATS
96#undef NCR5380_STAT_LIMIT
97#endif 70#endif
98 71
99#include <asm/io.h> 72#include <asm/io.h>
100#include <linux/signal.h> 73#include <linux/signal.h>
101#include <linux/blkdev.h> 74#include <linux/blkdev.h>
102#include "scsi.h"
103#include <scsi/scsi_host.h> 75#include <scsi/scsi_host.h>
104#include "g_NCR5380.h" 76#include "g_NCR5380.h"
105#include "NCR5380.h" 77#include "NCR5380.h"
@@ -277,7 +249,7 @@ static int __init do_DTC3181E_setup(char *str)
277 * Locks: none 249 * Locks: none
278 */ 250 */
279 251
280int __init generic_NCR5380_detect(struct scsi_host_template * tpnt) 252static int __init generic_NCR5380_detect(struct scsi_host_template *tpnt)
281{ 253{
282 static int current_override = 0; 254 static int current_override = 0;
283 int count; 255 int count;
@@ -335,7 +307,7 @@ int __init generic_NCR5380_detect(struct scsi_host_template * tpnt)
335 if (pnp_irq_valid(dev, 0)) 307 if (pnp_irq_valid(dev, 0))
336 overrides[count].irq = pnp_irq(dev, 0); 308 overrides[count].irq = pnp_irq(dev, 0);
337 else 309 else
338 overrides[count].irq = SCSI_IRQ_NONE; 310 overrides[count].irq = NO_IRQ;
339 if (pnp_dma_valid(dev, 0)) 311 if (pnp_dma_valid(dev, 0))
340 overrides[count].dma = pnp_dma(dev, 0); 312 overrides[count].dma = pnp_dma(dev, 0);
341 else 313 else
@@ -455,27 +427,22 @@ int __init generic_NCR5380_detect(struct scsi_host_template * tpnt)
455 else 427 else
456 instance->irq = NCR5380_probe_irq(instance, 0xffff); 428 instance->irq = NCR5380_probe_irq(instance, 0xffff);
457 429
458 if (instance->irq != SCSI_IRQ_NONE) 430 /* Compatibility with documented NCR5380 kernel parameters */
431 if (instance->irq == 255)
432 instance->irq = NO_IRQ;
433
434 if (instance->irq != NO_IRQ)
459 if (request_irq(instance->irq, generic_NCR5380_intr, 435 if (request_irq(instance->irq, generic_NCR5380_intr,
460 0, "NCR5380", instance)) { 436 0, "NCR5380", instance)) {
461 printk(KERN_WARNING "scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq); 437 printk(KERN_WARNING "scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq);
462 instance->irq = SCSI_IRQ_NONE; 438 instance->irq = NO_IRQ;
463 } 439 }
464 440
465 if (instance->irq == SCSI_IRQ_NONE) { 441 if (instance->irq == NO_IRQ) {
466 printk(KERN_INFO "scsi%d : interrupts not enabled. for better interactive performance,\n", instance->host_no); 442 printk(KERN_INFO "scsi%d : interrupts not enabled. for better interactive performance,\n", instance->host_no);
467 printk(KERN_INFO "scsi%d : please jumper the board for a free IRQ.\n", instance->host_no); 443 printk(KERN_INFO "scsi%d : please jumper the board for a free IRQ.\n", instance->host_no);
468 } 444 }
469 445
470 printk(KERN_INFO "scsi%d : at " STRVAL(NCR5380_map_name) " 0x%x", instance->host_no, (unsigned int) instance->NCR5380_instance_name);
471 if (instance->irq == SCSI_IRQ_NONE)
472 printk(" interrupts disabled");
473 else
474 printk(" irq %d", instance->irq);
475 printk(" options CAN_QUEUE=%d CMD_PER_LUN=%d release=%d", CAN_QUEUE, CMD_PER_LUN, GENERIC_NCR5380_PUBLIC_RELEASE);
476 NCR5380_print_options(instance);
477 printk("\n");
478
479 ++current_override; 446 ++current_override;
480 ++count; 447 ++count;
481 } 448 }
@@ -483,19 +450,6 @@ int __init generic_NCR5380_detect(struct scsi_host_template * tpnt)
483} 450}
484 451
485/** 452/**
486 * generic_NCR5380_info - reporting string
487 * @host: NCR5380 to report on
488 *
489 * Report driver information for the NCR5380
490 */
491
492const char *generic_NCR5380_info(struct Scsi_Host *host)
493{
494 static const char string[] = "Generic NCR5380/53C400 Driver";
495 return string;
496}
497
498/**
499 * generic_NCR5380_release_resources - free resources 453 * generic_NCR5380_release_resources - free resources
500 * @instance: host adapter to clean up 454 * @instance: host adapter to clean up
501 * 455 *
@@ -504,12 +458,12 @@ const char *generic_NCR5380_info(struct Scsi_Host *host)
504 * Locks: none 458 * Locks: none
505 */ 459 */
506 460
507int generic_NCR5380_release_resources(struct Scsi_Host *instance) 461static int generic_NCR5380_release_resources(struct Scsi_Host *instance)
508{ 462{
509 NCR5380_local_declare(); 463 NCR5380_local_declare();
510 NCR5380_setup(instance); 464 NCR5380_setup(instance);
511 465
512 if (instance->irq != SCSI_IRQ_NONE) 466 if (instance->irq != NO_IRQ)
513 free_irq(instance->irq, instance); 467 free_irq(instance->irq, instance);
514 NCR5380_exit(instance); 468 NCR5380_exit(instance);
515 469
@@ -741,163 +695,9 @@ static inline int NCR5380_pwrite(struct Scsi_Host *instance, unsigned char *src,
741 695
742#include "NCR5380.c" 696#include "NCR5380.c"
743 697
744#define PRINTP(x) seq_printf(m, x)
745#define ANDP ,
746
747static void sprint_opcode(struct seq_file *m, int opcode)
748{
749 PRINTP("0x%02x " ANDP opcode);
750}
751
752static void sprint_command(struct seq_file *m, unsigned char *command)
753{
754 int i, s;
755 sprint_opcode(m, command[0]);
756 for (i = 1, s = COMMAND_SIZE(command[0]); i < s; ++i)
757 PRINTP("%02x " ANDP command[i]);
758 PRINTP("\n");
759}
760
761/**
762 * sprintf_Scsi_Cmnd - print a scsi command
763 * @m: seq_fil to print into
764 * @cmd: SCSI command block
765 *
766 * Print out the target and command data in hex
767 */
768
769static void sprint_Scsi_Cmnd(struct seq_file *m, Scsi_Cmnd * cmd)
770{
771 PRINTP("host number %d destination target %d, lun %llu\n" ANDP cmd->device->host->host_no ANDP cmd->device->id ANDP cmd->device->lun);
772 PRINTP(" command = ");
773 sprint_command(m, cmd->cmnd);
774}
775
776/**
777 * generic_NCR5380_proc_info - /proc for NCR5380 driver
778 * @buffer: buffer to print into
779 * @start: start position
780 * @offset: offset into buffer
781 * @len: length
782 * @hostno: instance to affect
783 * @inout: read/write
784 *
785 * Provide the procfs information for the 5380 controller. We fill
786 * this with useful debugging information including the commands
787 * being executed, disconnected command queue and the statistical
788 * data
789 *
790 * Locks: global cli/lock for queue walk
791 */
792
793static int generic_NCR5380_show_info(struct seq_file *m, struct Scsi_Host *scsi_ptr)
794{
795 NCR5380_local_declare();
796 unsigned long flags;
797 unsigned char status;
798 int i;
799 Scsi_Cmnd *ptr;
800 struct NCR5380_hostdata *hostdata;
801#ifdef NCR5380_STATS
802 struct scsi_device *dev;
803#endif
804
805 NCR5380_setup(scsi_ptr);
806 hostdata = (struct NCR5380_hostdata *) scsi_ptr->hostdata;
807
808 spin_lock_irqsave(scsi_ptr->host_lock, flags);
809 PRINTP("SCSI host number %d : %s\n" ANDP scsi_ptr->host_no ANDP scsi_ptr->hostt->name);
810 PRINTP("Generic NCR5380 driver version %d\n" ANDP GENERIC_NCR5380_PUBLIC_RELEASE);
811 PRINTP("NCR5380 core version %d\n" ANDP NCR5380_PUBLIC_RELEASE);
812#ifdef NCR53C400
813 PRINTP("NCR53C400 extension version %d\n" ANDP NCR53C400_PUBLIC_RELEASE);
814 PRINTP("NCR53C400 card%s detected\n" ANDP(((struct NCR5380_hostdata *) scsi_ptr->hostdata)->flags & FLAG_NCR53C400) ? "" : " not");
815# if NCR53C400_PSEUDO_DMA
816 PRINTP("NCR53C400 pseudo DMA used\n");
817# endif
818#else
819 PRINTP("NO NCR53C400 driver extensions\n");
820#endif
821 PRINTP("Using %s mapping at %s 0x%lx, " ANDP STRVAL(NCR5380_map_config) ANDP STRVAL(NCR5380_map_name) ANDP scsi_ptr->NCR5380_instance_name);
822 if (scsi_ptr->irq == SCSI_IRQ_NONE)
823 PRINTP("no interrupt\n");
824 else
825 PRINTP("on interrupt %d\n" ANDP scsi_ptr->irq);
826
827#ifdef NCR5380_STATS
828 if (hostdata->connected || hostdata->issue_queue || hostdata->disconnected_queue)
829 PRINTP("There are commands pending, transfer rates may be crud\n");
830 if (hostdata->pendingr)
831 PRINTP(" %d pending reads" ANDP hostdata->pendingr);
832 if (hostdata->pendingw)
833 PRINTP(" %d pending writes" ANDP hostdata->pendingw);
834 if (hostdata->pendingr || hostdata->pendingw)
835 PRINTP("\n");
836 shost_for_each_device(dev, scsi_ptr) {
837 unsigned long br = hostdata->bytes_read[dev->id];
838 unsigned long bw = hostdata->bytes_write[dev->id];
839 long tr = hostdata->time_read[dev->id] / HZ;
840 long tw = hostdata->time_write[dev->id] / HZ;
841
842 PRINTP(" T:%d %s " ANDP dev->id ANDP scsi_device_type(dev->type));
843 for (i = 0; i < 8; i++)
844 if (dev->vendor[i] >= 0x20)
845 seq_putc(m, dev->vendor[i]);
846 seq_putc(m, ' ');
847 for (i = 0; i < 16; i++)
848 if (dev->model[i] >= 0x20)
849 seq_putc(m, dev->model[i]);
850 seq_putc(m, ' ');
851 for (i = 0; i < 4; i++)
852 if (dev->rev[i] >= 0x20)
853 seq_putc(m, dev->rev[i]);
854 seq_putc(m, ' ');
855
856 PRINTP("\n%10ld kb read in %5ld secs" ANDP br / 1024 ANDP tr);
857 if (tr)
858 PRINTP(" @ %5ld bps" ANDP br / tr);
859
860 PRINTP("\n%10ld kb written in %5ld secs" ANDP bw / 1024 ANDP tw);
861 if (tw)
862 PRINTP(" @ %5ld bps" ANDP bw / tw);
863 PRINTP("\n");
864 }
865#endif
866
867 status = NCR5380_read(STATUS_REG);
868 if (!(status & SR_REQ))
869 PRINTP("REQ not asserted, phase unknown.\n");
870 else {
871 for (i = 0; (phases[i].value != PHASE_UNKNOWN) && (phases[i].value != (status & PHASE_MASK)); ++i);
872 PRINTP("Phase %s\n" ANDP phases[i].name);
873 }
874
875 if (!hostdata->connected) {
876 PRINTP("No currently connected command\n");
877 } else {
878 sprint_Scsi_Cmnd(m, (Scsi_Cmnd *) hostdata->connected);
879 }
880
881 PRINTP("issue_queue\n");
882
883 for (ptr = (Scsi_Cmnd *) hostdata->issue_queue; ptr; ptr = (Scsi_Cmnd *) ptr->host_scribble)
884 sprint_Scsi_Cmnd(m, ptr);
885
886 PRINTP("disconnected_queue\n");
887
888 for (ptr = (Scsi_Cmnd *) hostdata->disconnected_queue; ptr; ptr = (Scsi_Cmnd *) ptr->host_scribble)
889 sprint_Scsi_Cmnd(m, ptr);
890
891 spin_unlock_irqrestore(scsi_ptr->host_lock, flags);
892 return 0;
893}
894
895#undef PRINTP
896#undef ANDP
897
898static struct scsi_host_template driver_template = { 698static struct scsi_host_template driver_template = {
899 .show_info = generic_NCR5380_show_info, 699 .show_info = generic_NCR5380_show_info,
900 .name = "Generic NCR5380/NCR53C400 Scsi Driver", 700 .name = "Generic NCR5380/NCR53C400 SCSI",
901 .detect = generic_NCR5380_detect, 701 .detect = generic_NCR5380_detect,
902 .release = generic_NCR5380_release_resources, 702 .release = generic_NCR5380_release_resources,
903 .info = generic_NCR5380_info, 703 .info = generic_NCR5380_info,
diff --git a/drivers/scsi/g_NCR5380.h b/drivers/scsi/g_NCR5380.h
index 703adf78e0b2..bea1a3b9b862 100644
--- a/drivers/scsi/g_NCR5380.h
+++ b/drivers/scsi/g_NCR5380.h
@@ -9,28 +9,11 @@
9 * 9 *
10 * NCR53C400 extensions (c) 1994,1995,1996, Kevin Lentin 10 * NCR53C400 extensions (c) 1994,1995,1996, Kevin Lentin
11 * K.Lentin@cs.monash.edu.au 11 * K.Lentin@cs.monash.edu.au
12 *
13 * ALPHA RELEASE 1.
14 *
15 * For more information, please consult
16 *
17 * NCR 5380 Family
18 * SCSI Protocol Controller
19 * Databook
20 *
21 * NCR Microelectronics
22 * 1635 Aeroplaza Drive
23 * Colorado Springs, CO 80916
24 * 1+ (719) 578-3400
25 * 1+ (800) 334-5454
26 */ 12 */
27 13
28#ifndef GENERIC_NCR5380_H 14#ifndef GENERIC_NCR5380_H
29#define GENERIC_NCR5380_H 15#define GENERIC_NCR5380_H
30 16
31
32#define GENERIC_NCR5380_PUBLIC_RELEASE 1
33
34#ifdef NCR53C400 17#ifdef NCR53C400
35#define BIOSPARAM 18#define BIOSPARAM
36#define NCR5380_BIOSPARAM generic_NCR5380_biosparam 19#define NCR5380_BIOSPARAM generic_NCR5380_biosparam
@@ -39,12 +22,6 @@
39#endif 22#endif
40 23
41#ifndef ASM 24#ifndef ASM
42static int generic_NCR5380_abort(Scsi_Cmnd *);
43static int generic_NCR5380_detect(struct scsi_host_template *);
44static int generic_NCR5380_release_resources(struct Scsi_Host *);
45static int generic_NCR5380_queue_command(struct Scsi_Host *, struct scsi_cmnd *);
46static int generic_NCR5380_bus_reset(Scsi_Cmnd *);
47static const char* generic_NCR5380_info(struct Scsi_Host *);
48 25
49#ifndef CMD_PER_LUN 26#ifndef CMD_PER_LUN
50#define CMD_PER_LUN 2 27#define CMD_PER_LUN 2
@@ -118,7 +95,8 @@ static const char* generic_NCR5380_info(struct Scsi_Host *);
118#define NCR5380_bus_reset generic_NCR5380_bus_reset 95#define NCR5380_bus_reset generic_NCR5380_bus_reset
119#define NCR5380_pread generic_NCR5380_pread 96#define NCR5380_pread generic_NCR5380_pread
120#define NCR5380_pwrite generic_NCR5380_pwrite 97#define NCR5380_pwrite generic_NCR5380_pwrite
121#define NCR5380_proc_info notyet_generic_proc_info 98#define NCR5380_info generic_NCR5380_info
99#define NCR5380_show_info generic_NCR5380_show_info
122 100
123#define BOARD_NCR5380 0 101#define BOARD_NCR5380 0
124#define BOARD_NCR53C400 1 102#define BOARD_NCR53C400 1
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index 0f1ae13ce7c7..71e138044379 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -2159,7 +2159,7 @@ static void gdth_next(gdth_ha_str *ha)
2159 case VERIFY: 2159 case VERIFY:
2160 case START_STOP: 2160 case START_STOP:
2161 case MODE_SENSE: 2161 case MODE_SENSE:
2162 case SERVICE_ACTION_IN: 2162 case SERVICE_ACTION_IN_16:
2163 TRACE(("cache cmd %x/%x/%x/%x/%x/%x\n",nscp->cmnd[0], 2163 TRACE(("cache cmd %x/%x/%x/%x/%x/%x\n",nscp->cmnd[0],
2164 nscp->cmnd[1],nscp->cmnd[2],nscp->cmnd[3], 2164 nscp->cmnd[1],nscp->cmnd[2],nscp->cmnd[3],
2165 nscp->cmnd[4],nscp->cmnd[5])); 2165 nscp->cmnd[4],nscp->cmnd[5]));
@@ -2391,7 +2391,7 @@ static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp)
2391 gdth_copy_internal_data(ha, scp, (char*)&rdc, sizeof(gdth_rdcap_data)); 2391 gdth_copy_internal_data(ha, scp, (char*)&rdc, sizeof(gdth_rdcap_data));
2392 break; 2392 break;
2393 2393
2394 case SERVICE_ACTION_IN: 2394 case SERVICE_ACTION_IN_16:
2395 if ((scp->cmnd[1] & 0x1f) == SAI_READ_CAPACITY_16 && 2395 if ((scp->cmnd[1] & 0x1f) == SAI_READ_CAPACITY_16 &&
2396 (ha->cache_feat & GDT_64BIT)) { 2396 (ha->cache_feat & GDT_64BIT)) {
2397 gdth_rdcap16_data rdc16; 2397 gdth_rdcap16_data rdc16;
@@ -4661,7 +4661,6 @@ static void gdth_flush(gdth_ha_str *ha)
4661/* configure lun */ 4661/* configure lun */
4662static int gdth_slave_configure(struct scsi_device *sdev) 4662static int gdth_slave_configure(struct scsi_device *sdev)
4663{ 4663{
4664 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
4665 sdev->skip_ms_page_3f = 1; 4664 sdev->skip_ms_page_3f = 1;
4666 sdev->skip_ms_page_8 = 1; 4665 sdev->skip_ms_page_8 = 1;
4667 return 0; 4666 return 0;
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 6de80e352871..8bb173e01084 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -418,7 +418,6 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
418 shost->cmd_per_lun = sht->cmd_per_lun; 418 shost->cmd_per_lun = sht->cmd_per_lun;
419 shost->unchecked_isa_dma = sht->unchecked_isa_dma; 419 shost->unchecked_isa_dma = sht->unchecked_isa_dma;
420 shost->use_clustering = sht->use_clustering; 420 shost->use_clustering = sht->use_clustering;
421 shost->ordered_tag = sht->ordered_tag;
422 shost->no_write_same = sht->no_write_same; 421 shost->no_write_same = sht->no_write_same;
423 422
424 if (shost_eh_deadline == -1 || !sht->eh_host_reset_handler) 423 if (shost_eh_deadline == -1 || !sht->eh_host_reset_handler)
@@ -485,8 +484,8 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
485 WQ_UNBOUND | WQ_MEM_RECLAIM, 484 WQ_UNBOUND | WQ_MEM_RECLAIM,
486 1, shost->host_no); 485 1, shost->host_no);
487 if (!shost->tmf_work_q) { 486 if (!shost->tmf_work_q) {
488 printk(KERN_WARNING "scsi%d: failed to create tmf workq\n", 487 shost_printk(KERN_WARNING, shost,
489 shost->host_no); 488 "failed to create tmf workq\n");
490 goto fail_kthread; 489 goto fail_kthread;
491 } 490 }
492 scsi_proc_hostdir_add(shost->hostt); 491 scsi_proc_hostdir_add(shost->hostt);
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index cef5d49b59cd..6bb4611b238a 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -48,6 +48,7 @@
48#include <linux/bitmap.h> 48#include <linux/bitmap.h>
49#include <linux/atomic.h> 49#include <linux/atomic.h>
50#include <linux/jiffies.h> 50#include <linux/jiffies.h>
51#include <linux/percpu-defs.h>
51#include <linux/percpu.h> 52#include <linux/percpu.h>
52#include <asm/div64.h> 53#include <asm/div64.h>
53#include "hpsa_cmd.h" 54#include "hpsa_cmd.h"
@@ -103,7 +104,6 @@ static const struct pci_device_id hpsa_pci_device_id[] = {
103 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1922}, 104 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1922},
104 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1923}, 105 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1923},
105 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1924}, 106 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1924},
106 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1925},
107 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926}, 107 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926},
108 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928}, 108 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928},
109 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1929}, 109 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1929},
@@ -149,6 +149,7 @@ static struct board_type products[] = {
149 {0x3249103C, "Smart Array P812", &SA5_access}, 149 {0x3249103C, "Smart Array P812", &SA5_access},
150 {0x324A103C, "Smart Array P712m", &SA5_access}, 150 {0x324A103C, "Smart Array P712m", &SA5_access},
151 {0x324B103C, "Smart Array P711m", &SA5_access}, 151 {0x324B103C, "Smart Array P711m", &SA5_access},
152 {0x3233103C, "HP StorageWorks 1210m", &SA5_access}, /* alias of 333f */
152 {0x3350103C, "Smart Array P222", &SA5_access}, 153 {0x3350103C, "Smart Array P222", &SA5_access},
153 {0x3351103C, "Smart Array P420", &SA5_access}, 154 {0x3351103C, "Smart Array P420", &SA5_access},
154 {0x3352103C, "Smart Array P421", &SA5_access}, 155 {0x3352103C, "Smart Array P421", &SA5_access},
@@ -193,12 +194,13 @@ static int number_of_controllers;
193 194
194static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id); 195static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
195static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id); 196static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
196static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg); 197static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
197static void lock_and_start_io(struct ctlr_info *h); 198static void lock_and_start_io(struct ctlr_info *h);
198static void start_io(struct ctlr_info *h, unsigned long *flags); 199static void start_io(struct ctlr_info *h, unsigned long *flags);
199 200
200#ifdef CONFIG_COMPAT 201#ifdef CONFIG_COMPAT
201static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg); 202static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd,
203 void __user *arg);
202#endif 204#endif
203 205
204static void cmd_free(struct ctlr_info *h, struct CommandList *c); 206static void cmd_free(struct ctlr_info *h, struct CommandList *c);
@@ -214,8 +216,6 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
214static void hpsa_scan_start(struct Scsi_Host *); 216static void hpsa_scan_start(struct Scsi_Host *);
215static int hpsa_scan_finished(struct Scsi_Host *sh, 217static int hpsa_scan_finished(struct Scsi_Host *sh,
216 unsigned long elapsed_time); 218 unsigned long elapsed_time);
217static int hpsa_change_queue_depth(struct scsi_device *sdev,
218 int qdepth, int reason);
219 219
220static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd); 220static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
221static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd); 221static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd);
@@ -274,12 +274,12 @@ static int check_for_unit_attention(struct ctlr_info *h,
274 "detected, command retried\n", h->ctlr); 274 "detected, command retried\n", h->ctlr);
275 break; 275 break;
276 case LUN_FAILED: 276 case LUN_FAILED:
277 dev_warn(&h->pdev->dev, HPSA "%d: LUN failure " 277 dev_warn(&h->pdev->dev,
278 "detected, action required\n", h->ctlr); 278 HPSA "%d: LUN failure detected\n", h->ctlr);
279 break; 279 break;
280 case REPORT_LUNS_CHANGED: 280 case REPORT_LUNS_CHANGED:
281 dev_warn(&h->pdev->dev, HPSA "%d: report LUN data " 281 dev_warn(&h->pdev->dev,
282 "changed, action required\n", h->ctlr); 282 HPSA "%d: report LUN data changed\n", h->ctlr);
283 /* 283 /*
284 * Note: this REPORT_LUNS_CHANGED condition only occurs on the external 284 * Note: this REPORT_LUNS_CHANGED condition only occurs on the external
285 * target (array) devices. 285 * target (array) devices.
@@ -392,7 +392,8 @@ static ssize_t host_show_commands_outstanding(struct device *dev,
392 struct Scsi_Host *shost = class_to_shost(dev); 392 struct Scsi_Host *shost = class_to_shost(dev);
393 struct ctlr_info *h = shost_to_hba(shost); 393 struct ctlr_info *h = shost_to_hba(shost);
394 394
395 return snprintf(buf, 20, "%d\n", h->commands_outstanding); 395 return snprintf(buf, 20, "%d\n",
396 atomic_read(&h->commands_outstanding));
396} 397}
397 398
398static ssize_t host_show_transport_mode(struct device *dev, 399static ssize_t host_show_transport_mode(struct device *dev,
@@ -670,7 +671,7 @@ static struct scsi_host_template hpsa_driver_template = {
670 .queuecommand = hpsa_scsi_queue_command, 671 .queuecommand = hpsa_scsi_queue_command,
671 .scan_start = hpsa_scan_start, 672 .scan_start = hpsa_scan_start,
672 .scan_finished = hpsa_scan_finished, 673 .scan_finished = hpsa_scan_finished,
673 .change_queue_depth = hpsa_change_queue_depth, 674 .change_queue_depth = scsi_change_queue_depth,
674 .this_id = -1, 675 .this_id = -1,
675 .use_clustering = ENABLE_CLUSTERING, 676 .use_clustering = ENABLE_CLUSTERING,
676 .eh_abort_handler = hpsa_eh_abort_handler, 677 .eh_abort_handler = hpsa_eh_abort_handler,
@@ -698,7 +699,6 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
698{ 699{
699 u32 a; 700 u32 a;
700 struct reply_queue_buffer *rq = &h->reply_queue[q]; 701 struct reply_queue_buffer *rq = &h->reply_queue[q];
701 unsigned long flags;
702 702
703 if (h->transMethod & CFGTBL_Trans_io_accel1) 703 if (h->transMethod & CFGTBL_Trans_io_accel1)
704 return h->access.command_completed(h, q); 704 return h->access.command_completed(h, q);
@@ -709,9 +709,7 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
709 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) { 709 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
710 a = rq->head[rq->current_entry]; 710 a = rq->head[rq->current_entry];
711 rq->current_entry++; 711 rq->current_entry++;
712 spin_lock_irqsave(&h->lock, flags); 712 atomic_dec(&h->commands_outstanding);
713 h->commands_outstanding--;
714 spin_unlock_irqrestore(&h->lock, flags);
715 } else { 713 } else {
716 a = FIFO_EMPTY; 714 a = FIFO_EMPTY;
717 } 715 }
@@ -1500,22 +1498,22 @@ static int hpsa_map_sg_chain_block(struct ctlr_info *h,
1500{ 1498{
1501 struct SGDescriptor *chain_sg, *chain_block; 1499 struct SGDescriptor *chain_sg, *chain_block;
1502 u64 temp64; 1500 u64 temp64;
1501 u32 chain_len;
1503 1502
1504 chain_sg = &c->SG[h->max_cmd_sg_entries - 1]; 1503 chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
1505 chain_block = h->cmd_sg_list[c->cmdindex]; 1504 chain_block = h->cmd_sg_list[c->cmdindex];
1506 chain_sg->Ext = HPSA_SG_CHAIN; 1505 chain_sg->Ext = cpu_to_le32(HPSA_SG_CHAIN);
1507 chain_sg->Len = sizeof(*chain_sg) * 1506 chain_len = sizeof(*chain_sg) *
1508 (c->Header.SGTotal - h->max_cmd_sg_entries); 1507 (c->Header.SGTotal - h->max_cmd_sg_entries);
1509 temp64 = pci_map_single(h->pdev, chain_block, chain_sg->Len, 1508 chain_sg->Len = cpu_to_le32(chain_len);
1509 temp64 = pci_map_single(h->pdev, chain_block, chain_len,
1510 PCI_DMA_TODEVICE); 1510 PCI_DMA_TODEVICE);
1511 if (dma_mapping_error(&h->pdev->dev, temp64)) { 1511 if (dma_mapping_error(&h->pdev->dev, temp64)) {
1512 /* prevent subsequent unmapping */ 1512 /* prevent subsequent unmapping */
1513 chain_sg->Addr.lower = 0; 1513 chain_sg->Addr = cpu_to_le64(0);
1514 chain_sg->Addr.upper = 0;
1515 return -1; 1514 return -1;
1516 } 1515 }
1517 chain_sg->Addr.lower = (u32) (temp64 & 0x0FFFFFFFFULL); 1516 chain_sg->Addr = cpu_to_le64(temp64);
1518 chain_sg->Addr.upper = (u32) ((temp64 >> 32) & 0x0FFFFFFFFULL);
1519 return 0; 1517 return 0;
1520} 1518}
1521 1519
@@ -1523,15 +1521,13 @@ static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
1523 struct CommandList *c) 1521 struct CommandList *c)
1524{ 1522{
1525 struct SGDescriptor *chain_sg; 1523 struct SGDescriptor *chain_sg;
1526 union u64bit temp64;
1527 1524
1528 if (c->Header.SGTotal <= h->max_cmd_sg_entries) 1525 if (le16_to_cpu(c->Header.SGTotal) <= h->max_cmd_sg_entries)
1529 return; 1526 return;
1530 1527
1531 chain_sg = &c->SG[h->max_cmd_sg_entries - 1]; 1528 chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
1532 temp64.val32.lower = chain_sg->Addr.lower; 1529 pci_unmap_single(h->pdev, le64_to_cpu(chain_sg->Addr),
1533 temp64.val32.upper = chain_sg->Addr.upper; 1530 le32_to_cpu(chain_sg->Len), PCI_DMA_TODEVICE);
1534 pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE);
1535} 1531}
1536 1532
1537 1533
@@ -1732,8 +1728,7 @@ static void complete_scsi_command(struct CommandList *cp)
1732 struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex]; 1728 struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];
1733 cp->Header.SGList = cp->Header.SGTotal = scsi_sg_count(cmd); 1729 cp->Header.SGList = cp->Header.SGTotal = scsi_sg_count(cmd);
1734 cp->Request.CDBLen = c->io_flags & IOACCEL1_IOFLAGS_CDBLEN_MASK; 1730 cp->Request.CDBLen = c->io_flags & IOACCEL1_IOFLAGS_CDBLEN_MASK;
1735 cp->Header.Tag.lower = c->Tag.lower; 1731 cp->Header.tag = c->tag;
1736 cp->Header.Tag.upper = c->Tag.upper;
1737 memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8); 1732 memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8);
1738 memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen); 1733 memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen);
1739 1734
@@ -1763,72 +1758,13 @@ static void complete_scsi_command(struct CommandList *cp)
1763 /* Get addition sense code qualifier */ 1758 /* Get addition sense code qualifier */
1764 ascq = ei->SenseInfo[13]; 1759 ascq = ei->SenseInfo[13];
1765 } 1760 }
1766
1767 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) { 1761 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
1768 if (check_for_unit_attention(h, cp))
1769 break;
1770 if (sense_key == ILLEGAL_REQUEST) {
1771 /*
1772 * SCSI REPORT_LUNS is commonly unsupported on
1773 * Smart Array. Suppress noisy complaint.
1774 */
1775 if (cp->Request.CDB[0] == REPORT_LUNS)
1776 break;
1777
1778 /* If ASC/ASCQ indicate Logical Unit
1779 * Not Supported condition,
1780 */
1781 if ((asc == 0x25) && (ascq == 0x0)) {
1782 dev_warn(&h->pdev->dev, "cp %p "
1783 "has check condition\n", cp);
1784 break;
1785 }
1786 }
1787
1788 if (sense_key == NOT_READY) {
1789 /* If Sense is Not Ready, Logical Unit
1790 * Not ready, Manual Intervention
1791 * required
1792 */
1793 if ((asc == 0x04) && (ascq == 0x03)) {
1794 dev_warn(&h->pdev->dev, "cp %p "
1795 "has check condition: unit "
1796 "not ready, manual "
1797 "intervention required\n", cp);
1798 break;
1799 }
1800 }
1801 if (sense_key == ABORTED_COMMAND) { 1762 if (sense_key == ABORTED_COMMAND) {
1802 /* Aborted command is retryable */
1803 dev_warn(&h->pdev->dev, "cp %p "
1804 "has check condition: aborted command: "
1805 "ASC: 0x%x, ASCQ: 0x%x\n",
1806 cp, asc, ascq);
1807 cmd->result |= DID_SOFT_ERROR << 16; 1763 cmd->result |= DID_SOFT_ERROR << 16;
1808 break; 1764 break;
1809 } 1765 }
1810 /* Must be some other type of check condition */
1811 dev_dbg(&h->pdev->dev, "cp %p has check condition: "
1812 "unknown type: "
1813 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
1814 "Returning result: 0x%x, "
1815 "cmd=[%02x %02x %02x %02x %02x "
1816 "%02x %02x %02x %02x %02x %02x "
1817 "%02x %02x %02x %02x %02x]\n",
1818 cp, sense_key, asc, ascq,
1819 cmd->result,
1820 cmd->cmnd[0], cmd->cmnd[1],
1821 cmd->cmnd[2], cmd->cmnd[3],
1822 cmd->cmnd[4], cmd->cmnd[5],
1823 cmd->cmnd[6], cmd->cmnd[7],
1824 cmd->cmnd[8], cmd->cmnd[9],
1825 cmd->cmnd[10], cmd->cmnd[11],
1826 cmd->cmnd[12], cmd->cmnd[13],
1827 cmd->cmnd[14], cmd->cmnd[15]);
1828 break; 1766 break;
1829 } 1767 }
1830
1831
1832 /* Problem was not a check condition 1768 /* Problem was not a check condition
1833 * Pass it up to the upper layers... 1769 * Pass it up to the upper layers...
1834 */ 1770 */
@@ -1934,14 +1870,11 @@ static void hpsa_pci_unmap(struct pci_dev *pdev,
1934 struct CommandList *c, int sg_used, int data_direction) 1870 struct CommandList *c, int sg_used, int data_direction)
1935{ 1871{
1936 int i; 1872 int i;
1937 union u64bit addr64;
1938 1873
1939 for (i = 0; i < sg_used; i++) { 1874 for (i = 0; i < sg_used; i++)
1940 addr64.val32.lower = c->SG[i].Addr.lower; 1875 pci_unmap_single(pdev, (dma_addr_t) le64_to_cpu(c->SG[i].Addr),
1941 addr64.val32.upper = c->SG[i].Addr.upper; 1876 le32_to_cpu(c->SG[i].Len),
1942 pci_unmap_single(pdev, (dma_addr_t) addr64.val, c->SG[i].Len, 1877 data_direction);
1943 data_direction);
1944 }
1945} 1878}
1946 1879
1947static int hpsa_map_one(struct pci_dev *pdev, 1880static int hpsa_map_one(struct pci_dev *pdev,
@@ -1954,25 +1887,22 @@ static int hpsa_map_one(struct pci_dev *pdev,
1954 1887
1955 if (buflen == 0 || data_direction == PCI_DMA_NONE) { 1888 if (buflen == 0 || data_direction == PCI_DMA_NONE) {
1956 cp->Header.SGList = 0; 1889 cp->Header.SGList = 0;
1957 cp->Header.SGTotal = 0; 1890 cp->Header.SGTotal = cpu_to_le16(0);
1958 return 0; 1891 return 0;
1959 } 1892 }
1960 1893
1961 addr64 = (u64) pci_map_single(pdev, buf, buflen, data_direction); 1894 addr64 = pci_map_single(pdev, buf, buflen, data_direction);
1962 if (dma_mapping_error(&pdev->dev, addr64)) { 1895 if (dma_mapping_error(&pdev->dev, addr64)) {
1963 /* Prevent subsequent unmap of something never mapped */ 1896 /* Prevent subsequent unmap of something never mapped */
1964 cp->Header.SGList = 0; 1897 cp->Header.SGList = 0;
1965 cp->Header.SGTotal = 0; 1898 cp->Header.SGTotal = cpu_to_le16(0);
1966 return -1; 1899 return -1;
1967 } 1900 }
1968 cp->SG[0].Addr.lower = 1901 cp->SG[0].Addr = cpu_to_le64(addr64);
1969 (u32) (addr64 & (u64) 0x00000000FFFFFFFF); 1902 cp->SG[0].Len = cpu_to_le32(buflen);
1970 cp->SG[0].Addr.upper = 1903 cp->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* we are not chaining */
1971 (u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF); 1904 cp->Header.SGList = 1; /* no. SGs contig in this cmd */
1972 cp->SG[0].Len = buflen; 1905 cp->Header.SGTotal = cpu_to_le16(1); /* total sgs in cmd list */
1973 cp->SG[0].Ext = HPSA_SG_LAST; /* we are not chaining */
1974 cp->Header.SGList = (u8) 1; /* no. SGs contig in this cmd */
1975 cp->Header.SGTotal = (u16) 1; /* total sgs in this cmd list */
1976 return 0; 1906 return 0;
1977} 1907}
1978 1908
@@ -2830,8 +2760,8 @@ static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
2830 if (d == NULL) 2760 if (d == NULL)
2831 return 0; /* no match */ 2761 return 0; /* no match */
2832 2762
2833 it_nexus = cpu_to_le32((u32) d->ioaccel_handle); 2763 it_nexus = cpu_to_le32(d->ioaccel_handle);
2834 scsi_nexus = cpu_to_le32((u32) c2a->scsi_nexus); 2764 scsi_nexus = cpu_to_le32(c2a->scsi_nexus);
2835 find = c2a->scsi_nexus; 2765 find = c2a->scsi_nexus;
2836 2766
2837 if (h->raid_offload_debug > 0) 2767 if (h->raid_offload_debug > 0)
@@ -2891,7 +2821,7 @@ static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
2891 * Returns 0 on success, -1 otherwise. 2821 * Returns 0 on success, -1 otherwise.
2892 */ 2822 */
2893static int hpsa_gather_lun_info(struct ctlr_info *h, 2823static int hpsa_gather_lun_info(struct ctlr_info *h,
2894 int reportlunsize, 2824 int reportphyslunsize, int reportloglunsize,
2895 struct ReportLUNdata *physdev, u32 *nphysicals, int *physical_mode, 2825 struct ReportLUNdata *physdev, u32 *nphysicals, int *physical_mode,
2896 struct ReportLUNdata *logdev, u32 *nlogicals) 2826 struct ReportLUNdata *logdev, u32 *nlogicals)
2897{ 2827{
@@ -2905,7 +2835,7 @@ static int hpsa_gather_lun_info(struct ctlr_info *h,
2905 *physical_mode = HPSA_REPORT_PHYS_EXTENDED; 2835 *physical_mode = HPSA_REPORT_PHYS_EXTENDED;
2906 physical_entry_size = 24; 2836 physical_entry_size = 24;
2907 } 2837 }
2908 if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize, 2838 if (hpsa_scsi_do_report_phys_luns(h, physdev, reportphyslunsize,
2909 *physical_mode)) { 2839 *physical_mode)) {
2910 dev_err(&h->pdev->dev, "report physical LUNs failed.\n"); 2840 dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
2911 return -1; 2841 return -1;
@@ -2918,7 +2848,7 @@ static int hpsa_gather_lun_info(struct ctlr_info *h,
2918 *nphysicals - HPSA_MAX_PHYS_LUN); 2848 *nphysicals - HPSA_MAX_PHYS_LUN);
2919 *nphysicals = HPSA_MAX_PHYS_LUN; 2849 *nphysicals = HPSA_MAX_PHYS_LUN;
2920 } 2850 }
2921 if (hpsa_scsi_do_report_log_luns(h, logdev, reportlunsize)) { 2851 if (hpsa_scsi_do_report_log_luns(h, logdev, reportloglunsize)) {
2922 dev_err(&h->pdev->dev, "report logical LUNs failed.\n"); 2852 dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
2923 return -1; 2853 return -1;
2924 } 2854 }
@@ -2941,8 +2871,8 @@ static int hpsa_gather_lun_info(struct ctlr_info *h,
2941 return 0; 2871 return 0;
2942} 2872}
2943 2873
2944u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i, 2874static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position,
2945 int nphysicals, int nlogicals, 2875 int i, int nphysicals, int nlogicals,
2946 struct ReportExtendedLUNdata *physdev_list, 2876 struct ReportExtendedLUNdata *physdev_list,
2947 struct ReportLUNdata *logdev_list) 2877 struct ReportLUNdata *logdev_list)
2948{ 2878{
@@ -3011,15 +2941,14 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
3011 u32 ndev_allocated = 0; 2941 u32 ndev_allocated = 0;
3012 struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice; 2942 struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
3013 int ncurrent = 0; 2943 int ncurrent = 0;
3014 int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 24;
3015 int i, n_ext_target_devs, ndevs_to_allocate; 2944 int i, n_ext_target_devs, ndevs_to_allocate;
3016 int raid_ctlr_position; 2945 int raid_ctlr_position;
3017 int rescan_hba_mode; 2946 int rescan_hba_mode;
3018 DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS); 2947 DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);
3019 2948
3020 currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL); 2949 currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
3021 physdev_list = kzalloc(reportlunsize, GFP_KERNEL); 2950 physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL);
3022 logdev_list = kzalloc(reportlunsize, GFP_KERNEL); 2951 logdev_list = kzalloc(sizeof(*logdev_list), GFP_KERNEL);
3023 tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL); 2952 tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
3024 2953
3025 if (!currentsd || !physdev_list || !logdev_list || !tmpdevice) { 2954 if (!currentsd || !physdev_list || !logdev_list || !tmpdevice) {
@@ -3039,7 +2968,8 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
3039 2968
3040 h->hba_mode_enabled = rescan_hba_mode; 2969 h->hba_mode_enabled = rescan_hba_mode;
3041 2970
3042 if (hpsa_gather_lun_info(h, reportlunsize, 2971 if (hpsa_gather_lun_info(h,
2972 sizeof(*physdev_list), sizeof(*logdev_list),
3043 (struct ReportLUNdata *) physdev_list, &nphysicals, 2973 (struct ReportLUNdata *) physdev_list, &nphysicals,
3044 &physical_mode, logdev_list, &nlogicals)) 2974 &physical_mode, logdev_list, &nlogicals))
3045 goto out; 2975 goto out;
@@ -3210,19 +3140,19 @@ static int hpsa_scatter_gather(struct ctlr_info *h,
3210 } 3140 }
3211 addr64 = (u64) sg_dma_address(sg); 3141 addr64 = (u64) sg_dma_address(sg);
3212 len = sg_dma_len(sg); 3142 len = sg_dma_len(sg);
3213 curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL); 3143 curr_sg->Addr = cpu_to_le64(addr64);
3214 curr_sg->Addr.upper = (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL); 3144 curr_sg->Len = cpu_to_le32(len);
3215 curr_sg->Len = len; 3145 curr_sg->Ext = cpu_to_le32(0);
3216 curr_sg->Ext = (i < scsi_sg_count(cmd) - 1) ? 0 : HPSA_SG_LAST;
3217 curr_sg++; 3146 curr_sg++;
3218 } 3147 }
3148 (--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST);
3219 3149
3220 if (use_sg + chained > h->maxSG) 3150 if (use_sg + chained > h->maxSG)
3221 h->maxSG = use_sg + chained; 3151 h->maxSG = use_sg + chained;
3222 3152
3223 if (chained) { 3153 if (chained) {
3224 cp->Header.SGList = h->max_cmd_sg_entries; 3154 cp->Header.SGList = h->max_cmd_sg_entries;
3225 cp->Header.SGTotal = (u16) (use_sg + 1); 3155 cp->Header.SGTotal = cpu_to_le16(use_sg + 1);
3226 if (hpsa_map_sg_chain_block(h, cp)) { 3156 if (hpsa_map_sg_chain_block(h, cp)) {
3227 scsi_dma_unmap(cmd); 3157 scsi_dma_unmap(cmd);
3228 return -1; 3158 return -1;
@@ -3233,7 +3163,7 @@ static int hpsa_scatter_gather(struct ctlr_info *h,
3233sglist_finished: 3163sglist_finished:
3234 3164
3235 cp->Header.SGList = (u8) use_sg; /* no. SGs contig in this cmd */ 3165 cp->Header.SGList = (u8) use_sg; /* no. SGs contig in this cmd */
3236 cp->Header.SGTotal = (u16) use_sg; /* total sgs in this cmd list */ 3166 cp->Header.SGTotal = cpu_to_le16(use_sg); /* total sgs in this cmd list */
3237 return 0; 3167 return 0;
3238} 3168}
3239 3169
@@ -3325,17 +3255,12 @@ static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
3325 addr64 = (u64) sg_dma_address(sg); 3255 addr64 = (u64) sg_dma_address(sg);
3326 len = sg_dma_len(sg); 3256 len = sg_dma_len(sg);
3327 total_len += len; 3257 total_len += len;
3328 curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL); 3258 curr_sg->Addr = cpu_to_le64(addr64);
3329 curr_sg->Addr.upper = 3259 curr_sg->Len = cpu_to_le32(len);
3330 (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL); 3260 curr_sg->Ext = cpu_to_le32(0);
3331 curr_sg->Len = len;
3332
3333 if (i == (scsi_sg_count(cmd) - 1))
3334 curr_sg->Ext = HPSA_SG_LAST;
3335 else
3336 curr_sg->Ext = 0; /* we are not chaining */
3337 curr_sg++; 3261 curr_sg++;
3338 } 3262 }
3263 (--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST);
3339 3264
3340 switch (cmd->sc_data_direction) { 3265 switch (cmd->sc_data_direction) {
3341 case DMA_TO_DEVICE: 3266 case DMA_TO_DEVICE:
@@ -3592,7 +3517,7 @@ static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
3592 cp->data_len = cpu_to_le32(total_len); 3517 cp->data_len = cpu_to_le32(total_len);
3593 cp->err_ptr = cpu_to_le64(c->busaddr + 3518 cp->err_ptr = cpu_to_le64(c->busaddr +
3594 offsetof(struct io_accel2_cmd, error_data)); 3519 offsetof(struct io_accel2_cmd, error_data));
3595 cp->err_len = cpu_to_le32((u32) sizeof(cp->error_data)); 3520 cp->err_len = cpu_to_le32(sizeof(cp->error_data));
3596 3521
3597 enqueue_cmd_and_start_io(h, c); 3522 enqueue_cmd_and_start_io(h, c);
3598 return 0; 3523 return 0;
@@ -3809,11 +3734,6 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
3809 offload_to_mirror = 3734 offload_to_mirror =
3810 (offload_to_mirror >= map->layout_map_count - 1) 3735 (offload_to_mirror >= map->layout_map_count - 1)
3811 ? 0 : offload_to_mirror + 1; 3736 ? 0 : offload_to_mirror + 1;
3812 /* FIXME: remove after debug/dev */
3813 BUG_ON(offload_to_mirror >= map->layout_map_count);
3814 dev_warn(&h->pdev->dev,
3815 "DEBUG: Using physical disk map index %d from mirror group %d\n",
3816 map_index, offload_to_mirror);
3817 dev->offload_to_mirror = offload_to_mirror; 3737 dev->offload_to_mirror = offload_to_mirror;
3818 /* Avoid direct use of dev->offload_to_mirror within this 3738 /* Avoid direct use of dev->offload_to_mirror within this
3819 * function since multiple threads might simultaneously 3739 * function since multiple threads might simultaneously
@@ -3959,8 +3879,11 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
3959 dev->scsi3addr); 3879 dev->scsi3addr);
3960} 3880}
3961 3881
3962static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd, 3882/*
3963 void (*done)(struct scsi_cmnd *)) 3883 * Running in struct Scsi_Host->host_lock less mode using LLD internal
3884 * struct ctlr_info *h->lock w/ spin_lock_irqsave() protection.
3885 */
3886static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
3964{ 3887{
3965 struct ctlr_info *h; 3888 struct ctlr_info *h;
3966 struct hpsa_scsi_dev_t *dev; 3889 struct hpsa_scsi_dev_t *dev;
@@ -3973,14 +3896,14 @@ static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
3973 dev = cmd->device->hostdata; 3896 dev = cmd->device->hostdata;
3974 if (!dev) { 3897 if (!dev) {
3975 cmd->result = DID_NO_CONNECT << 16; 3898 cmd->result = DID_NO_CONNECT << 16;
3976 done(cmd); 3899 cmd->scsi_done(cmd);
3977 return 0; 3900 return 0;
3978 } 3901 }
3979 memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr)); 3902 memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
3980 3903
3981 if (unlikely(lockup_detected(h))) { 3904 if (unlikely(lockup_detected(h))) {
3982 cmd->result = DID_ERROR << 16; 3905 cmd->result = DID_ERROR << 16;
3983 done(cmd); 3906 cmd->scsi_done(cmd);
3984 return 0; 3907 return 0;
3985 } 3908 }
3986 c = cmd_alloc(h); 3909 c = cmd_alloc(h);
@@ -3990,9 +3913,6 @@ static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
3990 } 3913 }
3991 3914
3992 /* Fill in the command list header */ 3915 /* Fill in the command list header */
3993
3994 cmd->scsi_done = done; /* save this for use by completion code */
3995
3996 /* save c in case we have to abort it */ 3916 /* save c in case we have to abort it */
3997 cmd->host_scribble = (unsigned char *) c; 3917 cmd->host_scribble = (unsigned char *) c;
3998 3918
@@ -4026,8 +3946,8 @@ static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
4026 3946
4027 c->Header.ReplyQueue = 0; /* unused in simple mode */ 3947 c->Header.ReplyQueue = 0; /* unused in simple mode */
4028 memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8); 3948 memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
4029 c->Header.Tag.lower = (c->cmdindex << DIRECT_LOOKUP_SHIFT); 3949 c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT) |
4030 c->Header.Tag.lower |= DIRECT_LOOKUP_BIT; 3950 DIRECT_LOOKUP_BIT);
4031 3951
4032 /* Fill in the request block... */ 3952 /* Fill in the request block... */
4033 3953
@@ -4036,17 +3956,18 @@ static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
4036 BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB)); 3956 BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
4037 c->Request.CDBLen = cmd->cmd_len; 3957 c->Request.CDBLen = cmd->cmd_len;
4038 memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len); 3958 memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
4039 c->Request.Type.Type = TYPE_CMD;
4040 c->Request.Type.Attribute = ATTR_SIMPLE;
4041 switch (cmd->sc_data_direction) { 3959 switch (cmd->sc_data_direction) {
4042 case DMA_TO_DEVICE: 3960 case DMA_TO_DEVICE:
4043 c->Request.Type.Direction = XFER_WRITE; 3961 c->Request.type_attr_dir =
3962 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_WRITE);
4044 break; 3963 break;
4045 case DMA_FROM_DEVICE: 3964 case DMA_FROM_DEVICE:
4046 c->Request.Type.Direction = XFER_READ; 3965 c->Request.type_attr_dir =
3966 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_READ);
4047 break; 3967 break;
4048 case DMA_NONE: 3968 case DMA_NONE:
4049 c->Request.Type.Direction = XFER_NONE; 3969 c->Request.type_attr_dir =
3970 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_NONE);
4050 break; 3971 break;
4051 case DMA_BIDIRECTIONAL: 3972 case DMA_BIDIRECTIONAL:
4052 /* This can happen if a buggy application does a scsi passthru 3973 /* This can happen if a buggy application does a scsi passthru
@@ -4054,7 +3975,8 @@ static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
4054 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() ) 3975 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() )
4055 */ 3976 */
4056 3977
4057 c->Request.Type.Direction = XFER_RSVD; 3978 c->Request.type_attr_dir =
3979 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_RSVD);
4058 /* This is technically wrong, and hpsa controllers should 3980 /* This is technically wrong, and hpsa controllers should
4059 * reject it with CMD_INVALID, which is the most correct 3981 * reject it with CMD_INVALID, which is the most correct
4060 * response, but non-fibre backends appear to let it 3982 * response, but non-fibre backends appear to let it
@@ -4081,8 +4003,6 @@ static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
4081 return 0; 4003 return 0;
4082} 4004}
4083 4005
4084static DEF_SCSI_QCMD(hpsa_scsi_queue_command)
4085
4086static int do_not_scan_if_controller_locked_up(struct ctlr_info *h) 4006static int do_not_scan_if_controller_locked_up(struct ctlr_info *h)
4087{ 4007{
4088 unsigned long flags; 4008 unsigned long flags;
@@ -4152,23 +4072,6 @@ static int hpsa_scan_finished(struct Scsi_Host *sh,
4152 return finished; 4072 return finished;
4153} 4073}
4154 4074
4155static int hpsa_change_queue_depth(struct scsi_device *sdev,
4156 int qdepth, int reason)
4157{
4158 struct ctlr_info *h = sdev_to_hba(sdev);
4159
4160 if (reason != SCSI_QDEPTH_DEFAULT)
4161 return -ENOTSUPP;
4162
4163 if (qdepth < 1)
4164 qdepth = 1;
4165 else
4166 if (qdepth > h->nr_cmds)
4167 qdepth = h->nr_cmds;
4168 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
4169 return sdev->queue_depth;
4170}
4171
4172static void hpsa_unregister_scsi(struct ctlr_info *h) 4075static void hpsa_unregister_scsi(struct ctlr_info *h)
4173{ 4076{
4174 /* we are being forcibly unloaded, and may not refuse. */ 4077 /* we are being forcibly unloaded, and may not refuse. */
@@ -4329,8 +4232,8 @@ static void hpsa_get_tag(struct ctlr_info *h,
4329 if (c->cmd_type == CMD_IOACCEL1) { 4232 if (c->cmd_type == CMD_IOACCEL1) {
4330 struct io_accel1_cmd *cm1 = (struct io_accel1_cmd *) 4233 struct io_accel1_cmd *cm1 = (struct io_accel1_cmd *)
4331 &h->ioaccel_cmd_pool[c->cmdindex]; 4234 &h->ioaccel_cmd_pool[c->cmdindex];
4332 *tagupper = cm1->Tag.upper; 4235 *tagupper = (u32) (cm1->tag >> 32);
4333 *taglower = cm1->Tag.lower; 4236 *taglower = (u32) (cm1->tag & 0x0ffffffffULL);
4334 return; 4237 return;
4335 } 4238 }
4336 if (c->cmd_type == CMD_IOACCEL2) { 4239 if (c->cmd_type == CMD_IOACCEL2) {
@@ -4341,11 +4244,10 @@ static void hpsa_get_tag(struct ctlr_info *h,
4341 *taglower = cm2->Tag; 4244 *taglower = cm2->Tag;
4342 return; 4245 return;
4343 } 4246 }
4344 *tagupper = c->Header.Tag.upper; 4247 *tagupper = (u32) (c->Header.tag >> 32);
4345 *taglower = c->Header.Tag.lower; 4248 *taglower = (u32) (c->Header.tag & 0x0ffffffffULL);
4346} 4249}
4347 4250
4348
4349static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr, 4251static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
4350 struct CommandList *abort, int swizzle) 4252 struct CommandList *abort, int swizzle)
4351{ 4253{
@@ -4410,7 +4312,7 @@ static struct CommandList *hpsa_find_cmd_in_queue(struct ctlr_info *h,
4410 struct CommandList *c = NULL; /* ptr into cmpQ */ 4312 struct CommandList *c = NULL; /* ptr into cmpQ */
4411 4313
4412 if (!find) 4314 if (!find)
4413 return 0; 4315 return NULL;
4414 spin_lock_irqsave(&h->lock, flags); 4316 spin_lock_irqsave(&h->lock, flags);
4415 list_for_each_entry(c, queue_head, list) { 4317 list_for_each_entry(c, queue_head, list) {
4416 if (c->scsi_cmd == NULL) /* e.g.: passthru ioctl */ 4318 if (c->scsi_cmd == NULL) /* e.g.: passthru ioctl */
@@ -4432,7 +4334,7 @@ static struct CommandList *hpsa_find_cmd_in_queue_by_tag(struct ctlr_info *h,
4432 4334
4433 spin_lock_irqsave(&h->lock, flags); 4335 spin_lock_irqsave(&h->lock, flags);
4434 list_for_each_entry(c, queue_head, list) { 4336 list_for_each_entry(c, queue_head, list) {
4435 if (memcmp(&c->Header.Tag, tag, 8) != 0) 4337 if (memcmp(&c->Header.tag, tag, 8) != 0)
4436 continue; 4338 continue;
4437 spin_unlock_irqrestore(&h->lock, flags); 4339 spin_unlock_irqrestore(&h->lock, flags);
4438 return c; 4340 return c;
@@ -4686,19 +4588,32 @@ static struct CommandList *cmd_alloc(struct ctlr_info *h)
4686 int i; 4588 int i;
4687 union u64bit temp64; 4589 union u64bit temp64;
4688 dma_addr_t cmd_dma_handle, err_dma_handle; 4590 dma_addr_t cmd_dma_handle, err_dma_handle;
4689 unsigned long flags; 4591 int loopcount;
4592
4593 /* There is some *extremely* small but non-zero chance that that
4594 * multiple threads could get in here, and one thread could
4595 * be scanning through the list of bits looking for a free
4596 * one, but the free ones are always behind him, and other
4597 * threads sneak in behind him and eat them before he can
4598 * get to them, so that while there is always a free one, a
4599 * very unlucky thread might be starved anyway, never able to
4600 * beat the other threads. In reality, this happens so
4601 * infrequently as to be indistinguishable from never.
4602 */
4690 4603
4691 spin_lock_irqsave(&h->lock, flags); 4604 loopcount = 0;
4692 do { 4605 do {
4693 i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds); 4606 i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
4694 if (i == h->nr_cmds) { 4607 if (i == h->nr_cmds)
4695 spin_unlock_irqrestore(&h->lock, flags); 4608 i = 0;
4696 return NULL; 4609 loopcount++;
4697 } 4610 } while (test_and_set_bit(i & (BITS_PER_LONG - 1),
4698 } while (test_and_set_bit 4611 h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0 &&
4699 (i & (BITS_PER_LONG - 1), 4612 loopcount < 10);
4700 h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0); 4613
4701 spin_unlock_irqrestore(&h->lock, flags); 4614 /* Thread got starved? We do not expect this to ever happen. */
4615 if (loopcount >= 10)
4616 return NULL;
4702 4617
4703 c = h->cmd_pool + i; 4618 c = h->cmd_pool + i;
4704 memset(c, 0, sizeof(*c)); 4619 memset(c, 0, sizeof(*c));
@@ -4714,9 +4629,8 @@ static struct CommandList *cmd_alloc(struct ctlr_info *h)
4714 INIT_LIST_HEAD(&c->list); 4629 INIT_LIST_HEAD(&c->list);
4715 c->busaddr = (u32) cmd_dma_handle; 4630 c->busaddr = (u32) cmd_dma_handle;
4716 temp64.val = (u64) err_dma_handle; 4631 temp64.val = (u64) err_dma_handle;
4717 c->ErrDesc.Addr.lower = temp64.val32.lower; 4632 c->ErrDesc.Addr = cpu_to_le64(err_dma_handle);
4718 c->ErrDesc.Addr.upper = temp64.val32.upper; 4633 c->ErrDesc.Len = cpu_to_le32(sizeof(*c->err_info));
4719 c->ErrDesc.Len = sizeof(*c->err_info);
4720 4634
4721 c->h = h; 4635 c->h = h;
4722 return c; 4636 return c;
@@ -4729,7 +4643,6 @@ static struct CommandList *cmd_alloc(struct ctlr_info *h)
4729static struct CommandList *cmd_special_alloc(struct ctlr_info *h) 4643static struct CommandList *cmd_special_alloc(struct ctlr_info *h)
4730{ 4644{
4731 struct CommandList *c; 4645 struct CommandList *c;
4732 union u64bit temp64;
4733 dma_addr_t cmd_dma_handle, err_dma_handle; 4646 dma_addr_t cmd_dma_handle, err_dma_handle;
4734 4647
4735 c = pci_zalloc_consistent(h->pdev, sizeof(*c), &cmd_dma_handle); 4648 c = pci_zalloc_consistent(h->pdev, sizeof(*c), &cmd_dma_handle);
@@ -4750,10 +4663,8 @@ static struct CommandList *cmd_special_alloc(struct ctlr_info *h)
4750 4663
4751 INIT_LIST_HEAD(&c->list); 4664 INIT_LIST_HEAD(&c->list);
4752 c->busaddr = (u32) cmd_dma_handle; 4665 c->busaddr = (u32) cmd_dma_handle;
4753 temp64.val = (u64) err_dma_handle; 4666 c->ErrDesc.Addr = cpu_to_le64(err_dma_handle);
4754 c->ErrDesc.Addr.lower = temp64.val32.lower; 4667 c->ErrDesc.Len = cpu_to_le32(sizeof(*c->err_info));
4755 c->ErrDesc.Addr.upper = temp64.val32.upper;
4756 c->ErrDesc.Len = sizeof(*c->err_info);
4757 4668
4758 c->h = h; 4669 c->h = h;
4759 return c; 4670 return c;
@@ -4762,30 +4673,25 @@ static struct CommandList *cmd_special_alloc(struct ctlr_info *h)
4762static void cmd_free(struct ctlr_info *h, struct CommandList *c) 4673static void cmd_free(struct ctlr_info *h, struct CommandList *c)
4763{ 4674{
4764 int i; 4675 int i;
4765 unsigned long flags;
4766 4676
4767 i = c - h->cmd_pool; 4677 i = c - h->cmd_pool;
4768 spin_lock_irqsave(&h->lock, flags);
4769 clear_bit(i & (BITS_PER_LONG - 1), 4678 clear_bit(i & (BITS_PER_LONG - 1),
4770 h->cmd_pool_bits + (i / BITS_PER_LONG)); 4679 h->cmd_pool_bits + (i / BITS_PER_LONG));
4771 spin_unlock_irqrestore(&h->lock, flags);
4772} 4680}
4773 4681
4774static void cmd_special_free(struct ctlr_info *h, struct CommandList *c) 4682static void cmd_special_free(struct ctlr_info *h, struct CommandList *c)
4775{ 4683{
4776 union u64bit temp64;
4777
4778 temp64.val32.lower = c->ErrDesc.Addr.lower;
4779 temp64.val32.upper = c->ErrDesc.Addr.upper;
4780 pci_free_consistent(h->pdev, sizeof(*c->err_info), 4684 pci_free_consistent(h->pdev, sizeof(*c->err_info),
4781 c->err_info, (dma_addr_t) temp64.val); 4685 c->err_info,
4686 (dma_addr_t) le64_to_cpu(c->ErrDesc.Addr));
4782 pci_free_consistent(h->pdev, sizeof(*c), 4687 pci_free_consistent(h->pdev, sizeof(*c),
4783 c, (dma_addr_t) (c->busaddr & DIRECT_LOOKUP_MASK)); 4688 c, (dma_addr_t) (c->busaddr & DIRECT_LOOKUP_MASK));
4784} 4689}
4785 4690
4786#ifdef CONFIG_COMPAT 4691#ifdef CONFIG_COMPAT
4787 4692
4788static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg) 4693static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd,
4694 void __user *arg)
4789{ 4695{
4790 IOCTL32_Command_struct __user *arg32 = 4696 IOCTL32_Command_struct __user *arg32 =
4791 (IOCTL32_Command_struct __user *) arg; 4697 (IOCTL32_Command_struct __user *) arg;
@@ -4810,7 +4716,7 @@ static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg)
4810 if (err) 4716 if (err)
4811 return -EFAULT; 4717 return -EFAULT;
4812 4718
4813 err = hpsa_ioctl(dev, CCISS_PASSTHRU, (void *)p); 4719 err = hpsa_ioctl(dev, CCISS_PASSTHRU, p);
4814 if (err) 4720 if (err)
4815 return err; 4721 return err;
4816 err |= copy_in_user(&arg32->error_info, &p->error_info, 4722 err |= copy_in_user(&arg32->error_info, &p->error_info,
@@ -4821,7 +4727,7 @@ static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg)
4821} 4727}
4822 4728
4823static int hpsa_ioctl32_big_passthru(struct scsi_device *dev, 4729static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
4824 int cmd, void *arg) 4730 int cmd, void __user *arg)
4825{ 4731{
4826 BIG_IOCTL32_Command_struct __user *arg32 = 4732 BIG_IOCTL32_Command_struct __user *arg32 =
4827 (BIG_IOCTL32_Command_struct __user *) arg; 4733 (BIG_IOCTL32_Command_struct __user *) arg;
@@ -4848,7 +4754,7 @@ static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
4848 if (err) 4754 if (err)
4849 return -EFAULT; 4755 return -EFAULT;
4850 4756
4851 err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, (void *)p); 4757 err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, p);
4852 if (err) 4758 if (err)
4853 return err; 4759 return err;
4854 err |= copy_in_user(&arg32->error_info, &p->error_info, 4760 err |= copy_in_user(&arg32->error_info, &p->error_info,
@@ -4858,7 +4764,7 @@ static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
4858 return err; 4764 return err;
4859} 4765}
4860 4766
4861static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg) 4767static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
4862{ 4768{
4863 switch (cmd) { 4769 switch (cmd) {
4864 case CCISS_GETPCIINFO: 4770 case CCISS_GETPCIINFO:
@@ -4932,7 +4838,7 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
4932 IOCTL_Command_struct iocommand; 4838 IOCTL_Command_struct iocommand;
4933 struct CommandList *c; 4839 struct CommandList *c;
4934 char *buff = NULL; 4840 char *buff = NULL;
4935 union u64bit temp64; 4841 u64 temp64;
4936 int rc = 0; 4842 int rc = 0;
4937 4843
4938 if (!argp) 4844 if (!argp)
@@ -4971,14 +4877,14 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
4971 c->Header.ReplyQueue = 0; /* unused in simple mode */ 4877 c->Header.ReplyQueue = 0; /* unused in simple mode */
4972 if (iocommand.buf_size > 0) { /* buffer to fill */ 4878 if (iocommand.buf_size > 0) { /* buffer to fill */
4973 c->Header.SGList = 1; 4879 c->Header.SGList = 1;
4974 c->Header.SGTotal = 1; 4880 c->Header.SGTotal = cpu_to_le16(1);
4975 } else { /* no buffers to fill */ 4881 } else { /* no buffers to fill */
4976 c->Header.SGList = 0; 4882 c->Header.SGList = 0;
4977 c->Header.SGTotal = 0; 4883 c->Header.SGTotal = cpu_to_le16(0);
4978 } 4884 }
4979 memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN)); 4885 memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));
4980 /* use the kernel address the cmd block for tag */ 4886 /* use the kernel address the cmd block for tag */
4981 c->Header.Tag.lower = c->busaddr; 4887 c->Header.tag = c->busaddr;
4982 4888
4983 /* Fill in Request block */ 4889 /* Fill in Request block */
4984 memcpy(&c->Request, &iocommand.Request, 4890 memcpy(&c->Request, &iocommand.Request,
@@ -4986,19 +4892,17 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
4986 4892
4987 /* Fill in the scatter gather information */ 4893 /* Fill in the scatter gather information */
4988 if (iocommand.buf_size > 0) { 4894 if (iocommand.buf_size > 0) {
4989 temp64.val = pci_map_single(h->pdev, buff, 4895 temp64 = pci_map_single(h->pdev, buff,
4990 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL); 4896 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
4991 if (dma_mapping_error(&h->pdev->dev, temp64.val)) { 4897 if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) {
4992 c->SG[0].Addr.lower = 0; 4898 c->SG[0].Addr = cpu_to_le64(0);
4993 c->SG[0].Addr.upper = 0; 4899 c->SG[0].Len = cpu_to_le32(0);
4994 c->SG[0].Len = 0;
4995 rc = -ENOMEM; 4900 rc = -ENOMEM;
4996 goto out; 4901 goto out;
4997 } 4902 }
4998 c->SG[0].Addr.lower = temp64.val32.lower; 4903 c->SG[0].Addr = cpu_to_le64(temp64);
4999 c->SG[0].Addr.upper = temp64.val32.upper; 4904 c->SG[0].Len = cpu_to_le32(iocommand.buf_size);
5000 c->SG[0].Len = iocommand.buf_size; 4905 c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */
5001 c->SG[0].Ext = HPSA_SG_LAST; /* we are not chaining*/
5002 } 4906 }
5003 hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c); 4907 hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
5004 if (iocommand.buf_size > 0) 4908 if (iocommand.buf_size > 0)
@@ -5033,7 +4937,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
5033 struct CommandList *c; 4937 struct CommandList *c;
5034 unsigned char **buff = NULL; 4938 unsigned char **buff = NULL;
5035 int *buff_size = NULL; 4939 int *buff_size = NULL;
5036 union u64bit temp64; 4940 u64 temp64;
5037 BYTE sg_used = 0; 4941 BYTE sg_used = 0;
5038 int status = 0; 4942 int status = 0;
5039 int i; 4943 int i;
@@ -5107,29 +5011,30 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
5107 } 5011 }
5108 c->cmd_type = CMD_IOCTL_PEND; 5012 c->cmd_type = CMD_IOCTL_PEND;
5109 c->Header.ReplyQueue = 0; 5013 c->Header.ReplyQueue = 0;
5110 c->Header.SGList = c->Header.SGTotal = sg_used; 5014 c->Header.SGList = (u8) sg_used;
5015 c->Header.SGTotal = cpu_to_le16(sg_used);
5111 memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN)); 5016 memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
5112 c->Header.Tag.lower = c->busaddr; 5017 c->Header.tag = c->busaddr;
5113 memcpy(&c->Request, &ioc->Request, sizeof(c->Request)); 5018 memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
5114 if (ioc->buf_size > 0) { 5019 if (ioc->buf_size > 0) {
5115 int i; 5020 int i;
5116 for (i = 0; i < sg_used; i++) { 5021 for (i = 0; i < sg_used; i++) {
5117 temp64.val = pci_map_single(h->pdev, buff[i], 5022 temp64 = pci_map_single(h->pdev, buff[i],
5118 buff_size[i], PCI_DMA_BIDIRECTIONAL); 5023 buff_size[i], PCI_DMA_BIDIRECTIONAL);
5119 if (dma_mapping_error(&h->pdev->dev, temp64.val)) { 5024 if (dma_mapping_error(&h->pdev->dev,
5120 c->SG[i].Addr.lower = 0; 5025 (dma_addr_t) temp64)) {
5121 c->SG[i].Addr.upper = 0; 5026 c->SG[i].Addr = cpu_to_le64(0);
5122 c->SG[i].Len = 0; 5027 c->SG[i].Len = cpu_to_le32(0);
5123 hpsa_pci_unmap(h->pdev, c, i, 5028 hpsa_pci_unmap(h->pdev, c, i,
5124 PCI_DMA_BIDIRECTIONAL); 5029 PCI_DMA_BIDIRECTIONAL);
5125 status = -ENOMEM; 5030 status = -ENOMEM;
5126 goto cleanup0; 5031 goto cleanup0;
5127 } 5032 }
5128 c->SG[i].Addr.lower = temp64.val32.lower; 5033 c->SG[i].Addr = cpu_to_le64(temp64);
5129 c->SG[i].Addr.upper = temp64.val32.upper; 5034 c->SG[i].Len = cpu_to_le32(buff_size[i]);
5130 c->SG[i].Len = buff_size[i]; 5035 c->SG[i].Ext = cpu_to_le32(0);
5131 c->SG[i].Ext = i < sg_used - 1 ? 0 : HPSA_SG_LAST;
5132 } 5036 }
5037 c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST);
5133 } 5038 }
5134 hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c); 5039 hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
5135 if (sg_used) 5040 if (sg_used)
@@ -5206,7 +5111,7 @@ static void decrement_passthru_count(struct ctlr_info *h)
5206/* 5111/*
5207 * ioctl 5112 * ioctl
5208 */ 5113 */
5209static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg) 5114static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
5210{ 5115{
5211 struct ctlr_info *h; 5116 struct ctlr_info *h;
5212 void __user *argp = (void __user *)arg; 5117 void __user *argp = (void __user *)arg;
@@ -5268,20 +5173,20 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
5268{ 5173{
5269 int pci_dir = XFER_NONE; 5174 int pci_dir = XFER_NONE;
5270 struct CommandList *a; /* for commands to be aborted */ 5175 struct CommandList *a; /* for commands to be aborted */
5176 u32 tupper, tlower;
5271 5177
5272 c->cmd_type = CMD_IOCTL_PEND; 5178 c->cmd_type = CMD_IOCTL_PEND;
5273 c->Header.ReplyQueue = 0; 5179 c->Header.ReplyQueue = 0;
5274 if (buff != NULL && size > 0) { 5180 if (buff != NULL && size > 0) {
5275 c->Header.SGList = 1; 5181 c->Header.SGList = 1;
5276 c->Header.SGTotal = 1; 5182 c->Header.SGTotal = cpu_to_le16(1);
5277 } else { 5183 } else {
5278 c->Header.SGList = 0; 5184 c->Header.SGList = 0;
5279 c->Header.SGTotal = 0; 5185 c->Header.SGTotal = cpu_to_le16(0);
5280 } 5186 }
5281 c->Header.Tag.lower = c->busaddr; 5187 c->Header.tag = c->busaddr;
5282 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8); 5188 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
5283 5189
5284 c->Request.Type.Type = cmd_type;
5285 if (cmd_type == TYPE_CMD) { 5190 if (cmd_type == TYPE_CMD) {
5286 switch (cmd) { 5191 switch (cmd) {
5287 case HPSA_INQUIRY: 5192 case HPSA_INQUIRY:
@@ -5291,8 +5196,8 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
5291 c->Request.CDB[2] = (page_code & 0xff); 5196 c->Request.CDB[2] = (page_code & 0xff);
5292 } 5197 }
5293 c->Request.CDBLen = 6; 5198 c->Request.CDBLen = 6;
5294 c->Request.Type.Attribute = ATTR_SIMPLE; 5199 c->Request.type_attr_dir =
5295 c->Request.Type.Direction = XFER_READ; 5200 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
5296 c->Request.Timeout = 0; 5201 c->Request.Timeout = 0;
5297 c->Request.CDB[0] = HPSA_INQUIRY; 5202 c->Request.CDB[0] = HPSA_INQUIRY;
5298 c->Request.CDB[4] = size & 0xFF; 5203 c->Request.CDB[4] = size & 0xFF;
@@ -5303,8 +5208,8 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
5303 mode = 00 target = 0. Nothing to write. 5208 mode = 00 target = 0. Nothing to write.
5304 */ 5209 */
5305 c->Request.CDBLen = 12; 5210 c->Request.CDBLen = 12;
5306 c->Request.Type.Attribute = ATTR_SIMPLE; 5211 c->Request.type_attr_dir =
5307 c->Request.Type.Direction = XFER_READ; 5212 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
5308 c->Request.Timeout = 0; 5213 c->Request.Timeout = 0;
5309 c->Request.CDB[0] = cmd; 5214 c->Request.CDB[0] = cmd;
5310 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */ 5215 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
@@ -5314,8 +5219,9 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
5314 break; 5219 break;
5315 case HPSA_CACHE_FLUSH: 5220 case HPSA_CACHE_FLUSH:
5316 c->Request.CDBLen = 12; 5221 c->Request.CDBLen = 12;
5317 c->Request.Type.Attribute = ATTR_SIMPLE; 5222 c->Request.type_attr_dir =
5318 c->Request.Type.Direction = XFER_WRITE; 5223 TYPE_ATTR_DIR(cmd_type,
5224 ATTR_SIMPLE, XFER_WRITE);
5319 c->Request.Timeout = 0; 5225 c->Request.Timeout = 0;
5320 c->Request.CDB[0] = BMIC_WRITE; 5226 c->Request.CDB[0] = BMIC_WRITE;
5321 c->Request.CDB[6] = BMIC_CACHE_FLUSH; 5227 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
@@ -5324,14 +5230,14 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
5324 break; 5230 break;
5325 case TEST_UNIT_READY: 5231 case TEST_UNIT_READY:
5326 c->Request.CDBLen = 6; 5232 c->Request.CDBLen = 6;
5327 c->Request.Type.Attribute = ATTR_SIMPLE; 5233 c->Request.type_attr_dir =
5328 c->Request.Type.Direction = XFER_NONE; 5234 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
5329 c->Request.Timeout = 0; 5235 c->Request.Timeout = 0;
5330 break; 5236 break;
5331 case HPSA_GET_RAID_MAP: 5237 case HPSA_GET_RAID_MAP:
5332 c->Request.CDBLen = 12; 5238 c->Request.CDBLen = 12;
5333 c->Request.Type.Attribute = ATTR_SIMPLE; 5239 c->Request.type_attr_dir =
5334 c->Request.Type.Direction = XFER_READ; 5240 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
5335 c->Request.Timeout = 0; 5241 c->Request.Timeout = 0;
5336 c->Request.CDB[0] = HPSA_CISS_READ; 5242 c->Request.CDB[0] = HPSA_CISS_READ;
5337 c->Request.CDB[1] = cmd; 5243 c->Request.CDB[1] = cmd;
@@ -5342,8 +5248,8 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
5342 break; 5248 break;
5343 case BMIC_SENSE_CONTROLLER_PARAMETERS: 5249 case BMIC_SENSE_CONTROLLER_PARAMETERS:
5344 c->Request.CDBLen = 10; 5250 c->Request.CDBLen = 10;
5345 c->Request.Type.Attribute = ATTR_SIMPLE; 5251 c->Request.type_attr_dir =
5346 c->Request.Type.Direction = XFER_READ; 5252 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
5347 c->Request.Timeout = 0; 5253 c->Request.Timeout = 0;
5348 c->Request.CDB[0] = BMIC_READ; 5254 c->Request.CDB[0] = BMIC_READ;
5349 c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS; 5255 c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS;
@@ -5360,9 +5266,8 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
5360 5266
5361 case HPSA_DEVICE_RESET_MSG: 5267 case HPSA_DEVICE_RESET_MSG:
5362 c->Request.CDBLen = 16; 5268 c->Request.CDBLen = 16;
5363 c->Request.Type.Type = 1; /* It is a MSG not a CMD */ 5269 c->Request.type_attr_dir =
5364 c->Request.Type.Attribute = ATTR_SIMPLE; 5270 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
5365 c->Request.Type.Direction = XFER_NONE;
5366 c->Request.Timeout = 0; /* Don't time out */ 5271 c->Request.Timeout = 0; /* Don't time out */
5367 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB)); 5272 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
5368 c->Request.CDB[0] = cmd; 5273 c->Request.CDB[0] = cmd;
@@ -5376,27 +5281,28 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
5376 break; 5281 break;
5377 case HPSA_ABORT_MSG: 5282 case HPSA_ABORT_MSG:
5378 a = buff; /* point to command to be aborted */ 5283 a = buff; /* point to command to be aborted */
5379 dev_dbg(&h->pdev->dev, "Abort Tag:0x%08x:%08x using request Tag:0x%08x:%08x\n", 5284 dev_dbg(&h->pdev->dev, "Abort Tag:0x%016llx using request Tag:0x%016llx",
5380 a->Header.Tag.upper, a->Header.Tag.lower, 5285 a->Header.tag, c->Header.tag);
5381 c->Header.Tag.upper, c->Header.Tag.lower); 5286 tlower = (u32) (a->Header.tag >> 32);
5287 tupper = (u32) (a->Header.tag & 0x0ffffffffULL);
5382 c->Request.CDBLen = 16; 5288 c->Request.CDBLen = 16;
5383 c->Request.Type.Type = TYPE_MSG; 5289 c->Request.type_attr_dir =
5384 c->Request.Type.Attribute = ATTR_SIMPLE; 5290 TYPE_ATTR_DIR(cmd_type,
5385 c->Request.Type.Direction = XFER_WRITE; 5291 ATTR_SIMPLE, XFER_WRITE);
5386 c->Request.Timeout = 0; /* Don't time out */ 5292 c->Request.Timeout = 0; /* Don't time out */
5387 c->Request.CDB[0] = HPSA_TASK_MANAGEMENT; 5293 c->Request.CDB[0] = HPSA_TASK_MANAGEMENT;
5388 c->Request.CDB[1] = HPSA_TMF_ABORT_TASK; 5294 c->Request.CDB[1] = HPSA_TMF_ABORT_TASK;
5389 c->Request.CDB[2] = 0x00; /* reserved */ 5295 c->Request.CDB[2] = 0x00; /* reserved */
5390 c->Request.CDB[3] = 0x00; /* reserved */ 5296 c->Request.CDB[3] = 0x00; /* reserved */
5391 /* Tag to abort goes in CDB[4]-CDB[11] */ 5297 /* Tag to abort goes in CDB[4]-CDB[11] */
5392 c->Request.CDB[4] = a->Header.Tag.lower & 0xFF; 5298 c->Request.CDB[4] = tlower & 0xFF;
5393 c->Request.CDB[5] = (a->Header.Tag.lower >> 8) & 0xFF; 5299 c->Request.CDB[5] = (tlower >> 8) & 0xFF;
5394 c->Request.CDB[6] = (a->Header.Tag.lower >> 16) & 0xFF; 5300 c->Request.CDB[6] = (tlower >> 16) & 0xFF;
5395 c->Request.CDB[7] = (a->Header.Tag.lower >> 24) & 0xFF; 5301 c->Request.CDB[7] = (tlower >> 24) & 0xFF;
5396 c->Request.CDB[8] = a->Header.Tag.upper & 0xFF; 5302 c->Request.CDB[8] = tupper & 0xFF;
5397 c->Request.CDB[9] = (a->Header.Tag.upper >> 8) & 0xFF; 5303 c->Request.CDB[9] = (tupper >> 8) & 0xFF;
5398 c->Request.CDB[10] = (a->Header.Tag.upper >> 16) & 0xFF; 5304 c->Request.CDB[10] = (tupper >> 16) & 0xFF;
5399 c->Request.CDB[11] = (a->Header.Tag.upper >> 24) & 0xFF; 5305 c->Request.CDB[11] = (tupper >> 24) & 0xFF;
5400 c->Request.CDB[12] = 0x00; /* reserved */ 5306 c->Request.CDB[12] = 0x00; /* reserved */
5401 c->Request.CDB[13] = 0x00; /* reserved */ 5307 c->Request.CDB[13] = 0x00; /* reserved */
5402 c->Request.CDB[14] = 0x00; /* reserved */ 5308 c->Request.CDB[14] = 0x00; /* reserved */
@@ -5412,7 +5318,7 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
5412 BUG(); 5318 BUG();
5413 } 5319 }
5414 5320
5415 switch (c->Request.Type.Direction) { 5321 switch (GET_DIR(c->Request.type_attr_dir)) {
5416 case XFER_READ: 5322 case XFER_READ:
5417 pci_dir = PCI_DMA_FROMDEVICE; 5323 pci_dir = PCI_DMA_FROMDEVICE;
5418 break; 5324 break;
@@ -5467,15 +5373,9 @@ static void start_io(struct ctlr_info *h, unsigned long *flags)
5467 5373
5468 /* Put job onto the completed Q */ 5374 /* Put job onto the completed Q */
5469 addQ(&h->cmpQ, c); 5375 addQ(&h->cmpQ, c);
5470 5376 atomic_inc(&h->commands_outstanding);
5471 /* Must increment commands_outstanding before unlocking
5472 * and submitting to avoid race checking for fifo full
5473 * condition.
5474 */
5475 h->commands_outstanding++;
5476
5477 /* Tell the controller execute command */
5478 spin_unlock_irqrestore(&h->lock, *flags); 5377 spin_unlock_irqrestore(&h->lock, *flags);
5378 /* Tell the controller execute command */
5479 h->access.submit_command(h, c); 5379 h->access.submit_command(h, c);
5480 spin_lock_irqsave(&h->lock, *flags); 5380 spin_lock_irqsave(&h->lock, *flags);
5481 } 5381 }
@@ -5521,6 +5421,7 @@ static inline void finish_cmd(struct CommandList *c)
5521 unsigned long flags; 5421 unsigned long flags;
5522 int io_may_be_stalled = 0; 5422 int io_may_be_stalled = 0;
5523 struct ctlr_info *h = c->h; 5423 struct ctlr_info *h = c->h;
5424 int count;
5524 5425
5525 spin_lock_irqsave(&h->lock, flags); 5426 spin_lock_irqsave(&h->lock, flags);
5526 removeQ(c); 5427 removeQ(c);
@@ -5541,11 +5442,10 @@ static inline void finish_cmd(struct CommandList *c)
5541 * want to get in a cycle where we call start_io every time 5442 * want to get in a cycle where we call start_io every time
5542 * through here. 5443 * through here.
5543 */ 5444 */
5544 if (unlikely(h->fifo_recently_full) && 5445 count = atomic_read(&h->commands_outstanding);
5545 h->commands_outstanding < 5)
5546 io_may_be_stalled = 1;
5547
5548 spin_unlock_irqrestore(&h->lock, flags); 5446 spin_unlock_irqrestore(&h->lock, flags);
5447 if (unlikely(h->fifo_recently_full) && count < 5)
5448 io_may_be_stalled = 1;
5549 5449
5550 dial_up_lockup_detection_on_fw_flash_complete(c->h, c); 5450 dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
5551 if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI 5451 if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI
@@ -5765,22 +5665,20 @@ static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
5765 5665
5766 cmd->CommandHeader.ReplyQueue = 0; 5666 cmd->CommandHeader.ReplyQueue = 0;
5767 cmd->CommandHeader.SGList = 0; 5667 cmd->CommandHeader.SGList = 0;
5768 cmd->CommandHeader.SGTotal = 0; 5668 cmd->CommandHeader.SGTotal = cpu_to_le16(0);
5769 cmd->CommandHeader.Tag.lower = paddr32; 5669 cmd->CommandHeader.tag = paddr32;
5770 cmd->CommandHeader.Tag.upper = 0;
5771 memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8); 5670 memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
5772 5671
5773 cmd->Request.CDBLen = 16; 5672 cmd->Request.CDBLen = 16;
5774 cmd->Request.Type.Type = TYPE_MSG; 5673 cmd->Request.type_attr_dir =
5775 cmd->Request.Type.Attribute = ATTR_HEADOFQUEUE; 5674 TYPE_ATTR_DIR(TYPE_MSG, ATTR_HEADOFQUEUE, XFER_NONE);
5776 cmd->Request.Type.Direction = XFER_NONE;
5777 cmd->Request.Timeout = 0; /* Don't time out */ 5675 cmd->Request.Timeout = 0; /* Don't time out */
5778 cmd->Request.CDB[0] = opcode; 5676 cmd->Request.CDB[0] = opcode;
5779 cmd->Request.CDB[1] = type; 5677 cmd->Request.CDB[1] = type;
5780 memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */ 5678 memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
5781 cmd->ErrorDescriptor.Addr.lower = paddr32 + sizeof(*cmd); 5679 cmd->ErrorDescriptor.Addr =
5782 cmd->ErrorDescriptor.Addr.upper = 0; 5680 cpu_to_le64((paddr32 + sizeof(*cmd)));
5783 cmd->ErrorDescriptor.Len = sizeof(struct ErrorInfo); 5681 cmd->ErrorDescriptor.Len = cpu_to_le32(sizeof(struct ErrorInfo));
5784 5682
5785 writel(paddr32, vaddr + SA5_REQUEST_PORT_OFFSET); 5683 writel(paddr32, vaddr + SA5_REQUEST_PORT_OFFSET);
5786 5684
@@ -5818,7 +5716,7 @@ static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
5818#define hpsa_noop(p) hpsa_message(p, 3, 0) 5716#define hpsa_noop(p) hpsa_message(p, 3, 0)
5819 5717
5820static int hpsa_controller_hard_reset(struct pci_dev *pdev, 5718static int hpsa_controller_hard_reset(struct pci_dev *pdev,
5821 void * __iomem vaddr, u32 use_doorbell) 5719 void __iomem *vaddr, u32 use_doorbell)
5822{ 5720{
5823 u16 pmcsr; 5721 u16 pmcsr;
5824 int pos; 5722 int pos;
@@ -6056,7 +5954,7 @@ unmap_vaddr:
6056 * the io functions. 5954 * the io functions.
6057 * This is for debug only. 5955 * This is for debug only.
6058 */ 5956 */
6059static void print_cfg_table(struct device *dev, struct CfgTable *tb) 5957static void print_cfg_table(struct device *dev, struct CfgTable __iomem *tb)
6060{ 5958{
6061#ifdef HPSA_DEBUG 5959#ifdef HPSA_DEBUG
6062 int i; 5960 int i;
@@ -6323,11 +6221,11 @@ static void hpsa_find_board_params(struct ctlr_info *h)
6323 h->max_cmd_sg_entries = 31; 6221 h->max_cmd_sg_entries = 31;
6324 if (h->maxsgentries > 512) { 6222 if (h->maxsgentries > 512) {
6325 h->max_cmd_sg_entries = 32; 6223 h->max_cmd_sg_entries = 32;
6326 h->chainsize = h->maxsgentries - h->max_cmd_sg_entries + 1; 6224 h->chainsize = h->maxsgentries - h->max_cmd_sg_entries;
6327 h->maxsgentries--; /* save one for chain pointer */ 6225 h->maxsgentries--; /* save one for chain pointer */
6328 } else { 6226 } else {
6329 h->maxsgentries = 31; /* default to traditional values */
6330 h->chainsize = 0; 6227 h->chainsize = 0;
6228 h->maxsgentries = 31; /* default to traditional values */
6331 } 6229 }
6332 6230
6333 /* Find out what task management functions are supported and cache */ 6231 /* Find out what task management functions are supported and cache */
@@ -6456,15 +6354,15 @@ static int hpsa_pci_init(struct ctlr_info *h)
6456 return err; 6354 return err;
6457 } 6355 }
6458 6356
6459 /* Enable bus mastering (pci_disable_device may disable this) */
6460 pci_set_master(h->pdev);
6461
6462 err = pci_request_regions(h->pdev, HPSA); 6357 err = pci_request_regions(h->pdev, HPSA);
6463 if (err) { 6358 if (err) {
6464 dev_err(&h->pdev->dev, 6359 dev_err(&h->pdev->dev,
6465 "cannot obtain PCI resources, aborting\n"); 6360 "cannot obtain PCI resources, aborting\n");
6466 return err; 6361 return err;
6467 } 6362 }
6363
6364 pci_set_master(h->pdev);
6365
6468 hpsa_interrupt_mode(h); 6366 hpsa_interrupt_mode(h);
6469 err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr); 6367 err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
6470 if (err) 6368 if (err)
@@ -6544,7 +6442,9 @@ static int hpsa_init_reset_devices(struct pci_dev *pdev)
6544 dev_warn(&pdev->dev, "failed to enable device.\n"); 6442 dev_warn(&pdev->dev, "failed to enable device.\n");
6545 return -ENODEV; 6443 return -ENODEV;
6546 } 6444 }
6445
6547 pci_set_master(pdev); 6446 pci_set_master(pdev);
6447
6548 /* Reset the controller with a PCI power-cycle or via doorbell */ 6448 /* Reset the controller with a PCI power-cycle or via doorbell */
6549 rc = hpsa_kdump_hard_reset_controller(pdev); 6449 rc = hpsa_kdump_hard_reset_controller(pdev);
6550 6450
@@ -7431,13 +7331,12 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
7431 cp->host_context_flags = IOACCEL1_HCFLAGS_CISS_FORMAT; 7331 cp->host_context_flags = IOACCEL1_HCFLAGS_CISS_FORMAT;
7432 cp->timeout_sec = 0; 7332 cp->timeout_sec = 0;
7433 cp->ReplyQueue = 0; 7333 cp->ReplyQueue = 0;
7434 cp->Tag.lower = (i << DIRECT_LOOKUP_SHIFT) | 7334 cp->tag =
7435 DIRECT_LOOKUP_BIT; 7335 cpu_to_le64((i << DIRECT_LOOKUP_SHIFT) |
7436 cp->Tag.upper = 0; 7336 DIRECT_LOOKUP_BIT);
7437 cp->host_addr.lower = 7337 cp->host_addr =
7438 (u32) (h->ioaccel_cmd_pool_dhandle + 7338 cpu_to_le64(h->ioaccel_cmd_pool_dhandle +
7439 (i * sizeof(struct io_accel1_cmd))); 7339 (i * sizeof(struct io_accel1_cmd)));
7440 cp->host_addr.upper = 0;
7441 } 7340 }
7442 } else if (trans_support & CFGTBL_Trans_io_accel2) { 7341 } else if (trans_support & CFGTBL_Trans_io_accel2) {
7443 u64 cfg_offset, cfg_base_addr_index; 7342 u64 cfg_offset, cfg_base_addr_index;
@@ -7711,7 +7610,7 @@ static void __attribute__((unused)) verify_offsets(void)
7711 VERIFY_OFFSET(timeout_sec, 0x62); 7610 VERIFY_OFFSET(timeout_sec, 0x62);
7712 VERIFY_OFFSET(ReplyQueue, 0x64); 7611 VERIFY_OFFSET(ReplyQueue, 0x64);
7713 VERIFY_OFFSET(reserved9, 0x65); 7612 VERIFY_OFFSET(reserved9, 0x65);
7714 VERIFY_OFFSET(Tag, 0x68); 7613 VERIFY_OFFSET(tag, 0x68);
7715 VERIFY_OFFSET(host_addr, 0x70); 7614 VERIFY_OFFSET(host_addr, 0x70);
7716 VERIFY_OFFSET(CISS_LUN, 0x78); 7615 VERIFY_OFFSET(CISS_LUN, 0x78);
7717 VERIFY_OFFSET(SG, 0x78 + 8); 7616 VERIFY_OFFSET(SG, 0x78 + 8);
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 24472cec7de3..8e06d9e280ec 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -118,7 +118,7 @@ struct ctlr_info {
118 struct CfgTable __iomem *cfgtable; 118 struct CfgTable __iomem *cfgtable;
119 int interrupts_enabled; 119 int interrupts_enabled;
120 int max_commands; 120 int max_commands;
121 int commands_outstanding; 121 atomic_t commands_outstanding;
122# define PERF_MODE_INT 0 122# define PERF_MODE_INT 0
123# define DOORBELL_INT 1 123# define DOORBELL_INT 1
124# define SIMPLE_MODE_INT 2 124# define SIMPLE_MODE_INT 2
@@ -164,7 +164,7 @@ struct ctlr_info {
164 */ 164 */
165 u32 trans_support; 165 u32 trans_support;
166 u32 trans_offset; 166 u32 trans_offset;
167 struct TransTable_struct *transtable; 167 struct TransTable_struct __iomem *transtable;
168 unsigned long transMethod; 168 unsigned long transMethod;
169 169
170 /* cap concurrent passthrus at some reasonable maximum */ 170 /* cap concurrent passthrus at some reasonable maximum */
@@ -181,7 +181,7 @@ struct ctlr_info {
181 u32 *blockFetchTable; 181 u32 *blockFetchTable;
182 u32 *ioaccel1_blockFetchTable; 182 u32 *ioaccel1_blockFetchTable;
183 u32 *ioaccel2_blockFetchTable; 183 u32 *ioaccel2_blockFetchTable;
184 u32 *ioaccel2_bft2_regs; 184 u32 __iomem *ioaccel2_bft2_regs;
185 unsigned char *hba_inquiry_data; 185 unsigned char *hba_inquiry_data;
186 u32 driver_support; 186 u32 driver_support;
187 u32 fw_support; 187 u32 fw_support;
@@ -192,7 +192,7 @@ struct ctlr_info {
192 u64 last_heartbeat_timestamp; 192 u64 last_heartbeat_timestamp;
193 u32 heartbeat_sample_interval; 193 u32 heartbeat_sample_interval;
194 atomic_t firmware_flash_in_progress; 194 atomic_t firmware_flash_in_progress;
195 u32 *lockup_detected; 195 u32 __percpu *lockup_detected;
196 struct delayed_work monitor_ctlr_work; 196 struct delayed_work monitor_ctlr_work;
197 int remove_in_progress; 197 int remove_in_progress;
198 u32 fifo_recently_full; 198 u32 fifo_recently_full;
@@ -395,7 +395,7 @@ static void SA5_performant_intr_mask(struct ctlr_info *h, unsigned long val)
395static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q) 395static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q)
396{ 396{
397 struct reply_queue_buffer *rq = &h->reply_queue[q]; 397 struct reply_queue_buffer *rq = &h->reply_queue[q];
398 unsigned long flags, register_value = FIFO_EMPTY; 398 unsigned long register_value = FIFO_EMPTY;
399 399
400 /* msi auto clears the interrupt pending bit. */ 400 /* msi auto clears the interrupt pending bit. */
401 if (!(h->msi_vector || h->msix_vector)) { 401 if (!(h->msi_vector || h->msix_vector)) {
@@ -413,9 +413,7 @@ static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q)
413 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) { 413 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
414 register_value = rq->head[rq->current_entry]; 414 register_value = rq->head[rq->current_entry];
415 rq->current_entry++; 415 rq->current_entry++;
416 spin_lock_irqsave(&h->lock, flags); 416 atomic_dec(&h->commands_outstanding);
417 h->commands_outstanding--;
418 spin_unlock_irqrestore(&h->lock, flags);
419 } else { 417 } else {
420 register_value = FIFO_EMPTY; 418 register_value = FIFO_EMPTY;
421 } 419 }
@@ -433,11 +431,7 @@ static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q)
433 */ 431 */
434static unsigned long SA5_fifo_full(struct ctlr_info *h) 432static unsigned long SA5_fifo_full(struct ctlr_info *h)
435{ 433{
436 if (h->commands_outstanding >= h->max_commands) 434 return atomic_read(&h->commands_outstanding) >= h->max_commands;
437 return 1;
438 else
439 return 0;
440
441} 435}
442/* 436/*
443 * returns value read from hardware. 437 * returns value read from hardware.
@@ -448,13 +442,9 @@ static unsigned long SA5_completed(struct ctlr_info *h,
448{ 442{
449 unsigned long register_value 443 unsigned long register_value
450 = readl(h->vaddr + SA5_REPLY_PORT_OFFSET); 444 = readl(h->vaddr + SA5_REPLY_PORT_OFFSET);
451 unsigned long flags;
452 445
453 if (register_value != FIFO_EMPTY) { 446 if (register_value != FIFO_EMPTY)
454 spin_lock_irqsave(&h->lock, flags); 447 atomic_dec(&h->commands_outstanding);
455 h->commands_outstanding--;
456 spin_unlock_irqrestore(&h->lock, flags);
457 }
458 448
459#ifdef HPSA_DEBUG 449#ifdef HPSA_DEBUG
460 if (register_value != FIFO_EMPTY) 450 if (register_value != FIFO_EMPTY)
@@ -510,7 +500,6 @@ static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
510{ 500{
511 u64 register_value; 501 u64 register_value;
512 struct reply_queue_buffer *rq = &h->reply_queue[q]; 502 struct reply_queue_buffer *rq = &h->reply_queue[q];
513 unsigned long flags;
514 503
515 BUG_ON(q >= h->nreply_queues); 504 BUG_ON(q >= h->nreply_queues);
516 505
@@ -528,9 +517,7 @@ static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
528 wmb(); 517 wmb();
529 writel((q << 24) | rq->current_entry, h->vaddr + 518 writel((q << 24) | rq->current_entry, h->vaddr +
530 IOACCEL_MODE1_CONSUMER_INDEX); 519 IOACCEL_MODE1_CONSUMER_INDEX);
531 spin_lock_irqsave(&h->lock, flags); 520 atomic_dec(&h->commands_outstanding);
532 h->commands_outstanding--;
533 spin_unlock_irqrestore(&h->lock, flags);
534 } 521 }
535 return (unsigned long) register_value; 522 return (unsigned long) register_value;
536} 523}
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index b5125dc31439..cb988c41cad9 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -252,7 +252,7 @@ struct ReportExtendedLUNdata {
252 u8 LUNListLength[4]; 252 u8 LUNListLength[4];
253 u8 extended_response_flag; 253 u8 extended_response_flag;
254 u8 reserved[3]; 254 u8 reserved[3];
255 struct ext_report_lun_entry LUN[HPSA_MAX_LUN]; 255 struct ext_report_lun_entry LUN[HPSA_MAX_PHYS_LUN];
256}; 256};
257 257
258struct SenseSubsystem_info { 258struct SenseSubsystem_info {
@@ -314,28 +314,36 @@ struct CommandListHeader {
314 u8 ReplyQueue; 314 u8 ReplyQueue;
315 u8 SGList; 315 u8 SGList;
316 u16 SGTotal; 316 u16 SGTotal;
317 struct vals32 Tag; 317 u64 tag;
318 union LUNAddr LUN; 318 union LUNAddr LUN;
319}; 319};
320 320
321struct RequestBlock { 321struct RequestBlock {
322 u8 CDBLen; 322 u8 CDBLen;
323 struct { 323 /*
324 u8 Type:3; 324 * type_attr_dir:
325 u8 Attribute:3; 325 * type: low 3 bits
326 u8 Direction:2; 326 * attr: middle 3 bits
327 } Type; 327 * dir: high 2 bits
328 */
329 u8 type_attr_dir;
330#define TYPE_ATTR_DIR(t, a, d) ((((d) & 0x03) << 6) |\
331 (((a) & 0x07) << 3) |\
332 ((t) & 0x07))
333#define GET_TYPE(tad) ((tad) & 0x07)
334#define GET_ATTR(tad) (((tad) >> 3) & 0x07)
335#define GET_DIR(tad) (((tad) >> 6) & 0x03)
328 u16 Timeout; 336 u16 Timeout;
329 u8 CDB[16]; 337 u8 CDB[16];
330}; 338};
331 339
332struct ErrDescriptor { 340struct ErrDescriptor {
333 struct vals32 Addr; 341 u64 Addr;
334 u32 Len; 342 u32 Len;
335}; 343};
336 344
337struct SGDescriptor { 345struct SGDescriptor {
338 struct vals32 Addr; 346 u64 Addr;
339 u32 Len; 347 u32 Len;
340 u32 Ext; 348 u32 Ext;
341}; 349};
@@ -434,8 +442,8 @@ struct io_accel1_cmd {
434 u16 timeout_sec; /* 0x62 - 0x63 */ 442 u16 timeout_sec; /* 0x62 - 0x63 */
435 u8 ReplyQueue; /* 0x64 */ 443 u8 ReplyQueue; /* 0x64 */
436 u8 reserved9[3]; /* 0x65 - 0x67 */ 444 u8 reserved9[3]; /* 0x65 - 0x67 */
437 struct vals32 Tag; /* 0x68 - 0x6F */ 445 u64 tag; /* 0x68 - 0x6F */
438 struct vals32 host_addr; /* 0x70 - 0x77 */ 446 u64 host_addr; /* 0x70 - 0x77 */
439 u8 CISS_LUN[8]; /* 0x78 - 0x7F */ 447 u8 CISS_LUN[8]; /* 0x78 - 0x7F */
440 struct SGDescriptor SG[IOACCEL1_MAXSGENTRIES]; 448 struct SGDescriptor SG[IOACCEL1_MAXSGENTRIES];
441} __aligned(IOACCEL1_COMMANDLIST_ALIGNMENT); 449} __aligned(IOACCEL1_COMMANDLIST_ALIGNMENT);
@@ -555,8 +563,8 @@ struct hpsa_tmf_struct {
555 u8 reserved1; /* byte 3 Reserved */ 563 u8 reserved1; /* byte 3 Reserved */
556 u32 it_nexus; /* SCSI I-T Nexus */ 564 u32 it_nexus; /* SCSI I-T Nexus */
557 u8 lun_id[8]; /* LUN ID for TMF request */ 565 u8 lun_id[8]; /* LUN ID for TMF request */
558 struct vals32 Tag; /* cciss tag associated w/ request */ 566 u64 tag; /* cciss tag associated w/ request */
559 struct vals32 abort_tag;/* cciss tag of SCSI cmd or task to abort */ 567 u64 abort_tag; /* cciss tag of SCSI cmd or task to abort */
560 u64 error_ptr; /* Error Pointer */ 568 u64 error_ptr; /* Error Pointer */
561 u32 error_len; /* Error Length */ 569 u32 error_len; /* Error Length */
562}; 570};
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
index dedb62c21b29..e995218476ed 100644
--- a/drivers/scsi/hptiop.c
+++ b/drivers/scsi/hptiop.c
@@ -1118,17 +1118,13 @@ static int hptiop_reset(struct scsi_cmnd *scp)
1118} 1118}
1119 1119
1120static int hptiop_adjust_disk_queue_depth(struct scsi_device *sdev, 1120static int hptiop_adjust_disk_queue_depth(struct scsi_device *sdev,
1121 int queue_depth, int reason) 1121 int queue_depth)
1122{ 1122{
1123 struct hptiop_hba *hba = (struct hptiop_hba *)sdev->host->hostdata; 1123 struct hptiop_hba *hba = (struct hptiop_hba *)sdev->host->hostdata;
1124 1124
1125 if (reason != SCSI_QDEPTH_DEFAULT)
1126 return -EOPNOTSUPP;
1127
1128 if (queue_depth > hba->max_requests) 1125 if (queue_depth > hba->max_requests)
1129 queue_depth = hba->max_requests; 1126 queue_depth = hba->max_requests;
1130 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth); 1127 return scsi_change_queue_depth(sdev, queue_depth);
1131 return queue_depth;
1132} 1128}
1133 1129
1134static ssize_t hptiop_show_version(struct device *dev, 1130static ssize_t hptiop_show_version(struct device *dev,
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 598c42cba5a8..f58c6d8e0264 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -1643,19 +1643,9 @@ static int ibmvfc_queuecommand_lck(struct scsi_cmnd *cmnd,
1643 int_to_scsilun(cmnd->device->lun, &vfc_cmd->iu.lun); 1643 int_to_scsilun(cmnd->device->lun, &vfc_cmd->iu.lun);
1644 memcpy(vfc_cmd->iu.cdb, cmnd->cmnd, cmnd->cmd_len); 1644 memcpy(vfc_cmd->iu.cdb, cmnd->cmnd, cmnd->cmd_len);
1645 1645
1646 if (scsi_populate_tag_msg(cmnd, tag)) { 1646 if (cmnd->flags & SCMD_TAGGED) {
1647 vfc_cmd->task_tag = cpu_to_be64(tag[1]); 1647 vfc_cmd->task_tag = cpu_to_be64(cmnd->tag);
1648 switch (tag[0]) { 1648 vfc_cmd->iu.pri_task_attr = IBMVFC_SIMPLE_TASK;
1649 case MSG_SIMPLE_TAG:
1650 vfc_cmd->iu.pri_task_attr = IBMVFC_SIMPLE_TASK;
1651 break;
1652 case MSG_HEAD_TAG:
1653 vfc_cmd->iu.pri_task_attr = IBMVFC_HEAD_OF_QUEUE;
1654 break;
1655 case MSG_ORDERED_TAG:
1656 vfc_cmd->iu.pri_task_attr = IBMVFC_ORDERED_TASK;
1657 break;
1658 };
1659 } 1649 }
1660 1650
1661 if (likely(!(rc = ibmvfc_map_sg_data(cmnd, evt, vfc_cmd, vhost->dev)))) 1651 if (likely(!(rc = ibmvfc_map_sg_data(cmnd, evt, vfc_cmd, vhost->dev))))
@@ -2897,12 +2887,6 @@ static int ibmvfc_slave_configure(struct scsi_device *sdev)
2897 spin_lock_irqsave(shost->host_lock, flags); 2887 spin_lock_irqsave(shost->host_lock, flags);
2898 if (sdev->type == TYPE_DISK) 2888 if (sdev->type == TYPE_DISK)
2899 sdev->allow_restart = 1; 2889 sdev->allow_restart = 1;
2900
2901 if (sdev->tagged_supported) {
2902 scsi_set_tag_type(sdev, MSG_SIMPLE_TAG);
2903 scsi_activate_tcq(sdev, sdev->queue_depth);
2904 } else
2905 scsi_deactivate_tcq(sdev, sdev->queue_depth);
2906 spin_unlock_irqrestore(shost->host_lock, flags); 2890 spin_unlock_irqrestore(shost->host_lock, flags);
2907 return 0; 2891 return 0;
2908} 2892}
@@ -2916,40 +2900,12 @@ static int ibmvfc_slave_configure(struct scsi_device *sdev)
2916 * Return value: 2900 * Return value:
2917 * actual depth set 2901 * actual depth set
2918 **/ 2902 **/
2919static int ibmvfc_change_queue_depth(struct scsi_device *sdev, int qdepth, 2903static int ibmvfc_change_queue_depth(struct scsi_device *sdev, int qdepth)
2920 int reason)
2921{ 2904{
2922 if (reason != SCSI_QDEPTH_DEFAULT)
2923 return -EOPNOTSUPP;
2924
2925 if (qdepth > IBMVFC_MAX_CMDS_PER_LUN) 2905 if (qdepth > IBMVFC_MAX_CMDS_PER_LUN)
2926 qdepth = IBMVFC_MAX_CMDS_PER_LUN; 2906 qdepth = IBMVFC_MAX_CMDS_PER_LUN;
2927 2907
2928 scsi_adjust_queue_depth(sdev, 0, qdepth); 2908 return scsi_change_queue_depth(sdev, qdepth);
2929 return sdev->queue_depth;
2930}
2931
2932/**
2933 * ibmvfc_change_queue_type - Change the device's queue type
2934 * @sdev: scsi device struct
2935 * @tag_type: type of tags to use
2936 *
2937 * Return value:
2938 * actual queue type set
2939 **/
2940static int ibmvfc_change_queue_type(struct scsi_device *sdev, int tag_type)
2941{
2942 if (sdev->tagged_supported) {
2943 scsi_set_tag_type(sdev, tag_type);
2944
2945 if (tag_type)
2946 scsi_activate_tcq(sdev, sdev->queue_depth);
2947 else
2948 scsi_deactivate_tcq(sdev, sdev->queue_depth);
2949 } else
2950 tag_type = 0;
2951
2952 return tag_type;
2953} 2909}
2954 2910
2955static ssize_t ibmvfc_show_host_partition_name(struct device *dev, 2911static ssize_t ibmvfc_show_host_partition_name(struct device *dev,
@@ -3133,7 +3089,7 @@ static struct scsi_host_template driver_template = {
3133 .target_alloc = ibmvfc_target_alloc, 3089 .target_alloc = ibmvfc_target_alloc,
3134 .scan_finished = ibmvfc_scan_finished, 3090 .scan_finished = ibmvfc_scan_finished,
3135 .change_queue_depth = ibmvfc_change_queue_depth, 3091 .change_queue_depth = ibmvfc_change_queue_depth,
3136 .change_queue_type = ibmvfc_change_queue_type, 3092 .change_queue_type = scsi_change_queue_type,
3137 .cmd_per_lun = 16, 3093 .cmd_per_lun = 16,
3138 .can_queue = IBMVFC_MAX_REQUESTS_DEFAULT, 3094 .can_queue = IBMVFC_MAX_REQUESTS_DEFAULT,
3139 .this_id = -1, 3095 .this_id = -1,
@@ -3141,6 +3097,8 @@ static struct scsi_host_template driver_template = {
3141 .max_sectors = IBMVFC_MAX_SECTORS, 3097 .max_sectors = IBMVFC_MAX_SECTORS,
3142 .use_clustering = ENABLE_CLUSTERING, 3098 .use_clustering = ENABLE_CLUSTERING,
3143 .shost_attrs = ibmvfc_attrs, 3099 .shost_attrs = ibmvfc_attrs,
3100 .use_blk_tags = 1,
3101 .track_queue_depth = 1,
3144}; 3102};
3145 3103
3146/** 3104/**
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 7b23f21f22f1..acea5d6eebd0 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -1929,7 +1929,6 @@ static int ibmvscsi_slave_configure(struct scsi_device *sdev)
1929 blk_queue_rq_timeout(sdev->request_queue, 120 * HZ); 1929 blk_queue_rq_timeout(sdev->request_queue, 120 * HZ);
1930 } 1930 }
1931 spin_unlock_irqrestore(shost->host_lock, lock_flags); 1931 spin_unlock_irqrestore(shost->host_lock, lock_flags);
1932 scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun);
1933 return 0; 1932 return 0;
1934} 1933}
1935 1934
@@ -1942,17 +1941,11 @@ static int ibmvscsi_slave_configure(struct scsi_device *sdev)
1942 * Return value: 1941 * Return value:
1943 * actual depth set 1942 * actual depth set
1944 **/ 1943 **/
1945static int ibmvscsi_change_queue_depth(struct scsi_device *sdev, int qdepth, 1944static int ibmvscsi_change_queue_depth(struct scsi_device *sdev, int qdepth)
1946 int reason)
1947{ 1945{
1948 if (reason != SCSI_QDEPTH_DEFAULT)
1949 return -EOPNOTSUPP;
1950
1951 if (qdepth > IBMVSCSI_MAX_CMDS_PER_LUN) 1946 if (qdepth > IBMVSCSI_MAX_CMDS_PER_LUN)
1952 qdepth = IBMVSCSI_MAX_CMDS_PER_LUN; 1947 qdepth = IBMVSCSI_MAX_CMDS_PER_LUN;
1953 1948 return scsi_change_queue_depth(sdev, qdepth);
1954 scsi_adjust_queue_depth(sdev, 0, qdepth);
1955 return sdev->queue_depth;
1956} 1949}
1957 1950
1958/* ------------------------------------------------------------ 1951/* ------------------------------------------------------------
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 2a9578c116b7..540294389355 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -3942,8 +3942,9 @@ static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3942 return -EIO; 3942 return -EIO;
3943 } 3943 }
3944 3944
3945 sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist, 3945 sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev,
3946 sglist->num_sg, DMA_TO_DEVICE); 3946 sglist->scatterlist, sglist->num_sg,
3947 DMA_TO_DEVICE);
3947 3948
3948 if (!sglist->num_dma_sg) { 3949 if (!sglist->num_dma_sg) {
3949 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3950 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
@@ -4327,16 +4328,12 @@ static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
4327 * Return value: 4328 * Return value:
4328 * actual depth set 4329 * actual depth set
4329 **/ 4330 **/
4330static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth, 4331static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
4331 int reason)
4332{ 4332{
4333 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; 4333 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4334 struct ipr_resource_entry *res; 4334 struct ipr_resource_entry *res;
4335 unsigned long lock_flags = 0; 4335 unsigned long lock_flags = 0;
4336 4336
4337 if (reason != SCSI_QDEPTH_DEFAULT)
4338 return -EOPNOTSUPP;
4339
4340 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4337 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4341 res = (struct ipr_resource_entry *)sdev->hostdata; 4338 res = (struct ipr_resource_entry *)sdev->hostdata;
4342 4339
@@ -4344,7 +4341,7 @@ static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth,
4344 qdepth = IPR_MAX_CMD_PER_ATA_LUN; 4341 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4345 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4342 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4346 4343
4347 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); 4344 scsi_change_queue_depth(sdev, qdepth);
4348 return sdev->queue_depth; 4345 return sdev->queue_depth;
4349} 4346}
4350 4347
@@ -4364,24 +4361,10 @@ static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
4364 4361
4365 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4362 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4366 res = (struct ipr_resource_entry *)sdev->hostdata; 4363 res = (struct ipr_resource_entry *)sdev->hostdata;
4367 4364 if (res && ipr_is_gscsi(res))
4368 if (res) { 4365 tag_type = scsi_change_queue_type(sdev, tag_type);
4369 if (ipr_is_gscsi(res) && sdev->tagged_supported) { 4366 else
4370 /*
4371 * We don't bother quiescing the device here since the
4372 * adapter firmware does it for us.
4373 */
4374 scsi_set_tag_type(sdev, tag_type);
4375
4376 if (tag_type)
4377 scsi_activate_tcq(sdev, sdev->queue_depth);
4378 else
4379 scsi_deactivate_tcq(sdev, sdev->queue_depth);
4380 } else
4381 tag_type = 0;
4382 } else
4383 tag_type = 0; 4367 tag_type = 0;
4384
4385 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4368 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4386 return tag_type; 4369 return tag_type;
4387} 4370}
@@ -4765,10 +4748,10 @@ static int ipr_slave_configure(struct scsi_device *sdev)
4765 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4748 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4766 4749
4767 if (ap) { 4750 if (ap) {
4768 scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN); 4751 scsi_change_queue_depth(sdev, IPR_MAX_CMD_PER_ATA_LUN);
4769 ata_sas_slave_configure(sdev, ap); 4752 ata_sas_slave_configure(sdev, ap);
4770 } else 4753 }
4771 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun); 4754
4772 if (ioa_cfg->sis64) 4755 if (ioa_cfg->sis64)
4773 sdev_printk(KERN_INFO, sdev, "Resource path: %s\n", 4756 sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4774 ipr_format_res_path(ioa_cfg, 4757 ipr_format_res_path(ioa_cfg,
@@ -5585,7 +5568,7 @@ static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5585 nseg = scsi_dma_map(scsi_cmd); 5568 nseg = scsi_dma_map(scsi_cmd);
5586 if (nseg < 0) { 5569 if (nseg < 0) {
5587 if (printk_ratelimit()) 5570 if (printk_ratelimit())
5588 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n"); 5571 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5589 return -1; 5572 return -1;
5590 } 5573 }
5591 5574
@@ -5636,7 +5619,7 @@ static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5636 5619
5637 nseg = scsi_dma_map(scsi_cmd); 5620 nseg = scsi_dma_map(scsi_cmd);
5638 if (nseg < 0) { 5621 if (nseg < 0) {
5639 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n"); 5622 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5640 return -1; 5623 return -1;
5641 } 5624 }
5642 5625
@@ -5673,35 +5656,6 @@ static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5673} 5656}
5674 5657
5675/** 5658/**
5676 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
5677 * @scsi_cmd: scsi command struct
5678 *
5679 * Return value:
5680 * task attributes
5681 **/
5682static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
5683{
5684 u8 tag[2];
5685 u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
5686
5687 if (scsi_populate_tag_msg(scsi_cmd, tag)) {
5688 switch (tag[0]) {
5689 case MSG_SIMPLE_TAG:
5690 rc = IPR_FLAGS_LO_SIMPLE_TASK;
5691 break;
5692 case MSG_HEAD_TAG:
5693 rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
5694 break;
5695 case MSG_ORDERED_TAG:
5696 rc = IPR_FLAGS_LO_ORDERED_TASK;
5697 break;
5698 };
5699 }
5700
5701 return rc;
5702}
5703
5704/**
5705 * ipr_erp_done - Process completion of ERP for a device 5659 * ipr_erp_done - Process completion of ERP for a device
5706 * @ipr_cmd: ipr command struct 5660 * @ipr_cmd: ipr command struct
5707 * 5661 *
@@ -6236,7 +6190,10 @@ static int ipr_queuecommand(struct Scsi_Host *shost,
6236 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST; 6190 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
6237 } 6191 }
6238 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR; 6192 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
6239 ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd); 6193 if (scsi_cmd->flags & SCMD_TAGGED)
6194 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK;
6195 else
6196 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK;
6240 } 6197 }
6241 6198
6242 if (scsi_cmd->cmnd[0] >= 0xC0 && 6199 if (scsi_cmd->cmnd[0] >= 0xC0 &&
@@ -6357,6 +6314,7 @@ static struct scsi_host_template driver_template = {
6357 .sdev_attrs = ipr_dev_attrs, 6314 .sdev_attrs = ipr_dev_attrs,
6358 .proc_name = IPR_NAME, 6315 .proc_name = IPR_NAME,
6359 .no_write_same = 1, 6316 .no_write_same = 1,
6317 .use_blk_tags = 1,
6360}; 6318};
6361 6319
6362/** 6320/**
@@ -8431,7 +8389,7 @@ static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
8431 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8389 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8432 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist; 8390 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8433 8391
8434 pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist, 8392 dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist,
8435 sglist->num_sg, DMA_TO_DEVICE); 8393 sglist->num_sg, DMA_TO_DEVICE);
8436 8394
8437 ipr_cmd->job_step = ipr_reset_alert; 8395 ipr_cmd->job_step = ipr_reset_alert;
@@ -8871,7 +8829,7 @@ static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8871 8829
8872 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) { 8830 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
8873 if (ioa_cfg->ipr_cmnd_list[i]) 8831 if (ioa_cfg->ipr_cmnd_list[i])
8874 pci_pool_free(ioa_cfg->ipr_cmd_pool, 8832 dma_pool_free(ioa_cfg->ipr_cmd_pool,
8875 ioa_cfg->ipr_cmnd_list[i], 8833 ioa_cfg->ipr_cmnd_list[i],
8876 ioa_cfg->ipr_cmnd_list_dma[i]); 8834 ioa_cfg->ipr_cmnd_list_dma[i]);
8877 8835
@@ -8879,7 +8837,7 @@ static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8879 } 8837 }
8880 8838
8881 if (ioa_cfg->ipr_cmd_pool) 8839 if (ioa_cfg->ipr_cmd_pool)
8882 pci_pool_destroy(ioa_cfg->ipr_cmd_pool); 8840 dma_pool_destroy(ioa_cfg->ipr_cmd_pool);
8883 8841
8884 kfree(ioa_cfg->ipr_cmnd_list); 8842 kfree(ioa_cfg->ipr_cmnd_list);
8885 kfree(ioa_cfg->ipr_cmnd_list_dma); 8843 kfree(ioa_cfg->ipr_cmnd_list_dma);
@@ -8900,25 +8858,24 @@ static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
8900 int i; 8858 int i;
8901 8859
8902 kfree(ioa_cfg->res_entries); 8860 kfree(ioa_cfg->res_entries);
8903 pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs), 8861 dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs),
8904 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma); 8862 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
8905 ipr_free_cmd_blks(ioa_cfg); 8863 ipr_free_cmd_blks(ioa_cfg);
8906 8864
8907 for (i = 0; i < ioa_cfg->hrrq_num; i++) 8865 for (i = 0; i < ioa_cfg->hrrq_num; i++)
8908 pci_free_consistent(ioa_cfg->pdev, 8866 dma_free_coherent(&ioa_cfg->pdev->dev,
8909 sizeof(u32) * ioa_cfg->hrrq[i].size, 8867 sizeof(u32) * ioa_cfg->hrrq[i].size,
8910 ioa_cfg->hrrq[i].host_rrq, 8868 ioa_cfg->hrrq[i].host_rrq,
8911 ioa_cfg->hrrq[i].host_rrq_dma); 8869 ioa_cfg->hrrq[i].host_rrq_dma);
8912 8870
8913 pci_free_consistent(ioa_cfg->pdev, ioa_cfg->cfg_table_size, 8871 dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size,
8914 ioa_cfg->u.cfg_table, 8872 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
8915 ioa_cfg->cfg_table_dma);
8916 8873
8917 for (i = 0; i < IPR_NUM_HCAMS; i++) { 8874 for (i = 0; i < IPR_NUM_HCAMS; i++) {
8918 pci_free_consistent(ioa_cfg->pdev, 8875 dma_free_coherent(&ioa_cfg->pdev->dev,
8919 sizeof(struct ipr_hostrcb), 8876 sizeof(struct ipr_hostrcb),
8920 ioa_cfg->hostrcb[i], 8877 ioa_cfg->hostrcb[i],
8921 ioa_cfg->hostrcb_dma[i]); 8878 ioa_cfg->hostrcb_dma[i]);
8922 } 8879 }
8923 8880
8924 ipr_free_dump(ioa_cfg); 8881 ipr_free_dump(ioa_cfg);
@@ -8979,7 +8936,7 @@ static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8979 dma_addr_t dma_addr; 8936 dma_addr_t dma_addr;
8980 int i, entries_each_hrrq, hrrq_id = 0; 8937 int i, entries_each_hrrq, hrrq_id = 0;
8981 8938
8982 ioa_cfg->ipr_cmd_pool = pci_pool_create(IPR_NAME, ioa_cfg->pdev, 8939 ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev,
8983 sizeof(struct ipr_cmnd), 512, 0); 8940 sizeof(struct ipr_cmnd), 512, 0);
8984 8941
8985 if (!ioa_cfg->ipr_cmd_pool) 8942 if (!ioa_cfg->ipr_cmd_pool)
@@ -9029,7 +8986,7 @@ static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9029 } 8986 }
9030 8987
9031 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) { 8988 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9032 ipr_cmd = pci_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr); 8989 ipr_cmd = dma_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
9033 8990
9034 if (!ipr_cmd) { 8991 if (!ipr_cmd) {
9035 ipr_free_cmd_blks(ioa_cfg); 8992 ipr_free_cmd_blks(ioa_cfg);
@@ -9100,9 +9057,10 @@ static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
9100 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg; 9057 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
9101 } 9058 }
9102 9059
9103 ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev, 9060 ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev,
9104 sizeof(struct ipr_misc_cbs), 9061 sizeof(struct ipr_misc_cbs),
9105 &ioa_cfg->vpd_cbs_dma); 9062 &ioa_cfg->vpd_cbs_dma,
9063 GFP_KERNEL);
9106 9064
9107 if (!ioa_cfg->vpd_cbs) 9065 if (!ioa_cfg->vpd_cbs)
9108 goto out_free_res_entries; 9066 goto out_free_res_entries;
@@ -9111,13 +9069,14 @@ static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
9111 goto out_free_vpd_cbs; 9069 goto out_free_vpd_cbs;
9112 9070
9113 for (i = 0; i < ioa_cfg->hrrq_num; i++) { 9071 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9114 ioa_cfg->hrrq[i].host_rrq = pci_alloc_consistent(ioa_cfg->pdev, 9072 ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev,
9115 sizeof(u32) * ioa_cfg->hrrq[i].size, 9073 sizeof(u32) * ioa_cfg->hrrq[i].size,
9116 &ioa_cfg->hrrq[i].host_rrq_dma); 9074 &ioa_cfg->hrrq[i].host_rrq_dma,
9075 GFP_KERNEL);
9117 9076
9118 if (!ioa_cfg->hrrq[i].host_rrq) { 9077 if (!ioa_cfg->hrrq[i].host_rrq) {
9119 while (--i > 0) 9078 while (--i > 0)
9120 pci_free_consistent(pdev, 9079 dma_free_coherent(&pdev->dev,
9121 sizeof(u32) * ioa_cfg->hrrq[i].size, 9080 sizeof(u32) * ioa_cfg->hrrq[i].size,
9122 ioa_cfg->hrrq[i].host_rrq, 9081 ioa_cfg->hrrq[i].host_rrq,
9123 ioa_cfg->hrrq[i].host_rrq_dma); 9082 ioa_cfg->hrrq[i].host_rrq_dma);
@@ -9126,17 +9085,19 @@ static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
9126 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg; 9085 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
9127 } 9086 }
9128 9087
9129 ioa_cfg->u.cfg_table = pci_alloc_consistent(ioa_cfg->pdev, 9088 ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev,
9130 ioa_cfg->cfg_table_size, 9089 ioa_cfg->cfg_table_size,
9131 &ioa_cfg->cfg_table_dma); 9090 &ioa_cfg->cfg_table_dma,
9091 GFP_KERNEL);
9132 9092
9133 if (!ioa_cfg->u.cfg_table) 9093 if (!ioa_cfg->u.cfg_table)
9134 goto out_free_host_rrq; 9094 goto out_free_host_rrq;
9135 9095
9136 for (i = 0; i < IPR_NUM_HCAMS; i++) { 9096 for (i = 0; i < IPR_NUM_HCAMS; i++) {
9137 ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev, 9097 ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev,
9138 sizeof(struct ipr_hostrcb), 9098 sizeof(struct ipr_hostrcb),
9139 &ioa_cfg->hostrcb_dma[i]); 9099 &ioa_cfg->hostrcb_dma[i],
9100 GFP_KERNEL);
9140 9101
9141 if (!ioa_cfg->hostrcb[i]) 9102 if (!ioa_cfg->hostrcb[i])
9142 goto out_free_hostrcb_dma; 9103 goto out_free_hostrcb_dma;
@@ -9160,25 +9121,24 @@ out:
9160 9121
9161out_free_hostrcb_dma: 9122out_free_hostrcb_dma:
9162 while (i-- > 0) { 9123 while (i-- > 0) {
9163 pci_free_consistent(pdev, sizeof(struct ipr_hostrcb), 9124 dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb),
9164 ioa_cfg->hostrcb[i], 9125 ioa_cfg->hostrcb[i],
9165 ioa_cfg->hostrcb_dma[i]); 9126 ioa_cfg->hostrcb_dma[i]);
9166 } 9127 }
9167 pci_free_consistent(pdev, ioa_cfg->cfg_table_size, 9128 dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size,
9168 ioa_cfg->u.cfg_table, 9129 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
9169 ioa_cfg->cfg_table_dma);
9170out_free_host_rrq: 9130out_free_host_rrq:
9171 for (i = 0; i < ioa_cfg->hrrq_num; i++) { 9131 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9172 pci_free_consistent(pdev, 9132 dma_free_coherent(&pdev->dev,
9173 sizeof(u32) * ioa_cfg->hrrq[i].size, 9133 sizeof(u32) * ioa_cfg->hrrq[i].size,
9174 ioa_cfg->hrrq[i].host_rrq, 9134 ioa_cfg->hrrq[i].host_rrq,
9175 ioa_cfg->hrrq[i].host_rrq_dma); 9135 ioa_cfg->hrrq[i].host_rrq_dma);
9176 } 9136 }
9177out_ipr_free_cmd_blocks: 9137out_ipr_free_cmd_blocks:
9178 ipr_free_cmd_blks(ioa_cfg); 9138 ipr_free_cmd_blks(ioa_cfg);
9179out_free_vpd_cbs: 9139out_free_vpd_cbs:
9180 pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs), 9140 dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs),
9181 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma); 9141 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9182out_free_res_entries: 9142out_free_res_entries:
9183 kfree(ioa_cfg->res_entries); 9143 kfree(ioa_cfg->res_entries);
9184 goto out; 9144 goto out;
@@ -9618,16 +9578,17 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
9618 ipr_init_regs(ioa_cfg); 9578 ipr_init_regs(ioa_cfg);
9619 9579
9620 if (ioa_cfg->sis64) { 9580 if (ioa_cfg->sis64) {
9621 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 9581 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9622 if (rc < 0) { 9582 if (rc < 0) {
9623 dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n"); 9583 dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n");
9624 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 9584 rc = dma_set_mask_and_coherent(&pdev->dev,
9585 DMA_BIT_MASK(32));
9625 } 9586 }
9626 } else 9587 } else
9627 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 9588 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9628 9589
9629 if (rc < 0) { 9590 if (rc < 0) {
9630 dev_err(&pdev->dev, "Failed to set PCI DMA mask\n"); 9591 dev_err(&pdev->dev, "Failed to set DMA mask\n");
9631 goto cleanup_nomem; 9592 goto cleanup_nomem;
9632 } 9593 }
9633 9594
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index d0201ceb4aac..9ebdebd944e7 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -1549,7 +1549,7 @@ struct ipr_ioa_cfg {
1549 struct ipr_misc_cbs *vpd_cbs; 1549 struct ipr_misc_cbs *vpd_cbs;
1550 dma_addr_t vpd_cbs_dma; 1550 dma_addr_t vpd_cbs_dma;
1551 1551
1552 struct pci_pool *ipr_cmd_pool; 1552 struct dma_pool *ipr_cmd_pool;
1553 1553
1554 struct ipr_cmnd *reset_cmd; 1554 struct ipr_cmnd *reset_cmd;
1555 int (*reset) (struct ipr_cmnd *); 1555 int (*reset) (struct ipr_cmnd *);
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index e5afc3884d74..e5c28435d768 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -1210,7 +1210,7 @@ ips_slave_configure(struct scsi_device * SDptr)
1210 min = ha->max_cmds / 2; 1210 min = ha->max_cmds / 2;
1211 if (ha->enq->ucLogDriveCount <= 2) 1211 if (ha->enq->ucLogDriveCount <= 2)
1212 min = ha->max_cmds - 1; 1212 min = ha->max_cmds - 1;
1213 scsi_adjust_queue_depth(SDptr, MSG_ORDERED_TAG, min); 1213 scsi_change_queue_depth(SDptr, min);
1214 } 1214 }
1215 1215
1216 SDptr->skip_ms_page_8 = 1; 1216 SDptr->skip_ms_page_8 = 1;
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index 2e890b1e2526..724c6265b667 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -172,6 +172,8 @@ static struct scsi_host_template isci_sht = {
172 .target_destroy = sas_target_destroy, 172 .target_destroy = sas_target_destroy,
173 .ioctl = sas_ioctl, 173 .ioctl = sas_ioctl,
174 .shost_attrs = isci_host_attrs, 174 .shost_attrs = isci_host_attrs,
175 .use_blk_tags = 1,
176 .track_queue_depth = 1,
175}; 177};
176 178
177static struct sas_domain_function_template isci_transport_ops = { 179static struct sas_domain_function_template isci_transport_ops = {
@@ -258,8 +260,6 @@ static int isci_register_sas_ha(struct isci_host *isci_host)
258 sas_ha->sas_port = sas_ports; 260 sas_ha->sas_port = sas_ports;
259 sas_ha->num_phys = SCI_MAX_PHYS; 261 sas_ha->num_phys = SCI_MAX_PHYS;
260 262
261 sas_ha->lldd_queue_size = ISCI_CAN_QUEUE_VAL;
262 sas_ha->lldd_max_execute_num = 1;
263 sas_ha->strict_wide_ports = 1; 263 sas_ha->strict_wide_ports = 1;
264 264
265 sas_register_ha(sas_ha); 265 sas_register_ha(sas_ha);
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
index 5d6fda72d659..3f63c6318b0d 100644
--- a/drivers/scsi/isci/task.c
+++ b/drivers/scsi/isci/task.c
@@ -117,104 +117,97 @@ static inline int isci_device_io_ready(struct isci_remote_device *idev,
117 * functions. This function is called by libsas to send a task down to 117 * functions. This function is called by libsas to send a task down to
118 * hardware. 118 * hardware.
119 * @task: This parameter specifies the SAS task to send. 119 * @task: This parameter specifies the SAS task to send.
120 * @num: This parameter specifies the number of tasks to queue.
121 * @gfp_flags: This parameter specifies the context of this call. 120 * @gfp_flags: This parameter specifies the context of this call.
122 * 121 *
123 * status, zero indicates success. 122 * status, zero indicates success.
124 */ 123 */
125int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags) 124int isci_task_execute_task(struct sas_task *task, gfp_t gfp_flags)
126{ 125{
127 struct isci_host *ihost = dev_to_ihost(task->dev); 126 struct isci_host *ihost = dev_to_ihost(task->dev);
128 struct isci_remote_device *idev; 127 struct isci_remote_device *idev;
129 unsigned long flags; 128 unsigned long flags;
129 enum sci_status status = SCI_FAILURE;
130 bool io_ready; 130 bool io_ready;
131 u16 tag; 131 u16 tag;
132 132
133 dev_dbg(&ihost->pdev->dev, "%s: num=%d\n", __func__, num); 133 spin_lock_irqsave(&ihost->scic_lock, flags);
134 idev = isci_lookup_device(task->dev);
135 io_ready = isci_device_io_ready(idev, task);
136 tag = isci_alloc_tag(ihost);
137 spin_unlock_irqrestore(&ihost->scic_lock, flags);
134 138
135 for_each_sas_task(num, task) { 139 dev_dbg(&ihost->pdev->dev,
136 enum sci_status status = SCI_FAILURE; 140 "task: %p, dev: %p idev: %p:%#lx cmd = %p\n",
141 task, task->dev, idev, idev ? idev->flags : 0,
142 task->uldd_task);
137 143
138 spin_lock_irqsave(&ihost->scic_lock, flags); 144 if (!idev) {
139 idev = isci_lookup_device(task->dev); 145 isci_task_refuse(ihost, task, SAS_TASK_UNDELIVERED,
140 io_ready = isci_device_io_ready(idev, task); 146 SAS_DEVICE_UNKNOWN);
141 tag = isci_alloc_tag(ihost); 147 } else if (!io_ready || tag == SCI_CONTROLLER_INVALID_IO_TAG) {
142 spin_unlock_irqrestore(&ihost->scic_lock, flags); 148 /* Indicate QUEUE_FULL so that the scsi midlayer
149 * retries.
150 */
151 isci_task_refuse(ihost, task, SAS_TASK_COMPLETE,
152 SAS_QUEUE_FULL);
153 } else {
154 /* There is a device and it's ready for I/O. */
155 spin_lock_irqsave(&task->task_state_lock, flags);
143 156
144 dev_dbg(&ihost->pdev->dev, 157 if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
145 "task: %p, num: %d dev: %p idev: %p:%#lx cmd = %p\n", 158 /* The I/O was aborted. */
146 task, num, task->dev, idev, idev ? idev->flags : 0, 159 spin_unlock_irqrestore(&task->task_state_lock, flags);
147 task->uldd_task); 160
148 161 isci_task_refuse(ihost, task,
149 if (!idev) { 162 SAS_TASK_UNDELIVERED,
150 isci_task_refuse(ihost, task, SAS_TASK_UNDELIVERED, 163 SAM_STAT_TASK_ABORTED);
151 SAS_DEVICE_UNKNOWN);
152 } else if (!io_ready || tag == SCI_CONTROLLER_INVALID_IO_TAG) {
153 /* Indicate QUEUE_FULL so that the scsi midlayer
154 * retries.
155 */
156 isci_task_refuse(ihost, task, SAS_TASK_COMPLETE,
157 SAS_QUEUE_FULL);
158 } else { 164 } else {
159 /* There is a device and it's ready for I/O. */ 165 task->task_state_flags |= SAS_TASK_AT_INITIATOR;
160 spin_lock_irqsave(&task->task_state_lock, flags); 166 spin_unlock_irqrestore(&task->task_state_lock, flags);
161 167
162 if (task->task_state_flags & SAS_TASK_STATE_ABORTED) { 168 /* build and send the request. */
163 /* The I/O was aborted. */ 169 status = isci_request_execute(ihost, idev, task, tag);
164 spin_unlock_irqrestore(&task->task_state_lock, 170
165 flags); 171 if (status != SCI_SUCCESS) {
166 172 spin_lock_irqsave(&task->task_state_lock, flags);
167 isci_task_refuse(ihost, task, 173 /* Did not really start this command. */
168 SAS_TASK_UNDELIVERED, 174 task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
169 SAM_STAT_TASK_ABORTED);
170 } else {
171 task->task_state_flags |= SAS_TASK_AT_INITIATOR;
172 spin_unlock_irqrestore(&task->task_state_lock, flags); 175 spin_unlock_irqrestore(&task->task_state_lock, flags);
173 176
174 /* build and send the request. */ 177 if (test_bit(IDEV_GONE, &idev->flags)) {
175 status = isci_request_execute(ihost, idev, task, tag); 178 /* Indicate that the device
176 179 * is gone.
177 if (status != SCI_SUCCESS) { 180 */
178 181 isci_task_refuse(ihost, task,
179 spin_lock_irqsave(&task->task_state_lock, flags); 182 SAS_TASK_UNDELIVERED,
180 /* Did not really start this command. */ 183 SAS_DEVICE_UNKNOWN);
181 task->task_state_flags &= ~SAS_TASK_AT_INITIATOR; 184 } else {
182 spin_unlock_irqrestore(&task->task_state_lock, flags); 185 /* Indicate QUEUE_FULL so that
183 186 * the scsi midlayer retries.
184 if (test_bit(IDEV_GONE, &idev->flags)) { 187 * If the request failed for
185 188 * remote device reasons, it
186 /* Indicate that the device 189 * gets returned as
187 * is gone. 190 * SAS_TASK_UNDELIVERED next
188 */ 191 * time through.
189 isci_task_refuse(ihost, task, 192 */
190 SAS_TASK_UNDELIVERED, 193 isci_task_refuse(ihost, task,
191 SAS_DEVICE_UNKNOWN); 194 SAS_TASK_COMPLETE,
192 } else { 195 SAS_QUEUE_FULL);
193 /* Indicate QUEUE_FULL so that
194 * the scsi midlayer retries.
195 * If the request failed for
196 * remote device reasons, it
197 * gets returned as
198 * SAS_TASK_UNDELIVERED next
199 * time through.
200 */
201 isci_task_refuse(ihost, task,
202 SAS_TASK_COMPLETE,
203 SAS_QUEUE_FULL);
204 }
205 } 196 }
206 } 197 }
207 } 198 }
208 if (status != SCI_SUCCESS && tag != SCI_CONTROLLER_INVALID_IO_TAG) {
209 spin_lock_irqsave(&ihost->scic_lock, flags);
210 /* command never hit the device, so just free
211 * the tci and skip the sequence increment
212 */
213 isci_tci_free(ihost, ISCI_TAG_TCI(tag));
214 spin_unlock_irqrestore(&ihost->scic_lock, flags);
215 }
216 isci_put_device(idev);
217 } 199 }
200
201 if (status != SCI_SUCCESS && tag != SCI_CONTROLLER_INVALID_IO_TAG) {
202 spin_lock_irqsave(&ihost->scic_lock, flags);
203 /* command never hit the device, so just free
204 * the tci and skip the sequence increment
205 */
206 isci_tci_free(ihost, ISCI_TAG_TCI(tag));
207 spin_unlock_irqrestore(&ihost->scic_lock, flags);
208 }
209
210 isci_put_device(idev);
218 return 0; 211 return 0;
219} 212}
220 213
diff --git a/drivers/scsi/isci/task.h b/drivers/scsi/isci/task.h
index 9c06cbad1d26..8f4531f22ac2 100644
--- a/drivers/scsi/isci/task.h
+++ b/drivers/scsi/isci/task.h
@@ -131,7 +131,6 @@ static inline void isci_print_tmf(struct isci_host *ihost, struct isci_tmf *tmf)
131 131
132int isci_task_execute_task( 132int isci_task_execute_task(
133 struct sas_task *task, 133 struct sas_task *task,
134 int num,
135 gfp_t gfp_flags); 134 gfp_t gfp_flags);
136 135
137int isci_task_abort_task( 136int isci_task_abort_task(
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 427af0f24b0f..0b8af186e707 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -952,7 +952,7 @@ static struct scsi_host_template iscsi_sw_tcp_sht = {
952 .module = THIS_MODULE, 952 .module = THIS_MODULE,
953 .name = "iSCSI Initiator over TCP/IP", 953 .name = "iSCSI Initiator over TCP/IP",
954 .queuecommand = iscsi_queuecommand, 954 .queuecommand = iscsi_queuecommand,
955 .change_queue_depth = iscsi_change_queue_depth, 955 .change_queue_depth = scsi_change_queue_depth,
956 .can_queue = ISCSI_DEF_XMIT_CMDS_MAX - 1, 956 .can_queue = ISCSI_DEF_XMIT_CMDS_MAX - 1,
957 .sg_tablesize = 4096, 957 .sg_tablesize = 4096,
958 .max_sectors = 0xFFFF, 958 .max_sectors = 0xFFFF,
@@ -966,6 +966,7 @@ static struct scsi_host_template iscsi_sw_tcp_sht = {
966 .target_alloc = iscsi_target_alloc, 966 .target_alloc = iscsi_target_alloc,
967 .proc_name = "iscsi_tcp", 967 .proc_name = "iscsi_tcp",
968 .this_id = -1, 968 .this_id = -1,
969 .track_queue_depth = 1,
969}; 970};
970 971
971static struct iscsi_transport iscsi_sw_tcp_transport = { 972static struct iscsi_transport iscsi_sw_tcp_transport = {
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 1d7e76e8b447..c6795941b45d 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -2160,62 +2160,12 @@ int fc_slave_alloc(struct scsi_device *sdev)
2160 if (!rport || fc_remote_port_chkready(rport)) 2160 if (!rport || fc_remote_port_chkready(rport))
2161 return -ENXIO; 2161 return -ENXIO;
2162 2162
2163 if (sdev->tagged_supported) 2163 scsi_change_queue_depth(sdev, FC_FCP_DFLT_QUEUE_DEPTH);
2164 scsi_activate_tcq(sdev, FC_FCP_DFLT_QUEUE_DEPTH);
2165 else
2166 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev),
2167 FC_FCP_DFLT_QUEUE_DEPTH);
2168
2169 return 0; 2164 return 0;
2170} 2165}
2171EXPORT_SYMBOL(fc_slave_alloc); 2166EXPORT_SYMBOL(fc_slave_alloc);
2172 2167
2173/** 2168/**
2174 * fc_change_queue_depth() - Change a device's queue depth
2175 * @sdev: The SCSI device whose queue depth is to change
2176 * @qdepth: The new queue depth
2177 * @reason: The resason for the change
2178 */
2179int fc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
2180{
2181 switch (reason) {
2182 case SCSI_QDEPTH_DEFAULT:
2183 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
2184 break;
2185 case SCSI_QDEPTH_QFULL:
2186 scsi_track_queue_full(sdev, qdepth);
2187 break;
2188 case SCSI_QDEPTH_RAMP_UP:
2189 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
2190 break;
2191 default:
2192 return -EOPNOTSUPP;
2193 }
2194 return sdev->queue_depth;
2195}
2196EXPORT_SYMBOL(fc_change_queue_depth);
2197
2198/**
2199 * fc_change_queue_type() - Change a device's queue type
2200 * @sdev: The SCSI device whose queue depth is to change
2201 * @tag_type: Identifier for queue type
2202 */
2203int fc_change_queue_type(struct scsi_device *sdev, int tag_type)
2204{
2205 if (sdev->tagged_supported) {
2206 scsi_set_tag_type(sdev, tag_type);
2207 if (tag_type)
2208 scsi_activate_tcq(sdev, sdev->queue_depth);
2209 else
2210 scsi_deactivate_tcq(sdev, sdev->queue_depth);
2211 } else
2212 tag_type = 0;
2213
2214 return tag_type;
2215}
2216EXPORT_SYMBOL(fc_change_queue_type);
2217
2218/**
2219 * fc_fcp_destory() - Tear down the FCP layer for a given local port 2169 * fc_fcp_destory() - Tear down the FCP layer for a given local port
2220 * @lport: The local port that no longer needs the FCP layer 2170 * @lport: The local port that no longer needs the FCP layer
2221 */ 2171 */
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 0d8bc6c66650..8053f24f0349 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1771,25 +1771,6 @@ fault:
1771} 1771}
1772EXPORT_SYMBOL_GPL(iscsi_queuecommand); 1772EXPORT_SYMBOL_GPL(iscsi_queuecommand);
1773 1773
1774int iscsi_change_queue_depth(struct scsi_device *sdev, int depth, int reason)
1775{
1776 switch (reason) {
1777 case SCSI_QDEPTH_DEFAULT:
1778 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
1779 break;
1780 case SCSI_QDEPTH_QFULL:
1781 scsi_track_queue_full(sdev, depth);
1782 break;
1783 case SCSI_QDEPTH_RAMP_UP:
1784 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
1785 break;
1786 default:
1787 return -EOPNOTSUPP;
1788 }
1789 return sdev->queue_depth;
1790}
1791EXPORT_SYMBOL_GPL(iscsi_change_queue_depth);
1792
1793int iscsi_target_alloc(struct scsi_target *starget) 1774int iscsi_target_alloc(struct scsi_target *starget)
1794{ 1775{
1795 struct iscsi_cls_session *cls_session = starget_to_session(starget); 1776 struct iscsi_cls_session *cls_session = starget_to_session(starget);
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 766098af4eb7..577770fdee86 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -171,7 +171,6 @@ static void sas_ata_task_done(struct sas_task *task)
171 spin_unlock_irqrestore(ap->lock, flags); 171 spin_unlock_irqrestore(ap->lock, flags);
172 172
173qc_already_gone: 173qc_already_gone:
174 list_del_init(&task->list);
175 sas_free_task(task); 174 sas_free_task(task);
176} 175}
177 176
@@ -244,12 +243,7 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
244 if (qc->scsicmd) 243 if (qc->scsicmd)
245 ASSIGN_SAS_TASK(qc->scsicmd, task); 244 ASSIGN_SAS_TASK(qc->scsicmd, task);
246 245
247 if (sas_ha->lldd_max_execute_num < 2) 246 ret = i->dft->lldd_execute_task(task, GFP_ATOMIC);
248 ret = i->dft->lldd_execute_task(task, 1, GFP_ATOMIC);
249 else
250 ret = sas_queue_up(task);
251
252 /* Examine */
253 if (ret) { 247 if (ret) {
254 SAS_DPRINTK("lldd_execute_task returned: %d\n", ret); 248 SAS_DPRINTK("lldd_execute_task returned: %d\n", ret);
255 249
@@ -485,7 +479,6 @@ static void sas_ata_internal_abort(struct sas_task *task)
485 479
486 return; 480 return;
487 out: 481 out:
488 list_del_init(&task->list);
489 sas_free_task(task); 482 sas_free_task(task);
490} 483}
491 484
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 0cac7d8fd0f7..022bb6e10d98 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -96,7 +96,7 @@ static int smp_execute_task(struct domain_device *dev, void *req, int req_size,
96 task->slow_task->timer.expires = jiffies + SMP_TIMEOUT*HZ; 96 task->slow_task->timer.expires = jiffies + SMP_TIMEOUT*HZ;
97 add_timer(&task->slow_task->timer); 97 add_timer(&task->slow_task->timer);
98 98
99 res = i->dft->lldd_execute_task(task, 1, GFP_KERNEL); 99 res = i->dft->lldd_execute_task(task, GFP_KERNEL);
100 100
101 if (res) { 101 if (res) {
102 del_timer(&task->slow_task->timer); 102 del_timer(&task->slow_task->timer);
diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c
index dbc8a793fd86..362da44f2948 100644
--- a/drivers/scsi/libsas/sas_init.c
+++ b/drivers/scsi/libsas/sas_init.c
@@ -45,7 +45,6 @@ struct sas_task *sas_alloc_task(gfp_t flags)
45 struct sas_task *task = kmem_cache_zalloc(sas_task_cache, flags); 45 struct sas_task *task = kmem_cache_zalloc(sas_task_cache, flags);
46 46
47 if (task) { 47 if (task) {
48 INIT_LIST_HEAD(&task->list);
49 spin_lock_init(&task->task_state_lock); 48 spin_lock_init(&task->task_state_lock);
50 task->task_state_flags = SAS_TASK_STATE_PENDING; 49 task->task_state_flags = SAS_TASK_STATE_PENDING;
51 } 50 }
@@ -77,7 +76,6 @@ EXPORT_SYMBOL_GPL(sas_alloc_slow_task);
77void sas_free_task(struct sas_task *task) 76void sas_free_task(struct sas_task *task)
78{ 77{
79 if (task) { 78 if (task) {
80 BUG_ON(!list_empty(&task->list));
81 kfree(task->slow_task); 79 kfree(task->slow_task);
82 kmem_cache_free(sas_task_cache, task); 80 kmem_cache_free(sas_task_cache, task);
83 } 81 }
@@ -127,11 +125,6 @@ int sas_register_ha(struct sas_ha_struct *sas_ha)
127 spin_lock_init(&sas_ha->phy_port_lock); 125 spin_lock_init(&sas_ha->phy_port_lock);
128 sas_hash_addr(sas_ha->hashed_sas_addr, sas_ha->sas_addr); 126 sas_hash_addr(sas_ha->hashed_sas_addr, sas_ha->sas_addr);
129 127
130 if (sas_ha->lldd_queue_size == 0)
131 sas_ha->lldd_queue_size = 1;
132 else if (sas_ha->lldd_queue_size == -1)
133 sas_ha->lldd_queue_size = 128; /* Sanity */
134
135 set_bit(SAS_HA_REGISTERED, &sas_ha->state); 128 set_bit(SAS_HA_REGISTERED, &sas_ha->state);
136 spin_lock_init(&sas_ha->lock); 129 spin_lock_init(&sas_ha->lock);
137 mutex_init(&sas_ha->drain_mutex); 130 mutex_init(&sas_ha->drain_mutex);
@@ -157,15 +150,6 @@ int sas_register_ha(struct sas_ha_struct *sas_ha)
157 goto Undo_ports; 150 goto Undo_ports;
158 } 151 }
159 152
160 if (sas_ha->lldd_max_execute_num > 1) {
161 error = sas_init_queue(sas_ha);
162 if (error) {
163 printk(KERN_NOTICE "couldn't start queue thread:%d, "
164 "running in direct mode\n", error);
165 sas_ha->lldd_max_execute_num = 1;
166 }
167 }
168
169 INIT_LIST_HEAD(&sas_ha->eh_done_q); 153 INIT_LIST_HEAD(&sas_ha->eh_done_q);
170 INIT_LIST_HEAD(&sas_ha->eh_ata_q); 154 INIT_LIST_HEAD(&sas_ha->eh_ata_q);
171 155
@@ -201,11 +185,6 @@ int sas_unregister_ha(struct sas_ha_struct *sas_ha)
201 __sas_drain_work(sas_ha); 185 __sas_drain_work(sas_ha);
202 mutex_unlock(&sas_ha->drain_mutex); 186 mutex_unlock(&sas_ha->drain_mutex);
203 187
204 if (sas_ha->lldd_max_execute_num > 1) {
205 sas_shutdown_queue(sas_ha);
206 sas_ha->lldd_max_execute_num = 1;
207 }
208
209 return 0; 188 return 0;
210} 189}
211 190
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index 7e7ba83f0a21..9cf0bc260b0e 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -66,9 +66,7 @@ void sas_unregister_ports(struct sas_ha_struct *sas_ha);
66 66
67enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *); 67enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *);
68 68
69int sas_init_queue(struct sas_ha_struct *sas_ha);
70int sas_init_events(struct sas_ha_struct *sas_ha); 69int sas_init_events(struct sas_ha_struct *sas_ha);
71void sas_shutdown_queue(struct sas_ha_struct *sas_ha);
72void sas_disable_revalidation(struct sas_ha_struct *ha); 70void sas_disable_revalidation(struct sas_ha_struct *ha);
73void sas_enable_revalidation(struct sas_ha_struct *ha); 71void sas_enable_revalidation(struct sas_ha_struct *ha);
74void __sas_drain_work(struct sas_ha_struct *ha); 72void __sas_drain_work(struct sas_ha_struct *ha);
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 24e477d2ea70..72918d227ead 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -112,7 +112,6 @@ static void sas_end_task(struct scsi_cmnd *sc, struct sas_task *task)
112 112
113 sc->result = (hs << 16) | stat; 113 sc->result = (hs << 16) | stat;
114 ASSIGN_SAS_TASK(sc, NULL); 114 ASSIGN_SAS_TASK(sc, NULL);
115 list_del_init(&task->list);
116 sas_free_task(task); 115 sas_free_task(task);
117} 116}
118 117
@@ -138,7 +137,6 @@ static void sas_scsi_task_done(struct sas_task *task)
138 137
139 if (unlikely(!sc)) { 138 if (unlikely(!sc)) {
140 SAS_DPRINTK("task_done called with non existing SCSI cmnd!\n"); 139 SAS_DPRINTK("task_done called with non existing SCSI cmnd!\n");
141 list_del_init(&task->list);
142 sas_free_task(task); 140 sas_free_task(task);
143 return; 141 return;
144 } 142 }
@@ -179,31 +177,10 @@ static struct sas_task *sas_create_task(struct scsi_cmnd *cmd,
179 return task; 177 return task;
180} 178}
181 179
182int sas_queue_up(struct sas_task *task)
183{
184 struct sas_ha_struct *sas_ha = task->dev->port->ha;
185 struct scsi_core *core = &sas_ha->core;
186 unsigned long flags;
187 LIST_HEAD(list);
188
189 spin_lock_irqsave(&core->task_queue_lock, flags);
190 if (sas_ha->lldd_queue_size < core->task_queue_size + 1) {
191 spin_unlock_irqrestore(&core->task_queue_lock, flags);
192 return -SAS_QUEUE_FULL;
193 }
194 list_add_tail(&task->list, &core->task_queue);
195 core->task_queue_size += 1;
196 spin_unlock_irqrestore(&core->task_queue_lock, flags);
197 wake_up_process(core->queue_thread);
198
199 return 0;
200}
201
202int sas_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) 180int sas_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
203{ 181{
204 struct sas_internal *i = to_sas_internal(host->transportt); 182 struct sas_internal *i = to_sas_internal(host->transportt);
205 struct domain_device *dev = cmd_to_domain_dev(cmd); 183 struct domain_device *dev = cmd_to_domain_dev(cmd);
206 struct sas_ha_struct *sas_ha = dev->port->ha;
207 struct sas_task *task; 184 struct sas_task *task;
208 int res = 0; 185 int res = 0;
209 186
@@ -224,12 +201,7 @@ int sas_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
224 if (!task) 201 if (!task)
225 return SCSI_MLQUEUE_HOST_BUSY; 202 return SCSI_MLQUEUE_HOST_BUSY;
226 203
227 /* Queue up, Direct Mode or Task Collector Mode. */ 204 res = i->dft->lldd_execute_task(task, GFP_ATOMIC);
228 if (sas_ha->lldd_max_execute_num < 2)
229 res = i->dft->lldd_execute_task(task, 1, GFP_ATOMIC);
230 else
231 res = sas_queue_up(task);
232
233 if (res) 205 if (res)
234 goto out_free_task; 206 goto out_free_task;
235 return 0; 207 return 0;
@@ -323,37 +295,17 @@ enum task_disposition {
323 TASK_IS_DONE, 295 TASK_IS_DONE,
324 TASK_IS_ABORTED, 296 TASK_IS_ABORTED,
325 TASK_IS_AT_LU, 297 TASK_IS_AT_LU,
326 TASK_IS_NOT_AT_HA,
327 TASK_IS_NOT_AT_LU, 298 TASK_IS_NOT_AT_LU,
328 TASK_ABORT_FAILED, 299 TASK_ABORT_FAILED,
329}; 300};
330 301
331static enum task_disposition sas_scsi_find_task(struct sas_task *task) 302static enum task_disposition sas_scsi_find_task(struct sas_task *task)
332{ 303{
333 struct sas_ha_struct *ha = task->dev->port->ha;
334 unsigned long flags; 304 unsigned long flags;
335 int i, res; 305 int i, res;
336 struct sas_internal *si = 306 struct sas_internal *si =
337 to_sas_internal(task->dev->port->ha->core.shost->transportt); 307 to_sas_internal(task->dev->port->ha->core.shost->transportt);
338 308
339 if (ha->lldd_max_execute_num > 1) {
340 struct scsi_core *core = &ha->core;
341 struct sas_task *t, *n;
342
343 mutex_lock(&core->task_queue_flush);
344 spin_lock_irqsave(&core->task_queue_lock, flags);
345 list_for_each_entry_safe(t, n, &core->task_queue, list)
346 if (task == t) {
347 list_del_init(&t->list);
348 break;
349 }
350 spin_unlock_irqrestore(&core->task_queue_lock, flags);
351 mutex_unlock(&core->task_queue_flush);
352
353 if (task == t)
354 return TASK_IS_NOT_AT_HA;
355 }
356
357 for (i = 0; i < 5; i++) { 309 for (i = 0; i < 5; i++) {
358 SAS_DPRINTK("%s: aborting task 0x%p\n", __func__, task); 310 SAS_DPRINTK("%s: aborting task 0x%p\n", __func__, task);
359 res = si->dft->lldd_abort_task(task); 311 res = si->dft->lldd_abort_task(task);
@@ -667,14 +619,6 @@ static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head *
667 cmd->eh_eflags = 0; 619 cmd->eh_eflags = 0;
668 620
669 switch (res) { 621 switch (res) {
670 case TASK_IS_NOT_AT_HA:
671 SAS_DPRINTK("%s: task 0x%p is not at ha: %s\n",
672 __func__, task,
673 cmd->retries ? "retry" : "aborted");
674 if (cmd->retries)
675 cmd->retries--;
676 sas_eh_finish_cmd(cmd);
677 continue;
678 case TASK_IS_DONE: 622 case TASK_IS_DONE:
679 SAS_DPRINTK("%s: task 0x%p is done\n", __func__, 623 SAS_DPRINTK("%s: task 0x%p is done\n", __func__,
680 task); 624 task);
@@ -836,9 +780,6 @@ retry:
836 scsi_eh_ready_devs(shost, &eh_work_q, &ha->eh_done_q); 780 scsi_eh_ready_devs(shost, &eh_work_q, &ha->eh_done_q);
837 781
838out: 782out:
839 if (ha->lldd_max_execute_num > 1)
840 wake_up_process(ha->core.queue_thread);
841
842 sas_eh_handle_resets(shost); 783 sas_eh_handle_resets(shost);
843 784
844 /* now link into libata eh --- if we have any ata devices */ 785 /* now link into libata eh --- if we have any ata devices */
@@ -940,15 +881,12 @@ int sas_slave_configure(struct scsi_device *scsi_dev)
940 sas_read_port_mode_page(scsi_dev); 881 sas_read_port_mode_page(scsi_dev);
941 882
942 if (scsi_dev->tagged_supported) { 883 if (scsi_dev->tagged_supported) {
943 scsi_set_tag_type(scsi_dev, MSG_SIMPLE_TAG); 884 scsi_change_queue_depth(scsi_dev, SAS_DEF_QD);
944 scsi_activate_tcq(scsi_dev, SAS_DEF_QD);
945 } else { 885 } else {
946 SAS_DPRINTK("device %llx, LUN %llx doesn't support " 886 SAS_DPRINTK("device %llx, LUN %llx doesn't support "
947 "TCQ\n", SAS_ADDR(dev->sas_addr), 887 "TCQ\n", SAS_ADDR(dev->sas_addr),
948 scsi_dev->lun); 888 scsi_dev->lun);
949 scsi_dev->tagged_supported = 0; 889 scsi_change_queue_depth(scsi_dev, 1);
950 scsi_set_tag_type(scsi_dev, 0);
951 scsi_deactivate_tcq(scsi_dev, 1);
952 } 890 }
953 891
954 scsi_dev->allow_restart = 1; 892 scsi_dev->allow_restart = 1;
@@ -956,47 +894,23 @@ int sas_slave_configure(struct scsi_device *scsi_dev)
956 return 0; 894 return 0;
957} 895}
958 896
959int sas_change_queue_depth(struct scsi_device *sdev, int depth, int reason) 897int sas_change_queue_depth(struct scsi_device *sdev, int depth)
960{ 898{
961 struct domain_device *dev = sdev_to_domain_dev(sdev); 899 struct domain_device *dev = sdev_to_domain_dev(sdev);
962 900
963 if (dev_is_sata(dev)) 901 if (dev_is_sata(dev))
964 return __ata_change_queue_depth(dev->sata_dev.ap, sdev, depth, 902 return __ata_change_queue_depth(dev->sata_dev.ap, sdev, depth);
965 reason);
966
967 switch (reason) {
968 case SCSI_QDEPTH_DEFAULT:
969 case SCSI_QDEPTH_RAMP_UP:
970 if (!sdev->tagged_supported)
971 depth = 1;
972 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
973 break;
974 case SCSI_QDEPTH_QFULL:
975 scsi_track_queue_full(sdev, depth);
976 break;
977 default:
978 return -EOPNOTSUPP;
979 }
980 903
981 return depth; 904 if (!sdev->tagged_supported)
905 depth = 1;
906 return scsi_change_queue_depth(sdev, depth);
982} 907}
983 908
984int sas_change_queue_type(struct scsi_device *scsi_dev, int qt) 909int sas_change_queue_type(struct scsi_device *scsi_dev, int type)
985{ 910{
986 struct domain_device *dev = sdev_to_domain_dev(scsi_dev); 911 if (dev_is_sata(sdev_to_domain_dev(scsi_dev)))
987
988 if (dev_is_sata(dev))
989 return -EINVAL; 912 return -EINVAL;
990 913 return scsi_change_queue_type(scsi_dev, type);
991 if (!scsi_dev->tagged_supported)
992 return 0;
993
994 scsi_deactivate_tcq(scsi_dev, 1);
995
996 scsi_set_tag_type(scsi_dev, qt);
997 scsi_activate_tcq(scsi_dev, scsi_dev->queue_depth);
998
999 return qt;
1000} 914}
1001 915
1002int sas_bios_param(struct scsi_device *scsi_dev, 916int sas_bios_param(struct scsi_device *scsi_dev,
@@ -1011,121 +925,6 @@ int sas_bios_param(struct scsi_device *scsi_dev,
1011 return 0; 925 return 0;
1012} 926}
1013 927
1014/* ---------- Task Collector Thread implementation ---------- */
1015
1016static void sas_queue(struct sas_ha_struct *sas_ha)
1017{
1018 struct scsi_core *core = &sas_ha->core;
1019 unsigned long flags;
1020 LIST_HEAD(q);
1021 int can_queue;
1022 int res;
1023 struct sas_internal *i = to_sas_internal(core->shost->transportt);
1024
1025 mutex_lock(&core->task_queue_flush);
1026 spin_lock_irqsave(&core->task_queue_lock, flags);
1027 while (!kthread_should_stop() &&
1028 !list_empty(&core->task_queue) &&
1029 !test_bit(SAS_HA_FROZEN, &sas_ha->state)) {
1030
1031 can_queue = sas_ha->lldd_queue_size - core->task_queue_size;
1032 if (can_queue >= 0) {
1033 can_queue = core->task_queue_size;
1034 list_splice_init(&core->task_queue, &q);
1035 } else {
1036 struct list_head *a, *n;
1037
1038 can_queue = sas_ha->lldd_queue_size;
1039 list_for_each_safe(a, n, &core->task_queue) {
1040 list_move_tail(a, &q);
1041 if (--can_queue == 0)
1042 break;
1043 }
1044 can_queue = sas_ha->lldd_queue_size;
1045 }
1046 core->task_queue_size -= can_queue;
1047 spin_unlock_irqrestore(&core->task_queue_lock, flags);
1048 {
1049 struct sas_task *task = list_entry(q.next,
1050 struct sas_task,
1051 list);
1052 list_del_init(&q);
1053 res = i->dft->lldd_execute_task(task, can_queue,
1054 GFP_KERNEL);
1055 if (unlikely(res))
1056 __list_add(&q, task->list.prev, &task->list);
1057 }
1058 spin_lock_irqsave(&core->task_queue_lock, flags);
1059 if (res) {
1060 list_splice_init(&q, &core->task_queue); /*at head*/
1061 core->task_queue_size += can_queue;
1062 }
1063 }
1064 spin_unlock_irqrestore(&core->task_queue_lock, flags);
1065 mutex_unlock(&core->task_queue_flush);
1066}
1067
1068/**
1069 * sas_queue_thread -- The Task Collector thread
1070 * @_sas_ha: pointer to struct sas_ha
1071 */
1072static int sas_queue_thread(void *_sas_ha)
1073{
1074 struct sas_ha_struct *sas_ha = _sas_ha;
1075
1076 while (1) {
1077 set_current_state(TASK_INTERRUPTIBLE);
1078 schedule();
1079 sas_queue(sas_ha);
1080 if (kthread_should_stop())
1081 break;
1082 }
1083
1084 return 0;
1085}
1086
1087int sas_init_queue(struct sas_ha_struct *sas_ha)
1088{
1089 struct scsi_core *core = &sas_ha->core;
1090
1091 spin_lock_init(&core->task_queue_lock);
1092 mutex_init(&core->task_queue_flush);
1093 core->task_queue_size = 0;
1094 INIT_LIST_HEAD(&core->task_queue);
1095
1096 core->queue_thread = kthread_run(sas_queue_thread, sas_ha,
1097 "sas_queue_%d", core->shost->host_no);
1098 if (IS_ERR(core->queue_thread))
1099 return PTR_ERR(core->queue_thread);
1100 return 0;
1101}
1102
1103void sas_shutdown_queue(struct sas_ha_struct *sas_ha)
1104{
1105 unsigned long flags;
1106 struct scsi_core *core = &sas_ha->core;
1107 struct sas_task *task, *n;
1108
1109 kthread_stop(core->queue_thread);
1110
1111 if (!list_empty(&core->task_queue))
1112 SAS_DPRINTK("HA: %llx: scsi core task queue is NOT empty!?\n",
1113 SAS_ADDR(sas_ha->sas_addr));
1114
1115 spin_lock_irqsave(&core->task_queue_lock, flags);
1116 list_for_each_entry_safe(task, n, &core->task_queue, list) {
1117 struct scsi_cmnd *cmd = task->uldd_task;
1118
1119 list_del_init(&task->list);
1120
1121 ASSIGN_SAS_TASK(cmd, NULL);
1122 sas_free_task(task);
1123 cmd->result = DID_ABORT << 16;
1124 cmd->scsi_done(cmd);
1125 }
1126 spin_unlock_irqrestore(&core->task_queue_lock, flags);
1127}
1128
1129/* 928/*
1130 * Tell an upper layer that it needs to initiate an abort for a given task. 929 * Tell an upper layer that it needs to initiate an abort for a given task.
1131 * This should only ever be called by an LLDD. 930 * This should only ever be called by an LLDD.
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 786a2aff7b59..5633e7dadc08 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -968,8 +968,8 @@ lpfc_debugfs_dumpDif_open(struct inode *inode, struct file *file)
968 goto out; 968 goto out;
969 969
970 /* Round to page boundary */ 970 /* Round to page boundary */
971 printk(KERN_ERR "9060 BLKGRD: %s: _dump_buf_dif=0x%p file=%s\n", 971 printk(KERN_ERR "9060 BLKGRD: %s: _dump_buf_dif=0x%p file=%pD\n",
972 __func__, _dump_buf_dif, file->f_dentry->d_name.name); 972 __func__, _dump_buf_dif, file);
973 debug->buffer = _dump_buf_dif; 973 debug->buffer = _dump_buf_dif;
974 if (!debug->buffer) { 974 if (!debug->buffer) {
975 kfree(debug); 975 kfree(debug);
@@ -1011,7 +1011,7 @@ static ssize_t
1011lpfc_debugfs_dif_err_read(struct file *file, char __user *buf, 1011lpfc_debugfs_dif_err_read(struct file *file, char __user *buf,
1012 size_t nbytes, loff_t *ppos) 1012 size_t nbytes, loff_t *ppos)
1013{ 1013{
1014 struct dentry *dent = file->f_dentry; 1014 struct dentry *dent = file->f_path.dentry;
1015 struct lpfc_hba *phba = file->private_data; 1015 struct lpfc_hba *phba = file->private_data;
1016 char cbuf[32]; 1016 char cbuf[32];
1017 uint64_t tmp = 0; 1017 uint64_t tmp = 0;
@@ -1052,7 +1052,7 @@ static ssize_t
1052lpfc_debugfs_dif_err_write(struct file *file, const char __user *buf, 1052lpfc_debugfs_dif_err_write(struct file *file, const char __user *buf,
1053 size_t nbytes, loff_t *ppos) 1053 size_t nbytes, loff_t *ppos)
1054{ 1054{
1055 struct dentry *dent = file->f_dentry; 1055 struct dentry *dent = file->f_path.dentry;
1056 struct lpfc_hba *phba = file->private_data; 1056 struct lpfc_hba *phba = file->private_data;
1057 char dstbuf[32]; 1057 char dstbuf[32];
1058 uint64_t tmp = 0; 1058 uint64_t tmp = 0;
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index b99399fe2548..fd85952b621d 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -243,128 +243,6 @@ lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
243} 243}
244 244
245/** 245/**
246 * lpfc_send_sdev_queuedepth_change_event - Posts a queuedepth change event
247 * @phba: Pointer to HBA context object.
248 * @vport: Pointer to vport object.
249 * @ndlp: Pointer to FC node associated with the target.
250 * @lun: Lun number of the scsi device.
251 * @old_val: Old value of the queue depth.
252 * @new_val: New value of the queue depth.
253 *
254 * This function sends an event to the mgmt application indicating
255 * there is a change in the scsi device queue depth.
256 **/
257static void
258lpfc_send_sdev_queuedepth_change_event(struct lpfc_hba *phba,
259 struct lpfc_vport *vport,
260 struct lpfc_nodelist *ndlp,
261 uint64_t lun,
262 uint32_t old_val,
263 uint32_t new_val)
264{
265 struct lpfc_fast_path_event *fast_path_evt;
266 unsigned long flags;
267
268 fast_path_evt = lpfc_alloc_fast_evt(phba);
269 if (!fast_path_evt)
270 return;
271
272 fast_path_evt->un.queue_depth_evt.scsi_event.event_type =
273 FC_REG_SCSI_EVENT;
274 fast_path_evt->un.queue_depth_evt.scsi_event.subcategory =
275 LPFC_EVENT_VARQUEDEPTH;
276
277 /* Report all luns with change in queue depth */
278 fast_path_evt->un.queue_depth_evt.scsi_event.lun = lun;
279 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
280 memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwpn,
281 &ndlp->nlp_portname, sizeof(struct lpfc_name));
282 memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwnn,
283 &ndlp->nlp_nodename, sizeof(struct lpfc_name));
284 }
285
286 fast_path_evt->un.queue_depth_evt.oldval = old_val;
287 fast_path_evt->un.queue_depth_evt.newval = new_val;
288 fast_path_evt->vport = vport;
289
290 fast_path_evt->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
291 spin_lock_irqsave(&phba->hbalock, flags);
292 list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
293 spin_unlock_irqrestore(&phba->hbalock, flags);
294 lpfc_worker_wake_up(phba);
295
296 return;
297}
298
299/**
300 * lpfc_change_queue_depth - Alter scsi device queue depth
301 * @sdev: Pointer the scsi device on which to change the queue depth.
302 * @qdepth: New queue depth to set the sdev to.
303 * @reason: The reason for the queue depth change.
304 *
305 * This function is called by the midlayer and the LLD to alter the queue
306 * depth for a scsi device. This function sets the queue depth to the new
307 * value and sends an event out to log the queue depth change.
308 **/
309static int
310lpfc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
311{
312 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
313 struct lpfc_hba *phba = vport->phba;
314 struct lpfc_rport_data *rdata;
315 unsigned long new_queue_depth, old_queue_depth;
316
317 old_queue_depth = sdev->queue_depth;
318
319 switch (reason) {
320 case SCSI_QDEPTH_DEFAULT:
321 /* change request from sysfs, fall through */
322 case SCSI_QDEPTH_RAMP_UP:
323 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
324 break;
325 case SCSI_QDEPTH_QFULL:
326 if (scsi_track_queue_full(sdev, qdepth) == 0)
327 return sdev->queue_depth;
328
329 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
330 "0711 detected queue full - lun queue "
331 "depth adjusted to %d.\n", sdev->queue_depth);
332 break;
333 default:
334 return -EOPNOTSUPP;
335 }
336
337 new_queue_depth = sdev->queue_depth;
338 rdata = lpfc_rport_data_from_scsi_device(sdev);
339 if (rdata)
340 lpfc_send_sdev_queuedepth_change_event(phba, vport,
341 rdata->pnode, sdev->lun,
342 old_queue_depth,
343 new_queue_depth);
344 return sdev->queue_depth;
345}
346
347/**
348 * lpfc_change_queue_type() - Change a device's scsi tag queuing type
349 * @sdev: Pointer the scsi device whose queue depth is to change
350 * @tag_type: Identifier for queue tag type
351 */
352static int
353lpfc_change_queue_type(struct scsi_device *sdev, int tag_type)
354{
355 if (sdev->tagged_supported) {
356 scsi_set_tag_type(sdev, tag_type);
357 if (tag_type)
358 scsi_activate_tcq(sdev, sdev->queue_depth);
359 else
360 scsi_deactivate_tcq(sdev, sdev->queue_depth);
361 } else
362 tag_type = 0;
363
364 return tag_type;
365}
366
367/**
368 * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread 246 * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
369 * @phba: The Hba for which this call is being executed. 247 * @phba: The Hba for which this call is being executed.
370 * 248 *
@@ -449,8 +327,7 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
449 else 327 else
450 new_queue_depth = sdev->queue_depth - 328 new_queue_depth = sdev->queue_depth -
451 new_queue_depth; 329 new_queue_depth;
452 lpfc_change_queue_depth(sdev, new_queue_depth, 330 scsi_change_queue_depth(sdev, new_queue_depth);
453 SCSI_QDEPTH_DEFAULT);
454 } 331 }
455 } 332 }
456 lpfc_destroy_vport_work_array(phba, vports); 333 lpfc_destroy_vport_work_array(phba, vports);
@@ -4286,7 +4163,6 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
4286 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; 4163 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
4287 struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq); 4164 struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
4288 int datadir = scsi_cmnd->sc_data_direction; 4165 int datadir = scsi_cmnd->sc_data_direction;
4289 char tag[2];
4290 uint8_t *ptr; 4166 uint8_t *ptr;
4291 bool sli4; 4167 bool sli4;
4292 uint32_t fcpdl; 4168 uint32_t fcpdl;
@@ -4308,20 +4184,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
4308 memset(ptr, 0, (LPFC_FCP_CDB_LEN - scsi_cmnd->cmd_len)); 4184 memset(ptr, 0, (LPFC_FCP_CDB_LEN - scsi_cmnd->cmd_len));
4309 } 4185 }
4310 4186
4311 if (scsi_populate_tag_msg(scsi_cmnd, tag)) { 4187 fcp_cmnd->fcpCntl1 = SIMPLE_Q;
4312 switch (tag[0]) {
4313 case HEAD_OF_QUEUE_TAG:
4314 fcp_cmnd->fcpCntl1 = HEAD_OF_Q;
4315 break;
4316 case ORDERED_QUEUE_TAG:
4317 fcp_cmnd->fcpCntl1 = ORDERED_Q;
4318 break;
4319 default:
4320 fcp_cmnd->fcpCntl1 = SIMPLE_Q;
4321 break;
4322 }
4323 } else
4324 fcp_cmnd->fcpCntl1 = SIMPLE_Q;
4325 4188
4326 sli4 = (phba->sli_rev == LPFC_SLI_REV4); 4189 sli4 = (phba->sli_rev == LPFC_SLI_REV4);
4327 piocbq->iocb.un.fcpi.fcpi_XRdy = 0; 4190 piocbq->iocb.un.fcpi.fcpi_XRdy = 0;
@@ -5632,10 +5495,7 @@ lpfc_slave_configure(struct scsi_device *sdev)
5632 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata; 5495 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
5633 struct lpfc_hba *phba = vport->phba; 5496 struct lpfc_hba *phba = vport->phba;
5634 5497
5635 if (sdev->tagged_supported) 5498 scsi_change_queue_depth(sdev, vport->cfg_lun_queue_depth);
5636 scsi_activate_tcq(sdev, vport->cfg_lun_queue_depth);
5637 else
5638 scsi_deactivate_tcq(sdev, vport->cfg_lun_queue_depth);
5639 5499
5640 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { 5500 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
5641 lpfc_sli_handle_fast_ring_event(phba, 5501 lpfc_sli_handle_fast_ring_event(phba,
@@ -6018,8 +5878,10 @@ struct scsi_host_template lpfc_template = {
6018 .shost_attrs = lpfc_hba_attrs, 5878 .shost_attrs = lpfc_hba_attrs,
6019 .max_sectors = 0xFFFF, 5879 .max_sectors = 0xFFFF,
6020 .vendor_id = LPFC_NL_VENDOR_ID, 5880 .vendor_id = LPFC_NL_VENDOR_ID,
6021 .change_queue_depth = lpfc_change_queue_depth, 5881 .change_queue_depth = scsi_change_queue_depth,
6022 .change_queue_type = lpfc_change_queue_type, 5882 .change_queue_type = scsi_change_queue_type,
5883 .use_blk_tags = 1,
5884 .track_queue_depth = 1,
6023}; 5885};
6024 5886
6025struct scsi_host_template lpfc_vport_template = { 5887struct scsi_host_template lpfc_vport_template = {
@@ -6041,6 +5903,8 @@ struct scsi_host_template lpfc_vport_template = {
6041 .use_clustering = ENABLE_CLUSTERING, 5903 .use_clustering = ENABLE_CLUSTERING,
6042 .shost_attrs = lpfc_vport_attrs, 5904 .shost_attrs = lpfc_vport_attrs,
6043 .max_sectors = 0xFFFF, 5905 .max_sectors = 0xFFFF,
6044 .change_queue_depth = lpfc_change_queue_depth, 5906 .change_queue_depth = scsi_change_queue_depth,
6045 .change_queue_type = lpfc_change_queue_type, 5907 .change_queue_type = scsi_change_queue_type,
5908 .use_blk_tags = 1,
5909 .track_queue_depth = 1,
6046}; 5910};
diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c
index 6a039eb1cbce..953fd9b953c7 100644
--- a/drivers/scsi/mac_scsi.c
+++ b/drivers/scsi/mac_scsi.c
@@ -9,69 +9,62 @@
9 * Generic Generic NCR5380 driver 9 * Generic Generic NCR5380 driver
10 * 10 *
11 * Copyright 1995, Russell King 11 * Copyright 1995, Russell King
12 *
13 * ALPHA RELEASE 1.
14 *
15 * For more information, please consult
16 *
17 * NCR 5380 Family
18 * SCSI Protocol Controller
19 * Databook
20 *
21 * NCR Microelectronics
22 * 1635 Aeroplaza Drive
23 * Colorado Springs, CO 80916
24 * 1+ (719) 578-3400
25 * 1+ (800) 334-5454
26 */ 12 */
27 13
28#include <linux/types.h> 14#include <linux/types.h>
29#include <linux/stddef.h>
30#include <linux/ctype.h>
31#include <linux/delay.h> 15#include <linux/delay.h>
32
33#include <linux/module.h> 16#include <linux/module.h>
34#include <linux/signal.h>
35#include <linux/ioport.h> 17#include <linux/ioport.h>
36#include <linux/init.h> 18#include <linux/init.h>
37#include <linux/blkdev.h> 19#include <linux/blkdev.h>
38#include <linux/interrupt.h> 20#include <linux/interrupt.h>
21#include <linux/platform_device.h>
39 22
23#include <asm/hwtest.h>
40#include <asm/io.h> 24#include <asm/io.h>
41#include <asm/irq.h>
42
43#include <asm/macintosh.h>
44#include <asm/macints.h> 25#include <asm/macints.h>
45#include <asm/mac_via.h> 26#include <asm/setup.h>
46 27
47#include "scsi.h"
48#include <scsi/scsi_host.h> 28#include <scsi/scsi_host.h>
49#include "mac_scsi.h"
50 29
51/* These control the behaviour of the generic 5380 core */ 30/* Definitions for the core NCR5380 driver. */
52#define AUTOSENSE 31
53#define PSEUDO_DMA 32#define PSEUDO_DMA
54 33
55#include "NCR5380.h" 34#define NCR5380_implementation_fields unsigned char *pdma_base
35#define NCR5380_local_declare() struct Scsi_Host *_instance
36#define NCR5380_setup(instance) _instance = instance
56 37
57#define RESET_BOOT 38#define NCR5380_read(reg) macscsi_read(_instance, reg)
58#define DRIVER_SETUP 39#define NCR5380_write(reg, value) macscsi_write(_instance, reg, value)
59 40
60extern void via_scsi_clear(void); 41#define NCR5380_pread macscsi_pread
42#define NCR5380_pwrite macscsi_pwrite
61 43
62#ifdef RESET_BOOT 44#define NCR5380_intr macscsi_intr
63static void mac_scsi_reset_boot(struct Scsi_Host *instance); 45#define NCR5380_queue_command macscsi_queue_command
64#endif 46#define NCR5380_abort macscsi_abort
47#define NCR5380_bus_reset macscsi_bus_reset
48#define NCR5380_info macscsi_info
49#define NCR5380_show_info macscsi_show_info
50#define NCR5380_write_info macscsi_write_info
51
52#include "NCR5380.h"
53
54#define RESET_BOOT
65 55
66static int setup_called = 0;
67static int setup_can_queue = -1; 56static int setup_can_queue = -1;
57module_param(setup_can_queue, int, 0);
68static int setup_cmd_per_lun = -1; 58static int setup_cmd_per_lun = -1;
59module_param(setup_cmd_per_lun, int, 0);
69static int setup_sg_tablesize = -1; 60static int setup_sg_tablesize = -1;
61module_param(setup_sg_tablesize, int, 0);
70static int setup_use_pdma = -1; 62static int setup_use_pdma = -1;
71#ifdef SUPPORT_TAGS 63module_param(setup_use_pdma, int, 0);
72static int setup_use_tagged_queuing = -1; 64static int setup_use_tagged_queuing = -1;
73#endif 65module_param(setup_use_tagged_queuing, int, 0);
74static int setup_hostid = -1; 66static int setup_hostid = -1;
67module_param(setup_hostid, int, 0);
75 68
76/* Time (in jiffies) to wait after a reset; the SCSI standard calls for 250ms, 69/* Time (in jiffies) to wait after a reset; the SCSI standard calls for 250ms,
77 * we usually do 0.5s to be on the safe side. But Toshiba CD-ROMs once more 70 * we usually do 0.5s to be on the safe side. But Toshiba CD-ROMs once more
@@ -84,232 +77,48 @@ static int setup_hostid = -1;
84#define AFTER_RESET_DELAY (HZ/2) 77#define AFTER_RESET_DELAY (HZ/2)
85#endif 78#endif
86 79
87static volatile unsigned char *mac_scsi_regp = NULL;
88static volatile unsigned char *mac_scsi_drq = NULL;
89static volatile unsigned char *mac_scsi_nodrq = NULL;
90
91
92/* 80/*
93 * NCR 5380 register access functions 81 * NCR 5380 register access functions
94 */ 82 */
95 83
96#if 0 84static inline char macscsi_read(struct Scsi_Host *instance, int reg)
97/* Debug versions */
98#define CTRL(p,v) (*ctrl = (v))
99
100static char macscsi_read(struct Scsi_Host *instance, int reg)
101{ 85{
102 int iobase = instance->io_port; 86 return in_8(instance->base + (reg << 4));
103 int i;
104 int *ctrl = &((struct NCR5380_hostdata *)instance->hostdata)->ctrl;
105
106 CTRL(iobase, 0);
107 i = in_8(iobase + (reg<<4));
108 CTRL(iobase, 0x40);
109
110 return i;
111} 87}
112 88
113static void macscsi_write(struct Scsi_Host *instance, int reg, int value) 89static inline void macscsi_write(struct Scsi_Host *instance, int reg, int value)
114{
115 int iobase = instance->io_port;
116 int *ctrl = &((struct NCR5380_hostdata *)instance->hostdata)->ctrl;
117
118 CTRL(iobase, 0);
119 out_8(iobase + (reg<<4), value);
120 CTRL(iobase, 0x40);
121}
122#else
123
124/* Fast versions */
125static __inline__ char macscsi_read(struct Scsi_Host *instance, int reg)
126{ 90{
127 return in_8(instance->io_port + (reg<<4)); 91 out_8(instance->base + (reg << 4), value);
128} 92}
129 93
130static __inline__ void macscsi_write(struct Scsi_Host *instance, int reg, int value) 94#ifndef MODULE
95static int __init mac_scsi_setup(char *str)
131{ 96{
132 out_8(instance->io_port + (reg<<4), value); 97 int ints[7];
133}
134#endif
135
136 98
137/* 99 (void)get_options(str, ARRAY_SIZE(ints), ints);
138 * Function : mac_scsi_setup(char *str)
139 *
140 * Purpose : booter command line initialization of the overrides array,
141 *
142 * Inputs : str - comma delimited list of options
143 *
144 */
145 100
146static int __init mac_scsi_setup(char *str) { 101 if (ints[0] < 1 || ints[0] > 6) {
147#ifdef DRIVER_SETUP 102 pr_err("Usage: mac5380=<can_queue>[,<cmd_per_lun>[,<sg_tablesize>[,<hostid>[,<use_tags>[,<use_pdma>]]]]]\n");
148 int ints[7]; 103 return 0;
149
150 (void)get_options( str, ARRAY_SIZE(ints), ints);
151
152 if (setup_called++ || ints[0] < 1 || ints[0] > 6) {
153 printk(KERN_WARNING "scsi: <mac5380>"
154 " Usage: mac5380=<can_queue>[,<cmd_per_lun>,<sg_tablesize>,<hostid>,<use_tags>,<use_pdma>]\n");
155 printk(KERN_ALERT "scsi: <mac5380> Bad Penguin parameters?\n");
156 return 0;
157 }
158
159 if (ints[0] >= 1) {
160 if (ints[1] > 0)
161 /* no limits on this, just > 0 */
162 setup_can_queue = ints[1];
163 }
164 if (ints[0] >= 2) {
165 if (ints[2] > 0)
166 setup_cmd_per_lun = ints[2];
167 }
168 if (ints[0] >= 3) {
169 if (ints[3] >= 0) {
170 setup_sg_tablesize = ints[3];
171 /* Must be <= SG_ALL (255) */
172 if (setup_sg_tablesize > SG_ALL)
173 setup_sg_tablesize = SG_ALL;
174 }
175 }
176 if (ints[0] >= 4) {
177 /* Must be between 0 and 7 */
178 if (ints[4] >= 0 && ints[4] <= 7)
179 setup_hostid = ints[4];
180 else if (ints[4] > 7)
181 printk(KERN_WARNING "mac_scsi_setup: invalid host ID %d !\n", ints[4] );
182 }
183#ifdef SUPPORT_TAGS
184 if (ints[0] >= 5) {
185 if (ints[5] >= 0)
186 setup_use_tagged_queuing = !!ints[5];
187 } 104 }
188 105 if (ints[0] >= 1)
189 if (ints[0] == 6) { 106 setup_can_queue = ints[1];
190 if (ints[6] >= 0) 107 if (ints[0] >= 2)
108 setup_cmd_per_lun = ints[2];
109 if (ints[0] >= 3)
110 setup_sg_tablesize = ints[3];
111 if (ints[0] >= 4)
112 setup_hostid = ints[4];
113 if (ints[0] >= 5)
114 setup_use_tagged_queuing = ints[5];
115 if (ints[0] >= 6)
191 setup_use_pdma = ints[6]; 116 setup_use_pdma = ints[6];
192 }
193#else
194 if (ints[0] == 5) {
195 if (ints[5] >= 0)
196 setup_use_pdma = ints[5];
197 }
198#endif /* SUPPORT_TAGS */
199
200#endif /* DRIVER_SETUP */
201 return 1; 117 return 1;
202} 118}
203 119
204__setup("mac5380=", mac_scsi_setup); 120__setup("mac5380=", mac_scsi_setup);
205 121#endif /* !MODULE */
206/*
207 * Function : int macscsi_detect(struct scsi_host_template * tpnt)
208 *
209 * Purpose : initializes mac NCR5380 driver based on the
210 * command line / compile time port and irq definitions.
211 *
212 * Inputs : tpnt - template for this SCSI adapter.
213 *
214 * Returns : 1 if a host adapter was found, 0 if not.
215 *
216 */
217
218int __init macscsi_detect(struct scsi_host_template * tpnt)
219{
220 static int called = 0;
221 int flags = 0;
222 struct Scsi_Host *instance;
223
224 if (!MACH_IS_MAC || called)
225 return( 0 );
226
227 if (macintosh_config->scsi_type != MAC_SCSI_OLD)
228 return( 0 );
229
230 /* setup variables */
231 tpnt->can_queue =
232 (setup_can_queue > 0) ? setup_can_queue : CAN_QUEUE;
233 tpnt->cmd_per_lun =
234 (setup_cmd_per_lun > 0) ? setup_cmd_per_lun : CMD_PER_LUN;
235 tpnt->sg_tablesize =
236 (setup_sg_tablesize >= 0) ? setup_sg_tablesize : SG_TABLESIZE;
237
238 if (setup_hostid >= 0)
239 tpnt->this_id = setup_hostid;
240 else {
241 /* use 7 as default */
242 tpnt->this_id = 7;
243 }
244
245#ifdef SUPPORT_TAGS
246 if (setup_use_tagged_queuing < 0)
247 setup_use_tagged_queuing = USE_TAGGED_QUEUING;
248#endif
249
250 /* Once we support multiple 5380s (e.g. DuoDock) we'll do
251 something different here */
252 instance = scsi_register (tpnt, sizeof(struct NCR5380_hostdata));
253 if (instance == NULL)
254 return 0;
255
256 if (macintosh_config->ident == MAC_MODEL_IIFX) {
257 mac_scsi_regp = via1+0x8000;
258 mac_scsi_drq = via1+0xE000;
259 mac_scsi_nodrq = via1+0xC000;
260 /* The IIFX should be able to do true DMA, but pseudo-dma doesn't work */
261 flags = FLAG_NO_PSEUDO_DMA;
262 } else {
263 mac_scsi_regp = via1+0x10000;
264 mac_scsi_drq = via1+0x6000;
265 mac_scsi_nodrq = via1+0x12000;
266 }
267
268 if (! setup_use_pdma)
269 flags = FLAG_NO_PSEUDO_DMA;
270
271 instance->io_port = (unsigned long) mac_scsi_regp;
272 instance->irq = IRQ_MAC_SCSI;
273
274#ifdef RESET_BOOT
275 mac_scsi_reset_boot(instance);
276#endif
277
278 NCR5380_init(instance, flags);
279
280 instance->n_io_port = 255;
281
282 ((struct NCR5380_hostdata *)instance->hostdata)->ctrl = 0;
283
284 if (instance->irq != SCSI_IRQ_NONE)
285 if (request_irq(instance->irq, NCR5380_intr, 0, "ncr5380", instance)) {
286 printk(KERN_WARNING "scsi%d: IRQ%d not free, interrupts disabled\n",
287 instance->host_no, instance->irq);
288 instance->irq = SCSI_IRQ_NONE;
289 }
290
291 printk(KERN_INFO "scsi%d: generic 5380 at port %lX irq", instance->host_no, instance->io_port);
292 if (instance->irq == SCSI_IRQ_NONE)
293 printk (KERN_INFO "s disabled");
294 else
295 printk (KERN_INFO " %d", instance->irq);
296 printk(KERN_INFO " options CAN_QUEUE=%d CMD_PER_LUN=%d release=%d",
297 instance->can_queue, instance->cmd_per_lun, MACSCSI_PUBLIC_RELEASE);
298 printk(KERN_INFO "\nscsi%d:", instance->host_no);
299 NCR5380_print_options(instance);
300 printk("\n");
301 called = 1;
302 return 1;
303}
304
305int macscsi_release (struct Scsi_Host *shpnt)
306{
307 if (shpnt->irq != SCSI_IRQ_NONE)
308 free_irq(shpnt->irq, shpnt);
309 NCR5380_exit(shpnt);
310
311 return 0;
312}
313 122
314#ifdef RESET_BOOT 123#ifdef RESET_BOOT
315/* 124/*
@@ -349,10 +158,7 @@ static void mac_scsi_reset_boot(struct Scsi_Host *instance)
349} 158}
350#endif 159#endif
351 160
352const char * macscsi_info (struct Scsi_Host *spnt) { 161#ifdef PSEUDO_DMA
353 return "";
354}
355
356/* 162/*
357 Pseudo-DMA: (Ove Edlund) 163 Pseudo-DMA: (Ove Edlund)
358 The code attempts to catch bus errors that occur if one for example 164 The code attempts to catch bus errors that occur if one for example
@@ -422,38 +228,39 @@ __asm__ __volatile__ \
422 : "0"(s), "1"(d), "2"(len) \ 228 : "0"(s), "1"(d), "2"(len) \
423 : "d0") 229 : "d0")
424 230
425 231static int macscsi_pread(struct Scsi_Host *instance,
426static int macscsi_pread (struct Scsi_Host *instance, 232 unsigned char *dst, int len)
427 unsigned char *dst, int len)
428{ 233{
429 unsigned char *d; 234 struct NCR5380_hostdata *hostdata = shost_priv(instance);
430 volatile unsigned char *s; 235 unsigned char *d;
431 236 unsigned char *s;
432 NCR5380_local_declare(); 237
433 NCR5380_setup(instance); 238 NCR5380_local_declare();
434 239 NCR5380_setup(instance);
435 s = mac_scsi_drq+0x60; 240
436 d = dst; 241 s = hostdata->pdma_base + (INPUT_DATA_REG << 4);
437 242 d = dst;
438/* These conditions are derived from MacOS */ 243
439 244 /* These conditions are derived from MacOS */
440 while (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ) 245
441 && !(NCR5380_read(STATUS_REG) & SR_REQ)) 246 while (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ) &&
442 ; 247 !(NCR5380_read(STATUS_REG) & SR_REQ))
443 if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ) 248 ;
444 && (NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)) { 249
445 printk(KERN_ERR "Error in macscsi_pread\n"); 250 if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ) &&
446 return -1; 251 (NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)) {
447 } 252 pr_err("Error in macscsi_pread\n");
448 253 return -1;
449 CP_IO_TO_MEM(s, d, len); 254 }
450 255
451 if (len != 0) { 256 CP_IO_TO_MEM(s, d, len);
452 printk(KERN_NOTICE "Bus error in macscsi_pread\n"); 257
453 return -1; 258 if (len != 0) {
454 } 259 pr_notice("Bus error in macscsi_pread\n");
455 260 return -1;
456 return 0; 261 }
262
263 return 0;
457} 264}
458 265
459 266
@@ -515,59 +322,172 @@ __asm__ __volatile__ \
515 : "0"(s), "1"(d), "2"(len) \ 322 : "0"(s), "1"(d), "2"(len) \
516 : "d0") 323 : "d0")
517 324
518static int macscsi_pwrite (struct Scsi_Host *instance, 325static int macscsi_pwrite(struct Scsi_Host *instance,
519 unsigned char *src, int len) 326 unsigned char *src, int len)
520{ 327{
521 unsigned char *s; 328 struct NCR5380_hostdata *hostdata = shost_priv(instance);
522 volatile unsigned char *d; 329 unsigned char *s;
523 330 unsigned char *d;
524 NCR5380_local_declare(); 331
525 NCR5380_setup(instance); 332 NCR5380_local_declare();
526 333 NCR5380_setup(instance);
527 s = src; 334
528 d = mac_scsi_drq; 335 s = src;
529 336 d = hostdata->pdma_base + (OUTPUT_DATA_REG << 4);
530/* These conditions are derived from MacOS */ 337
531 338 /* These conditions are derived from MacOS */
532 while (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ) 339
533 && (!(NCR5380_read(STATUS_REG) & SR_REQ) 340 while (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ) &&
534 || (NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH))) 341 (!(NCR5380_read(STATUS_REG) & SR_REQ) ||
535 ; 342 (NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)))
536 if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ)) { 343 ;
537 printk(KERN_ERR "Error in macscsi_pwrite\n"); 344
538 return -1; 345 if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ)) {
539 } 346 pr_err("Error in macscsi_pwrite\n");
540 347 return -1;
541 CP_MEM_TO_IO(s, d, len); 348 }
542 349
543 if (len != 0) { 350 CP_MEM_TO_IO(s, d, len);
544 printk(KERN_NOTICE "Bus error in macscsi_pwrite\n"); 351
545 return -1; 352 if (len != 0) {
546 } 353 pr_notice("Bus error in macscsi_pwrite\n");
547 354 return -1;
548 return 0; 355 }
549}
550 356
357 return 0;
358}
359#endif
551 360
552#include "NCR5380.c" 361#include "NCR5380.c"
553 362
554static struct scsi_host_template driver_template = { 363#define DRV_MODULE_NAME "mac_scsi"
555 .proc_name = "Mac5380", 364#define PFX DRV_MODULE_NAME ": "
365
366static struct scsi_host_template mac_scsi_template = {
367 .module = THIS_MODULE,
368 .proc_name = DRV_MODULE_NAME,
556 .show_info = macscsi_show_info, 369 .show_info = macscsi_show_info,
557 .write_info = macscsi_write_info, 370 .write_info = macscsi_write_info,
558 .name = "Macintosh NCR5380 SCSI", 371 .name = "Macintosh NCR5380 SCSI",
559 .detect = macscsi_detect,
560 .release = macscsi_release,
561 .info = macscsi_info, 372 .info = macscsi_info,
562 .queuecommand = macscsi_queue_command, 373 .queuecommand = macscsi_queue_command,
563 .eh_abort_handler = macscsi_abort, 374 .eh_abort_handler = macscsi_abort,
564 .eh_bus_reset_handler = macscsi_bus_reset, 375 .eh_bus_reset_handler = macscsi_bus_reset,
565 .can_queue = CAN_QUEUE, 376 .can_queue = 16,
566 .this_id = 7, 377 .this_id = 7,
567 .sg_tablesize = SG_ALL, 378 .sg_tablesize = SG_ALL,
568 .cmd_per_lun = CMD_PER_LUN, 379 .cmd_per_lun = 2,
569 .use_clustering = DISABLE_CLUSTERING 380 .use_clustering = DISABLE_CLUSTERING
570}; 381};
571 382
383static int __init mac_scsi_probe(struct platform_device *pdev)
384{
385 struct Scsi_Host *instance;
386 int error;
387 int host_flags = 0;
388 struct resource *irq, *pio_mem, *pdma_mem = NULL;
389
390 pio_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
391 if (!pio_mem)
392 return -ENODEV;
393
394#ifdef PSEUDO_DMA
395 pdma_mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
396#endif
397
398 irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
399
400 if (!hwreg_present((unsigned char *)pio_mem->start +
401 (STATUS_REG << 4))) {
402 pr_info(PFX "no device detected at %pap\n", &pio_mem->start);
403 return -ENODEV;
404 }
405
406 if (setup_can_queue > 0)
407 mac_scsi_template.can_queue = setup_can_queue;
408 if (setup_cmd_per_lun > 0)
409 mac_scsi_template.cmd_per_lun = setup_cmd_per_lun;
410 if (setup_sg_tablesize >= 0)
411 mac_scsi_template.sg_tablesize = setup_sg_tablesize;
412 if (setup_hostid >= 0)
413 mac_scsi_template.this_id = setup_hostid & 7;
414 if (setup_use_pdma < 0)
415 setup_use_pdma = 0;
416
417 instance = scsi_host_alloc(&mac_scsi_template,
418 sizeof(struct NCR5380_hostdata));
419 if (!instance)
420 return -ENOMEM;
421
422 instance->base = pio_mem->start;
423 if (irq)
424 instance->irq = irq->start;
425 else
426 instance->irq = NO_IRQ;
427
428 if (pdma_mem && setup_use_pdma) {
429 struct NCR5380_hostdata *hostdata = shost_priv(instance);
430
431 hostdata->pdma_base = (unsigned char *)pdma_mem->start;
432 } else
433 host_flags |= FLAG_NO_PSEUDO_DMA;
434
435#ifdef RESET_BOOT
436 mac_scsi_reset_boot(instance);
437#endif
438
439#ifdef SUPPORT_TAGS
440 host_flags |= setup_use_tagged_queuing > 0 ? FLAG_TAGGED_QUEUING : 0;
441#endif
442
443 NCR5380_init(instance, host_flags);
444
445 if (instance->irq != NO_IRQ) {
446 error = request_irq(instance->irq, macscsi_intr, IRQF_SHARED,
447 "NCR5380", instance);
448 if (error)
449 goto fail_irq;
450 }
451
452 error = scsi_add_host(instance, NULL);
453 if (error)
454 goto fail_host;
455
456 platform_set_drvdata(pdev, instance);
457
458 scsi_scan_host(instance);
459 return 0;
460
461fail_host:
462 if (instance->irq != NO_IRQ)
463 free_irq(instance->irq, instance);
464fail_irq:
465 NCR5380_exit(instance);
466 scsi_host_put(instance);
467 return error;
468}
469
470static int __exit mac_scsi_remove(struct platform_device *pdev)
471{
472 struct Scsi_Host *instance = platform_get_drvdata(pdev);
473
474 scsi_remove_host(instance);
475 if (instance->irq != NO_IRQ)
476 free_irq(instance->irq, instance);
477 NCR5380_exit(instance);
478 scsi_host_put(instance);
479 return 0;
480}
481
482static struct platform_driver mac_scsi_driver = {
483 .remove = __exit_p(mac_scsi_remove),
484 .driver = {
485 .name = DRV_MODULE_NAME,
486 .owner = THIS_MODULE,
487 },
488};
489
490module_platform_driver_probe(mac_scsi_driver, mac_scsi_probe);
572 491
573#include "scsi_module.c" 492MODULE_ALIAS("platform:" DRV_MODULE_NAME);
493MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/mac_scsi.h b/drivers/scsi/mac_scsi.h
deleted file mode 100644
index 06969b06e54b..000000000000
--- a/drivers/scsi/mac_scsi.h
+++ /dev/null
@@ -1,74 +0,0 @@
1/*
2 * Cumana Generic NCR5380 driver defines
3 *
4 * Copyright 1993, Drew Eckhardt
5 * Visionary Computing
6 * (Unix and Linux consulting and custom programming)
7 * drew@colorado.edu
8 * +1 (303) 440-4894
9 *
10 * ALPHA RELEASE 1.
11 *
12 * For more information, please consult
13 *
14 * NCR 5380 Family
15 * SCSI Protocol Controller
16 * Databook
17 *
18 * NCR Microelectronics
19 * 1635 Aeroplaza Drive
20 * Colorado Springs, CO 80916
21 * 1+ (719) 578-3400
22 * 1+ (800) 334-5454
23 */
24
25#ifndef MAC_NCR5380_H
26#define MAC_NCR5380_H
27
28#define MACSCSI_PUBLIC_RELEASE 2
29
30#ifndef ASM
31
32#ifndef CMD_PER_LUN
33#define CMD_PER_LUN 2
34#endif
35
36#ifndef CAN_QUEUE
37#define CAN_QUEUE 16
38#endif
39
40#ifndef SG_TABLESIZE
41#define SG_TABLESIZE SG_NONE
42#endif
43
44#ifndef USE_TAGGED_QUEUING
45#define USE_TAGGED_QUEUING 0
46#endif
47
48#include <scsi/scsicam.h>
49
50#define NCR5380_implementation_fields \
51 int port, ctrl
52
53#define NCR5380_local_declare() \
54 struct Scsi_Host *_instance
55
56#define NCR5380_setup(instance) \
57 _instance = instance
58
59#define NCR5380_read(reg) macscsi_read(_instance, reg)
60#define NCR5380_write(reg, value) macscsi_write(_instance, reg, value)
61
62#define NCR5380_pread macscsi_pread
63#define NCR5380_pwrite macscsi_pwrite
64
65#define NCR5380_intr macscsi_intr
66#define NCR5380_queue_command macscsi_queue_command
67#define NCR5380_abort macscsi_abort
68#define NCR5380_bus_reset macscsi_bus_reset
69#define NCR5380_show_info macscsi_show_info
70#define NCR5380_write_info macscsi_write_info
71
72#endif /* ndef ASM */
73#endif /* MAC_NCR5380_H */
74
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index ac5d94cfd52f..2485255f3414 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -1945,7 +1945,7 @@ megaraid_abort_and_reset(adapter_t *adapter, Scsi_Cmnd *cmd, int aor)
1945 cmd->device->id, (u32)cmd->device->lun); 1945 cmd->device->id, (u32)cmd->device->lun);
1946 1946
1947 if(list_empty(&adapter->pending_list)) 1947 if(list_empty(&adapter->pending_list))
1948 return FALSE; 1948 return FAILED;
1949 1949
1950 list_for_each_safe(pos, next, &adapter->pending_list) { 1950 list_for_each_safe(pos, next, &adapter->pending_list) {
1951 1951
@@ -1968,7 +1968,7 @@ megaraid_abort_and_reset(adapter_t *adapter, Scsi_Cmnd *cmd, int aor)
1968 (aor==SCB_ABORT) ? "ABORTING":"RESET", 1968 (aor==SCB_ABORT) ? "ABORTING":"RESET",
1969 scb->idx); 1969 scb->idx);
1970 1970
1971 return FALSE; 1971 return FAILED;
1972 } 1972 }
1973 else { 1973 else {
1974 1974
@@ -1993,12 +1993,12 @@ megaraid_abort_and_reset(adapter_t *adapter, Scsi_Cmnd *cmd, int aor)
1993 list_add_tail(SCSI_LIST(cmd), 1993 list_add_tail(SCSI_LIST(cmd),
1994 &adapter->completed_list); 1994 &adapter->completed_list);
1995 1995
1996 return TRUE; 1996 return SUCCESS;
1997 } 1997 }
1998 } 1998 }
1999 } 1999 }
2000 2000
2001 return FALSE; 2001 return FAILED;
2002} 2002}
2003 2003
2004static inline int 2004static inline int
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index 531dce419c18..f0987f22ea70 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -332,27 +332,6 @@ static struct device_attribute *megaraid_sdev_attrs[] = {
332 NULL, 332 NULL,
333}; 333};
334 334
335/**
336 * megaraid_change_queue_depth - Change the device's queue depth
337 * @sdev: scsi device struct
338 * @qdepth: depth to set
339 * @reason: calling context
340 *
341 * Return value:
342 * actual depth set
343 */
344static int megaraid_change_queue_depth(struct scsi_device *sdev, int qdepth,
345 int reason)
346{
347 if (reason != SCSI_QDEPTH_DEFAULT)
348 return -EOPNOTSUPP;
349
350 if (qdepth > MBOX_MAX_SCSI_CMDS)
351 qdepth = MBOX_MAX_SCSI_CMDS;
352 scsi_adjust_queue_depth(sdev, 0, qdepth);
353 return sdev->queue_depth;
354}
355
356/* 335/*
357 * Scsi host template for megaraid unified driver 336 * Scsi host template for megaraid unified driver
358 */ 337 */
@@ -365,7 +344,7 @@ static struct scsi_host_template megaraid_template_g = {
365 .eh_device_reset_handler = megaraid_reset_handler, 344 .eh_device_reset_handler = megaraid_reset_handler,
366 .eh_bus_reset_handler = megaraid_reset_handler, 345 .eh_bus_reset_handler = megaraid_reset_handler,
367 .eh_host_reset_handler = megaraid_reset_handler, 346 .eh_host_reset_handler = megaraid_reset_handler,
368 .change_queue_depth = megaraid_change_queue_depth, 347 .change_queue_depth = scsi_change_queue_depth,
369 .use_clustering = ENABLE_CLUSTERING, 348 .use_clustering = ENABLE_CLUSTERING,
370 .no_write_same = 1, 349 .no_write_same = 1,
371 .sdev_attrs = megaraid_sdev_attrs, 350 .sdev_attrs = megaraid_sdev_attrs,
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index a49914de4b95..0d44d91c2fce 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -1,7 +1,8 @@
1/* 1/*
2 * Linux MegaRAID driver for SAS based RAID controllers 2 * Linux MegaRAID driver for SAS based RAID controllers
3 * 3 *
4 * Copyright (c) 2003-2012 LSI Corporation. 4 * Copyright (c) 2003-2013 LSI Corporation
5 * Copyright (c) 2013-2014 Avago Technologies
5 * 6 *
6 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
@@ -14,17 +15,18 @@
14 * GNU General Public License for more details. 15 * GNU General Public License for more details.
15 * 16 *
16 * You should have received a copy of the GNU General Public License 17 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software 18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 * 19 *
20 * FILE: megaraid_sas.h 20 * FILE: megaraid_sas.h
21 * 21 *
22 * Authors: LSI Corporation 22 * Authors: Avago Technologies
23 * Kashyap Desai <kashyap.desai@avagotech.com>
24 * Sumit Saxena <sumit.saxena@avagotech.com>
23 * 25 *
24 * Send feedback to: <megaraidlinux@lsi.com> 26 * Send feedback to: megaraidlinux.pdl@avagotech.com
25 * 27 *
26 * Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035 28 * Mail to: Avago Technologies, 350 West Trimble Road, Building 90,
27 * ATTN: Linuxraid 29 * San Jose, California 95131
28 */ 30 */
29 31
30#ifndef LSI_MEGARAID_SAS_H 32#ifndef LSI_MEGARAID_SAS_H
@@ -33,9 +35,7 @@
33/* 35/*
34 * MegaRAID SAS Driver meta data 36 * MegaRAID SAS Driver meta data
35 */ 37 */
36#define MEGASAS_VERSION "06.805.06.00-rc1" 38#define MEGASAS_VERSION "06.805.06.01-rc1"
37#define MEGASAS_RELDATE "Sep. 4, 2014"
38#define MEGASAS_EXT_VERSION "Thu. Sep. 4 17:00:00 PDT 2014"
39 39
40/* 40/*
41 * Device IDs 41 * Device IDs
@@ -1931,8 +1931,7 @@ u16 get_updated_dev_handle(struct megasas_instance *instance,
1931 struct LD_LOAD_BALANCE_INFO *lbInfo, struct IO_REQUEST_INFO *in_info); 1931 struct LD_LOAD_BALANCE_INFO *lbInfo, struct IO_REQUEST_INFO *in_info);
1932void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *map, 1932void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *map,
1933 struct LD_LOAD_BALANCE_INFO *lbInfo); 1933 struct LD_LOAD_BALANCE_INFO *lbInfo);
1934int megasas_get_ctrl_info(struct megasas_instance *instance, 1934int megasas_get_ctrl_info(struct megasas_instance *instance);
1935 struct megasas_ctrl_info *ctrl_info);
1936int megasas_set_crash_dump_params(struct megasas_instance *instance, 1935int megasas_set_crash_dump_params(struct megasas_instance *instance,
1937 u8 crash_buf_state); 1936 u8 crash_buf_state);
1938void megasas_free_host_crash_buffer(struct megasas_instance *instance); 1937void megasas_free_host_crash_buffer(struct megasas_instance *instance);
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 5640ad1c8214..f05580e693d0 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -1,7 +1,8 @@
1/* 1/*
2 * Linux MegaRAID driver for SAS based RAID controllers 2 * Linux MegaRAID driver for SAS based RAID controllers
3 * 3 *
4 * Copyright (c) 2003-2012 LSI Corporation. 4 * Copyright (c) 2003-2013 LSI Corporation
5 * Copyright (c) 2013-2014 Avago Technologies
5 * 6 *
6 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
@@ -14,22 +15,20 @@
14 * GNU General Public License for more details. 15 * GNU General Public License for more details.
15 * 16 *
16 * You should have received a copy of the GNU General Public License 17 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software 18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 * 19 *
20 * FILE: megaraid_sas_base.c 20 * Authors: Avago Technologies
21 * Version : 06.805.06.00-rc1
22 *
23 * Authors: LSI Corporation
24 * Sreenivas Bagalkote 21 * Sreenivas Bagalkote
25 * Sumant Patro 22 * Sumant Patro
26 * Bo Yang 23 * Bo Yang
27 * Adam Radford <linuxraid@lsi.com> 24 * Adam Radford
25 * Kashyap Desai <kashyap.desai@avagotech.com>
26 * Sumit Saxena <sumit.saxena@avagotech.com>
28 * 27 *
29 * Send feedback to: <megaraidlinux@lsi.com> 28 * Send feedback to: megaraidlinux.pdl@avagotech.com
30 * 29 *
31 * Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035 30 * Mail to: Avago Technologies, 350 West Trimble Road, Building 90,
32 * ATTN: Linuxraid 31 * San Jose, California 95131
33 */ 32 */
34 33
35#include <linux/kernel.h> 34#include <linux/kernel.h>
@@ -1008,7 +1007,7 @@ megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
1008 cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr)); 1007 cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr));
1009 1008
1010 cmd->sync_cmd = 1; 1009 cmd->sync_cmd = 1;
1011 cmd->cmd_status = 0xFF; 1010 cmd->cmd_status = ENODATA;
1012 1011
1013 instance->instancet->issue_dcmd(instance, cmd); 1012 instance->instancet->issue_dcmd(instance, cmd);
1014 1013
@@ -1572,6 +1571,12 @@ megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
1572 instance = (struct megasas_instance *) 1571 instance = (struct megasas_instance *)
1573 scmd->device->host->hostdata; 1572 scmd->device->host->hostdata;
1574 1573
1574 if (instance->unload == 1) {
1575 scmd->result = DID_NO_CONNECT << 16;
1576 scmd->scsi_done(scmd);
1577 return 0;
1578 }
1579
1575 if (instance->issuepend_done == 0) 1580 if (instance->issuepend_done == 0)
1576 return SCSI_MLQUEUE_HOST_BUSY; 1581 return SCSI_MLQUEUE_HOST_BUSY;
1577 1582
@@ -2586,20 +2591,6 @@ megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
2586 } 2591 }
2587} 2592}
2588 2593
2589static int megasas_change_queue_depth(struct scsi_device *sdev,
2590 int queue_depth, int reason)
2591{
2592 if (reason != SCSI_QDEPTH_DEFAULT)
2593 return -EOPNOTSUPP;
2594
2595 if (queue_depth > sdev->host->can_queue)
2596 queue_depth = sdev->host->can_queue;
2597 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev),
2598 queue_depth);
2599
2600 return queue_depth;
2601}
2602
2603static ssize_t 2594static ssize_t
2604megasas_fw_crash_buffer_store(struct device *cdev, 2595megasas_fw_crash_buffer_store(struct device *cdev,
2605 struct device_attribute *attr, const char *buf, size_t count) 2596 struct device_attribute *attr, const char *buf, size_t count)
@@ -2764,7 +2755,7 @@ static struct scsi_host_template megasas_template = {
2764 .shost_attrs = megaraid_host_attrs, 2755 .shost_attrs = megaraid_host_attrs,
2765 .bios_param = megasas_bios_param, 2756 .bios_param = megasas_bios_param,
2766 .use_clustering = ENABLE_CLUSTERING, 2757 .use_clustering = ENABLE_CLUSTERING,
2767 .change_queue_depth = megasas_change_queue_depth, 2758 .change_queue_depth = scsi_change_queue_depth,
2768 .no_write_same = 1, 2759 .no_write_same = 1,
2769}; 2760};
2770 2761
@@ -4028,25 +4019,83 @@ megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
4028 return ret; 4019 return ret;
4029} 4020}
4030 4021
4022/*
4023 * megasas_update_ext_vd_details : Update details w.r.t Extended VD
4024 * instance : Controller's instance
4025*/
4026static void megasas_update_ext_vd_details(struct megasas_instance *instance)
4027{
4028 struct fusion_context *fusion;
4029 u32 old_map_sz;
4030 u32 new_map_sz;
4031
4032 fusion = instance->ctrl_context;
4033 /* For MFI based controllers return dummy success */
4034 if (!fusion)
4035 return;
4036
4037 instance->supportmax256vd =
4038 instance->ctrl_info->adapterOperations3.supportMaxExtLDs;
4039 /* Below is additional check to address future FW enhancement */
4040 if (instance->ctrl_info->max_lds > 64)
4041 instance->supportmax256vd = 1;
4042
4043 instance->drv_supported_vd_count = MEGASAS_MAX_LD_CHANNELS
4044 * MEGASAS_MAX_DEV_PER_CHANNEL;
4045 instance->drv_supported_pd_count = MEGASAS_MAX_PD_CHANNELS
4046 * MEGASAS_MAX_DEV_PER_CHANNEL;
4047 if (instance->supportmax256vd) {
4048 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT;
4049 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
4050 } else {
4051 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
4052 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
4053 }
4054 dev_info(&instance->pdev->dev, "Firmware supports %d VD %d PD\n",
4055 instance->fw_supported_vd_count,
4056 instance->fw_supported_pd_count);
4057 dev_info(&instance->pdev->dev, "Driver supports %d VD %d PD\n",
4058 instance->drv_supported_vd_count,
4059 instance->drv_supported_pd_count);
4060
4061 old_map_sz = sizeof(struct MR_FW_RAID_MAP) +
4062 (sizeof(struct MR_LD_SPAN_MAP) *
4063 (instance->fw_supported_vd_count - 1));
4064 new_map_sz = sizeof(struct MR_FW_RAID_MAP_EXT);
4065 fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP) +
4066 (sizeof(struct MR_LD_SPAN_MAP) *
4067 (instance->drv_supported_vd_count - 1));
4068
4069 fusion->max_map_sz = max(old_map_sz, new_map_sz);
4070
4071
4072 if (instance->supportmax256vd)
4073 fusion->current_map_sz = new_map_sz;
4074 else
4075 fusion->current_map_sz = old_map_sz;
4076
4077}
4078
4031/** 4079/**
4032 * megasas_get_controller_info - Returns FW's controller structure 4080 * megasas_get_controller_info - Returns FW's controller structure
4033 * @instance: Adapter soft state 4081 * @instance: Adapter soft state
4034 * @ctrl_info: Controller information structure
4035 * 4082 *
4036 * Issues an internal command (DCMD) to get the FW's controller structure. 4083 * Issues an internal command (DCMD) to get the FW's controller structure.
4037 * This information is mainly used to find out the maximum IO transfer per 4084 * This information is mainly used to find out the maximum IO transfer per
4038 * command supported by the FW. 4085 * command supported by the FW.
4039 */ 4086 */
4040int 4087int
4041megasas_get_ctrl_info(struct megasas_instance *instance, 4088megasas_get_ctrl_info(struct megasas_instance *instance)
4042 struct megasas_ctrl_info *ctrl_info)
4043{ 4089{
4044 int ret = 0; 4090 int ret = 0;
4045 struct megasas_cmd *cmd; 4091 struct megasas_cmd *cmd;
4046 struct megasas_dcmd_frame *dcmd; 4092 struct megasas_dcmd_frame *dcmd;
4047 struct megasas_ctrl_info *ci; 4093 struct megasas_ctrl_info *ci;
4094 struct megasas_ctrl_info *ctrl_info;
4048 dma_addr_t ci_h = 0; 4095 dma_addr_t ci_h = 0;
4049 4096
4097 ctrl_info = instance->ctrl_info;
4098
4050 cmd = megasas_get_cmd(instance); 4099 cmd = megasas_get_cmd(instance);
4051 4100
4052 if (!cmd) { 4101 if (!cmd) {
@@ -4086,8 +4135,13 @@ megasas_get_ctrl_info(struct megasas_instance *instance,
4086 else 4135 else
4087 ret = megasas_issue_polled(instance, cmd); 4136 ret = megasas_issue_polled(instance, cmd);
4088 4137
4089 if (!ret) 4138 if (!ret) {
4090 memcpy(ctrl_info, ci, sizeof(struct megasas_ctrl_info)); 4139 memcpy(ctrl_info, ci, sizeof(struct megasas_ctrl_info));
4140 le32_to_cpus((u32 *)&ctrl_info->properties.OnOffProperties);
4141 le32_to_cpus((u32 *)&ctrl_info->adapterOperations2);
4142 le32_to_cpus((u32 *)&ctrl_info->adapterOperations3);
4143 megasas_update_ext_vd_details(instance);
4144 }
4091 4145
4092 pci_free_consistent(instance->pdev, sizeof(struct megasas_ctrl_info), 4146 pci_free_consistent(instance->pdev, sizeof(struct megasas_ctrl_info),
4093 ci, ci_h); 4147 ci, ci_h);
@@ -4289,7 +4343,7 @@ megasas_init_adapter_mfi(struct megasas_instance *instance)
4289 if (megasas_issue_init_mfi(instance)) 4343 if (megasas_issue_init_mfi(instance))
4290 goto fail_fw_init; 4344 goto fail_fw_init;
4291 4345
4292 if (megasas_get_ctrl_info(instance, instance->ctrl_info)) { 4346 if (megasas_get_ctrl_info(instance)) {
4293 dev_err(&instance->pdev->dev, "(%d): Could get controller info " 4347 dev_err(&instance->pdev->dev, "(%d): Could get controller info "
4294 "Fail from %s %d\n", instance->unique_id, 4348 "Fail from %s %d\n", instance->unique_id,
4295 __func__, __LINE__); 4349 __func__, __LINE__);
@@ -4527,12 +4581,8 @@ static int megasas_init_fw(struct megasas_instance *instance)
4527 dev_info(&instance->pdev->dev, 4581 dev_info(&instance->pdev->dev,
4528 "Controller type: iMR\n"); 4582 "Controller type: iMR\n");
4529 } 4583 }
4530 /* OnOffProperties are converted into CPU arch*/
4531 le32_to_cpus((u32 *)&ctrl_info->properties.OnOffProperties);
4532 instance->disableOnlineCtrlReset = 4584 instance->disableOnlineCtrlReset =
4533 ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset; 4585 ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
4534 /* adapterOperations2 are converted into CPU arch*/
4535 le32_to_cpus((u32 *)&ctrl_info->adapterOperations2);
4536 instance->mpio = ctrl_info->adapterOperations2.mpio; 4586 instance->mpio = ctrl_info->adapterOperations2.mpio;
4537 instance->UnevenSpanSupport = 4587 instance->UnevenSpanSupport =
4538 ctrl_info->adapterOperations2.supportUnevenSpans; 4588 ctrl_info->adapterOperations2.supportUnevenSpans;
@@ -4562,7 +4612,6 @@ static int megasas_init_fw(struct megasas_instance *instance)
4562 "requestorId %d\n", instance->requestorId); 4612 "requestorId %d\n", instance->requestorId);
4563 } 4613 }
4564 4614
4565 le32_to_cpus((u32 *)&ctrl_info->adapterOperations3);
4566 instance->crash_dump_fw_support = 4615 instance->crash_dump_fw_support =
4567 ctrl_info->adapterOperations3.supportCrashDump; 4616 ctrl_info->adapterOperations3.supportCrashDump;
4568 instance->crash_dump_drv_support = 4617 instance->crash_dump_drv_support =
@@ -4587,8 +4636,6 @@ static int megasas_init_fw(struct megasas_instance *instance)
4587 if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors)) 4636 if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors))
4588 instance->max_sectors_per_req = tmp_sectors; 4637 instance->max_sectors_per_req = tmp_sectors;
4589 4638
4590 kfree(ctrl_info);
4591
4592 /* Check for valid throttlequeuedepth module parameter */ 4639 /* Check for valid throttlequeuedepth module parameter */
4593 if (instance->is_imr) { 4640 if (instance->is_imr) {
4594 if (throttlequeuedepth > (instance->max_fw_cmds - 4641 if (throttlequeuedepth > (instance->max_fw_cmds -
@@ -4957,10 +5004,6 @@ static int megasas_io_attach(struct megasas_instance *instance)
4957 return -ENODEV; 5004 return -ENODEV;
4958 } 5005 }
4959 5006
4960 /*
4961 * Trigger SCSI to scan our drives
4962 */
4963 scsi_scan_host(host);
4964 return 0; 5007 return 0;
4965} 5008}
4966 5009
@@ -5083,6 +5126,8 @@ static int megasas_probe_one(struct pci_dev *pdev,
5083 goto fail_alloc_dma_buf; 5126 goto fail_alloc_dma_buf;
5084 } 5127 }
5085 fusion = instance->ctrl_context; 5128 fusion = instance->ctrl_context;
5129 memset(fusion, 0,
5130 ((1 << PAGE_SHIFT) << instance->ctrl_context_pages));
5086 INIT_LIST_HEAD(&fusion->cmd_pool); 5131 INIT_LIST_HEAD(&fusion->cmd_pool);
5087 spin_lock_init(&fusion->mpt_pool_lock); 5132 spin_lock_init(&fusion->mpt_pool_lock);
5088 memset(fusion->load_balance_info, 0, 5133 memset(fusion->load_balance_info, 0,
@@ -5288,6 +5333,10 @@ retry_irq_register:
5288 goto fail_io_attach; 5333 goto fail_io_attach;
5289 5334
5290 instance->unload = 0; 5335 instance->unload = 0;
5336 /*
5337 * Trigger SCSI to scan our drives
5338 */
5339 scsi_scan_host(host);
5291 5340
5292 /* 5341 /*
5293 * Initiate AEN (Asynchronous Event Notification) 5342 * Initiate AEN (Asynchronous Event Notification)
@@ -6051,6 +6100,11 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
6051 megasas_issue_blocked_cmd(instance, cmd, 0); 6100 megasas_issue_blocked_cmd(instance, cmd, 0);
6052 cmd->sync_cmd = 0; 6101 cmd->sync_cmd = 0;
6053 6102
6103 if (instance->unload == 1) {
6104 dev_info(&instance->pdev->dev, "Driver unload is in progress "
6105 "don't submit data to application\n");
6106 goto out;
6107 }
6054 /* 6108 /*
6055 * copy out the kernel buffers to user buffers 6109 * copy out the kernel buffers to user buffers
6056 */ 6110 */
@@ -6400,16 +6454,6 @@ static ssize_t megasas_sysfs_show_version(struct device_driver *dd, char *buf)
6400static DRIVER_ATTR(version, S_IRUGO, megasas_sysfs_show_version, NULL); 6454static DRIVER_ATTR(version, S_IRUGO, megasas_sysfs_show_version, NULL);
6401 6455
6402static ssize_t 6456static ssize_t
6403megasas_sysfs_show_release_date(struct device_driver *dd, char *buf)
6404{
6405 return snprintf(buf, strlen(MEGASAS_RELDATE) + 2, "%s\n",
6406 MEGASAS_RELDATE);
6407}
6408
6409static DRIVER_ATTR(release_date, S_IRUGO, megasas_sysfs_show_release_date,
6410 NULL);
6411
6412static ssize_t
6413megasas_sysfs_show_support_poll_for_event(struct device_driver *dd, char *buf) 6457megasas_sysfs_show_support_poll_for_event(struct device_driver *dd, char *buf)
6414{ 6458{
6415 return sprintf(buf, "%u\n", support_poll_for_event); 6459 return sprintf(buf, "%u\n", support_poll_for_event);
@@ -6712,8 +6756,7 @@ static int __init megasas_init(void)
6712 /* 6756 /*
6713 * Announce driver version and other information 6757 * Announce driver version and other information
6714 */ 6758 */
6715 printk(KERN_INFO "megasas: %s %s\n", MEGASAS_VERSION, 6759 pr_info("megasas: %s\n", MEGASAS_VERSION);
6716 MEGASAS_EXT_VERSION);
6717 6760
6718 spin_lock_init(&poll_aen_lock); 6761 spin_lock_init(&poll_aen_lock);
6719 6762
@@ -6748,10 +6791,6 @@ static int __init megasas_init(void)
6748 &driver_attr_version); 6791 &driver_attr_version);
6749 if (rval) 6792 if (rval)
6750 goto err_dcf_attr_ver; 6793 goto err_dcf_attr_ver;
6751 rval = driver_create_file(&megasas_pci_driver.driver,
6752 &driver_attr_release_date);
6753 if (rval)
6754 goto err_dcf_rel_date;
6755 6794
6756 rval = driver_create_file(&megasas_pci_driver.driver, 6795 rval = driver_create_file(&megasas_pci_driver.driver,
6757 &driver_attr_support_poll_for_event); 6796 &driver_attr_support_poll_for_event);
@@ -6775,12 +6814,7 @@ err_dcf_support_device_change:
6775err_dcf_dbg_lvl: 6814err_dcf_dbg_lvl:
6776 driver_remove_file(&megasas_pci_driver.driver, 6815 driver_remove_file(&megasas_pci_driver.driver,
6777 &driver_attr_support_poll_for_event); 6816 &driver_attr_support_poll_for_event);
6778
6779err_dcf_support_poll_for_event: 6817err_dcf_support_poll_for_event:
6780 driver_remove_file(&megasas_pci_driver.driver,
6781 &driver_attr_release_date);
6782
6783err_dcf_rel_date:
6784 driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version); 6818 driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
6785err_dcf_attr_ver: 6819err_dcf_attr_ver:
6786 pci_unregister_driver(&megasas_pci_driver); 6820 pci_unregister_driver(&megasas_pci_driver);
@@ -6800,8 +6834,6 @@ static void __exit megasas_exit(void)
6800 &driver_attr_support_poll_for_event); 6834 &driver_attr_support_poll_for_event);
6801 driver_remove_file(&megasas_pci_driver.driver, 6835 driver_remove_file(&megasas_pci_driver.driver,
6802 &driver_attr_support_device_change); 6836 &driver_attr_support_device_change);
6803 driver_remove_file(&megasas_pci_driver.driver,
6804 &driver_attr_release_date);
6805 driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version); 6837 driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
6806 6838
6807 pci_unregister_driver(&megasas_pci_driver); 6839 pci_unregister_driver(&megasas_pci_driver);
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index 685e6f391fe4..460c6a3d4ade 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -1,7 +1,8 @@
1/* 1/*
2 * Linux MegaRAID driver for SAS based RAID controllers 2 * Linux MegaRAID driver for SAS based RAID controllers
3 * 3 *
4 * Copyright (c) 2009-2012 LSI Corporation. 4 * Copyright (c) 2009-2013 LSI Corporation
5 * Copyright (c) 2013-2014 Avago Technologies
5 * 6 *
6 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
@@ -14,20 +15,21 @@
14 * GNU General Public License for more details. 15 * GNU General Public License for more details.
15 * 16 *
16 * You should have received a copy of the GNU General Public License 17 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software 18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 * 19 *
20 * FILE: megaraid_sas_fp.c 20 * FILE: megaraid_sas_fp.c
21 * 21 *
22 * Authors: LSI Corporation 22 * Authors: Avago Technologies
23 * Sumant Patro 23 * Sumant Patro
24 * Varad Talamacki 24 * Varad Talamacki
25 * Manoj Jose 25 * Manoj Jose
26 * Kashyap Desai <kashyap.desai@avagotech.com>
27 * Sumit Saxena <sumit.saxena@avagotech.com>
26 * 28 *
27 * Send feedback to: <megaraidlinux@lsi.com> 29 * Send feedback to: megaraidlinux.pdl@avagotech.com
28 * 30 *
29 * Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035 31 * Mail to: Avago Technologies, 350 West Trimble Road, Building 90,
30 * ATTN: Linuxraid 32 * San Jose, California 95131
31 */ 33 */
32 34
33#include <linux/kernel.h> 35#include <linux/kernel.h>
@@ -183,14 +185,15 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
183 /* New Raid map will not set totalSize, so keep expected value 185 /* New Raid map will not set totalSize, so keep expected value
184 * for legacy code in ValidateMapInfo 186 * for legacy code in ValidateMapInfo
185 */ 187 */
186 pDrvRaidMap->totalSize = sizeof(struct MR_FW_RAID_MAP_EXT); 188 pDrvRaidMap->totalSize =
189 cpu_to_le32(sizeof(struct MR_FW_RAID_MAP_EXT));
187 } else { 190 } else {
188 fw_map_old = (struct MR_FW_RAID_MAP_ALL *) 191 fw_map_old = (struct MR_FW_RAID_MAP_ALL *)
189 fusion->ld_map[(instance->map_id & 1)]; 192 fusion->ld_map[(instance->map_id & 1)];
190 pFwRaidMap = &fw_map_old->raidMap; 193 pFwRaidMap = &fw_map_old->raidMap;
191 194
192#if VD_EXT_DEBUG 195#if VD_EXT_DEBUG
193 for (i = 0; i < pFwRaidMap->ldCount; i++) { 196 for (i = 0; i < le16_to_cpu(pFwRaidMap->ldCount); i++) {
194 dev_dbg(&instance->pdev->dev, "(%d) :Index 0x%x " 197 dev_dbg(&instance->pdev->dev, "(%d) :Index 0x%x "
195 "Target Id 0x%x Seq Num 0x%x Size 0/%llx\n", 198 "Target Id 0x%x Seq Num 0x%x Size 0/%llx\n",
196 instance->unique_id, i, 199 instance->unique_id, i,
@@ -202,12 +205,12 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
202 205
203 memset(drv_map, 0, fusion->drv_map_sz); 206 memset(drv_map, 0, fusion->drv_map_sz);
204 pDrvRaidMap->totalSize = pFwRaidMap->totalSize; 207 pDrvRaidMap->totalSize = pFwRaidMap->totalSize;
205 pDrvRaidMap->ldCount = pFwRaidMap->ldCount; 208 pDrvRaidMap->ldCount = (__le16)pFwRaidMap->ldCount;
206 pDrvRaidMap->fpPdIoTimeoutSec = pFwRaidMap->fpPdIoTimeoutSec; 209 pDrvRaidMap->fpPdIoTimeoutSec = pFwRaidMap->fpPdIoTimeoutSec;
207 for (i = 0; i < MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS; i++) 210 for (i = 0; i < MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS; i++)
208 pDrvRaidMap->ldTgtIdToLd[i] = 211 pDrvRaidMap->ldTgtIdToLd[i] =
209 (u8)pFwRaidMap->ldTgtIdToLd[i]; 212 (u8)pFwRaidMap->ldTgtIdToLd[i];
210 for (i = 0; i < pDrvRaidMap->ldCount; i++) { 213 for (i = 0; i < le16_to_cpu(pDrvRaidMap->ldCount); i++) {
211 pDrvRaidMap->ldSpanMap[i] = pFwRaidMap->ldSpanMap[i]; 214 pDrvRaidMap->ldSpanMap[i] = pFwRaidMap->ldSpanMap[i];
212#if VD_EXT_DEBUG 215#if VD_EXT_DEBUG
213 dev_dbg(&instance->pdev->dev, 216 dev_dbg(&instance->pdev->dev,
@@ -268,7 +271,7 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance)
268 else 271 else
269 expected_size = 272 expected_size =
270 (sizeof(struct MR_FW_RAID_MAP) - sizeof(struct MR_LD_SPAN_MAP) + 273 (sizeof(struct MR_FW_RAID_MAP) - sizeof(struct MR_LD_SPAN_MAP) +
271 (sizeof(struct MR_LD_SPAN_MAP) * le32_to_cpu(pDrvRaidMap->ldCount))); 274 (sizeof(struct MR_LD_SPAN_MAP) * le16_to_cpu(pDrvRaidMap->ldCount)));
272 275
273 if (le32_to_cpu(pDrvRaidMap->totalSize) != expected_size) { 276 if (le32_to_cpu(pDrvRaidMap->totalSize) != expected_size) {
274 dev_err(&instance->pdev->dev, "map info structure size 0x%x is not matching with ld count\n", 277 dev_err(&instance->pdev->dev, "map info structure size 0x%x is not matching with ld count\n",
@@ -284,7 +287,7 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance)
284 287
285 mr_update_load_balance_params(drv_map, lbInfo); 288 mr_update_load_balance_params(drv_map, lbInfo);
286 289
287 num_lds = le32_to_cpu(drv_map->raidMap.ldCount); 290 num_lds = le16_to_cpu(drv_map->raidMap.ldCount);
288 291
289 /*Convert Raid capability values to CPU arch */ 292 /*Convert Raid capability values to CPU arch */
290 for (ldCount = 0; ldCount < num_lds; ldCount++) { 293 for (ldCount = 0; ldCount < num_lds; ldCount++) {
@@ -457,7 +460,7 @@ u32 mr_spanset_get_span_block(struct megasas_instance *instance,
457 quad = &map->raidMap.ldSpanMap[ld]. 460 quad = &map->raidMap.ldSpanMap[ld].
458 spanBlock[span]. 461 spanBlock[span].
459 block_span_info.quad[info]; 462 block_span_info.quad[info];
460 if (le32_to_cpu(quad->diff == 0)) 463 if (le32_to_cpu(quad->diff) == 0)
461 return SPAN_INVALID; 464 return SPAN_INVALID;
462 if (le64_to_cpu(quad->logStart) <= row && 465 if (le64_to_cpu(quad->logStart) <= row &&
463 row <= le64_to_cpu(quad->logEnd) && 466 row <= le64_to_cpu(quad->logEnd) &&
@@ -520,7 +523,7 @@ static u64 get_row_from_strip(struct megasas_instance *instance,
520 span_set->span_row_data_width) * span_set->diff; 523 span_set->span_row_data_width) * span_set->diff;
521 for (span = 0, span_offset = 0; span < raid->spanDepth; span++) 524 for (span = 0, span_offset = 0; span < raid->spanDepth; span++)
522 if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span]. 525 if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
523 block_span_info.noElements >= info+1)) { 526 block_span_info.noElements) >= info+1) {
524 if (strip_offset >= 527 if (strip_offset >=
525 span_set->strip_offset[span]) 528 span_set->strip_offset[span])
526 span_offset++; 529 span_offset++;
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index f37eed682c75..71557f64bb5e 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -1,7 +1,8 @@
1/* 1/*
2 * Linux MegaRAID driver for SAS based RAID controllers 2 * Linux MegaRAID driver for SAS based RAID controllers
3 * 3 *
4 * Copyright (c) 2009-2012 LSI Corporation. 4 * Copyright (c) 2009-2013 LSI Corporation
5 * Copyright (c) 2013-2014 Avago Technologies
5 * 6 *
6 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
@@ -14,19 +15,20 @@
14 * GNU General Public License for more details. 15 * GNU General Public License for more details.
15 * 16 *
16 * You should have received a copy of the GNU General Public License 17 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software 18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 * 19 *
20 * FILE: megaraid_sas_fusion.c 20 * FILE: megaraid_sas_fusion.c
21 * 21 *
22 * Authors: LSI Corporation 22 * Authors: Avago Technologies
23 * Sumant Patro 23 * Sumant Patro
24 * Adam Radford <linuxraid@lsi.com> 24 * Adam Radford
25 * Kashyap Desai <kashyap.desai@avagotech.com>
26 * Sumit Saxena <sumit.saxena@avagotech.com>
25 * 27 *
26 * Send feedback to: <megaraidlinux@lsi.com> 28 * Send feedback to: megaraidlinux.pdl@avagotech.com
27 * 29 *
28 * Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035 30 * Mail to: Avago Technologies, 350 West Trimble Road, Building 90,
29 * ATTN: Linuxraid 31 * San Jose, California 95131
30 */ 32 */
31 33
32#include <linux/kernel.h> 34#include <linux/kernel.h>
@@ -880,7 +882,7 @@ megasas_sync_map_info(struct megasas_instance *instance)
880 882
881 map = fusion->ld_drv_map[instance->map_id & 1]; 883 map = fusion->ld_drv_map[instance->map_id & 1];
882 884
883 num_lds = le32_to_cpu(map->raidMap.ldCount); 885 num_lds = le16_to_cpu(map->raidMap.ldCount);
884 886
885 dcmd = &cmd->frame->dcmd; 887 dcmd = &cmd->frame->dcmd;
886 888
@@ -1065,48 +1067,16 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
1065 goto fail_ioc_init; 1067 goto fail_ioc_init;
1066 1068
1067 megasas_display_intel_branding(instance); 1069 megasas_display_intel_branding(instance);
1068 if (megasas_get_ctrl_info(instance, instance->ctrl_info)) { 1070 if (megasas_get_ctrl_info(instance)) {
1069 dev_err(&instance->pdev->dev, 1071 dev_err(&instance->pdev->dev,
1070 "Could not get controller info. Fail from %s %d\n", 1072 "Could not get controller info. Fail from %s %d\n",
1071 __func__, __LINE__); 1073 __func__, __LINE__);
1072 goto fail_ioc_init; 1074 goto fail_ioc_init;
1073 } 1075 }
1074 1076
1075 instance->supportmax256vd =
1076 instance->ctrl_info->adapterOperations3.supportMaxExtLDs;
1077 /* Below is additional check to address future FW enhancement */
1078 if (instance->ctrl_info->max_lds > 64)
1079 instance->supportmax256vd = 1;
1080 instance->drv_supported_vd_count = MEGASAS_MAX_LD_CHANNELS
1081 * MEGASAS_MAX_DEV_PER_CHANNEL;
1082 instance->drv_supported_pd_count = MEGASAS_MAX_PD_CHANNELS
1083 * MEGASAS_MAX_DEV_PER_CHANNEL;
1084 if (instance->supportmax256vd) {
1085 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT;
1086 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
1087 } else {
1088 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
1089 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
1090 }
1091 dev_info(&instance->pdev->dev, "Firmware supports %d VDs %d PDs\n"
1092 "Driver supports %d VDs %d PDs\n",
1093 instance->fw_supported_vd_count,
1094 instance->fw_supported_pd_count,
1095 instance->drv_supported_vd_count,
1096 instance->drv_supported_pd_count);
1097
1098 instance->flag_ieee = 1; 1077 instance->flag_ieee = 1;
1099 fusion->fast_path_io = 0; 1078 fusion->fast_path_io = 0;
1100 1079
1101 fusion->old_map_sz =
1102 sizeof(struct MR_FW_RAID_MAP) + (sizeof(struct MR_LD_SPAN_MAP) *
1103 (instance->fw_supported_vd_count - 1));
1104 fusion->new_map_sz =
1105 sizeof(struct MR_FW_RAID_MAP_EXT);
1106 fusion->drv_map_sz =
1107 sizeof(struct MR_DRV_RAID_MAP) + (sizeof(struct MR_LD_SPAN_MAP) *
1108 (instance->drv_supported_vd_count - 1));
1109
1110 fusion->drv_map_pages = get_order(fusion->drv_map_sz); 1080 fusion->drv_map_pages = get_order(fusion->drv_map_sz);
1111 for (i = 0; i < 2; i++) { 1081 for (i = 0; i < 2; i++) {
1112 fusion->ld_map[i] = NULL; 1082 fusion->ld_map[i] = NULL;
@@ -1121,16 +1091,10 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
1121 fusion->drv_map_pages); 1091 fusion->drv_map_pages);
1122 goto fail_ioc_init; 1092 goto fail_ioc_init;
1123 } 1093 }
1094 memset(fusion->ld_drv_map[i], 0,
1095 ((1 << PAGE_SHIFT) << fusion->drv_map_pages));
1124 } 1096 }
1125 1097
1126 fusion->max_map_sz = max(fusion->old_map_sz, fusion->new_map_sz);
1127
1128 if (instance->supportmax256vd)
1129 fusion->current_map_sz = fusion->new_map_sz;
1130 else
1131 fusion->current_map_sz = fusion->old_map_sz;
1132
1133
1134 for (i = 0; i < 2; i++) { 1098 for (i = 0; i < 2; i++) {
1135 fusion->ld_map[i] = dma_alloc_coherent(&instance->pdev->dev, 1099 fusion->ld_map[i] = dma_alloc_coherent(&instance->pdev->dev,
1136 fusion->max_map_sz, 1100 fusion->max_map_sz,
@@ -1173,9 +1137,10 @@ megasas_fire_cmd_fusion(struct megasas_instance *instance,
1173 struct megasas_register_set __iomem *regs) 1137 struct megasas_register_set __iomem *regs)
1174{ 1138{
1175#if defined(writeq) && defined(CONFIG_64BIT) 1139#if defined(writeq) && defined(CONFIG_64BIT)
1176 u64 req_data = (((u64)req_desc_hi << 32) | (u32)req_desc_lo); 1140 u64 req_data = (((u64)le32_to_cpu(req_desc_hi) << 32) |
1141 le32_to_cpu(req_desc_lo));
1177 1142
1178 writeq(le64_to_cpu(req_data), &(regs)->inbound_low_queue_port); 1143 writeq(req_data, &(regs)->inbound_low_queue_port);
1179#else 1144#else
1180 unsigned long flags; 1145 unsigned long flags;
1181 1146
@@ -1373,7 +1338,7 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
1373 /* Logical block reference tag */ 1338 /* Logical block reference tag */
1374 io_request->CDB.EEDP32.PrimaryReferenceTag = 1339 io_request->CDB.EEDP32.PrimaryReferenceTag =
1375 cpu_to_be32(ref_tag); 1340 cpu_to_be32(ref_tag);
1376 io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0xffff; 1341 io_request->CDB.EEDP32.PrimaryApplicationTagMask = cpu_to_be16(0xffff);
1377 io_request->IoFlags = cpu_to_le16(32); /* Specify 32-byte cdb */ 1342 io_request->IoFlags = cpu_to_le16(32); /* Specify 32-byte cdb */
1378 1343
1379 /* Transfer length */ 1344 /* Transfer length */
@@ -1769,7 +1734,7 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance,
1769 1734
1770 /* set RAID context values */ 1735 /* set RAID context values */
1771 pRAID_Context->regLockFlags = REGION_TYPE_SHARED_READ; 1736 pRAID_Context->regLockFlags = REGION_TYPE_SHARED_READ;
1772 pRAID_Context->timeoutValue = raid->fpIoTimeoutForLd; 1737 pRAID_Context->timeoutValue = cpu_to_le16(raid->fpIoTimeoutForLd);
1773 pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id); 1738 pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id);
1774 pRAID_Context->regLockRowLBA = 0; 1739 pRAID_Context->regLockRowLBA = 0;
1775 pRAID_Context->regLockLength = 0; 1740 pRAID_Context->regLockLength = 0;
@@ -2254,7 +2219,7 @@ build_mpt_mfi_pass_thru(struct megasas_instance *instance,
2254 * megasas_complete_cmd 2219 * megasas_complete_cmd
2255 */ 2220 */
2256 2221
2257 if (frame_hdr->flags & MFI_FRAME_DONT_POST_IN_REPLY_QUEUE) 2222 if (frame_hdr->flags & cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE))
2258 cmd->flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; 2223 cmd->flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2259 2224
2260 fusion = instance->ctrl_context; 2225 fusion = instance->ctrl_context;
@@ -2385,6 +2350,8 @@ megasas_alloc_host_crash_buffer(struct megasas_instance *instance)
2385 "memory allocation failed at index %d\n", i); 2350 "memory allocation failed at index %d\n", i);
2386 break; 2351 break;
2387 } 2352 }
2353 memset(instance->crash_buf[i], 0,
2354 ((1 << PAGE_SHIFT) << instance->crash_buf_pages));
2388 } 2355 }
2389 instance->drv_buf_alloc = i; 2356 instance->drv_buf_alloc = i;
2390} 2357}
@@ -2837,11 +2804,15 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
2837 } 2804 }
2838 } 2805 }
2839 2806
2840 clear_bit(MEGASAS_FUSION_IN_RESET, 2807 if (megasas_get_ctrl_info(instance)) {
2841 &instance->reset_flags); 2808 dev_info(&instance->pdev->dev,
2842 instance->instancet->enable_intr(instance); 2809 "Failed from %s %d\n",
2843 instance->adprecovery = MEGASAS_HBA_OPERATIONAL; 2810 __func__, __LINE__);
2844 2811 instance->adprecovery =
2812 MEGASAS_HW_CRITICAL_ERROR;
2813 megaraid_sas_kill_hba(instance);
2814 retval = FAILED;
2815 }
2845 /* Reset load balance info */ 2816 /* Reset load balance info */
2846 memset(fusion->load_balance_info, 0, 2817 memset(fusion->load_balance_info, 0,
2847 sizeof(struct LD_LOAD_BALANCE_INFO) 2818 sizeof(struct LD_LOAD_BALANCE_INFO)
@@ -2850,6 +2821,11 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
2850 if (!megasas_get_map_info(instance)) 2821 if (!megasas_get_map_info(instance))
2851 megasas_sync_map_info(instance); 2822 megasas_sync_map_info(instance);
2852 2823
2824 clear_bit(MEGASAS_FUSION_IN_RESET,
2825 &instance->reset_flags);
2826 instance->instancet->enable_intr(instance);
2827 instance->adprecovery = MEGASAS_HBA_OPERATIONAL;
2828
2853 /* Restart SR-IOV heartbeat */ 2829 /* Restart SR-IOV heartbeat */
2854 if (instance->requestorId) { 2830 if (instance->requestorId) {
2855 if (!megasas_sriov_start_heartbeat(instance, 0)) 2831 if (!megasas_sriov_start_heartbeat(instance, 0))
@@ -2866,14 +2842,14 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
2866 "successful for scsi%d.\n", 2842 "successful for scsi%d.\n",
2867 instance->host->host_no); 2843 instance->host->host_no);
2868 2844
2869 if (instance->crash_dump_drv_support) { 2845 if (instance->crash_dump_drv_support &&
2870 if (instance->crash_dump_app_support) 2846 instance->crash_dump_app_support)
2871 megasas_set_crash_dump_params(instance, 2847 megasas_set_crash_dump_params(instance,
2872 MR_CRASH_BUF_TURN_ON); 2848 MR_CRASH_BUF_TURN_ON);
2873 else 2849 else
2874 megasas_set_crash_dump_params(instance, 2850 megasas_set_crash_dump_params(instance,
2875 MR_CRASH_BUF_TURN_OFF); 2851 MR_CRASH_BUF_TURN_OFF);
2876 } 2852
2877 retval = SUCCESS; 2853 retval = SUCCESS;
2878 goto out; 2854 goto out;
2879 } 2855 }
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
index 0d183d521bdd..5ab7daee11be 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -1,7 +1,8 @@
1/* 1/*
2 * Linux MegaRAID driver for SAS based RAID controllers 2 * Linux MegaRAID driver for SAS based RAID controllers
3 * 3 *
4 * Copyright (c) 2009-2012 LSI Corporation. 4 * Copyright (c) 2009-2013 LSI Corporation
5 * Copyright (c) 2013-2014 Avago Technologies
5 * 6 *
6 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
@@ -14,19 +15,20 @@
14 * GNU General Public License for more details. 15 * GNU General Public License for more details.
15 * 16 *
16 * You should have received a copy of the GNU General Public License 17 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software 18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 * 19 *
20 * FILE: megaraid_sas_fusion.h 20 * FILE: megaraid_sas_fusion.h
21 * 21 *
22 * Authors: LSI Corporation 22 * Authors: Avago Technologies
23 * Manoj Jose 23 * Manoj Jose
24 * Sumant Patro 24 * Sumant Patro
25 * Kashyap Desai <kashyap.desai@avagotech.com>
26 * Sumit Saxena <sumit.saxena@avagotech.com>
25 * 27 *
26 * Send feedback to: <megaraidlinux@lsi.com> 28 * Send feedback to: megaraidlinux.pdl@avagotech.com
27 * 29 *
28 * Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035 30 * Mail to: Avago Technologies, 350 West Trimble Road, Building 90,
29 * ATTN: Linuxraid 31 * San Jose, California 95131
30 */ 32 */
31 33
32#ifndef _MEGARAID_SAS_FUSION_H_ 34#ifndef _MEGARAID_SAS_FUSION_H_
@@ -834,8 +836,6 @@ struct fusion_context {
834 836
835 u32 max_map_sz; 837 u32 max_map_sz;
836 u32 current_map_sz; 838 u32 current_map_sz;
837 u32 old_map_sz;
838 u32 new_map_sz;
839 u32 drv_map_sz; 839 u32 drv_map_sz;
840 u32 drv_map_pages; 840 u32 drv_map_pages;
841 u8 fast_path_io; 841 u8 fast_path_io;
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index c80ed0482649..8431eb10bbb1 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -1179,15 +1179,14 @@ _scsih_build_scatter_gather(struct MPT2SAS_ADAPTER *ioc,
1179} 1179}
1180 1180
1181/** 1181/**
1182 * _scsih_adjust_queue_depth - setting device queue depth 1182 * _scsih_change_queue_depth - setting device queue depth
1183 * @sdev: scsi device struct 1183 * @sdev: scsi device struct
1184 * @qdepth: requested queue depth 1184 * @qdepth: requested queue depth
1185 * 1185 *
1186 * 1186 * Returns queue depth.
1187 * Returns nothing
1188 */ 1187 */
1189static void 1188static int
1190_scsih_adjust_queue_depth(struct scsi_device *sdev, int qdepth) 1189_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
1191{ 1190{
1192 struct Scsi_Host *shost = sdev->host; 1191 struct Scsi_Host *shost = sdev->host;
1193 int max_depth; 1192 int max_depth;
@@ -1217,63 +1216,11 @@ _scsih_adjust_queue_depth(struct scsi_device *sdev, int qdepth)
1217 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 1216 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1218 1217
1219 not_sata: 1218 not_sata:
1220
1221 if (!sdev->tagged_supported) 1219 if (!sdev->tagged_supported)
1222 max_depth = 1; 1220 max_depth = 1;
1223 if (qdepth > max_depth) 1221 if (qdepth > max_depth)
1224 qdepth = max_depth; 1222 qdepth = max_depth;
1225 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); 1223 return scsi_change_queue_depth(sdev, qdepth);
1226}
1227
1228/**
1229 * _scsih_change_queue_depth - setting device queue depth
1230 * @sdev: scsi device struct
1231 * @qdepth: requested queue depth
1232 * @reason: SCSI_QDEPTH_DEFAULT/SCSI_QDEPTH_QFULL/SCSI_QDEPTH_RAMP_UP
1233 * (see include/scsi/scsi_host.h for definition)
1234 *
1235 * Returns queue depth.
1236 */
1237static int
1238_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
1239{
1240 if (reason == SCSI_QDEPTH_DEFAULT || reason == SCSI_QDEPTH_RAMP_UP)
1241 _scsih_adjust_queue_depth(sdev, qdepth);
1242 else if (reason == SCSI_QDEPTH_QFULL)
1243 scsi_track_queue_full(sdev, qdepth);
1244 else
1245 return -EOPNOTSUPP;
1246
1247 if (sdev->inquiry_len > 7)
1248 sdev_printk(KERN_INFO, sdev, "qdepth(%d), tagged(%d), "
1249 "simple(%d), ordered(%d), scsi_level(%d), cmd_que(%d)\n",
1250 sdev->queue_depth, sdev->tagged_supported, sdev->simple_tags,
1251 sdev->ordered_tags, sdev->scsi_level,
1252 (sdev->inquiry[7] & 2) >> 1);
1253
1254 return sdev->queue_depth;
1255}
1256
1257/**
1258 * _scsih_change_queue_type - changing device queue tag type
1259 * @sdev: scsi device struct
1260 * @tag_type: requested tag type
1261 *
1262 * Returns queue tag type.
1263 */
1264static int
1265_scsih_change_queue_type(struct scsi_device *sdev, int tag_type)
1266{
1267 if (sdev->tagged_supported) {
1268 scsi_set_tag_type(sdev, tag_type);
1269 if (tag_type)
1270 scsi_activate_tcq(sdev, sdev->queue_depth);
1271 else
1272 scsi_deactivate_tcq(sdev, sdev->queue_depth);
1273 } else
1274 tag_type = 0;
1275
1276 return tag_type;
1277} 1224}
1278 1225
1279/** 1226/**
@@ -2104,7 +2051,7 @@ _scsih_slave_configure(struct scsi_device *sdev)
2104 r_level, raid_device->handle, 2051 r_level, raid_device->handle,
2105 (unsigned long long)raid_device->wwid, 2052 (unsigned long long)raid_device->wwid,
2106 raid_device->num_pds, ds); 2053 raid_device->num_pds, ds);
2107 _scsih_change_queue_depth(sdev, qdepth, SCSI_QDEPTH_DEFAULT); 2054 _scsih_change_queue_depth(sdev, qdepth);
2108 /* raid transport support */ 2055 /* raid transport support */
2109 if (!ioc->is_warpdrive) 2056 if (!ioc->is_warpdrive)
2110 _scsih_set_level(sdev, raid_device->volume_type); 2057 _scsih_set_level(sdev, raid_device->volume_type);
@@ -2169,7 +2116,7 @@ _scsih_slave_configure(struct scsi_device *sdev)
2169 _scsih_display_sata_capabilities(ioc, handle, sdev); 2116 _scsih_display_sata_capabilities(ioc, handle, sdev);
2170 2117
2171 2118
2172 _scsih_change_queue_depth(sdev, qdepth, SCSI_QDEPTH_DEFAULT); 2119 _scsih_change_queue_depth(sdev, qdepth);
2173 2120
2174 if (ssp_target) { 2121 if (ssp_target) {
2175 sas_read_port_mode_page(sdev); 2122 sas_read_port_mode_page(sdev);
@@ -3966,16 +3913,8 @@ _scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
3966 mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER; 3913 mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
3967 3914
3968 /* set tags */ 3915 /* set tags */
3969 if (!(sas_device_priv_data->flags & MPT_DEVICE_FLAGS_INIT)) { 3916 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
3970 if (scmd->device->tagged_supported) { 3917
3971 if (scmd->device->ordered_tags)
3972 mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
3973 else
3974 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
3975 } else
3976 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
3977 } else
3978 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
3979 /* Make sure Device is not raid volume. 3918 /* Make sure Device is not raid volume.
3980 * We do not expose raid functionality to upper layer for warpdrive. 3919 * We do not expose raid functionality to upper layer for warpdrive.
3981 */ 3920 */
@@ -7653,7 +7592,7 @@ static struct scsi_host_template scsih_driver_template = {
7653 .scan_finished = _scsih_scan_finished, 7592 .scan_finished = _scsih_scan_finished,
7654 .scan_start = _scsih_scan_start, 7593 .scan_start = _scsih_scan_start,
7655 .change_queue_depth = _scsih_change_queue_depth, 7594 .change_queue_depth = _scsih_change_queue_depth,
7656 .change_queue_type = _scsih_change_queue_type, 7595 .change_queue_type = scsi_change_queue_type,
7657 .eh_abort_handler = _scsih_abort, 7596 .eh_abort_handler = _scsih_abort,
7658 .eh_device_reset_handler = _scsih_dev_reset, 7597 .eh_device_reset_handler = _scsih_dev_reset,
7659 .eh_target_reset_handler = _scsih_target_reset, 7598 .eh_target_reset_handler = _scsih_target_reset,
@@ -7667,6 +7606,7 @@ static struct scsi_host_template scsih_driver_template = {
7667 .use_clustering = ENABLE_CLUSTERING, 7606 .use_clustering = ENABLE_CLUSTERING,
7668 .shost_attrs = mpt2sas_host_attrs, 7607 .shost_attrs = mpt2sas_host_attrs,
7669 .sdev_attrs = mpt2sas_dev_attrs, 7608 .sdev_attrs = mpt2sas_dev_attrs,
7609 .track_queue_depth = 1,
7670}; 7610};
7671 7611
7672/** 7612/**
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 857276b8880f..a2b60991efd4 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -1053,9 +1053,15 @@ _scsih_scsi_lookup_find_by_lun(struct MPT3SAS_ADAPTER *ioc, int id,
1053 return found; 1053 return found;
1054} 1054}
1055 1055
1056 1056/**
1057static void 1057 * _scsih_change_queue_depth - setting device queue depth
1058_scsih_adjust_queue_depth(struct scsi_device *sdev, int qdepth) 1058 * @sdev: scsi device struct
1059 * @qdepth: requested queue depth
1060 *
1061 * Returns queue depth.
1062 */
1063static int
1064_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
1059{ 1065{
1060 struct Scsi_Host *shost = sdev->host; 1066 struct Scsi_Host *shost = sdev->host;
1061 int max_depth; 1067 int max_depth;
@@ -1090,62 +1096,10 @@ _scsih_adjust_queue_depth(struct scsi_device *sdev, int qdepth)
1090 max_depth = 1; 1096 max_depth = 1;
1091 if (qdepth > max_depth) 1097 if (qdepth > max_depth)
1092 qdepth = max_depth; 1098 qdepth = max_depth;
1093 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); 1099 return scsi_change_queue_depth(sdev, qdepth);
1094} 1100}
1095 1101
1096/** 1102/**
1097 * _scsih_change_queue_depth - setting device queue depth
1098 * @sdev: scsi device struct
1099 * @qdepth: requested queue depth
1100 * @reason: SCSI_QDEPTH_DEFAULT/SCSI_QDEPTH_QFULL/SCSI_QDEPTH_RAMP_UP
1101 * (see include/scsi/scsi_host.h for definition)
1102 *
1103 * Returns queue depth.
1104 */
1105static int
1106_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
1107{
1108 if (reason == SCSI_QDEPTH_DEFAULT || reason == SCSI_QDEPTH_RAMP_UP)
1109 _scsih_adjust_queue_depth(sdev, qdepth);
1110 else if (reason == SCSI_QDEPTH_QFULL)
1111 scsi_track_queue_full(sdev, qdepth);
1112 else
1113 return -EOPNOTSUPP;
1114
1115 if (sdev->inquiry_len > 7)
1116 sdev_printk(KERN_INFO, sdev, "qdepth(%d), tagged(%d), " \
1117 "simple(%d), ordered(%d), scsi_level(%d), cmd_que(%d)\n",
1118 sdev->queue_depth, sdev->tagged_supported, sdev->simple_tags,
1119 sdev->ordered_tags, sdev->scsi_level,
1120 (sdev->inquiry[7] & 2) >> 1);
1121
1122 return sdev->queue_depth;
1123}
1124
1125/**
1126 * _scsih_change_queue_type - changing device queue tag type
1127 * @sdev: scsi device struct
1128 * @tag_type: requested tag type
1129 *
1130 * Returns queue tag type.
1131 */
1132static int
1133_scsih_change_queue_type(struct scsi_device *sdev, int tag_type)
1134{
1135 if (sdev->tagged_supported) {
1136 scsi_set_tag_type(sdev, tag_type);
1137 if (tag_type)
1138 scsi_activate_tcq(sdev, sdev->queue_depth);
1139 else
1140 scsi_deactivate_tcq(sdev, sdev->queue_depth);
1141 } else
1142 tag_type = 0;
1143
1144 return tag_type;
1145}
1146
1147
1148/**
1149 * _scsih_target_alloc - target add routine 1103 * _scsih_target_alloc - target add routine
1150 * @starget: scsi target struct 1104 * @starget: scsi target struct
1151 * 1105 *
@@ -1762,7 +1716,7 @@ _scsih_slave_configure(struct scsi_device *sdev)
1762 raid_device->num_pds, ds); 1716 raid_device->num_pds, ds);
1763 1717
1764 1718
1765 _scsih_change_queue_depth(sdev, qdepth, SCSI_QDEPTH_DEFAULT); 1719 _scsih_change_queue_depth(sdev, qdepth);
1766 1720
1767/* raid transport support */ 1721/* raid transport support */
1768 _scsih_set_level(sdev, raid_device->volume_type); 1722 _scsih_set_level(sdev, raid_device->volume_type);
@@ -1828,7 +1782,7 @@ _scsih_slave_configure(struct scsi_device *sdev)
1828 _scsih_display_sata_capabilities(ioc, handle, sdev); 1782 _scsih_display_sata_capabilities(ioc, handle, sdev);
1829 1783
1830 1784
1831 _scsih_change_queue_depth(sdev, qdepth, SCSI_QDEPTH_DEFAULT); 1785 _scsih_change_queue_depth(sdev, qdepth);
1832 1786
1833 if (ssp_target) { 1787 if (ssp_target) {
1834 sas_read_port_mode_page(sdev); 1788 sas_read_port_mode_page(sdev);
@@ -3586,16 +3540,7 @@ _scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
3586 mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER; 3540 mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
3587 3541
3588 /* set tags */ 3542 /* set tags */
3589 if (!(sas_device_priv_data->flags & MPT_DEVICE_FLAGS_INIT)) { 3543 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
3590 if (scmd->device->tagged_supported) {
3591 if (scmd->device->ordered_tags)
3592 mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
3593 else
3594 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
3595 } else
3596 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
3597 } else
3598 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
3599 3544
3600 if ((sas_device_priv_data->flags & MPT_DEVICE_TLR_ON) && 3545 if ((sas_device_priv_data->flags & MPT_DEVICE_TLR_ON) &&
3601 scmd->cmd_len != 32) 3546 scmd->cmd_len != 32)
@@ -7284,7 +7229,7 @@ static struct scsi_host_template scsih_driver_template = {
7284 .scan_finished = _scsih_scan_finished, 7229 .scan_finished = _scsih_scan_finished,
7285 .scan_start = _scsih_scan_start, 7230 .scan_start = _scsih_scan_start,
7286 .change_queue_depth = _scsih_change_queue_depth, 7231 .change_queue_depth = _scsih_change_queue_depth,
7287 .change_queue_type = _scsih_change_queue_type, 7232 .change_queue_type = scsi_change_queue_type,
7288 .eh_abort_handler = _scsih_abort, 7233 .eh_abort_handler = _scsih_abort,
7289 .eh_device_reset_handler = _scsih_dev_reset, 7234 .eh_device_reset_handler = _scsih_dev_reset,
7290 .eh_target_reset_handler = _scsih_target_reset, 7235 .eh_target_reset_handler = _scsih_target_reset,
@@ -7298,6 +7243,7 @@ static struct scsi_host_template scsih_driver_template = {
7298 .use_clustering = ENABLE_CLUSTERING, 7243 .use_clustering = ENABLE_CLUSTERING,
7299 .shost_attrs = mpt3sas_host_attrs, 7244 .shost_attrs = mpt3sas_host_attrs,
7300 .sdev_attrs = mpt3sas_dev_attrs, 7245 .sdev_attrs = mpt3sas_dev_attrs,
7246 .track_queue_depth = 1,
7301}; 7247};
7302 7248
7303/** 7249/**
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index eacee48a955c..f15df3de6790 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -26,18 +26,9 @@
26 26
27#include "mv_sas.h" 27#include "mv_sas.h"
28 28
29static int lldd_max_execute_num = 1;
30module_param_named(collector, lldd_max_execute_num, int, S_IRUGO);
31MODULE_PARM_DESC(collector, "\n"
32 "\tIf greater than one, tells the SAS Layer to run in Task Collector\n"
33 "\tMode. If 1 or 0, tells the SAS Layer to run in Direct Mode.\n"
34 "\tThe mvsas SAS LLDD supports both modes.\n"
35 "\tDefault: 1 (Direct Mode).\n");
36
37int interrupt_coalescing = 0x80; 29int interrupt_coalescing = 0x80;
38 30
39static struct scsi_transport_template *mvs_stt; 31static struct scsi_transport_template *mvs_stt;
40struct kmem_cache *mvs_task_list_cache;
41static const struct mvs_chip_info mvs_chips[] = { 32static const struct mvs_chip_info mvs_chips[] = {
42 [chip_6320] = { 1, 2, 0x400, 17, 16, 6, 9, &mvs_64xx_dispatch, }, 33 [chip_6320] = { 1, 2, 0x400, 17, 16, 6, 9, &mvs_64xx_dispatch, },
43 [chip_6440] = { 1, 4, 0x400, 17, 16, 6, 9, &mvs_64xx_dispatch, }, 34 [chip_6440] = { 1, 4, 0x400, 17, 16, 6, 9, &mvs_64xx_dispatch, },
@@ -76,6 +67,8 @@ static struct scsi_host_template mvs_sht = {
76 .target_destroy = sas_target_destroy, 67 .target_destroy = sas_target_destroy,
77 .ioctl = sas_ioctl, 68 .ioctl = sas_ioctl,
78 .shost_attrs = mvst_host_attrs, 69 .shost_attrs = mvst_host_attrs,
70 .use_blk_tags = 1,
71 .track_queue_depth = 1,
79}; 72};
80 73
81static struct sas_domain_function_template mvs_transport_ops = { 74static struct sas_domain_function_template mvs_transport_ops = {
@@ -511,14 +504,11 @@ static void mvs_post_sas_ha_init(struct Scsi_Host *shost,
511 504
512 sha->num_phys = nr_core * chip_info->n_phy; 505 sha->num_phys = nr_core * chip_info->n_phy;
513 506
514 sha->lldd_max_execute_num = lldd_max_execute_num;
515
516 if (mvi->flags & MVF_FLAG_SOC) 507 if (mvi->flags & MVF_FLAG_SOC)
517 can_queue = MVS_SOC_CAN_QUEUE; 508 can_queue = MVS_SOC_CAN_QUEUE;
518 else 509 else
519 can_queue = MVS_CHIP_SLOT_SZ; 510 can_queue = MVS_CHIP_SLOT_SZ;
520 511
521 sha->lldd_queue_size = can_queue;
522 shost->sg_tablesize = min_t(u16, SG_ALL, MVS_MAX_SG); 512 shost->sg_tablesize = min_t(u16, SG_ALL, MVS_MAX_SG);
523 shost->can_queue = can_queue; 513 shost->can_queue = can_queue;
524 mvi->shost->cmd_per_lun = MVS_QUEUE_SIZE; 514 mvi->shost->cmd_per_lun = MVS_QUEUE_SIZE;
@@ -831,16 +821,7 @@ static int __init mvs_init(void)
831 if (!mvs_stt) 821 if (!mvs_stt)
832 return -ENOMEM; 822 return -ENOMEM;
833 823
834 mvs_task_list_cache = kmem_cache_create("mvs_task_list", sizeof(struct mvs_task_list),
835 0, SLAB_HWCACHE_ALIGN, NULL);
836 if (!mvs_task_list_cache) {
837 rc = -ENOMEM;
838 mv_printk("%s: mvs_task_list_cache alloc failed! \n", __func__);
839 goto err_out;
840 }
841
842 rc = pci_register_driver(&mvs_pci_driver); 824 rc = pci_register_driver(&mvs_pci_driver);
843
844 if (rc) 825 if (rc)
845 goto err_out; 826 goto err_out;
846 827
@@ -855,7 +836,6 @@ static void __exit mvs_exit(void)
855{ 836{
856 pci_unregister_driver(&mvs_pci_driver); 837 pci_unregister_driver(&mvs_pci_driver);
857 sas_release_transport(mvs_stt); 838 sas_release_transport(mvs_stt);
858 kmem_cache_destroy(mvs_task_list_cache);
859} 839}
860 840
861struct device_attribute *mvst_host_attrs[] = { 841struct device_attribute *mvst_host_attrs[] = {
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index ac52f7c99513..85d86a5cdb60 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -852,43 +852,7 @@ prep_out:
852 return rc; 852 return rc;
853} 853}
854 854
855static struct mvs_task_list *mvs_task_alloc_list(int *num, gfp_t gfp_flags) 855static int mvs_task_exec(struct sas_task *task, gfp_t gfp_flags,
856{
857 struct mvs_task_list *first = NULL;
858
859 for (; *num > 0; --*num) {
860 struct mvs_task_list *mvs_list = kmem_cache_zalloc(mvs_task_list_cache, gfp_flags);
861
862 if (!mvs_list)
863 break;
864
865 INIT_LIST_HEAD(&mvs_list->list);
866 if (!first)
867 first = mvs_list;
868 else
869 list_add_tail(&mvs_list->list, &first->list);
870
871 }
872
873 return first;
874}
875
876static inline void mvs_task_free_list(struct mvs_task_list *mvs_list)
877{
878 LIST_HEAD(list);
879 struct list_head *pos, *a;
880 struct mvs_task_list *mlist = NULL;
881
882 __list_add(&list, mvs_list->list.prev, &mvs_list->list);
883
884 list_for_each_safe(pos, a, &list) {
885 list_del_init(pos);
886 mlist = list_entry(pos, struct mvs_task_list, list);
887 kmem_cache_free(mvs_task_list_cache, mlist);
888 }
889}
890
891static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags,
892 struct completion *completion, int is_tmf, 856 struct completion *completion, int is_tmf,
893 struct mvs_tmf_task *tmf) 857 struct mvs_tmf_task *tmf)
894{ 858{
@@ -912,74 +876,9 @@ static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags,
912 return rc; 876 return rc;
913} 877}
914 878
915static int mvs_collector_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags, 879int mvs_queue_command(struct sas_task *task, gfp_t gfp_flags)
916 struct completion *completion, int is_tmf,
917 struct mvs_tmf_task *tmf)
918{ 880{
919 struct domain_device *dev = task->dev; 881 return mvs_task_exec(task, gfp_flags, NULL, 0, NULL);
920 struct mvs_prv_info *mpi = dev->port->ha->lldd_ha;
921 struct mvs_info *mvi = NULL;
922 struct sas_task *t = task;
923 struct mvs_task_list *mvs_list = NULL, *a;
924 LIST_HEAD(q);
925 int pass[2] = {0};
926 u32 rc = 0;
927 u32 n = num;
928 unsigned long flags = 0;
929
930 mvs_list = mvs_task_alloc_list(&n, gfp_flags);
931 if (n) {
932 printk(KERN_ERR "%s: mvs alloc list failed.\n", __func__);
933 rc = -ENOMEM;
934 goto free_list;
935 }
936
937 __list_add(&q, mvs_list->list.prev, &mvs_list->list);
938
939 list_for_each_entry(a, &q, list) {
940 a->task = t;
941 t = list_entry(t->list.next, struct sas_task, list);
942 }
943
944 list_for_each_entry(a, &q , list) {
945
946 t = a->task;
947 mvi = ((struct mvs_device *)t->dev->lldd_dev)->mvi_info;
948
949 spin_lock_irqsave(&mvi->lock, flags);
950 rc = mvs_task_prep(t, mvi, is_tmf, tmf, &pass[mvi->id]);
951 if (rc)
952 dev_printk(KERN_ERR, mvi->dev, "mvsas exec failed[%d]!\n", rc);
953 spin_unlock_irqrestore(&mvi->lock, flags);
954 }
955
956 if (likely(pass[0]))
957 MVS_CHIP_DISP->start_delivery(mpi->mvi[0],
958 (mpi->mvi[0]->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1));
959
960 if (likely(pass[1]))
961 MVS_CHIP_DISP->start_delivery(mpi->mvi[1],
962 (mpi->mvi[1]->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1));
963
964 list_del_init(&q);
965
966free_list:
967 if (mvs_list)
968 mvs_task_free_list(mvs_list);
969
970 return rc;
971}
972
973int mvs_queue_command(struct sas_task *task, const int num,
974 gfp_t gfp_flags)
975{
976 struct mvs_device *mvi_dev = task->dev->lldd_dev;
977 struct sas_ha_struct *sas = mvi_dev->mvi_info->sas;
978
979 if (sas->lldd_max_execute_num < 2)
980 return mvs_task_exec(task, num, gfp_flags, NULL, 0, NULL);
981 else
982 return mvs_collector_task_exec(task, num, gfp_flags, NULL, 0, NULL);
983} 882}
984 883
985static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc) 884static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc)
@@ -1411,7 +1310,7 @@ static int mvs_exec_internal_tmf_task(struct domain_device *dev,
1411 task->slow_task->timer.expires = jiffies + MVS_TASK_TIMEOUT*HZ; 1310 task->slow_task->timer.expires = jiffies + MVS_TASK_TIMEOUT*HZ;
1412 add_timer(&task->slow_task->timer); 1311 add_timer(&task->slow_task->timer);
1413 1312
1414 res = mvs_task_exec(task, 1, GFP_KERNEL, NULL, 1, tmf); 1313 res = mvs_task_exec(task, GFP_KERNEL, NULL, 1, tmf);
1415 1314
1416 if (res) { 1315 if (res) {
1417 del_timer(&task->slow_task->timer); 1316 del_timer(&task->slow_task->timer);
diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h
index d6b19dc80bee..dc409c04747a 100644
--- a/drivers/scsi/mvsas/mv_sas.h
+++ b/drivers/scsi/mvsas/mv_sas.h
@@ -65,7 +65,6 @@ extern struct mvs_tgt_initiator mvs_tgt;
65extern struct mvs_info *tgt_mvi; 65extern struct mvs_info *tgt_mvi;
66extern const struct mvs_dispatch mvs_64xx_dispatch; 66extern const struct mvs_dispatch mvs_64xx_dispatch;
67extern const struct mvs_dispatch mvs_94xx_dispatch; 67extern const struct mvs_dispatch mvs_94xx_dispatch;
68extern struct kmem_cache *mvs_task_list_cache;
69 68
70#define DEV_IS_EXPANDER(type) \ 69#define DEV_IS_EXPANDER(type) \
71 ((type == SAS_EDGE_EXPANDER_DEVICE) || (type == SAS_FANOUT_EXPANDER_DEVICE)) 70 ((type == SAS_EDGE_EXPANDER_DEVICE) || (type == SAS_FANOUT_EXPANDER_DEVICE))
@@ -440,12 +439,6 @@ struct mvs_task_exec_info {
440 int n_elem; 439 int n_elem;
441}; 440};
442 441
443struct mvs_task_list {
444 struct sas_task *task;
445 struct list_head list;
446};
447
448
449/******************** function prototype *********************/ 442/******************** function prototype *********************/
450void mvs_get_sas_addr(void *buf, u32 buflen); 443void mvs_get_sas_addr(void *buf, u32 buflen);
451void mvs_tag_clear(struct mvs_info *mvi, u32 tag); 444void mvs_tag_clear(struct mvs_info *mvi, u32 tag);
@@ -462,8 +455,7 @@ void mvs_set_sas_addr(struct mvs_info *mvi, int port_id, u32 off_lo,
462 u32 off_hi, u64 sas_addr); 455 u32 off_hi, u64 sas_addr);
463void mvs_scan_start(struct Scsi_Host *shost); 456void mvs_scan_start(struct Scsi_Host *shost);
464int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time); 457int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time);
465int mvs_queue_command(struct sas_task *task, const int num, 458int mvs_queue_command(struct sas_task *task, gfp_t gfp_flags);
466 gfp_t gfp_flags);
467int mvs_abort_task(struct sas_task *task); 459int mvs_abort_task(struct sas_task *task);
468int mvs_abort_task_set(struct domain_device *dev, u8 *lun); 460int mvs_abort_task_set(struct domain_device *dev, u8 *lun);
469int mvs_clear_aca(struct domain_device *dev, u8 *lun); 461int mvs_clear_aca(struct domain_device *dev, u8 *lun);
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index a7305ffc359d..5b93ed810f6e 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -7997,10 +7997,7 @@ static int ncr53c8xx_slave_configure(struct scsi_device *device)
7997 if (depth_to_use > MAX_TAGS) 7997 if (depth_to_use > MAX_TAGS)
7998 depth_to_use = MAX_TAGS; 7998 depth_to_use = MAX_TAGS;
7999 7999
8000 scsi_adjust_queue_depth(device, 8000 scsi_change_queue_depth(device, depth_to_use);
8001 (device->tagged_supported ?
8002 MSG_SIMPLE_TAG : 0),
8003 depth_to_use);
8004 8001
8005 /* 8002 /*
8006 ** Since the queue depth is not tunable under Linux, 8003 ** Since the queue depth is not tunable under Linux,
diff --git a/drivers/scsi/osd/osd_uld.c b/drivers/scsi/osd/osd_uld.c
index 92cdd4b06526..243eab3d10d0 100644
--- a/drivers/scsi/osd/osd_uld.c
+++ b/drivers/scsi/osd/osd_uld.c
@@ -540,9 +540,9 @@ static int osd_remove(struct device *dev)
540 */ 540 */
541 541
542static struct scsi_driver osd_driver = { 542static struct scsi_driver osd_driver = {
543 .owner = THIS_MODULE,
544 .gendrv = { 543 .gendrv = {
545 .name = osd_name, 544 .name = osd_name,
545 .owner = THIS_MODULE,
546 .probe = osd_probe, 546 .probe = osd_probe,
547 .remove = osd_remove, 547 .remove = osd_remove,
548 } 548 }
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c
index dff37a250d79..5033223f6287 100644
--- a/drivers/scsi/osst.c
+++ b/drivers/scsi/osst.c
@@ -172,9 +172,9 @@ static int osst_probe(struct device *);
172static int osst_remove(struct device *); 172static int osst_remove(struct device *);
173 173
174static struct scsi_driver osst_template = { 174static struct scsi_driver osst_template = {
175 .owner = THIS_MODULE,
176 .gendrv = { 175 .gendrv = {
177 .name = "osst", 176 .name = "osst",
177 .owner = THIS_MODULE,
178 .probe = osst_probe, 178 .probe = osst_probe,
179 .remove = osst_remove, 179 .remove = osst_remove,
180 } 180 }
@@ -259,9 +259,10 @@ static int osst_chk_result(struct osst_tape * STp, struct osst_request * SRpnt)
259 SRpnt->cmd[0], SRpnt->cmd[1], SRpnt->cmd[2], 259 SRpnt->cmd[0], SRpnt->cmd[1], SRpnt->cmd[2],
260 SRpnt->cmd[3], SRpnt->cmd[4], SRpnt->cmd[5]); 260 SRpnt->cmd[3], SRpnt->cmd[4], SRpnt->cmd[5]);
261 if (scode) printk(OSST_DEB_MSG "%s:D: Sense: %02x, ASC: %02x, ASCQ: %02x\n", 261 if (scode) printk(OSST_DEB_MSG "%s:D: Sense: %02x, ASC: %02x, ASCQ: %02x\n",
262 name, scode, sense[12], sense[13]); 262 name, scode, sense[12], sense[13]);
263 if (cmdstatp->have_sense) 263 if (cmdstatp->have_sense)
264 __scsi_print_sense("osst ", SRpnt->sense, SCSI_SENSE_BUFFERSIZE); 264 __scsi_print_sense(STp->device, name,
265 SRpnt->sense, SCSI_SENSE_BUFFERSIZE);
265 } 266 }
266 else 267 else
267#endif 268#endif
@@ -275,7 +276,8 @@ static int osst_chk_result(struct osst_tape * STp, struct osst_request * SRpnt)
275 SRpnt->cmd[0] != TEST_UNIT_READY)) { /* Abnormal conditions for tape */ 276 SRpnt->cmd[0] != TEST_UNIT_READY)) { /* Abnormal conditions for tape */
276 if (cmdstatp->have_sense) { 277 if (cmdstatp->have_sense) {
277 printk(KERN_WARNING "%s:W: Command with sense data:\n", name); 278 printk(KERN_WARNING "%s:W: Command with sense data:\n", name);
278 __scsi_print_sense("osst ", SRpnt->sense, SCSI_SENSE_BUFFERSIZE); 279 __scsi_print_sense(STp->device, name,
280 SRpnt->sense, SCSI_SENSE_BUFFERSIZE);
279 } 281 }
280 else { 282 else {
281 static int notyetprinted = 1; 283 static int notyetprinted = 1;
@@ -3325,19 +3327,18 @@ static int osst_write_frame(struct osst_tape * STp, struct osst_request ** aSRpn
3325/* Lock or unlock the drive door. Don't use when struct osst_request allocated. */ 3327/* Lock or unlock the drive door. Don't use when struct osst_request allocated. */
3326static int do_door_lock(struct osst_tape * STp, int do_lock) 3328static int do_door_lock(struct osst_tape * STp, int do_lock)
3327{ 3329{
3328 int retval, cmd; 3330 int retval;
3329 3331
3330 cmd = do_lock ? SCSI_IOCTL_DOORLOCK : SCSI_IOCTL_DOORUNLOCK;
3331#if DEBUG 3332#if DEBUG
3332 printk(OSST_DEB_MSG "%s:D: %socking drive door.\n", tape_name(STp), do_lock ? "L" : "Unl"); 3333 printk(OSST_DEB_MSG "%s:D: %socking drive door.\n", tape_name(STp), do_lock ? "L" : "Unl");
3333#endif 3334#endif
3334 retval = scsi_ioctl(STp->device, cmd, NULL); 3335
3335 if (!retval) { 3336 retval = scsi_set_medium_removal(STp->device,
3337 do_lock ? SCSI_REMOVAL_PREVENT : SCSI_REMOVAL_ALLOW);
3338 if (!retval)
3336 STp->door_locked = do_lock ? ST_LOCKED_EXPLICIT : ST_UNLOCKED; 3339 STp->door_locked = do_lock ? ST_LOCKED_EXPLICIT : ST_UNLOCKED;
3337 } 3340 else
3338 else {
3339 STp->door_locked = ST_LOCK_FAILS; 3341 STp->door_locked = ST_LOCK_FAILS;
3340 }
3341 return retval; 3342 return retval;
3342} 3343}
3343 3344
@@ -4967,10 +4968,10 @@ static long osst_ioctl(struct file * file,
4967 * may try and take the device offline, in which case all further 4968 * may try and take the device offline, in which case all further
4968 * access to the device is prohibited. 4969 * access to the device is prohibited.
4969 */ 4970 */
4970 if( !scsi_block_when_processing_errors(STp->device) ) { 4971 retval = scsi_ioctl_block_when_processing_errors(STp->device, cmd_in,
4971 retval = (-ENXIO); 4972 file->f_flags & O_NDELAY);
4973 if (retval)
4972 goto out; 4974 goto out;
4973 }
4974 4975
4975 cmd_type = _IOC_TYPE(cmd_in); 4976 cmd_type = _IOC_TYPE(cmd_in);
4976 cmd_nr = _IOC_NR(cmd_in); 4977 cmd_nr = _IOC_NR(cmd_in);
diff --git a/drivers/scsi/pas16.c b/drivers/scsi/pas16.c
index 80bacb5dc1d4..e81eadd08afc 100644
--- a/drivers/scsi/pas16.c
+++ b/drivers/scsi/pas16.c
@@ -1,6 +1,4 @@
1#define AUTOSENSE
2#define PSEUDO_DMA 1#define PSEUDO_DMA
3#define FOO
4#define UNSAFE /* Not unsafe for PAS16 -- use it */ 2#define UNSAFE /* Not unsafe for PAS16 -- use it */
5#define PDEBUG 0 3#define PDEBUG 0
6 4
@@ -24,47 +22,9 @@
24 * Media Vision 22 * Media Vision
25 * (510) 770-8600 23 * (510) 770-8600
26 * (800) 348-7116 24 * (800) 348-7116
27 *
28 * and
29 *
30 * NCR 5380 Family
31 * SCSI Protocol Controller
32 * Databook
33 *
34 * NCR Microelectronics
35 * 1635 Aeroplaza Drive
36 * Colorado Springs, CO 80916
37 * 1+ (719) 578-3400
38 * 1+ (800) 334-5454
39 */ 25 */
40 26
41/* 27/*
42 * Options :
43 * AUTOSENSE - if defined, REQUEST SENSE will be performed automatically
44 * for commands that return with a CHECK CONDITION status.
45 *
46 * LIMIT_TRANSFERSIZE - if defined, limit the pseudo-dma transfers to 512
47 * bytes at a time. Since interrupts are disabled by default during
48 * these transfers, we might need this to give reasonable interrupt
49 * service time if the transfer size gets too large.
50 *
51 * PSEUDO_DMA - enables PSEUDO-DMA hardware, should give a 3-4X performance
52 * increase compared to polled I/O.
53 *
54 * PARITY - enable parity checking. Not supported.
55 *
56 * SCSI2 - enable support for SCSI-II tagged queueing. Untested.
57 *
58 * UNSAFE - leave interrupts enabled during pseudo-DMA transfers. This
59 * parameter comes from the NCR5380 code. It is NOT unsafe with
60 * the PAS16 and you should use it. If you don't you will have
61 * a problem with dropped characters during high speed
62 * communications during SCSI transfers. If you really don't
63 * want to use UNSAFE you can try defining LIMIT_TRANSFERSIZE or
64 * twiddle with the transfer size in the high level code.
65 *
66 * USLEEP - enable support for devices that don't disconnect. Untested.
67 *
68 * The card is detected and initialized in one of several ways : 28 * The card is detected and initialized in one of several ways :
69 * 1. Autoprobe (default) - There are many different models of 29 * 1. Autoprobe (default) - There are many different models of
70 * the Pro Audio Spectrum/Studio 16, and I only have one of 30 * the Pro Audio Spectrum/Studio 16, and I only have one of
@@ -102,13 +62,11 @@
102 * If you have problems with your card not being recognized, use 62 * If you have problems with your card not being recognized, use
103 * the LILO command line override. Try to get it recognized without 63 * the LILO command line override. Try to get it recognized without
104 * interrupts. Ie, for a board at the default 0x388 base port, 64 * interrupts. Ie, for a board at the default 0x388 base port,
105 * boot: linux pas16=0x388,255 65 * boot: linux pas16=0x388,0
106 * 66 *
107 * SCSI_IRQ_NONE (255) should be specified for no interrupt, 67 * NO_IRQ (0) should be specified for no interrupt,
108 * IRQ_AUTO (254) to autoprobe for an IRQ line if overridden 68 * IRQ_AUTO (254) to autoprobe for an IRQ line if overridden
109 * on the command line. 69 * on the command line.
110 *
111 * (IRQ_AUTO == 254, SCSI_IRQ_NONE == 255 in NCR5380.h)
112 */ 70 */
113 71
114#include <linux/module.h> 72#include <linux/module.h>
@@ -123,15 +81,12 @@
123#include <linux/stat.h> 81#include <linux/stat.h>
124#include <linux/init.h> 82#include <linux/init.h>
125 83
126#include "scsi.h"
127#include <scsi/scsi_host.h> 84#include <scsi/scsi_host.h>
128#include "pas16.h" 85#include "pas16.h"
129#define AUTOPROBE_IRQ 86#define AUTOPROBE_IRQ
130#include "NCR5380.h" 87#include "NCR5380.h"
131 88
132 89
133static int pas_maxi = 0;
134static int pas_wmaxi = 0;
135static unsigned short pas16_addr = 0; 90static unsigned short pas16_addr = 0;
136static int pas16_irq = 0; 91static int pas16_irq = 0;
137 92
@@ -337,6 +292,7 @@ static int __init
337} 292}
338 293
339 294
295#ifndef MODULE
340/* 296/*
341 * Function : pas16_setup(char *str, int *ints) 297 * Function : pas16_setup(char *str, int *ints)
342 * 298 *
@@ -347,10 +303,13 @@ static int __init
347 * 303 *
348 */ 304 */
349 305
350void __init pas16_setup(char *str, int *ints) 306static int __init pas16_setup(char *str)
351{ 307{
352 static int commandline_current = 0; 308 static int commandline_current = 0;
353 int i; 309 int i;
310 int ints[10];
311
312 get_options(str, ARRAY_SIZE(ints), ints);
354 if (ints[0] != 2) 313 if (ints[0] != 2)
355 printk("pas16_setup : usage pas16=io_port,irq\n"); 314 printk("pas16_setup : usage pas16=io_port,irq\n");
356 else 315 else
@@ -364,8 +323,12 @@ void __init pas16_setup(char *str, int *ints)
364 } 323 }
365 ++commandline_current; 324 ++commandline_current;
366 } 325 }
326 return 1;
367} 327}
368 328
329__setup("pas16=", pas16_setup);
330#endif
331
369/* 332/*
370 * Function : int pas16_detect(struct scsi_host_template * tpnt) 333 * Function : int pas16_detect(struct scsi_host_template * tpnt)
371 * 334 *
@@ -379,7 +342,7 @@ void __init pas16_setup(char *str, int *ints)
379 * 342 *
380 */ 343 */
381 344
382int __init pas16_detect(struct scsi_host_template * tpnt) 345static int __init pas16_detect(struct scsi_host_template *tpnt)
383{ 346{
384 static int current_override = 0; 347 static int current_override = 0;
385 static unsigned short current_base = 0; 348 static unsigned short current_base = 0;
@@ -387,10 +350,6 @@ int __init pas16_detect(struct scsi_host_template * tpnt)
387 unsigned short io_port; 350 unsigned short io_port;
388 int count; 351 int count;
389 352
390 tpnt->proc_name = "pas16";
391 tpnt->show_info = pas16_show_info;
392 tpnt->write_info = pas16_write_info;
393
394 if (pas16_addr != 0) { 353 if (pas16_addr != 0) {
395 overrides[0].io_port = pas16_addr; 354 overrides[0].io_port = pas16_addr;
396 /* 355 /*
@@ -452,15 +411,19 @@ int __init pas16_detect(struct scsi_host_template * tpnt)
452 else 411 else
453 instance->irq = NCR5380_probe_irq(instance, PAS16_IRQS); 412 instance->irq = NCR5380_probe_irq(instance, PAS16_IRQS);
454 413
455 if (instance->irq != SCSI_IRQ_NONE) 414 /* Compatibility with documented NCR5380 kernel parameters */
415 if (instance->irq == 255)
416 instance->irq = NO_IRQ;
417
418 if (instance->irq != NO_IRQ)
456 if (request_irq(instance->irq, pas16_intr, 0, 419 if (request_irq(instance->irq, pas16_intr, 0,
457 "pas16", instance)) { 420 "pas16", instance)) {
458 printk("scsi%d : IRQ%d not free, interrupts disabled\n", 421 printk("scsi%d : IRQ%d not free, interrupts disabled\n",
459 instance->host_no, instance->irq); 422 instance->host_no, instance->irq);
460 instance->irq = SCSI_IRQ_NONE; 423 instance->irq = NO_IRQ;
461 } 424 }
462 425
463 if (instance->irq == SCSI_IRQ_NONE) { 426 if (instance->irq == NO_IRQ) {
464 printk("scsi%d : interrupts not enabled. for better interactive performance,\n", instance->host_no); 427 printk("scsi%d : interrupts not enabled. for better interactive performance,\n", instance->host_no);
465 printk("scsi%d : please jumper the board for a free IRQ.\n", instance->host_no); 428 printk("scsi%d : please jumper the board for a free IRQ.\n", instance->host_no);
466 /* Disable 5380 interrupts, leave drive params the same */ 429 /* Disable 5380 interrupts, leave drive params the same */
@@ -472,17 +435,6 @@ int __init pas16_detect(struct scsi_host_template * tpnt)
472 printk("scsi%d : irq = %d\n", instance->host_no, instance->irq); 435 printk("scsi%d : irq = %d\n", instance->host_no, instance->irq);
473#endif 436#endif
474 437
475 printk("scsi%d : at 0x%04x", instance->host_no, (int)
476 instance->io_port);
477 if (instance->irq == SCSI_IRQ_NONE)
478 printk (" interrupts disabled");
479 else
480 printk (" irq %d", instance->irq);
481 printk(" options CAN_QUEUE=%d CMD_PER_LUN=%d release=%d",
482 CAN_QUEUE, CMD_PER_LUN, PAS16_PUBLIC_RELEASE);
483 NCR5380_print_options(instance);
484 printk("\n");
485
486 ++current_override; 438 ++current_override;
487 ++count; 439 ++count;
488 } 440 }
@@ -509,8 +461,8 @@ int __init pas16_detect(struct scsi_host_template * tpnt)
509 * and matching the H_C_S coordinates to what DOS uses. 461 * and matching the H_C_S coordinates to what DOS uses.
510 */ 462 */
511 463
512int pas16_biosparam(struct scsi_device *sdev, struct block_device *dev, 464static int pas16_biosparam(struct scsi_device *sdev, struct block_device *dev,
513 sector_t capacity, int * ip) 465 sector_t capacity, int *ip)
514{ 466{
515 int size = capacity; 467 int size = capacity;
516 ip[0] = 64; 468 ip[0] = 64;
@@ -547,6 +499,7 @@ static inline int NCR5380_pread (struct Scsi_Host *instance, unsigned char *dst,
547 P_DATA_REG_OFFSET); 499 P_DATA_REG_OFFSET);
548 register int i = len; 500 register int i = len;
549 int ii = 0; 501 int ii = 0;
502 struct NCR5380_hostdata *hostdata = shost_priv(instance);
550 503
551 while ( !(inb(instance->io_port + P_STATUS_REG_OFFSET) & P_ST_RDY) ) 504 while ( !(inb(instance->io_port + P_STATUS_REG_OFFSET) & P_ST_RDY) )
552 ++ii; 505 ++ii;
@@ -559,8 +512,8 @@ static inline int NCR5380_pread (struct Scsi_Host *instance, unsigned char *dst,
559 instance->host_no); 512 instance->host_no);
560 return -1; 513 return -1;
561 } 514 }
562 if (ii > pas_maxi) 515 if (ii > hostdata->spin_max_r)
563 pas_maxi = ii; 516 hostdata->spin_max_r = ii;
564 return 0; 517 return 0;
565} 518}
566 519
@@ -583,6 +536,7 @@ static inline int NCR5380_pwrite (struct Scsi_Host *instance, unsigned char *src
583 register unsigned short reg = (instance->io_port + P_DATA_REG_OFFSET); 536 register unsigned short reg = (instance->io_port + P_DATA_REG_OFFSET);
584 register int i = len; 537 register int i = len;
585 int ii = 0; 538 int ii = 0;
539 struct NCR5380_hostdata *hostdata = shost_priv(instance);
586 540
587 while ( !((inb(instance->io_port + P_STATUS_REG_OFFSET)) & P_ST_RDY) ) 541 while ( !((inb(instance->io_port + P_STATUS_REG_OFFSET)) & P_ST_RDY) )
588 ++ii; 542 ++ii;
@@ -595,8 +549,8 @@ static inline int NCR5380_pwrite (struct Scsi_Host *instance, unsigned char *src
595 instance->host_no); 549 instance->host_no);
596 return -1; 550 return -1;
597 } 551 }
598 if (ii > pas_maxi) 552 if (ii > hostdata->spin_max_w)
599 pas_wmaxi = ii; 553 hostdata->spin_max_w = ii;
600 return 0; 554 return 0;
601} 555}
602 556
@@ -604,7 +558,7 @@ static inline int NCR5380_pwrite (struct Scsi_Host *instance, unsigned char *src
604 558
605static int pas16_release(struct Scsi_Host *shost) 559static int pas16_release(struct Scsi_Host *shost)
606{ 560{
607 if (shost->irq) 561 if (shost->irq != NO_IRQ)
608 free_irq(shost->irq, shost); 562 free_irq(shost->irq, shost);
609 NCR5380_exit(shost); 563 NCR5380_exit(shost);
610 if (shost->io_port && shost->n_io_port) 564 if (shost->io_port && shost->n_io_port)
@@ -617,6 +571,10 @@ static struct scsi_host_template driver_template = {
617 .name = "Pro Audio Spectrum-16 SCSI", 571 .name = "Pro Audio Spectrum-16 SCSI",
618 .detect = pas16_detect, 572 .detect = pas16_detect,
619 .release = pas16_release, 573 .release = pas16_release,
574 .proc_name = "pas16",
575 .show_info = pas16_show_info,
576 .write_info = pas16_write_info,
577 .info = pas16_info,
620 .queuecommand = pas16_queue_command, 578 .queuecommand = pas16_queue_command,
621 .eh_abort_handler = pas16_abort, 579 .eh_abort_handler = pas16_abort,
622 .eh_bus_reset_handler = pas16_bus_reset, 580 .eh_bus_reset_handler = pas16_bus_reset,
diff --git a/drivers/scsi/pas16.h b/drivers/scsi/pas16.h
index aa528f53c533..c6109c80050b 100644
--- a/drivers/scsi/pas16.h
+++ b/drivers/scsi/pas16.h
@@ -18,26 +18,12 @@
18 * Media Vision 18 * Media Vision
19 * (510) 770-8600 19 * (510) 770-8600
20 * (800) 348-7116 20 * (800) 348-7116
21 *
22 * and
23 *
24 * NCR 5380 Family
25 * SCSI Protocol Controller
26 * Databook
27 *
28 * NCR Microelectronics
29 * 1635 Aeroplaza Drive
30 * Colorado Springs, CO 80916
31 * 1+ (719) 578-3400
32 * 1+ (800) 334-5454
33 */ 21 */
34 22
35 23
36#ifndef PAS16_H 24#ifndef PAS16_H
37#define PAS16_H 25#define PAS16_H
38 26
39#define PAS16_PUBLIC_RELEASE 3
40
41#define PDEBUG_INIT 0x1 27#define PDEBUG_INIT 0x1
42#define PDEBUG_TRANSFER 0x2 28#define PDEBUG_TRANSFER 0x2
43 29
@@ -114,12 +100,6 @@
114 100
115 101
116#ifndef ASM 102#ifndef ASM
117static int pas16_abort(Scsi_Cmnd *);
118static int pas16_biosparam(struct scsi_device *, struct block_device *,
119 sector_t, int*);
120static int pas16_detect(struct scsi_host_template *);
121static int pas16_queue_command(struct Scsi_Host *, struct scsi_cmnd *);
122static int pas16_bus_reset(Scsi_Cmnd *);
123 103
124#ifndef CMD_PER_LUN 104#ifndef CMD_PER_LUN
125#define CMD_PER_LUN 2 105#define CMD_PER_LUN 2
@@ -161,6 +141,7 @@ static int pas16_bus_reset(Scsi_Cmnd *);
161#define NCR5380_queue_command pas16_queue_command 141#define NCR5380_queue_command pas16_queue_command
162#define NCR5380_abort pas16_abort 142#define NCR5380_abort pas16_abort
163#define NCR5380_bus_reset pas16_bus_reset 143#define NCR5380_bus_reset pas16_bus_reset
144#define NCR5380_info pas16_info
164#define NCR5380_show_info pas16_show_info 145#define NCR5380_show_info pas16_show_info
165#define NCR5380_write_info pas16_write_info 146#define NCR5380_write_info pas16_write_info
166 147
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 666bf5af06e2..329aba0083ab 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -89,6 +89,8 @@ static struct scsi_host_template pm8001_sht = {
89 .target_destroy = sas_target_destroy, 89 .target_destroy = sas_target_destroy,
90 .ioctl = sas_ioctl, 90 .ioctl = sas_ioctl,
91 .shost_attrs = pm8001_host_attrs, 91 .shost_attrs = pm8001_host_attrs,
92 .use_blk_tags = 1,
93 .track_queue_depth = 1,
92}; 94};
93 95
94/** 96/**
@@ -599,8 +601,6 @@ static void pm8001_post_sas_ha_init(struct Scsi_Host *shost,
599 sha->lldd_module = THIS_MODULE; 601 sha->lldd_module = THIS_MODULE;
600 sha->sas_addr = &pm8001_ha->sas_addr[0]; 602 sha->sas_addr = &pm8001_ha->sas_addr[0];
601 sha->num_phys = chip_info->n_phy; 603 sha->num_phys = chip_info->n_phy;
602 sha->lldd_max_execute_num = 1;
603 sha->lldd_queue_size = PM8001_CAN_QUEUE;
604 sha->core.shost = shost; 604 sha->core.shost = shost;
605} 605}
606 606
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index 76570e6a547d..b93f289b42b3 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -350,7 +350,7 @@ static int sas_find_local_port_id(struct domain_device *dev)
350 */ 350 */
351#define DEV_IS_GONE(pm8001_dev) \ 351#define DEV_IS_GONE(pm8001_dev) \
352 ((!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED))) 352 ((!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED)))
353static int pm8001_task_exec(struct sas_task *task, const int num, 353static int pm8001_task_exec(struct sas_task *task,
354 gfp_t gfp_flags, int is_tmf, struct pm8001_tmf_task *tmf) 354 gfp_t gfp_flags, int is_tmf, struct pm8001_tmf_task *tmf)
355{ 355{
356 struct domain_device *dev = task->dev; 356 struct domain_device *dev = task->dev;
@@ -360,7 +360,6 @@ static int pm8001_task_exec(struct sas_task *task, const int num,
360 struct sas_task *t = task; 360 struct sas_task *t = task;
361 struct pm8001_ccb_info *ccb; 361 struct pm8001_ccb_info *ccb;
362 u32 tag = 0xdeadbeef, rc, n_elem = 0; 362 u32 tag = 0xdeadbeef, rc, n_elem = 0;
363 u32 n = num;
364 unsigned long flags = 0; 363 unsigned long flags = 0;
365 364
366 if (!dev->port) { 365 if (!dev->port) {
@@ -387,18 +386,12 @@ static int pm8001_task_exec(struct sas_task *task, const int num,
387 spin_unlock_irqrestore(&pm8001_ha->lock, flags); 386 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
388 t->task_done(t); 387 t->task_done(t);
389 spin_lock_irqsave(&pm8001_ha->lock, flags); 388 spin_lock_irqsave(&pm8001_ha->lock, flags);
390 if (n > 1)
391 t = list_entry(t->list.next,
392 struct sas_task, list);
393 continue; 389 continue;
394 } else { 390 } else {
395 struct task_status_struct *ts = &t->task_status; 391 struct task_status_struct *ts = &t->task_status;
396 ts->resp = SAS_TASK_UNDELIVERED; 392 ts->resp = SAS_TASK_UNDELIVERED;
397 ts->stat = SAS_PHY_DOWN; 393 ts->stat = SAS_PHY_DOWN;
398 t->task_done(t); 394 t->task_done(t);
399 if (n > 1)
400 t = list_entry(t->list.next,
401 struct sas_task, list);
402 continue; 395 continue;
403 } 396 }
404 } 397 }
@@ -460,9 +453,7 @@ static int pm8001_task_exec(struct sas_task *task, const int num,
460 t->task_state_flags |= SAS_TASK_AT_INITIATOR; 453 t->task_state_flags |= SAS_TASK_AT_INITIATOR;
461 spin_unlock(&t->task_state_lock); 454 spin_unlock(&t->task_state_lock);
462 pm8001_dev->running_req++; 455 pm8001_dev->running_req++;
463 if (n > 1) 456 } while (0);
464 t = list_entry(t->list.next, struct sas_task, list);
465 } while (--n);
466 rc = 0; 457 rc = 0;
467 goto out_done; 458 goto out_done;
468 459
@@ -483,14 +474,11 @@ out_done:
483 * pm8001_queue_command - register for upper layer used, all IO commands sent 474 * pm8001_queue_command - register for upper layer used, all IO commands sent
484 * to HBA are from this interface. 475 * to HBA are from this interface.
485 * @task: the task to be execute. 476 * @task: the task to be execute.
486 * @num: if can_queue great than 1, the task can be queued up. for SMP task,
487 * we always execute one one time
488 * @gfp_flags: gfp_flags 477 * @gfp_flags: gfp_flags
489 */ 478 */
490int pm8001_queue_command(struct sas_task *task, const int num, 479int pm8001_queue_command(struct sas_task *task, gfp_t gfp_flags)
491 gfp_t gfp_flags)
492{ 480{
493 return pm8001_task_exec(task, num, gfp_flags, 0, NULL); 481 return pm8001_task_exec(task, gfp_flags, 0, NULL);
494} 482}
495 483
496/** 484/**
@@ -708,7 +696,7 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev,
708 task->slow_task->timer.expires = jiffies + PM8001_TASK_TIMEOUT*HZ; 696 task->slow_task->timer.expires = jiffies + PM8001_TASK_TIMEOUT*HZ;
709 add_timer(&task->slow_task->timer); 697 add_timer(&task->slow_task->timer);
710 698
711 res = pm8001_task_exec(task, 1, GFP_KERNEL, 1, tmf); 699 res = pm8001_task_exec(task, GFP_KERNEL, 1, tmf);
712 700
713 if (res) { 701 if (res) {
714 del_timer(&task->slow_task->timer); 702 del_timer(&task->slow_task->timer);
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index f6b2ac59dae4..8dd8b7840f04 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -623,8 +623,7 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
623 void *funcdata); 623 void *funcdata);
624void pm8001_scan_start(struct Scsi_Host *shost); 624void pm8001_scan_start(struct Scsi_Host *shost);
625int pm8001_scan_finished(struct Scsi_Host *shost, unsigned long time); 625int pm8001_scan_finished(struct Scsi_Host *shost, unsigned long time);
626int pm8001_queue_command(struct sas_task *task, const int num, 626int pm8001_queue_command(struct sas_task *task, gfp_t gfp_flags);
627 gfp_t gfp_flags);
628int pm8001_abort_task(struct sas_task *task); 627int pm8001_abort_task(struct sas_task *task);
629int pm8001_abort_task_set(struct domain_device *dev, u8 *lun); 628int pm8001_abort_task_set(struct domain_device *dev, u8 *lun);
630int pm8001_clear_aca(struct domain_device *dev, u8 *lun); 629int pm8001_clear_aca(struct domain_device *dev, u8 *lun);
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index bcb64eb1387f..b1b1f66b1ab7 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -249,15 +249,11 @@ static int pmcraid_slave_configure(struct scsi_device *scsi_dev)
249 PMCRAID_VSET_MAX_SECTORS); 249 PMCRAID_VSET_MAX_SECTORS);
250 } 250 }
251 251
252 if (scsi_dev->tagged_supported && 252 /*
253 (RES_IS_GSCSI(res->cfg_entry) || RES_IS_VSET(res->cfg_entry))) { 253 * We never want to report TCQ support for these types of devices.
254 scsi_activate_tcq(scsi_dev, scsi_dev->queue_depth); 254 */
255 scsi_adjust_queue_depth(scsi_dev, MSG_SIMPLE_TAG, 255 if (!RES_IS_GSCSI(res->cfg_entry) && !RES_IS_VSET(res->cfg_entry))
256 scsi_dev->host->cmd_per_lun); 256 scsi_dev->tagged_supported = 0;
257 } else {
258 scsi_adjust_queue_depth(scsi_dev, 0,
259 scsi_dev->host->cmd_per_lun);
260 }
261 257
262 return 0; 258 return 0;
263} 259}
@@ -289,55 +285,18 @@ static void pmcraid_slave_destroy(struct scsi_device *scsi_dev)
289 * pmcraid_change_queue_depth - Change the device's queue depth 285 * pmcraid_change_queue_depth - Change the device's queue depth
290 * @scsi_dev: scsi device struct 286 * @scsi_dev: scsi device struct
291 * @depth: depth to set 287 * @depth: depth to set
292 * @reason: calling context
293 * 288 *
294 * Return value 289 * Return value
295 * actual depth set 290 * actual depth set
296 */ 291 */
297static int pmcraid_change_queue_depth(struct scsi_device *scsi_dev, int depth, 292static int pmcraid_change_queue_depth(struct scsi_device *scsi_dev, int depth)
298 int reason)
299{ 293{
300 if (reason != SCSI_QDEPTH_DEFAULT)
301 return -EOPNOTSUPP;
302
303 if (depth > PMCRAID_MAX_CMD_PER_LUN) 294 if (depth > PMCRAID_MAX_CMD_PER_LUN)
304 depth = PMCRAID_MAX_CMD_PER_LUN; 295 depth = PMCRAID_MAX_CMD_PER_LUN;
305 296 return scsi_change_queue_depth(scsi_dev, depth);
306 scsi_adjust_queue_depth(scsi_dev, scsi_get_tag_type(scsi_dev), depth);
307
308 return scsi_dev->queue_depth;
309} 297}
310 298
311/** 299/**
312 * pmcraid_change_queue_type - Change the device's queue type
313 * @scsi_dev: scsi device struct
314 * @tag: type of tags to use
315 *
316 * Return value:
317 * actual queue type set
318 */
319static int pmcraid_change_queue_type(struct scsi_device *scsi_dev, int tag)
320{
321 struct pmcraid_resource_entry *res;
322
323 res = (struct pmcraid_resource_entry *)scsi_dev->hostdata;
324
325 if ((res) && scsi_dev->tagged_supported &&
326 (RES_IS_GSCSI(res->cfg_entry) || RES_IS_VSET(res->cfg_entry))) {
327 scsi_set_tag_type(scsi_dev, tag);
328
329 if (tag)
330 scsi_activate_tcq(scsi_dev, scsi_dev->queue_depth);
331 else
332 scsi_deactivate_tcq(scsi_dev, scsi_dev->queue_depth);
333 } else
334 tag = 0;
335
336 return tag;
337}
338
339
340/**
341 * pmcraid_init_cmdblk - initializes a command block 300 * pmcraid_init_cmdblk - initializes a command block
342 * 301 *
343 * @cmd: pointer to struct pmcraid_cmd to be initialized 302 * @cmd: pointer to struct pmcraid_cmd to be initialized
@@ -3175,36 +3134,6 @@ static int pmcraid_eh_host_reset_handler(struct scsi_cmnd *scmd)
3175} 3134}
3176 3135
3177/** 3136/**
3178 * pmcraid_task_attributes - Translate SPI Q-Tags to task attributes
3179 * @scsi_cmd: scsi command struct
3180 *
3181 * Return value
3182 * number of tags or 0 if the task is not tagged
3183 */
3184static u8 pmcraid_task_attributes(struct scsi_cmnd *scsi_cmd)
3185{
3186 char tag[2];
3187 u8 rc = 0;
3188
3189 if (scsi_populate_tag_msg(scsi_cmd, tag)) {
3190 switch (tag[0]) {
3191 case MSG_SIMPLE_TAG:
3192 rc = TASK_TAG_SIMPLE;
3193 break;
3194 case MSG_HEAD_TAG:
3195 rc = TASK_TAG_QUEUE_HEAD;
3196 break;
3197 case MSG_ORDERED_TAG:
3198 rc = TASK_TAG_ORDERED;
3199 break;
3200 };
3201 }
3202
3203 return rc;
3204}
3205
3206
3207/**
3208 * pmcraid_init_ioadls - initializes IOADL related fields in IOARCB 3137 * pmcraid_init_ioadls - initializes IOADL related fields in IOARCB
3209 * @cmd: pmcraid command struct 3138 * @cmd: pmcraid command struct
3210 * @sgcount: count of scatter-gather elements 3139 * @sgcount: count of scatter-gather elements
@@ -3559,7 +3488,9 @@ static int pmcraid_queuecommand_lck(
3559 } 3488 }
3560 3489
3561 ioarcb->request_flags0 |= NO_LINK_DESCS; 3490 ioarcb->request_flags0 |= NO_LINK_DESCS;
3562 ioarcb->request_flags1 |= pmcraid_task_attributes(scsi_cmd); 3491
3492 if (scsi_cmd->flags & SCMD_TAGGED)
3493 ioarcb->request_flags1 |= TASK_TAG_SIMPLE;
3563 3494
3564 if (RES_IS_GSCSI(res->cfg_entry)) 3495 if (RES_IS_GSCSI(res->cfg_entry))
3565 ioarcb->request_flags1 |= DELAY_AFTER_RESET; 3496 ioarcb->request_flags1 |= DELAY_AFTER_RESET;
@@ -4320,7 +4251,7 @@ static struct scsi_host_template pmcraid_host_template = {
4320 .slave_configure = pmcraid_slave_configure, 4251 .slave_configure = pmcraid_slave_configure,
4321 .slave_destroy = pmcraid_slave_destroy, 4252 .slave_destroy = pmcraid_slave_destroy,
4322 .change_queue_depth = pmcraid_change_queue_depth, 4253 .change_queue_depth = pmcraid_change_queue_depth,
4323 .change_queue_type = pmcraid_change_queue_type, 4254 .change_queue_type = scsi_change_queue_type,
4324 .can_queue = PMCRAID_MAX_IO_CMD, 4255 .can_queue = PMCRAID_MAX_IO_CMD,
4325 .this_id = -1, 4256 .this_id = -1,
4326 .sg_tablesize = PMCRAID_MAX_IOADLS, 4257 .sg_tablesize = PMCRAID_MAX_IOADLS,
@@ -4329,7 +4260,8 @@ static struct scsi_host_template pmcraid_host_template = {
4329 .cmd_per_lun = PMCRAID_MAX_CMD_PER_LUN, 4260 .cmd_per_lun = PMCRAID_MAX_CMD_PER_LUN,
4330 .use_clustering = ENABLE_CLUSTERING, 4261 .use_clustering = ENABLE_CLUSTERING,
4331 .shost_attrs = pmcraid_host_attrs, 4262 .shost_attrs = pmcraid_host_attrs,
4332 .proc_name = PMCRAID_DRIVER_NAME 4263 .proc_name = PMCRAID_DRIVER_NAME,
4264 .use_blk_tags = 1,
4333}; 4265};
4334 4266
4335/* 4267/*
diff --git a/drivers/scsi/ps3rom.c b/drivers/scsi/ps3rom.c
index ef23fabe3924..b3b48b5a984c 100644
--- a/drivers/scsi/ps3rom.c
+++ b/drivers/scsi/ps3rom.c
@@ -220,10 +220,6 @@ static int ps3rom_queuecommand_lck(struct scsi_cmnd *cmd,
220 unsigned char opcode; 220 unsigned char opcode;
221 int res; 221 int res;
222 222
223#ifdef DEBUG
224 scsi_print_command(cmd);
225#endif
226
227 priv->curr_cmd = cmd; 223 priv->curr_cmd = cmd;
228 cmd->scsi_done = done; 224 cmd->scsi_done = done;
229 225
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 158020522dfb..c68a66e8cfc1 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -1224,10 +1224,9 @@ qla1280_slave_configure(struct scsi_device *device)
1224 1224
1225 if (device->tagged_supported && 1225 if (device->tagged_supported &&
1226 (ha->bus_settings[bus].qtag_enables & (BIT_0 << target))) { 1226 (ha->bus_settings[bus].qtag_enables & (BIT_0 << target))) {
1227 scsi_adjust_queue_depth(device, MSG_ORDERED_TAG, 1227 scsi_change_queue_depth(device, ha->bus_settings[bus].hiwat);
1228 ha->bus_settings[bus].hiwat);
1229 } else { 1228 } else {
1230 scsi_adjust_queue_depth(device, 0, default_depth); 1229 scsi_change_queue_depth(device, default_depth);
1231 } 1230 }
1232 1231
1233 nv->bus[bus].target[target].parameter.enable_sync = device->sdtr; 1232 nv->bus[bus].target[target].parameter.enable_sync = device->sdtr;
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index b1865a72ce59..7686bfe9a4a9 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -752,8 +752,6 @@ extern void qla8044_set_idc_dontreset(struct scsi_qla_host *ha);
752extern int qla8044_rd_direct(struct scsi_qla_host *vha, const uint32_t crb_reg); 752extern int qla8044_rd_direct(struct scsi_qla_host *vha, const uint32_t crb_reg);
753extern void qla8044_wr_direct(struct scsi_qla_host *vha, 753extern void qla8044_wr_direct(struct scsi_qla_host *vha,
754 const uint32_t crb_reg, const uint32_t value); 754 const uint32_t crb_reg, const uint32_t value);
755extern inline void qla8044_set_qsnt_ready(struct scsi_qla_host *vha);
756extern inline void qla8044_need_reset_handler(struct scsi_qla_host *vha);
757extern int qla8044_device_state_handler(struct scsi_qla_host *vha); 755extern int qla8044_device_state_handler(struct scsi_qla_host *vha);
758extern void qla8044_clear_qsnt_ready(struct scsi_qla_host *vha); 756extern void qla8044_clear_qsnt_ready(struct scsi_qla_host *vha);
759extern void qla8044_clear_drv_active(struct qla_hw_data *); 757extern void qla8044_clear_drv_active(struct qla_hw_data *);
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index f0edb07f3198..a1ab25fca874 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -325,7 +325,6 @@ qla2x00_start_scsi(srb_t *sp)
325 struct qla_hw_data *ha; 325 struct qla_hw_data *ha;
326 struct req_que *req; 326 struct req_que *req;
327 struct rsp_que *rsp; 327 struct rsp_que *rsp;
328 char tag[2];
329 328
330 /* Setup device pointers. */ 329 /* Setup device pointers. */
331 ret = 0; 330 ret = 0;
@@ -404,26 +403,7 @@ qla2x00_start_scsi(srb_t *sp)
404 /* Set target ID and LUN number*/ 403 /* Set target ID and LUN number*/
405 SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id); 404 SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
406 cmd_pkt->lun = cpu_to_le16(cmd->device->lun); 405 cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
407 406 cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);
408 /* Update tagged queuing modifier */
409 if (scsi_populate_tag_msg(cmd, tag)) {
410 switch (tag[0]) {
411 case HEAD_OF_QUEUE_TAG:
412 cmd_pkt->control_flags =
413 __constant_cpu_to_le16(CF_HEAD_TAG);
414 break;
415 case ORDERED_QUEUE_TAG:
416 cmd_pkt->control_flags =
417 __constant_cpu_to_le16(CF_ORDERED_TAG);
418 break;
419 default:
420 cmd_pkt->control_flags =
421 __constant_cpu_to_le16(CF_SIMPLE_TAG);
422 break;
423 }
424 } else {
425 cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);
426 }
427 407
428 /* Load SCSI command packet. */ 408 /* Load SCSI command packet. */
429 memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len); 409 memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
@@ -1264,7 +1244,6 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1264 uint16_t fcp_cmnd_len; 1244 uint16_t fcp_cmnd_len;
1265 struct fcp_cmnd *fcp_cmnd; 1245 struct fcp_cmnd *fcp_cmnd;
1266 dma_addr_t crc_ctx_dma; 1246 dma_addr_t crc_ctx_dma;
1267 char tag[2];
1268 1247
1269 cmd = GET_CMD_SP(sp); 1248 cmd = GET_CMD_SP(sp);
1270 1249
@@ -1356,25 +1335,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1356 cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32( 1335 cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
1357 MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF)); 1336 MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
1358 fcp_cmnd->task_management = 0; 1337 fcp_cmnd->task_management = 0;
1359 1338 fcp_cmnd->task_attribute = TSK_SIMPLE;
1360 /*
1361 * Update tagged queuing modifier if using command tag queuing
1362 */
1363 if (scsi_populate_tag_msg(cmd, tag)) {
1364 switch (tag[0]) {
1365 case HEAD_OF_QUEUE_TAG:
1366 fcp_cmnd->task_attribute = TSK_HEAD_OF_QUEUE;
1367 break;
1368 case ORDERED_QUEUE_TAG:
1369 fcp_cmnd->task_attribute = TSK_ORDERED;
1370 break;
1371 default:
1372 fcp_cmnd->task_attribute = TSK_SIMPLE;
1373 break;
1374 }
1375 } else {
1376 fcp_cmnd->task_attribute = TSK_SIMPLE;
1377 }
1378 1339
1379 cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */ 1340 cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
1380 1341
@@ -1495,7 +1456,6 @@ qla24xx_start_scsi(srb_t *sp)
1495 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 1456 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1496 struct scsi_qla_host *vha = sp->fcport->vha; 1457 struct scsi_qla_host *vha = sp->fcport->vha;
1497 struct qla_hw_data *ha = vha->hw; 1458 struct qla_hw_data *ha = vha->hw;
1498 char tag[2];
1499 1459
1500 /* Setup device pointers. */ 1460 /* Setup device pointers. */
1501 ret = 0; 1461 ret = 0;
@@ -1578,22 +1538,7 @@ qla24xx_start_scsi(srb_t *sp)
1578 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); 1538 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1579 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); 1539 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1580 1540
1581 /* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */ 1541 cmd_pkt->task = TSK_SIMPLE;
1582 if (scsi_populate_tag_msg(cmd, tag)) {
1583 switch (tag[0]) {
1584 case HEAD_OF_QUEUE_TAG:
1585 cmd_pkt->task = TSK_HEAD_OF_QUEUE;
1586 break;
1587 case ORDERED_QUEUE_TAG:
1588 cmd_pkt->task = TSK_ORDERED;
1589 break;
1590 default:
1591 cmd_pkt->task = TSK_SIMPLE;
1592 break;
1593 }
1594 } else {
1595 cmd_pkt->task = TSK_SIMPLE;
1596 }
1597 1542
1598 /* Load SCSI command packet. */ 1543 /* Load SCSI command packet. */
1599 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len); 1544 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
@@ -2310,7 +2255,6 @@ qla82xx_start_scsi(srb_t *sp)
2310 struct qla_hw_data *ha = vha->hw; 2255 struct qla_hw_data *ha = vha->hw;
2311 struct req_que *req = NULL; 2256 struct req_que *req = NULL;
2312 struct rsp_que *rsp = NULL; 2257 struct rsp_que *rsp = NULL;
2313 char tag[2];
2314 2258
2315 /* Setup device pointers. */ 2259 /* Setup device pointers. */
2316 ret = 0; 2260 ret = 0;
@@ -2489,22 +2433,6 @@ sufficient_dsds:
2489 else if (cmd->sc_data_direction == DMA_FROM_DEVICE) 2433 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
2490 ctx->fcp_cmnd->additional_cdb_len |= 2; 2434 ctx->fcp_cmnd->additional_cdb_len |= 2;
2491 2435
2492 /*
2493 * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
2494 */
2495 if (scsi_populate_tag_msg(cmd, tag)) {
2496 switch (tag[0]) {
2497 case HEAD_OF_QUEUE_TAG:
2498 ctx->fcp_cmnd->task_attribute =
2499 TSK_HEAD_OF_QUEUE;
2500 break;
2501 case ORDERED_QUEUE_TAG:
2502 ctx->fcp_cmnd->task_attribute =
2503 TSK_ORDERED;
2504 break;
2505 }
2506 }
2507
2508 /* Populate the FCP_PRIO. */ 2436 /* Populate the FCP_PRIO. */
2509 if (ha->flags.fcp_prio_enabled) 2437 if (ha->flags.fcp_prio_enabled)
2510 ctx->fcp_cmnd->task_attribute |= 2438 ctx->fcp_cmnd->task_attribute |=
@@ -2565,20 +2493,6 @@ sufficient_dsds:
2565 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, 2493 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
2566 sizeof(cmd_pkt->lun)); 2494 sizeof(cmd_pkt->lun));
2567 2495
2568 /*
2569 * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
2570 */
2571 if (scsi_populate_tag_msg(cmd, tag)) {
2572 switch (tag[0]) {
2573 case HEAD_OF_QUEUE_TAG:
2574 cmd_pkt->task = TSK_HEAD_OF_QUEUE;
2575 break;
2576 case ORDERED_QUEUE_TAG:
2577 cmd_pkt->task = TSK_ORDERED;
2578 break;
2579 }
2580 }
2581
2582 /* Populate the FCP_PRIO. */ 2496 /* Populate the FCP_PRIO. */
2583 if (ha->flags.fcp_prio_enabled) 2497 if (ha->flags.fcp_prio_enabled)
2584 cmd_pkt->task |= sp->fcport->fcp_prio << 3; 2498 cmd_pkt->task |= sp->fcport->fcp_prio << 3;
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index 80867599527d..6d190b4b82a0 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -3086,7 +3086,6 @@ qlafx00_start_scsi(srb_t *sp)
3086 struct cmd_type_7_fx00 *cmd_pkt; 3086 struct cmd_type_7_fx00 *cmd_pkt;
3087 struct cmd_type_7_fx00 lcmd_pkt; 3087 struct cmd_type_7_fx00 lcmd_pkt;
3088 struct scsi_lun llun; 3088 struct scsi_lun llun;
3089 char tag[2];
3090 3089
3091 /* Setup device pointers. */ 3090 /* Setup device pointers. */
3092 ret = 0; 3091 ret = 0;
@@ -3157,18 +3156,6 @@ qlafx00_start_scsi(srb_t *sp)
3157 host_to_adap((uint8_t *)&llun, (uint8_t *)&lcmd_pkt.lun, 3156 host_to_adap((uint8_t *)&llun, (uint8_t *)&lcmd_pkt.lun,
3158 sizeof(lcmd_pkt.lun)); 3157 sizeof(lcmd_pkt.lun));
3159 3158
3160 /* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */
3161 if (scsi_populate_tag_msg(cmd, tag)) {
3162 switch (tag[0]) {
3163 case HEAD_OF_QUEUE_TAG:
3164 lcmd_pkt.task = TSK_HEAD_OF_QUEUE;
3165 break;
3166 case ORDERED_QUEUE_TAG:
3167 lcmd_pkt.task = TSK_ORDERED;
3168 break;
3169 }
3170 }
3171
3172 /* Load SCSI command packet. */ 3159 /* Load SCSI command packet. */
3173 host_to_adap(cmd->cmnd, lcmd_pkt.fcp_cdb, sizeof(lcmd_pkt.fcp_cdb)); 3160 host_to_adap(cmd->cmnd, lcmd_pkt.fcp_cdb, sizeof(lcmd_pkt.fcp_cdb));
3174 lcmd_pkt.byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd)); 3161 lcmd_pkt.byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
diff --git a/drivers/scsi/qla2xxx/qla_nx2.c b/drivers/scsi/qla2xxx/qla_nx2.c
index 24a852828b5d..ed4d6b6b53e3 100644
--- a/drivers/scsi/qla2xxx/qla_nx2.c
+++ b/drivers/scsi/qla2xxx/qla_nx2.c
@@ -238,7 +238,7 @@ qla8044_rmw_crb_reg(struct scsi_qla_host *vha,
238 return; 238 return;
239} 239}
240 240
241inline void 241static inline void
242qla8044_set_qsnt_ready(struct scsi_qla_host *vha) 242qla8044_set_qsnt_ready(struct scsi_qla_host *vha)
243{ 243{
244 uint32_t qsnt_state; 244 uint32_t qsnt_state;
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index db3dbd999cb6..6b4d9235368a 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -236,8 +236,6 @@ static int qla2xxx_eh_target_reset(struct scsi_cmnd *);
236static int qla2xxx_eh_bus_reset(struct scsi_cmnd *); 236static int qla2xxx_eh_bus_reset(struct scsi_cmnd *);
237static int qla2xxx_eh_host_reset(struct scsi_cmnd *); 237static int qla2xxx_eh_host_reset(struct scsi_cmnd *);
238 238
239static int qla2x00_change_queue_depth(struct scsi_device *, int, int);
240static int qla2x00_change_queue_type(struct scsi_device *, int);
241static void qla2x00_clear_drv_active(struct qla_hw_data *); 239static void qla2x00_clear_drv_active(struct qla_hw_data *);
242static void qla2x00_free_device(scsi_qla_host_t *); 240static void qla2x00_free_device(scsi_qla_host_t *);
243static void qla83xx_disable_laser(scsi_qla_host_t *vha); 241static void qla83xx_disable_laser(scsi_qla_host_t *vha);
@@ -259,8 +257,8 @@ struct scsi_host_template qla2xxx_driver_template = {
259 .slave_destroy = qla2xxx_slave_destroy, 257 .slave_destroy = qla2xxx_slave_destroy,
260 .scan_finished = qla2xxx_scan_finished, 258 .scan_finished = qla2xxx_scan_finished,
261 .scan_start = qla2xxx_scan_start, 259 .scan_start = qla2xxx_scan_start,
262 .change_queue_depth = qla2x00_change_queue_depth, 260 .change_queue_depth = scsi_change_queue_depth,
263 .change_queue_type = qla2x00_change_queue_type, 261 .change_queue_type = scsi_change_queue_type,
264 .this_id = -1, 262 .this_id = -1,
265 .cmd_per_lun = 3, 263 .cmd_per_lun = 3,
266 .use_clustering = ENABLE_CLUSTERING, 264 .use_clustering = ENABLE_CLUSTERING,
@@ -270,6 +268,8 @@ struct scsi_host_template qla2xxx_driver_template = {
270 .shost_attrs = qla2x00_host_attrs, 268 .shost_attrs = qla2x00_host_attrs,
271 269
272 .supported_mode = MODE_INITIATOR, 270 .supported_mode = MODE_INITIATOR,
271 .use_blk_tags = 1,
272 .track_queue_depth = 1,
273}; 273};
274 274
275static struct scsi_transport_template *qla2xxx_transport_template = NULL; 275static struct scsi_transport_template *qla2xxx_transport_template = NULL;
@@ -1405,10 +1405,7 @@ qla2xxx_slave_configure(struct scsi_device *sdev)
1405 if (IS_T10_PI_CAPABLE(vha->hw)) 1405 if (IS_T10_PI_CAPABLE(vha->hw))
1406 blk_queue_update_dma_alignment(sdev->request_queue, 0x7); 1406 blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
1407 1407
1408 if (sdev->tagged_supported) 1408 scsi_change_queue_depth(sdev, req->max_q_depth);
1409 scsi_activate_tcq(sdev, req->max_q_depth);
1410 else
1411 scsi_deactivate_tcq(sdev, req->max_q_depth);
1412 return 0; 1409 return 0;
1413} 1410}
1414 1411
@@ -1418,76 +1415,6 @@ qla2xxx_slave_destroy(struct scsi_device *sdev)
1418 sdev->hostdata = NULL; 1415 sdev->hostdata = NULL;
1419} 1416}
1420 1417
1421static void qla2x00_handle_queue_full(struct scsi_device *sdev, int qdepth)
1422{
1423 fc_port_t *fcport = (struct fc_port *) sdev->hostdata;
1424
1425 if (!scsi_track_queue_full(sdev, qdepth))
1426 return;
1427
1428 ql_dbg(ql_dbg_io, fcport->vha, 0x3029,
1429 "Queue depth adjusted-down to %d for nexus=%ld:%d:%llu.\n",
1430 sdev->queue_depth, fcport->vha->host_no, sdev->id, sdev->lun);
1431}
1432
1433static void qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, int qdepth)
1434{
1435 fc_port_t *fcport = sdev->hostdata;
1436 struct scsi_qla_host *vha = fcport->vha;
1437 struct req_que *req = NULL;
1438
1439 req = vha->req;
1440 if (!req)
1441 return;
1442
1443 if (req->max_q_depth <= sdev->queue_depth || req->max_q_depth < qdepth)
1444 return;
1445
1446 if (sdev->ordered_tags)
1447 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, qdepth);
1448 else
1449 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, qdepth);
1450
1451 ql_dbg(ql_dbg_io, vha, 0x302a,
1452 "Queue depth adjusted-up to %d for nexus=%ld:%d:%llu.\n",
1453 sdev->queue_depth, fcport->vha->host_no, sdev->id, sdev->lun);
1454}
1455
1456static int
1457qla2x00_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
1458{
1459 switch (reason) {
1460 case SCSI_QDEPTH_DEFAULT:
1461 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
1462 break;
1463 case SCSI_QDEPTH_QFULL:
1464 qla2x00_handle_queue_full(sdev, qdepth);
1465 break;
1466 case SCSI_QDEPTH_RAMP_UP:
1467 qla2x00_adjust_sdev_qdepth_up(sdev, qdepth);
1468 break;
1469 default:
1470 return -EOPNOTSUPP;
1471 }
1472
1473 return sdev->queue_depth;
1474}
1475
1476static int
1477qla2x00_change_queue_type(struct scsi_device *sdev, int tag_type)
1478{
1479 if (sdev->tagged_supported) {
1480 scsi_set_tag_type(sdev, tag_type);
1481 if (tag_type)
1482 scsi_activate_tcq(sdev, sdev->queue_depth);
1483 else
1484 scsi_deactivate_tcq(sdev, sdev->queue_depth);
1485 } else
1486 tag_type = 0;
1487
1488 return tag_type;
1489}
1490
1491/** 1418/**
1492 * qla2x00_config_dma_addressing() - Configure OS DMA addressing method. 1419 * qla2x00_config_dma_addressing() - Configure OS DMA addressing method.
1493 * @ha: HA context 1420 * @ha: HA context
diff --git a/drivers/scsi/qla4xxx/ql4_iocb.c b/drivers/scsi/qla4xxx/ql4_iocb.c
index 08ab6dac226d..17222eb49762 100644
--- a/drivers/scsi/qla4xxx/ql4_iocb.c
+++ b/drivers/scsi/qla4xxx/ql4_iocb.c
@@ -280,7 +280,6 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
280 uint16_t req_cnt; 280 uint16_t req_cnt;
281 unsigned long flags; 281 unsigned long flags;
282 uint32_t index; 282 uint32_t index;
283 char tag[2];
284 283
285 /* Get real lun and adapter */ 284 /* Get real lun and adapter */
286 ddb_entry = srb->ddb; 285 ddb_entry = srb->ddb;
@@ -352,15 +351,6 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
352 351
353 /* Set tagged queueing control flags */ 352 /* Set tagged queueing control flags */
354 cmd_entry->control_flags |= CF_SIMPLE_TAG; 353 cmd_entry->control_flags |= CF_SIMPLE_TAG;
355 if (scsi_populate_tag_msg(cmd, tag))
356 switch (tag[0]) {
357 case MSG_HEAD_TAG:
358 cmd_entry->control_flags |= CF_HEAD_TAG;
359 break;
360 case MSG_ORDERED_TAG:
361 cmd_entry->control_flags |= CF_ORDERED_TAG;
362 break;
363 }
364 354
365 qla4xxx_advance_req_ring_ptr(ha); 355 qla4xxx_advance_req_ring_ptr(ha);
366 qla4xxx_build_scsi_iocbs(srb, cmd_entry, tot_dsds); 356 qla4xxx_build_scsi_iocbs(srb, cmd_entry, tot_dsds);
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 199fcf79a051..6d25879d87c8 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -162,12 +162,8 @@ static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd);
162static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd); 162static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd);
163static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd); 163static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd);
164static int qla4xxx_slave_alloc(struct scsi_device *device); 164static int qla4xxx_slave_alloc(struct scsi_device *device);
165static int qla4xxx_slave_configure(struct scsi_device *device);
166static void qla4xxx_slave_destroy(struct scsi_device *sdev);
167static umode_t qla4_attr_is_visible(int param_type, int param); 165static umode_t qla4_attr_is_visible(int param_type, int param);
168static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type); 166static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type);
169static int qla4xxx_change_queue_depth(struct scsi_device *sdev, int qdepth,
170 int reason);
171 167
172/* 168/*
173 * iSCSI Flash DDB sysfs entry points 169 * iSCSI Flash DDB sysfs entry points
@@ -204,10 +200,8 @@ static struct scsi_host_template qla4xxx_driver_template = {
204 .eh_host_reset_handler = qla4xxx_eh_host_reset, 200 .eh_host_reset_handler = qla4xxx_eh_host_reset,
205 .eh_timed_out = qla4xxx_eh_cmd_timed_out, 201 .eh_timed_out = qla4xxx_eh_cmd_timed_out,
206 202
207 .slave_configure = qla4xxx_slave_configure,
208 .slave_alloc = qla4xxx_slave_alloc, 203 .slave_alloc = qla4xxx_slave_alloc,
209 .slave_destroy = qla4xxx_slave_destroy, 204 .change_queue_depth = scsi_change_queue_depth,
210 .change_queue_depth = qla4xxx_change_queue_depth,
211 205
212 .this_id = -1, 206 .this_id = -1,
213 .cmd_per_lun = 3, 207 .cmd_per_lun = 3,
@@ -218,6 +212,7 @@ static struct scsi_host_template qla4xxx_driver_template = {
218 .shost_attrs = qla4xxx_host_attrs, 212 .shost_attrs = qla4xxx_host_attrs,
219 .host_reset = qla4xxx_host_reset, 213 .host_reset = qla4xxx_host_reset,
220 .vendor_id = SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC, 214 .vendor_id = SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC,
215 .use_blk_tags = 1,
221}; 216};
222 217
223static struct iscsi_transport qla4xxx_iscsi_transport = { 218static struct iscsi_transport qla4xxx_iscsi_transport = {
@@ -9060,35 +9055,14 @@ static int qla4xxx_slave_alloc(struct scsi_device *sdev)
9060 ddb = sess->dd_data; 9055 ddb = sess->dd_data;
9061 9056
9062 sdev->hostdata = ddb; 9057 sdev->hostdata = ddb;
9063 sdev->tagged_supported = 1;
9064 9058
9065 if (ql4xmaxqdepth != 0 && ql4xmaxqdepth <= 0xffffU) 9059 if (ql4xmaxqdepth != 0 && ql4xmaxqdepth <= 0xffffU)
9066 queue_depth = ql4xmaxqdepth; 9060 queue_depth = ql4xmaxqdepth;
9067 9061
9068 scsi_activate_tcq(sdev, queue_depth); 9062 scsi_change_queue_depth(sdev, queue_depth);
9069 return 0; 9063 return 0;
9070} 9064}
9071 9065
9072static int qla4xxx_slave_configure(struct scsi_device *sdev)
9073{
9074 sdev->tagged_supported = 1;
9075 return 0;
9076}
9077
9078static void qla4xxx_slave_destroy(struct scsi_device *sdev)
9079{
9080 scsi_deactivate_tcq(sdev, 1);
9081}
9082
9083static int qla4xxx_change_queue_depth(struct scsi_device *sdev, int qdepth,
9084 int reason)
9085{
9086 if (!ql4xqfulltracking)
9087 return -EOPNOTSUPP;
9088
9089 return iscsi_change_queue_depth(sdev, qdepth, reason);
9090}
9091
9092/** 9066/**
9093 * qla4xxx_del_from_active_array - returns an active srb 9067 * qla4xxx_del_from_active_array - returns an active srb
9094 * @ha: Pointer to host adapter structure. 9068 * @ha: Pointer to host adapter structure.
@@ -9888,6 +9862,9 @@ static int __init qla4xxx_module_init(void)
9888{ 9862{
9889 int ret; 9863 int ret;
9890 9864
9865 if (ql4xqfulltracking)
9866 qla4xxx_driver_template.track_queue_depth = 1;
9867
9891 /* Allocate cache for SRBs. */ 9868 /* Allocate cache for SRBs. */
9892 srb_cachep = kmem_cache_create("qla4xxx_srbs", sizeof(struct srb), 0, 9869 srb_cachep = kmem_cache_create("qla4xxx_srbs", sizeof(struct srb), 0,
9893 SLAB_HWCACHE_ALIGN, NULL); 9870 SLAB_HWCACHE_ALIGN, NULL);
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 79c77b485a67..1ad0c36375b8 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -527,9 +527,9 @@ void scsi_log_send(struct scsi_cmnd *cmd)
527 * 527 *
528 * 1: nothing (match completion) 528 * 1: nothing (match completion)
529 * 529 *
530 * 2: log opcode + command of all commands 530 * 2: log opcode + command of all commands + cmd address
531 * 531 *
532 * 3: same as 2 plus dump cmd address 532 * 3: same as 2
533 * 533 *
534 * 4: same as 3 plus dump extra junk 534 * 4: same as 3 plus dump extra junk
535 */ 535 */
@@ -537,10 +537,8 @@ void scsi_log_send(struct scsi_cmnd *cmd)
537 level = SCSI_LOG_LEVEL(SCSI_LOG_MLQUEUE_SHIFT, 537 level = SCSI_LOG_LEVEL(SCSI_LOG_MLQUEUE_SHIFT,
538 SCSI_LOG_MLQUEUE_BITS); 538 SCSI_LOG_MLQUEUE_BITS);
539 if (level > 1) { 539 if (level > 1) {
540 scmd_printk(KERN_INFO, cmd, "Send: "); 540 scmd_printk(KERN_INFO, cmd,
541 if (level > 2) 541 "Send: scmd 0x%p\n", cmd);
542 printk("0x%p ", cmd);
543 printk("\n");
544 scsi_print_command(cmd); 542 scsi_print_command(cmd);
545 if (level > 3) { 543 if (level > 3) {
546 printk(KERN_INFO "buffer = 0x%p, bufflen = %d," 544 printk(KERN_INFO "buffer = 0x%p, bufflen = %d,"
@@ -565,7 +563,7 @@ void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
565 * 563 *
566 * 2: same as 1 but for all command completions. 564 * 2: same as 1 but for all command completions.
567 * 565 *
568 * 3: same as 2 plus dump cmd address 566 * 3: same as 2
569 * 567 *
570 * 4: same as 3 plus dump extra junk 568 * 4: same as 3 plus dump extra junk
571 */ 569 */
@@ -574,39 +572,10 @@ void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
574 SCSI_LOG_MLCOMPLETE_BITS); 572 SCSI_LOG_MLCOMPLETE_BITS);
575 if (((level > 0) && (cmd->result || disposition != SUCCESS)) || 573 if (((level > 0) && (cmd->result || disposition != SUCCESS)) ||
576 (level > 1)) { 574 (level > 1)) {
577 scmd_printk(KERN_INFO, cmd, "Done: "); 575 scsi_print_result(cmd, "Done: ", disposition);
578 if (level > 2)
579 printk("0x%p ", cmd);
580 /*
581 * Dump truncated values, so we usually fit within
582 * 80 chars.
583 */
584 switch (disposition) {
585 case SUCCESS:
586 printk("SUCCESS\n");
587 break;
588 case NEEDS_RETRY:
589 printk("RETRY\n");
590 break;
591 case ADD_TO_MLQUEUE:
592 printk("MLQUEUE\n");
593 break;
594 case FAILED:
595 printk("FAILED\n");
596 break;
597 case TIMEOUT_ERROR:
598 /*
599 * If called via scsi_times_out.
600 */
601 printk("TIMEOUT\n");
602 break;
603 default:
604 printk("UNKNOWN\n");
605 }
606 scsi_print_result(cmd);
607 scsi_print_command(cmd); 576 scsi_print_command(cmd);
608 if (status_byte(cmd->result) & CHECK_CONDITION) 577 if (status_byte(cmd->result) & CHECK_CONDITION)
609 scsi_print_sense("", cmd); 578 scsi_print_sense(cmd);
610 if (level > 3) 579 if (level > 3)
611 scmd_printk(KERN_INFO, cmd, 580 scmd_printk(KERN_INFO, cmd,
612 "scsi host busy %d failed %d\n", 581 "scsi host busy %d failed %d\n",
@@ -634,87 +603,6 @@ void scsi_cmd_get_serial(struct Scsi_Host *host, struct scsi_cmnd *cmd)
634EXPORT_SYMBOL(scsi_cmd_get_serial); 603EXPORT_SYMBOL(scsi_cmd_get_serial);
635 604
636/** 605/**
637 * scsi_dispatch_command - Dispatch a command to the low-level driver.
638 * @cmd: command block we are dispatching.
639 *
640 * Return: nonzero return request was rejected and device's queue needs to be
641 * plugged.
642 */
643int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
644{
645 struct Scsi_Host *host = cmd->device->host;
646 int rtn = 0;
647
648 atomic_inc(&cmd->device->iorequest_cnt);
649
650 /* check if the device is still usable */
651 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
652 /* in SDEV_DEL we error all commands. DID_NO_CONNECT
653 * returns an immediate error upwards, and signals
654 * that the device is no longer present */
655 cmd->result = DID_NO_CONNECT << 16;
656 goto done;
657 }
658
659 /* Check to see if the scsi lld made this device blocked. */
660 if (unlikely(scsi_device_blocked(cmd->device))) {
661 /*
662 * in blocked state, the command is just put back on
663 * the device queue. The suspend state has already
664 * blocked the queue so future requests should not
665 * occur until the device transitions out of the
666 * suspend state.
667 */
668 SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
669 "queuecommand : device blocked\n"));
670 return SCSI_MLQUEUE_DEVICE_BUSY;
671 }
672
673 /* Store the LUN value in cmnd, if needed. */
674 if (cmd->device->lun_in_cdb)
675 cmd->cmnd[1] = (cmd->cmnd[1] & 0x1f) |
676 (cmd->device->lun << 5 & 0xe0);
677
678 scsi_log_send(cmd);
679
680 /*
681 * Before we queue this command, check if the command
682 * length exceeds what the host adapter can handle.
683 */
684 if (cmd->cmd_len > cmd->device->host->max_cmd_len) {
685 SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
686 "queuecommand : command too long. "
687 "cdb_size=%d host->max_cmd_len=%d\n",
688 cmd->cmd_len, cmd->device->host->max_cmd_len));
689 cmd->result = (DID_ABORT << 16);
690 goto done;
691 }
692
693 if (unlikely(host->shost_state == SHOST_DEL)) {
694 cmd->result = (DID_NO_CONNECT << 16);
695 goto done;
696
697 }
698
699 trace_scsi_dispatch_cmd_start(cmd);
700 rtn = host->hostt->queuecommand(host, cmd);
701 if (rtn) {
702 trace_scsi_dispatch_cmd_error(cmd, rtn);
703 if (rtn != SCSI_MLQUEUE_DEVICE_BUSY &&
704 rtn != SCSI_MLQUEUE_TARGET_BUSY)
705 rtn = SCSI_MLQUEUE_HOST_BUSY;
706
707 SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
708 "queuecommand : request rejected\n"));
709 }
710
711 return rtn;
712 done:
713 cmd->scsi_done(cmd);
714 return 0;
715}
716
717/**
718 * scsi_finish_command - cleanup and pass command back to upper layer 606 * scsi_finish_command - cleanup and pass command back to upper layer
719 * @cmd: the command 607 * @cmd: the command
720 * 608 *
@@ -773,32 +661,18 @@ void scsi_finish_command(struct scsi_cmnd *cmd)
773} 661}
774 662
775/** 663/**
776 * scsi_adjust_queue_depth - Let low level drivers change a device's queue depth 664 * scsi_change_queue_depth - change a device's queue depth
777 * @sdev: SCSI Device in question 665 * @sdev: SCSI Device in question
778 * @tagged: Do we use tagged queueing (non-0) or do we treat 666 * @depth: number of commands allowed to be queued to the driver
779 * this device as an untagged device (0)
780 * @tags: Number of tags allowed if tagged queueing enabled,
781 * or number of commands the low level driver can
782 * queue up in non-tagged mode (as per cmd_per_lun).
783 *
784 * Returns: Nothing
785 * 667 *
786 * Lock Status: None held on entry 668 * Sets the device queue depth and returns the new value.
787 *
788 * Notes: Low level drivers may call this at any time and we will do
789 * the right thing depending on whether or not the device is
790 * currently active and whether or not it even has the
791 * command blocks built yet.
792 */ 669 */
793void scsi_adjust_queue_depth(struct scsi_device *sdev, int tagged, int tags) 670int scsi_change_queue_depth(struct scsi_device *sdev, int depth)
794{ 671{
795 unsigned long flags; 672 unsigned long flags;
796 673
797 /* 674 if (depth <= 0)
798 * refuse to set tagged depth to an unworkable size 675 goto out;
799 */
800 if (tags <= 0)
801 return;
802 676
803 spin_lock_irqsave(sdev->request_queue->queue_lock, flags); 677 spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
804 678
@@ -813,35 +687,17 @@ void scsi_adjust_queue_depth(struct scsi_device *sdev, int tagged, int tags)
813 */ 687 */
814 if (!shost_use_blk_mq(sdev->host) && !sdev->host->bqt) { 688 if (!shost_use_blk_mq(sdev->host) && !sdev->host->bqt) {
815 if (blk_queue_tagged(sdev->request_queue) && 689 if (blk_queue_tagged(sdev->request_queue) &&
816 blk_queue_resize_tags(sdev->request_queue, tags) != 0) 690 blk_queue_resize_tags(sdev->request_queue, depth) != 0)
817 goto out; 691 goto out_unlock;
818 } 692 }
819 693
820 sdev->queue_depth = tags; 694 sdev->queue_depth = depth;
821 switch (tagged) { 695out_unlock:
822 case 0:
823 sdev->ordered_tags = 0;
824 sdev->simple_tags = 0;
825 break;
826 case MSG_ORDERED_TAG:
827 sdev->ordered_tags = 1;
828 sdev->simple_tags = 1;
829 break;
830 case MSG_SIMPLE_TAG:
831 sdev->ordered_tags = 0;
832 sdev->simple_tags = 1;
833 break;
834 default:
835 sdev->ordered_tags = 0;
836 sdev->simple_tags = 0;
837 sdev_printk(KERN_WARNING, sdev,
838 "scsi_adjust_queue_depth, bad queue type, "
839 "disabled\n");
840 }
841 out:
842 spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags); 696 spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
697out:
698 return sdev->queue_depth;
843} 699}
844EXPORT_SYMBOL(scsi_adjust_queue_depth); 700EXPORT_SYMBOL(scsi_change_queue_depth);
845 701
846/** 702/**
847 * scsi_track_queue_full - track QUEUE_FULL events to adjust queue depth 703 * scsi_track_queue_full - track QUEUE_FULL events to adjust queue depth
@@ -885,19 +741,32 @@ int scsi_track_queue_full(struct scsi_device *sdev, int depth)
885 return 0; 741 return 0;
886 if (sdev->last_queue_full_depth < 8) { 742 if (sdev->last_queue_full_depth < 8) {
887 /* Drop back to untagged */ 743 /* Drop back to untagged */
888 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun); 744 scsi_set_tag_type(sdev, 0);
745 scsi_change_queue_depth(sdev, sdev->host->cmd_per_lun);
889 return -1; 746 return -1;
890 } 747 }
891 748
892 if (sdev->ordered_tags) 749 return scsi_change_queue_depth(sdev, depth);
893 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, depth);
894 else
895 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth);
896 return depth;
897} 750}
898EXPORT_SYMBOL(scsi_track_queue_full); 751EXPORT_SYMBOL(scsi_track_queue_full);
899 752
900/** 753/**
754 * scsi_change_queue_type() - Change a device's queue type
755 * @sdev: The SCSI device whose queue depth is to change
756 * @tag_type: Identifier for queue type
757 */
758int scsi_change_queue_type(struct scsi_device *sdev, int tag_type)
759{
760 if (!sdev->tagged_supported)
761 return 0;
762
763 scsi_set_tag_type(sdev, tag_type);
764 return tag_type;
765
766}
767EXPORT_SYMBOL(scsi_change_queue_type);
768
769/**
901 * scsi_vpd_inquiry - Request a device provide us with a VPD page 770 * scsi_vpd_inquiry - Request a device provide us with a VPD page
902 * @sdev: The device to ask 771 * @sdev: The device to ask
903 * @buffer: Where to put the result 772 * @buffer: Where to put the result
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 238e06f13b8a..aa4b6b80aade 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -63,8 +63,8 @@
63#include "sd.h" 63#include "sd.h"
64#include "scsi_logging.h" 64#include "scsi_logging.h"
65 65
66#define SCSI_DEBUG_VERSION "1.84" 66#define SCSI_DEBUG_VERSION "1.85"
67static const char *scsi_debug_version_date = "20140706"; 67static const char *scsi_debug_version_date = "20141022";
68 68
69#define MY_NAME "scsi_debug" 69#define MY_NAME "scsi_debug"
70 70
@@ -75,19 +75,22 @@ static const char *scsi_debug_version_date = "20140706";
75#define UNRECOVERED_READ_ERR 0x11 75#define UNRECOVERED_READ_ERR 0x11
76#define PARAMETER_LIST_LENGTH_ERR 0x1a 76#define PARAMETER_LIST_LENGTH_ERR 0x1a
77#define INVALID_OPCODE 0x20 77#define INVALID_OPCODE 0x20
78#define ADDR_OUT_OF_RANGE 0x21 78#define LBA_OUT_OF_RANGE 0x21
79#define INVALID_COMMAND_OPCODE 0x20
80#define INVALID_FIELD_IN_CDB 0x24 79#define INVALID_FIELD_IN_CDB 0x24
81#define INVALID_FIELD_IN_PARAM_LIST 0x26 80#define INVALID_FIELD_IN_PARAM_LIST 0x26
82#define UA_RESET_ASC 0x29 81#define UA_RESET_ASC 0x29
83#define UA_CHANGED_ASC 0x2a 82#define UA_CHANGED_ASC 0x2a
83#define INSUFF_RES_ASC 0x55
84#define INSUFF_RES_ASCQ 0x3
84#define POWER_ON_RESET_ASCQ 0x0 85#define POWER_ON_RESET_ASCQ 0x0
85#define BUS_RESET_ASCQ 0x2 /* scsi bus reset occurred */ 86#define BUS_RESET_ASCQ 0x2 /* scsi bus reset occurred */
86#define MODE_CHANGED_ASCQ 0x1 /* mode parameters changed */ 87#define MODE_CHANGED_ASCQ 0x1 /* mode parameters changed */
88#define CAPACITY_CHANGED_ASCQ 0x9
87#define SAVING_PARAMS_UNSUP 0x39 89#define SAVING_PARAMS_UNSUP 0x39
88#define TRANSPORT_PROBLEM 0x4b 90#define TRANSPORT_PROBLEM 0x4b
89#define THRESHOLD_EXCEEDED 0x5d 91#define THRESHOLD_EXCEEDED 0x5d
90#define LOW_POWER_COND_ON 0x5e 92#define LOW_POWER_COND_ON 0x5e
93#define MISCOMPARE_VERIFY_ASC 0x1d
91 94
92/* Additional Sense Code Qualifier (ASCQ) */ 95/* Additional Sense Code Qualifier (ASCQ) */
93#define ACK_NAK_TO 0x3 96#define ACK_NAK_TO 0x3
@@ -133,6 +136,7 @@ static const char *scsi_debug_version_date = "20140706";
133#define DEF_VIRTUAL_GB 0 136#define DEF_VIRTUAL_GB 0
134#define DEF_VPD_USE_HOSTNO 1 137#define DEF_VPD_USE_HOSTNO 1
135#define DEF_WRITESAME_LENGTH 0xFFFF 138#define DEF_WRITESAME_LENGTH 0xFFFF
139#define DEF_STRICT 0
136#define DELAY_OVERRIDDEN -9999 140#define DELAY_OVERRIDDEN -9999
137 141
138/* bit mask values for scsi_debug_opts */ 142/* bit mask values for scsi_debug_opts */
@@ -176,11 +180,12 @@ static const char *scsi_debug_version_date = "20140706";
176#define SDEBUG_UA_POR 0 /* Power on, reset, or bus device reset */ 180#define SDEBUG_UA_POR 0 /* Power on, reset, or bus device reset */
177#define SDEBUG_UA_BUS_RESET 1 181#define SDEBUG_UA_BUS_RESET 1
178#define SDEBUG_UA_MODE_CHANGED 2 182#define SDEBUG_UA_MODE_CHANGED 2
179#define SDEBUG_NUM_UAS 3 183#define SDEBUG_UA_CAPACITY_CHANGED 3
184#define SDEBUG_NUM_UAS 4
180 185
181/* for check_readiness() */ 186/* for check_readiness() */
182#define UAS_ONLY 1 187#define UAS_ONLY 1 /* check for UAs only */
183#define UAS_TUR 0 188#define UAS_TUR 0 /* if no UAs then check if media access possible */
184 189
185/* when 1==SCSI_DEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this 190/* when 1==SCSI_DEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
186 * sector on read commands: */ 191 * sector on read commands: */
@@ -206,6 +211,301 @@ static const char *scsi_debug_version_date = "20140706";
206#warning "Expect DEF_CMD_PER_LUN <= SCSI_DEBUG_CANQUEUE" 211#warning "Expect DEF_CMD_PER_LUN <= SCSI_DEBUG_CANQUEUE"
207#endif 212#endif
208 213
214/* SCSI opcodes (first byte of cdb) mapped onto these indexes */
215enum sdeb_opcode_index {
216 SDEB_I_INVALID_OPCODE = 0,
217 SDEB_I_INQUIRY = 1,
218 SDEB_I_REPORT_LUNS = 2,
219 SDEB_I_REQUEST_SENSE = 3,
220 SDEB_I_TEST_UNIT_READY = 4,
221 SDEB_I_MODE_SENSE = 5, /* 6, 10 */
222 SDEB_I_MODE_SELECT = 6, /* 6, 10 */
223 SDEB_I_LOG_SENSE = 7,
224 SDEB_I_READ_CAPACITY = 8, /* 10; 16 is in SA_IN(16) */
225 SDEB_I_READ = 9, /* 6, 10, 12, 16 */
226 SDEB_I_WRITE = 10, /* 6, 10, 12, 16 */
227 SDEB_I_START_STOP = 11,
228 SDEB_I_SERV_ACT_IN = 12, /* 12, 16 */
229 SDEB_I_SERV_ACT_OUT = 13, /* 12, 16 */
230 SDEB_I_MAINT_IN = 14,
231 SDEB_I_MAINT_OUT = 15,
232 SDEB_I_VERIFY = 16, /* 10 only */
233 SDEB_I_VARIABLE_LEN = 17,
234 SDEB_I_RESERVE = 18, /* 6, 10 */
235 SDEB_I_RELEASE = 19, /* 6, 10 */
236 SDEB_I_ALLOW_REMOVAL = 20, /* PREVENT ALLOW MEDIUM REMOVAL */
237 SDEB_I_REZERO_UNIT = 21, /* REWIND in SSC */
238 SDEB_I_ATA_PT = 22, /* 12, 16 */
239 SDEB_I_SEND_DIAG = 23,
240 SDEB_I_UNMAP = 24,
241 SDEB_I_XDWRITEREAD = 25, /* 10 only */
242 SDEB_I_WRITE_BUFFER = 26,
243 SDEB_I_WRITE_SAME = 27, /* 10, 16 */
244 SDEB_I_SYNC_CACHE = 28, /* 10 only */
245 SDEB_I_COMP_WRITE = 29,
246 SDEB_I_LAST_ELEMENT = 30, /* keep this last */
247};
248
249static const unsigned char opcode_ind_arr[256] = {
250/* 0x0; 0x0->0x1f: 6 byte cdbs */
251 SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
252 0, 0, 0, 0,
253 SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
254 0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
255 SDEB_I_RELEASE,
256 0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
257 SDEB_I_ALLOW_REMOVAL, 0,
258/* 0x20; 0x20->0x3f: 10 byte cdbs */
259 0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
260 SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
261 0, 0, 0, 0, 0, SDEB_I_SYNC_CACHE, 0, 0,
262 0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
263/* 0x40; 0x40->0x5f: 10 byte cdbs */
264 0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
265 0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
266 0, 0, 0, SDEB_I_XDWRITEREAD, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
267 SDEB_I_RELEASE,
268 0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
269/* 0x60; 0x60->0x7d are reserved */
270 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
271 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
272 0, SDEB_I_VARIABLE_LEN,
273/* 0x80; 0x80->0x9f: 16 byte cdbs */
274 0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
275 SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0, 0, 0, 0, 0,
276 0, 0, 0, SDEB_I_WRITE_SAME, 0, 0, 0, 0,
277 0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN, SDEB_I_SERV_ACT_OUT,
278/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
279 SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
280 SDEB_I_MAINT_OUT, 0, 0, 0,
281 SDEB_I_READ, SDEB_I_SERV_ACT_OUT, SDEB_I_WRITE, SDEB_I_SERV_ACT_IN,
282 0, 0, 0, 0,
283 0, 0, 0, 0, 0, 0, 0, 0,
284 0, 0, 0, 0, 0, 0, 0, 0,
285/* 0xc0; 0xc0->0xff: vendor specific */
286 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
287 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
288 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
289 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
290};
291
292#define F_D_IN 1
293#define F_D_OUT 2
294#define F_D_OUT_MAYBE 4 /* WRITE SAME, NDOB bit */
295#define F_D_UNKN 8
296#define F_RL_WLUN_OK 0x10
297#define F_SKIP_UA 0x20
298#define F_DELAY_OVERR 0x40
299#define F_SA_LOW 0x80 /* cdb byte 1, bits 4 to 0 */
300#define F_SA_HIGH 0x100 /* as used by variable length cdbs */
301#define F_INV_OP 0x200
302#define F_FAKE_RW 0x400
303#define F_M_ACCESS 0x800 /* media access */
304
305#define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
306#define FF_DIRECT_IO (F_M_ACCESS | F_FAKE_RW)
307#define FF_SA (F_SA_HIGH | F_SA_LOW)
308
309struct sdebug_dev_info;
310static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
311static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
312static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
313static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
314static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
315static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
316static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
317static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
318static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
319static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
320static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
321static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
322static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
323static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
324static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
325static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
326static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
327static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
328static int resp_xdwriteread_10(struct scsi_cmnd *, struct sdebug_dev_info *);
329static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
330
331struct opcode_info_t {
332 u8 num_attached; /* 0 if this is it (i.e. a leaf); use 0xff
333 * for terminating element */
334 u8 opcode; /* if num_attached > 0, preferred */
335 u16 sa; /* service action */
336 u32 flags; /* OR-ed set of SDEB_F_* */
337 int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
338 const struct opcode_info_t *arrp; /* num_attached elements or NULL */
339 u8 len_mask[16]; /* len=len_mask[0], then mask for cdb[1]... */
340 /* ignore cdb bytes after position 15 */
341};
342
343static const struct opcode_info_t msense_iarr[1] = {
344 {0, 0x1a, 0, F_D_IN, NULL, NULL,
345 {6, 0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
346};
347
348static const struct opcode_info_t mselect_iarr[1] = {
349 {0, 0x15, 0, F_D_OUT, NULL, NULL,
350 {6, 0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
351};
352
353static const struct opcode_info_t read_iarr[3] = {
354 {0, 0x28, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL,/* READ(10) */
355 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
356 0, 0, 0, 0} },
357 {0, 0x8, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL, /* READ(6) */
358 {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
359 {0, 0xa8, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL,/* READ(12) */
360 {12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f,
361 0xc7, 0, 0, 0, 0} },
362};
363
364static const struct opcode_info_t write_iarr[3] = {
365 {0, 0x2a, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL, /* 10 */
366 {10, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
367 0, 0, 0, 0} },
368 {0, 0xa, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL, /* 6 */
369 {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
370 {0, 0xaa, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL, /* 12 */
371 {12, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f,
372 0xc7, 0, 0, 0, 0} },
373};
374
375static const struct opcode_info_t sa_in_iarr[1] = {
376 {0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
377 {16, 0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
378 0xff, 0xff, 0xff, 0, 0xc7} },
379};
380
381static const struct opcode_info_t vl_iarr[1] = { /* VARIABLE LENGTH */
382 {0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_DIRECT_IO, resp_write_dt0,
383 NULL, {32, 0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0xb, 0xfa,
384 0, 0xff, 0xff, 0xff, 0xff} }, /* WRITE(32) */
385};
386
387static const struct opcode_info_t maint_in_iarr[2] = {
388 {0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
389 {12, 0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
390 0xc7, 0, 0, 0, 0} },
391 {0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
392 {12, 0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
393 0, 0} },
394};
395
396static const struct opcode_info_t write_same_iarr[1] = {
397 {0, 0x93, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_16, NULL,
398 {16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
399 0xff, 0xff, 0xff, 0x1f, 0xc7} },
400};
401
402static const struct opcode_info_t reserve_iarr[1] = {
403 {0, 0x16, 0, F_D_OUT, NULL, NULL, /* RESERVE(6) */
404 {6, 0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
405};
406
407static const struct opcode_info_t release_iarr[1] = {
408 {0, 0x17, 0, F_D_OUT, NULL, NULL, /* RELEASE(6) */
409 {6, 0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
410};
411
412
413/* This array is accessed via SDEB_I_* values. Make sure all are mapped,
414 * plus the terminating elements for logic that scans this table such as
415 * REPORT SUPPORTED OPERATION CODES. */
416static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
417/* 0 */
418 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,
419 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
420 {0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL,
421 {6, 0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
422 {0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
423 {12, 0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
424 0, 0} },
425 {0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
426 {6, 0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
427 {0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
428 {6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
429 {1, 0x5a, 0, F_D_IN, resp_mode_sense, msense_iarr,
430 {10, 0xf8, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
431 0} },
432 {1, 0x55, 0, F_D_OUT, resp_mode_select, mselect_iarr,
433 {10, 0xf1, 0, 0, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
434 {0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,
435 {10, 0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
436 0, 0, 0} },
437 {0, 0x25, 0, F_D_IN, resp_readcap, NULL,
438 {10, 0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
439 0, 0} },
440 {3, 0x88, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, read_iarr,
441 {16, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
442 0xff, 0xff, 0xff, 0x9f, 0xc7} }, /* READ(16) */
443/* 10 */
444 {3, 0x8a, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, write_iarr,
445 {16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
446 0xff, 0xff, 0xff, 0x9f, 0xc7} }, /* WRITE(16) */
447 {0, 0x1b, 0, 0, resp_start_stop, NULL, /* START STOP UNIT */
448 {6, 0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
449 {1, 0x9e, 0x10, F_SA_LOW | F_D_IN, resp_readcap16, sa_in_iarr,
450 {16, 0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
451 0xff, 0xff, 0xff, 0x1, 0xc7} }, /* READ CAPACITY(16) */
452 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* SA OUT */
453 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
454 {2, 0xa3, 0xa, F_SA_LOW | F_D_IN, resp_report_tgtpgs, maint_in_iarr,
455 {12, 0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0, 0,
456 0} },
457 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
458 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
459 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* VERIFY */
460 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
461 {1, 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_DIRECT_IO, resp_read_dt0,
462 vl_iarr, {32, 0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0x9, 0xfe, 0,
463 0xff, 0xff, 0xff, 0xff} },/* VARIABLE LENGTH, READ(32) */
464 {1, 0x56, 0, F_D_OUT, NULL, reserve_iarr, /* RESERVE(10) */
465 {10, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
466 0} },
467 {1, 0x57, 0, F_D_OUT, NULL, release_iarr, /* RELEASE(10) */
468 {10, 0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
469 0} },
470/* 20 */
471 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ALLOW REMOVAL */
472 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
473 {0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
474 {6, 0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
475 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
476 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
477 {0, 0x1d, F_D_OUT, 0, NULL, NULL, /* SEND DIAGNOSTIC */
478 {6, 0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
479 {0, 0x42, 0, F_D_OUT | FF_DIRECT_IO, resp_unmap, NULL, /* UNMAP */
480 {10, 0x1, 0, 0, 0, 0, 0x1f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
481 {0, 0x53, 0, F_D_IN | F_D_OUT | FF_DIRECT_IO, resp_xdwriteread_10,
482 NULL, {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7,
483 0, 0, 0, 0, 0, 0} },
484 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* WRITE_BUFFER */
485 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
486 {1, 0x41, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_10,
487 write_same_iarr, {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff,
488 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
489 {0, 0x35, 0, F_DELAY_OVERR | FF_DIRECT_IO, NULL, NULL, /* SYNC_CACHE */
490 {10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
491 0, 0, 0, 0} },
492 {0, 0x89, 0, F_D_OUT | FF_DIRECT_IO, resp_comp_write, NULL,
493 {16, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
494 0, 0xff, 0x1f, 0xc7} }, /* COMPARE AND WRITE */
495
496/* 30 */
497 {0xff, 0, 0, 0, NULL, NULL, /* terminating element */
498 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
499};
500
501struct sdebug_scmd_extra_t {
502 bool inj_recovered;
503 bool inj_transport;
504 bool inj_dif;
505 bool inj_dix;
506 bool inj_short;
507};
508
209static int scsi_debug_add_host = DEF_NUM_HOST; 509static int scsi_debug_add_host = DEF_NUM_HOST;
210static int scsi_debug_ato = DEF_ATO; 510static int scsi_debug_ato = DEF_ATO;
211static int scsi_debug_delay = DEF_DELAY; 511static int scsi_debug_delay = DEF_DELAY;
@@ -245,6 +545,8 @@ static unsigned int scsi_debug_write_same_length = DEF_WRITESAME_LENGTH;
245static bool scsi_debug_removable = DEF_REMOVABLE; 545static bool scsi_debug_removable = DEF_REMOVABLE;
246static bool scsi_debug_clustering; 546static bool scsi_debug_clustering;
247static bool scsi_debug_host_lock = DEF_HOST_LOCK; 547static bool scsi_debug_host_lock = DEF_HOST_LOCK;
548static bool scsi_debug_strict = DEF_STRICT;
549static bool sdebug_any_injecting_opt;
248 550
249static atomic_t sdebug_cmnd_count; 551static atomic_t sdebug_cmnd_count;
250static atomic_t sdebug_completions; 552static atomic_t sdebug_completions;
@@ -277,11 +579,10 @@ struct sdebug_dev_info {
277 unsigned int target; 579 unsigned int target;
278 u64 lun; 580 u64 lun;
279 struct sdebug_host_info *sdbg_host; 581 struct sdebug_host_info *sdbg_host;
280 u64 wlun;
281 unsigned long uas_bm[1]; 582 unsigned long uas_bm[1];
282 atomic_t num_in_q; 583 atomic_t num_in_q;
283 char stopped; 584 char stopped; /* TODO: should be atomic */
284 char used; 585 bool used;
285}; 586};
286 587
287struct sdebug_host_info { 588struct sdebug_host_info {
@@ -394,6 +695,50 @@ static void sdebug_max_tgts_luns(void)
394 spin_unlock(&sdebug_host_list_lock); 695 spin_unlock(&sdebug_host_list_lock);
395} 696}
396 697
698enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
699
700/* Set in_bit to -1 to indicate no bit position of invalid field */
701static void
702mk_sense_invalid_fld(struct scsi_cmnd *scp, enum sdeb_cmd_data c_d,
703 int in_byte, int in_bit)
704{
705 unsigned char *sbuff;
706 u8 sks[4];
707 int sl, asc;
708
709 sbuff = scp->sense_buffer;
710 if (!sbuff) {
711 sdev_printk(KERN_ERR, scp->device,
712 "%s: sense_buffer is NULL\n", __func__);
713 return;
714 }
715 asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
716 memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
717 scsi_build_sense_buffer(scsi_debug_dsense, sbuff, ILLEGAL_REQUEST,
718 asc, 0);
719 memset(sks, 0, sizeof(sks));
720 sks[0] = 0x80;
721 if (c_d)
722 sks[0] |= 0x40;
723 if (in_bit >= 0) {
724 sks[0] |= 0x8;
725 sks[0] |= 0x7 & in_bit;
726 }
727 put_unaligned_be16(in_byte, sks + 1);
728 if (scsi_debug_dsense) {
729 sl = sbuff[7] + 8;
730 sbuff[7] = sl;
731 sbuff[sl] = 0x2;
732 sbuff[sl + 1] = 0x6;
733 memcpy(sbuff + sl + 4, sks, 3);
734 } else
735 memcpy(sbuff + 15, sks, 3);
736 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
737 sdev_printk(KERN_INFO, scp->device, "%s: [sense_key,asc,ascq"
738 "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
739 my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
740}
741
397static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq) 742static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
398{ 743{
399 unsigned char *sbuff; 744 unsigned char *sbuff;
@@ -414,63 +759,10 @@ static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
414 my_name, key, asc, asq); 759 my_name, key, asc, asq);
415} 760}
416 761
417static void get_data_transfer_info(unsigned char *cmd, 762static void
418 unsigned long long *lba, unsigned int *num, 763mk_sense_invalid_opcode(struct scsi_cmnd *scp)
419 u32 *ei_lba)
420{ 764{
421 *ei_lba = 0; 765 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
422
423 switch (*cmd) {
424 case VARIABLE_LENGTH_CMD:
425 *lba = (u64)cmd[19] | (u64)cmd[18] << 8 |
426 (u64)cmd[17] << 16 | (u64)cmd[16] << 24 |
427 (u64)cmd[15] << 32 | (u64)cmd[14] << 40 |
428 (u64)cmd[13] << 48 | (u64)cmd[12] << 56;
429
430 *ei_lba = (u32)cmd[23] | (u32)cmd[22] << 8 |
431 (u32)cmd[21] << 16 | (u32)cmd[20] << 24;
432
433 *num = (u32)cmd[31] | (u32)cmd[30] << 8 | (u32)cmd[29] << 16 |
434 (u32)cmd[28] << 24;
435 break;
436
437 case WRITE_SAME_16:
438 case WRITE_16:
439 case READ_16:
440 *lba = (u64)cmd[9] | (u64)cmd[8] << 8 |
441 (u64)cmd[7] << 16 | (u64)cmd[6] << 24 |
442 (u64)cmd[5] << 32 | (u64)cmd[4] << 40 |
443 (u64)cmd[3] << 48 | (u64)cmd[2] << 56;
444
445 *num = (u32)cmd[13] | (u32)cmd[12] << 8 | (u32)cmd[11] << 16 |
446 (u32)cmd[10] << 24;
447 break;
448 case WRITE_12:
449 case READ_12:
450 *lba = (u32)cmd[5] | (u32)cmd[4] << 8 | (u32)cmd[3] << 16 |
451 (u32)cmd[2] << 24;
452
453 *num = (u32)cmd[9] | (u32)cmd[8] << 8 | (u32)cmd[7] << 16 |
454 (u32)cmd[6] << 24;
455 break;
456 case WRITE_SAME:
457 case WRITE_10:
458 case READ_10:
459 case XDWRITEREAD_10:
460 *lba = (u32)cmd[5] | (u32)cmd[4] << 8 | (u32)cmd[3] << 16 |
461 (u32)cmd[2] << 24;
462
463 *num = (u32)cmd[8] | (u32)cmd[7] << 8;
464 break;
465 case WRITE_6:
466 case READ_6:
467 *lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
468 (u32)(cmd[1] & 0x1f) << 16;
469 *num = (0 == cmd[4]) ? 256 : cmd[4];
470 break;
471 default:
472 break;
473 }
474} 766}
475 767
476static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg) 768static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
@@ -520,6 +812,11 @@ static int check_readiness(struct scsi_cmnd *SCpnt, int uas_only,
520 if (debug) 812 if (debug)
521 cp = "mode parameters changed"; 813 cp = "mode parameters changed";
522 break; 814 break;
815 case SDEBUG_UA_CAPACITY_CHANGED:
816 mk_sense_buffer(SCpnt, UNIT_ATTENTION,
817 UA_CHANGED_ASC, CAPACITY_CHANGED_ASCQ);
818 if (debug)
819 cp = "capacity data changed";
523 default: 820 default:
524 pr_warn("%s: unexpected unit attention code=%d\n", 821 pr_warn("%s: unexpected unit attention code=%d\n",
525 __func__, k); 822 __func__, k);
@@ -924,19 +1221,20 @@ static int inquiry_evpd_b2(unsigned char *arr)
924#define SDEBUG_LONG_INQ_SZ 96 1221#define SDEBUG_LONG_INQ_SZ 96
925#define SDEBUG_MAX_INQ_ARR_SZ 584 1222#define SDEBUG_MAX_INQ_ARR_SZ 584
926 1223
927static int resp_inquiry(struct scsi_cmnd *scp, int target, 1224static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
928 struct sdebug_dev_info * devip)
929{ 1225{
930 unsigned char pq_pdt; 1226 unsigned char pq_pdt;
931 unsigned char * arr; 1227 unsigned char * arr;
932 unsigned char *cmd = scp->cmnd; 1228 unsigned char *cmd = scp->cmnd;
933 int alloc_len, n, ret; 1229 int alloc_len, n, ret;
1230 bool have_wlun;
934 1231
935 alloc_len = (cmd[3] << 8) + cmd[4]; 1232 alloc_len = (cmd[3] << 8) + cmd[4];
936 arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC); 1233 arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
937 if (! arr) 1234 if (! arr)
938 return DID_REQUEUE << 16; 1235 return DID_REQUEUE << 16;
939 if (devip->wlun) 1236 have_wlun = (scp->device->lun == SAM2_WLUN_REPORT_LUNS);
1237 if (have_wlun)
940 pq_pdt = 0x1e; /* present, wlun */ 1238 pq_pdt = 0x1e; /* present, wlun */
941 else if (scsi_debug_no_lun_0 && (0 == devip->lun)) 1239 else if (scsi_debug_no_lun_0 && (0 == devip->lun))
942 pq_pdt = 0x7f; /* not present, no device type */ 1240 pq_pdt = 0x7f; /* not present, no device type */
@@ -944,8 +1242,7 @@ static int resp_inquiry(struct scsi_cmnd *scp, int target,
944 pq_pdt = (scsi_debug_ptype & 0x1f); 1242 pq_pdt = (scsi_debug_ptype & 0x1f);
945 arr[0] = pq_pdt; 1243 arr[0] = pq_pdt;
946 if (0x2 & cmd[1]) { /* CMDDT bit set */ 1244 if (0x2 & cmd[1]) { /* CMDDT bit set */
947 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 1245 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
948 0);
949 kfree(arr); 1246 kfree(arr);
950 return check_condition_result; 1247 return check_condition_result;
951 } else if (0x1 & cmd[1]) { /* EVPD bit set */ 1248 } else if (0x1 & cmd[1]) { /* EVPD bit set */
@@ -957,7 +1254,7 @@ static int resp_inquiry(struct scsi_cmnd *scp, int target,
957 (devip->channel & 0x7f); 1254 (devip->channel & 0x7f);
958 if (0 == scsi_debug_vpd_use_hostno) 1255 if (0 == scsi_debug_vpd_use_hostno)
959 host_no = 0; 1256 host_no = 0;
960 lu_id_num = devip->wlun ? -1 : (((host_no + 1) * 2000) + 1257 lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
961 (devip->target * 1000) + devip->lun); 1258 (devip->target * 1000) + devip->lun);
962 target_dev_id = ((host_no + 1) * 2000) + 1259 target_dev_id = ((host_no + 1) * 2000) +
963 (devip->target * 1000) - 3; 1260 (devip->target * 1000) - 3;
@@ -1029,9 +1326,7 @@ static int resp_inquiry(struct scsi_cmnd *scp, int target,
1029 arr[1] = cmd[2]; /*sanity */ 1326 arr[1] = cmd[2]; /*sanity */
1030 arr[3] = inquiry_evpd_b2(&arr[4]); 1327 arr[3] = inquiry_evpd_b2(&arr[4]);
1031 } else { 1328 } else {
1032 /* Illegal request, invalid field in cdb */ 1329 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
1033 mk_sense_buffer(scp, ILLEGAL_REQUEST,
1034 INVALID_FIELD_IN_CDB, 0);
1035 kfree(arr); 1330 kfree(arr);
1036 return check_condition_result; 1331 return check_condition_result;
1037 } 1332 }
@@ -1077,18 +1372,20 @@ static int resp_requests(struct scsi_cmnd * scp,
1077 unsigned char * sbuff; 1372 unsigned char * sbuff;
1078 unsigned char *cmd = scp->cmnd; 1373 unsigned char *cmd = scp->cmnd;
1079 unsigned char arr[SCSI_SENSE_BUFFERSIZE]; 1374 unsigned char arr[SCSI_SENSE_BUFFERSIZE];
1080 int want_dsense; 1375 bool dsense, want_dsense;
1081 int len = 18; 1376 int len = 18;
1082 1377
1083 memset(arr, 0, sizeof(arr)); 1378 memset(arr, 0, sizeof(arr));
1084 want_dsense = !!(cmd[1] & 1) || scsi_debug_dsense; 1379 dsense = !!(cmd[1] & 1);
1380 want_dsense = dsense || scsi_debug_dsense;
1085 sbuff = scp->sense_buffer; 1381 sbuff = scp->sense_buffer;
1086 if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) { 1382 if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1087 if (want_dsense) { 1383 if (dsense) {
1088 arr[0] = 0x72; 1384 arr[0] = 0x72;
1089 arr[1] = 0x0; /* NO_SENSE in sense_key */ 1385 arr[1] = 0x0; /* NO_SENSE in sense_key */
1090 arr[2] = THRESHOLD_EXCEEDED; 1386 arr[2] = THRESHOLD_EXCEEDED;
1091 arr[3] = 0xff; /* TEST set and MRIE==6 */ 1387 arr[3] = 0xff; /* TEST set and MRIE==6 */
1388 len = 8;
1092 } else { 1389 } else {
1093 arr[0] = 0x70; 1390 arr[0] = 0x70;
1094 arr[2] = 0x0; /* NO_SENSE in sense_key */ 1391 arr[2] = 0x0; /* NO_SENSE in sense_key */
@@ -1098,15 +1395,34 @@ static int resp_requests(struct scsi_cmnd * scp,
1098 } 1395 }
1099 } else { 1396 } else {
1100 memcpy(arr, sbuff, SCSI_SENSE_BUFFERSIZE); 1397 memcpy(arr, sbuff, SCSI_SENSE_BUFFERSIZE);
1101 if ((cmd[1] & 1) && (! scsi_debug_dsense)) { 1398 if (arr[0] >= 0x70 && dsense == scsi_debug_dsense)
1102 /* DESC bit set and sense_buff in fixed format */ 1399 ; /* have sense and formats match */
1103 memset(arr, 0, sizeof(arr)); 1400 else if (arr[0] <= 0x70) {
1401 if (dsense) {
1402 memset(arr, 0, 8);
1403 arr[0] = 0x72;
1404 len = 8;
1405 } else {
1406 memset(arr, 0, 18);
1407 arr[0] = 0x70;
1408 arr[7] = 0xa;
1409 }
1410 } else if (dsense) {
1411 memset(arr, 0, 8);
1104 arr[0] = 0x72; 1412 arr[0] = 0x72;
1105 arr[1] = sbuff[2]; /* sense key */ 1413 arr[1] = sbuff[2]; /* sense key */
1106 arr[2] = sbuff[12]; /* asc */ 1414 arr[2] = sbuff[12]; /* asc */
1107 arr[3] = sbuff[13]; /* ascq */ 1415 arr[3] = sbuff[13]; /* ascq */
1108 len = 8; 1416 len = 8;
1417 } else {
1418 memset(arr, 0, 18);
1419 arr[0] = 0x70;
1420 arr[2] = sbuff[1];
1421 arr[7] = 0xa;
1422 arr[12] = sbuff[1];
1423 arr[13] = sbuff[3];
1109 } 1424 }
1425
1110 } 1426 }
1111 mk_sense_buffer(scp, 0, NO_ADDITIONAL_SENSE, 0); 1427 mk_sense_buffer(scp, 0, NO_ADDITIONAL_SENSE, 0);
1112 return fill_from_dev_buffer(scp, arr, len); 1428 return fill_from_dev_buffer(scp, arr, len);
@@ -1116,15 +1432,11 @@ static int resp_start_stop(struct scsi_cmnd * scp,
1116 struct sdebug_dev_info * devip) 1432 struct sdebug_dev_info * devip)
1117{ 1433{
1118 unsigned char *cmd = scp->cmnd; 1434 unsigned char *cmd = scp->cmnd;
1119 int power_cond, errsts, start; 1435 int power_cond, start;
1120 1436
1121 errsts = check_readiness(scp, UAS_ONLY, devip);
1122 if (errsts)
1123 return errsts;
1124 power_cond = (cmd[4] & 0xf0) >> 4; 1437 power_cond = (cmd[4] & 0xf0) >> 4;
1125 if (power_cond) { 1438 if (power_cond) {
1126 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 1439 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1127 0);
1128 return check_condition_result; 1440 return check_condition_result;
1129 } 1441 }
1130 start = cmd[4] & 1; 1442 start = cmd[4] & 1;
@@ -1148,11 +1460,7 @@ static int resp_readcap(struct scsi_cmnd * scp,
1148{ 1460{
1149 unsigned char arr[SDEBUG_READCAP_ARR_SZ]; 1461 unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1150 unsigned int capac; 1462 unsigned int capac;
1151 int errsts;
1152 1463
1153 errsts = check_readiness(scp, UAS_ONLY, devip);
1154 if (errsts)
1155 return errsts;
1156 /* following just in case virtual_gb changed */ 1464 /* following just in case virtual_gb changed */
1157 sdebug_capacity = get_sdebug_capacity(); 1465 sdebug_capacity = get_sdebug_capacity();
1158 memset(arr, 0, SDEBUG_READCAP_ARR_SZ); 1466 memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
@@ -1180,11 +1488,8 @@ static int resp_readcap16(struct scsi_cmnd * scp,
1180 unsigned char *cmd = scp->cmnd; 1488 unsigned char *cmd = scp->cmnd;
1181 unsigned char arr[SDEBUG_READCAP16_ARR_SZ]; 1489 unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1182 unsigned long long capac; 1490 unsigned long long capac;
1183 int errsts, k, alloc_len; 1491 int k, alloc_len;
1184 1492
1185 errsts = check_readiness(scp, UAS_ONLY, devip);
1186 if (errsts)
1187 return errsts;
1188 alloc_len = ((cmd[10] << 24) + (cmd[11] << 16) + (cmd[12] << 8) 1493 alloc_len = ((cmd[10] << 24) + (cmd[11] << 16) + (cmd[12] << 8)
1189 + cmd[13]); 1494 + cmd[13]);
1190 /* following just in case virtual_gb changed */ 1495 /* following just in case virtual_gb changed */
@@ -1300,6 +1605,184 @@ static int resp_report_tgtpgs(struct scsi_cmnd * scp,
1300 return ret; 1605 return ret;
1301} 1606}
1302 1607
1608static int
1609resp_rsup_opcodes(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1610{
1611 bool rctd;
1612 u8 reporting_opts, req_opcode, sdeb_i, supp;
1613 u16 req_sa, u;
1614 u32 alloc_len, a_len;
1615 int k, offset, len, errsts, count, bump, na;
1616 const struct opcode_info_t *oip;
1617 const struct opcode_info_t *r_oip;
1618 u8 *arr;
1619 u8 *cmd = scp->cmnd;
1620
1621 rctd = !!(cmd[2] & 0x80);
1622 reporting_opts = cmd[2] & 0x7;
1623 req_opcode = cmd[3];
1624 req_sa = get_unaligned_be16(cmd + 4);
1625 alloc_len = get_unaligned_be32(cmd + 6);
1626 if (alloc_len < 4 && alloc_len > 0xffff) {
1627 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1628 return check_condition_result;
1629 }
1630 if (alloc_len > 8192)
1631 a_len = 8192;
1632 else
1633 a_len = alloc_len;
1634 arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_KERNEL);
1635 if (NULL == arr) {
1636 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
1637 INSUFF_RES_ASCQ);
1638 return check_condition_result;
1639 }
1640 switch (reporting_opts) {
1641 case 0: /* all commands */
1642 /* count number of commands */
1643 for (count = 0, oip = opcode_info_arr;
1644 oip->num_attached != 0xff; ++oip) {
1645 if (F_INV_OP & oip->flags)
1646 continue;
1647 count += (oip->num_attached + 1);
1648 }
1649 bump = rctd ? 20 : 8;
1650 put_unaligned_be32(count * bump, arr);
1651 for (offset = 4, oip = opcode_info_arr;
1652 oip->num_attached != 0xff && offset < a_len; ++oip) {
1653 if (F_INV_OP & oip->flags)
1654 continue;
1655 na = oip->num_attached;
1656 arr[offset] = oip->opcode;
1657 put_unaligned_be16(oip->sa, arr + offset + 2);
1658 if (rctd)
1659 arr[offset + 5] |= 0x2;
1660 if (FF_SA & oip->flags)
1661 arr[offset + 5] |= 0x1;
1662 put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
1663 if (rctd)
1664 put_unaligned_be16(0xa, arr + offset + 8);
1665 r_oip = oip;
1666 for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
1667 if (F_INV_OP & oip->flags)
1668 continue;
1669 offset += bump;
1670 arr[offset] = oip->opcode;
1671 put_unaligned_be16(oip->sa, arr + offset + 2);
1672 if (rctd)
1673 arr[offset + 5] |= 0x2;
1674 if (FF_SA & oip->flags)
1675 arr[offset + 5] |= 0x1;
1676 put_unaligned_be16(oip->len_mask[0],
1677 arr + offset + 6);
1678 if (rctd)
1679 put_unaligned_be16(0xa,
1680 arr + offset + 8);
1681 }
1682 oip = r_oip;
1683 offset += bump;
1684 }
1685 break;
1686 case 1: /* one command: opcode only */
1687 case 2: /* one command: opcode plus service action */
1688 case 3: /* one command: if sa==0 then opcode only else opcode+sa */
1689 sdeb_i = opcode_ind_arr[req_opcode];
1690 oip = &opcode_info_arr[sdeb_i];
1691 if (F_INV_OP & oip->flags) {
1692 supp = 1;
1693 offset = 4;
1694 } else {
1695 if (1 == reporting_opts) {
1696 if (FF_SA & oip->flags) {
1697 mk_sense_invalid_fld(scp, SDEB_IN_CDB,
1698 2, 2);
1699 kfree(arr);
1700 return check_condition_result;
1701 }
1702 req_sa = 0;
1703 } else if (2 == reporting_opts &&
1704 0 == (FF_SA & oip->flags)) {
1705 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
1706 kfree(arr); /* point at requested sa */
1707 return check_condition_result;
1708 }
1709 if (0 == (FF_SA & oip->flags) &&
1710 req_opcode == oip->opcode)
1711 supp = 3;
1712 else if (0 == (FF_SA & oip->flags)) {
1713 na = oip->num_attached;
1714 for (k = 0, oip = oip->arrp; k < na;
1715 ++k, ++oip) {
1716 if (req_opcode == oip->opcode)
1717 break;
1718 }
1719 supp = (k >= na) ? 1 : 3;
1720 } else if (req_sa != oip->sa) {
1721 na = oip->num_attached;
1722 for (k = 0, oip = oip->arrp; k < na;
1723 ++k, ++oip) {
1724 if (req_sa == oip->sa)
1725 break;
1726 }
1727 supp = (k >= na) ? 1 : 3;
1728 } else
1729 supp = 3;
1730 if (3 == supp) {
1731 u = oip->len_mask[0];
1732 put_unaligned_be16(u, arr + 2);
1733 arr[4] = oip->opcode;
1734 for (k = 1; k < u; ++k)
1735 arr[4 + k] = (k < 16) ?
1736 oip->len_mask[k] : 0xff;
1737 offset = 4 + u;
1738 } else
1739 offset = 4;
1740 }
1741 arr[1] = (rctd ? 0x80 : 0) | supp;
1742 if (rctd) {
1743 put_unaligned_be16(0xa, arr + offset);
1744 offset += 12;
1745 }
1746 break;
1747 default:
1748 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
1749 kfree(arr);
1750 return check_condition_result;
1751 }
1752 offset = (offset < a_len) ? offset : a_len;
1753 len = (offset < alloc_len) ? offset : alloc_len;
1754 errsts = fill_from_dev_buffer(scp, arr, len);
1755 kfree(arr);
1756 return errsts;
1757}
1758
1759static int
1760resp_rsup_tmfs(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1761{
1762 bool repd;
1763 u32 alloc_len, len;
1764 u8 arr[16];
1765 u8 *cmd = scp->cmnd;
1766
1767 memset(arr, 0, sizeof(arr));
1768 repd = !!(cmd[2] & 0x80);
1769 alloc_len = get_unaligned_be32(cmd + 6);
1770 if (alloc_len < 4) {
1771 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1772 return check_condition_result;
1773 }
1774 arr[0] = 0xc8; /* ATS | ATSS | LURS */
1775 arr[1] = 0x1; /* ITNRS */
1776 if (repd) {
1777 arr[3] = 0xc;
1778 len = 16;
1779 } else
1780 len = 4;
1781
1782 len = (len < alloc_len) ? len : alloc_len;
1783 return fill_from_dev_buffer(scp, arr, len);
1784}
1785
1303/* <<Following mode page info copied from ST318451LW>> */ 1786/* <<Following mode page info copied from ST318451LW>> */
1304 1787
1305static int resp_err_recov_pg(unsigned char * p, int pcontrol, int target) 1788static int resp_err_recov_pg(unsigned char * p, int pcontrol, int target)
@@ -1459,20 +1942,18 @@ static int resp_sas_sha_m_spg(unsigned char * p, int pcontrol)
1459 1942
1460#define SDEBUG_MAX_MSENSE_SZ 256 1943#define SDEBUG_MAX_MSENSE_SZ 256
1461 1944
1462static int resp_mode_sense(struct scsi_cmnd * scp, int target, 1945static int
1463 struct sdebug_dev_info * devip) 1946resp_mode_sense(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1464{ 1947{
1465 unsigned char dbd, llbaa; 1948 unsigned char dbd, llbaa;
1466 int pcontrol, pcode, subpcode, bd_len; 1949 int pcontrol, pcode, subpcode, bd_len;
1467 unsigned char dev_spec; 1950 unsigned char dev_spec;
1468 int k, alloc_len, msense_6, offset, len, errsts, target_dev_id; 1951 int k, alloc_len, msense_6, offset, len, target_dev_id;
1952 int target = scp->device->id;
1469 unsigned char * ap; 1953 unsigned char * ap;
1470 unsigned char arr[SDEBUG_MAX_MSENSE_SZ]; 1954 unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
1471 unsigned char *cmd = scp->cmnd; 1955 unsigned char *cmd = scp->cmnd;
1472 1956
1473 errsts = check_readiness(scp, UAS_ONLY, devip);
1474 if (errsts)
1475 return errsts;
1476 dbd = !!(cmd[1] & 0x8); 1957 dbd = !!(cmd[1] & 0x8);
1477 pcontrol = (cmd[2] & 0xc0) >> 6; 1958 pcontrol = (cmd[2] & 0xc0) >> 6;
1478 pcode = cmd[2] & 0x3f; 1959 pcode = cmd[2] & 0x3f;
@@ -1542,8 +2023,7 @@ static int resp_mode_sense(struct scsi_cmnd * scp, int target,
1542 2023
1543 if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) { 2024 if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
1544 /* TODO: Control Extension page */ 2025 /* TODO: Control Extension page */
1545 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 2026 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
1546 0);
1547 return check_condition_result; 2027 return check_condition_result;
1548 } 2028 }
1549 switch (pcode) { 2029 switch (pcode) {
@@ -1569,8 +2049,7 @@ static int resp_mode_sense(struct scsi_cmnd * scp, int target,
1569 break; 2049 break;
1570 case 0x19: /* if spc==1 then sas phy, control+discover */ 2050 case 0x19: /* if spc==1 then sas phy, control+discover */
1571 if ((subpcode > 0x2) && (subpcode < 0xff)) { 2051 if ((subpcode > 0x2) && (subpcode < 0xff)) {
1572 mk_sense_buffer(scp, ILLEGAL_REQUEST, 2052 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
1573 INVALID_FIELD_IN_CDB, 0);
1574 return check_condition_result; 2053 return check_condition_result;
1575 } 2054 }
1576 len = 0; 2055 len = 0;
@@ -1602,15 +2081,13 @@ static int resp_mode_sense(struct scsi_cmnd * scp, int target,
1602 } 2081 }
1603 len += resp_iec_m_pg(ap + len, pcontrol, target); 2082 len += resp_iec_m_pg(ap + len, pcontrol, target);
1604 } else { 2083 } else {
1605 mk_sense_buffer(scp, ILLEGAL_REQUEST, 2084 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
1606 INVALID_FIELD_IN_CDB, 0);
1607 return check_condition_result; 2085 return check_condition_result;
1608 } 2086 }
1609 offset += len; 2087 offset += len;
1610 break; 2088 break;
1611 default: 2089 default:
1612 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 2090 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
1613 0);
1614 return check_condition_result; 2091 return check_condition_result;
1615 } 2092 }
1616 if (msense_6) 2093 if (msense_6)
@@ -1624,24 +2101,21 @@ static int resp_mode_sense(struct scsi_cmnd * scp, int target,
1624 2101
1625#define SDEBUG_MAX_MSELECT_SZ 512 2102#define SDEBUG_MAX_MSELECT_SZ 512
1626 2103
1627static int resp_mode_select(struct scsi_cmnd * scp, int mselect6, 2104static int
1628 struct sdebug_dev_info * devip) 2105resp_mode_select(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1629{ 2106{
1630 int pf, sp, ps, md_len, bd_len, off, spf, pg_len; 2107 int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
1631 int param_len, res, errsts, mpage; 2108 int param_len, res, mpage;
1632 unsigned char arr[SDEBUG_MAX_MSELECT_SZ]; 2109 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
1633 unsigned char *cmd = scp->cmnd; 2110 unsigned char *cmd = scp->cmnd;
2111 int mselect6 = (MODE_SELECT == cmd[0]);
1634 2112
1635 errsts = check_readiness(scp, UAS_ONLY, devip);
1636 if (errsts)
1637 return errsts;
1638 memset(arr, 0, sizeof(arr)); 2113 memset(arr, 0, sizeof(arr));
1639 pf = cmd[1] & 0x10; 2114 pf = cmd[1] & 0x10;
1640 sp = cmd[1] & 0x1; 2115 sp = cmd[1] & 0x1;
1641 param_len = mselect6 ? cmd[4] : ((cmd[7] << 8) + cmd[8]); 2116 param_len = mselect6 ? cmd[4] : ((cmd[7] << 8) + cmd[8]);
1642 if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) { 2117 if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
1643 mk_sense_buffer(scp, ILLEGAL_REQUEST, 2118 mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
1644 INVALID_FIELD_IN_CDB, 0);
1645 return check_condition_result; 2119 return check_condition_result;
1646 } 2120 }
1647 res = fetch_to_dev_buffer(scp, arr, param_len); 2121 res = fetch_to_dev_buffer(scp, arr, param_len);
@@ -1655,16 +2129,14 @@ static int resp_mode_select(struct scsi_cmnd * scp, int mselect6,
1655 md_len = mselect6 ? (arr[0] + 1) : ((arr[0] << 8) + arr[1] + 2); 2129 md_len = mselect6 ? (arr[0] + 1) : ((arr[0] << 8) + arr[1] + 2);
1656 bd_len = mselect6 ? arr[3] : ((arr[6] << 8) + arr[7]); 2130 bd_len = mselect6 ? arr[3] : ((arr[6] << 8) + arr[7]);
1657 if (md_len > 2) { 2131 if (md_len > 2) {
1658 mk_sense_buffer(scp, ILLEGAL_REQUEST, 2132 mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
1659 INVALID_FIELD_IN_PARAM_LIST, 0);
1660 return check_condition_result; 2133 return check_condition_result;
1661 } 2134 }
1662 off = bd_len + (mselect6 ? 4 : 8); 2135 off = bd_len + (mselect6 ? 4 : 8);
1663 mpage = arr[off] & 0x3f; 2136 mpage = arr[off] & 0x3f;
1664 ps = !!(arr[off] & 0x80); 2137 ps = !!(arr[off] & 0x80);
1665 if (ps) { 2138 if (ps) {
1666 mk_sense_buffer(scp, ILLEGAL_REQUEST, 2139 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
1667 INVALID_FIELD_IN_PARAM_LIST, 0);
1668 return check_condition_result; 2140 return check_condition_result;
1669 } 2141 }
1670 spf = !!(arr[off] & 0x40); 2142 spf = !!(arr[off] & 0x40);
@@ -1701,8 +2173,7 @@ static int resp_mode_select(struct scsi_cmnd * scp, int mselect6,
1701 default: 2173 default:
1702 break; 2174 break;
1703 } 2175 }
1704 mk_sense_buffer(scp, ILLEGAL_REQUEST, 2176 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
1705 INVALID_FIELD_IN_PARAM_LIST, 0);
1706 return check_condition_result; 2177 return check_condition_result;
1707set_mode_changed_ua: 2178set_mode_changed_ua:
1708 set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm); 2179 set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
@@ -1737,19 +2208,15 @@ static int resp_ie_l_pg(unsigned char * arr)
1737static int resp_log_sense(struct scsi_cmnd * scp, 2208static int resp_log_sense(struct scsi_cmnd * scp,
1738 struct sdebug_dev_info * devip) 2209 struct sdebug_dev_info * devip)
1739{ 2210{
1740 int ppc, sp, pcontrol, pcode, subpcode, alloc_len, errsts, len, n; 2211 int ppc, sp, pcontrol, pcode, subpcode, alloc_len, len, n;
1741 unsigned char arr[SDEBUG_MAX_LSENSE_SZ]; 2212 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
1742 unsigned char *cmd = scp->cmnd; 2213 unsigned char *cmd = scp->cmnd;
1743 2214
1744 errsts = check_readiness(scp, UAS_ONLY, devip);
1745 if (errsts)
1746 return errsts;
1747 memset(arr, 0, sizeof(arr)); 2215 memset(arr, 0, sizeof(arr));
1748 ppc = cmd[1] & 0x2; 2216 ppc = cmd[1] & 0x2;
1749 sp = cmd[1] & 0x1; 2217 sp = cmd[1] & 0x1;
1750 if (ppc || sp) { 2218 if (ppc || sp) {
1751 mk_sense_buffer(scp, ILLEGAL_REQUEST, 2219 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
1752 INVALID_FIELD_IN_CDB, 0);
1753 return check_condition_result; 2220 return check_condition_result;
1754 } 2221 }
1755 pcontrol = (cmd[2] & 0xc0) >> 6; 2222 pcontrol = (cmd[2] & 0xc0) >> 6;
@@ -1773,8 +2240,7 @@ static int resp_log_sense(struct scsi_cmnd * scp,
1773 arr[3] = resp_ie_l_pg(arr + 4); 2240 arr[3] = resp_ie_l_pg(arr + 4);
1774 break; 2241 break;
1775 default: 2242 default:
1776 mk_sense_buffer(scp, ILLEGAL_REQUEST, 2243 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
1777 INVALID_FIELD_IN_CDB, 0);
1778 return check_condition_result; 2244 return check_condition_result;
1779 } 2245 }
1780 } else if (0xff == subpcode) { 2246 } else if (0xff == subpcode) {
@@ -1806,13 +2272,11 @@ static int resp_log_sense(struct scsi_cmnd * scp,
1806 arr[3] = n - 4; 2272 arr[3] = n - 4;
1807 break; 2273 break;
1808 default: 2274 default:
1809 mk_sense_buffer(scp, ILLEGAL_REQUEST, 2275 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
1810 INVALID_FIELD_IN_CDB, 0);
1811 return check_condition_result; 2276 return check_condition_result;
1812 } 2277 }
1813 } else { 2278 } else {
1814 mk_sense_buffer(scp, ILLEGAL_REQUEST, 2279 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
1815 INVALID_FIELD_IN_CDB, 0);
1816 return check_condition_result; 2280 return check_condition_result;
1817 } 2281 }
1818 len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len); 2282 len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
@@ -1824,11 +2288,12 @@ static int check_device_access_params(struct scsi_cmnd *scp,
1824 unsigned long long lba, unsigned int num) 2288 unsigned long long lba, unsigned int num)
1825{ 2289{
1826 if (lba + num > sdebug_capacity) { 2290 if (lba + num > sdebug_capacity) {
1827 mk_sense_buffer(scp, ILLEGAL_REQUEST, ADDR_OUT_OF_RANGE, 0); 2291 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
1828 return check_condition_result; 2292 return check_condition_result;
1829 } 2293 }
1830 /* transfer length excessive (tie in to block limits VPD page) */ 2294 /* transfer length excessive (tie in to block limits VPD page) */
1831 if (num > sdebug_store_sectors) { 2295 if (num > sdebug_store_sectors) {
2296 /* needs work to find which cdb byte 'num' comes from */
1832 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0); 2297 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
1833 return check_condition_result; 2298 return check_condition_result;
1834 } 2299 }
@@ -1836,17 +2301,17 @@ static int check_device_access_params(struct scsi_cmnd *scp,
1836} 2301}
1837 2302
1838/* Returns number of bytes copied or -1 if error. */ 2303/* Returns number of bytes copied or -1 if error. */
1839static int do_device_access(struct scsi_cmnd *scmd, 2304static int
1840 unsigned long long lba, unsigned int num, int write) 2305do_device_access(struct scsi_cmnd *scmd, u64 lba, u32 num, bool do_write)
1841{ 2306{
1842 int ret; 2307 int ret;
1843 unsigned long long block, rest = 0; 2308 u64 block, rest = 0;
1844 struct scsi_data_buffer *sdb; 2309 struct scsi_data_buffer *sdb;
1845 enum dma_data_direction dir; 2310 enum dma_data_direction dir;
1846 size_t (*func)(struct scatterlist *, unsigned int, void *, size_t, 2311 size_t (*func)(struct scatterlist *, unsigned int, void *, size_t,
1847 off_t); 2312 off_t);
1848 2313
1849 if (write) { 2314 if (do_write) {
1850 sdb = scsi_out(scmd); 2315 sdb = scsi_out(scmd);
1851 dir = DMA_TO_DEVICE; 2316 dir = DMA_TO_DEVICE;
1852 func = sg_pcopy_to_buffer; 2317 func = sg_pcopy_to_buffer;
@@ -1880,6 +2345,38 @@ static int do_device_access(struct scsi_cmnd *scmd,
1880 return ret; 2345 return ret;
1881} 2346}
1882 2347
2348/* If fake_store(lba,num) compares equal to arr(num), then copy top half of
2349 * arr into fake_store(lba,num) and return true. If comparison fails then
2350 * return false. */
2351static bool
2352comp_write_worker(u64 lba, u32 num, const u8 *arr)
2353{
2354 bool res;
2355 u64 block, rest = 0;
2356 u32 store_blks = sdebug_store_sectors;
2357 u32 lb_size = scsi_debug_sector_size;
2358
2359 block = do_div(lba, store_blks);
2360 if (block + num > store_blks)
2361 rest = block + num - store_blks;
2362
2363 res = !memcmp(fake_storep + (block * lb_size), arr,
2364 (num - rest) * lb_size);
2365 if (!res)
2366 return res;
2367 if (rest)
2368 res = memcmp(fake_storep, arr + ((num - rest) * lb_size),
2369 rest * lb_size);
2370 if (!res)
2371 return res;
2372 arr += num * lb_size;
2373 memcpy(fake_storep + (block * lb_size), arr, (num - rest) * lb_size);
2374 if (rest)
2375 memcpy(fake_storep, arr + ((num - rest) * lb_size),
2376 rest * lb_size);
2377 return res;
2378}
2379
1883static __be16 dif_compute_csum(const void *buf, int len) 2380static __be16 dif_compute_csum(const void *buf, int len)
1884{ 2381{
1885 __be16 csum; 2382 __be16 csum;
@@ -1992,55 +2489,143 @@ static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
1992 return 0; 2489 return 0;
1993} 2490}
1994 2491
1995static int resp_read(struct scsi_cmnd *SCpnt, unsigned long long lba, 2492static int
1996 unsigned int num, u32 ei_lba) 2493resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1997{ 2494{
2495 u8 *cmd = scp->cmnd;
2496 u64 lba;
2497 u32 num;
2498 u32 ei_lba;
1998 unsigned long iflags; 2499 unsigned long iflags;
1999 int ret; 2500 int ret;
2501 bool check_prot;
2000 2502
2001 ret = check_device_access_params(SCpnt, lba, num); 2503 switch (cmd[0]) {
2002 if (ret) 2504 case READ_16:
2003 return ret; 2505 ei_lba = 0;
2506 lba = get_unaligned_be64(cmd + 2);
2507 num = get_unaligned_be32(cmd + 10);
2508 check_prot = true;
2509 break;
2510 case READ_10:
2511 ei_lba = 0;
2512 lba = get_unaligned_be32(cmd + 2);
2513 num = get_unaligned_be16(cmd + 7);
2514 check_prot = true;
2515 break;
2516 case READ_6:
2517 ei_lba = 0;
2518 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
2519 (u32)(cmd[1] & 0x1f) << 16;
2520 num = (0 == cmd[4]) ? 256 : cmd[4];
2521 check_prot = true;
2522 break;
2523 case READ_12:
2524 ei_lba = 0;
2525 lba = get_unaligned_be32(cmd + 2);
2526 num = get_unaligned_be32(cmd + 6);
2527 check_prot = true;
2528 break;
2529 case XDWRITEREAD_10:
2530 ei_lba = 0;
2531 lba = get_unaligned_be32(cmd + 2);
2532 num = get_unaligned_be16(cmd + 7);
2533 check_prot = false;
2534 break;
2535 default: /* assume READ(32) */
2536 lba = get_unaligned_be64(cmd + 12);
2537 ei_lba = get_unaligned_be32(cmd + 20);
2538 num = get_unaligned_be32(cmd + 28);
2539 check_prot = false;
2540 break;
2541 }
2542 if (check_prot) {
2543 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
2544 (cmd[1] & 0xe0)) {
2545 mk_sense_invalid_opcode(scp);
2546 return check_condition_result;
2547 }
2548 if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
2549 scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
2550 (cmd[1] & 0xe0) == 0)
2551 sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
2552 "to DIF device\n");
2553 }
2554 if (sdebug_any_injecting_opt) {
2555 struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);
2556
2557 if (ep->inj_short)
2558 num /= 2;
2559 }
2560
2561 /* inline check_device_access_params() */
2562 if (lba + num > sdebug_capacity) {
2563 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2564 return check_condition_result;
2565 }
2566 /* transfer length excessive (tie in to block limits VPD page) */
2567 if (num > sdebug_store_sectors) {
2568 /* needs work to find which cdb byte 'num' comes from */
2569 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2570 return check_condition_result;
2571 }
2004 2572
2005 if ((SCSI_DEBUG_OPT_MEDIUM_ERR & scsi_debug_opts) && 2573 if ((SCSI_DEBUG_OPT_MEDIUM_ERR & scsi_debug_opts) &&
2006 (lba <= (OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1)) && 2574 (lba <= (OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1)) &&
2007 ((lba + num) > OPT_MEDIUM_ERR_ADDR)) { 2575 ((lba + num) > OPT_MEDIUM_ERR_ADDR)) {
2008 /* claim unrecoverable read error */ 2576 /* claim unrecoverable read error */
2009 mk_sense_buffer(SCpnt, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0); 2577 mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
2010 /* set info field and valid bit for fixed descriptor */ 2578 /* set info field and valid bit for fixed descriptor */
2011 if (0x70 == (SCpnt->sense_buffer[0] & 0x7f)) { 2579 if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
2012 SCpnt->sense_buffer[0] |= 0x80; /* Valid bit */ 2580 scp->sense_buffer[0] |= 0x80; /* Valid bit */
2013 ret = (lba < OPT_MEDIUM_ERR_ADDR) 2581 ret = (lba < OPT_MEDIUM_ERR_ADDR)
2014 ? OPT_MEDIUM_ERR_ADDR : (int)lba; 2582 ? OPT_MEDIUM_ERR_ADDR : (int)lba;
2015 SCpnt->sense_buffer[3] = (ret >> 24) & 0xff; 2583 put_unaligned_be32(ret, scp->sense_buffer + 3);
2016 SCpnt->sense_buffer[4] = (ret >> 16) & 0xff;
2017 SCpnt->sense_buffer[5] = (ret >> 8) & 0xff;
2018 SCpnt->sense_buffer[6] = ret & 0xff;
2019 } 2584 }
2020 scsi_set_resid(SCpnt, scsi_bufflen(SCpnt)); 2585 scsi_set_resid(scp, scsi_bufflen(scp));
2021 return check_condition_result; 2586 return check_condition_result;
2022 } 2587 }
2023 2588
2024 read_lock_irqsave(&atomic_rw, iflags); 2589 read_lock_irqsave(&atomic_rw, iflags);
2025 2590
2026 /* DIX + T10 DIF */ 2591 /* DIX + T10 DIF */
2027 if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) { 2592 if (scsi_debug_dix && scsi_prot_sg_count(scp)) {
2028 int prot_ret = prot_verify_read(SCpnt, lba, num, ei_lba); 2593 int prot_ret = prot_verify_read(scp, lba, num, ei_lba);
2029 2594
2030 if (prot_ret) { 2595 if (prot_ret) {
2031 read_unlock_irqrestore(&atomic_rw, iflags); 2596 read_unlock_irqrestore(&atomic_rw, iflags);
2032 mk_sense_buffer(SCpnt, ABORTED_COMMAND, 0x10, prot_ret); 2597 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, prot_ret);
2033 return illegal_condition_result; 2598 return illegal_condition_result;
2034 } 2599 }
2035 } 2600 }
2036 2601
2037 ret = do_device_access(SCpnt, lba, num, 0); 2602 ret = do_device_access(scp, lba, num, false);
2038 read_unlock_irqrestore(&atomic_rw, iflags); 2603 read_unlock_irqrestore(&atomic_rw, iflags);
2039 if (ret == -1) 2604 if (ret == -1)
2040 return DID_ERROR << 16; 2605 return DID_ERROR << 16;
2041 2606
2042 scsi_in(SCpnt)->resid = scsi_bufflen(SCpnt) - ret; 2607 scsi_in(scp)->resid = scsi_bufflen(scp) - ret;
2608
2609 if (sdebug_any_injecting_opt) {
2610 struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);
2043 2611
2612 if (ep->inj_recovered) {
2613 mk_sense_buffer(scp, RECOVERED_ERROR,
2614 THRESHOLD_EXCEEDED, 0);
2615 return check_condition_result;
2616 } else if (ep->inj_transport) {
2617 mk_sense_buffer(scp, ABORTED_COMMAND,
2618 TRANSPORT_PROBLEM, ACK_NAK_TO);
2619 return check_condition_result;
2620 } else if (ep->inj_dif) {
2621 /* Logical block guard check failed */
2622 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
2623 return illegal_condition_result;
2624 } else if (ep->inj_dix) {
2625 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
2626 return illegal_condition_result;
2627 }
2628 }
2044 return 0; 2629 return 0;
2045} 2630}
2046 2631
@@ -2223,31 +2808,95 @@ static void unmap_region(sector_t lba, unsigned int len)
2223 } 2808 }
2224} 2809}
2225 2810
2226static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba, 2811static int
2227 unsigned int num, u32 ei_lba) 2812resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2228{ 2813{
2814 u8 *cmd = scp->cmnd;
2815 u64 lba;
2816 u32 num;
2817 u32 ei_lba;
2229 unsigned long iflags; 2818 unsigned long iflags;
2230 int ret; 2819 int ret;
2820 bool check_prot;
2231 2821
2232 ret = check_device_access_params(SCpnt, lba, num); 2822 switch (cmd[0]) {
2233 if (ret) 2823 case WRITE_16:
2234 return ret; 2824 ei_lba = 0;
2825 lba = get_unaligned_be64(cmd + 2);
2826 num = get_unaligned_be32(cmd + 10);
2827 check_prot = true;
2828 break;
2829 case WRITE_10:
2830 ei_lba = 0;
2831 lba = get_unaligned_be32(cmd + 2);
2832 num = get_unaligned_be16(cmd + 7);
2833 check_prot = true;
2834 break;
2835 case WRITE_6:
2836 ei_lba = 0;
2837 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
2838 (u32)(cmd[1] & 0x1f) << 16;
2839 num = (0 == cmd[4]) ? 256 : cmd[4];
2840 check_prot = true;
2841 break;
2842 case WRITE_12:
2843 ei_lba = 0;
2844 lba = get_unaligned_be32(cmd + 2);
2845 num = get_unaligned_be32(cmd + 6);
2846 check_prot = true;
2847 break;
2848 case 0x53: /* XDWRITEREAD(10) */
2849 ei_lba = 0;
2850 lba = get_unaligned_be32(cmd + 2);
2851 num = get_unaligned_be16(cmd + 7);
2852 check_prot = false;
2853 break;
2854 default: /* assume WRITE(32) */
2855 lba = get_unaligned_be64(cmd + 12);
2856 ei_lba = get_unaligned_be32(cmd + 20);
2857 num = get_unaligned_be32(cmd + 28);
2858 check_prot = false;
2859 break;
2860 }
2861 if (check_prot) {
2862 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
2863 (cmd[1] & 0xe0)) {
2864 mk_sense_invalid_opcode(scp);
2865 return check_condition_result;
2866 }
2867 if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
2868 scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
2869 (cmd[1] & 0xe0) == 0)
2870 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
2871 "to DIF device\n");
2872 }
2873
2874 /* inline check_device_access_params() */
2875 if (lba + num > sdebug_capacity) {
2876 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2877 return check_condition_result;
2878 }
2879 /* transfer length excessive (tie in to block limits VPD page) */
2880 if (num > sdebug_store_sectors) {
2881 /* needs work to find which cdb byte 'num' comes from */
2882 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2883 return check_condition_result;
2884 }
2235 2885
2236 write_lock_irqsave(&atomic_rw, iflags); 2886 write_lock_irqsave(&atomic_rw, iflags);
2237 2887
2238 /* DIX + T10 DIF */ 2888 /* DIX + T10 DIF */
2239 if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) { 2889 if (scsi_debug_dix && scsi_prot_sg_count(scp)) {
2240 int prot_ret = prot_verify_write(SCpnt, lba, num, ei_lba); 2890 int prot_ret = prot_verify_write(scp, lba, num, ei_lba);
2241 2891
2242 if (prot_ret) { 2892 if (prot_ret) {
2243 write_unlock_irqrestore(&atomic_rw, iflags); 2893 write_unlock_irqrestore(&atomic_rw, iflags);
2244 mk_sense_buffer(SCpnt, ILLEGAL_REQUEST, 0x10, 2894 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, prot_ret);
2245 prot_ret);
2246 return illegal_condition_result; 2895 return illegal_condition_result;
2247 } 2896 }
2248 } 2897 }
2249 2898
2250 ret = do_device_access(SCpnt, lba, num, 1); 2899 ret = do_device_access(scp, lba, num, true);
2251 if (scsi_debug_lbp()) 2900 if (scsi_debug_lbp())
2252 map_region(lba, num); 2901 map_region(lba, num);
2253 write_unlock_irqrestore(&atomic_rw, iflags); 2902 write_unlock_irqrestore(&atomic_rw, iflags);
@@ -2255,30 +2904,41 @@ static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba,
2255 return (DID_ERROR << 16); 2904 return (DID_ERROR << 16);
2256 else if ((ret < (num * scsi_debug_sector_size)) && 2905 else if ((ret < (num * scsi_debug_sector_size)) &&
2257 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)) 2906 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2258 sdev_printk(KERN_INFO, SCpnt->device, 2907 sdev_printk(KERN_INFO, scp->device,
2259 "%s: write: cdb indicated=%u, IO sent=%d bytes\n", 2908 "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
2260 my_name, num * scsi_debug_sector_size, ret); 2909 my_name, num * scsi_debug_sector_size, ret);
2261 2910
2911 if (sdebug_any_injecting_opt) {
2912 struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);
2913
2914 if (ep->inj_recovered) {
2915 mk_sense_buffer(scp, RECOVERED_ERROR,
2916 THRESHOLD_EXCEEDED, 0);
2917 return check_condition_result;
2918 } else if (ep->inj_dif) {
2919 /* Logical block guard check failed */
2920 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
2921 return illegal_condition_result;
2922 } else if (ep->inj_dix) {
2923 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
2924 return illegal_condition_result;
2925 }
2926 }
2262 return 0; 2927 return 0;
2263} 2928}
2264 2929
2265static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba, 2930static int
2266 unsigned int num, u32 ei_lba, unsigned int unmap) 2931resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num, u32 ei_lba,
2932 bool unmap, bool ndob)
2267{ 2933{
2268 unsigned long iflags; 2934 unsigned long iflags;
2269 unsigned long long i; 2935 unsigned long long i;
2270 int ret; 2936 int ret;
2271 2937
2272 ret = check_device_access_params(scmd, lba, num); 2938 ret = check_device_access_params(scp, lba, num);
2273 if (ret) 2939 if (ret)
2274 return ret; 2940 return ret;
2275 2941
2276 if (num > scsi_debug_write_same_length) {
2277 mk_sense_buffer(scmd, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
2278 0);
2279 return check_condition_result;
2280 }
2281
2282 write_lock_irqsave(&atomic_rw, iflags); 2942 write_lock_irqsave(&atomic_rw, iflags);
2283 2943
2284 if (unmap && scsi_debug_lbp()) { 2944 if (unmap && scsi_debug_lbp()) {
@@ -2286,17 +2946,22 @@ static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba,
2286 goto out; 2946 goto out;
2287 } 2947 }
2288 2948
2289 /* Else fetch one logical block */ 2949 /* if ndob then zero 1 logical block, else fetch 1 logical block */
2290 ret = fetch_to_dev_buffer(scmd, 2950 if (ndob) {
2291 fake_storep + (lba * scsi_debug_sector_size), 2951 memset(fake_storep + (lba * scsi_debug_sector_size), 0,
2292 scsi_debug_sector_size); 2952 scsi_debug_sector_size);
2953 ret = 0;
2954 } else
2955 ret = fetch_to_dev_buffer(scp, fake_storep +
2956 (lba * scsi_debug_sector_size),
2957 scsi_debug_sector_size);
2293 2958
2294 if (-1 == ret) { 2959 if (-1 == ret) {
2295 write_unlock_irqrestore(&atomic_rw, iflags); 2960 write_unlock_irqrestore(&atomic_rw, iflags);
2296 return (DID_ERROR << 16); 2961 return (DID_ERROR << 16);
2297 } else if ((ret < (num * scsi_debug_sector_size)) && 2962 } else if ((ret < (num * scsi_debug_sector_size)) &&
2298 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)) 2963 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2299 sdev_printk(KERN_INFO, scmd->device, 2964 sdev_printk(KERN_INFO, scp->device,
2300 "%s: %s: cdb indicated=%u, IO sent=%d bytes\n", 2965 "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
2301 my_name, "write same", 2966 my_name, "write same",
2302 num * scsi_debug_sector_size, ret); 2967 num * scsi_debug_sector_size, ret);
@@ -2315,13 +2980,143 @@ out:
2315 return 0; 2980 return 0;
2316} 2981}
2317 2982
2983static int
2984resp_write_same_10(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2985{
2986 u8 *cmd = scp->cmnd;
2987 u32 lba;
2988 u16 num;
2989 u32 ei_lba = 0;
2990 bool unmap = false;
2991
2992 if (cmd[1] & 0x8) {
2993 if (scsi_debug_lbpws10 == 0) {
2994 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
2995 return check_condition_result;
2996 } else
2997 unmap = true;
2998 }
2999 lba = get_unaligned_be32(cmd + 2);
3000 num = get_unaligned_be16(cmd + 7);
3001 if (num > scsi_debug_write_same_length) {
3002 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3003 return check_condition_result;
3004 }
3005 return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3006}
3007
3008static int
3009resp_write_same_16(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3010{
3011 u8 *cmd = scp->cmnd;
3012 u64 lba;
3013 u32 num;
3014 u32 ei_lba = 0;
3015 bool unmap = false;
3016 bool ndob = false;
3017
3018 if (cmd[1] & 0x8) { /* UNMAP */
3019 if (scsi_debug_lbpws == 0) {
3020 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3021 return check_condition_result;
3022 } else
3023 unmap = true;
3024 }
3025 if (cmd[1] & 0x1) /* NDOB (no data-out buffer, assumes zeroes) */
3026 ndob = true;
3027 lba = get_unaligned_be64(cmd + 2);
3028 num = get_unaligned_be32(cmd + 10);
3029 if (num > scsi_debug_write_same_length) {
3030 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3031 return check_condition_result;
3032 }
3033 return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3034}
3035
3036static int
3037resp_comp_write(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3038{
3039 u8 *cmd = scp->cmnd;
3040 u8 *arr;
3041 u8 *fake_storep_hold;
3042 u64 lba;
3043 u32 dnum;
3044 u32 lb_size = scsi_debug_sector_size;
3045 u8 num;
3046 unsigned long iflags;
3047 int ret;
3048
3049 lba = get_unaligned_be32(cmd + 2);
3050 num = cmd[13]; /* 1 to a maximum of 255 logical blocks */
3051 if (0 == num)
3052 return 0; /* degenerate case, not an error */
3053 dnum = 2 * num;
3054 arr = kzalloc(dnum * lb_size, GFP_ATOMIC);
3055 if (NULL == arr) {
3056 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3057 INSUFF_RES_ASCQ);
3058 return check_condition_result;
3059 }
3060 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
3061 (cmd[1] & 0xe0)) {
3062 mk_sense_invalid_opcode(scp);
3063 return check_condition_result;
3064 }
3065 if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
3066 scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
3067 (cmd[1] & 0xe0) == 0)
3068 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3069 "to DIF device\n");
3070
3071 /* inline check_device_access_params() */
3072 if (lba + num > sdebug_capacity) {
3073 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
3074 return check_condition_result;
3075 }
3076 /* transfer length excessive (tie in to block limits VPD page) */
3077 if (num > sdebug_store_sectors) {
3078 /* needs work to find which cdb byte 'num' comes from */
3079 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3080 return check_condition_result;
3081 }
3082
3083 write_lock_irqsave(&atomic_rw, iflags);
3084
3085 /* trick do_device_access() to fetch both compare and write buffers
3086 * from data-in into arr. Safe (atomic) since write_lock held. */
3087 fake_storep_hold = fake_storep;
3088 fake_storep = arr;
3089 ret = do_device_access(scp, 0, dnum, true);
3090 fake_storep = fake_storep_hold;
3091 if (ret == -1) {
3092 write_unlock_irqrestore(&atomic_rw, iflags);
3093 kfree(arr);
3094 return DID_ERROR << 16;
3095 } else if ((ret < (dnum * lb_size)) &&
3096 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
3097 sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
3098 "indicated=%u, IO sent=%d bytes\n", my_name,
3099 dnum * lb_size, ret);
3100 if (!comp_write_worker(lba, num, arr)) {
3101 write_unlock_irqrestore(&atomic_rw, iflags);
3102 kfree(arr);
3103 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
3104 return check_condition_result;
3105 }
3106 if (scsi_debug_lbp())
3107 map_region(lba, num);
3108 write_unlock_irqrestore(&atomic_rw, iflags);
3109 return 0;
3110}
3111
2318struct unmap_block_desc { 3112struct unmap_block_desc {
2319 __be64 lba; 3113 __be64 lba;
2320 __be32 blocks; 3114 __be32 blocks;
2321 __be32 __reserved; 3115 __be32 __reserved;
2322}; 3116};
2323 3117
2324static int resp_unmap(struct scsi_cmnd * scmd, struct sdebug_dev_info * devip) 3118static int
3119resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2325{ 3120{
2326 unsigned char *buf; 3121 unsigned char *buf;
2327 struct unmap_block_desc *desc; 3122 struct unmap_block_desc *desc;
@@ -2329,20 +3124,26 @@ static int resp_unmap(struct scsi_cmnd * scmd, struct sdebug_dev_info * devip)
2329 int ret; 3124 int ret;
2330 unsigned long iflags; 3125 unsigned long iflags;
2331 3126
2332 ret = check_readiness(scmd, UAS_ONLY, devip);
2333 if (ret)
2334 return ret;
2335 3127
2336 payload_len = get_unaligned_be16(&scmd->cmnd[7]); 3128 if (!scsi_debug_lbp())
2337 BUG_ON(scsi_bufflen(scmd) != payload_len); 3129 return 0; /* fib and say its done */
3130 payload_len = get_unaligned_be16(scp->cmnd + 7);
3131 BUG_ON(scsi_bufflen(scp) != payload_len);
2338 3132
2339 descriptors = (payload_len - 8) / 16; 3133 descriptors = (payload_len - 8) / 16;
3134 if (descriptors > scsi_debug_unmap_max_desc) {
3135 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3136 return check_condition_result;
3137 }
2340 3138
2341 buf = kmalloc(scsi_bufflen(scmd), GFP_ATOMIC); 3139 buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC);
2342 if (!buf) 3140 if (!buf) {
3141 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3142 INSUFF_RES_ASCQ);
2343 return check_condition_result; 3143 return check_condition_result;
3144 }
2344 3145
2345 scsi_sg_copy_to_buffer(scmd, buf, scsi_bufflen(scmd)); 3146 scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
2346 3147
2347 BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2); 3148 BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
2348 BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16); 3149 BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
@@ -2355,7 +3156,7 @@ static int resp_unmap(struct scsi_cmnd * scmd, struct sdebug_dev_info * devip)
2355 unsigned long long lba = get_unaligned_be64(&desc[i].lba); 3156 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
2356 unsigned int num = get_unaligned_be32(&desc[i].blocks); 3157 unsigned int num = get_unaligned_be32(&desc[i].blocks);
2357 3158
2358 ret = check_device_access_params(scmd, lba, num); 3159 ret = check_device_access_params(scp, lba, num);
2359 if (ret) 3160 if (ret)
2360 goto out; 3161 goto out;
2361 3162
@@ -2373,37 +3174,44 @@ out:
2373 3174
2374#define SDEBUG_GET_LBA_STATUS_LEN 32 3175#define SDEBUG_GET_LBA_STATUS_LEN 32
2375 3176
2376static int resp_get_lba_status(struct scsi_cmnd * scmd, 3177static int
2377 struct sdebug_dev_info * devip) 3178resp_get_lba_status(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2378{ 3179{
2379 unsigned long long lba; 3180 u8 *cmd = scp->cmnd;
2380 unsigned int alloc_len, mapped, num; 3181 u64 lba;
2381 unsigned char arr[SDEBUG_GET_LBA_STATUS_LEN]; 3182 u32 alloc_len, mapped, num;
3183 u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
2382 int ret; 3184 int ret;
2383 3185
2384 ret = check_readiness(scmd, UAS_ONLY, devip); 3186 lba = get_unaligned_be64(cmd + 2);
2385 if (ret) 3187 alloc_len = get_unaligned_be32(cmd + 10);
2386 return ret;
2387
2388 lba = get_unaligned_be64(&scmd->cmnd[2]);
2389 alloc_len = get_unaligned_be32(&scmd->cmnd[10]);
2390 3188
2391 if (alloc_len < 24) 3189 if (alloc_len < 24)
2392 return 0; 3190 return 0;
2393 3191
2394 ret = check_device_access_params(scmd, lba, 1); 3192 ret = check_device_access_params(scp, lba, 1);
2395 if (ret) 3193 if (ret)
2396 return ret; 3194 return ret;
2397 3195
2398 mapped = map_state(lba, &num); 3196 if (scsi_debug_lbp())
3197 mapped = map_state(lba, &num);
3198 else {
3199 mapped = 1;
3200 /* following just in case virtual_gb changed */
3201 sdebug_capacity = get_sdebug_capacity();
3202 if (sdebug_capacity - lba <= 0xffffffff)
3203 num = sdebug_capacity - lba;
3204 else
3205 num = 0xffffffff;
3206 }
2399 3207
2400 memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN); 3208 memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
2401 put_unaligned_be32(20, &arr[0]); /* Parameter Data Length */ 3209 put_unaligned_be32(20, arr); /* Parameter Data Length */
2402 put_unaligned_be64(lba, &arr[8]); /* LBA */ 3210 put_unaligned_be64(lba, arr + 8); /* LBA */
2403 put_unaligned_be32(num, &arr[16]); /* Number of blocks */ 3211 put_unaligned_be32(num, arr + 16); /* Number of blocks */
2404 arr[20] = !mapped; /* mapped = 0, unmapped = 1 */ 3212 arr[20] = !mapped; /* prov_stat=0: mapped; 1: dealloc */
2405 3213
2406 return fill_from_dev_buffer(scmd, arr, SDEBUG_GET_LBA_STATUS_LEN); 3214 return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
2407} 3215}
2408 3216
2409#define SDEBUG_RLUN_ARR_SZ 256 3217#define SDEBUG_RLUN_ARR_SZ 256
@@ -2412,8 +3220,8 @@ static int resp_report_luns(struct scsi_cmnd * scp,
2412 struct sdebug_dev_info * devip) 3220 struct sdebug_dev_info * devip)
2413{ 3221{
2414 unsigned int alloc_len; 3222 unsigned int alloc_len;
2415 int lun_cnt, i, upper, num, n; 3223 int lun_cnt, i, upper, num, n, want_wlun, shortish;
2416 u64 wlun, lun; 3224 u64 lun;
2417 unsigned char *cmd = scp->cmnd; 3225 unsigned char *cmd = scp->cmnd;
2418 int select_report = (int)cmd[2]; 3226 int select_report = (int)cmd[2];
2419 struct scsi_lun *one_lun; 3227 struct scsi_lun *one_lun;
@@ -2421,9 +3229,9 @@ static int resp_report_luns(struct scsi_cmnd * scp,
2421 unsigned char * max_addr; 3229 unsigned char * max_addr;
2422 3230
2423 alloc_len = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24); 3231 alloc_len = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24);
2424 if ((alloc_len < 4) || (select_report > 2)) { 3232 shortish = (alloc_len < 4);
2425 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 3233 if (shortish || (select_report > 2)) {
2426 0); 3234 mk_sense_invalid_fld(scp, SDEB_IN_CDB, shortish ? 6 : 2, -1);
2427 return check_condition_result; 3235 return check_condition_result;
2428 } 3236 }
2429 /* can produce response with up to 16k luns (lun 0 to lun 16383) */ 3237 /* can produce response with up to 16k luns (lun 0 to lun 16383) */
@@ -2433,14 +3241,14 @@ static int resp_report_luns(struct scsi_cmnd * scp,
2433 lun_cnt = 0; 3241 lun_cnt = 0;
2434 else if (scsi_debug_no_lun_0 && (lun_cnt > 0)) 3242 else if (scsi_debug_no_lun_0 && (lun_cnt > 0))
2435 --lun_cnt; 3243 --lun_cnt;
2436 wlun = (select_report > 0) ? 1 : 0; 3244 want_wlun = (select_report > 0) ? 1 : 0;
2437 num = lun_cnt + wlun; 3245 num = lun_cnt + want_wlun;
2438 arr[2] = ((sizeof(struct scsi_lun) * num) >> 8) & 0xff; 3246 arr[2] = ((sizeof(struct scsi_lun) * num) >> 8) & 0xff;
2439 arr[3] = (sizeof(struct scsi_lun) * num) & 0xff; 3247 arr[3] = (sizeof(struct scsi_lun) * num) & 0xff;
2440 n = min((int)((SDEBUG_RLUN_ARR_SZ - 8) / 3248 n = min((int)((SDEBUG_RLUN_ARR_SZ - 8) /
2441 sizeof(struct scsi_lun)), num); 3249 sizeof(struct scsi_lun)), num);
2442 if (n < num) { 3250 if (n < num) {
2443 wlun = 0; 3251 want_wlun = 0;
2444 lun_cnt = n; 3252 lun_cnt = n;
2445 } 3253 }
2446 one_lun = (struct scsi_lun *) &arr[8]; 3254 one_lun = (struct scsi_lun *) &arr[8];
@@ -2454,7 +3262,7 @@ static int resp_report_luns(struct scsi_cmnd * scp,
2454 (upper | (SAM2_LUN_ADDRESS_METHOD << 6)); 3262 (upper | (SAM2_LUN_ADDRESS_METHOD << 6));
2455 one_lun[i].scsi_lun[1] = lun & 0xff; 3263 one_lun[i].scsi_lun[1] = lun & 0xff;
2456 } 3264 }
2457 if (wlun) { 3265 if (want_wlun) {
2458 one_lun[i].scsi_lun[0] = (SAM2_WLUN_REPORT_LUNS >> 8) & 0xff; 3266 one_lun[i].scsi_lun[0] = (SAM2_WLUN_REPORT_LUNS >> 8) & 0xff;
2459 one_lun[i].scsi_lun[1] = SAM2_WLUN_REPORT_LUNS & 0xff; 3267 one_lun[i].scsi_lun[1] = SAM2_WLUN_REPORT_LUNS & 0xff;
2460 i++; 3268 i++;
@@ -2476,8 +3284,8 @@ static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
2476 /* better not to use temporary buffer. */ 3284 /* better not to use temporary buffer. */
2477 buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC); 3285 buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC);
2478 if (!buf) { 3286 if (!buf) {
2479 mk_sense_buffer(scp, NOT_READY, 3287 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
2480 LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); 3288 INSUFF_RES_ASCQ);
2481 return check_condition_result; 3289 return check_condition_result;
2482 } 3290 }
2483 3291
@@ -2500,6 +3308,32 @@ static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
2500 return 0; 3308 return 0;
2501} 3309}
2502 3310
3311static int
3312resp_xdwriteread_10(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3313{
3314 u8 *cmd = scp->cmnd;
3315 u64 lba;
3316 u32 num;
3317 int errsts;
3318
3319 if (!scsi_bidi_cmnd(scp)) {
3320 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3321 INSUFF_RES_ASCQ);
3322 return check_condition_result;
3323 }
3324 errsts = resp_read_dt0(scp, devip);
3325 if (errsts)
3326 return errsts;
3327 if (!(cmd[1] & 0x4)) { /* DISABLE_WRITE is not set */
3328 errsts = resp_write_dt0(scp, devip);
3329 if (errsts)
3330 return errsts;
3331 }
3332 lba = get_unaligned_be32(cmd + 2);
3333 num = get_unaligned_be16(cmd + 7);
3334 return resp_xdwriteread(scp, lba, num, devip);
3335}
3336
2503/* When timer or tasklet goes off this function is called. */ 3337/* When timer or tasklet goes off this function is called. */
2504static void sdebug_q_cmd_complete(unsigned long indx) 3338static void sdebug_q_cmd_complete(unsigned long indx)
2505{ 3339{
@@ -2672,10 +3506,7 @@ static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev)
2672 open_devip->sdbg_host = sdbg_host; 3506 open_devip->sdbg_host = sdbg_host;
2673 atomic_set(&open_devip->num_in_q, 0); 3507 atomic_set(&open_devip->num_in_q, 0);
2674 set_bit(SDEBUG_UA_POR, open_devip->uas_bm); 3508 set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
2675 open_devip->used = 1; 3509 open_devip->used = true;
2676 if (sdev->lun == SAM2_WLUN_REPORT_LUNS)
2677 open_devip->wlun = SAM2_WLUN_REPORT_LUNS & 0xff;
2678
2679 return open_devip; 3510 return open_devip;
2680} 3511}
2681 3512
@@ -2701,10 +3532,6 @@ static int scsi_debug_slave_configure(struct scsi_device *sdp)
2701 if (NULL == devip) 3532 if (NULL == devip)
2702 return 1; /* no resources, will be marked offline */ 3533 return 1; /* no resources, will be marked offline */
2703 sdp->hostdata = devip; 3534 sdp->hostdata = devip;
2704 sdp->tagged_supported = 1;
2705 if (sdp->host->cmd_per_lun)
2706 scsi_adjust_queue_depth(sdp, DEF_TAGGED_QUEUING,
2707 DEF_CMD_PER_LUN);
2708 blk_queue_max_segment_size(sdp->request_queue, -1U); 3535 blk_queue_max_segment_size(sdp->request_queue, -1U);
2709 if (scsi_debug_no_uld) 3536 if (scsi_debug_no_uld)
2710 sdp->no_uld_attach = 1; 3537 sdp->no_uld_attach = 1;
@@ -2721,7 +3548,7 @@ static void scsi_debug_slave_destroy(struct scsi_device *sdp)
2721 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun); 3548 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2722 if (devip) { 3549 if (devip) {
2723 /* make this slot available for re-use */ 3550 /* make this slot available for re-use */
2724 devip->used = 0; 3551 devip->used = false;
2725 sdp->hostdata = NULL; 3552 sdp->hostdata = NULL;
2726 } 3553 }
2727} 3554}
@@ -3166,6 +3993,7 @@ module_param_named(ptype, scsi_debug_ptype, int, S_IRUGO | S_IWUSR);
3166module_param_named(removable, scsi_debug_removable, bool, S_IRUGO | S_IWUSR); 3993module_param_named(removable, scsi_debug_removable, bool, S_IRUGO | S_IWUSR);
3167module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO); 3994module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO);
3168module_param_named(sector_size, scsi_debug_sector_size, int, S_IRUGO); 3995module_param_named(sector_size, scsi_debug_sector_size, int, S_IRUGO);
3996module_param_named(strict, scsi_debug_strict, bool, S_IRUGO | S_IWUSR);
3169module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO); 3997module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO);
3170module_param_named(unmap_granularity, scsi_debug_unmap_granularity, int, S_IRUGO); 3998module_param_named(unmap_granularity, scsi_debug_unmap_granularity, int, S_IRUGO);
3171module_param_named(unmap_max_blocks, scsi_debug_unmap_max_blocks, int, S_IRUGO); 3999module_param_named(unmap_max_blocks, scsi_debug_unmap_max_blocks, int, S_IRUGO);
@@ -3185,7 +4013,7 @@ MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
3185MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)"); 4013MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
3186MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)"); 4014MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
3187MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny"); 4015MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
3188MODULE_PARM_DESC(dev_size_mb, "size in MB of ram shared by devs(def=8)"); 4016MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
3189MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)"); 4017MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
3190MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)"); 4018MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
3191MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)"); 4019MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
@@ -3212,11 +4040,12 @@ MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
3212MODULE_PARM_DESC(removable, "claim to have removable media (def=0)"); 4040MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
3213MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=6[SPC-4])"); 4041MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=6[SPC-4])");
3214MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)"); 4042MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
4043MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
3215MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)"); 4044MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
3216MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)"); 4045MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
3217MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)"); 4046MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
3218MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)"); 4047MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
3219MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)"); 4048MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
3220MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)"); 4049MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
3221MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)"); 4050MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
3222 4051
@@ -3382,6 +4211,16 @@ static ssize_t opts_store(struct device_driver *ddp, const char *buf,
3382 return -EINVAL; 4211 return -EINVAL;
3383opts_done: 4212opts_done:
3384 scsi_debug_opts = opts; 4213 scsi_debug_opts = opts;
4214 if (SCSI_DEBUG_OPT_RECOVERED_ERR & opts)
4215 sdebug_any_injecting_opt = true;
4216 else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & opts)
4217 sdebug_any_injecting_opt = true;
4218 else if (SCSI_DEBUG_OPT_DIF_ERR & opts)
4219 sdebug_any_injecting_opt = true;
4220 else if (SCSI_DEBUG_OPT_DIX_ERR & opts)
4221 sdebug_any_injecting_opt = true;
4222 else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & opts)
4223 sdebug_any_injecting_opt = true;
3385 atomic_set(&sdebug_cmnd_count, 0); 4224 atomic_set(&sdebug_cmnd_count, 0);
3386 atomic_set(&sdebug_a_tsf, 0); 4225 atomic_set(&sdebug_a_tsf, 0);
3387 return count; 4226 return count;
@@ -3589,12 +4428,25 @@ static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
3589 size_t count) 4428 size_t count)
3590{ 4429{
3591 int n; 4430 int n;
4431 bool changed;
3592 4432
3593 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { 4433 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4434 changed = (scsi_debug_virtual_gb != n);
3594 scsi_debug_virtual_gb = n; 4435 scsi_debug_virtual_gb = n;
3595
3596 sdebug_capacity = get_sdebug_capacity(); 4436 sdebug_capacity = get_sdebug_capacity();
3597 4437 if (changed) {
4438 struct sdebug_host_info *sdhp;
4439 struct sdebug_dev_info *dp;
4440
4441 list_for_each_entry(sdhp, &sdebug_host_list,
4442 host_list) {
4443 list_for_each_entry(dp, &sdhp->dev_info_list,
4444 dev_list) {
4445 set_bit(SDEBUG_UA_CAPACITY_CHANGED,
4446 dp->uas_bm);
4447 }
4448 }
4449 }
3598 return count; 4450 return count;
3599 } 4451 }
3600 return -EINVAL; 4452 return -EINVAL;
@@ -3740,6 +4592,23 @@ static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
3740} 4592}
3741static DRIVER_ATTR_RW(host_lock); 4593static DRIVER_ATTR_RW(host_lock);
3742 4594
4595static ssize_t strict_show(struct device_driver *ddp, char *buf)
4596{
4597 return scnprintf(buf, PAGE_SIZE, "%d\n", !!scsi_debug_strict);
4598}
4599static ssize_t strict_store(struct device_driver *ddp, const char *buf,
4600 size_t count)
4601{
4602 int n;
4603
4604 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4605 scsi_debug_strict = (n > 0);
4606 return count;
4607 }
4608 return -EINVAL;
4609}
4610static DRIVER_ATTR_RW(strict);
4611
3743 4612
3744/* Note: The following array creates attribute files in the 4613/* Note: The following array creates attribute files in the
3745 /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these 4614 /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
@@ -3775,6 +4644,7 @@ static struct attribute *sdebug_drv_attrs[] = {
3775 &driver_attr_removable.attr, 4644 &driver_attr_removable.attr,
3776 &driver_attr_host_lock.attr, 4645 &driver_attr_host_lock.attr,
3777 &driver_attr_ndelay.attr, 4646 &driver_attr_ndelay.attr,
4647 &driver_attr_strict.attr,
3778 NULL, 4648 NULL,
3779}; 4649};
3780ATTRIBUTE_GROUPS(sdebug_drv); 4650ATTRIBUTE_GROUPS(sdebug_drv);
@@ -4087,396 +4957,9 @@ static void sdebug_remove_adapter(void)
4087} 4957}
4088 4958
4089static int 4959static int
4090scsi_debug_queuecommand(struct scsi_cmnd *SCpnt) 4960sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
4091{
4092 unsigned char *cmd = SCpnt->cmnd;
4093 int len, k;
4094 unsigned int num;
4095 unsigned long long lba;
4096 u32 ei_lba;
4097 int errsts = 0;
4098 int target = SCpnt->device->id;
4099 struct sdebug_dev_info *devip = NULL;
4100 int inj_recovered = 0;
4101 int inj_transport = 0;
4102 int inj_dif = 0;
4103 int inj_dix = 0;
4104 int inj_short = 0;
4105 int delay_override = 0;
4106 int unmap = 0;
4107
4108 scsi_set_resid(SCpnt, 0);
4109 if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) &&
4110 !(SCSI_DEBUG_OPT_NO_CDB_NOISE & scsi_debug_opts)) {
4111 char b[120];
4112 int n;
4113
4114 len = SCpnt->cmd_len;
4115 if (len > 32)
4116 strcpy(b, "too long, over 32 bytes");
4117 else {
4118 for (k = 0, n = 0; k < len; ++k)
4119 n += scnprintf(b + n, sizeof(b) - n, "%02x ",
4120 (unsigned int)cmd[k]);
4121 }
4122 sdev_printk(KERN_INFO, SCpnt->device, "%s: cmd %s\n", my_name,
4123 b);
4124 }
4125
4126 if ((SCpnt->device->lun >= scsi_debug_max_luns) &&
4127 (SCpnt->device->lun != SAM2_WLUN_REPORT_LUNS))
4128 return schedule_resp(SCpnt, NULL, DID_NO_CONNECT << 16, 0);
4129 devip = devInfoReg(SCpnt->device);
4130 if (NULL == devip)
4131 return schedule_resp(SCpnt, NULL, DID_NO_CONNECT << 16, 0);
4132
4133 if ((scsi_debug_every_nth != 0) &&
4134 (atomic_inc_return(&sdebug_cmnd_count) >=
4135 abs(scsi_debug_every_nth))) {
4136 atomic_set(&sdebug_cmnd_count, 0);
4137 if (scsi_debug_every_nth < -1)
4138 scsi_debug_every_nth = -1;
4139 if (SCSI_DEBUG_OPT_TIMEOUT & scsi_debug_opts)
4140 return 0; /* ignore command causing timeout */
4141 else if (SCSI_DEBUG_OPT_MAC_TIMEOUT & scsi_debug_opts &&
4142 scsi_medium_access_command(SCpnt))
4143 return 0; /* time out reads and writes */
4144 else if (SCSI_DEBUG_OPT_RECOVERED_ERR & scsi_debug_opts)
4145 inj_recovered = 1; /* to reads and writes below */
4146 else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & scsi_debug_opts)
4147 inj_transport = 1; /* to reads and writes below */
4148 else if (SCSI_DEBUG_OPT_DIF_ERR & scsi_debug_opts)
4149 inj_dif = 1; /* to reads and writes below */
4150 else if (SCSI_DEBUG_OPT_DIX_ERR & scsi_debug_opts)
4151 inj_dix = 1; /* to reads and writes below */
4152 else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & scsi_debug_opts)
4153 inj_short = 1;
4154 }
4155
4156 if (devip->wlun) {
4157 switch (*cmd) {
4158 case INQUIRY:
4159 case REQUEST_SENSE:
4160 case TEST_UNIT_READY:
4161 case REPORT_LUNS:
4162 break; /* only allowable wlun commands */
4163 default:
4164 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
4165 printk(KERN_INFO "scsi_debug: Opcode: 0x%x "
4166 "not supported for wlun\n", *cmd);
4167 mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
4168 INVALID_OPCODE, 0);
4169 errsts = check_condition_result;
4170 return schedule_resp(SCpnt, devip, errsts, 0);
4171 }
4172 }
4173
4174 switch (*cmd) {
4175 case INQUIRY: /* mandatory, ignore unit attention */
4176 delay_override = 1;
4177 errsts = resp_inquiry(SCpnt, target, devip);
4178 break;
4179 case REQUEST_SENSE: /* mandatory, ignore unit attention */
4180 delay_override = 1;
4181 errsts = resp_requests(SCpnt, devip);
4182 break;
4183 case REZERO_UNIT: /* actually this is REWIND for SSC */
4184 case START_STOP:
4185 errsts = resp_start_stop(SCpnt, devip);
4186 break;
4187 case ALLOW_MEDIUM_REMOVAL:
4188 errsts = check_readiness(SCpnt, UAS_ONLY, devip);
4189 if (errsts)
4190 break;
4191 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
4192 printk(KERN_INFO "scsi_debug: Medium removal %s\n",
4193 cmd[4] ? "inhibited" : "enabled");
4194 break;
4195 case SEND_DIAGNOSTIC: /* mandatory */
4196 errsts = check_readiness(SCpnt, UAS_ONLY, devip);
4197 break;
4198 case TEST_UNIT_READY: /* mandatory */
4199 /* delay_override = 1; */
4200 errsts = check_readiness(SCpnt, UAS_TUR, devip);
4201 break;
4202 case RESERVE:
4203 errsts = check_readiness(SCpnt, UAS_ONLY, devip);
4204 break;
4205 case RESERVE_10:
4206 errsts = check_readiness(SCpnt, UAS_ONLY, devip);
4207 break;
4208 case RELEASE:
4209 errsts = check_readiness(SCpnt, UAS_ONLY, devip);
4210 break;
4211 case RELEASE_10:
4212 errsts = check_readiness(SCpnt, UAS_ONLY, devip);
4213 break;
4214 case READ_CAPACITY:
4215 errsts = resp_readcap(SCpnt, devip);
4216 break;
4217 case SERVICE_ACTION_IN:
4218 if (cmd[1] == SAI_READ_CAPACITY_16)
4219 errsts = resp_readcap16(SCpnt, devip);
4220 else if (cmd[1] == SAI_GET_LBA_STATUS) {
4221
4222 if (scsi_debug_lbp() == 0) {
4223 mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
4224 INVALID_COMMAND_OPCODE, 0);
4225 errsts = check_condition_result;
4226 } else
4227 errsts = resp_get_lba_status(SCpnt, devip);
4228 } else {
4229 mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
4230 INVALID_OPCODE, 0);
4231 errsts = check_condition_result;
4232 }
4233 break;
4234 case MAINTENANCE_IN:
4235 if (MI_REPORT_TARGET_PGS != cmd[1]) {
4236 mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
4237 INVALID_OPCODE, 0);
4238 errsts = check_condition_result;
4239 break;
4240 }
4241 errsts = resp_report_tgtpgs(SCpnt, devip);
4242 break;
4243 case READ_16:
4244 case READ_12:
4245 case READ_10:
4246 /* READ{10,12,16} and DIF Type 2 are natural enemies */
4247 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
4248 cmd[1] & 0xe0) {
4249 mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
4250 INVALID_COMMAND_OPCODE, 0);
4251 errsts = check_condition_result;
4252 break;
4253 }
4254
4255 if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
4256 scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
4257 (cmd[1] & 0xe0) == 0)
4258 printk(KERN_ERR "Unprotected RD/WR to DIF device\n");
4259
4260 /* fall through */
4261 case READ_6:
4262read:
4263 errsts = check_readiness(SCpnt, UAS_TUR, devip);
4264 if (errsts)
4265 break;
4266 if (scsi_debug_fake_rw)
4267 break;
4268 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
4269
4270 if (inj_short)
4271 num /= 2;
4272
4273 errsts = resp_read(SCpnt, lba, num, ei_lba);
4274 if (inj_recovered && (0 == errsts)) {
4275 mk_sense_buffer(SCpnt, RECOVERED_ERROR,
4276 THRESHOLD_EXCEEDED, 0);
4277 errsts = check_condition_result;
4278 } else if (inj_transport && (0 == errsts)) {
4279 mk_sense_buffer(SCpnt, ABORTED_COMMAND,
4280 TRANSPORT_PROBLEM, ACK_NAK_TO);
4281 errsts = check_condition_result;
4282 } else if (inj_dif && (0 == errsts)) {
4283 /* Logical block guard check failed */
4284 mk_sense_buffer(SCpnt, ABORTED_COMMAND, 0x10, 1);
4285 errsts = illegal_condition_result;
4286 } else if (inj_dix && (0 == errsts)) {
4287 mk_sense_buffer(SCpnt, ILLEGAL_REQUEST, 0x10, 1);
4288 errsts = illegal_condition_result;
4289 }
4290 break;
4291 case REPORT_LUNS: /* mandatory, ignore unit attention */
4292 delay_override = 1;
4293 errsts = resp_report_luns(SCpnt, devip);
4294 break;
4295 case VERIFY: /* 10 byte SBC-2 command */
4296 errsts = check_readiness(SCpnt, UAS_TUR, devip);
4297 break;
4298 case WRITE_16:
4299 case WRITE_12:
4300 case WRITE_10:
4301 /* WRITE{10,12,16} and DIF Type 2 are natural enemies */
4302 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
4303 cmd[1] & 0xe0) {
4304 mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
4305 INVALID_COMMAND_OPCODE, 0);
4306 errsts = check_condition_result;
4307 break;
4308 }
4309
4310 if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
4311 scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
4312 (cmd[1] & 0xe0) == 0)
4313 printk(KERN_ERR "Unprotected RD/WR to DIF device\n");
4314
4315 /* fall through */
4316 case WRITE_6:
4317write:
4318 errsts = check_readiness(SCpnt, UAS_TUR, devip);
4319 if (errsts)
4320 break;
4321 if (scsi_debug_fake_rw)
4322 break;
4323 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
4324 errsts = resp_write(SCpnt, lba, num, ei_lba);
4325 if (inj_recovered && (0 == errsts)) {
4326 mk_sense_buffer(SCpnt, RECOVERED_ERROR,
4327 THRESHOLD_EXCEEDED, 0);
4328 errsts = check_condition_result;
4329 } else if (inj_dif && (0 == errsts)) {
4330 mk_sense_buffer(SCpnt, ABORTED_COMMAND, 0x10, 1);
4331 errsts = illegal_condition_result;
4332 } else if (inj_dix && (0 == errsts)) {
4333 mk_sense_buffer(SCpnt, ILLEGAL_REQUEST, 0x10, 1);
4334 errsts = illegal_condition_result;
4335 }
4336 break;
4337 case WRITE_SAME_16:
4338 case WRITE_SAME:
4339 if (cmd[1] & 0x8) {
4340 if ((*cmd == WRITE_SAME_16 && scsi_debug_lbpws == 0) ||
4341 (*cmd == WRITE_SAME && scsi_debug_lbpws10 == 0)) {
4342 mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
4343 INVALID_FIELD_IN_CDB, 0);
4344 errsts = check_condition_result;
4345 } else
4346 unmap = 1;
4347 }
4348 if (errsts)
4349 break;
4350 errsts = check_readiness(SCpnt, UAS_TUR, devip);
4351 if (errsts)
4352 break;
4353 if (scsi_debug_fake_rw)
4354 break;
4355 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
4356 errsts = resp_write_same(SCpnt, lba, num, ei_lba, unmap);
4357 break;
4358 case UNMAP:
4359 errsts = check_readiness(SCpnt, UAS_TUR, devip);
4360 if (errsts)
4361 break;
4362 if (scsi_debug_fake_rw)
4363 break;
4364
4365 if (scsi_debug_unmap_max_desc == 0 || scsi_debug_lbpu == 0) {
4366 mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
4367 INVALID_COMMAND_OPCODE, 0);
4368 errsts = check_condition_result;
4369 } else
4370 errsts = resp_unmap(SCpnt, devip);
4371 break;
4372 case MODE_SENSE:
4373 case MODE_SENSE_10:
4374 errsts = resp_mode_sense(SCpnt, target, devip);
4375 break;
4376 case MODE_SELECT:
4377 errsts = resp_mode_select(SCpnt, 1, devip);
4378 break;
4379 case MODE_SELECT_10:
4380 errsts = resp_mode_select(SCpnt, 0, devip);
4381 break;
4382 case LOG_SENSE:
4383 errsts = resp_log_sense(SCpnt, devip);
4384 break;
4385 case SYNCHRONIZE_CACHE:
4386 delay_override = 1;
4387 errsts = check_readiness(SCpnt, UAS_TUR, devip);
4388 break;
4389 case WRITE_BUFFER:
4390 errsts = check_readiness(SCpnt, UAS_ONLY, devip);
4391 break;
4392 case XDWRITEREAD_10:
4393 if (!scsi_bidi_cmnd(SCpnt)) {
4394 mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
4395 INVALID_FIELD_IN_CDB, 0);
4396 errsts = check_condition_result;
4397 break;
4398 }
4399
4400 errsts = check_readiness(SCpnt, UAS_TUR, devip);
4401 if (errsts)
4402 break;
4403 if (scsi_debug_fake_rw)
4404 break;
4405 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
4406 errsts = resp_read(SCpnt, lba, num, ei_lba);
4407 if (errsts)
4408 break;
4409 errsts = resp_write(SCpnt, lba, num, ei_lba);
4410 if (errsts)
4411 break;
4412 errsts = resp_xdwriteread(SCpnt, lba, num, devip);
4413 break;
4414 case VARIABLE_LENGTH_CMD:
4415 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION) {
4416
4417 if ((cmd[10] & 0xe0) == 0)
4418 printk(KERN_ERR
4419 "Unprotected RD/WR to DIF device\n");
4420
4421 if (cmd[9] == READ_32) {
4422 BUG_ON(SCpnt->cmd_len < 32);
4423 goto read;
4424 }
4425
4426 if (cmd[9] == WRITE_32) {
4427 BUG_ON(SCpnt->cmd_len < 32);
4428 goto write;
4429 }
4430 }
4431
4432 mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
4433 INVALID_FIELD_IN_CDB, 0);
4434 errsts = check_condition_result;
4435 break;
4436 case 0x85:
4437 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
4438 sdev_printk(KERN_INFO, SCpnt->device,
4439 "%s: ATA PASS-THROUGH(16) not supported\n", my_name);
4440 mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
4441 INVALID_OPCODE, 0);
4442 errsts = check_condition_result;
4443 break;
4444 default:
4445 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
4446 sdev_printk(KERN_INFO, SCpnt->device,
4447 "%s: Opcode: 0x%x not supported\n",
4448 my_name, *cmd);
4449 errsts = check_readiness(SCpnt, UAS_ONLY, devip);
4450 if (errsts)
4451 break; /* Unit attention takes precedence */
4452 mk_sense_buffer(SCpnt, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
4453 errsts = check_condition_result;
4454 break;
4455 }
4456 return schedule_resp(SCpnt, devip, errsts,
4457 (delay_override ? 0 : scsi_debug_delay));
4458}
4459
4460static int
4461sdebug_queuecommand_lock_or_not(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
4462{
4463 if (scsi_debug_host_lock) {
4464 unsigned long iflags;
4465 int rc;
4466
4467 spin_lock_irqsave(shost->host_lock, iflags);
4468 rc = scsi_debug_queuecommand(cmd);
4469 spin_unlock_irqrestore(shost->host_lock, iflags);
4470 return rc;
4471 } else
4472 return scsi_debug_queuecommand(cmd);
4473}
4474
4475static int
4476sdebug_change_qdepth(struct scsi_device *sdev, int qdepth, int reason)
4477{ 4961{
4478 int num_in_q = 0; 4962 int num_in_q = 0;
4479 int bad = 0;
4480 unsigned long iflags; 4963 unsigned long iflags;
4481 struct sdebug_dev_info *devip; 4964 struct sdebug_dev_info *devip;
4482 4965
@@ -4488,43 +4971,18 @@ sdebug_change_qdepth(struct scsi_device *sdev, int qdepth, int reason)
4488 } 4971 }
4489 num_in_q = atomic_read(&devip->num_in_q); 4972 num_in_q = atomic_read(&devip->num_in_q);
4490 spin_unlock_irqrestore(&queued_arr_lock, iflags); 4973 spin_unlock_irqrestore(&queued_arr_lock, iflags);
4491 if (reason == SCSI_QDEPTH_DEFAULT || reason == SCSI_QDEPTH_RAMP_UP) { 4974
4492 if (qdepth < 1) 4975 if (qdepth < 1)
4493 qdepth = 1; 4976 qdepth = 1;
4494 /* allow to exceed max host queued_arr elements for testing */ 4977 /* allow to exceed max host queued_arr elements for testing */
4495 if (qdepth > SCSI_DEBUG_CANQUEUE + 10) 4978 if (qdepth > SCSI_DEBUG_CANQUEUE + 10)
4496 qdepth = SCSI_DEBUG_CANQUEUE + 10; 4979 qdepth = SCSI_DEBUG_CANQUEUE + 10;
4497 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); 4980 scsi_change_queue_depth(sdev, qdepth);
4498 } else if (reason == SCSI_QDEPTH_QFULL) 4981
4499 scsi_track_queue_full(sdev, qdepth);
4500 else
4501 bad = 1;
4502 if (bad)
4503 sdev_printk(KERN_WARNING, sdev,
4504 "%s: unknown reason=0x%x\n", __func__, reason);
4505 if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) { 4982 if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) {
4506 if (SCSI_QDEPTH_QFULL == reason) 4983 sdev_printk(KERN_INFO, sdev,
4507 sdev_printk(KERN_INFO, sdev, 4984 "%s: qdepth=%d, num_in_q=%d\n",
4508 "%s: -> %d, num_in_q=%d, reason: queue full\n", 4985 __func__, qdepth, num_in_q);
4509 __func__, qdepth, num_in_q);
4510 else {
4511 const char *cp;
4512
4513 switch (reason) {
4514 case SCSI_QDEPTH_DEFAULT:
4515 cp = "default (sysfs ?)";
4516 break;
4517 case SCSI_QDEPTH_RAMP_UP:
4518 cp = "ramp up";
4519 break;
4520 default:
4521 cp = "unknown";
4522 break;
4523 }
4524 sdev_printk(KERN_INFO, sdev,
4525 "%s: qdepth=%d, num_in_q=%d, reason: %s\n",
4526 __func__, qdepth, num_in_q, cp);
4527 }
4528 } 4986 }
4529 return sdev->queue_depth; 4987 return sdev->queue_depth;
4530} 4988}
@@ -4532,14 +4990,7 @@ sdebug_change_qdepth(struct scsi_device *sdev, int qdepth, int reason)
4532static int 4990static int
4533sdebug_change_qtype(struct scsi_device *sdev, int qtype) 4991sdebug_change_qtype(struct scsi_device *sdev, int qtype)
4534{ 4992{
4535 if (sdev->tagged_supported) { 4993 qtype = scsi_change_queue_type(sdev, qtype);
4536 scsi_set_tag_type(sdev, qtype);
4537 if (qtype)
4538 scsi_activate_tcq(sdev, sdev->queue_depth);
4539 else
4540 scsi_deactivate_tcq(sdev, sdev->queue_depth);
4541 } else
4542 qtype = 0;
4543 if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) { 4994 if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) {
4544 const char *cp; 4995 const char *cp;
4545 4996
@@ -4562,6 +5013,193 @@ sdebug_change_qtype(struct scsi_device *sdev, int qtype)
4562 return qtype; 5013 return qtype;
4563} 5014}
4564 5015
/*
 * check_inject - periodic fault-injection hook, run once per queued command.
 *
 * Clears this command's private per-command area (sdebug_scmd_extra_t),
 * then bumps the global sdebug_cmnd_count.  Once the count reaches
 * |scsi_debug_every_nth| it is reset and, depending on scsi_debug_opts:
 *   - OPT_TIMEOUT: return 1 so the caller silently drops the command
 *     (simulated timeout);
 *   - OPT_MAC_TIMEOUT: same, but only for medium-access (read/write) cmds;
 *   - otherwise, when sdebug_any_injecting_opt is set, mark exactly one
 *     error type (recovered/transport/dif/dix/short) in the per-command
 *     area for the resp_* routines to act on later.
 * Returns 0 when the command should be processed normally.
 * NOTE(review): leading digits on each line are cgit diff line numbers
 * (this is a patch-view dump), not part of the code.
 */
 5016static int
 5017check_inject(struct scsi_cmnd *scp)
 5018{
 5019	struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);
 5020
 5021	memset(ep, 0, sizeof(struct sdebug_scmd_extra_t));
 5022
 5023	if (atomic_inc_return(&sdebug_cmnd_count) >=
 5024	    abs(scsi_debug_every_nth)) {
 5025		atomic_set(&sdebug_cmnd_count, 0);
		/* negative every_nth collapses to -1: "inject once" semantics
		 * — presumably matches the module parameter docs; confirm. */
 5026		if (scsi_debug_every_nth < -1)
 5027			scsi_debug_every_nth = -1;
 5028		if (SCSI_DEBUG_OPT_TIMEOUT & scsi_debug_opts)
 5029			return 1; /* ignore command causing timeout */
 5030		else if (SCSI_DEBUG_OPT_MAC_TIMEOUT & scsi_debug_opts &&
 5031			 scsi_medium_access_command(scp))
 5032			return 1; /* time out reads and writes */
		/* at most one injection flag is set per command (else-if chain) */
 5033		if (sdebug_any_injecting_opt) {
 5034			int opts = scsi_debug_opts;
 5035
 5036			if (SCSI_DEBUG_OPT_RECOVERED_ERR & opts)
 5037				ep->inj_recovered = true;
 5038			else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & opts)
 5039				ep->inj_transport = true;
 5040			else if (SCSI_DEBUG_OPT_DIF_ERR & opts)
 5041				ep->inj_dif = true;
 5042			else if (SCSI_DEBUG_OPT_DIX_ERR & opts)
 5043				ep->inj_dix = true;
 5044			else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & opts)
 5045				ep->inj_short = true;
 5046		}
 5047	}
 5048	return 0;
 5049}
5050
/*
 * scsi_debug_queuecommand - central command decode/dispatch for scsi_debug.
 *
 * Pipeline, in order:
 *   1. optionally log the CDB bytes (OPT_NOISE without OPT_NO_CDB_NOISE);
 *   2. reject LUNs >= scsi_debug_max_luns (except the REPORT LUNS well
 *      known LUN) with DID_NO_CONNECT;
 *   3. map opcode -> opcode_info_t via opcode_ind_arr, resolving service
 *      actions (low bits of byte 1, or big-endian u16 at bytes 8-9) when
 *      the entry has attached variants;
 *   4. policy checks: invalid opcode, wlun restrictions, strict CDB mask,
 *      pending unit attentions, stopped device for medium-access cmds;
 *   5. honour fake_rw, run the every-nth injection hook (which may swallow
 *      the command entirely to fake a timeout);
 *   6. call the per-opcode resp_* handler (leaf, else root fallback) and
 *      queue the response via schedule_resp().
 * Returns the schedule_resp() result, or 0 when the command is dropped.
 * NOTE(review): leading digits on each line are cgit diff line numbers
 * (this is a patch-view dump), not part of the code.
 */
 5051static int
 5052scsi_debug_queuecommand(struct scsi_cmnd *scp)
 5053{
 5054	u8 sdeb_i;
 5055	struct scsi_device *sdp = scp->device;
 5056	const struct opcode_info_t *oip;
 5057	const struct opcode_info_t *r_oip;
 5058	struct sdebug_dev_info *devip;
 5059	u8 *cmd = scp->cmnd;
 5060	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
 5061	int k, na;
 5062	int errsts = 0;
 5063	int errsts_no_connect = DID_NO_CONNECT << 16;
 5064	u32 flags;
 5065	u16 sa;
 5066	u8 opcode = cmd[0];
 5067	bool has_wlun_rl;
 5068	bool debug = !!(SCSI_DEBUG_OPT_NOISE & scsi_debug_opts);
 5069
 5070	scsi_set_resid(scp, 0);
	/* hex-dump the CDB (up to 32 bytes) when noisy logging is enabled */
 5071	if (debug && !(SCSI_DEBUG_OPT_NO_CDB_NOISE & scsi_debug_opts)) {
 5072		char b[120];
 5073		int n, len, sb;
 5074
 5075		len = scp->cmd_len;
 5076		sb = (int)sizeof(b);
 5077		if (len > 32)
 5078			strcpy(b, "too long, over 32 bytes");
 5079		else {
 5080			for (k = 0, n = 0; k < len && n < sb; ++k)
 5081				n += scnprintf(b + n, sb - n, "%02x ",
 5082					       (u32)cmd[k]);
 5083		}
 5084		sdev_printk(KERN_INFO, sdp, "%s: cmd %s\n", my_name, b);
 5085	}
 5086	has_wlun_rl = (sdp->lun == SAM2_WLUN_REPORT_LUNS);
 5087	if ((sdp->lun >= scsi_debug_max_luns) && !has_wlun_rl)
 5088		return schedule_resp(scp, NULL, errsts_no_connect, 0);
 5089
 5090	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
 5091	oip = &opcode_info_arr[sdeb_i];	/* safe if table consistent */
 5092	devip = (struct sdebug_dev_info *)sdp->hostdata;
 5093	if (!devip) {
 5094		devip = devInfoReg(sdp);
 5095		if (NULL == devip)
 5096			return schedule_resp(scp, NULL, errsts_no_connect, 0);
 5097	}
 5098	na = oip->num_attached;
 5099	r_pfp = oip->pfp;
 5100	if (na) {	/* multiple commands with this opcode */
 5101		r_oip = oip;
		/* service-action opcodes: match (opcode, sa) in attached array */
 5102		if (FF_SA & r_oip->flags) {
 5103			if (F_SA_LOW & oip->flags)
 5104				sa = 0x1f & cmd[1];
 5105			else
 5106				sa = get_unaligned_be16(cmd + 8);
 5107			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
 5108				if (opcode == oip->opcode && sa == oip->sa)
 5109					break;
 5110			}
 5111		} else {   /* since no service action only check opcode */
 5112			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
 5113				if (opcode == oip->opcode)
 5114					break;
 5115			}
 5116		}
		/* no variant matched: invalid field (the SA byte) or opcode */
 5117		if (k > na) {
 5118			if (F_SA_LOW & r_oip->flags)
 5119				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
 5120			else if (F_SA_HIGH & r_oip->flags)
 5121				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
 5122			else
 5123				mk_sense_invalid_opcode(scp);
 5124			goto check_cond;
 5125		}
 5126	}	/* else (when na==0) we assume the oip is a match */
 5127	flags = oip->flags;
 5128	if (F_INV_OP & flags) {
 5129		mk_sense_invalid_opcode(scp);
 5130		goto check_cond;
 5131	}
 5132	if (has_wlun_rl && !(F_RL_WLUN_OK & flags)) {
 5133		if (debug)
 5134			sdev_printk(KERN_INFO, sdp, "scsi_debug: Opcode: "
 5135				    "0x%x not supported for wlun\n", opcode);
 5136		mk_sense_invalid_opcode(scp);
 5137		goto check_cond;
 5138	}
 5139	if (scsi_debug_strict) {	/* check cdb against mask */
 5140		u8 rem;
 5141		int j;
 5142
 5143		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
 5144			rem = ~oip->len_mask[k] & cmd[k];
 5145			if (rem) {
				/* find highest set (disallowed) bit for sense */
 5146				for (j = 7; j >= 0; --j, rem <<= 1) {
 5147					if (0x80 & rem)
 5148						break;
 5149				}
 5150				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
 5151				goto check_cond;
 5152			}
 5153		}
 5154	}
	/* report any pending unit attention first, unless opcode skips UAs */
 5155	if (!(F_SKIP_UA & flags) &&
 5156	    SDEBUG_NUM_UAS != find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS)) {
 5157		errsts = check_readiness(scp, UAS_ONLY, devip);
 5158		if (errsts)
 5159			goto check_cond;
 5160	}
 5161	if ((F_M_ACCESS & flags) && devip->stopped) {
 5162		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
 5163		if (debug)
 5164			sdev_printk(KERN_INFO, sdp, "%s reports: Not ready: "
 5165				    "%s\n", my_name, "initializing command "
 5166				    "required");
 5167		errsts = check_condition_result;
 5168		goto fini;
 5169	}
 5170	if (scsi_debug_fake_rw && (F_FAKE_RW & flags))
 5171		goto fini;
 5172	if (scsi_debug_every_nth) {
 5173		if (check_inject(scp))
 5174			return 0;	/* ignore command: make trouble */
 5175	}
 5176	if (oip->pfp)	/* if this command has a resp_* function, call it */
 5177		errsts = oip->pfp(scp, devip);
 5178	else if (r_pfp)	/* if leaf function ptr NULL, try the root's */
 5179		errsts = r_pfp(scp, devip);
 5180
 5181fini:
 5182	return schedule_resp(scp, devip, errsts,
 5183			     ((F_DELAY_OVERR & flags) ? 0 : scsi_debug_delay));
 5184check_cond:
 5185	return schedule_resp(scp, devip, check_condition_result, 0);
 5186}
5187
/*
 * sdebug_queuecommand_lock_or_not - queuecommand entry point.
 * When the scsi_debug_host_lock flag is set, call
 * scsi_debug_queuecommand() while holding shost->host_lock with
 * interrupts disabled; otherwise call it lock-free.
 * NOTE(review): leading digits are cgit diff line numbers, not code.
 */
 5188static int
 5189sdebug_queuecommand_lock_or_not(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
 5190{
 5191	if (scsi_debug_host_lock) {
 5192		unsigned long iflags;
 5193		int rc;
 5194
 5195		spin_lock_irqsave(shost->host_lock, iflags);
 5196		rc = scsi_debug_queuecommand(cmd);
 5197		spin_unlock_irqrestore(shost->host_lock, iflags);
 5198		return rc;
 5199	} else
 5200		return scsi_debug_queuecommand(cmd);
 5201}
5202
4565static struct scsi_host_template sdebug_driver_template = { 5203static struct scsi_host_template sdebug_driver_template = {
4566 .show_info = scsi_debug_show_info, 5204 .show_info = scsi_debug_show_info,
4567 .write_info = scsi_debug_write_info, 5205 .write_info = scsi_debug_write_info,
@@ -4587,13 +5225,16 @@ static struct scsi_host_template sdebug_driver_template = {
4587 .max_sectors = -1U, 5225 .max_sectors = -1U,
4588 .use_clustering = DISABLE_CLUSTERING, 5226 .use_clustering = DISABLE_CLUSTERING,
4589 .module = THIS_MODULE, 5227 .module = THIS_MODULE,
5228 .track_queue_depth = 1,
5229 .cmd_size = sizeof(struct sdebug_scmd_extra_t),
4590}; 5230};
4591 5231
4592static int sdebug_driver_probe(struct device * dev) 5232static int sdebug_driver_probe(struct device * dev)
4593{ 5233{
4594 int error = 0; 5234 int error = 0;
4595 struct sdebug_host_info *sdbg_host; 5235 int opts;
4596 struct Scsi_Host *hpnt; 5236 struct sdebug_host_info *sdbg_host;
5237 struct Scsi_Host *hpnt;
4597 int host_prot; 5238 int host_prot;
4598 5239
4599 sdbg_host = to_sdebug_host(dev); 5240 sdbg_host = to_sdebug_host(dev);
@@ -4603,7 +5244,7 @@ static int sdebug_driver_probe(struct device * dev)
4603 sdebug_driver_template.use_clustering = ENABLE_CLUSTERING; 5244 sdebug_driver_template.use_clustering = ENABLE_CLUSTERING;
4604 hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host)); 5245 hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
4605 if (NULL == hpnt) { 5246 if (NULL == hpnt) {
4606 printk(KERN_ERR "%s: scsi_register failed\n", __func__); 5247 pr_err("%s: scsi_host_alloc failed\n", __func__);
4607 error = -ENODEV; 5248 error = -ENODEV;
4608 return error; 5249 return error;
4609 } 5250 }
@@ -4660,6 +5301,18 @@ static int sdebug_driver_probe(struct device * dev)
4660 else 5301 else
4661 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC); 5302 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
4662 5303
5304 opts = scsi_debug_opts;
5305 if (SCSI_DEBUG_OPT_RECOVERED_ERR & opts)
5306 sdebug_any_injecting_opt = true;
5307 else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & opts)
5308 sdebug_any_injecting_opt = true;
5309 else if (SCSI_DEBUG_OPT_DIF_ERR & opts)
5310 sdebug_any_injecting_opt = true;
5311 else if (SCSI_DEBUG_OPT_DIX_ERR & opts)
5312 sdebug_any_injecting_opt = true;
5313 else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & opts)
5314 sdebug_any_injecting_opt = true;
5315
4663 error = scsi_add_host(hpnt, &sdbg_host->dev); 5316 error = scsi_add_host(hpnt, &sdbg_host->dev);
4664 if (error) { 5317 if (error) {
4665 printk(KERN_ERR "%s: scsi_add_host failed\n", __func__); 5318 printk(KERN_ERR "%s: scsi_add_host failed\n", __func__);
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 49014a143c6a..c1d04d4d3c6c 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -202,6 +202,7 @@ static struct {
202 {"IOMEGA", "Io20S *F", NULL, BLIST_KEY}, 202 {"IOMEGA", "Io20S *F", NULL, BLIST_KEY},
203 {"INSITE", "Floptical F*8I", NULL, BLIST_KEY}, 203 {"INSITE", "Floptical F*8I", NULL, BLIST_KEY},
204 {"INSITE", "I325VM", NULL, BLIST_KEY}, 204 {"INSITE", "I325VM", NULL, BLIST_KEY},
205 {"Intel", "Multi-Flex", NULL, BLIST_NO_RSOC},
205 {"iRiver", "iFP Mass Driver", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36}, 206 {"iRiver", "iFP Mass Driver", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36},
206 {"LASOUND", "CDX7405", "3.10", BLIST_MAX5LUN | BLIST_SINGLELUN}, 207 {"LASOUND", "CDX7405", "3.10", BLIST_MAX5LUN | BLIST_SINGLELUN},
207 {"MATSHITA", "PD-1", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, 208 {"MATSHITA", "PD-1", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index bc5ff6ff9c79..e42fff6e8c10 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -36,6 +36,7 @@
36#include <scsi/scsi_transport.h> 36#include <scsi/scsi_transport.h>
37#include <scsi/scsi_host.h> 37#include <scsi/scsi_host.h>
38#include <scsi/scsi_ioctl.h> 38#include <scsi/scsi_ioctl.h>
39#include <scsi/sg.h>
39 40
40#include "scsi_priv.h" 41#include "scsi_priv.h"
41#include "scsi_logging.h" 42#include "scsi_logging.h"
@@ -157,8 +158,9 @@ scmd_eh_abort_handler(struct work_struct *work)
157 } else { 158 } else {
158 SCSI_LOG_ERROR_RECOVERY(3, 159 SCSI_LOG_ERROR_RECOVERY(3,
159 scmd_printk(KERN_INFO, scmd, 160 scmd_printk(KERN_INFO, scmd,
160 "scmd %p abort failed, rtn %d\n", 161 "scmd %p abort %s\n", scmd,
161 scmd, rtn)); 162 (rtn == FAST_IO_FAIL) ?
163 "not send" : "failed"));
162 } 164 }
163 } 165 }
164 166
@@ -355,7 +357,7 @@ static inline void scsi_eh_prt_fail_stats(struct Scsi_Host *shost,
355 357
356 if (cmd_cancel || cmd_failed) { 358 if (cmd_cancel || cmd_failed) {
357 SCSI_LOG_ERROR_RECOVERY(3, 359 SCSI_LOG_ERROR_RECOVERY(3,
358 sdev_printk(KERN_INFO, sdev, 360 shost_printk(KERN_INFO, shost,
359 "%s: cmds failed: %d, cancel: %d\n", 361 "%s: cmds failed: %d, cancel: %d\n",
360 __func__, cmd_failed, 362 __func__, cmd_failed,
361 cmd_cancel)); 363 cmd_cancel));
@@ -608,7 +610,7 @@ static void scsi_handle_queue_ramp_up(struct scsi_device *sdev)
608 struct scsi_host_template *sht = sdev->host->hostt; 610 struct scsi_host_template *sht = sdev->host->hostt;
609 struct scsi_device *tmp_sdev; 611 struct scsi_device *tmp_sdev;
610 612
611 if (!sht->change_queue_depth || 613 if (!sht->track_queue_depth ||
612 sdev->queue_depth >= sdev->max_queue_depth) 614 sdev->queue_depth >= sdev->max_queue_depth)
613 return; 615 return;
614 616
@@ -629,12 +631,8 @@ static void scsi_handle_queue_ramp_up(struct scsi_device *sdev)
629 tmp_sdev->id != sdev->id || 631 tmp_sdev->id != sdev->id ||
630 tmp_sdev->queue_depth == sdev->max_queue_depth) 632 tmp_sdev->queue_depth == sdev->max_queue_depth)
631 continue; 633 continue;
632 /* 634
633 * call back into LLD to increase queue_depth by one 635 scsi_change_queue_depth(tmp_sdev, tmp_sdev->queue_depth + 1);
634 * with ramp up reason code.
635 */
636 sht->change_queue_depth(tmp_sdev, tmp_sdev->queue_depth + 1,
637 SCSI_QDEPTH_RAMP_UP);
638 sdev->last_queue_ramp_up = jiffies; 636 sdev->last_queue_ramp_up = jiffies;
639 } 637 }
640} 638}
@@ -644,7 +642,7 @@ static void scsi_handle_queue_full(struct scsi_device *sdev)
644 struct scsi_host_template *sht = sdev->host->hostt; 642 struct scsi_host_template *sht = sdev->host->hostt;
645 struct scsi_device *tmp_sdev; 643 struct scsi_device *tmp_sdev;
646 644
647 if (!sht->change_queue_depth) 645 if (!sht->track_queue_depth)
648 return; 646 return;
649 647
650 shost_for_each_device(tmp_sdev, sdev->host) { 648 shost_for_each_device(tmp_sdev, sdev->host) {
@@ -656,8 +654,7 @@ static void scsi_handle_queue_full(struct scsi_device *sdev)
656 * the device when we got the queue full so we start 654 * the device when we got the queue full so we start
657 * from the highest possible value and work our way down. 655 * from the highest possible value and work our way down.
658 */ 656 */
659 sht->change_queue_depth(tmp_sdev, tmp_sdev->queue_depth - 1, 657 scsi_track_queue_full(tmp_sdev, tmp_sdev->queue_depth - 1);
660 SCSI_QDEPTH_QFULL);
661 } 658 }
662} 659}
663 660
@@ -869,7 +866,24 @@ static int scsi_try_bus_device_reset(struct scsi_cmnd *scmd)
869 return rtn; 866 return rtn;
870} 867}
871 868
872static int scsi_try_to_abort_cmd(struct scsi_host_template *hostt, struct scsi_cmnd *scmd) 869/**
870 * scsi_try_to_abort_cmd - Ask host to abort a SCSI command
871 * @scmd: SCSI cmd used to send a target reset
872 *
873 * Return value:
874 * SUCCESS, FAILED, or FAST_IO_FAIL
875 *
876 * Notes:
877 * SUCCESS does not necessarily indicate that the command
878 * has been aborted; it only indicates that the LLDDs
879 * has cleared all references to that command.
880 * LLDDs should return FAILED only if an abort was required
881 * but could not be executed. LLDDs should return FAST_IO_FAIL
882 * if the device is temporarily unavailable (eg due to a
883 * link down on FibreChannel)
884 */
885static int scsi_try_to_abort_cmd(struct scsi_host_template *hostt,
886 struct scsi_cmnd *scmd)
873{ 887{
874 if (!hostt->eh_abort_handler) 888 if (!hostt->eh_abort_handler)
875 return FAILED; 889 return FAILED;
@@ -1156,9 +1170,9 @@ int scsi_eh_get_sense(struct list_head *work_q,
1156 shost = scmd->device->host; 1170 shost = scmd->device->host;
1157 if (scsi_host_eh_past_deadline(shost)) { 1171 if (scsi_host_eh_past_deadline(shost)) {
1158 SCSI_LOG_ERROR_RECOVERY(3, 1172 SCSI_LOG_ERROR_RECOVERY(3,
1159 shost_printk(KERN_INFO, shost, 1173 scmd_printk(KERN_INFO, scmd,
1160 "skip %s, past eh deadline\n", 1174 "%s: skip request sense, past eh deadline\n",
1161 __func__)); 1175 current->comm));
1162 break; 1176 break;
1163 } 1177 }
1164 if (status_byte(scmd->result) != CHECK_CONDITION) 1178 if (status_byte(scmd->result) != CHECK_CONDITION)
@@ -1180,7 +1194,7 @@ int scsi_eh_get_sense(struct list_head *work_q,
1180 SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd, 1194 SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd,
1181 "sense requested for %p result %x\n", 1195 "sense requested for %p result %x\n",
1182 scmd, scmd->result)); 1196 scmd, scmd->result));
1183 SCSI_LOG_ERROR_RECOVERY(3, scsi_print_sense("bh", scmd)); 1197 SCSI_LOG_ERROR_RECOVERY(3, scsi_print_sense(scmd));
1184 1198
1185 rtn = scsi_decide_disposition(scmd); 1199 rtn = scsi_decide_disposition(scmd);
1186 1200
@@ -1265,9 +1279,9 @@ static int scsi_eh_test_devices(struct list_head *cmd_list,
1265 /* Push items back onto work_q */ 1279 /* Push items back onto work_q */
1266 list_splice_init(cmd_list, work_q); 1280 list_splice_init(cmd_list, work_q);
1267 SCSI_LOG_ERROR_RECOVERY(3, 1281 SCSI_LOG_ERROR_RECOVERY(3,
1268 shost_printk(KERN_INFO, sdev->host, 1282 sdev_printk(KERN_INFO, sdev,
1269 "skip %s, past eh deadline", 1283 "%s: skip test device, past eh deadline",
1270 __func__)); 1284 current->comm));
1271 break; 1285 break;
1272 } 1286 }
1273 } 1287 }
@@ -1318,21 +1332,20 @@ static int scsi_eh_abort_cmds(struct list_head *work_q,
1318 if (scsi_host_eh_past_deadline(shost)) { 1332 if (scsi_host_eh_past_deadline(shost)) {
1319 list_splice_init(&check_list, work_q); 1333 list_splice_init(&check_list, work_q);
1320 SCSI_LOG_ERROR_RECOVERY(3, 1334 SCSI_LOG_ERROR_RECOVERY(3,
1321 shost_printk(KERN_INFO, shost, 1335 scmd_printk(KERN_INFO, scmd,
1322 "skip %s, past eh deadline\n", 1336 "%s: skip aborting cmd, past eh deadline\n",
1323 __func__)); 1337 current->comm));
1324 return list_empty(work_q); 1338 return list_empty(work_q);
1325 } 1339 }
1326 SCSI_LOG_ERROR_RECOVERY(3, 1340 SCSI_LOG_ERROR_RECOVERY(3,
1327 shost_printk(KERN_INFO, shost, 1341 scmd_printk(KERN_INFO, scmd,
1328 "%s: aborting cmd: 0x%p\n", 1342 "%s: aborting cmd\n", current->comm));
1329 current->comm, scmd));
1330 rtn = scsi_try_to_abort_cmd(shost->hostt, scmd); 1343 rtn = scsi_try_to_abort_cmd(shost->hostt, scmd);
1331 if (rtn == FAILED) { 1344 if (rtn == FAILED) {
1332 SCSI_LOG_ERROR_RECOVERY(3, 1345 SCSI_LOG_ERROR_RECOVERY(3,
1333 shost_printk(KERN_INFO, shost, 1346 scmd_printk(KERN_INFO, scmd,
1334 "%s: aborting cmd failed: 0x%p\n", 1347 "%s: aborting cmd failed\n",
1335 current->comm, scmd)); 1348 current->comm));
1336 list_splice_init(&check_list, work_q); 1349 list_splice_init(&check_list, work_q);
1337 return list_empty(work_q); 1350 return list_empty(work_q);
1338 } 1351 }
@@ -1390,9 +1403,9 @@ static int scsi_eh_stu(struct Scsi_Host *shost,
1390 shost_for_each_device(sdev, shost) { 1403 shost_for_each_device(sdev, shost) {
1391 if (scsi_host_eh_past_deadline(shost)) { 1404 if (scsi_host_eh_past_deadline(shost)) {
1392 SCSI_LOG_ERROR_RECOVERY(3, 1405 SCSI_LOG_ERROR_RECOVERY(3,
1393 shost_printk(KERN_INFO, shost, 1406 sdev_printk(KERN_INFO, sdev,
1394 "skip %s, past eh deadline\n", 1407 "%s: skip START_UNIT, past eh deadline\n",
1395 __func__)); 1408 current->comm));
1396 break; 1409 break;
1397 } 1410 }
1398 stu_scmd = NULL; 1411 stu_scmd = NULL;
@@ -1407,9 +1420,9 @@ static int scsi_eh_stu(struct Scsi_Host *shost,
1407 continue; 1420 continue;
1408 1421
1409 SCSI_LOG_ERROR_RECOVERY(3, 1422 SCSI_LOG_ERROR_RECOVERY(3,
1410 shost_printk(KERN_INFO, shost, 1423 sdev_printk(KERN_INFO, sdev,
1411 "%s: Sending START_UNIT to sdev: 0x%p\n", 1424 "%s: Sending START_UNIT\n",
1412 current->comm, sdev)); 1425 current->comm));
1413 1426
1414 if (!scsi_eh_try_stu(stu_scmd)) { 1427 if (!scsi_eh_try_stu(stu_scmd)) {
1415 if (!scsi_device_online(sdev) || 1428 if (!scsi_device_online(sdev) ||
@@ -1423,9 +1436,9 @@ static int scsi_eh_stu(struct Scsi_Host *shost,
1423 } 1436 }
1424 } else { 1437 } else {
1425 SCSI_LOG_ERROR_RECOVERY(3, 1438 SCSI_LOG_ERROR_RECOVERY(3,
1426 shost_printk(KERN_INFO, shost, 1439 sdev_printk(KERN_INFO, sdev,
1427 "%s: START_UNIT failed to sdev:" 1440 "%s: START_UNIT failed\n",
1428 " 0x%p\n", current->comm, sdev)); 1441 current->comm));
1429 } 1442 }
1430 } 1443 }
1431 1444
@@ -1456,9 +1469,9 @@ static int scsi_eh_bus_device_reset(struct Scsi_Host *shost,
1456 shost_for_each_device(sdev, shost) { 1469 shost_for_each_device(sdev, shost) {
1457 if (scsi_host_eh_past_deadline(shost)) { 1470 if (scsi_host_eh_past_deadline(shost)) {
1458 SCSI_LOG_ERROR_RECOVERY(3, 1471 SCSI_LOG_ERROR_RECOVERY(3,
1459 shost_printk(KERN_INFO, shost, 1472 sdev_printk(KERN_INFO, sdev,
1460 "skip %s, past eh deadline\n", 1473 "%s: skip BDR, past eh deadline\n",
1461 __func__)); 1474 current->comm));
1462 break; 1475 break;
1463 } 1476 }
1464 bdr_scmd = NULL; 1477 bdr_scmd = NULL;
@@ -1472,9 +1485,8 @@ static int scsi_eh_bus_device_reset(struct Scsi_Host *shost,
1472 continue; 1485 continue;
1473 1486
1474 SCSI_LOG_ERROR_RECOVERY(3, 1487 SCSI_LOG_ERROR_RECOVERY(3,
1475 shost_printk(KERN_INFO, shost, 1488 sdev_printk(KERN_INFO, sdev,
1476 "%s: Sending BDR sdev: 0x%p\n", 1489 "%s: Sending BDR\n", current->comm));
1477 current->comm, sdev));
1478 rtn = scsi_try_bus_device_reset(bdr_scmd); 1490 rtn = scsi_try_bus_device_reset(bdr_scmd);
1479 if (rtn == SUCCESS || rtn == FAST_IO_FAIL) { 1491 if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
1480 if (!scsi_device_online(sdev) || 1492 if (!scsi_device_online(sdev) ||
@@ -1490,9 +1502,8 @@ static int scsi_eh_bus_device_reset(struct Scsi_Host *shost,
1490 } 1502 }
1491 } else { 1503 } else {
1492 SCSI_LOG_ERROR_RECOVERY(3, 1504 SCSI_LOG_ERROR_RECOVERY(3,
1493 shost_printk(KERN_INFO, shost, 1505 sdev_printk(KERN_INFO, sdev,
1494 "%s: BDR failed sdev: 0x%p\n", 1506 "%s: BDR failed\n", current->comm));
1495 current->comm, sdev));
1496 } 1507 }
1497 } 1508 }
1498 1509
@@ -1528,8 +1539,8 @@ static int scsi_eh_target_reset(struct Scsi_Host *shost,
1528 list_splice_init(&tmp_list, work_q); 1539 list_splice_init(&tmp_list, work_q);
1529 SCSI_LOG_ERROR_RECOVERY(3, 1540 SCSI_LOG_ERROR_RECOVERY(3,
1530 shost_printk(KERN_INFO, shost, 1541 shost_printk(KERN_INFO, shost,
1531 "skip %s, past eh deadline\n", 1542 "%s: Skip target reset, past eh deadline\n",
1532 __func__)); 1543 current->comm));
1533 return list_empty(work_q); 1544 return list_empty(work_q);
1534 } 1545 }
1535 1546
@@ -1591,8 +1602,8 @@ static int scsi_eh_bus_reset(struct Scsi_Host *shost,
1591 list_splice_init(&check_list, work_q); 1602 list_splice_init(&check_list, work_q);
1592 SCSI_LOG_ERROR_RECOVERY(3, 1603 SCSI_LOG_ERROR_RECOVERY(3,
1593 shost_printk(KERN_INFO, shost, 1604 shost_printk(KERN_INFO, shost,
1594 "skip %s, past eh deadline\n", 1605 "%s: skip BRST, past eh deadline\n",
1595 __func__)); 1606 current->comm));
1596 return list_empty(work_q); 1607 return list_empty(work_q);
1597 } 1608 }
1598 1609
@@ -2193,9 +2204,9 @@ int scsi_error_handler(void *data)
2193 */ 2204 */
2194 if (!shost->eh_noresume && scsi_autopm_get_host(shost) != 0) { 2205 if (!shost->eh_noresume && scsi_autopm_get_host(shost) != 0) {
2195 SCSI_LOG_ERROR_RECOVERY(1, 2206 SCSI_LOG_ERROR_RECOVERY(1,
2196 printk(KERN_ERR "Error handler scsi_eh_%d " 2207 shost_printk(KERN_ERR, shost,
2197 "unable to autoresume\n", 2208 "scsi_eh_%d: unable to autoresume\n",
2198 shost->host_no)); 2209 shost->host_no));
2199 continue; 2210 continue;
2200 } 2211 }
2201 2212
@@ -2296,42 +2307,34 @@ scsi_reset_provider_done_command(struct scsi_cmnd *scmd)
2296{ 2307{
2297} 2308}
2298 2309
2299/* 2310/**
2300 * Function: scsi_reset_provider 2311 * scsi_ioctl_reset: explicitly reset a host/bus/target/device
2301 * 2312 * @dev: scsi_device to operate on
2302 * Purpose: Send requested reset to a bus or device at any phase. 2313 * @arg: reset type (see sg.h)
2303 *
2304 * Arguments: device - device to send reset to
2305 * flag - reset type (see scsi.h)
2306 *
2307 * Returns: SUCCESS/FAILURE.
2308 *
2309 * Notes: This is used by the SCSI Generic driver to provide
2310 * Bus/Device reset capability.
2311 */ 2314 */
2312int 2315int
2313scsi_reset_provider(struct scsi_device *dev, int flag) 2316scsi_ioctl_reset(struct scsi_device *dev, int __user *arg)
2314{ 2317{
2315 struct scsi_cmnd *scmd; 2318 struct scsi_cmnd *scmd;
2316 struct Scsi_Host *shost = dev->host; 2319 struct Scsi_Host *shost = dev->host;
2317 struct request req; 2320 struct request req;
2318 unsigned long flags; 2321 unsigned long flags;
2319 int rtn; 2322 int error = 0, rtn, val;
2320 2323
2321 if (scsi_autopm_get_host(shost) < 0) 2324 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2322 return FAILED; 2325 return -EACCES;
2323 2326
2324 if (!get_device(&dev->sdev_gendev)) { 2327 error = get_user(val, arg);
2325 rtn = FAILED; 2328 if (error)
2326 goto out_put_autopm_host; 2329 return error;
2327 }
2328 2330
2331 if (scsi_autopm_get_host(shost) < 0)
2332 return -EIO;
2333
2334 error = -EIO;
2329 scmd = scsi_get_command(dev, GFP_KERNEL); 2335 scmd = scsi_get_command(dev, GFP_KERNEL);
2330 if (!scmd) { 2336 if (!scmd)
2331 rtn = FAILED;
2332 put_device(&dev->sdev_gendev);
2333 goto out_put_autopm_host; 2337 goto out_put_autopm_host;
2334 }
2335 2338
2336 blk_rq_init(NULL, &req); 2339 blk_rq_init(NULL, &req);
2337 scmd->request = &req; 2340 scmd->request = &req;
@@ -2349,29 +2352,37 @@ scsi_reset_provider(struct scsi_device *dev, int flag)
2349 shost->tmf_in_progress = 1; 2352 shost->tmf_in_progress = 1;
2350 spin_unlock_irqrestore(shost->host_lock, flags); 2353 spin_unlock_irqrestore(shost->host_lock, flags);
2351 2354
2352 switch (flag) { 2355 switch (val & ~SG_SCSI_RESET_NO_ESCALATE) {
2353 case SCSI_TRY_RESET_DEVICE: 2356 case SG_SCSI_RESET_NOTHING:
2357 rtn = SUCCESS;
2358 break;
2359 case SG_SCSI_RESET_DEVICE:
2354 rtn = scsi_try_bus_device_reset(scmd); 2360 rtn = scsi_try_bus_device_reset(scmd);
2355 if (rtn == SUCCESS) 2361 if (rtn == SUCCESS || (val & SG_SCSI_RESET_NO_ESCALATE))
2356 break; 2362 break;
2357 /* FALLTHROUGH */ 2363 /* FALLTHROUGH */
2358 case SCSI_TRY_RESET_TARGET: 2364 case SG_SCSI_RESET_TARGET:
2359 rtn = scsi_try_target_reset(scmd); 2365 rtn = scsi_try_target_reset(scmd);
2360 if (rtn == SUCCESS) 2366 if (rtn == SUCCESS || (val & SG_SCSI_RESET_NO_ESCALATE))
2361 break; 2367 break;
2362 /* FALLTHROUGH */ 2368 /* FALLTHROUGH */
2363 case SCSI_TRY_RESET_BUS: 2369 case SG_SCSI_RESET_BUS:
2364 rtn = scsi_try_bus_reset(scmd); 2370 rtn = scsi_try_bus_reset(scmd);
2365 if (rtn == SUCCESS) 2371 if (rtn == SUCCESS || (val & SG_SCSI_RESET_NO_ESCALATE))
2366 break; 2372 break;
2367 /* FALLTHROUGH */ 2373 /* FALLTHROUGH */
2368 case SCSI_TRY_RESET_HOST: 2374 case SG_SCSI_RESET_HOST:
2369 rtn = scsi_try_host_reset(scmd); 2375 rtn = scsi_try_host_reset(scmd);
2370 break; 2376 if (rtn == SUCCESS)
2377 break;
2371 default: 2378 default:
2379 /* FALLTHROUGH */
2372 rtn = FAILED; 2380 rtn = FAILED;
2381 break;
2373 } 2382 }
2374 2383
2384 error = (rtn == SUCCESS) ? 0 : -EIO;
2385
2375 spin_lock_irqsave(shost->host_lock, flags); 2386 spin_lock_irqsave(shost->host_lock, flags);
2376 shost->tmf_in_progress = 0; 2387 shost->tmf_in_progress = 0;
2377 spin_unlock_irqrestore(shost->host_lock, flags); 2388 spin_unlock_irqrestore(shost->host_lock, flags);
@@ -2385,15 +2396,15 @@ scsi_reset_provider(struct scsi_device *dev, int flag)
2385 "waking up host to restart after TMF\n")); 2396 "waking up host to restart after TMF\n"));
2386 2397
2387 wake_up(&shost->host_wait); 2398 wake_up(&shost->host_wait);
2388
2389 scsi_run_host_queues(shost); 2399 scsi_run_host_queues(shost);
2390 2400
2391 scsi_next_command(scmd); 2401 scsi_put_command(scmd);
2402
2392out_put_autopm_host: 2403out_put_autopm_host:
2393 scsi_autopm_put_host(shost); 2404 scsi_autopm_put_host(shost);
2394 return rtn; 2405 return error;
2395} 2406}
2396EXPORT_SYMBOL(scsi_reset_provider); 2407EXPORT_SYMBOL(scsi_ioctl_reset);
2397 2408
2398/** 2409/**
2399 * scsi_normalize_sense - normalize main elements from either fixed or 2410 * scsi_normalize_sense - normalize main elements from either fixed or
@@ -2412,20 +2423,20 @@ EXPORT_SYMBOL(scsi_reset_provider);
2412 * responded to a SCSI command with the CHECK_CONDITION status. 2423 * responded to a SCSI command with the CHECK_CONDITION status.
2413 * 2424 *
2414 * Return value: 2425 * Return value:
2415 * 1 if valid sense data information found, else 0; 2426 * true if valid sense data information found, else false;
2416 */ 2427 */
2417int scsi_normalize_sense(const u8 *sense_buffer, int sb_len, 2428bool scsi_normalize_sense(const u8 *sense_buffer, int sb_len,
2418 struct scsi_sense_hdr *sshdr) 2429 struct scsi_sense_hdr *sshdr)
2419{ 2430{
2420 if (!sense_buffer || !sb_len) 2431 if (!sense_buffer || !sb_len)
2421 return 0; 2432 return false;
2422 2433
2423 memset(sshdr, 0, sizeof(struct scsi_sense_hdr)); 2434 memset(sshdr, 0, sizeof(struct scsi_sense_hdr));
2424 2435
2425 sshdr->response_code = (sense_buffer[0] & 0x7f); 2436 sshdr->response_code = (sense_buffer[0] & 0x7f);
2426 2437
2427 if (!scsi_sense_valid(sshdr)) 2438 if (!scsi_sense_valid(sshdr))
2428 return 0; 2439 return false;
2429 2440
2430 if (sshdr->response_code >= 0x72) { 2441 if (sshdr->response_code >= 0x72) {
2431 /* 2442 /*
@@ -2455,12 +2466,12 @@ int scsi_normalize_sense(const u8 *sense_buffer, int sb_len,
2455 } 2466 }
2456 } 2467 }
2457 2468
2458 return 1; 2469 return true;
2459} 2470}
2460EXPORT_SYMBOL(scsi_normalize_sense); 2471EXPORT_SYMBOL(scsi_normalize_sense);
2461 2472
2462int scsi_command_normalize_sense(struct scsi_cmnd *cmd, 2473bool scsi_command_normalize_sense(const struct scsi_cmnd *cmd,
2463 struct scsi_sense_hdr *sshdr) 2474 struct scsi_sense_hdr *sshdr)
2464{ 2475{
2465 return scsi_normalize_sense(cmd->sense_buffer, 2476 return scsi_normalize_sense(cmd->sense_buffer,
2466 SCSI_SENSE_BUFFERSIZE, sshdr); 2477 SCSI_SENSE_BUFFERSIZE, sshdr);
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index 1aaaf43c6803..c4f7b56fa6f6 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -126,7 +126,7 @@ static int ioctl_internal_command(struct scsi_device *sdev, char *cmd,
126 sdev_printk(KERN_INFO, sdev, 126 sdev_printk(KERN_INFO, sdev,
127 "ioctl_internal_command return code = %x\n", 127 "ioctl_internal_command return code = %x\n",
128 result); 128 result);
129 scsi_print_sense_hdr(" ", &sshdr); 129 scsi_print_sense_hdr(sdev, NULL, &sshdr);
130 break; 130 break;
131 } 131 }
132 } 132 }
@@ -200,19 +200,6 @@ int scsi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
200{ 200{
201 char scsi_cmd[MAX_COMMAND_SIZE]; 201 char scsi_cmd[MAX_COMMAND_SIZE];
202 202
203 /* No idea how this happens.... */
204 if (!sdev)
205 return -ENXIO;
206
207 /*
208 * If we are in the middle of error recovery, don't let anyone
209 * else try and use this device. Also, if error recovery fails, it
210 * may try and take the device offline, in which case all further
211 * access to the device is prohibited.
212 */
213 if (!scsi_block_when_processing_errors(sdev))
214 return -ENODEV;
215
216 /* Check for deprecated ioctls ... all the ioctls which don't 203 /* Check for deprecated ioctls ... all the ioctls which don't
217 * follow the new unique numbering scheme are deprecated */ 204 * follow the new unique numbering scheme are deprecated */
218 switch (cmd) { 205 switch (cmd) {
@@ -273,6 +260,8 @@ int scsi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
273 START_STOP_TIMEOUT, NORMAL_RETRIES); 260 START_STOP_TIMEOUT, NORMAL_RETRIES);
274 case SCSI_IOCTL_GET_PCI: 261 case SCSI_IOCTL_GET_PCI:
275 return scsi_ioctl_get_pci(sdev, arg); 262 return scsi_ioctl_get_pci(sdev, arg);
263 case SG_SCSI_RESET:
264 return scsi_ioctl_reset(sdev, arg);
276 default: 265 default:
277 if (sdev->host->hostt->ioctl) 266 if (sdev->host->hostt->ioctl)
278 return sdev->host->hostt->ioctl(sdev, cmd, arg); 267 return sdev->host->hostt->ioctl(sdev, cmd, arg);
@@ -281,55 +270,20 @@ int scsi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
281} 270}
282EXPORT_SYMBOL(scsi_ioctl); 271EXPORT_SYMBOL(scsi_ioctl);
283 272
284/** 273/*
285 * scsi_nonblockable_ioctl() - Handle SG_SCSI_RESET 274 * We can process a reset even when a device isn't fully operable.
286 * @sdev: scsi device receiving ioctl
287 * @cmd: Must be SC_SCSI_RESET
288 * @arg: pointer to int containing SG_SCSI_RESET_{DEVICE,BUS,HOST}
289 * @ndelay: file mode O_NDELAY flag
290 */ 275 */
291int scsi_nonblockable_ioctl(struct scsi_device *sdev, int cmd, 276int scsi_ioctl_block_when_processing_errors(struct scsi_device *sdev, int cmd,
292 void __user *arg, int ndelay) 277 bool ndelay)
293{ 278{
294 int val, result; 279 if (cmd == SG_SCSI_RESET && ndelay) {
295
296 /* The first set of iocts may be executed even if we're doing
297 * error processing, as long as the device was opened
298 * non-blocking */
299 if (ndelay) {
300 if (scsi_host_in_recovery(sdev->host)) 280 if (scsi_host_in_recovery(sdev->host))
281 return -EAGAIN;
282 } else {
283 if (!scsi_block_when_processing_errors(sdev))
301 return -ENODEV; 284 return -ENODEV;
302 } else if (!scsi_block_when_processing_errors(sdev))
303 return -ENODEV;
304
305 switch (cmd) {
306 case SG_SCSI_RESET:
307 result = get_user(val, (int __user *)arg);
308 if (result)
309 return result;
310 if (val == SG_SCSI_RESET_NOTHING)
311 return 0;
312 switch (val) {
313 case SG_SCSI_RESET_DEVICE:
314 val = SCSI_TRY_RESET_DEVICE;
315 break;
316 case SG_SCSI_RESET_TARGET:
317 val = SCSI_TRY_RESET_TARGET;
318 break;
319 case SG_SCSI_RESET_BUS:
320 val = SCSI_TRY_RESET_BUS;
321 break;
322 case SG_SCSI_RESET_HOST:
323 val = SCSI_TRY_RESET_HOST;
324 break;
325 default:
326 return -EINVAL;
327 }
328 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
329 return -EACCES;
330 return (scsi_reset_provider(sdev, val) ==
331 SUCCESS) ? 0 : -EIO;
332 } 285 }
333 return -ENODEV; 286
287 return 0;
334} 288}
335EXPORT_SYMBOL(scsi_nonblockable_ioctl); 289EXPORT_SYMBOL_GPL(scsi_ioctl_block_when_processing_errors);
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 50a6e1ac8d9c..7e3d954c9cac 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -22,6 +22,7 @@
22#include <linux/hardirq.h> 22#include <linux/hardirq.h>
23#include <linux/scatterlist.h> 23#include <linux/scatterlist.h>
24#include <linux/blk-mq.h> 24#include <linux/blk-mq.h>
25#include <linux/ratelimit.h>
25 26
26#include <scsi/scsi.h> 27#include <scsi/scsi.h>
27#include <scsi/scsi_cmnd.h> 28#include <scsi/scsi_cmnd.h>
@@ -47,7 +48,7 @@ struct scsi_host_sg_pool {
47 mempool_t *pool; 48 mempool_t *pool;
48}; 49};
49 50
50#define SP(x) { x, "sgpool-" __stringify(x) } 51#define SP(x) { .size = x, "sgpool-" __stringify(x) }
51#if (SCSI_MAX_SG_SEGMENTS < 32) 52#if (SCSI_MAX_SG_SEGMENTS < 32)
52#error SCSI_MAX_SG_SEGMENTS is too small (must be 32 or greater) 53#error SCSI_MAX_SG_SEGMENTS is too small (must be 32 or greater)
53#endif 54#endif
@@ -542,17 +543,6 @@ static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
542 put_device(&sdev->sdev_gendev); 543 put_device(&sdev->sdev_gendev);
543} 544}
544 545
545void scsi_next_command(struct scsi_cmnd *cmd)
546{
547 struct scsi_device *sdev = cmd->device;
548 struct request_queue *q = sdev->request_queue;
549
550 scsi_put_command(cmd);
551 scsi_run_queue(q);
552
553 put_device(&sdev->sdev_gendev);
554}
555
556void scsi_run_host_queues(struct Scsi_Host *shost) 546void scsi_run_host_queues(struct Scsi_Host *shost)
557{ 547{
558 struct scsi_device *sdev; 548 struct scsi_device *sdev;
@@ -598,10 +588,10 @@ static void scsi_free_sgtable(struct scsi_data_buffer *sdb, bool mq)
598 __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, mq, scsi_sg_free); 588 __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, mq, scsi_sg_free);
599} 589}
600 590
601static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents, 591static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents, bool mq)
602 gfp_t gfp_mask, bool mq)
603{ 592{
604 struct scatterlist *first_chunk = NULL; 593 struct scatterlist *first_chunk = NULL;
594 gfp_t gfp_mask = mq ? GFP_NOIO : GFP_ATOMIC;
605 int ret; 595 int ret;
606 596
607 BUG_ON(!nents); 597 BUG_ON(!nents);
@@ -730,8 +720,6 @@ static bool scsi_end_request(struct request *req, int error,
730 kblockd_schedule_work(&sdev->requeue_work); 720 kblockd_schedule_work(&sdev->requeue_work);
731 else 721 else
732 blk_mq_start_stopped_hw_queues(q, true); 722 blk_mq_start_stopped_hw_queues(q, true);
733
734 put_device(&sdev->sdev_gendev);
735 } else { 723 } else {
736 unsigned long flags; 724 unsigned long flags;
737 725
@@ -743,9 +731,12 @@ static bool scsi_end_request(struct request *req, int error,
743 spin_unlock_irqrestore(q->queue_lock, flags); 731 spin_unlock_irqrestore(q->queue_lock, flags);
744 732
745 scsi_release_buffers(cmd); 733 scsi_release_buffers(cmd);
746 scsi_next_command(cmd); 734
735 scsi_put_command(cmd);
736 scsi_run_queue(q);
747 } 737 }
748 738
739 put_device(&sdev->sdev_gendev);
749 return false; 740 return false;
750} 741}
751 742
@@ -831,8 +822,8 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
831 struct request *req = cmd->request; 822 struct request *req = cmd->request;
832 int error = 0; 823 int error = 0;
833 struct scsi_sense_hdr sshdr; 824 struct scsi_sense_hdr sshdr;
834 int sense_valid = 0; 825 bool sense_valid = false;
835 int sense_deferred = 0; 826 int sense_deferred = 0, level = 0;
836 enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY, 827 enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
837 ACTION_DELAYED_RETRY} action; 828 ACTION_DELAYED_RETRY} action;
838 unsigned long wait_for = (cmd->allowed + 1) * req->timeout; 829 unsigned long wait_for = (cmd->allowed + 1) * req->timeout;
@@ -912,7 +903,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
912 if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d)) 903 if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
913 ; 904 ;
914 else if (!(req->cmd_flags & REQ_QUIET)) 905 else if (!(req->cmd_flags & REQ_QUIET))
915 scsi_print_sense("", cmd); 906 scsi_print_sense(cmd);
916 result = 0; 907 result = 0;
917 /* BLOCK_PC may have set error */ 908 /* BLOCK_PC may have set error */
918 error = 0; 909 error = 0;
@@ -1039,10 +1030,24 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
1039 case ACTION_FAIL: 1030 case ACTION_FAIL:
1040 /* Give up and fail the remainder of the request */ 1031 /* Give up and fail the remainder of the request */
1041 if (!(req->cmd_flags & REQ_QUIET)) { 1032 if (!(req->cmd_flags & REQ_QUIET)) {
1042 scsi_print_result(cmd); 1033 static DEFINE_RATELIMIT_STATE(_rs,
1043 if (driver_byte(result) & DRIVER_SENSE) 1034 DEFAULT_RATELIMIT_INTERVAL,
1044 scsi_print_sense("", cmd); 1035 DEFAULT_RATELIMIT_BURST);
1045 scsi_print_command(cmd); 1036
1037 if (unlikely(scsi_logging_level))
1038 level = SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT,
1039 SCSI_LOG_MLCOMPLETE_BITS);
1040
1041 /*
1042 * if logging is enabled the failure will be printed
1043 * in scsi_log_completion(), so avoid duplicate messages
1044 */
1045 if (!level && __ratelimit(&_rs)) {
1046 scsi_print_result(cmd, NULL, FAILED);
1047 if (driver_byte(result) & DRIVER_SENSE)
1048 scsi_print_sense(cmd);
1049 scsi_print_command(cmd);
1050 }
1046 } 1051 }
1047 if (!scsi_end_request(req, error, blk_rq_err_bytes(req), 0)) 1052 if (!scsi_end_request(req, error, blk_rq_err_bytes(req), 0))
1048 return; 1053 return;
@@ -1072,8 +1077,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
1072 } 1077 }
1073} 1078}
1074 1079
1075static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb, 1080static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb)
1076 gfp_t gfp_mask)
1077{ 1081{
1078 int count; 1082 int count;
1079 1083
@@ -1081,7 +1085,7 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
1081 * If sg table allocation fails, requeue request later. 1085 * If sg table allocation fails, requeue request later.
1082 */ 1086 */
1083 if (unlikely(scsi_alloc_sgtable(sdb, req->nr_phys_segments, 1087 if (unlikely(scsi_alloc_sgtable(sdb, req->nr_phys_segments,
1084 gfp_mask, req->mq_ctx != NULL))) 1088 req->mq_ctx != NULL)))
1085 return BLKPREP_DEFER; 1089 return BLKPREP_DEFER;
1086 1090
1087 /* 1091 /*
@@ -1106,7 +1110,7 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
1106 * BLKPREP_DEFER if the failure is retryable 1110 * BLKPREP_DEFER if the failure is retryable
1107 * BLKPREP_KILL if the failure is fatal 1111 * BLKPREP_KILL if the failure is fatal
1108 */ 1112 */
1109int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask) 1113int scsi_init_io(struct scsi_cmnd *cmd)
1110{ 1114{
1111 struct scsi_device *sdev = cmd->device; 1115 struct scsi_device *sdev = cmd->device;
1112 struct request *rq = cmd->request; 1116 struct request *rq = cmd->request;
@@ -1115,7 +1119,7 @@ int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
1115 1119
1116 BUG_ON(!rq->nr_phys_segments); 1120 BUG_ON(!rq->nr_phys_segments);
1117 1121
1118 error = scsi_init_sgtable(rq, &cmd->sdb, gfp_mask); 1122 error = scsi_init_sgtable(rq, &cmd->sdb);
1119 if (error) 1123 if (error)
1120 goto err_exit; 1124 goto err_exit;
1121 1125
@@ -1131,8 +1135,7 @@ int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
1131 rq->next_rq->special = bidi_sdb; 1135 rq->next_rq->special = bidi_sdb;
1132 } 1136 }
1133 1137
1134 error = scsi_init_sgtable(rq->next_rq, rq->next_rq->special, 1138 error = scsi_init_sgtable(rq->next_rq, rq->next_rq->special);
1135 GFP_ATOMIC);
1136 if (error) 1139 if (error)
1137 goto err_exit; 1140 goto err_exit;
1138 } 1141 }
@@ -1144,7 +1147,7 @@ int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
1144 BUG_ON(prot_sdb == NULL); 1147 BUG_ON(prot_sdb == NULL);
1145 ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio); 1148 ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);
1146 1149
1147 if (scsi_alloc_sgtable(prot_sdb, ivecs, gfp_mask, is_mq)) { 1150 if (scsi_alloc_sgtable(prot_sdb, ivecs, is_mq)) {
1148 error = BLKPREP_DEFER; 1151 error = BLKPREP_DEFER;
1149 goto err_exit; 1152 goto err_exit;
1150 } 1153 }
@@ -1213,7 +1216,7 @@ static int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
1213 * submit a request without an attached bio. 1216 * submit a request without an attached bio.
1214 */ 1217 */
1215 if (req->bio) { 1218 if (req->bio) {
1216 int ret = scsi_init_io(cmd, GFP_ATOMIC); 1219 int ret = scsi_init_io(cmd);
1217 if (unlikely(ret)) 1220 if (unlikely(ret))
1218 return ret; 1221 return ret;
1219 } else { 1222 } else {
@@ -1638,6 +1641,87 @@ static void scsi_softirq_done(struct request *rq)
1638} 1641}
1639 1642
1640/** 1643/**
1644 * scsi_dispatch_command - Dispatch a command to the low-level driver.
1645 * @cmd: command block we are dispatching.
1646 *
1647 * Return: nonzero return request was rejected and device's queue needs to be
1648 * plugged.
1649 */
1650static int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
1651{
1652 struct Scsi_Host *host = cmd->device->host;
1653 int rtn = 0;
1654
1655 atomic_inc(&cmd->device->iorequest_cnt);
1656
1657 /* check if the device is still usable */
1658 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
1659 /* in SDEV_DEL we error all commands. DID_NO_CONNECT
1660 * returns an immediate error upwards, and signals
1661 * that the device is no longer present */
1662 cmd->result = DID_NO_CONNECT << 16;
1663 goto done;
1664 }
1665
1666 /* Check to see if the scsi lld made this device blocked. */
1667 if (unlikely(scsi_device_blocked(cmd->device))) {
1668 /*
1669 * in blocked state, the command is just put back on
1670 * the device queue. The suspend state has already
1671 * blocked the queue so future requests should not
1672 * occur until the device transitions out of the
1673 * suspend state.
1674 */
1675 SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
1676 "queuecommand : device blocked\n"));
1677 return SCSI_MLQUEUE_DEVICE_BUSY;
1678 }
1679
1680 /* Store the LUN value in cmnd, if needed. */
1681 if (cmd->device->lun_in_cdb)
1682 cmd->cmnd[1] = (cmd->cmnd[1] & 0x1f) |
1683 (cmd->device->lun << 5 & 0xe0);
1684
1685 scsi_log_send(cmd);
1686
1687 /*
1688 * Before we queue this command, check if the command
1689 * length exceeds what the host adapter can handle.
1690 */
1691 if (cmd->cmd_len > cmd->device->host->max_cmd_len) {
1692 SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
1693 "queuecommand : command too long. "
1694 "cdb_size=%d host->max_cmd_len=%d\n",
1695 cmd->cmd_len, cmd->device->host->max_cmd_len));
1696 cmd->result = (DID_ABORT << 16);
1697 goto done;
1698 }
1699
1700 if (unlikely(host->shost_state == SHOST_DEL)) {
1701 cmd->result = (DID_NO_CONNECT << 16);
1702 goto done;
1703
1704 }
1705
1706 trace_scsi_dispatch_cmd_start(cmd);
1707 rtn = host->hostt->queuecommand(host, cmd);
1708 if (rtn) {
1709 trace_scsi_dispatch_cmd_error(cmd, rtn);
1710 if (rtn != SCSI_MLQUEUE_DEVICE_BUSY &&
1711 rtn != SCSI_MLQUEUE_TARGET_BUSY)
1712 rtn = SCSI_MLQUEUE_HOST_BUSY;
1713
1714 SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
1715 "queuecommand : request rejected\n"));
1716 }
1717
1718 return rtn;
1719 done:
1720 cmd->scsi_done(cmd);
1721 return 0;
1722}
1723
1724/**
1641 * scsi_done - Invoke completion on finished SCSI command. 1725 * scsi_done - Invoke completion on finished SCSI command.
1642 * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives 1726 * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives
1643 * ownership back to SCSI Core -- i.e. the LLDD has finished with it. 1727 * ownership back to SCSI Core -- i.e. the LLDD has finished with it.
@@ -1725,7 +1809,7 @@ static void scsi_request_fn(struct request_queue *q)
1725 * we add the dev to the starved list so it eventually gets 1809 * we add the dev to the starved list so it eventually gets
1726 * a run when a tag is freed. 1810 * a run when a tag is freed.
1727 */ 1811 */
1728 if (blk_queue_tagged(q) && !blk_rq_tagged(req)) { 1812 if (blk_queue_tagged(q) && !(req->cmd_flags & REQ_QUEUED)) {
1729 spin_lock_irq(shost->host_lock); 1813 spin_lock_irq(shost->host_lock);
1730 if (list_empty(&sdev->starved_entry)) 1814 if (list_empty(&sdev->starved_entry))
1731 list_add_tail(&sdev->starved_entry, 1815 list_add_tail(&sdev->starved_entry,
@@ -1739,6 +1823,11 @@ static void scsi_request_fn(struct request_queue *q)
1739 1823
1740 if (!scsi_host_queue_ready(q, shost, sdev)) 1824 if (!scsi_host_queue_ready(q, shost, sdev))
1741 goto host_not_ready; 1825 goto host_not_ready;
1826
1827 if (sdev->simple_tags)
1828 cmd->flags |= SCMD_TAGGED;
1829 else
1830 cmd->flags &= ~SCMD_TAGGED;
1742 1831
1743 /* 1832 /*
1744 * Finally, initialize any error handling parameters, and set up 1833 * Finally, initialize any error handling parameters, and set up
@@ -1893,10 +1982,10 @@ static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req,
1893 blk_mq_start_request(req); 1982 blk_mq_start_request(req);
1894 } 1983 }
1895 1984
1896 if (blk_queue_tagged(q)) 1985 if (sdev->simple_tags)
1897 req->cmd_flags |= REQ_QUEUED; 1986 cmd->flags |= SCMD_TAGGED;
1898 else 1987 else
1899 req->cmd_flags &= ~REQ_QUEUED; 1988 cmd->flags &= ~SCMD_TAGGED;
1900 1989
1901 scsi_init_cmd_errh(cmd); 1990 scsi_init_cmd_errh(cmd);
1902 cmd->scsi_done = scsi_mq_done; 1991 cmd->scsi_done = scsi_mq_done;
@@ -2091,7 +2180,7 @@ int scsi_mq_setup_tags(struct Scsi_Host *shost)
2091 2180
2092 memset(&shost->tag_set, 0, sizeof(shost->tag_set)); 2181 memset(&shost->tag_set, 0, sizeof(shost->tag_set));
2093 shost->tag_set.ops = &scsi_mq_ops; 2182 shost->tag_set.ops = &scsi_mq_ops;
2094 shost->tag_set.nr_hw_queues = 1; 2183 shost->tag_set.nr_hw_queues = shost->nr_hw_queues ? : 1;
2095 shost->tag_set.queue_depth = shost->can_queue; 2184 shost->tag_set.queue_depth = shost->can_queue;
2096 shost->tag_set.cmd_size = cmd_size; 2185 shost->tag_set.cmd_size = cmd_size;
2097 shost->tag_set.numa_node = NUMA_NO_NODE; 2186 shost->tag_set.numa_node = NUMA_NO_NODE;
diff --git a/drivers/scsi/scsi_logging.h b/drivers/scsi/scsi_logging.h
index 1f65139e14f8..7fe64a847143 100644
--- a/drivers/scsi/scsi_logging.h
+++ b/drivers/scsi/scsi_logging.h
@@ -51,6 +51,7 @@ do { \
51 } while (0); \ 51 } while (0); \
52} while (0) 52} while (0)
53#else 53#else
54#define SCSI_LOG_LEVEL(SHIFT, BITS) 0
54#define SCSI_CHECK_LOGGING(SHIFT, BITS, LEVEL, CMD) 55#define SCSI_CHECK_LOGGING(SHIFT, BITS, LEVEL, CMD)
55#endif /* CONFIG_SCSI_LOGGING */ 56#endif /* CONFIG_SCSI_LOGGING */
56 57
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 12b8e1bee7f0..2dc4a83fb84c 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -29,7 +29,6 @@ extern int scsi_init_hosts(void);
29extern void scsi_exit_hosts(void); 29extern void scsi_exit_hosts(void);
30 30
31/* scsi.c */ 31/* scsi.c */
32extern int scsi_dispatch_cmd(struct scsi_cmnd *cmd);
33extern int scsi_setup_command_freelist(struct Scsi_Host *shost); 32extern int scsi_setup_command_freelist(struct Scsi_Host *shost);
34extern void scsi_destroy_command_freelist(struct Scsi_Host *shost); 33extern void scsi_destroy_command_freelist(struct Scsi_Host *shost);
35#ifdef CONFIG_SCSI_LOGGING 34#ifdef CONFIG_SCSI_LOGGING
@@ -84,7 +83,6 @@ int scsi_noretry_cmd(struct scsi_cmnd *scmd);
84extern int scsi_maybe_unblock_host(struct scsi_device *sdev); 83extern int scsi_maybe_unblock_host(struct scsi_device *sdev);
85extern void scsi_device_unbusy(struct scsi_device *sdev); 84extern void scsi_device_unbusy(struct scsi_device *sdev);
86extern void scsi_queue_insert(struct scsi_cmnd *cmd, int reason); 85extern void scsi_queue_insert(struct scsi_cmnd *cmd, int reason);
87extern void scsi_next_command(struct scsi_cmnd *cmd);
88extern void scsi_io_completion(struct scsi_cmnd *, unsigned int); 86extern void scsi_io_completion(struct scsi_cmnd *, unsigned int);
89extern void scsi_run_host_queues(struct Scsi_Host *shost); 87extern void scsi_run_host_queues(struct Scsi_Host *shost);
90extern struct request_queue *scsi_alloc_queue(struct scsi_device *sdev); 88extern struct request_queue *scsi_alloc_queue(struct scsi_device *sdev);
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index ba3f1e8d0d57..983aed10ff2f 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -286,7 +286,13 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
286 } 286 }
287 WARN_ON_ONCE(!blk_get_queue(sdev->request_queue)); 287 WARN_ON_ONCE(!blk_get_queue(sdev->request_queue));
288 sdev->request_queue->queuedata = sdev; 288 sdev->request_queue->queuedata = sdev;
289 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun); 289
290 if (!shost_use_blk_mq(sdev->host) &&
291 (shost->bqt || shost->hostt->use_blk_tags)) {
292 blk_queue_init_tags(sdev->request_queue,
293 sdev->host->cmd_per_lun, shost->bqt);
294 }
295 scsi_change_queue_depth(sdev, sdev->host->cmd_per_lun);
290 296
291 scsi_sysfs_device_initialize(sdev); 297 scsi_sysfs_device_initialize(sdev);
292 298
@@ -874,8 +880,10 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
874 (inq_result[3] & 0x0f) == 1 ? " CCS" : ""); 880 (inq_result[3] & 0x0f) == 1 ? " CCS" : "");
875 881
876 if ((sdev->scsi_level >= SCSI_2) && (inq_result[7] & 2) && 882 if ((sdev->scsi_level >= SCSI_2) && (inq_result[7] & 2) &&
877 !(*bflags & BLIST_NOTQ)) 883 !(*bflags & BLIST_NOTQ)) {
878 sdev->tagged_supported = 1; 884 sdev->tagged_supported = 1;
885 sdev->simple_tags = 1;
886 }
879 887
880 /* 888 /*
881 * Some devices (Texel CD ROM drives) have handshaking problems 889 * Some devices (Texel CD ROM drives) have handshaking problems
@@ -1214,9 +1222,9 @@ static void scsi_sequential_lun_scan(struct scsi_target *starget,
1214 sparse_lun = 0; 1222 sparse_lun = 0;
1215 1223
1216 /* 1224 /*
1217 * If less than SCSI_1_CSS, and no special lun scaning, stop 1225 * If less than SCSI_1_CCS, and no special lun scanning, stop
1218 * scanning; this matches 2.4 behaviour, but could just be a bug 1226 * scanning; this matches 2.4 behaviour, but could just be a bug
1219 * (to continue scanning a SCSI_1_CSS device). 1227 * (to continue scanning a SCSI_1_CCS device).
1220 * 1228 *
1221 * This test is broken. We might not have any device on lun0 for 1229 * This test is broken. We might not have any device on lun0 for
1222 * a sparselun device, and if that's the case then how would we 1230 * a sparselun device, and if that's the case then how would we
@@ -1585,16 +1593,15 @@ EXPORT_SYMBOL(scsi_add_device);
1585 1593
1586void scsi_rescan_device(struct device *dev) 1594void scsi_rescan_device(struct device *dev)
1587{ 1595{
1588 struct scsi_driver *drv;
1589
1590 if (!dev->driver) 1596 if (!dev->driver)
1591 return; 1597 return;
1592 1598
1593 drv = to_scsi_driver(dev->driver); 1599 if (try_module_get(dev->driver->owner)) {
1594 if (try_module_get(drv->owner)) { 1600 struct scsi_driver *drv = to_scsi_driver(dev->driver);
1601
1595 if (drv->rescan) 1602 if (drv->rescan)
1596 drv->rescan(dev); 1603 drv->rescan(dev);
1597 module_put(drv->owner); 1604 module_put(dev->driver->owner);
1598 } 1605 }
1599} 1606}
1600EXPORT_SYMBOL(scsi_rescan_device); 1607EXPORT_SYMBOL(scsi_rescan_device);
@@ -1727,7 +1734,7 @@ int scsi_scan_host_selected(struct Scsi_Host *shost, unsigned int channel,
1727 1734
1728 if (((channel != SCAN_WILD_CARD) && (channel > shost->max_channel)) || 1735 if (((channel != SCAN_WILD_CARD) && (channel > shost->max_channel)) ||
1729 ((id != SCAN_WILD_CARD) && (id >= shost->max_id)) || 1736 ((id != SCAN_WILD_CARD) && (id >= shost->max_id)) ||
1730 ((lun != SCAN_WILD_CARD) && (lun > shost->max_lun))) 1737 ((lun != SCAN_WILD_CARD) && (lun >= shost->max_lun)))
1731 return -EINVAL; 1738 return -EINVAL;
1732 1739
1733 mutex_lock(&shost->scan_mutex); 1740 mutex_lock(&shost->scan_mutex);
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index f4cb7b3e9e23..1cb64a8e18c9 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -727,9 +727,7 @@ show_queue_type_field(struct device *dev, struct device_attribute *attr,
727 struct scsi_device *sdev = to_scsi_device(dev); 727 struct scsi_device *sdev = to_scsi_device(dev);
728 const char *name = "none"; 728 const char *name = "none";
729 729
730 if (sdev->ordered_tags) 730 if (sdev->simple_tags)
731 name = "ordered";
732 else if (sdev->simple_tags)
733 name = "simple"; 731 name = "simple";
734 732
735 return snprintf(buf, 20, "%s\n", name); 733 return snprintf(buf, 20, "%s\n", name);
@@ -747,9 +745,12 @@ store_queue_type_field(struct device *dev, struct device_attribute *attr,
747 if (!sdev->tagged_supported || !sht->change_queue_type) 745 if (!sdev->tagged_supported || !sht->change_queue_type)
748 return -EINVAL; 746 return -EINVAL;
749 747
750 if (strncmp(buf, "ordered", 7) == 0) 748 /*
751 tag_type = MSG_ORDERED_TAG; 749 * We're never issueing order tags these days, but allow the value
752 else if (strncmp(buf, "simple", 6) == 0) 750 * for backwards compatibility.
751 */
752 if (strncmp(buf, "ordered", 7) == 0 ||
753 strncmp(buf, "simple", 6) == 0)
753 tag_type = MSG_SIMPLE_TAG; 754 tag_type = MSG_SIMPLE_TAG;
754 else if (strncmp(buf, "none", 4) != 0) 755 else if (strncmp(buf, "none", 4) != 0)
755 return -EINVAL; 756 return -EINVAL;
@@ -876,11 +877,10 @@ sdev_store_queue_depth(struct device *dev, struct device_attribute *attr,
876 877
877 depth = simple_strtoul(buf, NULL, 0); 878 depth = simple_strtoul(buf, NULL, 0);
878 879
879 if (depth < 1) 880 if (depth < 1 || depth > sht->can_queue)
880 return -EINVAL; 881 return -EINVAL;
881 882
882 retval = sht->change_queue_depth(sdev, depth, 883 retval = sht->change_queue_depth(sdev, depth);
883 SCSI_QDEPTH_DEFAULT);
884 if (retval < 0) 884 if (retval < 0)
885 return retval; 885 return retval;
886 886
diff --git a/drivers/scsi/scsi_trace.c b/drivers/scsi/scsi_trace.c
index 503594e5f76d..82af28b90294 100644
--- a/drivers/scsi/scsi_trace.c
+++ b/drivers/scsi/scsi_trace.c
@@ -278,7 +278,7 @@ scsi_trace_parse_cdb(struct trace_seq *p, unsigned char *cdb, int len)
278 return scsi_trace_rw16(p, cdb, len); 278 return scsi_trace_rw16(p, cdb, len);
279 case UNMAP: 279 case UNMAP:
280 return scsi_trace_unmap(p, cdb, len); 280 return scsi_trace_unmap(p, cdb, len);
281 case SERVICE_ACTION_IN: 281 case SERVICE_ACTION_IN_16:
282 return scsi_trace_service_action_in(p, cdb, len); 282 return scsi_trace_service_action_in(p, cdb, len);
283 case VARIABLE_LENGTH_CMD: 283 case VARIABLE_LENGTH_CMD:
284 return scsi_trace_varlen(p, cdb, len); 284 return scsi_trace_varlen(p, cdb, len);
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index cf08071a9b6e..fa2aece76cc2 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -32,6 +32,7 @@
32#include <scsi/scsi_host.h> 32#include <scsi/scsi_host.h>
33#include <scsi/scsi_cmnd.h> 33#include <scsi/scsi_cmnd.h>
34#include <scsi/scsi_eh.h> 34#include <scsi/scsi_eh.h>
35#include <scsi/scsi_tcq.h>
35#include <scsi/scsi_transport.h> 36#include <scsi/scsi_transport.h>
36#include <scsi/scsi_transport_spi.h> 37#include <scsi/scsi_transport_spi.h>
37 38
@@ -1207,6 +1208,28 @@ int spi_populate_ppr_msg(unsigned char *msg, int period, int offset,
1207} 1208}
1208EXPORT_SYMBOL_GPL(spi_populate_ppr_msg); 1209EXPORT_SYMBOL_GPL(spi_populate_ppr_msg);
1209 1210
1211/**
1212 * spi_populate_tag_msg - place a tag message in a buffer
1213 * @msg: pointer to the area to place the tag
1214 * @cmd: pointer to the scsi command for the tag
1215 *
1216 * Notes:
1217 * designed to create the correct type of tag message for the
1218 * particular request. Returns the size of the tag message.
1219 * May return 0 if TCQ is disabled for this device.
1220 **/
1221int spi_populate_tag_msg(unsigned char *msg, struct scsi_cmnd *cmd)
1222{
1223 if (cmd->flags & SCMD_TAGGED) {
1224 *msg++ = MSG_SIMPLE_TAG;
1225 *msg++ = cmd->request->tag;
1226 return 2;
1227 }
1228
1229 return 0;
1230}
1231EXPORT_SYMBOL_GPL(spi_populate_tag_msg);
1232
1210#ifdef CONFIG_SCSI_CONSTANTS 1233#ifdef CONFIG_SCSI_CONSTANTS
1211static const char * const one_byte_msgs[] = { 1234static const char * const one_byte_msgs[] = {
1212/* 0x00 */ "Task Complete", NULL /* Extended Message */, "Save Pointers", 1235/* 0x00 */ "Task Complete", NULL /* Extended Message */, "Save Pointers",
diff --git a/drivers/scsi/scsicam.c b/drivers/scsi/scsicam.c
index 92d24d6dcb39..910f4a7a3924 100644
--- a/drivers/scsi/scsicam.c
+++ b/drivers/scsi/scsicam.c
@@ -163,8 +163,8 @@ int scsi_partsize(unsigned char *buf, unsigned long capacity,
163 end_head * end_sector + end_sector; 163 end_head * end_sector + end_sector;
164 164
165 /* This is the actual _sector_ number at the end */ 165 /* This is the actual _sector_ number at the end */
166 logical_end = get_unaligned(&largest->start_sect) 166 logical_end = get_unaligned_le32(&largest->start_sect)
167 + get_unaligned(&largest->nr_sects); 167 + get_unaligned_le32(&largest->nr_sects);
168 168
169 /* This is for >1023 cylinders */ 169 /* This is for >1023 cylinders */
170 ext_cyl = (logical_end - (end_head * end_sector + end_sector)) 170 ext_cyl = (logical_end - (end_head * end_sector + end_sector))
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index cfba74cd8e8b..fedab3c21ddf 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -116,7 +116,7 @@ static int sd_eh_action(struct scsi_cmnd *, int);
116static void sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer); 116static void sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer);
117static void scsi_disk_release(struct device *cdev); 117static void scsi_disk_release(struct device *cdev);
118static void sd_print_sense_hdr(struct scsi_disk *, struct scsi_sense_hdr *); 118static void sd_print_sense_hdr(struct scsi_disk *, struct scsi_sense_hdr *);
119static void sd_print_result(struct scsi_disk *, int); 119static void sd_print_result(const struct scsi_disk *, const char *, int);
120 120
121static DEFINE_SPINLOCK(sd_index_lock); 121static DEFINE_SPINLOCK(sd_index_lock);
122static DEFINE_IDA(sd_index_ida); 122static DEFINE_IDA(sd_index_ida);
@@ -510,9 +510,9 @@ static const struct dev_pm_ops sd_pm_ops = {
510}; 510};
511 511
512static struct scsi_driver sd_template = { 512static struct scsi_driver sd_template = {
513 .owner = THIS_MODULE,
514 .gendrv = { 513 .gendrv = {
515 .name = "sd", 514 .name = "sd",
515 .owner = THIS_MODULE,
516 .probe = sd_probe, 516 .probe = sd_probe,
517 .remove = sd_remove, 517 .remove = sd_remove,
518 .shutdown = sd_shutdown, 518 .shutdown = sd_shutdown,
@@ -656,7 +656,7 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
656 unsigned int logical_block_size = sdkp->device->sector_size; 656 unsigned int logical_block_size = sdkp->device->sector_size;
657 unsigned int max_blocks = 0; 657 unsigned int max_blocks = 0;
658 658
659 q->limits.discard_zeroes_data = sdkp->lbprz; 659 q->limits.discard_zeroes_data = 0;
660 q->limits.discard_alignment = sdkp->unmap_alignment * 660 q->limits.discard_alignment = sdkp->unmap_alignment *
661 logical_block_size; 661 logical_block_size;
662 q->limits.discard_granularity = 662 q->limits.discard_granularity =
@@ -680,11 +680,13 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
680 case SD_LBP_WS16: 680 case SD_LBP_WS16:
681 max_blocks = min_not_zero(sdkp->max_ws_blocks, 681 max_blocks = min_not_zero(sdkp->max_ws_blocks,
682 (u32)SD_MAX_WS16_BLOCKS); 682 (u32)SD_MAX_WS16_BLOCKS);
683 q->limits.discard_zeroes_data = sdkp->lbprz;
683 break; 684 break;
684 685
685 case SD_LBP_WS10: 686 case SD_LBP_WS10:
686 max_blocks = min_not_zero(sdkp->max_ws_blocks, 687 max_blocks = min_not_zero(sdkp->max_ws_blocks,
687 (u32)SD_MAX_WS10_BLOCKS); 688 (u32)SD_MAX_WS10_BLOCKS);
689 q->limits.discard_zeroes_data = sdkp->lbprz;
688 break; 690 break;
689 691
690 case SD_LBP_ZERO: 692 case SD_LBP_ZERO:
@@ -784,7 +786,7 @@ static int sd_setup_discard_cmnd(struct scsi_cmnd *cmd)
784 * amount of blocks described by the request. 786 * amount of blocks described by the request.
785 */ 787 */
786 blk_add_request_payload(rq, page, len); 788 blk_add_request_payload(rq, page, len);
787 ret = scsi_init_io(cmd, GFP_ATOMIC); 789 ret = scsi_init_io(cmd);
788 rq->__data_len = nr_bytes; 790 rq->__data_len = nr_bytes;
789 791
790out: 792out:
@@ -878,7 +880,7 @@ static int sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
878 * knows how much to actually write. 880 * knows how much to actually write.
879 */ 881 */
880 rq->__data_len = sdp->sector_size; 882 rq->__data_len = sdp->sector_size;
881 ret = scsi_init_io(cmd, GFP_ATOMIC); 883 ret = scsi_init_io(cmd);
882 rq->__data_len = nr_bytes; 884 rq->__data_len = nr_bytes;
883 return ret; 885 return ret;
884} 886}
@@ -912,7 +914,7 @@ static int sd_setup_read_write_cmnd(struct scsi_cmnd *SCpnt)
912 int ret; 914 int ret;
913 unsigned char protect; 915 unsigned char protect;
914 916
915 ret = scsi_init_io(SCpnt, GFP_ATOMIC); 917 ret = scsi_init_io(SCpnt);
916 if (ret != BLKPREP_OK) 918 if (ret != BLKPREP_OK)
917 goto out; 919 goto out;
918 SCpnt = rq->special; 920 SCpnt = rq->special;
@@ -1334,9 +1336,9 @@ static int sd_ioctl(struct block_device *bdev, fmode_t mode,
1334 * may try and take the device offline, in which case all further 1336 * may try and take the device offline, in which case all further
1335 * access to the device is prohibited. 1337 * access to the device is prohibited.
1336 */ 1338 */
1337 error = scsi_nonblockable_ioctl(sdp, cmd, p, 1339 error = scsi_ioctl_block_when_processing_errors(sdp, cmd,
1338 (mode & FMODE_NDELAY) != 0); 1340 (mode & FMODE_NDELAY) != 0);
1339 if (!scsi_block_when_processing_errors(sdp) || !error) 1341 if (error)
1340 goto out; 1342 goto out;
1341 1343
1342 /* 1344 /*
@@ -1492,7 +1494,7 @@ static int sd_sync_cache(struct scsi_disk *sdkp)
1492 } 1494 }
1493 1495
1494 if (res) { 1496 if (res) {
1495 sd_print_result(sdkp, res); 1497 sd_print_result(sdkp, "Synchronize Cache(10) failed", res);
1496 1498
1497 if (driver_byte(res) & DRIVER_SENSE) 1499 if (driver_byte(res) & DRIVER_SENSE)
1498 sd_print_sense_hdr(sdkp, &sshdr); 1500 sd_print_sense_hdr(sdkp, &sshdr);
@@ -1541,31 +1543,19 @@ static int sd_compat_ioctl(struct block_device *bdev, fmode_t mode,
1541 unsigned int cmd, unsigned long arg) 1543 unsigned int cmd, unsigned long arg)
1542{ 1544{
1543 struct scsi_device *sdev = scsi_disk(bdev->bd_disk)->device; 1545 struct scsi_device *sdev = scsi_disk(bdev->bd_disk)->device;
1544 int ret; 1546 int error;
1545
1546 ret = scsi_verify_blk_ioctl(bdev, cmd);
1547 if (ret < 0)
1548 return ret;
1549 1547
1550 /* 1548 error = scsi_ioctl_block_when_processing_errors(sdev, cmd,
1551 * If we are in the middle of error recovery, don't let anyone 1549 (mode & FMODE_NDELAY) != 0);
1552 * else try and use this device. Also, if error recovery fails, it 1550 if (error)
1553 * may try and take the device offline, in which case all further 1551 return error;
1554 * access to the device is prohibited.
1555 */
1556 if (!scsi_block_when_processing_errors(sdev))
1557 return -ENODEV;
1558 1552
1559 if (sdev->host->hostt->compat_ioctl) {
1560 ret = sdev->host->hostt->compat_ioctl(sdev, cmd, (void __user *)arg);
1561
1562 return ret;
1563 }
1564
1565 /* 1553 /*
1566 * Let the static ioctl translation table take care of it. 1554 * Let the static ioctl translation table take care of it.
1567 */ 1555 */
1568 return -ENOIOCTLCMD; 1556 if (!sdev->host->hostt->compat_ioctl)
1557 return -ENOIOCTLCMD;
1558 return sdev->host->hostt->compat_ioctl(sdev, cmd, (void __user *)arg);
1569} 1559}
1570#endif 1560#endif
1571 1561
@@ -1713,17 +1703,6 @@ static int sd_done(struct scsi_cmnd *SCpnt)
1713 if (sense_valid) 1703 if (sense_valid)
1714 sense_deferred = scsi_sense_is_deferred(&sshdr); 1704 sense_deferred = scsi_sense_is_deferred(&sshdr);
1715 } 1705 }
1716#ifdef CONFIG_SCSI_LOGGING
1717 SCSI_LOG_HLCOMPLETE(1, scsi_print_result(SCpnt));
1718 if (sense_valid) {
1719 SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, SCpnt,
1720 "sd_done: sb[respc,sk,asc,"
1721 "ascq]=%x,%x,%x,%x\n",
1722 sshdr.response_code,
1723 sshdr.sense_key, sshdr.asc,
1724 sshdr.ascq));
1725 }
1726#endif
1727 sdkp->medium_access_timed_out = 0; 1706 sdkp->medium_access_timed_out = 0;
1728 1707
1729 if (driver_byte(result) != DRIVER_SENSE && 1708 if (driver_byte(result) != DRIVER_SENSE &&
@@ -1743,7 +1722,6 @@ static int sd_done(struct scsi_cmnd *SCpnt)
1743 * unknown amount of data was transferred so treat it as an 1722 * unknown amount of data was transferred so treat it as an
1744 * error. 1723 * error.
1745 */ 1724 */
1746 scsi_print_sense("sd", SCpnt);
1747 SCpnt->result = 0; 1725 SCpnt->result = 0;
1748 memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 1726 memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1749 break; 1727 break;
@@ -1779,6 +1757,10 @@ static int sd_done(struct scsi_cmnd *SCpnt)
1779 break; 1757 break;
1780 } 1758 }
1781 out: 1759 out:
1760 SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, SCpnt,
1761 "sd_done: completed %d of %d bytes\n",
1762 good_bytes, scsi_bufflen(SCpnt)));
1763
1782 if (rq_data_dir(SCpnt->request) == READ && scsi_prot_sg_count(SCpnt)) 1764 if (rq_data_dir(SCpnt->request) == READ && scsi_prot_sg_count(SCpnt))
1783 sd_dif_complete(SCpnt, good_bytes); 1765 sd_dif_complete(SCpnt, good_bytes);
1784 1766
@@ -1834,12 +1816,12 @@ sd_spinup_disk(struct scsi_disk *sdkp)
1834 /* no sense, TUR either succeeded or failed 1816 /* no sense, TUR either succeeded or failed
1835 * with a status error */ 1817 * with a status error */
1836 if(!spintime && !scsi_status_is_good(the_result)) { 1818 if(!spintime && !scsi_status_is_good(the_result)) {
1837 sd_printk(KERN_NOTICE, sdkp, "Unit Not Ready\n"); 1819 sd_print_result(sdkp, "Test Unit Ready failed",
1838 sd_print_result(sdkp, the_result); 1820 the_result);
1839 } 1821 }
1840 break; 1822 break;
1841 } 1823 }
1842 1824
1843 /* 1825 /*
1844 * The device does not want the automatic start to be issued. 1826 * The device does not want the automatic start to be issued.
1845 */ 1827 */
@@ -1955,7 +1937,6 @@ static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp,
1955 struct scsi_sense_hdr *sshdr, int sense_valid, 1937 struct scsi_sense_hdr *sshdr, int sense_valid,
1956 int the_result) 1938 int the_result)
1957{ 1939{
1958 sd_print_result(sdkp, the_result);
1959 if (driver_byte(the_result) & DRIVER_SENSE) 1940 if (driver_byte(the_result) & DRIVER_SENSE)
1960 sd_print_sense_hdr(sdkp, sshdr); 1941 sd_print_sense_hdr(sdkp, sshdr);
1961 else 1942 else
@@ -2001,7 +1982,7 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
2001 1982
2002 do { 1983 do {
2003 memset(cmd, 0, 16); 1984 memset(cmd, 0, 16);
2004 cmd[0] = SERVICE_ACTION_IN; 1985 cmd[0] = SERVICE_ACTION_IN_16;
2005 cmd[1] = SAI_READ_CAPACITY_16; 1986 cmd[1] = SAI_READ_CAPACITY_16;
2006 cmd[13] = RC16_LEN; 1987 cmd[13] = RC16_LEN;
2007 memset(buffer, 0, RC16_LEN); 1988 memset(buffer, 0, RC16_LEN);
@@ -2036,7 +2017,7 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
2036 } while (the_result && retries); 2017 } while (the_result && retries);
2037 2018
2038 if (the_result) { 2019 if (the_result) {
2039 sd_printk(KERN_NOTICE, sdkp, "READ CAPACITY(16) failed\n"); 2020 sd_print_result(sdkp, "Read Capacity(16) failed", the_result);
2040 read_capacity_error(sdkp, sdp, &sshdr, sense_valid, the_result); 2021 read_capacity_error(sdkp, sdp, &sshdr, sense_valid, the_result);
2041 return -EINVAL; 2022 return -EINVAL;
2042 } 2023 }
@@ -2118,7 +2099,7 @@ static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp,
2118 } while (the_result && retries); 2099 } while (the_result && retries);
2119 2100
2120 if (the_result) { 2101 if (the_result) {
2121 sd_printk(KERN_NOTICE, sdkp, "READ CAPACITY failed\n"); 2102 sd_print_result(sdkp, "Read Capacity(10) failed", the_result);
2122 read_capacity_error(sdkp, sdp, &sshdr, sense_valid, the_result); 2103 read_capacity_error(sdkp, sdp, &sshdr, sense_valid, the_result);
2123 return -EINVAL; 2104 return -EINVAL;
2124 } 2105 }
@@ -2643,12 +2624,12 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
2643 2624
2644 } else { /* LBP VPD page tells us what to use */ 2625 } else { /* LBP VPD page tells us what to use */
2645 2626
2646 if (sdkp->lbpu && sdkp->max_unmap_blocks) 2627 if (sdkp->lbpws)
2647 sd_config_discard(sdkp, SD_LBP_UNMAP);
2648 else if (sdkp->lbpws)
2649 sd_config_discard(sdkp, SD_LBP_WS16); 2628 sd_config_discard(sdkp, SD_LBP_WS16);
2650 else if (sdkp->lbpws10) 2629 else if (sdkp->lbpws10)
2651 sd_config_discard(sdkp, SD_LBP_WS10); 2630 sd_config_discard(sdkp, SD_LBP_WS10);
2631 else if (sdkp->lbpu && sdkp->max_unmap_blocks)
2632 sd_config_discard(sdkp, SD_LBP_UNMAP);
2652 else 2633 else
2653 sd_config_discard(sdkp, SD_LBP_DISABLE); 2634 sd_config_discard(sdkp, SD_LBP_DISABLE);
2654 } 2635 }
@@ -3142,8 +3123,7 @@ static int sd_start_stop_device(struct scsi_disk *sdkp, int start)
3142 res = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr, 3123 res = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
3143 SD_TIMEOUT, SD_MAX_RETRIES, NULL, REQ_PM); 3124 SD_TIMEOUT, SD_MAX_RETRIES, NULL, REQ_PM);
3144 if (res) { 3125 if (res) {
3145 sd_printk(KERN_WARNING, sdkp, "START_STOP FAILED\n"); 3126 sd_print_result(sdkp, "Start/Stop Unit failed", res);
3146 sd_print_result(sdkp, res);
3147 if (driver_byte(res) & DRIVER_SENSE) 3127 if (driver_byte(res) & DRIVER_SENSE)
3148 sd_print_sense_hdr(sdkp, &sshdr); 3128 sd_print_sense_hdr(sdkp, &sshdr);
3149 if (scsi_sense_valid(&sshdr) && 3129 if (scsi_sense_valid(&sshdr) &&
@@ -3337,15 +3317,27 @@ module_exit(exit_sd);
3337static void sd_print_sense_hdr(struct scsi_disk *sdkp, 3317static void sd_print_sense_hdr(struct scsi_disk *sdkp,
3338 struct scsi_sense_hdr *sshdr) 3318 struct scsi_sense_hdr *sshdr)
3339{ 3319{
3340 sd_printk(KERN_INFO, sdkp, " "); 3320 scsi_show_sense_hdr(sdkp->device,
3341 scsi_show_sense_hdr(sshdr); 3321 sdkp->disk ? sdkp->disk->disk_name : NULL, sshdr);
3342 sd_printk(KERN_INFO, sdkp, " "); 3322 scsi_show_extd_sense(sdkp->device,
3343 scsi_show_extd_sense(sshdr->asc, sshdr->ascq); 3323 sdkp->disk ? sdkp->disk->disk_name : NULL,
3324 sshdr->asc, sshdr->ascq);
3344} 3325}
3345 3326
3346static void sd_print_result(struct scsi_disk *sdkp, int result) 3327static void sd_print_result(const struct scsi_disk *sdkp, const char *msg,
3328 int result)
3347{ 3329{
3348 sd_printk(KERN_INFO, sdkp, " "); 3330 const char *hb_string = scsi_hostbyte_string(result);
3349 scsi_show_result(result); 3331 const char *db_string = scsi_driverbyte_string(result);
3332
3333 if (hb_string || db_string)
3334 sd_printk(KERN_INFO, sdkp,
3335 "%s: Result: hostbyte=%s driverbyte=%s\n", msg,
3336 hb_string ? hb_string : "invalid",
3337 db_string ? db_string : "invalid");
3338 else
3339 sd_printk(KERN_INFO, sdkp,
3340 "%s: Result: hostbyte=0x%02x driverbyte=0x%02x\n",
3341 msg, host_byte(result), driver_byte(result));
3350} 3342}
3351 3343
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 467377884b63..63ba5ca7f9a1 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -103,9 +103,9 @@ static inline struct scsi_disk *scsi_disk(struct gendisk *disk)
103 103
104#define sd_printk(prefix, sdsk, fmt, a...) \ 104#define sd_printk(prefix, sdsk, fmt, a...) \
105 (sdsk)->disk ? \ 105 (sdsk)->disk ? \
106 sdev_printk(prefix, (sdsk)->device, "[%s] " fmt, \ 106 sdev_prefix_printk(prefix, (sdsk)->device, \
107 (sdsk)->disk->disk_name, ##a) : \ 107 (sdsk)->disk->disk_name, fmt, ##a) : \
108 sdev_printk(prefix, (sdsk)->device, fmt, ##a) 108 sdev_printk(prefix, (sdsk)->device, fmt, ##a)
109 109
110#define sd_first_printk(prefix, sdsk, fmt, a...) \ 110#define sd_first_printk(prefix, sdsk, fmt, a...) \
111 do { \ 111 do { \
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index 80bfece1a2de..b7e79e7646ad 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -693,9 +693,9 @@ static struct class_interface ses_interface = {
693}; 693};
694 694
695static struct scsi_driver ses_template = { 695static struct scsi_driver ses_template = {
696 .owner = THIS_MODULE,
697 .gendrv = { 696 .gendrv = {
698 .name = "ses", 697 .name = "ses",
698 .owner = THIS_MODULE,
699 .probe = ses_probe, 699 .probe = ses_probe,
700 .remove = ses_remove, 700 .remove = ses_remove,
701 }, 701 },
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 60354449d9ed..b14f64cb9724 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -219,8 +219,8 @@ static void sg_device_destroy(struct kref *kref);
219#define SZ_SG_REQ_INFO sizeof(sg_req_info_t) 219#define SZ_SG_REQ_INFO sizeof(sg_req_info_t)
220 220
221#define sg_printk(prefix, sdp, fmt, a...) \ 221#define sg_printk(prefix, sdp, fmt, a...) \
222 sdev_printk(prefix, (sdp)->device, "[%s] " fmt, \ 222 sdev_prefix_printk(prefix, (sdp)->device, \
223 (sdp)->disk->disk_name, ##a) 223 (sdp)->disk->disk_name, fmt, ##a)
224 224
225static int sg_allow_access(struct file *filp, unsigned char *cmd) 225static int sg_allow_access(struct file *filp, unsigned char *cmd)
226{ 226{
@@ -1071,39 +1071,6 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
1071 if (atomic_read(&sdp->detaching)) 1071 if (atomic_read(&sdp->detaching))
1072 return -ENODEV; 1072 return -ENODEV;
1073 return put_user(sdp->device->host->hostt->emulated, ip); 1073 return put_user(sdp->device->host->hostt->emulated, ip);
1074 case SG_SCSI_RESET:
1075 if (atomic_read(&sdp->detaching))
1076 return -ENODEV;
1077 if (filp->f_flags & O_NONBLOCK) {
1078 if (scsi_host_in_recovery(sdp->device->host))
1079 return -EBUSY;
1080 } else if (!scsi_block_when_processing_errors(sdp->device))
1081 return -EBUSY;
1082 result = get_user(val, ip);
1083 if (result)
1084 return result;
1085 if (SG_SCSI_RESET_NOTHING == val)
1086 return 0;
1087 switch (val) {
1088 case SG_SCSI_RESET_DEVICE:
1089 val = SCSI_TRY_RESET_DEVICE;
1090 break;
1091 case SG_SCSI_RESET_TARGET:
1092 val = SCSI_TRY_RESET_TARGET;
1093 break;
1094 case SG_SCSI_RESET_BUS:
1095 val = SCSI_TRY_RESET_BUS;
1096 break;
1097 case SG_SCSI_RESET_HOST:
1098 val = SCSI_TRY_RESET_HOST;
1099 break;
1100 default:
1101 return -EINVAL;
1102 }
1103 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
1104 return -EACCES;
1105 return (scsi_reset_provider(sdp->device, val) ==
1106 SUCCESS) ? 0 : -EIO;
1107 case SCSI_IOCTL_SEND_COMMAND: 1074 case SCSI_IOCTL_SEND_COMMAND:
1108 if (atomic_read(&sdp->detaching)) 1075 if (atomic_read(&sdp->detaching))
1109 return -ENODEV; 1076 return -ENODEV;
@@ -1123,13 +1090,6 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
1123 return result; 1090 return result;
1124 sdp->sgdebug = (char) val; 1091 sdp->sgdebug = (char) val;
1125 return 0; 1092 return 0;
1126 case SCSI_IOCTL_GET_IDLUN:
1127 case SCSI_IOCTL_GET_BUS_NUMBER:
1128 case SCSI_IOCTL_PROBE_HOST:
1129 case SG_GET_TRANSFORM:
1130 if (atomic_read(&sdp->detaching))
1131 return -ENODEV;
1132 return scsi_ioctl(sdp->device, cmd_in, p);
1133 case BLKSECTGET: 1093 case BLKSECTGET:
1134 return put_user(max_sectors_bytes(sdp->device->request_queue), 1094 return put_user(max_sectors_bytes(sdp->device->request_queue),
1135 ip); 1095 ip);
@@ -1145,11 +1105,25 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
1145 return blk_trace_startstop(sdp->device->request_queue, 0); 1105 return blk_trace_startstop(sdp->device->request_queue, 0);
1146 case BLKTRACETEARDOWN: 1106 case BLKTRACETEARDOWN:
1147 return blk_trace_remove(sdp->device->request_queue); 1107 return blk_trace_remove(sdp->device->request_queue);
1108 case SCSI_IOCTL_GET_IDLUN:
1109 case SCSI_IOCTL_GET_BUS_NUMBER:
1110 case SCSI_IOCTL_PROBE_HOST:
1111 case SG_GET_TRANSFORM:
1112 case SG_SCSI_RESET:
1113 if (atomic_read(&sdp->detaching))
1114 return -ENODEV;
1115 break;
1148 default: 1116 default:
1149 if (read_only) 1117 if (read_only)
1150 return -EPERM; /* don't know so take safe approach */ 1118 return -EPERM; /* don't know so take safe approach */
1151 return scsi_ioctl(sdp->device, cmd_in, p); 1119 break;
1152 } 1120 }
1121
1122 result = scsi_ioctl_block_when_processing_errors(sdp->device,
1123 cmd_in, filp->f_flags & O_NDELAY);
1124 if (result)
1125 return result;
1126 return scsi_ioctl(sdp->device, cmd_in, p);
1153} 1127}
1154 1128
1155#ifdef CONFIG_COMPAT 1129#ifdef CONFIG_COMPAT
@@ -1360,7 +1334,7 @@ sg_rq_end_io(struct request *rq, int uptodate)
1360 if ((sdp->sgdebug > 0) && 1334 if ((sdp->sgdebug > 0) &&
1361 ((CHECK_CONDITION == srp->header.masked_status) || 1335 ((CHECK_CONDITION == srp->header.masked_status) ||
1362 (COMMAND_TERMINATED == srp->header.masked_status))) 1336 (COMMAND_TERMINATED == srp->header.masked_status)))
1363 __scsi_print_sense(__func__, sense, 1337 __scsi_print_sense(sdp->device, __func__, sense,
1364 SCSI_SENSE_BUFFERSIZE); 1338 SCSI_SENSE_BUFFERSIZE);
1365 1339
1366 /* Following if statement is a patch supplied by Eric Youngdale */ 1340 /* Following if statement is a patch supplied by Eric Youngdale */
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 2de44cc58b1a..8bd54a64efd6 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -88,9 +88,9 @@ static struct dev_pm_ops sr_pm_ops = {
88}; 88};
89 89
90static struct scsi_driver sr_template = { 90static struct scsi_driver sr_template = {
91 .owner = THIS_MODULE,
92 .gendrv = { 91 .gendrv = {
93 .name = "sr", 92 .name = "sr",
93 .owner = THIS_MODULE,
94 .probe = sr_probe, 94 .probe = sr_probe,
95 .remove = sr_remove, 95 .remove = sr_remove,
96 .pm = &sr_pm_ops, 96 .pm = &sr_pm_ops,
@@ -387,7 +387,7 @@ static int sr_init_command(struct scsi_cmnd *SCpnt)
387 struct request *rq = SCpnt->request; 387 struct request *rq = SCpnt->request;
388 int ret; 388 int ret;
389 389
390 ret = scsi_init_io(SCpnt, GFP_ATOMIC); 390 ret = scsi_init_io(SCpnt);
391 if (ret != BLKPREP_OK) 391 if (ret != BLKPREP_OK)
392 goto out; 392 goto out;
393 SCpnt = rq->special; 393 SCpnt = rq->special;
@@ -549,6 +549,11 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
549 549
550 mutex_lock(&sr_mutex); 550 mutex_lock(&sr_mutex);
551 551
552 ret = scsi_ioctl_block_when_processing_errors(sdev, cmd,
553 (mode & FMODE_NDELAY) != 0);
554 if (ret)
555 goto out;
556
552 /* 557 /*
553 * Send SCSI addressing ioctls directly to mid level, send other 558 * Send SCSI addressing ioctls directly to mid level, send other
554 * ioctls to cdrom/block level. 559 * ioctls to cdrom/block level.
@@ -564,16 +569,6 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
564 if (ret != -ENOSYS) 569 if (ret != -ENOSYS)
565 goto out; 570 goto out;
566 571
567 /*
568 * ENODEV means that we didn't recognise the ioctl, or that we
569 * cannot execute it in the current device state. In either
570 * case fall through to scsi_ioctl, which will return ENDOEV again
571 * if it doesn't recognise the ioctl
572 */
573 ret = scsi_nonblockable_ioctl(sdev, cmd, argp,
574 (mode & FMODE_NDELAY) != 0);
575 if (ret != -ENODEV)
576 goto out;
577 ret = scsi_ioctl(sdev, cmd, argp); 572 ret = scsi_ioctl(sdev, cmd, argp);
578 573
579out: 574out:
diff --git a/drivers/scsi/sr.h b/drivers/scsi/sr.h
index 1d1f6f416c59..1de33719ad8e 100644
--- a/drivers/scsi/sr.h
+++ b/drivers/scsi/sr.h
@@ -57,8 +57,7 @@ typedef struct scsi_cd {
57} Scsi_CD; 57} Scsi_CD;
58 58
59#define sr_printk(prefix, cd, fmt, a...) \ 59#define sr_printk(prefix, cd, fmt, a...) \
60 sdev_printk(prefix, (cd)->device, "[%s] " fmt, \ 60 sdev_prefix_printk(prefix, (cd)->device, (cd)->cdi.name, fmt, ##a)
61 (cd)->cdi.name, ##a)
62 61
63int sr_do_ioctl(Scsi_CD *, struct packet_command *); 62int sr_do_ioctl(Scsi_CD *, struct packet_command *);
64 63
diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c
index 6389fcff12ec..fb929fac22ba 100644
--- a/drivers/scsi/sr_ioctl.c
+++ b/drivers/scsi/sr_ioctl.c
@@ -246,7 +246,7 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc)
246 "CDROM not ready. Make sure there " 246 "CDROM not ready. Make sure there "
247 "is a disc in the drive.\n"); 247 "is a disc in the drive.\n");
248#ifdef DEBUG 248#ifdef DEBUG
249 scsi_print_sense_hdr("sr", &sshdr); 249 scsi_print_sense_hdr(cd->device, cd->cdi.name, &sshdr);
250#endif 250#endif
251 err = -ENOMEDIUM; 251 err = -ENOMEDIUM;
252 break; 252 break;
@@ -257,15 +257,15 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc)
257 /* sense: Invalid command operation code */ 257 /* sense: Invalid command operation code */
258 err = -EDRIVE_CANT_DO_THIS; 258 err = -EDRIVE_CANT_DO_THIS;
259#ifdef DEBUG 259#ifdef DEBUG
260 __scsi_print_command(cgc->cmd); 260 __scsi_print_command(cgc->cmd, CDROM_PACKET_SIZE);
261 scsi_print_sense_hdr("sr", &sshdr); 261 scsi_print_sense_hdr(cd->device, cd->cdi.name, &sshdr);
262#endif 262#endif
263 break; 263 break;
264 default: 264 default:
265 sr_printk(KERN_ERR, cd, 265 sr_printk(KERN_ERR, cd,
266 "CDROM (ioctl) error, command: "); 266 "CDROM (ioctl) error, command: ");
267 __scsi_print_command(cgc->cmd); 267 __scsi_print_command(cgc->cmd, CDROM_PACKET_SIZE);
268 scsi_print_sense_hdr("sr", &sshdr); 268 scsi_print_sense_hdr(cd->device, cd->cdi.name, &sshdr);
269 err = -EIO; 269 err = -EIO;
270 } 270 }
271 } 271 }
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 4daa372ed381..128d3b55bdd9 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -56,7 +56,8 @@ static const char *verstr = "20101219";
56 56
57/* The driver prints some debugging information on the console if DEBUG 57/* The driver prints some debugging information on the console if DEBUG
58 is defined and non-zero. */ 58 is defined and non-zero. */
59#define DEBUG 0 59#define DEBUG 1
60#define NO_DEBUG 0
60 61
61#define ST_DEB_MSG KERN_NOTICE 62#define ST_DEB_MSG KERN_NOTICE
62#if DEBUG 63#if DEBUG
@@ -80,6 +81,7 @@ static int max_sg_segs;
80static int try_direct_io = TRY_DIRECT_IO; 81static int try_direct_io = TRY_DIRECT_IO;
81static int try_rdio = 1; 82static int try_rdio = 1;
82static int try_wdio = 1; 83static int try_wdio = 1;
84static int debug_flag;
83 85
84static struct class st_sysfs_class; 86static struct class st_sysfs_class;
85static const struct attribute_group *st_dev_groups[]; 87static const struct attribute_group *st_dev_groups[];
@@ -100,6 +102,9 @@ module_param_named(max_sg_segs, max_sg_segs, int, 0);
100MODULE_PARM_DESC(max_sg_segs, "Maximum number of scatter/gather segments to use (256)"); 102MODULE_PARM_DESC(max_sg_segs, "Maximum number of scatter/gather segments to use (256)");
101module_param_named(try_direct_io, try_direct_io, int, 0); 103module_param_named(try_direct_io, try_direct_io, int, 0);
102MODULE_PARM_DESC(try_direct_io, "Try direct I/O between user buffer and tape drive (1)"); 104MODULE_PARM_DESC(try_direct_io, "Try direct I/O between user buffer and tape drive (1)");
105module_param_named(debug_flag, debug_flag, int, 0);
106MODULE_PARM_DESC(debug_flag, "Enable DEBUG, same as setting debugging=1");
107
103 108
104/* Extra parameters for testing */ 109/* Extra parameters for testing */
105module_param_named(try_rdio, try_rdio, int, 0); 110module_param_named(try_rdio, try_rdio, int, 0);
@@ -124,6 +129,9 @@ static struct st_dev_parm {
124 }, 129 },
125 { 130 {
126 "try_direct_io", &try_direct_io 131 "try_direct_io", &try_direct_io
132 },
133 {
134 "debug_flag", &debug_flag
127 } 135 }
128}; 136};
129#endif 137#endif
@@ -194,9 +202,9 @@ static int do_create_sysfs_files(void);
194static void do_remove_sysfs_files(void); 202static void do_remove_sysfs_files(void);
195 203
196static struct scsi_driver st_template = { 204static struct scsi_driver st_template = {
197 .owner = THIS_MODULE,
198 .gendrv = { 205 .gendrv = {
199 .name = "st", 206 .name = "st",
207 .owner = THIS_MODULE,
200 .probe = st_probe, 208 .probe = st_probe,
201 .remove = st_remove, 209 .remove = st_remove,
202 }, 210 },
@@ -306,8 +314,7 @@ static inline char *tape_name(struct scsi_tape *tape)
306} 314}
307 315
308#define st_printk(prefix, t, fmt, a...) \ 316#define st_printk(prefix, t, fmt, a...) \
309 sdev_printk(prefix, (t)->device, "%s: " fmt, \ 317 sdev_prefix_printk(prefix, (t)->device, tape_name(t), fmt, ##a)
310 tape_name(t), ##a)
311#ifdef DEBUG 318#ifdef DEBUG
312#define DEBC_printk(t, fmt, a...) \ 319#define DEBC_printk(t, fmt, a...) \
313 if (debugging) { st_printk(ST_DEB_MSG, t, fmt, ##a ); } 320 if (debugging) { st_printk(ST_DEB_MSG, t, fmt, ##a ); }
@@ -374,7 +381,8 @@ static int st_chk_result(struct scsi_tape *STp, struct st_request * SRpnt)
374 SRpnt->cmd[0], SRpnt->cmd[1], SRpnt->cmd[2], 381 SRpnt->cmd[0], SRpnt->cmd[1], SRpnt->cmd[2],
375 SRpnt->cmd[3], SRpnt->cmd[4], SRpnt->cmd[5]); 382 SRpnt->cmd[3], SRpnt->cmd[4], SRpnt->cmd[5]);
376 if (cmdstatp->have_sense) 383 if (cmdstatp->have_sense)
377 __scsi_print_sense(name, SRpnt->sense, SCSI_SENSE_BUFFERSIZE); 384 __scsi_print_sense(STp->device, name,
385 SRpnt->sense, SCSI_SENSE_BUFFERSIZE);
378 } ) /* end DEB */ 386 } ) /* end DEB */
379 if (!debugging) { /* Abnormal conditions for tape */ 387 if (!debugging) { /* Abnormal conditions for tape */
380 if (!cmdstatp->have_sense) 388 if (!cmdstatp->have_sense)
@@ -390,7 +398,8 @@ static int st_chk_result(struct scsi_tape *STp, struct st_request * SRpnt)
390 SRpnt->cmd[0] != MODE_SENSE && 398 SRpnt->cmd[0] != MODE_SENSE &&
391 SRpnt->cmd[0] != TEST_UNIT_READY) { 399 SRpnt->cmd[0] != TEST_UNIT_READY) {
392 400
393 __scsi_print_sense(name, SRpnt->sense, SCSI_SENSE_BUFFERSIZE); 401 __scsi_print_sense(STp->device, name,
402 SRpnt->sense, SCSI_SENSE_BUFFERSIZE);
394 } 403 }
395 } 404 }
396 405
@@ -852,17 +861,16 @@ static int set_mode_densblk(struct scsi_tape * STp, struct st_modedef * STm)
852/* Lock or unlock the drive door. Don't use when st_request allocated. */ 861/* Lock or unlock the drive door. Don't use when st_request allocated. */
853static int do_door_lock(struct scsi_tape * STp, int do_lock) 862static int do_door_lock(struct scsi_tape * STp, int do_lock)
854{ 863{
855 int retval, cmd; 864 int retval;
856 865
857 cmd = do_lock ? SCSI_IOCTL_DOORLOCK : SCSI_IOCTL_DOORUNLOCK;
858 DEBC_printk(STp, "%socking drive door.\n", do_lock ? "L" : "Unl"); 866 DEBC_printk(STp, "%socking drive door.\n", do_lock ? "L" : "Unl");
859 retval = scsi_ioctl(STp->device, cmd, NULL); 867
860 if (!retval) { 868 retval = scsi_set_medium_removal(STp->device,
869 do_lock ? SCSI_REMOVAL_PREVENT : SCSI_REMOVAL_ALLOW);
870 if (!retval)
861 STp->door_locked = do_lock ? ST_LOCKED_EXPLICIT : ST_UNLOCKED; 871 STp->door_locked = do_lock ? ST_LOCKED_EXPLICIT : ST_UNLOCKED;
862 } 872 else
863 else {
864 STp->door_locked = ST_LOCK_FAILS; 873 STp->door_locked = ST_LOCK_FAILS;
865 }
866 return retval; 874 return retval;
867} 875}
868 876
@@ -3367,11 +3375,10 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
3367 * may try and take the device offline, in which case all further 3375 * may try and take the device offline, in which case all further
3368 * access to the device is prohibited. 3376 * access to the device is prohibited.
3369 */ 3377 */
3370 retval = scsi_nonblockable_ioctl(STp->device, cmd_in, p, 3378 retval = scsi_ioctl_block_when_processing_errors(STp->device, cmd_in,
3371 file->f_flags & O_NDELAY); 3379 file->f_flags & O_NDELAY);
3372 if (!scsi_block_when_processing_errors(STp->device) || retval != -ENODEV) 3380 if (retval)
3373 goto out; 3381 goto out;
3374 retval = 0;
3375 3382
3376 cmd_type = _IOC_TYPE(cmd_in); 3383 cmd_type = _IOC_TYPE(cmd_in);
3377 cmd_nr = _IOC_NR(cmd_in); 3384 cmd_nr = _IOC_NR(cmd_in);
@@ -4309,6 +4316,12 @@ static int __init init_st(void)
4309 printk(KERN_INFO "st: Version %s, fixed bufsize %d, s/g segs %d\n", 4316 printk(KERN_INFO "st: Version %s, fixed bufsize %d, s/g segs %d\n",
4310 verstr, st_fixed_buffer_size, st_max_sg_segs); 4317 verstr, st_fixed_buffer_size, st_max_sg_segs);
4311 4318
4319 debugging = (debug_flag > 0) ? debug_flag : NO_DEBUG;
4320 if (debugging) {
4321 printk(KERN_INFO "st: Debugging enabled debug_flag = %d\n",
4322 debugging);
4323 }
4324
4312 err = class_register(&st_sysfs_class); 4325 err = class_register(&st_sysfs_class);
4313 if (err) { 4326 if (err) {
4314 pr_err("Unable register sysfs class for SCSI tapes\n"); 4327 pr_err("Unable register sysfs class for SCSI tapes\n");
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index 1aa4befcfbd0..98a62bc15069 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -544,33 +544,15 @@ stex_ss_send_cmd(struct st_hba *hba, struct req_msg *req, u16 tag)
544} 544}
545 545
546static int 546static int
547stex_slave_alloc(struct scsi_device *sdev)
548{
549 /* Cheat: usually extracted from Inquiry data */
550 sdev->tagged_supported = 1;
551
552 scsi_activate_tcq(sdev, sdev->host->can_queue);
553
554 return 0;
555}
556
557static int
558stex_slave_config(struct scsi_device *sdev) 547stex_slave_config(struct scsi_device *sdev)
559{ 548{
560 sdev->use_10_for_rw = 1; 549 sdev->use_10_for_rw = 1;
561 sdev->use_10_for_ms = 1; 550 sdev->use_10_for_ms = 1;
562 blk_queue_rq_timeout(sdev->request_queue, 60 * HZ); 551 blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
563 sdev->tagged_supported = 1;
564 552
565 return 0; 553 return 0;
566} 554}
567 555
568static void
569stex_slave_destroy(struct scsi_device *sdev)
570{
571 scsi_deactivate_tcq(sdev, 1);
572}
573
574static int 556static int
575stex_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) 557stex_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
576{ 558{
@@ -1162,9 +1144,7 @@ static int stex_abort(struct scsi_cmnd *cmd)
1162 int result = SUCCESS; 1144 int result = SUCCESS;
1163 unsigned long flags; 1145 unsigned long flags;
1164 1146
1165 printk(KERN_INFO DRV_NAME 1147 scmd_printk(KERN_INFO, cmd, "aborting command\n");
1166 "(%s): aborting command\n", pci_name(hba->pdev));
1167 scsi_print_command(cmd);
1168 1148
1169 base = hba->mmio_base; 1149 base = hba->mmio_base;
1170 spin_lock_irqsave(host->host_lock, flags); 1150 spin_lock_irqsave(host->host_lock, flags);
@@ -1352,9 +1332,8 @@ static int stex_reset(struct scsi_cmnd *cmd)
1352 1332
1353 hba = (struct st_hba *) &cmd->device->host->hostdata[0]; 1333 hba = (struct st_hba *) &cmd->device->host->hostdata[0];
1354 1334
1355 printk(KERN_INFO DRV_NAME 1335 shost_printk(KERN_INFO, cmd->device->host,
1356 "(%s): resetting host\n", pci_name(hba->pdev)); 1336 "resetting host\n");
1357 scsi_print_command(cmd);
1358 1337
1359 return stex_do_reset(hba) ? FAILED : SUCCESS; 1338 return stex_do_reset(hba) ? FAILED : SUCCESS;
1360} 1339}
@@ -1391,12 +1370,11 @@ static struct scsi_host_template driver_template = {
1391 .proc_name = DRV_NAME, 1370 .proc_name = DRV_NAME,
1392 .bios_param = stex_biosparam, 1371 .bios_param = stex_biosparam,
1393 .queuecommand = stex_queuecommand, 1372 .queuecommand = stex_queuecommand,
1394 .slave_alloc = stex_slave_alloc,
1395 .slave_configure = stex_slave_config, 1373 .slave_configure = stex_slave_config,
1396 .slave_destroy = stex_slave_destroy,
1397 .eh_abort_handler = stex_abort, 1374 .eh_abort_handler = stex_abort,
1398 .eh_host_reset_handler = stex_reset, 1375 .eh_host_reset_handler = stex_reset,
1399 .this_id = -1, 1376 .this_id = -1,
1377 .use_blk_tags = 1,
1400}; 1378};
1401 1379
1402static struct pci_device_id stex_pci_tbl[] = { 1380static struct pci_device_id stex_pci_tbl[] = {
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 733e5f759518..e3ba251fb6e7 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1097,7 +1097,8 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request)
1097 if (scmnd->result) { 1097 if (scmnd->result) {
1098 if (scsi_normalize_sense(scmnd->sense_buffer, 1098 if (scsi_normalize_sense(scmnd->sense_buffer,
1099 SCSI_SENSE_BUFFERSIZE, &sense_hdr)) 1099 SCSI_SENSE_BUFFERSIZE, &sense_hdr))
1100 scsi_print_sense_hdr("storvsc", &sense_hdr); 1100 scsi_print_sense_hdr(scmnd->device, "storvsc",
1101 &sense_hdr);
1101 } 1102 }
1102 1103
1103 if (vm_srb->srb_status != SRB_STATUS_SUCCESS) 1104 if (vm_srb->srb_status != SRB_STATUS_SUCCESS)
@@ -1428,8 +1429,7 @@ static void storvsc_device_destroy(struct scsi_device *sdevice)
1428 1429
1429static int storvsc_device_configure(struct scsi_device *sdevice) 1430static int storvsc_device_configure(struct scsi_device *sdevice)
1430{ 1431{
1431 scsi_adjust_queue_depth(sdevice, MSG_SIMPLE_TAG, 1432 scsi_change_queue_depth(sdevice, STORVSC_MAX_IO_REQUESTS);
1432 STORVSC_MAX_IO_REQUESTS);
1433 1433
1434 blk_queue_max_segment_size(sdevice->request_queue, PAGE_SIZE); 1434 blk_queue_max_segment_size(sdevice->request_queue, PAGE_SIZE);
1435 1435
diff --git a/drivers/scsi/sun3_NCR5380.c b/drivers/scsi/sun3_NCR5380.c
deleted file mode 100644
index 1a2367a1b1f2..000000000000
--- a/drivers/scsi/sun3_NCR5380.c
+++ /dev/null
@@ -1,2933 +0,0 @@
1/* sun3_NCR5380.c -- adapted from atari_NCR5380.c for the sun3 by
2 Sam Creasey. */
3/*
4 * NCR 5380 generic driver routines. These should make it *trivial*
5 * to implement 5380 SCSI drivers under Linux with a non-trantor
6 * architecture.
7 *
 8 * Note that these routines also work with NCR53c400 family chips.
9 *
10 * Copyright 1993, Drew Eckhardt
11 * Visionary Computing
12 * (Unix and Linux consulting and custom programming)
13 * drew@colorado.edu
14 * +1 (303) 666-5836
15 *
16 * DISTRIBUTION RELEASE 6.
17 *
18 * For more information, please consult
19 *
20 * NCR 5380 Family
21 * SCSI Protocol Controller
22 * Databook
23 *
24 * NCR Microelectronics
25 * 1635 Aeroplaza Drive
26 * Colorado Springs, CO 80916
27 * 1+ (719) 578-3400
28 * 1+ (800) 334-5454
29 */
30
31/*
32 * ++roman: To port the 5380 driver to the Atari, I had to do some changes in
33 * this file, too:
34 *
35 * - Some of the debug statements were incorrect (undefined variables and the
36 * like). I fixed that.
37 *
38 * - In information_transfer(), I think a #ifdef was wrong. Looking at the
39 * possible DMA transfer size should also happen for REAL_DMA. I added this
40 * in the #if statement.
41 *
42 * - When using real DMA, information_transfer() should return in a DATAOUT
43 * phase after starting the DMA. It has nothing more to do.
44 *
45 * - The interrupt service routine should run main after end of DMA, too (not
46 * only after RESELECTION interrupts). Additionally, it should _not_ test
47 * for more interrupts after running main, since a DMA process may have
48 * been started and interrupts are turned on now. The new int could happen
49 * inside the execution of NCR5380_intr(), leading to recursive
50 * calls.
51 *
52 * - I've deleted all the stuff for AUTOPROBE_IRQ, REAL_DMA_POLL, PSEUDO_DMA
53 * and USLEEP, because these were messing up readability and will never be
54 * needed for Atari SCSI.
55 *
56 * - I've revised the NCR5380_main() calling scheme (relax the 'main_running'
57 * stuff), and 'main' is executed in a bottom half if awoken by an
58 * interrupt.
59 *
60 * - The code was quite cluttered up by "#if (NDEBUG & NDEBUG_*) printk..."
61 * constructs. In my eyes, this made the source rather unreadable, so I
62 * finally replaced that by the *_PRINTK() macros.
63 *
64 */
65#include <scsi/scsi_dbg.h>
66#include <scsi/scsi_transport_spi.h>
67
68/*
69 * Further development / testing that should be done :
70 * 1. Test linked command handling code after Eric is ready with
71 * the high level code.
72 */
73
74#if (NDEBUG & NDEBUG_LISTS)
75#define LIST(x,y) \
76 { printk("LINE:%d Adding %p to %p\n", __LINE__, (void*)(x), (void*)(y)); \
77 if ((x)==(y)) udelay(5); }
78#define REMOVE(w,x,y,z) \
79 { printk("LINE:%d Removing: %p->%p %p->%p \n", __LINE__, \
80 (void*)(w), (void*)(x), (void*)(y), (void*)(z)); \
81 if ((x)==(y)) udelay(5); }
82#else
83#define LIST(x,y)
84#define REMOVE(w,x,y,z)
85#endif
86
87#ifndef notyet
88#undef LINKED
89#endif
90
91/*
92 * Design
93 * Issues :
94 *
95 * The other Linux SCSI drivers were written when Linux was Intel PC-only,
96 * and specifically for each board rather than each chip. This makes their
97 * adaptation to platforms like the Mac (Some of which use NCR5380's)
98 * more difficult than it has to be.
99 *
100 * Also, many of the SCSI drivers were written before the command queuing
101 * routines were implemented, meaning their implementations of queued
102 * commands were hacked on rather than designed in from the start.
103 *
104 * When I designed the Linux SCSI drivers I figured that
105 * while having two different SCSI boards in a system might be useful
106 * for debugging things, two of the same type wouldn't be used.
107 * Well, I was wrong and a number of users have mailed me about running
108 * multiple high-performance SCSI boards in a server.
109 *
110 * Finally, when I get questions from users, I have no idea what
111 * revision of my driver they are running.
112 *
113 * This driver attempts to address these problems :
114 * This is a generic 5380 driver. To use it on a different platform,
115 * one simply writes appropriate system specific macros (ie, data
116 * transfer - some PC's will use the I/O bus, 68K's must use
117 * memory mapped) and drops this file in their 'C' wrapper.
118 *
119 * As far as command queueing, two queues are maintained for
120 * each 5380 in the system - commands that haven't been issued yet,
121 * and commands that are currently executing. This means that an
122 * unlimited number of commands may be queued, letting
123 * more commands propagate from the higher driver levels giving higher
124 * throughput. Note that both I_T_L and I_T_L_Q nexuses are supported,
125 * allowing multiple commands to propagate all the way to a SCSI-II device
126 * while a command is already executing.
127 *
128 * To solve the multiple-boards-in-the-same-system problem,
129 * there is a separate instance structure for each instance
130 * of a 5380 in the system. So, multiple NCR5380 drivers will
131 * be able to coexist with appropriate changes to the high level
132 * SCSI code.
133 *
134 * A NCR5380_PUBLIC_REVISION macro is provided, with the release
135 * number (updated for each public release) printed by the
136 * NCR5380_print_options command, which should be called from the
137 * wrapper detect function, so that I know what release of the driver
138 * users are using.
139 *
140 * Issues specific to the NCR5380 :
141 *
142 * When used in a PIO or pseudo-dma mode, the NCR5380 is a braindead
143 * piece of hardware that requires you to sit in a loop polling for
144 * the REQ signal as long as you are connected. Some devices are
145 * brain dead (ie, many TEXEL CD ROM drives) and won't disconnect
146 * while doing long seek operations.
147 *
148 * The workaround for this is to keep track of devices that have
149 * disconnected. If the device hasn't disconnected, for commands that
150 * should disconnect, we do something like
151 *
152 * while (!REQ is asserted) { sleep for N usecs; poll for M usecs }
153 *
154 * Some tweaking of N and M needs to be done. An algorithm based
155 * on "time to data" would give the best results as long as short time
156 * to datas (ie, on the same track) were considered, however these
157 * broken devices are the exception rather than the rule and I'd rather
158 * spend my time optimizing for the normal case.
159 *
160 * Architecture :
161 *
162 * At the heart of the design is a coroutine, NCR5380_main,
163 * which is started when not running by the interrupt handler,
164 * timer, and queue command function. It attempts to establish
165 * I_T_L or I_T_L_Q nexuses by removing the commands from the
166 * issue queue and calling NCR5380_select() if a nexus
167 * is not established.
168 *
169 * Once a nexus is established, the NCR5380_information_transfer()
170 * phase goes through the various phases as instructed by the target.
171 * if the target goes into MSG IN and sends a DISCONNECT message,
172 * the command structure is placed into the per instance disconnected
173 * queue, and NCR5380_main tries to find more work. If USLEEP
174 * was defined, and the target is idle for too long, the system
175 * will try to sleep.
176 *
177 * If a command has disconnected, eventually an interrupt will trigger,
178 * calling NCR5380_intr() which will in turn call NCR5380_reselect
179 * to reestablish a nexus. This will run main if necessary.
180 *
181 * On command termination, the done function will be called as
182 * appropriate.
183 *
184 * SCSI pointers are maintained in the SCp field of SCSI command
185 * structures, being initialized after the command is connected
186 * in NCR5380_select, and set as appropriate in NCR5380_information_transfer.
187 * Note that in violation of the standard, an implicit SAVE POINTERS operation
188 * is done, since some BROKEN disks fail to issue an explicit SAVE POINTERS.
189 */
190
191/*
192 * Using this file :
 193 * This file is a skeleton Linux SCSI driver for the NCR 5380 series
 194 * of chips. To use it, you write architecture specific functions
195 * and macros and include this file in your driver.
196 *
197 * These macros control options :
198 * AUTOSENSE - if defined, REQUEST SENSE will be performed automatically
199 * for commands that return with a CHECK CONDITION status.
200 *
201 * LINKED - if defined, linked commands are supported.
202 *
203 * REAL_DMA - if defined, REAL DMA is used during the data transfer phases.
204 *
205 * SUPPORT_TAGS - if defined, SCSI-2 tagged queuing is used where possible
206 *
207 * These macros MUST be defined :
208 *
209 * NCR5380_read(register) - read from the specified register
210 *
211 * NCR5380_write(register, value) - write to the specific register
212 *
213 * Either real DMA *or* pseudo DMA may be implemented
214 * REAL functions :
215 * NCR5380_REAL_DMA should be defined if real DMA is to be used.
216 * Note that the DMA setup functions should return the number of bytes
217 * that they were able to program the controller for.
218 *
219 * Also note that generic i386/PC versions of these macros are
220 * available as NCR5380_i386_dma_write_setup,
221 * NCR5380_i386_dma_read_setup, and NCR5380_i386_dma_residual.
222 *
223 * NCR5380_dma_write_setup(instance, src, count) - initialize
224 * NCR5380_dma_read_setup(instance, dst, count) - initialize
225 * NCR5380_dma_residual(instance); - residual count
226 *
227 * PSEUDO functions :
228 * NCR5380_pwrite(instance, src, count)
229 * NCR5380_pread(instance, dst, count);
230 *
231 * If nothing specific to this implementation needs doing (ie, with external
232 * hardware), you must also define
233 *
234 * NCR5380_queue_command
235 * NCR5380_reset
236 * NCR5380_abort
237 * NCR5380_proc_info
238 *
239 * to be the global entry points into the specific driver, ie
240 * #define NCR5380_queue_command t128_queue_command.
241 *
242 * If this is not done, the routines will be defined as static functions
243 * with the NCR5380* names and the user must provide a globally
244 * accessible wrapper function.
245 *
246 * The generic driver is initialized by calling NCR5380_init(instance),
247 * after setting the appropriate host specific fields and ID. If the
248 * driver wishes to autoprobe for an IRQ line, the NCR5380_probe_irq(instance,
249 * possible) function may be used. Before the specific driver initialization
250 * code finishes, NCR5380_print_options should be called.
251 */
252
/* First registered host instance and its template; NCR5380_main() always
 * operates on first_instance. */
static struct Scsi_Host *first_instance = NULL;
static struct scsi_host_template *the_template = NULL;

/* Macros ease life... :-) */
/* Declare a local 'hostdata' shortcut pointing at host 'in''s private data. */
#define SETUP_HOSTDATA(in) \
    struct NCR5380_hostdata *hostdata = \
	(struct NCR5380_hostdata *)(in)->hostdata
#define HOSTDATA(in) ((struct NCR5380_hostdata *)(in)->hostdata)

/* Commands are chained into the driver's queues via scsi_cmnd->host_scribble. */
#define NEXT(cmd) ((struct scsi_cmnd *)(cmd)->host_scribble)
#define SET_NEXT(cmd, next) ((cmd)->host_scribble = (void *)(next))
#define NEXTADDR(cmd) ((struct scsi_cmnd **)&((cmd)->host_scribble))

/* Host numbers for log messages. */
#define HOSTNO instance->host_no
#define H_NO(cmd) (cmd)->device->host->host_no

/* Kernel virtual address of a scatterlist entry's buffer. */
#define SGADDR(buffer) (void *)(((unsigned long)sg_virt(((buffer)))))
270
271#ifdef SUPPORT_TAGS
272
273/*
274 * Functions for handling tagged queuing
275 * =====================================
276 *
277 * ++roman (01/96): Now I've implemented SCSI-2 tagged queuing. Some notes:
278 *
279 * Using consecutive numbers for the tags is no good idea in my eyes. There
280 * could be wrong re-usings if the counter (8 bit!) wraps and some early
281 * command has been preempted for a long time. My solution: a bitfield for
282 * remembering used tags.
283 *
284 * There's also the problem that each target has a certain queue size, but we
285 * cannot know it in advance :-( We just see a QUEUE_FULL status being
286 * returned. So, in this case, the driver internal queue size assumption is
287 * reduced to the number of active tags if QUEUE_FULL is returned by the
288 * target. The command is returned to the mid-level, but with status changed
289 * to BUSY, since --as I've seen-- the mid-level can't handle QUEUE_FULL
290 * correctly.
291 *
292 * We're also not allowed running tagged commands as long as an untagged
293 * command is active. And REQUEST SENSE commands after a contingent allegiance
294 * condition _must_ be untagged. To keep track whether an untagged command has
295 * been issued, the host->busy array is still employed, as it is without
296 * support for tagged queuing.
297 *
298 * One could suspect that there are possible race conditions between
299 * is_lun_busy(), cmd_get_tag() and cmd_free_tag(). But I think this isn't the
300 * case: is_lun_busy() and cmd_get_tag() are both called from NCR5380_main(),
301 * which already guaranteed to be running at most once. It is also the only
302 * place where tags/LUNs are allocated. So no other allocation can slip
303 * between that pair, there could only happen a reselection, which can free a
304 * tag, but that doesn't hurt. Only the sequence in cmd_free_tag() becomes
305 * important: the tag bit must be cleared before 'nr_allocated' is decreased.
306 */
307
/* -1 for TAG_NONE is not possible with unsigned char cmd->tag */
#undef TAG_NONE
#define TAG_NONE 0xff

/* For the m68k, the number of bits in 'allocated' must be a multiple of 32! */
#if (MAX_TAGS % 32) != 0
#error "MAX_TAGS must be a multiple of 32!"
#endif

/* Per-(target,LUN) tag bookkeeping: a bitmap of tags currently in flight,
 * the count of allocated tags, and the assumed device queue depth (shrunk
 * when the target returns QUEUE_FULL). */
typedef struct {
    char allocated[MAX_TAGS/8];
    int nr_allocated;
    int queue_size;
} TAG_ALLOC;

static TAG_ALLOC TagAlloc[8][8]; /* 8 targets and 8 LUNs */
324
325
326static void __init init_tags( void )
327{
328 int target, lun;
329 TAG_ALLOC *ta;
330
331 if (!setup_use_tagged_queuing)
332 return;
333
334 for( target = 0; target < 8; ++target ) {
335 for( lun = 0; lun < 8; ++lun ) {
336 ta = &TagAlloc[target][lun];
337 memset( &ta->allocated, 0, MAX_TAGS/8 );
338 ta->nr_allocated = 0;
339 /* At the beginning, assume the maximum queue size we could
340 * support (MAX_TAGS). This value will be decreased if the target
341 * returns QUEUE_FULL status.
342 */
343 ta->queue_size = MAX_TAGS;
344 }
345 }
346}
347
348
349/* Check if we can issue a command to this LUN: First see if the LUN is marked
350 * busy by an untagged command. If the command should use tagged queuing, also
351 * check that there is a free tag and the target's queue won't overflow. This
352 * function should be called with interrupts disabled to avoid race
353 * conditions.
354 */
355
356static int is_lun_busy(struct scsi_cmnd *cmd, int should_be_tagged)
357{
358 u8 lun = cmd->device->lun;
359 SETUP_HOSTDATA(cmd->device->host);
360
361 if (hostdata->busy[cmd->device->id] & (1 << lun))
362 return( 1 );
363 if (!should_be_tagged ||
364 !setup_use_tagged_queuing || !cmd->device->tagged_supported)
365 return( 0 );
366 if (TagAlloc[cmd->device->id][lun].nr_allocated >=
367 TagAlloc[cmd->device->id][lun].queue_size ) {
368 dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d: no free tags\n",
369 H_NO(cmd), cmd->device->id, lun );
370 return( 1 );
371 }
372 return( 0 );
373}
374
375
376/* Allocate a tag for a command (there are no checks anymore, check_lun_busy()
377 * must be called before!), or reserve the LUN in 'busy' if the command is
378 * untagged.
379 */
380
381static void cmd_get_tag(struct scsi_cmnd *cmd, int should_be_tagged)
382{
383 u8 lun = cmd->device->lun;
384 SETUP_HOSTDATA(cmd->device->host);
385
386 /* If we or the target don't support tagged queuing, allocate the LUN for
387 * an untagged command.
388 */
389 if (!should_be_tagged ||
390 !setup_use_tagged_queuing || !cmd->device->tagged_supported) {
391 cmd->tag = TAG_NONE;
392 hostdata->busy[cmd->device->id] |= (1 << lun);
393 dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d now allocated by untagged "
394 "command\n", H_NO(cmd), cmd->device->id, lun );
395 }
396 else {
397 TAG_ALLOC *ta = &TagAlloc[cmd->device->id][lun];
398
399 cmd->tag = find_first_zero_bit( &ta->allocated, MAX_TAGS );
400 set_bit( cmd->tag, &ta->allocated );
401 ta->nr_allocated++;
402 dprintk(NDEBUG_TAGS, "scsi%d: using tag %d for target %d lun %d "
403 "(now %d tags in use)\n",
404 H_NO(cmd), cmd->tag, cmd->device->id, lun,
405 ta->nr_allocated );
406 }
407}
408
409
410/* Mark the tag of command 'cmd' as free, or in case of an untagged command,
411 * unlock the LUN.
412 */
413
414static void cmd_free_tag(struct scsi_cmnd *cmd)
415{
416 u8 lun = cmd->device->lun;
417 SETUP_HOSTDATA(cmd->device->host);
418
419 if (cmd->tag == TAG_NONE) {
420 hostdata->busy[cmd->device->id] &= ~(1 << lun);
421 dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d untagged cmd finished\n",
422 H_NO(cmd), cmd->device->id, lun );
423 }
424 else if (cmd->tag >= MAX_TAGS) {
425 printk(KERN_NOTICE "scsi%d: trying to free bad tag %d!\n",
426 H_NO(cmd), cmd->tag );
427 }
428 else {
429 TAG_ALLOC *ta = &TagAlloc[cmd->device->id][lun];
430 clear_bit( cmd->tag, &ta->allocated );
431 ta->nr_allocated--;
432 dprintk(NDEBUG_TAGS, "scsi%d: freed tag %d for target %d lun %d\n",
433 H_NO(cmd), cmd->tag, cmd->device->id, lun );
434 }
435}
436
437
438static void free_all_tags( void )
439{
440 int target, lun;
441 TAG_ALLOC *ta;
442
443 if (!setup_use_tagged_queuing)
444 return;
445
446 for( target = 0; target < 8; ++target ) {
447 for( lun = 0; lun < 8; ++lun ) {
448 ta = &TagAlloc[target][lun];
449 memset( &ta->allocated, 0, MAX_TAGS/8 );
450 ta->nr_allocated = 0;
451 }
452 }
453}
454
455#endif /* SUPPORT_TAGS */
456
457
458/*
459 * Function : void initialize_SCp(struct scsi_cmnd *cmd)
460 *
461 * Purpose : initialize the saved data pointers for cmd to point to the
462 * start of the buffer.
463 *
464 * Inputs : cmd - struct scsi_cmnd structure to have pointers reset.
465 */
466
467static __inline__ void initialize_SCp(struct scsi_cmnd *cmd)
468{
469 /*
470 * Initialize the Scsi Pointer field so that all of the commands in the
471 * various queues are valid.
472 */
473
474 if (scsi_bufflen(cmd)) {
475 cmd->SCp.buffer = scsi_sglist(cmd);
476 cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1;
477 cmd->SCp.ptr = (char *) SGADDR(cmd->SCp.buffer);
478 cmd->SCp.this_residual = cmd->SCp.buffer->length;
479 } else {
480 cmd->SCp.buffer = NULL;
481 cmd->SCp.buffers_residual = 0;
482 cmd->SCp.ptr = NULL;
483 cmd->SCp.this_residual = 0;
484 }
485
486}
487
488#include <linux/delay.h>
489
490#if NDEBUG
/* Bit-name tables used by NCR5380_print() to decode the chip's status,
 * bus-and-status, initiator-command and mode registers; each table is
 * terminated by a {0, NULL} entry. */
static struct {
    unsigned char mask;
    const char * name;}
signals[] = {{ SR_DBP, "PARITY"}, { SR_RST, "RST" }, { SR_BSY, "BSY" },
    { SR_REQ, "REQ" }, { SR_MSG, "MSG" }, { SR_CD, "CD" }, { SR_IO, "IO" },
    { SR_SEL, "SEL" }, {0, NULL}},
basrs[] = {{BASR_ATN, "ATN"}, {BASR_ACK, "ACK"}, {0, NULL}},
icrs[] = {{ICR_ASSERT_RST, "ASSERT RST"},{ICR_ASSERT_ACK, "ASSERT ACK"},
    {ICR_ASSERT_BSY, "ASSERT BSY"}, {ICR_ASSERT_SEL, "ASSERT SEL"},
    {ICR_ASSERT_ATN, "ASSERT ATN"}, {ICR_ASSERT_DATA, "ASSERT DATA"},
    {0, NULL}},
mrs[] = {{MR_BLOCK_DMA_MODE, "MODE BLOCK DMA"}, {MR_TARGET, "MODE TARGET"},
    {MR_ENABLE_PAR_CHECK, "MODE PARITY CHECK"}, {MR_ENABLE_PAR_INTR,
    "MODE PARITY INTR"}, {MR_ENABLE_EOP_INTR,"MODE EOP INTR"},
    {MR_MONITOR_BSY, "MODE MONITOR BSY"},
    {MR_DMA_MODE, "MODE DMA"}, {MR_ARBITRATE, "MODE ARBITRATION"},
    {0, NULL}};
508
509/*
510 * Function : void NCR5380_print(struct Scsi_Host *instance)
511 *
512 * Purpose : print the SCSI bus signals for debugging purposes
513 *
514 * Input : instance - which NCR5380
515 */
516
/* Print the chip's register state, decoding each register bit by bit via
 * the name tables above. Debug-only (#if NDEBUG). */
static void NCR5380_print(struct Scsi_Host *instance) {
    unsigned char status, data, basr, mr, icr, i;
    unsigned long flags;

    /* Snapshot all registers with interrupts off so the printed state
     * is self-consistent. */
    local_irq_save(flags);
    data = NCR5380_read(CURRENT_SCSI_DATA_REG);
    status = NCR5380_read(STATUS_REG);
    mr = NCR5380_read(MODE_REG);
    icr = NCR5380_read(INITIATOR_COMMAND_REG);
    basr = NCR5380_read(BUS_AND_STATUS_REG);
    local_irq_restore(flags);
    /* For each register, print the raw value then the names of all set bits. */
    printk("STATUS_REG: %02x ", status);
    for (i = 0; signals[i].mask ; ++i)
	if (status & signals[i].mask)
	    printk(",%s", signals[i].name);
    printk("\nBASR: %02x ", basr);
    for (i = 0; basrs[i].mask ; ++i)
	if (basr & basrs[i].mask)
	    printk(",%s", basrs[i].name);
    printk("\nICR: %02x ", icr);
    for (i = 0; icrs[i].mask; ++i)
	if (icr & icrs[i].mask)
	    printk(",%s", icrs[i].name);
    printk("\nMODE: %02x ", mr);
    for (i = 0; mrs[i].mask; ++i)
	if (mr & mrs[i].mask)
	    printk(",%s", mrs[i].name);
    printk("\n");
}
546
/* SCSI information-transfer phase codes with printable names; the
 * PHASE_UNKNOWN entry doubles as the table terminator. */
static struct {
    unsigned char value;
    const char *name;
} phases[] = {
    {PHASE_DATAOUT, "DATAOUT"}, {PHASE_DATAIN, "DATAIN"}, {PHASE_CMDOUT, "CMDOUT"},
    {PHASE_STATIN, "STATIN"}, {PHASE_MSGOUT, "MSGOUT"}, {PHASE_MSGIN, "MSGIN"},
    {PHASE_UNKNOWN, "UNKNOWN"}};
554
555/*
556 * Function : void NCR5380_print_phase(struct Scsi_Host *instance)
557 *
558 * Purpose : print the current SCSI phase for debugging purposes
559 *
560 * Input : instance - which NCR5380
561 */
562
563static void NCR5380_print_phase(struct Scsi_Host *instance)
564{
565 unsigned char status;
566 int i;
567
568 status = NCR5380_read(STATUS_REG);
569 if (!(status & SR_REQ))
570 printk(KERN_DEBUG "scsi%d: REQ not asserted, phase unknown.\n", HOSTNO);
571 else {
572 for (i = 0; (phases[i].value != PHASE_UNKNOWN) &&
573 (phases[i].value != (status & PHASE_MASK)); ++i);
574 printk(KERN_DEBUG "scsi%d: phase %s\n", HOSTNO, phases[i].name);
575 }
576}
577
578#endif
579
580/*
581 * ++roman: New scheme of calling NCR5380_main()
582 *
583 * If we're not in an interrupt, we can call our main directly, it cannot be
584 * already running. Else, we queue it on a task queue, if not 'main_running'
585 * tells us that a lower level is already executing it. This way,
586 * 'main_running' needs not be protected in a special way.
587 *
588 * queue_main() is a utility function for putting our main onto the task
589 * queue, if main_running is false. It should be called only from a
590 * interrupt or bottom half.
591 */
592
593#include <linux/gfp.h>
594#include <linux/workqueue.h>
595#include <linux/interrupt.h>
596
/* Nonzero while NCR5380_main() is executing; read without locking, see
 * the calling-scheme comment above. */
static volatile int main_running = 0;
/* Work item used to defer NCR5380_main() out of interrupt context. */
static DECLARE_WORK(NCR5380_tqueue, NCR5380_main);
599
600static __inline__ void queue_main(void)
601{
602 if (!main_running) {
603 /* If in interrupt and NCR5380_main() not already running,
604 queue it on the 'immediate' task queue, to be processed
605 immediately after the current interrupt processing has
606 finished. */
607 schedule_work(&NCR5380_tqueue);
608 }
609 /* else: nothing to do: the running NCR5380_main() will pick up
610 any newly queued command. */
611}
612
613
614static inline void NCR5380_all_init (void)
615{
616 static int done = 0;
617 if (!done) {
618 dprintk(NDEBUG_INIT, "scsi : NCR5380_all_init()\n");
619 done = 1;
620 }
621}
622
623
624/*
625 * Function : void NCR58380_print_options (struct Scsi_Host *instance)
626 *
627 * Purpose : called by probe code indicating the NCR5380 driver
628 * options that were selected.
629 *
630 * Inputs : instance, pointer to this instance. Unused.
631 */
632
/* Report which compile-time driver options are active; each string
 * fragment below is spliced into the printk literal only when the
 * corresponding feature macro is defined. */
static void __init NCR5380_print_options (struct Scsi_Host *instance)
{
    printk(" generic options"
#ifdef AUTOSENSE
	   " AUTOSENSE"
#endif
#ifdef REAL_DMA
	   " REAL DMA"
#endif
#ifdef PARITY
	   " PARITY"
#endif
#ifdef SUPPORT_TAGS
	   " SCSI-2 TAGGED QUEUING"
#endif
	   );
    printk(" generic release=%d", NCR5380_PUBLIC_RELEASE);
}
651
652/*
653 * Function : void NCR5380_print_status (struct Scsi_Host *instance)
654 *
655 * Purpose : print commands in the various queues, called from
656 * NCR5380_abort and NCR5380_debug to aid debugging.
657 *
658 * Inputs : instance, pointer to this instance.
659 */
660
661static void lprint_Scsi_Cmnd(Scsi_Cmnd *cmd)
662{
663 int i, s;
664 unsigned char *command;
665 printk("scsi%d: destination target %d, lun %llu\n",
666 H_NO(cmd), cmd->device->id, cmd->device->lun);
667 printk(KERN_CONT " command = ");
668 command = cmd->cmnd;
669 printk(KERN_CONT "%2d (0x%02x)", command[0], command[0]);
670 for (i = 1, s = COMMAND_SIZE(command[0]); i < s; ++i)
671 printk(KERN_CONT " %02x", command[i]);
672 printk("\n");
673}
674
675static void NCR5380_print_status(struct Scsi_Host *instance)
676{
677 struct NCR5380_hostdata *hostdata;
678 Scsi_Cmnd *ptr;
679 unsigned long flags;
680
681 NCR5380_dprint(NDEBUG_ANY, instance);
682 NCR5380_dprint_phase(NDEBUG_ANY, instance);
683
684 hostdata = (struct NCR5380_hostdata *)instance->hostdata;
685
686 printk("\nNCR5380 core release=%d.\n", NCR5380_PUBLIC_RELEASE);
687 local_irq_save(flags);
688 printk("NCR5380: coroutine is%s running.\n",
689 main_running ? "" : "n't");
690 if (!hostdata->connected)
691 printk("scsi%d: no currently connected command\n", HOSTNO);
692 else
693 lprint_Scsi_Cmnd((Scsi_Cmnd *) hostdata->connected);
694 printk("scsi%d: issue_queue\n", HOSTNO);
695 for (ptr = (Scsi_Cmnd *)hostdata->issue_queue; ptr; ptr = NEXT(ptr))
696 lprint_Scsi_Cmnd(ptr);
697
698 printk("scsi%d: disconnected_queue\n", HOSTNO);
699 for (ptr = (Scsi_Cmnd *) hostdata->disconnected_queue; ptr;
700 ptr = NEXT(ptr))
701 lprint_Scsi_Cmnd(ptr);
702
703 local_irq_restore(flags);
704 printk("\n");
705}
706
707static void show_Scsi_Cmnd(Scsi_Cmnd *cmd, struct seq_file *m)
708{
709 int i, s;
710 unsigned char *command;
711 seq_printf(m, "scsi%d: destination target %d, lun %llu\n",
712 H_NO(cmd), cmd->device->id, cmd->device->lun);
713 seq_printf(m, " command = ");
714 command = cmd->cmnd;
715 seq_printf(m, "%2d (0x%02x)", command[0], command[0]);
716 for (i = 1, s = COMMAND_SIZE(command[0]); i < s; ++i)
717 seq_printf(m, " %02x", command[i]);
718 seq_printf(m, "\n");
719}
720
721static int NCR5380_show_info(struct seq_file *m, struct Scsi_Host *instance)
722{
723 struct NCR5380_hostdata *hostdata;
724 Scsi_Cmnd *ptr;
725 unsigned long flags;
726
727 hostdata = (struct NCR5380_hostdata *)instance->hostdata;
728
729 seq_printf(m, "NCR5380 core release=%d.\n", NCR5380_PUBLIC_RELEASE);
730 local_irq_save(flags);
731 seq_printf(m, "NCR5380: coroutine is%s running.\n",
732 main_running ? "" : "n't");
733 if (!hostdata->connected)
734 seq_printf(m, "scsi%d: no currently connected command\n", HOSTNO);
735 else
736 show_Scsi_Cmnd((Scsi_Cmnd *) hostdata->connected, m);
737 seq_printf(m, "scsi%d: issue_queue\n", HOSTNO);
738 for (ptr = (Scsi_Cmnd *)hostdata->issue_queue; ptr; ptr = NEXT(ptr))
739 show_Scsi_Cmnd(ptr, m);
740
741 seq_printf(m, "scsi%d: disconnected_queue\n", HOSTNO);
742 for (ptr = (Scsi_Cmnd *) hostdata->disconnected_queue; ptr;
743 ptr = NEXT(ptr))
744 show_Scsi_Cmnd(ptr, m);
745
746 local_irq_restore(flags);
747 return 0;
748}
749
750/*
751 * Function : void NCR5380_init (struct Scsi_Host *instance)
752 *
753 * Purpose : initializes *instance and corresponding 5380 chip.
754 *
755 * Inputs : instance - instantiation of the 5380 driver.
756 *
757 * Notes : I assume that the host, hostno, and id bits have been
758 * set correctly. I don't care about the irq and other fields.
759 *
760 */
761
static int __init NCR5380_init(struct Scsi_Host *instance, int flags)
{
    int i;
    SETUP_HOSTDATA(instance);

    NCR5380_all_init();

    hostdata->aborted = 0;
    hostdata->id_mask = 1 << instance->this_id;
    hostdata->id_higher_mask = 0;
    /* Precompute the mask of all SCSI IDs above our own; used during
     * arbitration (any of these bits on the bus means we lose). */
    for (i = hostdata->id_mask; i <= 0x80; i <<= 1)
	if (i > hostdata->id_mask)
	    hostdata->id_higher_mask |= i;
    /* No target/LUN is busy yet. */
    for (i = 0; i < 8; ++i)
	hostdata->busy[i] = 0;
#ifdef SUPPORT_TAGS
    init_tags();
#endif
#if defined (REAL_DMA)
    hostdata->dma_len = 0;
#endif
    hostdata->targets_present = 0;
    hostdata->connected = NULL;
    hostdata->issue_queue = NULL;
    hostdata->disconnected_queue = NULL;
    hostdata->flags = FLAG_CHECK_LAST_BYTE_SENT;

    /* Remember the first registered instance; NCR5380_main() operates
     * on first_instance. */
    if (!the_template) {
	the_template = instance->hostt;
	first_instance = instance;
    }


#ifndef AUTOSENSE
    if ((instance->cmd_per_lun > 1) || (instance->can_queue > 1))
	printk("scsi%d: WARNING : support for multiple outstanding commands enabled\n"
	       " without AUTOSENSE option, contingent allegiance conditions may\n"
	       " be incorrectly cleared.\n", HOSTNO);
#endif /* def AUTOSENSE */

    /* Put the chip into a known quiescent state: base initiator-command
     * and mode register values, no target command, empty select-enable
     * mask. */
    NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
    NCR5380_write(MODE_REG, MR_BASE);
    NCR5380_write(TARGET_COMMAND_REG, 0);
    NCR5380_write(SELECT_ENABLE_REG, 0);

    return 0;
}
809
/* Teardown counterpart of NCR5380_init(). Intentionally empty: this
 * driver never schedules delayed work, so there is nothing to cancel. */
static void NCR5380_exit(struct Scsi_Host *instance)
{
}
814
815/*
816 * Function : int NCR5380_queue_command (struct scsi_cmnd *cmd,
817 * void (*done)(struct scsi_cmnd *))
818 *
819 * Purpose : enqueues a SCSI command
820 *
821 * Inputs : cmd - SCSI command, done - function called on completion, with
822 * a pointer to the command descriptor.
823 *
824 * Returns : 0
825 *
826 * Side effects :
827 * cmd is added to the per instance issue_queue, with minor
828 * twiddling done to the host specific fields of cmd. If the
829 * main coroutine is not running, it is restarted.
830 *
831 */
832
833/* Only make static if a wrapper function is used */
/* Only make static if a wrapper function is used */
/* Enqueue 'cmd' on the per-instance issue queue (REQUEST SENSE commands
 * go to the head, everything else to the tail) and kick the NCR5380_main
 * coroutine. Always returns 0. */
static int NCR5380_queue_command_lck(struct scsi_cmnd *cmd,
				     void (*done)(struct scsi_cmnd *))
{
    SETUP_HOSTDATA(cmd->device->host);
    struct scsi_cmnd *tmp;
    unsigned long flags;

#if (NDEBUG & NDEBUG_NO_WRITE)
    /* Debug aid: fail all WRITEs immediately instead of touching media. */
    switch (cmd->cmnd[0]) {
    case WRITE_6:
    case WRITE_10:
	printk(KERN_NOTICE "scsi%d: WRITE attempted with NO_WRITE debugging flag set\n",
	       H_NO(cmd));
	cmd->result = (DID_ERROR << 16);
	done(cmd);
	return 0;
    }
#endif /* (NDEBUG & NDEBUG_NO_WRITE) */


#ifdef NCR5380_STATS
# if 0
    if (!hostdata->connected && !hostdata->issue_queue &&
	!hostdata->disconnected_queue) {
	hostdata->timebase = jiffies;
    }
# endif
# ifdef NCR5380_STAT_LIMIT
    if (scsi_bufflen(cmd) > NCR5380_STAT_LIMIT)
# endif
	/* Account read/write byte counts and start times per target. */
	switch (cmd->cmnd[0])
	{
	case WRITE:
	case WRITE_6:
	case WRITE_10:
	    hostdata->time_write[cmd->device->id] -= (jiffies - hostdata->timebase);
	    hostdata->bytes_write[cmd->device->id] += scsi_bufflen(cmd);
	    hostdata->pendingw++;
	    break;
	case READ:
	case READ_6:
	case READ_10:
	    hostdata->time_read[cmd->device->id] -= (jiffies - hostdata->timebase);
	    hostdata->bytes_read[cmd->device->id] += scsi_bufflen(cmd);
	    hostdata->pendingr++;
	    break;
	}
#endif

    /*
     * We use the host_scribble field as a pointer to the next command
     * in a queue
     */

    SET_NEXT(cmd, NULL);
    cmd->scsi_done = done;

    cmd->result = 0;


    /*
     * Insert the cmd into the issue queue. Note that REQUEST SENSE
     * commands are added to the head of the queue since any command will
     * clear the contingent allegiance condition that exists and the
     * sense data is only guaranteed to be valid while the condition exists.
     */

    local_irq_save(flags);
    /* ++guenther: now that the issue queue is being set up, we can lock ST-DMA.
     * Otherwise a running NCR5380_main may steal the lock.
     * Lock before actually inserting due to fairness reasons explained in
     * atari_scsi.c. If we insert first, then it's impossible for this driver
     * to release the lock.
     * Stop timer for this command while waiting for the lock, or timeouts
     * may happen (and they really do), and it's no good if the command doesn't
     * appear in any of the queues.
     * ++roman: Just disabling the NCR interrupt isn't sufficient here,
     * because also a timer int can trigger an abort or reset, which would
     * alter queues and touch the lock.
     */
    if (!(hostdata->issue_queue) || (cmd->cmnd[0] == REQUEST_SENSE)) {
	LIST(cmd, hostdata->issue_queue);
	SET_NEXT(cmd, hostdata->issue_queue);
	hostdata->issue_queue = cmd;
    } else {
	/* Walk to the tail of the singly linked issue queue. */
	for (tmp = (struct scsi_cmnd *)hostdata->issue_queue;
	     NEXT(tmp); tmp = NEXT(tmp))
	    ;
	LIST(cmd, tmp);
	SET_NEXT(tmp, cmd);
    }

    local_irq_restore(flags);

    dprintk(NDEBUG_QUEUES, "scsi%d: command added to %s of queue\n", H_NO(cmd),
	    (cmd->cmnd[0] == REQUEST_SENSE) ? "head" : "tail");

    /* If queue_command() is called from an interrupt (real one or bottom
     * half), we let queue_main() do the job of taking care about main. If it
     * is already running, this is a no-op, else main will be queued.
     *
     * If we're not in an interrupt, we can call NCR5380_main()
     * unconditionally, because it cannot be already running.
     */
    /* NOTE(review): '((flags >> 8) & 7) >= 6' appears to test the m68k
     * status-register interrupt priority level saved by local_irq_save()
     * above - i.e. treat "IRQs were already masked" like interrupt
     * context. Architecture-specific; confirm against <asm/irqflags.h>. */
    if (in_interrupt() || ((flags >> 8) & 7) >= 6)
	queue_main();
    else
	NCR5380_main(NULL);
    return 0;
}
944
/* Generate the SCSI midlayer entry point for this host's queuecommand;
 * DEF_SCSI_QCMD wraps the _lck variant with the expected locking
 * boilerplate (see scsi/scsi_host.h for the exact expansion). */
static DEF_SCSI_QCMD(NCR5380_queue_command)
946
947/*
948 * Function : NCR5380_main (void)
949 *
950 * Purpose : NCR5380_main is a coroutine that runs as long as more work can
951 * be done on the NCR5380 host adapters in a system. Both
952 * NCR5380_queue_command() and NCR5380_intr() will try to start it
953 * in case it is not running.
954 *
955 * NOTE : NCR5380_main exits with interrupts *disabled*, the caller should
956 * reenable them. This prevents reentrancy and kernel stack overflow.
957 */
958
static void NCR5380_main (struct work_struct *bl)
{
	struct scsi_cmnd *tmp, *prev;
	struct Scsi_Host *instance = first_instance;
	struct NCR5380_hostdata *hostdata = HOSTDATA(instance);
	int done;
	unsigned long flags;

	/*
	 * We run (with interrupts disabled) until we're sure that none of
	 * the host adapters have anything that can be done, at which point
	 * we set main_running to 0 and exit.
	 *
	 * Interrupts are enabled before doing various other internal
	 * instructions, after we've decided that we need to run through
	 * the loop again.
	 *
	 * this should prevent any race conditions.
	 *
	 * ++roman: Just disabling the NCR interrupt isn't sufficient here,
	 * because also a timer int can trigger an abort or reset, which can
	 * alter queues and touch the Falcon lock.
	 */

	/* Tell int handlers main() is now already executing.  Note that
	   no races are possible here. If an int comes in before
	   'main_running' is set here, and queues/executes main via the
	   task queue, it doesn't do any harm, just this instance of main
	   won't find any work left to do. */
	if (main_running)
		return;
	main_running = 1;

	/* Snapshot the current interrupt state; each local_irq_restore(flags)
	 * below returns to exactly this state after a critical section. */
	local_save_flags(flags);
	do {
		local_irq_disable();	/* Freeze request queues */
		done = 1;

		if (!hostdata->connected) {
			dprintk(NDEBUG_MAIN, "scsi%d: not connected\n", HOSTNO );
			/*
			 * Search through the issue_queue for a command destined
			 * for a target that's not busy.
			 */
#if (NDEBUG & NDEBUG_LISTS)
			/* Debug-only sanity walk: detect a self-referencing
			 * (looping) entry in the singly-linked issue queue. */
			for (tmp = (struct scsi_cmnd *) hostdata->issue_queue, prev = NULL;
			     tmp && (tmp != prev); prev = tmp, tmp = NEXT(tmp))
				;
			if ((tmp == prev) && tmp) printk(" LOOP\n");/* else printk("\n");*/
#endif
			for (tmp = (struct scsi_cmnd *) hostdata->issue_queue,
			     prev = NULL; tmp; prev = tmp, tmp = NEXT(tmp) ) {

				if (prev != tmp)
					dprintk(NDEBUG_LISTS, "MAIN tmp=%p target=%d busy=%d lun=%llu\n", tmp, tmp->device->id, hostdata->busy[tmp->device->id], tmp->device->lun);
				/* When we find one, remove it from the issue queue. */
				/* ++guenther: possible race with Falcon locking */
				if (
#ifdef SUPPORT_TAGS
				    !is_lun_busy( tmp, tmp->cmnd[0] != REQUEST_SENSE)
#else
				    !(hostdata->busy[tmp->device->id] & (1 << tmp->device->lun))
#endif
				    ) {
					/* ++guenther: just to be sure, this must be atomic */
					local_irq_disable();
					if (prev) {
						REMOVE(prev, NEXT(prev), tmp, NEXT(tmp));
						SET_NEXT(prev, NEXT(tmp));
					} else {
						REMOVE(-1, hostdata->issue_queue, tmp, NEXT(tmp));
						hostdata->issue_queue = NEXT(tmp);
					}
					SET_NEXT(tmp, NULL);

					/* reenable interrupts after finding one */
					local_irq_restore(flags);

					/*
					 * Attempt to establish an I_T_L nexus here.
					 * On success, instance->hostdata->connected is set.
					 * On failure, we must add the command back to the
					 * issue queue so we can keep trying.
					 */
					dprintk(NDEBUG_MAIN, "scsi%d: main(): command for target %d "
						    "lun %llu removed from issue_queue\n",
						    HOSTNO, tmp->device->id, tmp->device->lun);
					/*
					 * REQUEST SENSE commands are issued without tagged
					 * queueing, even on SCSI-II devices because the
					 * contingent allegiance condition exists for the
					 * entire unit.
					 */
					/* ++roman: ...and the standard also requires that
					 * REQUEST SENSE command are untagged.
					 */

#ifdef SUPPORT_TAGS
					cmd_get_tag( tmp, tmp->cmnd[0] != REQUEST_SENSE );
#endif
					/* NCR5380_select() returns 0 when a nexus was
					 * established (or the target did not answer at
					 * all); nonzero means arbitration/selection must
					 * be retried, so put the command back on the
					 * issue queue. */
					if (!NCR5380_select(instance, tmp,
					    (tmp->cmnd[0] == REQUEST_SENSE) ? TAG_NONE :
					    TAG_NEXT)) {
						break;
					} else {
						local_irq_disable();
						LIST(tmp, hostdata->issue_queue);
						SET_NEXT(tmp, hostdata->issue_queue);
						hostdata->issue_queue = tmp;
#ifdef SUPPORT_TAGS
						cmd_free_tag( tmp );
#endif
						local_irq_restore(flags);
						dprintk(NDEBUG_MAIN, "scsi%d: main(): select() failed, "
							    "returned to issue_queue\n", HOSTNO);
						if (hostdata->connected)
							break;
					}
				} /* if target/lun/target queue is not busy */
			} /* for issue_queue */
		} /* if (!hostdata->connected) */
		/* With a nexus established (and no DMA in flight), run the
		 * SCSI phases for the connected command. */
		if (hostdata->connected
#ifdef REAL_DMA
		    && !hostdata->dma_len
#endif
		    ) {
			local_irq_restore(flags);
			dprintk(NDEBUG_MAIN, "scsi%d: main: performing information transfer\n",
				    HOSTNO);
			NCR5380_information_transfer(instance);
			dprintk(NDEBUG_MAIN, "scsi%d: main: done set false\n", HOSTNO);
			done = 0;
		}
	} while (!done);

	/* Better allow ints _after_ 'main_running' has been cleared, else
	   an interrupt could believe we'll pick up the work it left for
	   us, but we won't see it anymore here... */
	main_running = 0;
	local_irq_restore(flags);
}
1100
1101
1102#ifdef REAL_DMA
1103/*
1104 * Function : void NCR5380_dma_complete (struct Scsi_Host *instance)
1105 *
1106 * Purpose : Called by interrupt handler when DMA finishes or a phase
1107 * mismatch occurs (which would finish the DMA transfer).
1108 *
1109 * Inputs : instance - this instance of the NCR5380.
1110 *
1111 */
1112
static void NCR5380_dma_complete( struct Scsi_Host *instance )
{
	/* SETUP_HOSTDATA declares and initializes the local 'hostdata'. */
	SETUP_HOSTDATA(instance);
	int transfered;
	unsigned char **data;
	volatile int *count;

	if (!hostdata->connected) {
		printk(KERN_WARNING "scsi%d: received end of DMA interrupt with "
		       "no connected cmd\n", HOSTNO);
		return;
	}

	dprintk(NDEBUG_DMA, "scsi%d: real DMA transfer complete, basr 0x%X, sr 0x%X\n",
		   HOSTNO, NCR5380_read(BUS_AND_STATUS_REG),
		   NCR5380_read(STATUS_REG));

	/* Finish the platform DMA engine; a nonzero return signals a UDC
	 * byte-counter overrun for which there is no recovery path. */
	if((sun3scsi_dma_finish(rq_data_dir(hostdata->connected->request)))) {
		printk("scsi%d: overrun in UDC counter -- not prepared to deal with this!\n", HOSTNO);
		printk("please e-mail sammy@sammy.net with a description of how this\n");
		printk("error was produced.\n");
		BUG();
	}

	/* make sure we're not stuck in a data phase */
	if((NCR5380_read(BUS_AND_STATUS_REG) & (BASR_PHASE_MATCH |
						BASR_ACK)) ==
	   (BASR_PHASE_MATCH | BASR_ACK)) {
		printk("scsi%d: BASR %02x\n", HOSTNO, NCR5380_read(BUS_AND_STATUS_REG));
		printk("scsi%d: bus stuck in data phase -- probably a single byte "
		       "overrun!\n", HOSTNO);
		printk("not prepared for this error!\n");
		printk("please e-mail sammy@sammy.net with a description of how this\n");
		printk("error was produced.\n");
		BUG();
	}



	/* Clear any latched parity interrupt and take the chip out of DMA
	 * mode before reading the residual count. */
	(void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
	NCR5380_write(MODE_REG, MR_BASE);
	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);

	/* Bytes actually moved = programmed length minus chip residual
	 * ('transfered' [sic] keeps the driver's historical spelling). */
	transfered = hostdata->dma_len - NCR5380_dma_residual(instance);
	hostdata->dma_len = 0;

	/* Advance the connected command's scatter pointer/residual by the
	 * amount transferred. */
	data = (unsigned char **) &(hostdata->connected->SCp.ptr);
	count = &(hostdata->connected->SCp.this_residual);
	*data += transfered;
	*count -= transfered;

}
1165#endif /* REAL_DMA */
1166
1167
1168/*
1169 * Function : void NCR5380_intr (int irq)
1170 *
1171 * Purpose : handle interrupts, reestablishing I_T_L or I_T_L_Q nexuses
1172 * from the disconnected queue, and restarting NCR5380_main()
1173 * as required.
1174 *
1175 * Inputs : int irq, irq that caused this interrupt.
1176 *
1177 */
1178
static irqreturn_t NCR5380_intr (int irq, void *dev_id)
{
	struct Scsi_Host *instance = first_instance;
	int done = 1, handled = 0;
	unsigned char basr;

	dprintk(NDEBUG_INTR, "scsi%d: NCR5380 irq triggered\n", HOSTNO);

	/* Look for pending interrupts */
	basr = NCR5380_read(BUS_AND_STATUS_REG);
	dprintk(NDEBUG_INTR, "scsi%d: BASR=%02x\n", HOSTNO, basr);
	/* dispatch to appropriate routine if found and done=0 */
	if (basr & BASR_IRQ) {
		NCR5380_dprint(NDEBUG_INTR, instance);
		/* SEL and IO asserted together: a target is reselecting us. */
		if ((NCR5380_read(STATUS_REG) & (SR_SEL|SR_IO)) == (SR_SEL|SR_IO)) {
			done = 0;
//			ENABLE_IRQ();
			dprintk(NDEBUG_INTR, "scsi%d: SEL interrupt\n", HOSTNO);
			NCR5380_reselect(instance);
			(void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
		}
		else if (basr & BASR_PARITY_ERROR) {
			/* Parity errors are only acknowledged (cleared), not
			 * acted upon. */
			dprintk(NDEBUG_INTR, "scsi%d: PARITY interrupt\n", HOSTNO);
			(void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
		}
		else if ((NCR5380_read(STATUS_REG) & SR_RST) == SR_RST) {
			dprintk(NDEBUG_INTR, "scsi%d: RESET interrupt\n", HOSTNO);
			(void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
		}
		else {
			/*
			 * The rest of the interrupt conditions can occur only during a
			 * DMA transfer
			 */

#if defined(REAL_DMA)
			/*
			 * We should only get PHASE MISMATCH and EOP interrupts if we have
			 * DMA enabled, so do a sanity check based on the current setting
			 * of the MODE register.
			 */

			if ((NCR5380_read(MODE_REG) & MR_DMA_MODE) &&
			    ((basr & BASR_END_DMA_TRANSFER) ||
			     !(basr & BASR_PHASE_MATCH))) {

				dprintk(NDEBUG_INTR, "scsi%d: PHASE MISM or EOP interrupt\n", HOSTNO);
				NCR5380_dma_complete( instance );
				done = 0;
//				ENABLE_IRQ();
			} else
#endif /* REAL_DMA */
			{
/* MS: Ignore unknown phase mismatch interrupts (caused by EOP interrupt) */
				if (basr & BASR_PHASE_MATCH)
					dprintk(NDEBUG_INTR, "scsi%d: unknown interrupt, "
					       "BASR 0x%x, MR 0x%x, SR 0x%x\n",
					       HOSTNO, basr, NCR5380_read(MODE_REG),
					       NCR5380_read(STATUS_REG));
				(void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
#ifdef SUN3_SCSI_VME
				dregs->csr |= CSR_DMA_ENABLE;
#endif
			}
		} /* if !(SELECTION || PARITY) */
		handled = 1;
	} /* BASR & IRQ */
	else {
		/* Spurious interrupt: IRQ line fired but the chip reports no
		 * pending condition.  Log and clear just in case. */
		printk(KERN_NOTICE "scsi%d: interrupt without IRQ bit set in BASR, "
		       "BASR 0x%X, MR 0x%X, SR 0x%x\n", HOSTNO, basr,
		       NCR5380_read(MODE_REG), NCR5380_read(STATUS_REG));
		(void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
#ifdef SUN3_SCSI_VME
		dregs->csr |= CSR_DMA_ENABLE;
#endif
	}

	if (!done) {
		dprintk(NDEBUG_INTR, "scsi%d: in int routine, calling main\n", HOSTNO);
		/* Put a call to NCR5380_main() on the queue... */
		queue_main();
	}
	return IRQ_RETVAL(handled);
}
1264
1265#ifdef NCR5380_STATS
1266static void collect_stats(struct NCR5380_hostdata *hostdata,
1267 struct scsi_cmnd *cmd)
1268{
1269# ifdef NCR5380_STAT_LIMIT
1270 if (scsi_bufflen(cmd) > NCR5380_STAT_LIMIT)
1271# endif
1272 switch (cmd->cmnd[0])
1273 {
1274 case WRITE:
1275 case WRITE_6:
1276 case WRITE_10:
1277 hostdata->time_write[cmd->device->id] += (jiffies - hostdata->timebase);
1278 /*hostdata->bytes_write[cmd->device->id] += scsi_bufflen(cmd);*/
1279 hostdata->pendingw--;
1280 break;
1281 case READ:
1282 case READ_6:
1283 case READ_10:
1284 hostdata->time_read[cmd->device->id] += (jiffies - hostdata->timebase);
1285 /*hostdata->bytes_read[cmd->device->id] += scsi_bufflen(cmd);*/
1286 hostdata->pendingr--;
1287 break;
1288 }
1289}
1290#endif
1291
1292/*
1293 * Function : int NCR5380_select(struct Scsi_Host *instance,
1294 * struct scsi_cmnd *cmd, int tag);
1295 *
1296 * Purpose : establishes I_T_L or I_T_L_Q nexus for new or existing command,
1297 * including ARBITRATION, SELECTION, and initial message out for
1298 * IDENTIFY and queue messages.
1299 *
1300 * Inputs : instance - instantiation of the 5380 driver on which this
1301 * target lives, cmd - SCSI command to execute, tag - set to TAG_NEXT for
1302 * new tag, TAG_NONE for untagged queueing, otherwise set to the tag for
1303 * the command that is presently connected.
1304 *
1305 * Returns : -1 if selection could not execute for some reason,
1306 * 0 if selection succeeded or failed because the target
1307 * did not respond.
1308 *
1309 * Side effects :
1310 * If bus busy, arbitration failed, etc, NCR5380_select() will exit
1311 * with registers as they should have been on entry - ie
1312 * SELECT_ENABLE will be set appropriately, the NCR5380
1313 * will cease to drive any SCSI bus signals.
1314 *
1315 * If successful : I_T_L or I_T_L_Q nexus will be established,
1316 * instance->connected will be set to cmd.
1317 * SELECT interrupt will be disabled.
1318 *
1319 * If failed (no target) : cmd->scsi_done() will be called, and the
1320 * cmd->result host byte set to DID_BAD_TARGET.
1321 */
1322
1323static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd,
1324 int tag)
1325{
1326 SETUP_HOSTDATA(instance);
1327 unsigned char tmp[3], phase;
1328 unsigned char *data;
1329 int len;
1330 unsigned long timeout;
1331 unsigned long flags;
1332
1333 hostdata->restart_select = 0;
1334 NCR5380_dprint(NDEBUG_ARBITRATION, instance);
1335 dprintk(NDEBUG_ARBITRATION, "scsi%d: starting arbitration, id = %d\n", HOSTNO,
1336 instance->this_id);
1337
1338 /*
1339 * Set the phase bits to 0, otherwise the NCR5380 won't drive the
1340 * data bus during SELECTION.
1341 */
1342
1343 local_irq_save(flags);
1344 if (hostdata->connected) {
1345 local_irq_restore(flags);
1346 return -1;
1347 }
1348 NCR5380_write(TARGET_COMMAND_REG, 0);
1349
1350
1351 /*
1352 * Start arbitration.
1353 */
1354
1355 NCR5380_write(OUTPUT_DATA_REG, hostdata->id_mask);
1356 NCR5380_write(MODE_REG, MR_ARBITRATE);
1357
1358 local_irq_restore(flags);
1359
1360 /* Wait for arbitration logic to complete */
1361#ifdef NCR_TIMEOUT
1362 {
1363 unsigned long timeout = jiffies + 2*NCR_TIMEOUT;
1364
1365 while (!(NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_PROGRESS)
1366 && time_before(jiffies, timeout) && !hostdata->connected)
1367 ;
1368 if (time_after_eq(jiffies, timeout))
1369 {
1370 printk("scsi : arbitration timeout at %d\n", __LINE__);
1371 NCR5380_write(MODE_REG, MR_BASE);
1372 NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
1373 return -1;
1374 }
1375 }
1376#else /* NCR_TIMEOUT */
1377 while (!(NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_PROGRESS)
1378 && !hostdata->connected);
1379#endif
1380
1381 dprintk(NDEBUG_ARBITRATION, "scsi%d: arbitration complete\n", HOSTNO);
1382
1383 if (hostdata->connected) {
1384 NCR5380_write(MODE_REG, MR_BASE);
1385 return -1;
1386 }
1387 /*
1388 * The arbitration delay is 2.2us, but this is a minimum and there is
1389 * no maximum so we can safely sleep for ceil(2.2) usecs to accommodate
1390 * the integral nature of udelay().
1391 *
1392 */
1393
1394 udelay(3);
1395
1396 /* Check for lost arbitration */
1397 if ((NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) ||
1398 (NCR5380_read(CURRENT_SCSI_DATA_REG) & hostdata->id_higher_mask) ||
1399 (NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) ||
1400 hostdata->connected) {
1401 NCR5380_write(MODE_REG, MR_BASE);
1402 dprintk(NDEBUG_ARBITRATION, "scsi%d: lost arbitration, deasserting MR_ARBITRATE\n",
1403 HOSTNO);
1404 return -1;
1405 }
1406
1407 /* after/during arbitration, BSY should be asserted.
1408 IBM DPES-31080 Version S31Q works now */
1409 /* Tnx to Thomas_Roesch@m2.maus.de for finding this! (Roman) */
1410 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_SEL |
1411 ICR_ASSERT_BSY ) ;
1412
1413 if ((NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) ||
1414 hostdata->connected) {
1415 NCR5380_write(MODE_REG, MR_BASE);
1416 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
1417 dprintk(NDEBUG_ARBITRATION, "scsi%d: lost arbitration, deasserting ICR_ASSERT_SEL\n",
1418 HOSTNO);
1419 return -1;
1420 }
1421
1422 /*
1423 * Again, bus clear + bus settle time is 1.2us, however, this is
1424 * a minimum so we'll udelay ceil(1.2)
1425 */
1426
1427#ifdef CONFIG_ATARI_SCSI_TOSHIBA_DELAY
1428 /* ++roman: But some targets (see above :-) seem to need a bit more... */
1429 udelay(15);
1430#else
1431 udelay(2);
1432#endif
1433
1434 if (hostdata->connected) {
1435 NCR5380_write(MODE_REG, MR_BASE);
1436 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
1437 return -1;
1438 }
1439
1440 dprintk(NDEBUG_ARBITRATION, "scsi%d: won arbitration\n", HOSTNO);
1441
1442 /*
1443 * Now that we have won arbitration, start Selection process, asserting
1444 * the host and target ID's on the SCSI bus.
1445 */
1446
1447 NCR5380_write(OUTPUT_DATA_REG, (hostdata->id_mask | (1 << cmd->device->id)));
1448
1449 /*
1450 * Raise ATN while SEL is true before BSY goes false from arbitration,
1451 * since this is the only way to guarantee that we'll get a MESSAGE OUT
1452 * phase immediately after selection.
1453 */
1454
1455 NCR5380_write(INITIATOR_COMMAND_REG, (ICR_BASE | ICR_ASSERT_BSY |
1456 ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_SEL ));
1457 NCR5380_write(MODE_REG, MR_BASE);
1458
1459 /*
1460 * Reselect interrupts must be turned off prior to the dropping of BSY,
1461 * otherwise we will trigger an interrupt.
1462 */
1463
1464 if (hostdata->connected) {
1465 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
1466 return -1;
1467 }
1468
1469 NCR5380_write(SELECT_ENABLE_REG, 0);
1470
1471 /*
1472 * The initiator shall then wait at least two deskew delays and release
1473 * the BSY signal.
1474 */
1475 udelay(1); /* wingel -- wait two bus deskew delay >2*45ns */
1476
1477 /* Reset BSY */
1478 NCR5380_write(INITIATOR_COMMAND_REG, (ICR_BASE | ICR_ASSERT_DATA |
1479 ICR_ASSERT_ATN | ICR_ASSERT_SEL));
1480
1481 /*
1482 * Something weird happens when we cease to drive BSY - looks
1483 * like the board/chip is letting us do another read before the
1484 * appropriate propagation delay has expired, and we're confusing
1485 * a BSY signal from ourselves as the target's response to SELECTION.
1486 *
1487 * A small delay (the 'C++' frontend breaks the pipeline with an
1488 * unnecessary jump, making it work on my 386-33/Trantor T128, the
1489 * tighter 'C' code breaks and requires this) solves the problem -
1490 * the 1 us delay is arbitrary, and only used because this delay will
1491 * be the same on other platforms and since it works here, it should
1492 * work there.
1493 *
1494 * wingel suggests that this could be due to failing to wait
1495 * one deskew delay.
1496 */
1497
1498 udelay(1);
1499
1500 dprintk(NDEBUG_SELECTION, "scsi%d: selecting target %d\n", HOSTNO, cmd->device->id);
1501
1502 /*
1503 * The SCSI specification calls for a 250 ms timeout for the actual
1504 * selection.
1505 */
1506
1507 timeout = jiffies + 25;
1508
1509 /*
1510 * XXX very interesting - we're seeing a bounce where the BSY we
1511 * asserted is being reflected / still asserted (propagation delay?)
1512 * and it's detecting as true. Sigh.
1513 */
1514
1515#if 0
1516 /* ++roman: If a target conformed to the SCSI standard, it wouldn't assert
1517 * IO while SEL is true. But again, there are some disks out the in the
1518 * world that do that nevertheless. (Somebody claimed that this announces
1519 * reselection capability of the target.) So we better skip that test and
1520 * only wait for BSY... (Famous german words: Der Klügere gibt nach :-)
1521 */
1522
1523 while (time_before(jiffies, timeout) && !(NCR5380_read(STATUS_REG) &
1524 (SR_BSY | SR_IO)));
1525
1526 if ((NCR5380_read(STATUS_REG) & (SR_SEL | SR_IO)) ==
1527 (SR_SEL | SR_IO)) {
1528 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
1529 NCR5380_reselect(instance);
1530 printk (KERN_ERR "scsi%d: reselection after won arbitration?\n",
1531 HOSTNO);
1532 NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
1533 return -1;
1534 }
1535#else
1536 while (time_before(jiffies, timeout) && !(NCR5380_read(STATUS_REG) & SR_BSY));
1537#endif
1538
1539 /*
1540 * No less than two deskew delays after the initiator detects the
1541 * BSY signal is true, it shall release the SEL signal and may
1542 * change the DATA BUS. -wingel
1543 */
1544
1545 udelay(1);
1546
1547 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
1548
1549 if (!(NCR5380_read(STATUS_REG) & SR_BSY)) {
1550 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
1551 if (hostdata->targets_present & (1 << cmd->device->id)) {
1552 printk(KERN_ERR "scsi%d: weirdness\n", HOSTNO);
1553 if (hostdata->restart_select)
1554 printk(KERN_NOTICE "\trestart select\n");
1555 NCR5380_dprint(NDEBUG_ANY, instance);
1556 NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
1557 return -1;
1558 }
1559 cmd->result = DID_BAD_TARGET << 16;
1560#ifdef NCR5380_STATS
1561 collect_stats(hostdata, cmd);
1562#endif
1563#ifdef SUPPORT_TAGS
1564 cmd_free_tag( cmd );
1565#endif
1566 cmd->scsi_done(cmd);
1567 NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
1568 dprintk(NDEBUG_SELECTION, "scsi%d: target did not respond within 250ms\n", HOSTNO);
1569 NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
1570 return 0;
1571 }
1572
1573 hostdata->targets_present |= (1 << cmd->device->id);
1574
1575 /*
1576 * Since we followed the SCSI spec, and raised ATN while SEL
1577 * was true but before BSY was false during selection, the information
1578 * transfer phase should be a MESSAGE OUT phase so that we can send the
1579 * IDENTIFY message.
1580 *
1581 * If SCSI-II tagged queuing is enabled, we also send a SIMPLE_QUEUE_TAG
1582 * message (2 bytes) with a tag ID that we increment with every command
1583 * until it wraps back to 0.
1584 *
1585 * XXX - it turns out that there are some broken SCSI-II devices,
1586 * which claim to support tagged queuing but fail when more than
1587 * some number of commands are issued at once.
1588 */
1589
1590 /* Wait for start of REQ/ACK handshake */
1591 while (!(NCR5380_read(STATUS_REG) & SR_REQ));
1592
1593 dprintk(NDEBUG_SELECTION, "scsi%d: target %d selected, going into MESSAGE OUT phase.\n",
1594 HOSTNO, cmd->device->id);
1595 tmp[0] = IDENTIFY(1, cmd->device->lun);
1596
1597#ifdef SUPPORT_TAGS
1598 if (cmd->tag != TAG_NONE) {
1599 tmp[1] = hostdata->last_message = SIMPLE_QUEUE_TAG;
1600 tmp[2] = cmd->tag;
1601 len = 3;
1602 } else
1603 len = 1;
1604#else
1605 len = 1;
1606 cmd->tag=0;
1607#endif /* SUPPORT_TAGS */
1608
1609 /* Send message(s) */
1610 data = tmp;
1611 phase = PHASE_MSGOUT;
1612 NCR5380_transfer_pio(instance, &phase, &len, &data);
1613 dprintk(NDEBUG_SELECTION, "scsi%d: nexus established.\n", HOSTNO);
1614 /* XXX need to handle errors here */
1615 hostdata->connected = cmd;
1616#ifndef SUPPORT_TAGS
1617 hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun);
1618#endif
1619#ifdef SUN3_SCSI_VME
1620 dregs->csr |= CSR_INTR;
1621#endif
1622 initialize_SCp(cmd);
1623
1624
1625 return 0;
1626}
1627
1628/*
1629 * Function : int NCR5380_transfer_pio (struct Scsi_Host *instance,
1630 * unsigned char *phase, int *count, unsigned char **data)
1631 *
1632 * Purpose : transfers data in given phase using polled I/O
1633 *
1634 * Inputs : instance - instance of driver, *phase - pointer to
1635 * what phase is expected, *count - pointer to number of
1636 * bytes to transfer, **data - pointer to data pointer.
1637 *
1638 * Returns : -1 when different phase is entered without transferring
1639 * maximum number of bytes, 0 if all bytes are transferred or exit
1640 * is in same phase.
1641 *
1642 * Also, *phase, *count, *data are modified in place.
1643 *
1644 * XXX Note : handling for bus free may be useful.
1645 */
1646
1647/*
1648 * Note : this code is not as quick as it could be, however it
1649 * IS 100% reliable, and for the actual data transfer where speed
1650 * counts, we will always do a pseudo DMA or DMA transfer.
1651 */
1652
static int NCR5380_transfer_pio( struct Scsi_Host *instance,
				 unsigned char *phase, int *count,
				 unsigned char **data)
{
	register unsigned char p = *phase, tmp;
	register int c = *count;
	register unsigned char *d = *data;

	/*
	 * The NCR5380 chip will only drive the SCSI bus when the
	 * phase specified in the appropriate bits of the TARGET COMMAND
	 * REGISTER match the STATUS REGISTER
	 */

	NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(p));

	do {
		/*
		 * Wait for assertion of REQ, after which the phase bits will be
		 * valid
		 */
		/* NOTE(review): unbounded busy-wait -- a dead target would
		 * spin here forever. */
		while (!((tmp = NCR5380_read(STATUS_REG)) & SR_REQ));

		dprintk(NDEBUG_HANDSHAKE, "scsi%d: REQ detected\n", HOSTNO);

		/* Check for phase mismatch */
		if ((tmp & PHASE_MASK) != p) {
			dprintk(NDEBUG_PIO, "scsi%d: phase mismatch\n", HOSTNO);
			NCR5380_dprint_phase(NDEBUG_PIO, instance);
			break;
		}

		/* Do actual transfer from SCSI bus to / from memory */
		if (!(p & SR_IO))
			NCR5380_write(OUTPUT_DATA_REG, *d);
		else
			*d = NCR5380_read(CURRENT_SCSI_DATA_REG);

		++d;

		/*
		 * The SCSI standard suggests that in MSGOUT phase, the initiator
		 * should drop ATN on the last byte of the message phase
		 * after REQ has been asserted for the handshake but before
		 * the initiator raises ACK.
		 */

		if (!(p & SR_IO)) {
			if (!((p & SR_MSG) && c > 1)) {
				NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
					      ICR_ASSERT_DATA);
				NCR5380_dprint(NDEBUG_PIO, instance);
				NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
					      ICR_ASSERT_DATA | ICR_ASSERT_ACK);
			} else {
				/* Not the last message byte: keep ATN up while
				 * asserting ACK. */
				NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
					      ICR_ASSERT_DATA | ICR_ASSERT_ATN);
				NCR5380_dprint(NDEBUG_PIO, instance);
				NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
					      ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_ACK);
			}
		} else {
			/* Input byte was latched above; raise ACK to complete
			 * the handshake. */
			NCR5380_dprint(NDEBUG_PIO, instance);
			NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK);
		}

		/* Wait for the target to drop REQ */
		while (NCR5380_read(STATUS_REG) & SR_REQ);

		dprintk(NDEBUG_HANDSHAKE, "scsi%d: req false, handshake complete\n", HOSTNO);

/*
 * We have several special cases to consider during REQ/ACK handshaking :
 * 1.  We were in MSGOUT phase, and we are on the last byte of the
 *	message.  ATN must be dropped as ACK is dropped.
 *
 * 2.  We are in a MSGIN phase, and we are on the last byte of the
 *	message.  We must exit with ACK asserted, so that the calling
 *	code may raise ATN before dropping ACK to reject the message.
 *
 * 3.  ACK and ATN are clear and the target may proceed as normal.
 */
		if (!(p == PHASE_MSGIN && c == 1)) {
			if (p == PHASE_MSGOUT && c > 1)
				NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
			else
				NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
		}
	} while (--c);

	dprintk(NDEBUG_PIO, "scsi%d: residual %d\n", HOSTNO, c);

	*count = c;
	*data = d;
	tmp = NCR5380_read(STATUS_REG);
	/* The phase read from the bus is valid if either REQ is (already)
	 * asserted or if ACK hasn't been released yet. The latter is the case if
	 * we're in MSGIN and all wanted bytes have been received. */
	if ((tmp & SR_REQ) || (p == PHASE_MSGIN && c == 0))
		*phase = tmp & PHASE_MASK;
	else
		*phase = PHASE_UNKNOWN;

	/* 0: all bytes moved or still in the expected phase; -1: the bus
	 * changed phase before the transfer completed. */
	if (!c || (*phase == p))
		return 0;
	else
		return -1;
}
1760
1761/*
1762 * Function : do_abort (Scsi_Host *host)
1763 *
 * Purpose : abort the currently established nexus. Should only be
 *	called from a routine which can busy-wait, since it polls SCSI
 *	bus signals with no timeout.
1766 *
1767 * Returns : 0 on success, -1 on failure.
1768 */
1769
static int do_abort (struct Scsi_Host *host)
{
	unsigned char tmp, *msgptr, phase;
	int len;

	/* Request message out phase */
	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);

	/*
	 * Wait for the target to indicate a valid phase by asserting
	 * REQ. Once this happens, we'll have either a MSGOUT phase
	 * and can immediately send the ABORT message, or we'll have some
	 * other phase and will have to source/sink data.
	 *
	 * We really don't care what value was on the bus or what value
	 * the target sees, so we just handshake.
	 */

	/* NOTE(review): unbounded busy-wait on REQ. */
	while (!((tmp = NCR5380_read(STATUS_REG)) & SR_REQ));

	NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp));

	if ((tmp & PHASE_MASK) != PHASE_MSGOUT) {
		/* Not in MSGOUT yet: handshake one throwaway byte in the
		 * current phase (ATN stays asserted) so the target moves
		 * on to MSGOUT. */
		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN |
			      ICR_ASSERT_ACK);
		while (NCR5380_read(STATUS_REG) & SR_REQ);
		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
	}

	/* Send the single-byte ABORT message via polled I/O. */
	tmp = ABORT;
	msgptr = &tmp;
	len = 1;
	phase = PHASE_MSGOUT;
	NCR5380_transfer_pio (host, &phase, &len, &msgptr);

	/*
	 * If we got here, and the command completed successfully,
	 * we're about to go into bus free state.
	 */

	/* transfer_pio decrements len per byte sent; nonzero means the
	 * ABORT byte never made it out. */
	return len ? -1 : 0;
}
1812
1813#if defined(REAL_DMA)
1814/*
1815 * Function : int NCR5380_transfer_dma (struct Scsi_Host *instance,
1816 * unsigned char *phase, int *count, unsigned char **data)
1817 *
1818 * Purpose : transfers data in given phase using either real
1819 * or pseudo DMA.
1820 *
1821 * Inputs : instance - instance of driver, *phase - pointer to
1822 * what phase is expected, *count - pointer to number of
1823 * bytes to transfer, **data - pointer to data pointer.
1824 *
1825 * Returns : -1 when different phase is entered without transferring
 * maximum number of bytes, 0 if all bytes are transferred or exit
1827 * is in same phase.
1828 *
1829 * Also, *phase, *count, *data are modified in place.
1830 *
1831 */
1832
1833
static int NCR5380_transfer_dma( struct Scsi_Host *instance,
				 unsigned char *phase, int *count,
				 unsigned char **data)
{
	SETUP_HOSTDATA(instance);
	register int c = *count;
	register unsigned char p = *phase;
	unsigned long flags;

	/* sanity check */
	if(!sun3_dma_setup_done) {
		printk("scsi%d: transfer_dma without setup!\n", HOSTNO);
		BUG();
	}
	/* Record the programmed length; NCR5380_dma_complete() uses it to
	 * compute the number of bytes actually moved. */
	hostdata->dma_len = c;

	dprintk(NDEBUG_DMA, "scsi%d: initializing DMA for %s, %d bytes %s %p\n",
		HOSTNO, (p & SR_IO) ? "reading" : "writing",
		c, (p & SR_IO) ? "to" : "from", *data);

	/* netbsd turns off ints here, why not be safe and do it too */
	local_irq_save(flags);

	/* send start chain */
	sun3scsi_dma_start(c, *data);

	if (p & SR_IO) {
		/* Target -> initiator: program the chip for DMA receive. */
		NCR5380_write(TARGET_COMMAND_REG, 1);
		NCR5380_read(RESET_PARITY_INTERRUPT_REG);
		NCR5380_write(INITIATOR_COMMAND_REG, 0);
		NCR5380_write(MODE_REG, (NCR5380_read(MODE_REG) | MR_DMA_MODE | MR_ENABLE_EOP_INTR));
		NCR5380_write(START_DMA_INITIATOR_RECEIVE_REG, 0);
	} else {
		/* Initiator -> target: program the chip for DMA send. */
		NCR5380_write(TARGET_COMMAND_REG, 0);
		NCR5380_read(RESET_PARITY_INTERRUPT_REG);
		NCR5380_write(INITIATOR_COMMAND_REG, ICR_ASSERT_DATA);
		NCR5380_write(MODE_REG, (NCR5380_read(MODE_REG) | MR_DMA_MODE | MR_ENABLE_EOP_INTR));
		NCR5380_write(START_DMA_SEND_REG, 0);
	}

#ifdef SUN3_SCSI_VME
	dregs->csr |= CSR_DMA_ENABLE;
#endif

	local_irq_restore(flags);

	/* Mark DMA as in flight; completion is reported asynchronously via
	 * the interrupt path (NCR5380_intr -> NCR5380_dma_complete). */
	sun3_dma_active = 1;
	return 0;
}
1883#endif /* defined(REAL_DMA) */
1884
/*
 * Function : NCR5380_information_transfer (struct Scsi_Host *instance)
 *
 * Purpose : run through the various SCSI phases and do as the target
 *	directs us to.  Operates on the currently connected command,
 *	instance->connected.
 *
 * Inputs : instance, instance for which we are doing commands
 *
 * Side effects : SCSI things happen, the disconnected queue will be
 *	modified if a command disconnects, *instance->connected will
 *	change.
 *
 *	Returns to the caller when the command completes, disconnects,
 *	or (with REAL_DMA) once a DMA transfer has been started, in
 *	which case the interrupt path finishes the transfer.
 *
 * XXX Note : we need to watch for bus free or a reset condition here
 *	to recover from an unexpected bus free condition.
 */

static void NCR5380_information_transfer (struct Scsi_Host *instance)
{
	SETUP_HOSTDATA(instance);
	unsigned long flags;
	unsigned char msgout = NOP;	/* next byte to send in MSGOUT phase */
	int sink = 0;			/* when set, swallow bytes until MSGOUT */
	int len;
#if defined(REAL_DMA)
	int transfersize;
#endif
	unsigned char *data;
	unsigned char phase, tmp, extended_msg[10], old_phase=0xff;
	struct scsi_cmnd *cmd = (struct scsi_cmnd *) hostdata->connected;

#ifdef SUN3_SCSI_VME
	dregs->csr |= CSR_INTR;
#endif

	while (1) {
		tmp = NCR5380_read(STATUS_REG);
		/* We only have a valid SCSI phase when REQ is asserted */
		if (tmp & SR_REQ) {
			phase = (tmp & PHASE_MASK);
			if (phase != old_phase) {
				old_phase = phase;
				NCR5380_dprint_phase(NDEBUG_INFORMATION, instance);
			}

			/* Sun3-specific: pre-map the data buffer for DMA
			 * while still in COMMAND OUT phase, so the engine
			 * is ready when the target switches to a data
			 * phase. */
			if(phase == PHASE_CMDOUT) {
				void *d;
				unsigned long count;

				if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) {
					count = cmd->SCp.buffer->length;
					d = SGADDR(cmd->SCp.buffer);
				} else {
					count = cmd->SCp.this_residual;
					d = cmd->SCp.ptr;
				}
#ifdef REAL_DMA
				/* this command setup for dma yet? */
				if((count > SUN3_DMA_MINSIZE) && (sun3_dma_setup_done
								  != cmd))
				{
					/* only filesystem requests have
					 * DVMA-safe buffers here */
					if (cmd->request->cmd_type == REQ_TYPE_FS) {
						sun3scsi_dma_setup(d, count,
								   rq_data_dir(cmd->request));
						sun3_dma_setup_done = cmd;
					}
				}
#endif
#ifdef SUN3_SCSI_VME
				dregs->csr |= CSR_INTR;
#endif
			}


			/* After a rejected transfer we must keep handshaking
			 * (ACK each byte) until the target finally switches
			 * to MESSAGE OUT and lets us send the reject. */
			if (sink && (phase != PHASE_MSGOUT)) {
				NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp));

				NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN |
					      ICR_ASSERT_ACK);
				while (NCR5380_read(STATUS_REG) & SR_REQ);
				NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
					      ICR_ASSERT_ATN);
				sink = 0;
				continue;
			}

			switch (phase) {
			case PHASE_DATAOUT:
#if (NDEBUG & NDEBUG_NO_DATAOUT)
				printk("scsi%d: NDEBUG_NO_DATAOUT set, attempted DATAOUT "
				       "aborted\n", HOSTNO);
				sink = 1;
				do_abort(instance);
				cmd->result = DID_ERROR << 16;
				cmd->scsi_done(cmd);
				return;
#endif
				/* fall through - DATAOUT and DATAIN share
				 * the transfer logic below */
			case PHASE_DATAIN:
				/*
				 * If there is no room left in the current buffer in the
				 * scatter-gather list, move onto the next one.
				 */
				if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) {
					++cmd->SCp.buffer;
					--cmd->SCp.buffers_residual;
					cmd->SCp.this_residual = cmd->SCp.buffer->length;
					cmd->SCp.ptr = SGADDR(cmd->SCp.buffer);
					dprintk(NDEBUG_INFORMATION, "scsi%d: %d bytes and %d buffers left\n",
						HOSTNO, cmd->SCp.this_residual,
						cmd->SCp.buffers_residual);
				}

				/*
				 * The preferred transfer method is going to be
				 * PSEUDO-DMA for systems that are strictly PIO,
				 * since we can let the hardware do the handshaking.
				 *
				 * For this to work, we need to know the transfersize
				 * ahead of time, since the pseudo-DMA code will sit
				 * in an unconditional loop.
				 */

/* ++roman: I suggest, this should be
 * #if def(REAL_DMA)
 * instead of leaving REAL_DMA out.
 */

#if defined(REAL_DMA)
//				if (!cmd->device->borken &&
				if((transfersize =
				    NCR5380_dma_xfer_len(instance,cmd,phase)) > SUN3_DMA_MINSIZE) {
					len = transfersize;
					cmd->SCp.phase = phase;

					if (NCR5380_transfer_dma(instance, &phase,
					    &len, (unsigned char **) &cmd->SCp.ptr)) {
						/*
						 * If the watchdog timer fires, all future
						 * accesses to this device will use the
						 * polled-IO. */
						printk(KERN_NOTICE "scsi%d: switching target %d "
						       "lun %llu to slow handshake\n", HOSTNO,
						       cmd->device->id, cmd->device->lun);
						cmd->device->borken = 1;
						NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
							      ICR_ASSERT_ATN);
						sink = 1;
						do_abort(instance);
						cmd->result = DID_ERROR << 16;
						cmd->scsi_done(cmd);
						/* XXX - need to source or sink data here, as appropriate */
					} else {
#ifdef REAL_DMA
						/* ++roman: When using real DMA,
						 * information_transfer() should return after
						 * starting DMA since it has nothing more to
						 * do.
						 */
						return;
#else
						cmd->SCp.this_residual -= transfersize - len;
#endif
					}
				} else
#endif /* defined(REAL_DMA) */
					/* too small (or no DMA): fall back
					 * to programmed I/O */
					NCR5380_transfer_pio(instance, &phase,
							     (int *) &cmd->SCp.this_residual, (unsigned char **)
							     &cmd->SCp.ptr);
#ifdef REAL_DMA
				/* if we had intended to dma that command clear it */
				if(sun3_dma_setup_done == cmd)
					sun3_dma_setup_done = NULL;
#endif

				break;
			case PHASE_MSGIN:
				/* Read one message byte by PIO with
				 * reselects disabled. */
				len = 1;
				data = &tmp;
				NCR5380_write(SELECT_ENABLE_REG, 0); /* disable reselects */
				NCR5380_transfer_pio(instance, &phase, &len, &data);
				cmd->SCp.Message = tmp;

				switch (tmp) {
				/*
				 * Linking lets us reduce the time required to get the
				 * next command out to the device, hopefully this will
				 * mean we don't waste another revolution due to the delays
				 * required by ARBITRATION and another SELECTION.
				 *
				 * In the current implementation proposal, low level drivers
				 * merely have to start the next command, pointed to by
				 * next_link, done() is called as with unlinked commands.
				 */
#ifdef LINKED
				case LINKED_CMD_COMPLETE:
				case LINKED_FLG_CMD_COMPLETE:
					/* Accept message by clearing ACK */
					NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);

					dprintk(NDEBUG_LINKED, "scsi%d: target %d lun %llu linked command "
						"complete.\n", HOSTNO, cmd->device->id, cmd->device->lun);

					/* Enable reselect interrupts */
					NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
					/*
					 * Sanity check : A linked command should only terminate
					 * with one of these messages if there are more linked
					 * commands available.
					 */

					if (!cmd->next_link) {
						printk(KERN_NOTICE "scsi%d: target %d lun %llu "
						       "linked command complete, no next_link\n",
						       HOSTNO, cmd->device->id, cmd->device->lun);
						sink = 1;
						do_abort (instance);
						return;
					}

					initialize_SCp(cmd->next_link);
					/* The next command is still part of this process; copy it
					 * and don't free it! */
					cmd->next_link->tag = cmd->tag;
					cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
					dprintk(NDEBUG_LINKED, "scsi%d: target %d lun %llu linked request "
						"done, calling scsi_done().\n",
						HOSTNO, cmd->device->id, cmd->device->lun);
#ifdef NCR5380_STATS
					collect_stats(hostdata, cmd);
#endif
					cmd->scsi_done(cmd);
					/* continue the loop with the linked
					 * successor as the connected command */
					cmd = hostdata->connected;
					break;
#endif /* def LINKED */
				case ABORT:
				case COMMAND_COMPLETE:
					/* Accept message by clearing ACK */
					NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
					hostdata->connected = NULL;
					dprintk(NDEBUG_QUEUES, "scsi%d: command for target %d, lun %llu "
						"completed\n", HOSTNO, cmd->device->id, cmd->device->lun);
#ifdef SUPPORT_TAGS
					cmd_free_tag( cmd );
					if (status_byte(cmd->SCp.Status) == QUEUE_FULL) {
						/* Turn a QUEUE FULL status into BUSY, I think the
						 * mid level cannot handle QUEUE FULL :-( (The
						 * command is retried after BUSY). Also update our
						 * queue size to the number of currently issued
						 * commands now.
						 */
						/* ++Andreas: the mid level code knows about
						   QUEUE_FULL now. */
						TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun];
						dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %llu returned "
							"QUEUE_FULL after %d commands\n",
							HOSTNO, cmd->device->id, cmd->device->lun,
							ta->nr_allocated);
						if (ta->queue_size > ta->nr_allocated)
							ta->nr_allocated = ta->queue_size;
					}
#else
					/* untagged: release the per-target/LUN
					 * busy bit */
					hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
#endif
					/* Enable reselect interrupts */
					NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);

					/*
					 * I'm not sure what the correct thing to do here is :
					 *
					 * If the command that just executed is NOT a request
					 * sense, the obvious thing to do is to set the result
					 * code to the values of the stored parameters.
					 *
					 * If it was a REQUEST SENSE command, we need some way to
					 * differentiate between the failure code of the original
					 * and the failure code of the REQUEST sense - the obvious
					 * case is success, where we fall through and leave the
					 * result code unchanged.
					 *
					 * The non-obvious place is where the REQUEST SENSE failed
					 */

					if (cmd->cmnd[0] != REQUEST_SENSE)
						cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
					else if (status_byte(cmd->SCp.Status) != GOOD)
						cmd->result = (cmd->result & 0x00ffff) | (DID_ERROR << 16);

#ifdef AUTOSENSE
					/* restore the original command if this
					 * was an internally-issued REQUEST SENSE */
					if ((cmd->cmnd[0] == REQUEST_SENSE) &&
					    hostdata->ses.cmd_len) {
						scsi_eh_restore_cmnd(cmd, &hostdata->ses);
						hostdata->ses.cmd_len = 0 ;
					}

					/* on CHECK CONDITION, morph the command
					 * into a REQUEST SENSE and requeue it at
					 * the head of the issue queue */
					if ((cmd->cmnd[0] != REQUEST_SENSE) &&
					    (status_byte(cmd->SCp.Status) == CHECK_CONDITION)) {
						scsi_eh_prep_cmnd(cmd, &hostdata->ses, NULL, 0, ~0);
						dprintk(NDEBUG_AUTOSENSE, "scsi%d: performing request sense\n",
							HOSTNO);
						/* this is initialized from initialize_SCp
						cmd->SCp.buffer = NULL;
						cmd->SCp.buffers_residual = 0;
						*/

						local_irq_save(flags);
						LIST(cmd,hostdata->issue_queue);
						SET_NEXT(cmd, hostdata->issue_queue);
						hostdata->issue_queue = (struct scsi_cmnd *) cmd;
						local_irq_restore(flags);
						dprintk(NDEBUG_QUEUES, "scsi%d: REQUEST SENSE added to head of "
							"issue queue\n", H_NO(cmd));
					} else
#endif /* def AUTOSENSE */
					{
#ifdef NCR5380_STATS
						collect_stats(hostdata, cmd);
#endif
						cmd->scsi_done(cmd);
					}

					NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
					/*
					 * Restore phase bits to 0 so an interrupted selection,
					 * arbitration can resume.
					 */
					NCR5380_write(TARGET_COMMAND_REG, 0);

					/* NOTE(review): busy-wait for bus free
					 * with no timeout — a hung target keeps
					 * us here indefinitely */
					while ((NCR5380_read(STATUS_REG) & SR_BSY) && !hostdata->connected)
						barrier();

					return;
				case MESSAGE_REJECT:
					/* Accept message by clearing ACK */
					NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
					/* Enable reselect interrupts */
					NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
					switch (hostdata->last_message) {
					case HEAD_OF_QUEUE_TAG:
					case ORDERED_QUEUE_TAG:
					case SIMPLE_QUEUE_TAG:
						/* The target obviously doesn't support tagged
						 * queuing, even though it announced this ability in
						 * its INQUIRY data ?!? (maybe only this LUN?) Ok,
						 * clear 'tagged_supported' and lock the LUN, since
						 * the command is treated as untagged further on.
						 */
						cmd->device->tagged_supported = 0;
						hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun);
						cmd->tag = TAG_NONE;
						dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %llu rejected "
							"QUEUE_TAG message; tagged queuing "
							"disabled\n",
							HOSTNO, cmd->device->id, cmd->device->lun);
						break;
					}
					break;
				case DISCONNECT:
					/* Accept message by clearing ACK */
					NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
					/* move the command to the disconnected
					 * queue; it will be resumed by
					 * NCR5380_reselect() later */
					local_irq_save(flags);
					cmd->device->disconnect = 1;
					LIST(cmd,hostdata->disconnected_queue);
					SET_NEXT(cmd, hostdata->disconnected_queue);
					hostdata->connected = NULL;
					hostdata->disconnected_queue = cmd;
					local_irq_restore(flags);
					dprintk(NDEBUG_QUEUES, "scsi%d: command for target %d lun %llu was "
						"moved from connected to the "
						"disconnected_queue\n", HOSTNO,
						cmd->device->id, cmd->device->lun);
					/*
					 * Restore phase bits to 0 so an interrupted selection,
					 * arbitration can resume.
					 */
					NCR5380_write(TARGET_COMMAND_REG, 0);

					/* Enable reselect interrupts */
					NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
					/* Wait for bus free to avoid nasty timeouts */
					while ((NCR5380_read(STATUS_REG) & SR_BSY) && !hostdata->connected)
						barrier();
#ifdef SUN3_SCSI_VME
					dregs->csr |= CSR_DMA_ENABLE;
#endif
					return;
					/*
					 * The SCSI data pointer is *IMPLICITLY* saved on a disconnect
					 * operation, in violation of the SCSI spec so we can safely
					 * ignore SAVE/RESTORE pointers calls.
					 *
					 * Unfortunately, some disks violate the SCSI spec and
					 * don't issue the required SAVE_POINTERS message before
					 * disconnecting, and we have to break spec to remain
					 * compatible.
					 */
				case SAVE_POINTERS:
				case RESTORE_POINTERS:
					/* Accept message by clearing ACK */
					NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
					/* Enable reselect interrupts */
					NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
					break;
				case EXTENDED_MESSAGE:
/*
 * Extended messages are sent in the following format :
 * Byte
 * 0		EXTENDED_MESSAGE == 1
 * 1		length (includes one byte for code, doesn't
 *		include first two bytes)
 * 2		code
 * 3..length+1	arguments
 *
 * Start the extended message buffer with the EXTENDED_MESSAGE
 * byte, since spi_print_msg() wants the whole thing.
 */
					extended_msg[0] = EXTENDED_MESSAGE;
					/* Accept first byte by clearing ACK */
					NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);

					dprintk(NDEBUG_EXTENDED, "scsi%d: receiving extended message\n", HOSTNO);

					/* read length and code bytes */
					len = 2;
					data = extended_msg + 1;
					phase = PHASE_MSGIN;
					NCR5380_transfer_pio(instance, &phase, &len, &data);
					dprintk(NDEBUG_EXTENDED, "scsi%d: length=%d, code=0x%02x\n", HOSTNO,
						(int)extended_msg[1], (int)extended_msg[2]);

					if (!len && extended_msg[1] <=
					    (sizeof (extended_msg) - 1)) {
						/* Accept third byte by clearing ACK */
						NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
						len = extended_msg[1] - 1;
						data = extended_msg + 3;
						phase = PHASE_MSGIN;

						NCR5380_transfer_pio(instance, &phase, &len, &data);
						dprintk(NDEBUG_EXTENDED, "scsi%d: message received, residual %d\n",
							HOSTNO, len);

						/* tmp != 0 here marks a known-but-
						 * unsupported extended message; the
						 * default case below rejects it */
						switch (extended_msg[2]) {
						case EXTENDED_SDTR:
						case EXTENDED_WDTR:
						case EXTENDED_MODIFY_DATA_POINTER:
						case EXTENDED_EXTENDED_IDENTIFY:
							tmp = 0;
						}
					} else if (len) {
						printk(KERN_NOTICE "scsi%d: error receiving "
						       "extended message\n", HOSTNO);
						tmp = 0;
					} else {
						printk(KERN_NOTICE "scsi%d: extended message "
						       "code %02x length %d is too long\n",
						       HOSTNO, extended_msg[2], extended_msg[1]);
						tmp = 0;
					}
					/* Fall through to reject message */

					/*
					 * If we get something weird that we aren't expecting,
					 * reject it.
					 */
				default:
					if (!tmp) {
						printk(KERN_DEBUG "scsi%d: rejecting message ", HOSTNO);
						spi_print_msg(extended_msg);
						printk("\n");
					} else if (tmp != EXTENDED_MESSAGE)
						printk(KERN_DEBUG "scsi%d: rejecting unknown "
						       "message %02x from target %d, lun %llu\n",
						       HOSTNO, tmp, cmd->device->id, cmd->device->lun);
					else
						printk(KERN_DEBUG "scsi%d: rejecting unknown "
						       "extended message "
						       "code %02x, length %d from target %d, lun %llu\n",
						       HOSTNO, extended_msg[1], extended_msg[0],
						       cmd->device->id, cmd->device->lun);


					msgout = MESSAGE_REJECT;
					NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
						      ICR_ASSERT_ATN);
					break;
				} /* switch (tmp) */
				break;
			case PHASE_MSGOUT:
				len = 1;
				data = &msgout;
				hostdata->last_message = msgout;
				NCR5380_transfer_pio(instance, &phase, &len, &data);
				if (msgout == ABORT) {
					/* we just told the target to abort:
					 * release the nexus and fail the
					 * command */
#ifdef SUPPORT_TAGS
					cmd_free_tag( cmd );
#else
					hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
#endif
					hostdata->connected = NULL;
					cmd->result = DID_ERROR << 16;
#ifdef NCR5380_STATS
					collect_stats(hostdata, cmd);
#endif
					cmd->scsi_done(cmd);
					NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
					return;
				}
				msgout = NOP;
				break;
			case PHASE_CMDOUT:
				len = cmd->cmd_len;
				data = cmd->cmnd;
				/*
				 * XXX for performance reasons, on machines with a
				 * PSEUDO-DMA architecture we should probably
				 * use the dma transfer function.
				 */
				NCR5380_transfer_pio(instance, &phase, &len,
						     &data);
				break;
			case PHASE_STATIN:
				len = 1;
				data = &tmp;
				NCR5380_transfer_pio(instance, &phase, &len, &data);
				cmd->SCp.Status = tmp;
				break;
			default:
				printk("scsi%d: unknown phase\n", HOSTNO);
				NCR5380_dprint(NDEBUG_ANY, instance);
			} /* switch(phase) */
		} /* if (tmp & SR_REQ) */
	} /* while (1) */
}
2417
/*
 * Function : void NCR5380_reselect (struct Scsi_Host *instance)
 *
 * Purpose : does reselection, initializing the instance->connected
 *	field to point to the struct scsi_cmnd for which the I_T_L or I_T_L_Q
 *	nexus has been reestablished,
 *
 * Inputs : instance - this instance of the NCR5380.
 *
 * Side effects : removes the matching command from the disconnected
 *	queue and makes it hostdata->connected; aborts the nexus if no
 *	IDENTIFY message arrives or no matching command is found.
 */

/* it might eventually prove necessary to do a dma setup on
   reselection, but it doesn't seem to be needed now -- sam */

static void NCR5380_reselect (struct Scsi_Host *instance)
{
	SETUP_HOSTDATA(instance);
	unsigned char target_mask;
	unsigned char lun;
#ifdef SUPPORT_TAGS
	unsigned char tag;
#endif
	unsigned char msg[3];
	struct scsi_cmnd *tmp = NULL, *prev;
/*	unsigned long flags; */

	/*
	 * Disable arbitration, etc. since the host adapter obviously
	 * lost, and tell an interrupted NCR5380_select() to restart.
	 */

	NCR5380_write(MODE_REG, MR_BASE);
	hostdata->restart_select = 1;

	/* the data bus holds the reselecting target's ID bit plus ours;
	 * mask ours off */
	target_mask = NCR5380_read(CURRENT_SCSI_DATA_REG) & ~(hostdata->id_mask);

	dprintk(NDEBUG_RESELECTION, "scsi%d: reselect\n", HOSTNO);

	/*
	 * At this point, we have detected that our SCSI ID is on the bus,
	 * SEL is true and BSY was false for at least one bus settle delay
	 * (400 ns).
	 *
	 * We must assert BSY ourselves, until the target drops the SEL
	 * signal.
	 */

	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_BSY);

	/* NOTE(review): the two busy-waits below have no timeout */
	while (NCR5380_read(STATUS_REG) & SR_SEL);
	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);

	/*
	 * Wait for target to go into MSGIN.
	 */

	while (!(NCR5380_read(STATUS_REG) & SR_REQ));

#if 1
	// acknowledge toggle to MSGIN
	NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(PHASE_MSGIN));

	// peek at the byte without really hitting the bus
	msg[0] = NCR5380_read(CURRENT_SCSI_DATA_REG);
#endif

	/* the first reselection message must be IDENTIFY (top bit set) */
	if (!(msg[0] & 0x80)) {
		printk(KERN_DEBUG "scsi%d: expecting IDENTIFY message, got ", HOSTNO);
		spi_print_msg(msg);
		do_abort(instance);
		return;
	}
	lun = (msg[0] & 0x07);

	/*
	 * Find the command corresponding to the I_T_L or I_T_L_Q nexus we
	 * just reestablished, and remove it from the disconnected queue.
	 *
	 * NOTE(review): when SUPPORT_TAGS is defined, 'tag' is compared
	 * below but is only assigned *after* this search (see the
	 * SUPPORT_TAGS block near the end) — i.e. it is read
	 * uninitialized here.  SUPPORT_TAGS is currently not enabled for
	 * this driver; confirm the ordering before turning it on.
	 */

	for (tmp = (struct scsi_cmnd *) hostdata->disconnected_queue, prev = NULL;
	     tmp; prev = tmp, tmp = NEXT(tmp) ) {
		if ((target_mask == (1 << tmp->device->id)) && (lun == tmp->device->lun)
#ifdef SUPPORT_TAGS
		    && (tag == tmp->tag)
#endif
		    ) {
			if (prev) {
				REMOVE(prev, NEXT(prev), tmp, NEXT(tmp));
				SET_NEXT(prev, NEXT(tmp));
			} else {
				REMOVE(-1, hostdata->disconnected_queue, tmp, NEXT(tmp));
				hostdata->disconnected_queue = NEXT(tmp);
			}
			SET_NEXT(tmp, NULL);
			break;
		}
	}

	if (!tmp) {
		printk(KERN_WARNING "scsi%d: warning: target bitmask %02x lun %d "
#ifdef SUPPORT_TAGS
		       "tag %d "
#endif
		       "not in disconnected_queue.\n",
		       HOSTNO, target_mask, lun
#ifdef SUPPORT_TAGS
		       , tag
#endif
		       );
		/*
		 * Since we have an established nexus that we can't do anything
		 * with, we must abort it.
		 */
		do_abort(instance);
		return;
	}
#if 1
	/* engage dma setup for the command we just saw */
	{
		void *d;
		unsigned long count;

		if (!tmp->SCp.this_residual && tmp->SCp.buffers_residual) {
			count = tmp->SCp.buffer->length;
			d = SGADDR(tmp->SCp.buffer);
		} else {
			count = tmp->SCp.this_residual;
			d = tmp->SCp.ptr;
		}
#ifdef REAL_DMA
		/* setup this command for dma if not already */
		if((count > SUN3_DMA_MINSIZE) && (sun3_dma_setup_done != tmp))
		{
			sun3scsi_dma_setup(d, count, rq_data_dir(tmp->request));
			sun3_dma_setup_done = tmp;
		}
#endif
	}
#endif

	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK);
	/* Accept message by clearing ACK */
	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);

#ifdef SUPPORT_TAGS
	/* If the phase is still MSGIN, the target wants to send some more
	 * messages. In case it supports tagged queuing, this is probably a
	 * SIMPLE_QUEUE_TAG for the I_T_L_Q nexus.
	 *
	 * NOTE(review): this block references 'phase', 'len' and 'data',
	 * none of which are declared in this function — it cannot compile
	 * with SUPPORT_TAGS defined.  Verify against the atari_NCR5380
	 * variant before enabling tagged queuing here.
	 */
	tag = TAG_NONE;
	if (phase == PHASE_MSGIN && setup_use_tagged_queuing) {
		/* Accept previous IDENTIFY message by clearing ACK */
		NCR5380_write( INITIATOR_COMMAND_REG, ICR_BASE );
		len = 2;
		data = msg+1;
		if (!NCR5380_transfer_pio(instance, &phase, &len, &data) &&
		    msg[1] == SIMPLE_QUEUE_TAG)
			tag = msg[2];
		dprintk(NDEBUG_TAGS, "scsi%d: target mask %02x, lun %d sent tag %d at "
			"reselection\n", HOSTNO, target_mask, lun, tag);
	}
#endif

	hostdata->connected = tmp;
	dprintk(NDEBUG_RESELECTION, "scsi%d: nexus established, target = %d, lun = %llu, tag = %d\n",
		HOSTNO, tmp->device->id, tmp->device->lun, tmp->tag);
}
2585
2586
/*
 * Function : int NCR5380_abort(struct scsi_cmnd *cmd)
 *
 * Purpose : abort a command
 *
 * Inputs : cmd - the struct scsi_cmnd to abort
 *
 * Returns : SUCCESS or FAILED (SCSI EH return codes; an older version
 *	of this comment described 0 / -1, which no longer matches the
 *	code below).
 *
 * XXX - there is no way to abort the command that is currently
 *	connected, you have to wait for it to complete.  If this is
 *	a problem, we could implement longjmp() / setjmp(), setjmp()
 *	called where the loop started in NCR5380_main().
 */

static int NCR5380_abort(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *instance = cmd->device->host;
	SETUP_HOSTDATA(instance);
	struct scsi_cmnd *tmp, **prev;
	unsigned long flags;

	printk(KERN_NOTICE "scsi%d: aborting command\n", HOSTNO);
	scsi_print_command(cmd);

	NCR5380_print_status (instance);

	local_irq_save(flags);

	dprintk(NDEBUG_ABORT, "scsi%d: abort called basr 0x%02x, sr 0x%02x\n", HOSTNO,
		NCR5380_read(BUS_AND_STATUS_REG),
		NCR5380_read(STATUS_REG));

#if 1
/*
 * Case 1 : If the command is the currently executing command,
 * we'll set the aborted flag and return control so that
 * information transfer routine can exit cleanly.
 */

	if (hostdata->connected == cmd) {

		dprintk(NDEBUG_ABORT, "scsi%d: aborting connected command\n", HOSTNO);
/*
 * We should perform BSY checking, and make sure we haven't slipped
 * into BUS FREE.
 */

/*	NCR5380_write(INITIATOR_COMMAND_REG, ICR_ASSERT_ATN); */
/*
 * Since we can't change phases until we've completed the current
 * handshake, we have to source or sink a byte of data if the current
 * phase is not MSGOUT.
 */

/*
 * Return control to the executing NCR drive so we can clear the
 * aborted flag and get back into our main loop.
 */

		if (do_abort(instance) == 0) {
			hostdata->aborted = 1;
			hostdata->connected = NULL;
			cmd->result = DID_ABORT << 16;
#ifdef SUPPORT_TAGS
			cmd_free_tag( cmd );
#else
			hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
#endif
			local_irq_restore(flags);
			cmd->scsi_done(cmd);
			return SUCCESS;
		} else {
			/* NOTE(review): local_irq_restore() is commented out
			 * on this path, so we return FAILED with interrupts
			 * still disabled — verify this is intentional. */
/*			local_irq_restore(flags); */
			printk("scsi%d: abort of connected command failed!\n", HOSTNO);
			return FAILED;
		}
	}
#endif

/*
 * Case 2 : If the command hasn't been issued yet, we simply remove it
 * from the issue queue.
 */
	for (prev = (struct scsi_cmnd **) &(hostdata->issue_queue),
	     tmp = (struct scsi_cmnd *) hostdata->issue_queue;
	     tmp; prev = NEXTADDR(tmp), tmp = NEXT(tmp))
		if (cmd == tmp) {
			/* unlink from the singly-linked issue queue and
			 * complete it with DID_ABORT */
			REMOVE(5, *prev, tmp, NEXT(tmp));
			(*prev) = NEXT(tmp);
			SET_NEXT(tmp, NULL);
			tmp->result = DID_ABORT << 16;
			local_irq_restore(flags);
			dprintk(NDEBUG_ABORT, "scsi%d: abort removed command from issue queue.\n",
				HOSTNO);
			/* Tagged queuing note: no tag to free here, hasn't been assigned
			 * yet... */
			tmp->scsi_done(tmp);
			return SUCCESS;
		}

/*
 * Case 3 : If any commands are connected, we're going to fail the abort
 * and let the high level SCSI driver retry at a later time or
 * issue a reset.
 *
 * Timeouts, and therefore aborted commands, will be highly unlikely
 * and handling them cleanly in this situation would make the common
 * case of noresets less efficient, and would pollute our code. So,
 * we fail.
 */

	if (hostdata->connected) {
		local_irq_restore(flags);
		dprintk(NDEBUG_ABORT, "scsi%d: abort failed, command connected.\n", HOSTNO);
		return FAILED;
	}

/*
 * Case 4: If the command is currently disconnected from the bus, and
 * there are no connected commands, we reconnect the I_T_L or
 * I_T_L_Q nexus associated with it, go into message out, and send
 * an abort message.
 *
 * This case is especially ugly. In order to reestablish the nexus, we
 * need to call NCR5380_select(). The easiest way to implement this
 * function was to abort if the bus was busy, and let the interrupt
 * handler triggered on the SEL for reselect take care of lost arbitrations
 * where necessary, meaning interrupts need to be enabled.
 *
 * When interrupts are enabled, the queues may change - so we
 * can't remove it from the disconnected queue before selecting it
 * because that could cause a failure in hashing the nexus if that
 * device reselected.
 *
 * Since the queues may change, we can't use the pointers from when we
 * first locate it.
 *
 * So, we must first locate the command, and if NCR5380_select()
 * succeeds, then issue the abort, relocate the command and remove
 * it from the disconnected queue.
 */

	for (tmp = (struct scsi_cmnd *) hostdata->disconnected_queue; tmp;
	     tmp = NEXT(tmp))
		if (cmd == tmp) {
			local_irq_restore(flags);
			dprintk(NDEBUG_ABORT, "scsi%d: aborting disconnected command.\n", HOSTNO);

			if (NCR5380_select (instance, cmd, (int) cmd->tag))
				return FAILED;

			dprintk(NDEBUG_ABORT, "scsi%d: nexus reestablished.\n", HOSTNO);

			do_abort (instance);

			/* re-search the (possibly changed) disconnected
			 * queue and complete the command if still present */
			local_irq_save(flags);
			for (prev = (struct scsi_cmnd **) &(hostdata->disconnected_queue),
			     tmp = (struct scsi_cmnd *) hostdata->disconnected_queue;
			     tmp; prev = NEXTADDR(tmp), tmp = NEXT(tmp) )
				if (cmd == tmp) {
					REMOVE(5, *prev, tmp, NEXT(tmp));
					*prev = NEXT(tmp);
					SET_NEXT(tmp, NULL);
					tmp->result = DID_ABORT << 16;
					/* We must unlock the tag/LUN immediately here, since the
					 * target goes to BUS FREE and doesn't send us another
					 * message (COMMAND_COMPLETE or the like)
					 */
#ifdef SUPPORT_TAGS
					cmd_free_tag( tmp );
#else
					hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
#endif
					local_irq_restore(flags);
					tmp->scsi_done(tmp);
					return SUCCESS;
				}
		}

/*
 * Case 5 : If we reached this point, the command was not found in any of
 * the queues.
 *
 * We probably reached this point because of an unlikely race condition
 * between the command completing successfully and the abortion code,
 * so we won't panic, but we will notify the user in case something really
 * broke.
 */

	local_irq_restore(flags);
	printk(KERN_INFO "scsi%d: warning : SCSI command probably completed successfully before abortion\n", HOSTNO);

	return FAILED;
}
2784
2785
/*
 * Function : int NCR5380_bus_reset(struct scsi_cmnd *cmd)
 *
 * Purpose : reset the SCSI bus.
 *
 * Returns : SUCCESS (both compile-time variants below always report
 *	success after clearing the low-level state).
 *
 * Two strategies are selected at compile time:
 *  - RESET_RUN_DONE: explicitly terminate every pending command with
 *	DID_RESET so the mid-level does not requeue them;
 *  - otherwise: just clear the low-level queues and let the mid-level
 *	error handler requeue/retry (see ++guenther's caveats below).
 */

static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
{
	SETUP_HOSTDATA(cmd->device->host);
	int i;
	unsigned long flags;
#if defined(RESET_RUN_DONE)
	struct scsi_cmnd *connected, *disconnected_queue;
#endif


	NCR5380_print_status (cmd->device->host);

	/* Hardware reset sequence: get in phase, pulse RST, then clear
	 * the 5380 back to an idle state.  Order matters. */

	/* get in phase */
	NCR5380_write( TARGET_COMMAND_REG,
		       PHASE_SR_TO_TCR( NCR5380_read(STATUS_REG) ));
	/* assert RST */
	NCR5380_write( INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_RST );
	udelay (40);
	/* reset NCR registers */
	NCR5380_write( INITIATOR_COMMAND_REG, ICR_BASE );
	NCR5380_write( MODE_REG, MR_BASE );
	NCR5380_write( TARGET_COMMAND_REG, 0 );
	NCR5380_write( SELECT_ENABLE_REG, 0 );
	/* ++roman: reset interrupt condition! otherwise no interrupts don't get
	 * through anymore ... */
	(void)NCR5380_read( RESET_PARITY_INTERRUPT_REG );

	/* MSch 20140115 - looking at the generic NCR5380 driver, all of this
	 * should go.
	 * Catch-22: if we don't clear all queues, the SCSI driver lock will
	 * not be released by atari_scsi_reset()!
	 */

#if defined(RESET_RUN_DONE)
	/* XXX Should now be done by midlevel code, but it's broken XXX */
	/* XXX see below XXX */

	/* MSch: old-style reset: actually abort all command processing here */

	/* After the reset, there are no more connected or disconnected commands
	 * and no busy units; to avoid problems with re-inserting the commands
	 * into the issue_queue (via scsi_done()), the aborted commands are
	 * remembered in local variables first.
	 */
	local_irq_save(flags);
	connected = (struct scsi_cmnd *)hostdata->connected;
	hostdata->connected = NULL;
	disconnected_queue = (struct scsi_cmnd *)hostdata->disconnected_queue;
	hostdata->disconnected_queue = NULL;
#ifdef SUPPORT_TAGS
	free_all_tags();
#endif
	for( i = 0; i < 8; ++i )
		hostdata->busy[i] = 0;
#ifdef REAL_DMA
	hostdata->dma_len = 0;
#endif
	local_irq_restore(flags);

	/* In order to tell the mid-level code which commands were aborted,
	 * set the command status to DID_RESET and call scsi_done() !!!
	 * This ultimately aborts processing of these commands in the mid-level.
	 */

	/* note: 'cmd' is deliberately reused here to walk the saved lists */
	if ((cmd = connected)) {
		dprintk(NDEBUG_ABORT, "scsi%d: reset aborted a connected command\n", H_NO(cmd));
		cmd->result = (cmd->result & 0xffff) | (DID_RESET << 16);
		cmd->scsi_done( cmd );
	}

	for (i = 0; (cmd = disconnected_queue); ++i) {
		disconnected_queue = NEXT(cmd);
		SET_NEXT(cmd, NULL);
		cmd->result = (cmd->result & 0xffff) | (DID_RESET << 16);
		cmd->scsi_done( cmd );
	}
	if (i > 0)
		dprintk(NDEBUG_ABORT, "scsi: reset aborted %d disconnected command(s)\n", i);


	/* since all commands have been explicitly terminated, we need to tell
	 * the midlevel code that the reset was SUCCESSFUL, and there is no
	 * need to 'wake up' the commands by a request_sense
	 */
	return SUCCESS;
#else /* 1 */

	/* MSch: new-style reset handling: let the mid-level do what it can */

	/* ++guenther: MID-LEVEL IS STILL BROKEN.
	 * Mid-level is supposed to requeue all commands that were active on the
	 * various low-level queues. In fact it does this, but that's not enough
	 * because all these commands are subject to timeout. And if a timeout
	 * happens for any removed command, *_abort() is called but all queues
	 * are now empty. Abort then gives up the falcon lock, which is fatal,
	 * since the mid-level will queue more commands and must have the lock
	 * (it's all happening inside timer interrupt handler!!).
	 * Even worse, abort will return NOT_RUNNING for all those commands not
	 * on any queue, so they won't be retried ...
	 *
	 * Conclusion: either scsi.c disables timeout for all resetted commands
	 * immediately, or we lose!  As of linux-2.0.20 it doesn't.
	 */

	/* After the reset, there are no more connected or disconnected commands
	 * and no busy units; so clear the low-level status here to avoid
	 * conflicts when the mid-level code tries to wake up the affected
	 * commands!
	 */

	if (hostdata->issue_queue)
		dprintk(NDEBUG_ABORT, "scsi%d: reset aborted issued command(s)\n", H_NO(cmd));
	if (hostdata->connected)
		dprintk(NDEBUG_ABORT, "scsi%d: reset aborted a connected command\n", H_NO(cmd));
	if (hostdata->disconnected_queue)
		dprintk(NDEBUG_ABORT, "scsi%d: reset aborted disconnected command(s)\n", H_NO(cmd));

	/* NOTE(review): the queued commands are dropped here without
	 * scsi_done() — their completion is left to the mid-level EH,
	 * per the comments above. */
	local_irq_save(flags);
	hostdata->issue_queue = NULL;
	hostdata->connected = NULL;
	hostdata->disconnected_queue = NULL;
#ifdef SUPPORT_TAGS
	free_all_tags();
#endif
	for( i = 0; i < 8; ++i )
		hostdata->busy[i] = 0;
#ifdef REAL_DMA
	hostdata->dma_len = 0;
#endif
	local_irq_restore(flags);

	/* we did no complete reset of all commands, so a wakeup is required */
	return SUCCESS;
#endif /* 1 */
}
2930
2931/* Local Variables: */
2932/* tab-width: 8 */
2933/* End: */
diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c
index 9707b7494a89..2a906d1d34ba 100644
--- a/drivers/scsi/sun3_scsi.c
+++ b/drivers/scsi/sun3_scsi.c
@@ -20,89 +20,58 @@
20 * Generic Generic NCR5380 driver 20 * Generic Generic NCR5380 driver
21 * 21 *
22 * Copyright 1995, Russell King 22 * Copyright 1995, Russell King
23 *
24 * ALPHA RELEASE 1.
25 *
26 * For more information, please consult
27 *
28 * NCR 5380 Family
29 * SCSI Protocol Controller
30 * Databook
31 *
32 * NCR Microelectronics
33 * 1635 Aeroplaza Drive
34 * Colorado Springs, CO 80916
35 * 1+ (719) 578-3400
36 * 1+ (800) 334-5454
37 */ 23 */
38 24
39
40/*
41 * This is from mac_scsi.h, but hey, maybe this is useful for Sun3 too! :)
42 *
43 * Options :
44 *
45 * PARITY - enable parity checking. Not supported.
46 *
47 * SCSI2 - enable support for SCSI-II tagged queueing. Untested.
48 *
49 * USLEEP - enable support for devices that don't disconnect. Untested.
50 */
51
52#define AUTOSENSE
53
54#include <linux/types.h> 25#include <linux/types.h>
55#include <linux/stddef.h>
56#include <linux/ctype.h>
57#include <linux/delay.h> 26#include <linux/delay.h>
58
59#include <linux/module.h> 27#include <linux/module.h>
60#include <linux/signal.h>
61#include <linux/ioport.h> 28#include <linux/ioport.h>
62#include <linux/init.h> 29#include <linux/init.h>
63#include <linux/blkdev.h> 30#include <linux/blkdev.h>
31#include <linux/platform_device.h>
64 32
65#include <asm/io.h> 33#include <asm/io.h>
66
67#include <asm/sun3ints.h>
68#include <asm/dvma.h> 34#include <asm/dvma.h>
69#include <asm/idprom.h>
70#include <asm/machines.h>
71 35
72/* dma on! */
73#define REAL_DMA
74
75#include "scsi.h"
76#include <scsi/scsi_host.h> 36#include <scsi/scsi_host.h>
77#include "sun3_scsi.h" 37#include "sun3_scsi.h"
78#include "NCR5380.h"
79 38
80extern int sun3_map_test(unsigned long, char *); 39/* Definitions for the core NCR5380 driver. */
81 40
82#define USE_WRAPPER 41#define REAL_DMA
83/*#define RESET_BOOT */ 42/* #define SUPPORT_TAGS */
84#define DRIVER_SETUP 43/* minimum number of bytes to do dma on */
44#define DMA_MIN_SIZE 129
85 45
86/* 46/* #define MAX_TAGS 32 */
87 * BUG can be used to trigger a strange code-size related hang on 2.1 kernels
88 */
89#ifdef BUG
90#undef RESET_BOOT
91#undef DRIVER_SETUP
92#endif
93 47
94/* #define SUPPORT_TAGS */ 48#define NCR5380_implementation_fields /* none */
95 49
96#ifdef SUN3_SCSI_VME 50#define NCR5380_read(reg) sun3scsi_read(reg)
97#define ENABLE_IRQ() 51#define NCR5380_write(reg, value) sun3scsi_write(reg, value)
98#else 52
99#define ENABLE_IRQ() enable_irq( IRQ_SUN3_SCSI ); 53#define NCR5380_queue_command sun3scsi_queue_command
100#endif 54#define NCR5380_bus_reset sun3scsi_bus_reset
55#define NCR5380_abort sun3scsi_abort
56#define NCR5380_show_info sun3scsi_show_info
57#define NCR5380_info sun3scsi_info
101 58
59#define NCR5380_dma_read_setup(instance, data, count) \
60 sun3scsi_dma_setup(data, count, 0)
61#define NCR5380_dma_write_setup(instance, data, count) \
62 sun3scsi_dma_setup(data, count, 1)
63#define NCR5380_dma_residual(instance) \
64 sun3scsi_dma_residual(instance)
65#define NCR5380_dma_xfer_len(instance, cmd, phase) \
66 sun3scsi_dma_xfer_len(cmd->SCp.this_residual, cmd, !((phase) & SR_IO))
102 67
103static irqreturn_t scsi_sun3_intr(int irq, void *dummy); 68#define NCR5380_acquire_dma_irq(instance) (1)
104static inline unsigned char sun3scsi_read(int reg); 69#define NCR5380_release_dma_irq(instance)
105static inline void sun3scsi_write(int reg, int value); 70
71#include "NCR5380.h"
72
73
74extern int sun3_map_test(unsigned long, char *);
106 75
107static int setup_can_queue = -1; 76static int setup_can_queue = -1;
108module_param(setup_can_queue, int, 0); 77module_param(setup_can_queue, int, 0);
@@ -117,9 +86,7 @@ module_param(setup_use_tagged_queuing, int, 0);
117static int setup_hostid = -1; 86static int setup_hostid = -1;
118module_param(setup_hostid, int, 0); 87module_param(setup_hostid, int, 0);
119 88
120static struct scsi_cmnd *sun3_dma_setup_done = NULL; 89/* #define RESET_BOOT */
121
122#define RESET_RUN_DONE
123 90
124#define AFTER_RESET_DELAY (HZ/2) 91#define AFTER_RESET_DELAY (HZ/2)
125 92
@@ -129,18 +96,15 @@ static struct scsi_cmnd *sun3_dma_setup_done = NULL;
129/* dvma buffer to allocate -- 32k should hopefully be more than sufficient */ 96/* dvma buffer to allocate -- 32k should hopefully be more than sufficient */
130#define SUN3_DVMA_BUFSIZE 0xe000 97#define SUN3_DVMA_BUFSIZE 0xe000
131 98
132/* minimum number of bytes to do dma on */ 99static struct scsi_cmnd *sun3_dma_setup_done;
133#define SUN3_DMA_MINSIZE 128 100static unsigned char *sun3_scsi_regp;
134
135static volatile unsigned char *sun3_scsi_regp;
136static volatile struct sun3_dma_regs *dregs; 101static volatile struct sun3_dma_regs *dregs;
137#ifndef SUN3_SCSI_VME 102static struct sun3_udc_regs *udc_regs;
138static struct sun3_udc_regs *udc_regs = NULL;
139#endif
140static unsigned char *sun3_dma_orig_addr = NULL; 103static unsigned char *sun3_dma_orig_addr = NULL;
141static unsigned long sun3_dma_orig_count = 0; 104static unsigned long sun3_dma_orig_count = 0;
142static int sun3_dma_active = 0; 105static int sun3_dma_active = 0;
143static unsigned long last_residual = 0; 106static unsigned long last_residual = 0;
107static struct Scsi_Host *default_instance;
144 108
145/* 109/*
146 * NCR 5380 register access functions 110 * NCR 5380 register access functions
@@ -148,12 +112,12 @@ static unsigned long last_residual = 0;
148 112
149static inline unsigned char sun3scsi_read(int reg) 113static inline unsigned char sun3scsi_read(int reg)
150{ 114{
151 return( sun3_scsi_regp[reg] ); 115 return in_8(sun3_scsi_regp + reg);
152} 116}
153 117
154static inline void sun3scsi_write(int reg, int value) 118static inline void sun3scsi_write(int reg, int value)
155{ 119{
156 sun3_scsi_regp[reg] = value; 120 out_8(sun3_scsi_regp + reg, value);
157} 121}
158 122
159#ifndef SUN3_SCSI_VME 123#ifndef SUN3_SCSI_VME
@@ -180,213 +144,10 @@ static inline void sun3_udc_write(unsigned short val, unsigned char reg)
180} 144}
181#endif 145#endif
182 146
183/*
184 * XXX: status debug
185 */
186static struct Scsi_Host *default_instance;
187
188/*
189 * Function : int sun3scsi_detect(struct scsi_host_template * tpnt)
190 *
191 * Purpose : initializes mac NCR5380 driver based on the
192 * command line / compile time port and irq definitions.
193 *
194 * Inputs : tpnt - template for this SCSI adapter.
195 *
196 * Returns : 1 if a host adapter was found, 0 if not.
197 *
198 */
199
200static int __init sun3scsi_detect(struct scsi_host_template *tpnt)
201{
202 unsigned long ioaddr, irq;
203 static int called = 0;
204 struct Scsi_Host *instance;
205#ifdef SUN3_SCSI_VME
206 int i;
207 unsigned long addrs[3] = { IOBASE_SUN3_VMESCSI,
208 IOBASE_SUN3_VMESCSI + 0x4000,
209 0 };
210 unsigned long vecs[3] = { SUN3_VEC_VMESCSI0,
211 SUN3_VEC_VMESCSI1,
212 0 };
213#endif
214
215 /* check that this machine has an onboard 5380 */
216 switch(idprom->id_machtype) {
217#ifdef SUN3_SCSI_VME
218 case SM_SUN3|SM_3_160:
219 case SM_SUN3|SM_3_260:
220 break;
221#else
222 case SM_SUN3|SM_3_50:
223 case SM_SUN3|SM_3_60:
224 break;
225#endif
226
227 default:
228 return 0;
229 }
230
231 if(called)
232 return 0;
233
234#ifdef SUN3_SCSI_VME
235 tpnt->proc_name = "Sun3 5380 VME SCSI";
236#else
237 tpnt->proc_name = "Sun3 5380 SCSI";
238#endif
239
240 /* setup variables */
241 tpnt->can_queue =
242 (setup_can_queue > 0) ? setup_can_queue : CAN_QUEUE;
243 tpnt->cmd_per_lun =
244 (setup_cmd_per_lun > 0) ? setup_cmd_per_lun : CMD_PER_LUN;
245 tpnt->sg_tablesize =
246 (setup_sg_tablesize >= 0) ? setup_sg_tablesize : SG_TABLESIZE;
247
248 if (setup_hostid >= 0)
249 tpnt->this_id = setup_hostid;
250 else {
251 /* use 7 as default */
252 tpnt->this_id = 7;
253 }
254
255#ifdef SUN3_SCSI_VME
256 ioaddr = 0;
257 for (i = 0; addrs[i] != 0; i++) {
258 unsigned char x;
259
260 ioaddr = (unsigned long)sun3_ioremap(addrs[i], PAGE_SIZE,
261 SUN3_PAGE_TYPE_VME16);
262 irq = vecs[i];
263 sun3_scsi_regp = (unsigned char *)ioaddr;
264
265 dregs = (struct sun3_dma_regs *)(((unsigned char *)ioaddr) + 8);
266
267 if (sun3_map_test((unsigned long)dregs, &x)) {
268 unsigned short oldcsr;
269
270 oldcsr = dregs->csr;
271 dregs->csr = 0;
272 udelay(SUN3_DMA_DELAY);
273 if (dregs->csr == 0x1400)
274 break;
275
276 dregs->csr = oldcsr;
277 }
278
279 iounmap((void *)ioaddr);
280 ioaddr = 0;
281 }
282
283 if (!ioaddr)
284 return 0;
285#else
286 irq = IRQ_SUN3_SCSI;
287 ioaddr = (unsigned long)ioremap(IOBASE_SUN3_SCSI, PAGE_SIZE);
288 sun3_scsi_regp = (unsigned char *)ioaddr;
289
290 dregs = (struct sun3_dma_regs *)(((unsigned char *)ioaddr) + 8);
291
292 if((udc_regs = dvma_malloc(sizeof(struct sun3_udc_regs)))
293 == NULL) {
294 printk("SUN3 Scsi couldn't allocate DVMA memory!\n");
295 return 0;
296 }
297#endif
298#ifdef SUPPORT_TAGS
299 if (setup_use_tagged_queuing < 0)
300 setup_use_tagged_queuing = USE_TAGGED_QUEUING;
301#endif
302
303 instance = scsi_register (tpnt, sizeof(struct NCR5380_hostdata));
304 if(instance == NULL)
305 return 0;
306
307 default_instance = instance;
308
309 instance->io_port = (unsigned long) ioaddr;
310 instance->irq = irq;
311
312 NCR5380_init(instance, 0);
313
314 instance->n_io_port = 32;
315
316 ((struct NCR5380_hostdata *)instance->hostdata)->ctrl = 0;
317
318 if (request_irq(instance->irq, scsi_sun3_intr,
319 0, "Sun3SCSI-5380", instance)) {
320#ifndef REAL_DMA
321 printk("scsi%d: IRQ%d not free, interrupts disabled\n",
322 instance->host_no, instance->irq);
323 instance->irq = SCSI_IRQ_NONE;
324#else
325 printk("scsi%d: IRQ%d not free, bailing out\n",
326 instance->host_no, instance->irq);
327 return 0;
328#endif
329 }
330
331 pr_info("scsi%d: %s at port %lX irq", instance->host_no,
332 tpnt->proc_name, instance->io_port);
333 if (instance->irq == SCSI_IRQ_NONE)
334 printk ("s disabled");
335 else
336 printk (" %d", instance->irq);
337 printk(" options CAN_QUEUE=%d CMD_PER_LUN=%d release=%d",
338 instance->can_queue, instance->cmd_per_lun,
339 SUN3SCSI_PUBLIC_RELEASE);
340 printk("\nscsi%d:", instance->host_no);
341 NCR5380_print_options(instance);
342 printk("\n");
343
344 dregs->csr = 0;
345 udelay(SUN3_DMA_DELAY);
346 dregs->csr = CSR_SCSI | CSR_FIFO | CSR_INTR;
347 udelay(SUN3_DMA_DELAY);
348 dregs->fifo_count = 0;
349#ifdef SUN3_SCSI_VME
350 dregs->fifo_count_hi = 0;
351 dregs->dma_addr_hi = 0;
352 dregs->dma_addr_lo = 0;
353 dregs->dma_count_hi = 0;
354 dregs->dma_count_lo = 0;
355
356 dregs->ivect = VME_DATA24 | (instance->irq & 0xff);
357#endif
358
359 called = 1;
360
361#ifdef RESET_BOOT
362 sun3_scsi_reset_boot(instance);
363#endif
364
365 return 1;
366}
367
368int sun3scsi_release (struct Scsi_Host *shpnt)
369{
370 if (shpnt->irq != SCSI_IRQ_NONE)
371 free_irq(shpnt->irq, shpnt);
372
373 iounmap((void *)sun3_scsi_regp);
374
375 NCR5380_exit(shpnt);
376 return 0;
377}
378
379#ifdef RESET_BOOT 147#ifdef RESET_BOOT
380/*
381 * Our 'bus reset on boot' function
382 */
383
384static void sun3_scsi_reset_boot(struct Scsi_Host *instance) 148static void sun3_scsi_reset_boot(struct Scsi_Host *instance)
385{ 149{
386 unsigned long end; 150 unsigned long end;
387
388 NCR5380_local_declare();
389 NCR5380_setup(instance);
390 151
391 /* 152 /*
392 * Do a SCSI reset to clean up the bus during initialization. No 153 * Do a SCSI reset to clean up the bus during initialization. No
@@ -422,11 +183,6 @@ static void sun3_scsi_reset_boot(struct Scsi_Host *instance)
422} 183}
423#endif 184#endif
424 185
425static const char *sun3scsi_info(struct Scsi_Host *spnt)
426{
427 return "";
428}
429
430// safe bits for the CSR 186// safe bits for the CSR
431#define CSR_GOOD 0x060f 187#define CSR_GOOD 0x060f
432 188
@@ -468,7 +224,6 @@ static irqreturn_t scsi_sun3_intr(int irq, void *dummy)
468void sun3_sun3_debug (void) 224void sun3_sun3_debug (void)
469{ 225{
470 unsigned long flags; 226 unsigned long flags;
471 NCR5380_local_declare();
472 227
473 if (default_instance) { 228 if (default_instance) {
474 local_irq_save(flags); 229 local_irq_save(flags);
@@ -732,25 +487,200 @@ static int sun3scsi_dma_finish(int write_flag)
732 487
733} 488}
734 489
735#include "sun3_NCR5380.c" 490#include "atari_NCR5380.c"
736 491
737static struct scsi_host_template driver_template = { 492#ifdef SUN3_SCSI_VME
493#define SUN3_SCSI_NAME "Sun3 NCR5380 VME SCSI"
494#define DRV_MODULE_NAME "sun3_scsi_vme"
495#else
496#define SUN3_SCSI_NAME "Sun3 NCR5380 SCSI"
497#define DRV_MODULE_NAME "sun3_scsi"
498#endif
499
500#define PFX DRV_MODULE_NAME ": "
501
502static struct scsi_host_template sun3_scsi_template = {
503 .module = THIS_MODULE,
504 .proc_name = DRV_MODULE_NAME,
738 .show_info = sun3scsi_show_info, 505 .show_info = sun3scsi_show_info,
739 .name = SUN3_SCSI_NAME, 506 .name = SUN3_SCSI_NAME,
740 .detect = sun3scsi_detect,
741 .release = sun3scsi_release,
742 .info = sun3scsi_info, 507 .info = sun3scsi_info,
743 .queuecommand = sun3scsi_queue_command, 508 .queuecommand = sun3scsi_queue_command,
744 .eh_abort_handler = sun3scsi_abort, 509 .eh_abort_handler = sun3scsi_abort,
745 .eh_bus_reset_handler = sun3scsi_bus_reset, 510 .eh_bus_reset_handler = sun3scsi_bus_reset,
746 .can_queue = CAN_QUEUE, 511 .can_queue = 16,
747 .this_id = 7, 512 .this_id = 7,
748 .sg_tablesize = SG_TABLESIZE, 513 .sg_tablesize = SG_NONE,
749 .cmd_per_lun = CMD_PER_LUN, 514 .cmd_per_lun = 2,
750 .use_clustering = DISABLE_CLUSTERING 515 .use_clustering = DISABLE_CLUSTERING
751}; 516};
752 517
518static int __init sun3_scsi_probe(struct platform_device *pdev)
519{
520 struct Scsi_Host *instance;
521 int error;
522 struct resource *irq, *mem;
523 unsigned char *ioaddr;
524 int host_flags = 0;
525#ifdef SUN3_SCSI_VME
526 int i;
527#endif
528
529 if (setup_can_queue > 0)
530 sun3_scsi_template.can_queue = setup_can_queue;
531 if (setup_cmd_per_lun > 0)
532 sun3_scsi_template.cmd_per_lun = setup_cmd_per_lun;
533 if (setup_sg_tablesize >= 0)
534 sun3_scsi_template.sg_tablesize = setup_sg_tablesize;
535 if (setup_hostid >= 0)
536 sun3_scsi_template.this_id = setup_hostid & 7;
537
538#ifdef SUN3_SCSI_VME
539 ioaddr = NULL;
540 for (i = 0; i < 2; i++) {
541 unsigned char x;
542
543 irq = platform_get_resource(pdev, IORESOURCE_IRQ, i);
544 mem = platform_get_resource(pdev, IORESOURCE_MEM, i);
545 if (!irq || !mem)
546 break;
547
548 ioaddr = sun3_ioremap(mem->start, resource_size(mem),
549 SUN3_PAGE_TYPE_VME16);
550 dregs = (struct sun3_dma_regs *)(ioaddr + 8);
551
552 if (sun3_map_test((unsigned long)dregs, &x)) {
553 unsigned short oldcsr;
554
555 oldcsr = dregs->csr;
556 dregs->csr = 0;
557 udelay(SUN3_DMA_DELAY);
558 if (dregs->csr == 0x1400)
559 break;
560
561 dregs->csr = oldcsr;
562 }
563
564 iounmap(ioaddr);
565 ioaddr = NULL;
566 }
567 if (!ioaddr)
568 return -ENODEV;
569#else
570 irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
571 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
572 if (!irq || !mem)
573 return -ENODEV;
574
575 ioaddr = ioremap(mem->start, resource_size(mem));
576 dregs = (struct sun3_dma_regs *)(ioaddr + 8);
577
578 udc_regs = dvma_malloc(sizeof(struct sun3_udc_regs));
579 if (!udc_regs) {
580 pr_err(PFX "couldn't allocate DVMA memory!\n");
581 iounmap(ioaddr);
582 return -ENOMEM;
583 }
584#endif
585
586 sun3_scsi_regp = ioaddr;
587
588 instance = scsi_host_alloc(&sun3_scsi_template,
589 sizeof(struct NCR5380_hostdata));
590 if (!instance) {
591 error = -ENOMEM;
592 goto fail_alloc;
593 }
594 default_instance = instance;
595
596 instance->io_port = (unsigned long)ioaddr;
597 instance->irq = irq->start;
598
599#ifdef SUPPORT_TAGS
600 host_flags |= setup_use_tagged_queuing > 0 ? FLAG_TAGGED_QUEUING : 0;
601#endif
602
603 NCR5380_init(instance, host_flags);
604
605 error = request_irq(instance->irq, scsi_sun3_intr, 0,
606 "NCR5380", instance);
607 if (error) {
608#ifdef REAL_DMA
609 pr_err(PFX "scsi%d: IRQ %d not free, bailing out\n",
610 instance->host_no, instance->irq);
611 goto fail_irq;
612#else
613 pr_warn(PFX "scsi%d: IRQ %d not free, interrupts disabled\n",
614 instance->host_no, instance->irq);
615 instance->irq = NO_IRQ;
616#endif
617 }
618
619 dregs->csr = 0;
620 udelay(SUN3_DMA_DELAY);
621 dregs->csr = CSR_SCSI | CSR_FIFO | CSR_INTR;
622 udelay(SUN3_DMA_DELAY);
623 dregs->fifo_count = 0;
624#ifdef SUN3_SCSI_VME
625 dregs->fifo_count_hi = 0;
626 dregs->dma_addr_hi = 0;
627 dregs->dma_addr_lo = 0;
628 dregs->dma_count_hi = 0;
629 dregs->dma_count_lo = 0;
630
631 dregs->ivect = VME_DATA24 | (instance->irq & 0xff);
632#endif
633
634#ifdef RESET_BOOT
635 sun3_scsi_reset_boot(instance);
636#endif
637
638 error = scsi_add_host(instance, NULL);
639 if (error)
640 goto fail_host;
641
642 platform_set_drvdata(pdev, instance);
643
644 scsi_scan_host(instance);
645 return 0;
646
647fail_host:
648 if (instance->irq != NO_IRQ)
649 free_irq(instance->irq, instance);
650fail_irq:
651 NCR5380_exit(instance);
652 scsi_host_put(instance);
653fail_alloc:
654 if (udc_regs)
655 dvma_free(udc_regs);
656 iounmap(sun3_scsi_regp);
657 return error;
658}
659
660static int __exit sun3_scsi_remove(struct platform_device *pdev)
661{
662 struct Scsi_Host *instance = platform_get_drvdata(pdev);
663
664 scsi_remove_host(instance);
665 if (instance->irq != NO_IRQ)
666 free_irq(instance->irq, instance);
667 NCR5380_exit(instance);
668 scsi_host_put(instance);
669 if (udc_regs)
670 dvma_free(udc_regs);
671 iounmap(sun3_scsi_regp);
672 return 0;
673}
674
675static struct platform_driver sun3_scsi_driver = {
676 .remove = __exit_p(sun3_scsi_remove),
677 .driver = {
678 .name = DRV_MODULE_NAME,
679 .owner = THIS_MODULE,
680 },
681};
753 682
754#include "scsi_module.c" 683module_platform_driver_probe(sun3_scsi_driver, sun3_scsi_probe);
755 684
685MODULE_ALIAS("platform:" DRV_MODULE_NAME);
756MODULE_LICENSE("GPL"); 686MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/sun3_scsi.h b/drivers/scsi/sun3_scsi.h
index e96a37cf06ac..d22745fae328 100644
--- a/drivers/scsi/sun3_scsi.h
+++ b/drivers/scsi/sun3_scsi.h
@@ -13,95 +13,11 @@
13 * (Unix and Linux consulting and custom programming) 13 * (Unix and Linux consulting and custom programming)
14 * drew@colorado.edu 14 * drew@colorado.edu
15 * +1 (303) 440-4894 15 * +1 (303) 440-4894
16 *
17 * ALPHA RELEASE 1.
18 *
19 * For more information, please consult
20 *
21 * NCR 5380 Family
22 * SCSI Protocol Controller
23 * Databook
24 *
25 * NCR Microelectronics
26 * 1635 Aeroplaza Drive
27 * Colorado Springs, CO 80916
28 * 1+ (719) 578-3400
29 * 1+ (800) 334-5454
30 */ 16 */
31 17
32#ifndef SUN3_SCSI_H 18#ifndef SUN3_SCSI_H
33#define SUN3_SCSI_H 19#define SUN3_SCSI_H
34 20
35#define SUN3SCSI_PUBLIC_RELEASE 1
36
37/*
38 * Int: level 2 autovector
39 * IO: type 1, base 0x00140000, 5 bits phys space: A<4..0>
40 */
41#define IRQ_SUN3_SCSI 2
42#define IOBASE_SUN3_SCSI 0x00140000
43
44#define IOBASE_SUN3_VMESCSI 0xff200000
45
46static int sun3scsi_abort(struct scsi_cmnd *);
47static int sun3scsi_detect (struct scsi_host_template *);
48static const char *sun3scsi_info (struct Scsi_Host *);
49static int sun3scsi_bus_reset(struct scsi_cmnd *);
50static int sun3scsi_queue_command(struct Scsi_Host *, struct scsi_cmnd *);
51static int sun3scsi_release (struct Scsi_Host *);
52
53#ifndef CMD_PER_LUN
54#define CMD_PER_LUN 2
55#endif
56
57#ifndef CAN_QUEUE
58#define CAN_QUEUE 16
59#endif
60
61#ifndef SG_TABLESIZE
62#define SG_TABLESIZE SG_NONE
63#endif
64
65#ifndef MAX_TAGS
66#define MAX_TAGS 32
67#endif
68
69#ifndef USE_TAGGED_QUEUING
70#define USE_TAGGED_QUEUING 1
71#endif
72
73#include <scsi/scsicam.h>
74
75#ifdef SUN3_SCSI_VME
76#define SUN3_SCSI_NAME "Sun3 NCR5380 VME SCSI"
77#else
78#define SUN3_SCSI_NAME "Sun3 NCR5380 SCSI"
79#endif
80
81#define NCR5380_implementation_fields \
82 int port, ctrl
83
84#define NCR5380_local_declare() \
85 struct Scsi_Host *_instance
86
87#define NCR5380_setup(instance) \
88 _instance = instance
89
90#define NCR5380_read(reg) sun3scsi_read(reg)
91#define NCR5380_write(reg, value) sun3scsi_write(reg, value)
92
93#define NCR5380_intr sun3scsi_intr
94#define NCR5380_queue_command sun3scsi_queue_command
95#define NCR5380_bus_reset sun3scsi_bus_reset
96#define NCR5380_abort sun3scsi_abort
97#define NCR5380_show_info sun3scsi_show_info
98#define NCR5380_dma_xfer_len(i, cmd, phase) \
99 sun3scsi_dma_xfer_len(cmd->SCp.this_residual,cmd,((phase) & SR_IO) ? 0 : 1)
100
101#define NCR5380_dma_write_setup(instance, data, count) sun3scsi_dma_setup(data, count, 1)
102#define NCR5380_dma_read_setup(instance, data, count) sun3scsi_dma_setup(data, count, 0)
103#define NCR5380_dma_residual sun3scsi_dma_residual
104
105/* additional registers - mainly DMA control regs */ 21/* additional registers - mainly DMA control regs */
106/* these start at regbase + 8 -- directly after the NCR regs */ 22/* these start at regbase + 8 -- directly after the NCR regs */
107struct sun3_dma_regs { 23struct sun3_dma_regs {
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index e59e6f96b725..5d00e514ff28 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -820,9 +820,7 @@ static int sym53c8xx_slave_configure(struct scsi_device *sdev)
820 if (reqtags > SYM_CONF_MAX_TAG) 820 if (reqtags > SYM_CONF_MAX_TAG)
821 reqtags = SYM_CONF_MAX_TAG; 821 reqtags = SYM_CONF_MAX_TAG;
822 depth_to_use = reqtags ? reqtags : 1; 822 depth_to_use = reqtags ? reqtags : 1;
823 scsi_adjust_queue_depth(sdev, 823 scsi_change_queue_depth(sdev, depth_to_use);
824 sdev->tagged_supported ? MSG_SIMPLE_TAG : 0,
825 depth_to_use);
826 lp->s.scdev_depth = depth_to_use; 824 lp->s.scdev_depth = depth_to_use;
827 sym_tune_dev_queuing(tp, sdev->lun, reqtags); 825 sym_tune_dev_queuing(tp, sdev->lun, reqtags);
828 826
diff --git a/drivers/scsi/t128.c b/drivers/scsi/t128.c
index 8cc80931df14..87828acbf7c6 100644
--- a/drivers/scsi/t128.c
+++ b/drivers/scsi/t128.c
@@ -1,4 +1,3 @@
1#define AUTOSENSE
2#define PSEUDO_DMA 1#define PSEUDO_DMA
3 2
4/* 3/*
@@ -12,8 +11,6 @@
12 * drew@colorado.edu 11 * drew@colorado.edu
13 * +1 (303) 440-4894 12 * +1 (303) 440-4894
14 * 13 *
15 * DISTRIBUTION RELEASE 3.
16 *
17 * For more information, please consult 14 * For more information, please consult
18 * 15 *
19 * Trantor Systems, Ltd. 16 * Trantor Systems, Ltd.
@@ -24,40 +21,9 @@
24 * 5415 Randall Place 21 * 5415 Randall Place
25 * Fremont, CA 94538 22 * Fremont, CA 94538
26 * 1+ (415) 770-1400, FAX 1+ (415) 770-9910 23 * 1+ (415) 770-1400, FAX 1+ (415) 770-9910
27 *
28 * and
29 *
30 * NCR 5380 Family
31 * SCSI Protocol Controller
32 * Databook
33 *
34 * NCR Microelectronics
35 * 1635 Aeroplaza Drive
36 * Colorado Springs, CO 80916
37 * 1+ (719) 578-3400
38 * 1+ (800) 334-5454
39 */ 24 */
40 25
41/* 26/*
42 * Options :
43 * AUTOSENSE - if defined, REQUEST SENSE will be performed automatically
44 * for commands that return with a CHECK CONDITION status.
45 *
46 * PSEUDO_DMA - enables PSEUDO-DMA hardware, should give a 3-4X performance
47 * increase compared to polled I/O.
48 *
49 * PARITY - enable parity checking. Not supported.
50 *
51 * SCSI2 - enable support for SCSI-II tagged queueing. Untested.
52 *
53 *
54 * UNSAFE - leave interrupts enabled during pseudo-DMA transfers. You
55 * only really want to use this if you're having a problem with
56 * dropped characters during high speed communications, and even
57 * then, you're going to be better off twiddling with transfersize.
58 *
59 * USLEEP - enable support for devices that don't disconnect. Untested.
60 *
61 * The card is detected and initialized in one of several ways : 27 * The card is detected and initialized in one of several ways :
62 * 1. Autoprobe (default) - since the board is memory mapped, 28 * 1. Autoprobe (default) - since the board is memory mapped,
63 * a BIOS signature is scanned for to locate the registers. 29 * a BIOS signature is scanned for to locate the registers.
@@ -111,7 +77,6 @@
111#include <linux/module.h> 77#include <linux/module.h>
112#include <linux/delay.h> 78#include <linux/delay.h>
113 79
114#include "scsi.h"
115#include <scsi/scsi_host.h> 80#include <scsi/scsi_host.h>
116#include "t128.h" 81#include "t128.h"
117#define AUTOPROBE_IRQ 82#define AUTOPROBE_IRQ
@@ -148,6 +113,7 @@ static struct signature {
148 113
149#define NO_SIGNATURES ARRAY_SIZE(signatures) 114#define NO_SIGNATURES ARRAY_SIZE(signatures)
150 115
116#ifndef MODULE
151/* 117/*
152 * Function : t128_setup(char *str, int *ints) 118 * Function : t128_setup(char *str, int *ints)
153 * 119 *
@@ -158,9 +124,13 @@ static struct signature {
158 * 124 *
159 */ 125 */
160 126
161void __init t128_setup(char *str, int *ints){ 127static int __init t128_setup(char *str)
128{
162 static int commandline_current = 0; 129 static int commandline_current = 0;
163 int i; 130 int i;
131 int ints[10];
132
133 get_options(str, ARRAY_SIZE(ints), ints);
164 if (ints[0] != 2) 134 if (ints[0] != 2)
165 printk("t128_setup : usage t128=address,irq\n"); 135 printk("t128_setup : usage t128=address,irq\n");
166 else 136 else
@@ -174,8 +144,12 @@ void __init t128_setup(char *str, int *ints){
174 } 144 }
175 ++commandline_current; 145 ++commandline_current;
176 } 146 }
147 return 1;
177} 148}
178 149
150__setup("t128=", t128_setup);
151#endif
152
179/* 153/*
180 * Function : int t128_detect(struct scsi_host_template * tpnt) 154 * Function : int t128_detect(struct scsi_host_template * tpnt)
181 * 155 *
@@ -189,17 +163,14 @@ void __init t128_setup(char *str, int *ints){
189 * 163 *
190 */ 164 */
191 165
192int __init t128_detect(struct scsi_host_template * tpnt){ 166static int __init t128_detect(struct scsi_host_template *tpnt)
167{
193 static int current_override = 0, current_base = 0; 168 static int current_override = 0, current_base = 0;
194 struct Scsi_Host *instance; 169 struct Scsi_Host *instance;
195 unsigned long base; 170 unsigned long base;
196 void __iomem *p; 171 void __iomem *p;
197 int sig, count; 172 int sig, count;
198 173
199 tpnt->proc_name = "t128";
200 tpnt->show_info = t128_show_info;
201 tpnt->write_info = t128_write_info;
202
203 for (count = 0; current_override < NO_OVERRIDES; ++current_override) { 174 for (count = 0; current_override < NO_OVERRIDES; ++current_override) {
204 base = 0; 175 base = 0;
205 p = NULL; 176 p = NULL;
@@ -254,15 +225,19 @@ found:
254 else 225 else
255 instance->irq = NCR5380_probe_irq(instance, T128_IRQS); 226 instance->irq = NCR5380_probe_irq(instance, T128_IRQS);
256 227
257 if (instance->irq != SCSI_IRQ_NONE) 228 /* Compatibility with documented NCR5380 kernel parameters */
229 if (instance->irq == 255)
230 instance->irq = NO_IRQ;
231
232 if (instance->irq != NO_IRQ)
258 if (request_irq(instance->irq, t128_intr, 0, "t128", 233 if (request_irq(instance->irq, t128_intr, 0, "t128",
259 instance)) { 234 instance)) {
260 printk("scsi%d : IRQ%d not free, interrupts disabled\n", 235 printk("scsi%d : IRQ%d not free, interrupts disabled\n",
261 instance->host_no, instance->irq); 236 instance->host_no, instance->irq);
262 instance->irq = SCSI_IRQ_NONE; 237 instance->irq = NO_IRQ;
263 } 238 }
264 239
265 if (instance->irq == SCSI_IRQ_NONE) { 240 if (instance->irq == NO_IRQ) {
266 printk("scsi%d : interrupts not enabled. for better interactive performance,\n", instance->host_no); 241 printk("scsi%d : interrupts not enabled. for better interactive performance,\n", instance->host_no);
267 printk("scsi%d : please jumper the board for a free IRQ.\n", instance->host_no); 242 printk("scsi%d : please jumper the board for a free IRQ.\n", instance->host_no);
268 } 243 }
@@ -271,16 +246,6 @@ found:
271 printk("scsi%d : irq = %d\n", instance->host_no, instance->irq); 246 printk("scsi%d : irq = %d\n", instance->host_no, instance->irq);
272#endif 247#endif
273 248
274 printk("scsi%d : at 0x%08lx", instance->host_no, instance->base);
275 if (instance->irq == SCSI_IRQ_NONE)
276 printk (" interrupts disabled");
277 else
278 printk (" irq %d", instance->irq);
279 printk(" options CAN_QUEUE=%d CMD_PER_LUN=%d release=%d",
280 CAN_QUEUE, CMD_PER_LUN, T128_PUBLIC_RELEASE);
281 NCR5380_print_options(instance);
282 printk("\n");
283
284 ++current_override; 249 ++current_override;
285 ++count; 250 ++count;
286 } 251 }
@@ -291,7 +256,7 @@ static int t128_release(struct Scsi_Host *shost)
291{ 256{
292 NCR5380_local_declare(); 257 NCR5380_local_declare();
293 NCR5380_setup(shost); 258 NCR5380_setup(shost);
294 if (shost->irq) 259 if (shost->irq != NO_IRQ)
295 free_irq(shost->irq, shost); 260 free_irq(shost->irq, shost);
296 NCR5380_exit(shost); 261 NCR5380_exit(shost);
297 if (shost->io_port && shost->n_io_port) 262 if (shost->io_port && shost->n_io_port)
@@ -321,8 +286,8 @@ static int t128_release(struct Scsi_Host *shost)
321 * and matching the H_C_S coordinates to what DOS uses. 286 * and matching the H_C_S coordinates to what DOS uses.
322 */ 287 */
323 288
324int t128_biosparam(struct scsi_device *sdev, struct block_device *bdev, 289static int t128_biosparam(struct scsi_device *sdev, struct block_device *bdev,
325 sector_t capacity, int * ip) 290 sector_t capacity, int *ip)
326{ 291{
327 ip[0] = 64; 292 ip[0] = 64;
328 ip[1] = 32; 293 ip[1] = 32;
@@ -430,6 +395,10 @@ static struct scsi_host_template driver_template = {
430 .name = "Trantor T128/T128F/T228", 395 .name = "Trantor T128/T128F/T228",
431 .detect = t128_detect, 396 .detect = t128_detect,
432 .release = t128_release, 397 .release = t128_release,
398 .proc_name = "t128",
399 .show_info = t128_show_info,
400 .write_info = t128_write_info,
401 .info = t128_info,
433 .queuecommand = t128_queue_command, 402 .queuecommand = t128_queue_command,
434 .eh_abort_handler = t128_abort, 403 .eh_abort_handler = t128_abort,
435 .eh_bus_reset_handler = t128_bus_reset, 404 .eh_bus_reset_handler = t128_bus_reset,
diff --git a/drivers/scsi/t128.h b/drivers/scsi/t128.h
index fd68cecc62af..2c7371454dfd 100644
--- a/drivers/scsi/t128.h
+++ b/drivers/scsi/t128.h
@@ -8,8 +8,6 @@
8 * drew@colorado.edu 8 * drew@colorado.edu
9 * +1 (303) 440-4894 9 * +1 (303) 440-4894
10 * 10 *
11 * DISTRIBUTION RELEASE 3.
12 *
13 * For more information, please consult 11 * For more information, please consult
14 * 12 *
15 * Trantor Systems, Ltd. 13 * Trantor Systems, Ltd.
@@ -20,25 +18,11 @@
20 * 5415 Randall Place 18 * 5415 Randall Place
21 * Fremont, CA 94538 19 * Fremont, CA 94538
22 * 1+ (415) 770-1400, FAX 1+ (415) 770-9910 20 * 1+ (415) 770-1400, FAX 1+ (415) 770-9910
23 *
24 * and
25 *
26 * NCR 5380 Family
27 * SCSI Protocol Controller
28 * Databook
29 *
30 * NCR Microelectronics
31 * 1635 Aeroplaza Drive
32 * Colorado Springs, CO 80916
33 * 1+ (719) 578-3400
34 * 1+ (800) 334-5454
35 */ 21 */
36 22
37#ifndef T128_H 23#ifndef T128_H
38#define T128_H 24#define T128_H
39 25
40#define T128_PUBLIC_RELEASE 3
41
42#define TDEBUG 0 26#define TDEBUG 0
43#define TDEBUG_INIT 0x1 27#define TDEBUG_INIT 0x1
44#define TDEBUG_TRANSFER 0x2 28#define TDEBUG_TRANSFER 0x2
@@ -88,12 +72,6 @@
88#define T_DATA_REG_OFFSET 0x1e00 /* rw 512 bytes long */ 72#define T_DATA_REG_OFFSET 0x1e00 /* rw 512 bytes long */
89 73
90#ifndef ASM 74#ifndef ASM
91static int t128_abort(struct scsi_cmnd *);
92static int t128_biosparam(struct scsi_device *, struct block_device *,
93 sector_t, int*);
94static int t128_detect(struct scsi_host_template *);
95static int t128_queue_command(struct Scsi_Host *, struct scsi_cmnd *);
96static int t128_bus_reset(struct scsi_cmnd *);
97 75
98#ifndef CMD_PER_LUN 76#ifndef CMD_PER_LUN
99#define CMD_PER_LUN 2 77#define CMD_PER_LUN 2
@@ -134,6 +112,7 @@ static int t128_bus_reset(struct scsi_cmnd *);
134#define NCR5380_queue_command t128_queue_command 112#define NCR5380_queue_command t128_queue_command
135#define NCR5380_abort t128_abort 113#define NCR5380_abort t128_abort
136#define NCR5380_bus_reset t128_bus_reset 114#define NCR5380_bus_reset t128_bus_reset
115#define NCR5380_info t128_info
137#define NCR5380_show_info t128_show_info 116#define NCR5380_show_info t128_show_info
138#define NCR5380_write_info t128_write_info 117#define NCR5380_write_info t128_write_info
139 118
diff --git a/drivers/scsi/tmscsim.c b/drivers/scsi/tmscsim.c
deleted file mode 100644
index 764575726c85..000000000000
--- a/drivers/scsi/tmscsim.c
+++ /dev/null
@@ -1,2620 +0,0 @@
1/************************************************************************
2 * FILE NAME : TMSCSIM.C *
3 * BY : C.L. Huang, ching@tekram.com.tw *
4 * Description: Device Driver for Tekram DC-390(T) PCI SCSI *
5 * Bus Master Host Adapter *
6 * (C)Copyright 1995-1996 Tekram Technology Co., Ltd. *
7 ************************************************************************
8 * (C) Copyright: put under GNU GPL in 10/96 *
9 * (see Documentation/scsi/tmscsim.txt) *
10 ************************************************************************
11 * $Id: tmscsim.c,v 2.60.2.30 2000/12/20 01:07:12 garloff Exp $ *
12 * Enhancements and bugfixes by *
13 * Kurt Garloff <kurt@garloff.de> <garloff@suse.de> *
14 ************************************************************************
15 * HISTORY: *
16 * *
17 * REV# DATE NAME DESCRIPTION *
18 * 1.00 96/04/24 CLH First release *
19 * 1.01 96/06/12 CLH Fixed bug of Media Change for Removable *
20 * Device, scan all LUN. Support Pre2.0.10 *
21 * 1.02 96/06/18 CLH Fixed bug of Command timeout ... *
22 * 1.03 96/09/25 KG Added tmscsim_proc_info() *
23 * 1.04 96/10/11 CLH Updating for support KV 2.0.x *
24 * 1.05 96/10/18 KG Fixed bug in DC390_abort(null ptr deref)*
25 * 1.06 96/10/25 KG Fixed module support *
26 * 1.07 96/11/09 KG Fixed tmscsim_proc_info() *
27 * 1.08 96/11/18 KG Fixed null ptr in DC390_Disconnect() *
28 * 1.09 96/11/30 KG Added register the allocated IO space *
29 * 1.10 96/12/05 CLH Modified tmscsim_proc_info(), and reset *
30 * pending interrupt in DC390_detect() *
31 * 1.11 97/02/05 KG/CLH Fixeds problem with partitions greater *
32 * than 1GB *
33 * 1.12 98/02/15 MJ Rewritten PCI probing *
34 * 1.13 98/04/08 KG Support for non DC390, __initfunc decls,*
35 * changed max devs from 10 to 16 *
36 * 1.14a 98/05/05 KG Dynamic DCB allocation, add-single-dev *
37 * for LUNs if LUN_SCAN (BIOS) not set *
38 * runtime config using /proc interface *
39 * 1.14b 98/05/06 KG eliminated cli (); sti (); spinlocks *
40 * 1.14c 98/05/07 KG 2.0.x compatibility *
41 * 1.20a 98/05/07 KG changed names of funcs to be consistent *
42 * DC390_ (entry points), dc390_ (internal)*
43 * reworked locking *
44 * 1.20b 98/05/12 KG bugs: version, kfree, _ctmp *
45 * debug output *
46 * 1.20c 98/05/12 KG bugs: kfree, parsing, EEpromDefaults *
47 * 1.20d 98/05/14 KG bugs: list linkage, clear flag after *
48 * reset on startup, code cleanup *
49 * 1.20e 98/05/15 KG spinlock comments, name space cleanup *
50 * pLastDCB now part of ACB structure *
51 * added stats, timeout for 2.1, TagQ bug *
52 * RESET and INQUIRY interface commands *
53 * 1.20f 98/05/18 KG spinlocks fixes, max_lun fix, free DCBs *
54 * for missing LUNs, pending int *
55 * 1.20g 98/05/19 KG Clean up: Avoid short *
56 * 1.20h 98/05/21 KG Remove AdaptSCSIID, max_lun ... *
57 * 1.20i 98/05/21 KG Aiiie: Bug with TagQMask *
58 * 1.20j 98/05/24 KG Handle STAT_BUSY, handle pACB->pLinkDCB *
59 * == 0 in remove_dev and DoingSRB_Done *
60 * 1.20k 98/05/25 KG DMA_INT (experimental) *
61 * 1.20l 98/05/27 KG remove DMA_INT; DMA_IDLE cmds added; *
62 * 1.20m 98/06/10 KG glitch configurable; made some global *
63 * vars part of ACB; use DC390_readX *
64 * 1.20n 98/06/11 KG startup params *
65 * 1.20o 98/06/15 KG added TagMaxNum to boot/module params *
66 * Device Nr -> Idx, TagMaxNum power of 2 *
67 * 1.20p 98/06/17 KG Docu updates. Reset depends on settings *
68 * pci_set_master added; 2.0.xx: pcibios_* *
69 * used instead of MechNum things ... *
70 * 1.20q 98/06/23 KG Changed defaults. Added debug code for *
71 * removable media and fixed it. TagMaxNum *
72 * fixed for DC390. Locking: ACB, DRV for *
73 * better IRQ sharing. Spelling: Queueing *
74 * Parsing and glitch_cfg changes. Display *
75 * real SyncSpeed value. Made DisConn *
76 * functional (!) *
77 * 1.20r 98/06/30 KG Debug macros, allow disabling DsCn, set *
78 * BIT4 in CtrlR4, EN_PAGE_INT, 2.0 module *
79 * param -1 fixed. *
80 * 1.20s 98/08/20 KG Debug info on abort(), try to check PCI,*
81 * phys_to_bus instead of phys_to_virt, *
82 * fixed sel. process, fixed locking, *
83 * added MODULE_XXX infos, changed IRQ *
84 * request flags, disable DMA_INT *
85 * 1.20t 98/09/07 KG TagQ report fixed; Write Erase DMA Stat;*
86 * initfunc -> __init; better abort; *
87 * Timeout for XFER_DONE & BLAST_COMPLETE; *
88 * Allow up to 33 commands being processed *
89 * 2.0a 98/10/14 KG Max Cmnds back to 17. DMA_Stat clearing *
90 * all flags. Clear within while() loops *
91 * in DataIn_0/Out_0. Null ptr in dumpinfo *
92 * for pSRB==0. Better locking during init.*
93 * bios_param() now respects part. table. *
94 * 2.0b 98/10/24 KG Docu fixes. Timeout Msg in DMA Blast. *
95 * Disallow illegal idx in INQUIRY/REMOVE *
96 * 2.0c 98/11/19 KG Cleaned up detect/init for SMP boxes, *
97 * Write Erase DMA (1.20t) caused problems *
98 * 2.0d 98/12/25 KG Christmas release ;-) Message handling *
99 * completely reworked. Handle target ini- *
100 * tiated SDTR correctly. *
101 * 2.0d1 99/01/25 KG Try to handle RESTORE_PTR *
102 * 2.0d2 99/02/08 KG Check for failure of kmalloc, correct *
103 * inclusion of scsicam.h, DelayReset *
104 * 2.0d3 99/05/31 KG DRIVER_OK -> DID_OK, DID_NO_CONNECT, *
105 * detect Target mode and warn. *
106 * pcmd->result handling cleaned up. *
107 * 2.0d4 99/06/01 KG Cleaned selection process. Found bug *
108 * which prevented more than 16 tags. Now: *
109 * 24. SDTR cleanup. Cleaner multi-LUN *
110 * handling. Don't modify ControlRegs/FIFO *
111 * when connected. *
112 * 2.0d5 99/06/01 KG Clear DevID, Fix INQUIRY after cfg chg. *
113 * 2.0d6 99/06/02 KG Added ADD special command to allow cfg. *
114 * before detection. Reset SYNC_NEGO_DONE *
115 * after a bus reset. *
116 * 2.0d7 99/06/03 KG Fixed bugs wrt add,remove commands *
117 * 2.0d8 99/06/04 KG Removed copying of cmnd into CmdBlock. *
118 * Fixed Oops in _release(). *
119 * 2.0d9 99/06/06 KG Also tag queue INQUIRY, T_U_R, ... *
120 * Allow arb. no. of Tagged Cmnds. Max 32 *
121 * 2.0d1099/06/20 KG TagMaxNo changes now honoured! Queueing *
122 * clearified (renamed ..) TagMask handling*
123 * cleaned. *
124 * 2.0d1199/06/28 KG cmd->result now identical to 2.0d2 *
125 * 2.0d1299/07/04 KG Changed order of processing in IRQ *
126 * 2.0d1399/07/05 KG Don't update DCB fields if removed *
127 * 2.0d1499/07/05 KG remove_dev: Move kfree() to the end *
128 * 2.0d1599/07/12 KG use_new_eh_code: 0, ULONG -> UINT where *
129 * appropriate *
130 * 2.0d1699/07/13 KG Reenable StartSCSI interrupt, Retry msg *
131 * 2.0d1799/07/15 KG Remove debug msg. Disable recfg. when *
132 * there are queued cmnds *
133 * 2.0d1899/07/18 KG Selection timeout: Don't requeue *
134 * 2.0d1999/07/18 KG Abort: Only call scsi_done if dequeued *
135 * 2.0d2099/07/19 KG Rst_Detect: DoingSRB_Done *
136 * 2.0d2199/08/15 KG dev_id for request/free_irq, cmnd[0] for*
137 * RETRY, SRBdone does DID_ABORT for the *
138 * cmd passed by DC390_reset() *
139 * 2.0d2299/08/25 KG dev_id fixed. can_queue: 42 *
140 * 2.0d2399/08/25 KG Removed some debugging code. dev_id *
141 * now is set to pACB. Use u8,u16,u32. *
142 * 2.0d2499/11/14 KG Unreg. I/O if failed IRQ alloc. Call *
143 * done () w/ DID_BAD_TARGET in case of *
144 * missing DCB. We are old EH!! *
145 * 2.0d2500/01/15 KG 2.3.3x compat from Andreas Schultz *
146 * set unique_id. Disable RETRY message. *
147 * 2.0d2600/01/29 KG Go to new EH. *
148 * 2.0d2700/01/31 KG ... but maintain 2.0 compat. *
149 * and fix DCB freeing *
150 * 2.0d2800/02/14 KG Queue statistics fixed, dump special cmd*
151 * Waiting_Timer for failed StartSCSI *
152 * New EH: Don't return cmnds to ML on RST *
153 * Use old EH (don't have new EH fns yet) *
154 * Reset: Unlock, but refuse to queue *
155 * 2.3 __setup function *
156 * 2.0e 00/05/22 KG Return residual for 2.3 *
157 * 2.0e1 00/05/25 KG Compile fixes for 2.3.99 *
158 * 2.0e2 00/05/27 KG Jeff Garzik's pci_enable_device() *
159 * 2.0e3 00/09/29 KG Some 2.4 changes. Don't try Sync Nego *
160 * before INQUIRY has reported ability. *
161 * Recognise INQUIRY as scanning command. *
162 * 2.0e4 00/10/13 KG Allow compilation into 2.4 kernel *
163 * 2.0e5 00/11/17 KG Store Inq.flags in DCB *
164 * 2.0e6 00/11/22 KG 2.4 init function (Thx to O.Schumann) *
165 * 2.4 PCI device table (Thx to A.Richter) *
166 * 2.0e7 00/11/28 KG Allow overriding of BIOS settings *
167 * 2.0f 00/12/20 KG Handle failed INQUIRYs during scan *
168 * 2.1a 03/11/29 GL, KG Initial fixing for 2.6. Convert to *
169 * use the current PCI-mapping API, update *
170 * command-queuing. *
171 * 2.1b 04/04/13 GL Fix for 64-bit platforms *
172 * 2.1b1 04/01/31 GL (applied 05.04) Remove internal *
173 * command-queuing. *
174 * 2.1b2 04/02/01 CH (applied 05.04) Fix error-handling *
175 * 2.1c 04/05/23 GL Update to use the new pci_driver API, *
176 * some scsi EH updates, more cleanup. *
177 * 2.1d 04/05/27 GL Moved setting of scan_devices to *
178 * slave_alloc/_configure/_destroy, as *
179 * suggested by CH. *
180 ***********************************************************************/
181
182/* DEBUG options */
183//#define DC390_DEBUG0
184//#define DC390_DEBUG1
185//#define DC390_DCBDEBUG
186//#define DC390_PARSEDEBUG
187//#define DC390_REMOVABLEDEBUG
188//#define DC390_LOCKDEBUG
189
190//#define NOP do{}while(0)
191#define C_NOP
192
193/* Debug definitions */
194#ifdef DC390_DEBUG0
195# define DEBUG0(x) x
196#else
197# define DEBUG0(x) C_NOP
198#endif
199#ifdef DC390_DEBUG1
200# define DEBUG1(x) x
201#else
202# define DEBUG1(x) C_NOP
203#endif
204#ifdef DC390_DCBDEBUG
205# define DCBDEBUG(x) x
206#else
207# define DCBDEBUG(x) C_NOP
208#endif
209#ifdef DC390_PARSEDEBUG
210# define PARSEDEBUG(x) x
211#else
212# define PARSEDEBUG(x) C_NOP
213#endif
214#ifdef DC390_REMOVABLEDEBUG
215# define REMOVABLEDEBUG(x) x
216#else
217# define REMOVABLEDEBUG(x) C_NOP
218#endif
219#define DCBDEBUG1(x) C_NOP
220
221#include <linux/module.h>
222#include <linux/delay.h>
223#include <linux/signal.h>
224#include <linux/errno.h>
225#include <linux/kernel.h>
226#include <linux/ioport.h>
227#include <linux/pci.h>
228#include <linux/proc_fs.h>
229#include <linux/string.h>
230#include <linux/mm.h>
231#include <linux/blkdev.h>
232#include <linux/timer.h>
233#include <linux/interrupt.h>
234#include <linux/init.h>
235#include <linux/spinlock.h>
236#include <linux/slab.h>
237#include <asm/io.h>
238
239#include <scsi/scsi.h>
240#include <scsi/scsi_cmnd.h>
241#include <scsi/scsi_device.h>
242#include <scsi/scsi_host.h>
243#include <scsi/scsicam.h>
244#include <scsi/scsi_tcq.h>
245
246
247#define DC390_BANNER "Tekram DC390/AM53C974"
248#define DC390_VERSION "2.1d 2004-05-27"
249
250#define PCI_DEVICE_ID_AMD53C974 PCI_DEVICE_ID_AMD_SCSI
251
252#include "tmscsim.h"
253
254
255static void dc390_DataOut_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus);
256static void dc390_DataIn_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus);
257static void dc390_Command_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus);
258static void dc390_Status_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus);
259static void dc390_MsgOut_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus);
260static void dc390_MsgIn_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus);
261static void dc390_DataOutPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus);
262static void dc390_DataInPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus);
263static void dc390_CommandPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus);
264static void dc390_StatusPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus);
265static void dc390_MsgOutPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus);
266static void dc390_MsgInPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus);
267static void dc390_Nop_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus);
268static void dc390_Nop_1( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus);
269
270static void dc390_SetXferRate( struct dc390_acb* pACB, struct dc390_dcb* pDCB );
271static void dc390_Disconnect( struct dc390_acb* pACB );
272static void dc390_Reselect( struct dc390_acb* pACB );
273static void dc390_SRBdone( struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb* pSRB );
274static void dc390_ScsiRstDetect( struct dc390_acb* pACB );
275static void dc390_EnableMsgOut_Abort(struct dc390_acb*, struct dc390_srb*);
276static void dc390_dumpinfo(struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb* pSRB);
277static void dc390_ResetDevParam(struct dc390_acb* pACB);
278
279static u32 dc390_laststatus = 0;
280static u8 dc390_adapterCnt = 0;
281
282static int disable_clustering;
283module_param(disable_clustering, int, S_IRUGO);
284MODULE_PARM_DESC(disable_clustering, "If you experience problems with your devices, try setting to 1");
285
286/* Startup values, to be overriden on the commandline */
287static int tmscsim[] = {-2, -2, -2, -2, -2, -2};
288
289module_param_array(tmscsim, int, NULL, 0);
290MODULE_PARM_DESC(tmscsim, "Host SCSI ID, Speed (0=10MHz), Device Flags, Adapter Flags, Max Tags (log2(tags)-1), DelayReset (s)");
291MODULE_AUTHOR("C.L. Huang / Kurt Garloff");
292MODULE_DESCRIPTION("SCSI host adapter driver for Tekram DC390 and other AMD53C974A based PCI SCSI adapters");
293MODULE_LICENSE("GPL");
294MODULE_SUPPORTED_DEVICE("sd,sr,sg,st");
295
296static void *dc390_phase0[]={
297 dc390_DataOut_0,
298 dc390_DataIn_0,
299 dc390_Command_0,
300 dc390_Status_0,
301 dc390_Nop_0,
302 dc390_Nop_0,
303 dc390_MsgOut_0,
304 dc390_MsgIn_0,
305 dc390_Nop_1
306 };
307
308static void *dc390_phase1[]={
309 dc390_DataOutPhase,
310 dc390_DataInPhase,
311 dc390_CommandPhase,
312 dc390_StatusPhase,
313 dc390_Nop_0,
314 dc390_Nop_0,
315 dc390_MsgOutPhase,
316 dc390_MsgInPhase,
317 dc390_Nop_1
318 };
319
320#ifdef DC390_DEBUG1
321static char* dc390_p0_str[] = {
322 "dc390_DataOut_0",
323 "dc390_DataIn_0",
324 "dc390_Command_0",
325 "dc390_Status_0",
326 "dc390_Nop_0",
327 "dc390_Nop_0",
328 "dc390_MsgOut_0",
329 "dc390_MsgIn_0",
330 "dc390_Nop_1"
331 };
332
333static char* dc390_p1_str[] = {
334 "dc390_DataOutPhase",
335 "dc390_DataInPhase",
336 "dc390_CommandPhase",
337 "dc390_StatusPhase",
338 "dc390_Nop_0",
339 "dc390_Nop_0",
340 "dc390_MsgOutPhase",
341 "dc390_MsgInPhase",
342 "dc390_Nop_1"
343 };
344#endif
345
346static u8 dc390_eepromBuf[MAX_ADAPTER_NUM][EE_LEN];
347static u8 dc390_clock_period1[] = {4, 5, 6, 7, 8, 10, 13, 20};
348static u8 dc390_clock_speed[] = {100,80,67,57,50, 40, 31, 20};
349
350/***********************************************************************
351 * Functions for the management of the internal structures
352 * (DCBs, SRBs, Queueing)
353 *
354 **********************************************************************/
355static void inline dc390_start_segment(struct dc390_srb* pSRB)
356{
357 struct scatterlist *psgl = pSRB->pSegmentList;
358
359 /* start new sg segment */
360 pSRB->SGBusAddr = sg_dma_address(psgl);
361 pSRB->SGToBeXferLen = sg_dma_len(psgl);
362}
363
364static unsigned long inline dc390_advance_segment(struct dc390_srb* pSRB, u32 residue)
365{
366 unsigned long xfer = pSRB->SGToBeXferLen - residue;
367
368 /* xfer more bytes transferred */
369 pSRB->SGBusAddr += xfer;
370 pSRB->TotalXferredLen += xfer;
371 pSRB->SGToBeXferLen = residue;
372
373 return xfer;
374}
375
376static struct dc390_dcb __inline__ *dc390_findDCB ( struct dc390_acb* pACB, u8 id, u8 lun)
377{
378 struct dc390_dcb* pDCB = pACB->pLinkDCB; if (!pDCB) return NULL;
379 while (pDCB->TargetID != id || pDCB->TargetLUN != lun)
380 {
381 pDCB = pDCB->pNextDCB;
382 if (pDCB == pACB->pLinkDCB)
383 return NULL;
384 }
385 DCBDEBUG1( printk (KERN_DEBUG "DCB %p (%02x,%02x) found.\n", \
386 pDCB, pDCB->TargetID, pDCB->TargetLUN));
387 return pDCB;
388}
389
390/* Insert SRB oin top of free list */
391static __inline__ void dc390_Free_insert (struct dc390_acb* pACB, struct dc390_srb* pSRB)
392{
393 DEBUG0(printk ("DC390: Free SRB %p\n", pSRB));
394 pSRB->pNextSRB = pACB->pFreeSRB;
395 pACB->pFreeSRB = pSRB;
396}
397
398static __inline__ void dc390_Going_append (struct dc390_dcb* pDCB, struct dc390_srb* pSRB)
399{
400 pDCB->GoingSRBCnt++;
401 DEBUG0(printk("DC390: Append SRB %p to Going\n", pSRB));
402 /* Append to the list of Going commands */
403 if( pDCB->pGoingSRB )
404 pDCB->pGoingLast->pNextSRB = pSRB;
405 else
406 pDCB->pGoingSRB = pSRB;
407
408 pDCB->pGoingLast = pSRB;
409 /* No next one in sent list */
410 pSRB->pNextSRB = NULL;
411}
412
413static __inline__ void dc390_Going_remove (struct dc390_dcb* pDCB, struct dc390_srb* pSRB)
414{
415 DEBUG0(printk("DC390: Remove SRB %p from Going\n", pSRB));
416 if (pSRB == pDCB->pGoingSRB)
417 pDCB->pGoingSRB = pSRB->pNextSRB;
418 else
419 {
420 struct dc390_srb* psrb = pDCB->pGoingSRB;
421 while (psrb && psrb->pNextSRB != pSRB)
422 psrb = psrb->pNextSRB;
423 if (!psrb)
424 { printk (KERN_ERR "DC390: Remove non-ex. SRB %p from Going!\n", pSRB); return; }
425 psrb->pNextSRB = pSRB->pNextSRB;
426 if (pSRB == pDCB->pGoingLast)
427 pDCB->pGoingLast = psrb;
428 }
429 pDCB->GoingSRBCnt--;
430}
431
432static struct scatterlist* dc390_sg_build_single(struct scatterlist *sg, void *addr, unsigned int length)
433{
434 sg_init_one(sg, addr, length);
435 return sg;
436}
437
438/* Create pci mapping */
439static int dc390_pci_map (struct dc390_srb* pSRB)
440{
441 int error = 0;
442 struct scsi_cmnd *pcmd = pSRB->pcmd;
443 struct pci_dev *pdev = pSRB->pSRBDCB->pDCBACB->pdev;
444 dc390_cmd_scp_t* cmdp = ((dc390_cmd_scp_t*)(&pcmd->SCp));
445
446 /* Map sense buffer */
447 if (pSRB->SRBFlag & AUTO_REQSENSE) {
448 pSRB->pSegmentList = dc390_sg_build_single(&pSRB->Segmentx, pcmd->sense_buffer, SCSI_SENSE_BUFFERSIZE);
449 pSRB->SGcount = pci_map_sg(pdev, pSRB->pSegmentList, 1,
450 DMA_FROM_DEVICE);
451 cmdp->saved_dma_handle = sg_dma_address(pSRB->pSegmentList);
452
453 /* TODO: error handling */
454 if (pSRB->SGcount != 1)
455 error = 1;
456 DEBUG1(printk("%s(): Mapped sense buffer %p at %x\n", __func__, pcmd->sense_buffer, cmdp->saved_dma_handle));
457 /* Map SG list */
458 } else if (scsi_sg_count(pcmd)) {
459 int nseg;
460
461 nseg = scsi_dma_map(pcmd);
462
463 pSRB->pSegmentList = scsi_sglist(pcmd);
464 pSRB->SGcount = nseg;
465
466 /* TODO: error handling */
467 if (nseg < 0)
468 error = 1;
469 DEBUG1(printk("%s(): Mapped SG %p with %d (%d) elements\n",\
470 __func__, scsi_sglist(pcmd), nseg, scsi_sg_count(pcmd)));
471 /* Map single segment */
472 } else
473 pSRB->SGcount = 0;
474
475 return error;
476}
477
478/* Remove pci mapping */
479static void dc390_pci_unmap (struct dc390_srb* pSRB)
480{
481 struct scsi_cmnd *pcmd = pSRB->pcmd;
482 struct pci_dev *pdev = pSRB->pSRBDCB->pDCBACB->pdev;
483 DEBUG1(dc390_cmd_scp_t* cmdp = ((dc390_cmd_scp_t*)(&pcmd->SCp)));
484
485 if (pSRB->SRBFlag) {
486 pci_unmap_sg(pdev, &pSRB->Segmentx, 1, DMA_FROM_DEVICE);
487 DEBUG1(printk("%s(): Unmapped sense buffer at %x\n", __func__, cmdp->saved_dma_handle));
488 } else {
489 scsi_dma_unmap(pcmd);
490 DEBUG1(printk("%s(): Unmapped SG at %p with %d elements\n",
491 __func__, scsi_sglist(pcmd), scsi_sg_count(pcmd)));
492 }
493}
494
495static void __inline__
496dc390_freetag (struct dc390_dcb* pDCB, struct dc390_srb* pSRB)
497{
498 if (pSRB->TagNumber != SCSI_NO_TAG) {
499 pDCB->TagMask &= ~(1 << pSRB->TagNumber); /* free tag mask */
500 pSRB->TagNumber = SCSI_NO_TAG;
501 }
502}
503
504
505static int
506dc390_StartSCSI( struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb* pSRB )
507{
508 struct scsi_cmnd *scmd = pSRB->pcmd;
509 struct scsi_device *sdev = scmd->device;
510 u8 cmd, disc_allowed, try_sync_nego;
511 char tag[2];
512
513 pSRB->ScsiPhase = SCSI_NOP0;
514
515 if (pACB->Connected)
516 {
517 // Should not happen normally
518 printk (KERN_WARNING "DC390: Can't select when connected! (%08x,%02x)\n",
519 pSRB->SRBState, pSRB->SRBFlag);
520 pSRB->SRBState = SRB_READY;
521 pACB->SelConn++;
522 return 1;
523 }
524 if (time_before (jiffies, pACB->last_reset))
525 {
526 DEBUG0(printk ("DC390: We were just reset and don't accept commands yet!\n"));
527 return 1;
528 }
529 /* KG: Moved pci mapping here */
530 dc390_pci_map(pSRB);
531 /* TODO: error handling */
532 DC390_write8 (Scsi_Dest_ID, pDCB->TargetID);
533 DC390_write8 (Sync_Period, pDCB->SyncPeriod);
534 DC390_write8 (Sync_Offset, pDCB->SyncOffset);
535 DC390_write8 (CtrlReg1, pDCB->CtrlR1);
536 DC390_write8 (CtrlReg3, pDCB->CtrlR3);
537 DC390_write8 (CtrlReg4, pDCB->CtrlR4);
538 DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD); /* Flush FIFO */
539 DEBUG1(printk (KERN_INFO "DC390: Start SCSI command: %02x (Sync:%02x)\n",\
540 scmd->cmnd[0], pDCB->SyncMode));
541
542 /* Don't disconnect on AUTO_REQSENSE, cause it might be an
543 * Contingent Allegiance Condition (6.6), where no tags should be used.
544 * All other have to be allowed to disconnect to prevent Incorrect
545 * Initiator Connection (6.8.2/6.5.2) */
546 /* Changed KG, 99/06/06 */
547 if (! (pSRB->SRBFlag & AUTO_REQSENSE))
548 disc_allowed = pDCB->DevMode & EN_DISCONNECT_;
549 else
550 disc_allowed = 0;
551
552 if ((pDCB->SyncMode & SYNC_ENABLE) && pDCB->TargetLUN == 0 && sdev->sdtr &&
553 (((scmd->cmnd[0] == REQUEST_SENSE || (pSRB->SRBFlag & AUTO_REQSENSE)) &&
554 !(pDCB->SyncMode & SYNC_NEGO_DONE)) || scmd->cmnd[0] == INQUIRY))
555 try_sync_nego = 1;
556 else
557 try_sync_nego = 0;
558
559 pSRB->MsgCnt = 0;
560 cmd = SEL_W_ATN;
561 DC390_write8 (ScsiFifo, IDENTIFY(disc_allowed, pDCB->TargetLUN));
562 /* Change 99/05/31: Don't use tags when not disconnecting (BUSY) */
563 if ((pDCB->SyncMode & EN_TAG_QUEUEING) && disc_allowed && scsi_populate_tag_msg(scmd, tag)) {
564 DC390_write8(ScsiFifo, tag[0]);
565 pDCB->TagMask |= 1 << tag[1];
566 pSRB->TagNumber = tag[1];
567 DC390_write8(ScsiFifo, tag[1]);
568 DEBUG1(printk(KERN_INFO "DC390: Select w/DisCn for SRB %p, block tag %02x\n", pSRB, tag[1]));
569 cmd = SEL_W_ATN3;
570 } else {
571 /* No TagQ */
572//no_tag:
573 DEBUG1(printk(KERN_INFO "DC390: Select w%s/DisCn for SRB %p, No TagQ\n", disc_allowed ? "" : "o", pSRB));
574 }
575
576 pSRB->SRBState = SRB_START_;
577
578 if (try_sync_nego)
579 {
580 u8 Sync_Off = pDCB->SyncOffset;
581 DEBUG0(printk (KERN_INFO "DC390: NEW Sync Nego code triggered (%i %i)\n", pDCB->TargetID, pDCB->TargetLUN));
582 pSRB->MsgOutBuf[0] = EXTENDED_MESSAGE;
583 pSRB->MsgOutBuf[1] = 3;
584 pSRB->MsgOutBuf[2] = EXTENDED_SDTR;
585 pSRB->MsgOutBuf[3] = pDCB->NegoPeriod;
586 if (!(Sync_Off & 0x0f)) Sync_Off = SYNC_NEGO_OFFSET;
587 pSRB->MsgOutBuf[4] = Sync_Off;
588 pSRB->MsgCnt = 5;
589 //pSRB->SRBState = SRB_MSGOUT_;
590 pSRB->SRBState |= DO_SYNC_NEGO;
591 cmd = SEL_W_ATN_STOP;
592 }
593
594 /* Command is written in CommandPhase, if SEL_W_ATN_STOP ... */
595 if (cmd != SEL_W_ATN_STOP)
596 {
597 if( pSRB->SRBFlag & AUTO_REQSENSE )
598 {
599 DC390_write8 (ScsiFifo, REQUEST_SENSE);
600 DC390_write8 (ScsiFifo, pDCB->TargetLUN << 5);
601 DC390_write8 (ScsiFifo, 0);
602 DC390_write8 (ScsiFifo, 0);
603 DC390_write8 (ScsiFifo, SCSI_SENSE_BUFFERSIZE);
604 DC390_write8 (ScsiFifo, 0);
605 DEBUG1(printk (KERN_DEBUG "DC390: AutoReqSense !\n"));
606 }
607 else /* write cmnd to bus */
608 {
609 u8 *ptr; u8 i;
610 ptr = (u8 *)scmd->cmnd;
611 for (i = 0; i < scmd->cmd_len; i++)
612 DC390_write8 (ScsiFifo, *(ptr++));
613 }
614 }
615 DEBUG0(if (pACB->pActiveDCB) \
616 printk (KERN_WARNING "DC390: ActiveDCB != 0\n"));
617 DEBUG0(if (pDCB->pActiveSRB) \
618 printk (KERN_WARNING "DC390: ActiveSRB != 0\n"));
619 //DC390_write8 (DMA_Cmd, DMA_IDLE_CMD);
620 if (DC390_read8 (Scsi_Status) & INTERRUPT)
621 {
622 dc390_freetag (pDCB, pSRB);
623 DEBUG0(printk ("DC390: Interrupt during Start SCSI (target %02i-%02i)\n",
624 scmd->device->id, (u8)scmd->device->lun));
625 pSRB->SRBState = SRB_READY;
626 //DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD);
627 pACB->SelLost++;
628 return 1;
629 }
630 DC390_write8 (ScsiCmd, cmd);
631 pACB->pActiveDCB = pDCB;
632 pDCB->pActiveSRB = pSRB;
633 pACB->Connected = 1;
634 pSRB->ScsiPhase = SCSI_NOP1;
635 return 0;
636}
637
638
639static void __inline__
640dc390_InvalidCmd(struct dc390_acb* pACB)
641{
642 if (pACB->pActiveDCB->pActiveSRB->SRBState & (SRB_START_ | SRB_MSGOUT))
643 DC390_write8(ScsiCmd, CLEAR_FIFO_CMD);
644}
645
646
647static irqreturn_t __inline__
648DC390_Interrupt(void *dev_id)
649{
650 struct dc390_acb *pACB = dev_id;
651 struct dc390_dcb *pDCB;
652 struct dc390_srb *pSRB;
653 u8 sstatus=0;
654 u8 phase;
655 void (*stateV)( struct dc390_acb*, struct dc390_srb*, u8 *);
656 u8 istate, istatus;
657
658 sstatus = DC390_read8 (Scsi_Status);
659 if( !(sstatus & INTERRUPT) )
660 return IRQ_NONE;
661
662 DEBUG1(printk (KERN_DEBUG "sstatus=%02x,", sstatus));
663
664 //DC390_write32 (DMA_ScsiBusCtrl, WRT_ERASE_DMA_STAT | EN_INT_ON_PCI_ABORT);
665 //dstatus = DC390_read8 (DMA_Status);
666 //DC390_write32 (DMA_ScsiBusCtrl, EN_INT_ON_PCI_ABORT);
667
668 spin_lock_irq(pACB->pScsiHost->host_lock);
669
670 istate = DC390_read8 (Intern_State);
671 istatus = DC390_read8 (INT_Status); /* This clears Scsi_Status, Intern_State and INT_Status ! */
672
673 DEBUG1(printk (KERN_INFO "Istatus(Res,Inv,Dis,Serv,Succ,ReS,SelA,Sel)=%02x,",istatus));
674 dc390_laststatus &= ~0x00ffffff;
675 dc390_laststatus |= /* dstatus<<24 | */ sstatus<<16 | istate<<8 | istatus;
676
677 if (sstatus & ILLEGAL_OP_ERR)
678 {
679 printk ("DC390: Illegal Operation detected (%08x)!\n", dc390_laststatus);
680 dc390_dumpinfo (pACB, pACB->pActiveDCB, pACB->pActiveDCB->pActiveSRB);
681 }
682
683 else if (istatus & INVALID_CMD)
684 {
685 printk ("DC390: Invalid Command detected (%08x)!\n", dc390_laststatus);
686 dc390_InvalidCmd( pACB );
687 goto unlock;
688 }
689
690 if (istatus & SCSI_RESET)
691 {
692 dc390_ScsiRstDetect( pACB );
693 goto unlock;
694 }
695
696 if (istatus & DISCONNECTED)
697 {
698 dc390_Disconnect( pACB );
699 goto unlock;
700 }
701
702 if (istatus & RESELECTED)
703 {
704 dc390_Reselect( pACB );
705 goto unlock;
706 }
707
708 else if (istatus & (SELECTED | SEL_ATTENTION))
709 {
710 printk (KERN_ERR "DC390: Target mode not supported!\n");
711 goto unlock;
712 }
713
714 if (istatus & (SUCCESSFUL_OP|SERVICE_REQUEST) )
715 {
716 pDCB = pACB->pActiveDCB;
717 if (!pDCB)
718 {
719 printk (KERN_ERR "DC390: Suc. op/ Serv. req: pActiveDCB = 0!\n");
720 goto unlock;
721 }
722 pSRB = pDCB->pActiveSRB;
723 if( pDCB->DCBFlag & ABORT_DEV_ )
724 dc390_EnableMsgOut_Abort (pACB, pSRB);
725
726 phase = pSRB->ScsiPhase;
727 DEBUG1(printk (KERN_INFO "DC390: [%i]%s(0) (%02x)\n", phase, dc390_p0_str[phase], sstatus));
728 stateV = (void *) dc390_phase0[phase];
729 ( *stateV )( pACB, pSRB, &sstatus );
730
731 pSRB->ScsiPhase = sstatus & 7;
732 phase = (u8) sstatus & 7;
733 DEBUG1(printk (KERN_INFO "DC390: [%i]%s(1) (%02x)\n", phase, dc390_p1_str[phase], sstatus));
734 stateV = (void *) dc390_phase1[phase];
735 ( *stateV )( pACB, pSRB, &sstatus );
736 }
737
738 unlock:
739 spin_unlock_irq(pACB->pScsiHost->host_lock);
740 return IRQ_HANDLED;
741}
742
743static irqreturn_t do_DC390_Interrupt(int irq, void *dev_id)
744{
745 irqreturn_t ret;
746 DEBUG1(printk (KERN_INFO "DC390: Irq (%i) caught: ", irq));
747 /* Locking is done in DC390_Interrupt */
748 ret = DC390_Interrupt(dev_id);
749 DEBUG1(printk (".. IRQ returned\n"));
750 return ret;
751}
752
753static void
754dc390_DataOut_0(struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus)
755{
756 u8 sstatus;
757 u32 ResidCnt;
758 u8 dstate = 0;
759
760 sstatus = *psstatus;
761
762 if( !(pSRB->SRBState & SRB_XFERPAD) )
763 {
764 if( sstatus & (PARITY_ERR | ILLEGAL_OP_ERR) )
765 pSRB->SRBStatus |= PARITY_ERROR;
766
767 if( sstatus & COUNT_2_ZERO )
768 {
769 unsigned long timeout = jiffies + HZ;
770
771 /* Function called from the ISR with the host_lock held and interrupts disabled */
772 if (pSRB->SGToBeXferLen)
773 while (time_before(jiffies, timeout) && !((dstate = DC390_read8 (DMA_Status)) & DMA_XFER_DONE)) {
774 spin_unlock_irq(pACB->pScsiHost->host_lock);
775 udelay(50);
776 spin_lock_irq(pACB->pScsiHost->host_lock);
777 }
778 if (!time_before(jiffies, timeout))
779 printk (KERN_CRIT "DC390: Deadlock in DataOut_0: DMA aborted unfinished: %06x bytes remain!!\n",
780 DC390_read32 (DMA_Wk_ByteCntr));
781 dc390_laststatus &= ~0xff000000;
782 dc390_laststatus |= dstate << 24;
783 pSRB->TotalXferredLen += pSRB->SGToBeXferLen;
784 pSRB->SGIndex++;
785 if( pSRB->SGIndex < pSRB->SGcount )
786 {
787 pSRB->pSegmentList++;
788
789 dc390_start_segment(pSRB);
790 }
791 else
792 pSRB->SGToBeXferLen = 0;
793 }
794 else
795 {
796 ResidCnt = ((u32) DC390_read8 (Current_Fifo) & 0x1f) +
797 (((u32) DC390_read8 (CtcReg_High) << 16) |
798 ((u32) DC390_read8 (CtcReg_Mid) << 8) |
799 (u32) DC390_read8 (CtcReg_Low));
800
801 dc390_advance_segment(pSRB, ResidCnt);
802 }
803 }
804 if ((*psstatus & 7) != SCSI_DATA_OUT)
805 {
806 DC390_write8 (DMA_Cmd, WRITE_DIRECTION+DMA_IDLE_CMD);
807 DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD);
808 }
809}
810
811static void
812dc390_DataIn_0(struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus)
813{
814 u8 sstatus, residual, bval;
815 u32 ResidCnt, i;
816 unsigned long xferCnt;
817
818 sstatus = *psstatus;
819
820 if( !(pSRB->SRBState & SRB_XFERPAD) )
821 {
822 if( sstatus & (PARITY_ERR | ILLEGAL_OP_ERR))
823 pSRB->SRBStatus |= PARITY_ERROR;
824
825 if( sstatus & COUNT_2_ZERO )
826 {
827 int dstate = 0;
828 unsigned long timeout = jiffies + HZ;
829
830 /* Function called from the ISR with the host_lock held and interrupts disabled */
831 if (pSRB->SGToBeXferLen)
832 while (time_before(jiffies, timeout) && !((dstate = DC390_read8 (DMA_Status)) & DMA_XFER_DONE)) {
833 spin_unlock_irq(pACB->pScsiHost->host_lock);
834 udelay(50);
835 spin_lock_irq(pACB->pScsiHost->host_lock);
836 }
837 if (!time_before(jiffies, timeout)) {
838 printk (KERN_CRIT "DC390: Deadlock in DataIn_0: DMA aborted unfinished: %06x bytes remain!!\n",
839 DC390_read32 (DMA_Wk_ByteCntr));
840 printk (KERN_CRIT "DC390: DataIn_0: DMA State: %i\n", dstate);
841 }
842 dc390_laststatus &= ~0xff000000;
843 dc390_laststatus |= dstate << 24;
844 DEBUG1(ResidCnt = ((unsigned long) DC390_read8 (CtcReg_High) << 16) \
845 + ((unsigned long) DC390_read8 (CtcReg_Mid) << 8) \
846 + ((unsigned long) DC390_read8 (CtcReg_Low)));
847 DEBUG1(printk (KERN_DEBUG "Count_2_Zero (ResidCnt=%u,ToBeXfer=%lu),", ResidCnt, pSRB->SGToBeXferLen));
848
849 DC390_write8 (DMA_Cmd, READ_DIRECTION+DMA_IDLE_CMD);
850
851 pSRB->TotalXferredLen += pSRB->SGToBeXferLen;
852 pSRB->SGIndex++;
853 if( pSRB->SGIndex < pSRB->SGcount )
854 {
855 pSRB->pSegmentList++;
856
857 dc390_start_segment(pSRB);
858 }
859 else
860 pSRB->SGToBeXferLen = 0;
861 }
862 else /* phase changed */
863 {
864 residual = 0;
865 bval = DC390_read8 (Current_Fifo);
866 while( bval & 0x1f )
867 {
868 DEBUG1(printk (KERN_DEBUG "Check for residuals,"));
869 if( (bval & 0x1f) == 1 )
870 {
871 for(i=0; i < 0x100; i++)
872 {
873 bval = DC390_read8 (Current_Fifo);
874 if( !(bval & 0x1f) )
875 goto din_1;
876 else if( i == 0x0ff )
877 {
878 residual = 1; /* ;1 residual byte */
879 goto din_1;
880 }
881 }
882 }
883 else
884 bval = DC390_read8 (Current_Fifo);
885 }
886din_1:
887 DC390_write8 (DMA_Cmd, READ_DIRECTION+DMA_BLAST_CMD);
888 for (i = 0xa000; i; i--)
889 {
890 bval = DC390_read8 (DMA_Status);
891 if (bval & BLAST_COMPLETE)
892 break;
893 }
894 /* It seems a DMA Blast abort isn't that bad ... */
895 if (!i) printk (KERN_ERR "DC390: DMA Blast aborted unfinished!\n");
896 //DC390_write8 (DMA_Cmd, READ_DIRECTION+DMA_IDLE_CMD);
897 dc390_laststatus &= ~0xff000000;
898 dc390_laststatus |= bval << 24;
899
900 DEBUG1(printk (KERN_DEBUG "Blast: Read %i times DMA_Status %02x", 0xa000-i, bval));
901 ResidCnt = (((u32) DC390_read8 (CtcReg_High) << 16) |
902 ((u32) DC390_read8 (CtcReg_Mid) << 8)) |
903 (u32) DC390_read8 (CtcReg_Low);
904
905 xferCnt = dc390_advance_segment(pSRB, ResidCnt);
906
907 if (residual) {
908 size_t count = 1;
909 size_t offset = pSRB->SGBusAddr - sg_dma_address(pSRB->pSegmentList);
910 unsigned long flags;
911 u8 *ptr;
912
913 bval = DC390_read8 (ScsiFifo); /* get one residual byte */
914
915 local_irq_save(flags);
916 ptr = scsi_kmap_atomic_sg(pSRB->pSegmentList, pSRB->SGcount, &offset, &count);
917 if (likely(ptr)) {
918 *(ptr + offset) = bval;
919 scsi_kunmap_atomic_sg(ptr);
920 }
921 local_irq_restore(flags);
922 WARN_ON(!ptr);
923
924 /* 1 more byte read */
925 xferCnt += dc390_advance_segment(pSRB, pSRB->SGToBeXferLen - 1);
926 }
927 DEBUG1(printk (KERN_DEBUG "Xfered: %lu, Total: %lu, Remaining: %lu\n", xferCnt,\
928 pSRB->TotalXferredLen, pSRB->SGToBeXferLen));
929 }
930 }
931 if ((*psstatus & 7) != SCSI_DATA_IN)
932 {
933 DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD);
934 DC390_write8 (DMA_Cmd, READ_DIRECTION+DMA_IDLE_CMD);
935 }
936}
937
/* Command-phase interrupt handler: intentionally empty.  The command bytes
 * were already pushed to the chip FIFO by dc390_CommandPhase(); this stub
 * only fills the phase dispatch slot. */
static void
dc390_Command_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus)
{
}
942
/* Status-phase interrupt: latch the target status byte and the following
 * message byte from the SCSI FIFO, mark the SRB completed and release /ACK. */
static void
dc390_Status_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus)
{

    pSRB->TargetStatus = DC390_read8 (ScsiFifo);
    //udelay (1);
    pSRB->EndMessage = DC390_read8 (ScsiFifo);	/* get message */

    /* Neutralize the phase so the dispatcher does no further work. */
    *psstatus = SCSI_NOP0;
    pSRB->SRBState = SRB_COMPLETED;
    DC390_write8 (ScsiCmd, MSG_ACCEPTED_CMD);	/* release the /ACK signal */
}
955
/* Message-out phase finished.  If we were answering an unexpected
 * reselection or have just sent an ABORT, this nexus is done — neutralize
 * the phase so no further handling happens. */
static void
dc390_MsgOut_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus)
{
    if( pSRB->SRBState & (SRB_UNEXPECT_RESEL+SRB_ABORT_SENT) )
	*psstatus = SCSI_NOP0;
    //DC390_write8 (DMA_Cmd, DMA_IDLE_CMD);
}
963
964
/* Program the chip's synchronous-transfer registers from the cached DCB
 * values and propagate the negotiated rate to the other LUNs of the same
 * target via dc390_SetXferRate(). */
static void __inline__
dc390_reprog (struct dc390_acb* pACB, struct dc390_dcb* pDCB)
{
  DC390_write8 (Sync_Period, pDCB->SyncPeriod);
  DC390_write8 (Sync_Offset, pDCB->SyncOffset);
  DC390_write8 (CtrlReg3, pDCB->CtrlR3);
  DC390_write8 (CtrlReg4, pDCB->CtrlR4);
  dc390_SetXferRate (pACB, pDCB);
}
974
975
#ifdef DC390_DEBUG0
/* Dump a SCSI message buffer as hex bytes (debug builds only).
 * Byte 0 is printed unconditionally, matching the established behaviour. */
static void
dc390_printMsg (u8 *MsgBuf, u8 len)
{
    int idx = 0;

    do
	printk (" %02x", MsgBuf[idx]);
    while (++idx < len);
    printk ("\n");
}
#endif
987
988#define DC390_ENABLE_MSGOUT DC390_write8 (ScsiCmd, SET_ATN_CMD)
989
990/* reject_msg */
/* reject_msg */
/* Queue a MESSAGE REJECT for the next message-out phase and raise ATN so
 * the target switches into that phase. */
static void __inline__
dc390_MsgIn_reject (struct dc390_acb* pACB, struct dc390_srb* pSRB)
{
  pSRB->MsgOutBuf[0] = MESSAGE_REJECT;
  pSRB->MsgCnt = 1;
  DC390_ENABLE_MSGOUT;
  DEBUG0 (printk (KERN_INFO "DC390: Reject message\n"));
}
999
1000/* abort command */
/* abort command */
/* Queue an ABORT message (sent on the next message-out phase, ATN raised)
 * and clear the per-device abort-pending flag now that the message is on
 * its way. */
static void
dc390_EnableMsgOut_Abort ( struct dc390_acb* pACB, struct dc390_srb* pSRB )
{
    pSRB->MsgOutBuf[0] = ABORT;
    pSRB->MsgCnt = 1; DC390_ENABLE_MSGOUT;
    pSRB->pSRBDCB->DCBFlag &= ~ABORT_DEV_;
}
1008
1009static struct dc390_srb*
1010dc390_MsgIn_QTag (struct dc390_acb* pACB, struct dc390_dcb* pDCB, s8 tag)
1011{
1012 struct dc390_srb* pSRB = pDCB->pGoingSRB;
1013
1014 if (pSRB)
1015 {
1016 struct scsi_cmnd *scmd = scsi_find_tag(pSRB->pcmd->device, tag);
1017 pSRB = (struct dc390_srb *)scmd->host_scribble;
1018
1019 if (pDCB->DCBFlag & ABORT_DEV_)
1020 {
1021 pSRB->SRBState = SRB_ABORT_SENT;
1022 dc390_EnableMsgOut_Abort( pACB, pSRB );
1023 }
1024
1025 if (!(pSRB->SRBState & SRB_DISCONNECT))
1026 goto mingx0;
1027
1028 pDCB->pActiveSRB = pSRB;
1029 pSRB->SRBState = SRB_DATA_XFER;
1030 }
1031 else
1032 {
1033 mingx0:
1034 pSRB = pACB->pTmpSRB;
1035 pSRB->SRBState = SRB_UNEXPECT_RESEL;
1036 pDCB->pActiveSRB = pSRB;
1037 pSRB->MsgOutBuf[0] = ABORT_TAG;
1038 pSRB->MsgCnt = 1; DC390_ENABLE_MSGOUT;
1039 }
1040 return pSRB;
1041}
1042
1043
1044/* set async transfer mode */
/* set async transfer mode */
/* Fall back to asynchronous transfers: clear the sync negotiation state
 * and sync registers in the DCB, restore the normal glitch-eater setting,
 * then reprogram the chip. */
static void
dc390_MsgIn_set_async (struct dc390_acb* pACB, struct dc390_srb* pSRB)
{
    struct dc390_dcb* pDCB = pSRB->pSRBDCB;
    if (!(pSRB->SRBState & DO_SYNC_NEGO))
	printk (KERN_INFO "DC390: Target %i initiates Non-Sync?\n", pDCB->TargetID);
    pSRB->SRBState &= ~DO_SYNC_NEGO;
    pDCB->SyncMode &= ~(SYNC_ENABLE+SYNC_NEGO_DONE);
    pDCB->SyncPeriod = 0;
    pDCB->SyncOffset = 0;
    //pDCB->NegoPeriod = 50; /* 200ns <=> 5 MHz */
    pDCB->CtrlR3 = FAST_CLK;	/* fast clock / normal scsi */
    pDCB->CtrlR4 &= 0x3f;
    pDCB->CtrlR4 |= pACB->glitch_cfg;	/* glitch eater */
    dc390_reprog (pACB, pDCB);
}
1061
1062/* set sync transfer mode */
/* set sync transfer mode */
/* Handle an incoming SDTR message: if the target initiated the negotiation,
 * clamp offset/period to our limits and echo a corrected SDTR back; then
 * compute the chip's sync-period register value and clock/glitch settings
 * from the agreed period and reprogram the chip. */
static void
dc390_MsgIn_set_sync (struct dc390_acb* pACB, struct dc390_srb* pSRB)
{
    u8 bval;
    u16 wval, wval1;
    struct dc390_dcb* pDCB = pSRB->pSRBDCB;
    u8 oldsyncperiod = pDCB->SyncPeriod;
    u8 oldsyncoffset = pDCB->SyncOffset;

    if (!(pSRB->SRBState & DO_SYNC_NEGO))
    {
	/* Target-initiated SDTR: we did not ask for it, so answer. */
	printk (KERN_INFO "DC390: Target %i initiates Sync: %ins %i ... answer ...\n",
		pDCB->TargetID, pSRB->MsgInBuf[3]<<2, pSRB->MsgInBuf[4]);

	/* reject */
	//dc390_MsgIn_reject (pACB, pSRB);
	//return dc390_MsgIn_set_async (pACB, pSRB);

	/* Reply with corrected SDTR Message */
	if (pSRB->MsgInBuf[4] > 15)	/* chip supports at most offset 15 */
	{
	    printk (KERN_INFO "DC390: Lower Sync Offset to 15\n");
	    pSRB->MsgInBuf[4] = 15;
	}
	if (pSRB->MsgInBuf[3] < pDCB->NegoPeriod)	/* don't go faster than we can */
	{
	    printk (KERN_INFO "DC390: Set sync nego period to %ins\n", pDCB->NegoPeriod << 2);
	    pSRB->MsgInBuf[3] = pDCB->NegoPeriod;
	}
	memcpy (pSRB->MsgOutBuf, pSRB->MsgInBuf, 5);
	pSRB->MsgCnt = 5;
	DC390_ENABLE_MSGOUT;
    }

    pSRB->SRBState &= ~DO_SYNC_NEGO;
    pDCB->SyncMode |= SYNC_ENABLE+SYNC_NEGO_DONE;
    pDCB->SyncOffset &= 0x0f0;
    pDCB->SyncOffset |= pSRB->MsgInBuf[4];
    pDCB->NegoPeriod = pSRB->MsgInBuf[3];

    /* Convert the SDTR period (units of 4 ns) into the chip's 25 ns-clock
     * count, rounding up. */
    wval = (u16) pSRB->MsgInBuf[3];
    wval = wval << 2; wval -= 3; wval1 = wval / 25;	/* compute speed */
    if( (wval1 * 25) != wval) wval1++;
    bval = FAST_CLK+FAST_SCSI;	/* fast clock / fast scsi */

    pDCB->CtrlR4 &= 0x3f;		/* Glitch eater: 12ns less than normal */
    if (pACB->glitch_cfg != NS_TO_GLITCH(0))
	pDCB->CtrlR4 |= NS_TO_GLITCH(((GLITCH_TO_NS(pACB->glitch_cfg)) - 1));
    else
	pDCB->CtrlR4 |= NS_TO_GLITCH(0);
    if (wval1 < 4) pDCB->CtrlR4 |= NS_TO_GLITCH(0); /* Ultra */

    if (wval1 >= 8)
    {
	/* Slow enough for normal (non-FAST) SCSI timing. */
	wval1--;	/* Timing computation differs by 1 from FAST_SCSI */
	bval = FAST_CLK;		/* fast clock / normal scsi */
	pDCB->CtrlR4 |= pACB->glitch_cfg;	/* glitch eater */
    }

    pDCB->CtrlR3 = bval;
    pDCB->SyncPeriod = (u8)wval1;

    /* Report a changed agreement once per target (LUN 0 only). */
    if ((oldsyncperiod != wval1 || oldsyncoffset != pDCB->SyncOffset) && pDCB->TargetLUN == 0)
    {
	if (! (bval & FAST_SCSI)) wval1++;
	printk (KERN_INFO "DC390: Target %i: Sync transfer %i.%1i MHz, Offset %i\n", pDCB->TargetID,
		40/wval1, ((40%wval1)*10+wval1/2)/wval1, pDCB->SyncOffset & 0x0f);
    }

    dc390_reprog (pACB, pDCB);
}
1134
1135
1136/* handle RESTORE_PTR */
1137/* This doesn't look very healthy... to-be-fixed */
/* Handle a RESTORE POINTERS message: rewind the scatter-gather bookkeeping
 * to the start of the command, then replay segments until we reach the
 * last saved data pointer (pSRB->Saved_Ptr) and adjust the partial segment.
 * NOTE(review): inherits the old "doesn't look very healthy" caveat — the
 * replay assumes Saved_Ptr falls inside the mapped SG list; not verified
 * here. */
static void
dc390_restore_ptr (struct dc390_acb* pACB, struct dc390_srb* pSRB)
{
    struct scsi_cmnd *pcmd = pSRB->pcmd;
    struct scatterlist *psgl;
    pSRB->TotalXferredLen = 0;
    pSRB->SGIndex = 0;
    if (scsi_sg_count(pcmd)) {
	size_t saved;
	pSRB->pSegmentList = scsi_sglist(pcmd);
	psgl = pSRB->pSegmentList;
	//dc390_pci_sync(pSRB);

	/* Walk whole segments that lie entirely before the saved pointer. */
	while (pSRB->TotalXferredLen + (unsigned long) sg_dma_len(psgl) < pSRB->Saved_Ptr)
	{
	    pSRB->TotalXferredLen += (unsigned long) sg_dma_len(psgl);
	    pSRB->SGIndex++;
	    if( pSRB->SGIndex < pSRB->SGcount )
	    {
		pSRB->pSegmentList++;

		dc390_start_segment(pSRB);
	    }
	    else
		pSRB->SGToBeXferLen = 0;
	}

	/* Advance partway into the current segment. */
	saved = pSRB->Saved_Ptr - pSRB->TotalXferredLen;
	pSRB->SGToBeXferLen -= saved;
	pSRB->SGBusAddr += saved;
	printk (KERN_INFO "DC390: Pointer restored. Segment %i, Total %li, Bus %08lx\n",
		pSRB->SGIndex, pSRB->Saved_Ptr, pSRB->SGBusAddr);

    } else {
	pSRB->SGcount = 0;
	printk (KERN_INFO "DC390: RESTORE_PTR message for Transfer without Scatter-Gather ??\n");
    }

    pSRB->TotalXferredLen = pSRB->Saved_Ptr;
}
1178
1179
/* According to the docs, the AM53C974 reads the message and
 * generates a Successful Operation IRQ before asserting ACK for
 * the last byte (how does it know whether it's the last one?) */
/* The old code handled it differently, indicating that an IRQ is
 * generated for every message byte and that each byte has to be
 * manually ACKed. Hmmm? (KG, 98/11/28) */
/* The old implementation was correct. Sigh! */
1187
1188/* Check if the message is complete */
1189static u8 __inline__
1190dc390_MsgIn_complete (u8 *msgbuf, u32 len)
1191{
1192 if (*msgbuf == EXTENDED_MESSAGE)
1193 {
1194 if (len < 2) return 0;
1195 if (len < msgbuf[1] + 2) return 0;
1196 }
1197 else if (*msgbuf >= 0x20 && *msgbuf <= 0x2f) // two byte messages
1198 if (len < 2) return 0;
1199 return 1;
1200}
1201
1202
1203
1204/* read and eval received messages */
/* read and eval received messages */
/* Message-in phase interrupt: pull one byte from the FIFO into MsgInBuf;
 * once dc390_MsgIn_complete() says the message is whole, dispatch on the
 * message code, then reset the message-in state and ACK the byte. */
static void
dc390_MsgIn_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus)
{
    struct dc390_dcb* pDCB = pACB->pActiveDCB;

    /* Read the msg */

    pSRB->MsgInBuf[pACB->MsgLen++] = DC390_read8 (ScsiFifo);
    //pSRB->SRBState = 0;

    /* Msg complete ? */
    if (dc390_MsgIn_complete (pSRB->MsgInBuf, pACB->MsgLen))
    {
	DEBUG0 (printk (KERN_INFO "DC390: MsgIn:"); dc390_printMsg (pSRB->MsgInBuf, pACB->MsgLen));
	/* Now eval the msg */
	switch (pSRB->MsgInBuf[0])
	{
	case DISCONNECT:
	    pSRB->SRBState = SRB_DISCONNECT; break;

	case SIMPLE_QUEUE_TAG:
	case HEAD_OF_QUEUE_TAG:
	case ORDERED_QUEUE_TAG:
	    /* may swap pSRB for the SRB owning the tag */
	    pSRB = dc390_MsgIn_QTag (pACB, pDCB, pSRB->MsgInBuf[1]);
	    break;

	case MESSAGE_REJECT:
	    DC390_write8 (ScsiCmd, RESET_ATN_CMD);
	    pDCB->NegoPeriod = 50; /* 200ns <=> 5 MHz */
	    if( pSRB->SRBState & DO_SYNC_NEGO)
		dc390_MsgIn_set_async (pACB, pSRB);
	    break;

	case EXTENDED_MESSAGE:
	    /* reject every extended msg but SDTR */
	    if (pSRB->MsgInBuf[1] != 3 || pSRB->MsgInBuf[2] != EXTENDED_SDTR)
		dc390_MsgIn_reject (pACB, pSRB);
	    else
	    {
		if (pSRB->MsgInBuf[3] == 0 || pSRB->MsgInBuf[4] == 0)
		    dc390_MsgIn_set_async (pACB, pSRB);
		else
		    dc390_MsgIn_set_sync (pACB, pSRB);
	    }
	    /* fall through — next case is a plain break */

	// nothing has to be done
	case COMMAND_COMPLETE: break;

	// SAVE POINTER may be ignored as we have the struct dc390_srb* associated with the
	// scsi command. Thanks, Gerard, for pointing it out.
	case SAVE_POINTERS:
	    pSRB->Saved_Ptr = pSRB->TotalXferredLen;
	    break;
	// The device might want to restart transfer with a RESTORE
	case RESTORE_POINTERS:
	    DEBUG0(printk ("DC390: RESTORE POINTER message received ... try to handle\n"));
	    dc390_restore_ptr (pACB, pSRB);
	    break;

	// reject unknown messages
	default: dc390_MsgIn_reject (pACB, pSRB);
	}

	/* Clear counter and MsgIn state */
	pSRB->SRBState &= ~SRB_MSGIN;
	pACB->MsgLen = 0;
    }

    *psstatus = SCSI_NOP0;
    DC390_write8 (ScsiCmd, MSG_ACCEPTED_CMD);
    //DC390_write8 (DMA_Cmd, DMA_IDLE_CMD);
}
1277
1278
/* Common data-in/data-out phase setup: program the transfer counter and
 * DMA engine for the current SG segment and kick off the transfer, or —
 * when no SG data remains — issue a transfer-pad command (flagging an
 * overrun if segments are still outstanding).  ioDir selects READ_ or
 * WRITE_DIRECTION for the DMA command. */
static void
dc390_DataIO_Comm( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 ioDir)
{
    unsigned long lval;
    struct dc390_dcb* pDCB = pACB->pActiveDCB;

    if (pSRB == pACB->pTmpSRB)
    {
	/* Data phase on the temporary SRB means tagging went wrong. */
	if (pDCB)
	    printk(KERN_ERR "DC390: pSRB == pTmpSRB! (TagQ Error?) (%02i-%i)\n", pDCB->TargetID, pDCB->TargetLUN);
	else
	    printk(KERN_ERR "DC390: pSRB == pTmpSRB! (TagQ Error?) (DCB 0!)\n");

	/* Try to recover - some broken disks react badly to tagged INQUIRY */
	if (pDCB && pACB->scan_devices && pDCB->GoingSRBCnt == 1) {
	    pSRB = pDCB->pGoingSRB;
	    pDCB->pActiveSRB = pSRB;
	} else {
	    pSRB->pSRBDCB = pDCB;
	    dc390_EnableMsgOut_Abort(pACB, pSRB);
	    if (pDCB)
		/* NOTE(review): every other DCBFlag use in this file is
		 * ABORT_DEV_ (trailing underscore); confirm ABORT_DEV here
		 * is the intended constant. */
		pDCB->DCBFlag |= ABORT_DEV;
	    return;
	}
    }

    if( pSRB->SGIndex < pSRB->SGcount )
    {
	DC390_write8 (DMA_Cmd, DMA_IDLE_CMD | ioDir);
	if( !pSRB->SGToBeXferLen )
	{
	    dc390_start_segment(pSRB);

	    DEBUG1(printk (KERN_DEBUG " DC390: Next SG segment."));
	}
	/* Program the 24-bit SCSI transfer counter byte by byte. */
	lval = pSRB->SGToBeXferLen;
	DEBUG1(printk (KERN_DEBUG " DC390: Start transfer: %li bytes (address %08lx)\n", lval, pSRB->SGBusAddr));
	DC390_write8 (CtcReg_Low, (u8) lval);
	lval >>= 8;
	DC390_write8 (CtcReg_Mid, (u8) lval);
	lval >>= 8;
	DC390_write8 (CtcReg_High, (u8) lval);

	DC390_write32 (DMA_XferCnt, pSRB->SGToBeXferLen);
	DC390_write32 (DMA_XferAddr, pSRB->SGBusAddr);

	//DC390_write8 (DMA_Cmd, DMA_IDLE_CMD | ioDir);
	pSRB->SRBState = SRB_DATA_XFER;

	DC390_write8 (ScsiCmd, DMA_COMMAND+INFO_XFER_CMD);

	DC390_write8 (DMA_Cmd, DMA_START_CMD | ioDir);
	//DEBUG1(DC390_write32 (DMA_ScsiBusCtrl, WRT_ERASE_DMA_STAT | EN_INT_ON_PCI_ABORT));
	//DEBUG1(printk (KERN_DEBUG "DC390: DMA_Status: %02x\n", DC390_read8 (DMA_Status)));
	//DEBUG1(DC390_write32 (DMA_ScsiBusCtrl, EN_INT_ON_PCI_ABORT));
    }
    else /* xfer pad */
    {
	if( pSRB->SGcount )
	{
	    /* Target wants more data than we have mapped. */
	    pSRB->AdaptStatus = H_OVER_UNDER_RUN;
	    pSRB->SRBStatus |= OVER_RUN;
	    DEBUG0(printk (KERN_WARNING " DC390: Overrun -"));
	}
	DEBUG0(printk (KERN_WARNING " Clear transfer pad \n"));
	DC390_write8 (CtcReg_Low, 0);
	DC390_write8 (CtcReg_Mid, 0);
	DC390_write8 (CtcReg_High, 0);

	pSRB->SRBState |= SRB_XFERPAD;
	DC390_write8 (ScsiCmd, DMA_COMMAND+XFER_PAD_BYTE);
/*
	DC390_write8 (DMA_Cmd, DMA_IDLE_CMD | ioDir);
	DC390_write8 (DMA_Cmd, DMA_START_CMD | ioDir);
*/
    }
}
1356
1357
/* Data-out phase entry: delegate to the common data-phase setup with the
 * write direction. */
static void
dc390_DataOutPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus)
{
    dc390_DataIO_Comm (pACB, pSRB, WRITE_DIRECTION);
}
1363
/* Data-in phase entry: delegate to the common data-phase setup with the
 * read direction. */
static void
dc390_DataInPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus)
{
    dc390_DataIO_Comm (pACB, pSRB, READ_DIRECTION);
}
1369
/* Command phase: clear ATN and the FIFO, then push the CDB into the chip
 * FIFO — either the midlayer command bytes, or a synthesized 6-byte
 * REQUEST SENSE when auto request-sense is in progress. */
static void
dc390_CommandPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus)
{
    struct dc390_dcb* pDCB;
    u8 i, cnt;
    u8 *ptr;

    DC390_write8 (ScsiCmd, RESET_ATN_CMD);
    DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD);
    if( !(pSRB->SRBFlag & AUTO_REQSENSE) )
    {
	cnt = (u8) pSRB->pcmd->cmd_len;
	ptr = (u8 *) pSRB->pcmd->cmnd;
	for(i=0; i < cnt; i++)
	    DC390_write8 (ScsiFifo, *(ptr++));
    }
    else
    {
	/* Hand-built REQUEST SENSE CDB (6 bytes). */
	DC390_write8 (ScsiFifo, REQUEST_SENSE);
	pDCB = pACB->pActiveDCB;
	DC390_write8 (ScsiFifo, pDCB->TargetLUN << 5);	/* LUN in byte 1 */
	DC390_write8 (ScsiFifo, 0);
	DC390_write8 (ScsiFifo, 0);
	DC390_write8 (ScsiFifo, SCSI_SENSE_BUFFERSIZE);	/* allocation length */
	DC390_write8 (ScsiFifo, 0);
	DEBUG0(printk(KERN_DEBUG "DC390: AutoReqSense (CmndPhase)!\n"));
    }
    pSRB->SRBState = SRB_COMMAND;
    DC390_write8 (ScsiCmd, INFO_XFER_CMD);
}
1400
/* Status phase: clear the FIFO and issue Initiator-Command-Complete,
 * which makes the chip read the status byte plus the following message
 * byte (handled later in dc390_Status_0). */
static void
dc390_StatusPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus)
{
    DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD);
    pSRB->SRBState = SRB_STATUS;
    DC390_write8 (ScsiCmd, INITIATOR_CMD_CMPLTE);
    //DC390_write8 (DMA_Cmd, DMA_IDLE_CMD);
}
1409
/* Message-out phase: send any queued message bytes from MsgOutBuf; with
 * nothing queued, send ABORT (fallback), except that for INQUIRY /
 * REQUEST SENSE / auto-request-sense with sync enabled we jump to the
 * legacy inline SDTR path (mop1).  That path is also entered directly on
 * a repeated message-out (SRB_MSGOUT already set). */
static void
dc390_MsgOutPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus)
{
    u8 bval, i, cnt;
    u8 *ptr;
    struct dc390_dcb* pDCB;

    DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD);
    pDCB = pACB->pActiveDCB;
    if( !(pSRB->SRBState & SRB_MSGOUT) )
    {
	cnt = pSRB->MsgCnt;
	if( cnt )
	{
	    /* Push the queued message bytes into the FIFO. */
	    ptr = (u8 *) pSRB->MsgOutBuf;
	    for(i=0; i < cnt; i++)
		DC390_write8 (ScsiFifo, *(ptr++));
	    pSRB->MsgCnt = 0;
	    if( (pDCB->DCBFlag & ABORT_DEV_) &&
		(pSRB->MsgOutBuf[0] == ABORT) )
		pSRB->SRBState = SRB_ABORT_SENT;
	}
	else
	{
	    bval = ABORT;	/* ??? MSG_NOP */
	    if( (pSRB->pcmd->cmnd[0] == INQUIRY ) ||
		(pSRB->pcmd->cmnd[0] == REQUEST_SENSE) ||
		(pSRB->SRBFlag & AUTO_REQSENSE) )
	    {
		if( pDCB->SyncMode & SYNC_ENABLE )
		    goto mop1;
	    }
	    DC390_write8 (ScsiFifo, bval);
	}
	DC390_write8 (ScsiCmd, INFO_XFER_CMD);
    }
    else
    {
mop1:
	/* Legacy inline SDTR message — should not normally trigger. */
	printk (KERN_ERR "DC390: OLD Sync Nego code triggered! (%i %i)\n", pDCB->TargetID, pDCB->TargetLUN);
	DC390_write8 (ScsiFifo, EXTENDED_MESSAGE);
	DC390_write8 (ScsiFifo, 3);	/* ;length of extended msg */
	DC390_write8 (ScsiFifo, EXTENDED_SDTR);	/* ; sync nego */
	DC390_write8 (ScsiFifo, pDCB->NegoPeriod);
	if (pDCB->SyncOffset & 0x0f)
	    DC390_write8 (ScsiFifo, pDCB->SyncOffset);
	else
	    DC390_write8 (ScsiFifo, SYNC_NEGO_OFFSET);
	pSRB->SRBState |= DO_SYNC_NEGO;
	DC390_write8 (ScsiCmd, INFO_XFER_CMD);
    }
}
1462
/* Message-in phase entry: clear the FIFO, flag that a message is being
 * received (clearing any disconnect expectation) and start the transfer;
 * the bytes are consumed later in dc390_MsgIn_0. */
static void
dc390_MsgInPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus)
{
    DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD);
    if( !(pSRB->SRBState & SRB_MSGIN) )
    {
	pSRB->SRBState &= ~SRB_DISCONNECT;
	pSRB->SRBState |= SRB_MSGIN;
    }
    DC390_write8 (ScsiCmd, INFO_XFER_CMD);
    //DC390_write8 (DMA_Cmd, DMA_IDLE_CMD);
}
1475
/* No-op slot in the phase dispatch table. */
static void
dc390_Nop_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus)
{
}
1480
/* Second no-op slot in the phase dispatch table. */
static void
dc390_Nop_1( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus)
{
}
1485
1486
1487static void
1488dc390_SetXferRate( struct dc390_acb* pACB, struct dc390_dcb* pDCB )
1489{
1490 u8 bval, i, cnt;
1491 struct dc390_dcb* ptr;
1492
1493 if( !(pDCB->TargetLUN) )
1494 {
1495 if( !pACB->scan_devices )
1496 {
1497 ptr = pACB->pLinkDCB;
1498 cnt = pACB->DCBCnt;
1499 bval = pDCB->TargetID;
1500 for(i=0; i<cnt; i++)
1501 {
1502 if( ptr->TargetID == bval )
1503 {
1504 ptr->SyncPeriod = pDCB->SyncPeriod;
1505 ptr->SyncOffset = pDCB->SyncOffset;
1506 ptr->CtrlR3 = pDCB->CtrlR3;
1507 ptr->CtrlR4 = pDCB->CtrlR4;
1508 ptr->SyncMode = pDCB->SyncMode;
1509 }
1510 ptr = ptr->pNextDCB;
1511 }
1512 }
1513 }
1514 return;
1515}
1516
1517
/* Handle a bus disconnect interrupt: re-enable (re)selection, then finish
 * the active SRB according to its state — drop an unexpected-reselect SRB,
 * flush the whole Going list after a sent ABORT, complete on selection
 * timeout or normal command completion, or leave a cleanly disconnected
 * command pending. */
static void
dc390_Disconnect( struct dc390_acb* pACB )
{
    struct dc390_dcb *pDCB;
    struct dc390_srb *pSRB, *psrb;
    u8 i, cnt;

    DEBUG0(printk(KERN_INFO "DISC,"));

    if (!pACB->Connected) printk(KERN_ERR "DC390: Disconnect not-connected bus?\n");
    pACB->Connected = 0;
    pDCB = pACB->pActiveDCB;
    if (!pDCB)
    {
	/* Disconnect without an active device — dump state, wait, rearm. */
	DEBUG0(printk(KERN_ERR "ACB:%p->ActiveDCB:%p IOPort:%04x IRQ:%02x !\n",\
		pACB, pDCB, pACB->IOPortBase, pACB->IRQLevel));
	mdelay(400);
	DC390_read8 (INT_Status);	/* Reset Pending INT */
	DC390_write8 (ScsiCmd, EN_SEL_RESEL);
	return;
    }
    DC390_write8 (ScsiCmd, EN_SEL_RESEL);
    pSRB = pDCB->pActiveSRB;
    pACB->pActiveDCB = NULL;
    pSRB->ScsiPhase = SCSI_NOP0;
    if( pSRB->SRBState & SRB_UNEXPECT_RESEL )
	pSRB->SRBState = 0;
    else if( pSRB->SRBState & SRB_ABORT_SENT )
    {
	/* Abort went out: return every outstanding SRB of this device. */
	pDCB->TagMask = 0;
	pDCB->DCBFlag = 0;
	cnt = pDCB->GoingSRBCnt;
	pDCB->GoingSRBCnt = 0;
	pSRB = pDCB->pGoingSRB;
	for( i=0; i < cnt; i++)
	{
	    psrb = pSRB->pNextSRB;	/* save link before freeing */
	    dc390_Free_insert (pACB, pSRB);
	    pSRB = psrb;
	}
	pDCB->pGoingSRB = NULL;
    }
    else
    {
	if( (pSRB->SRBState & (SRB_START_+SRB_MSGOUT)) ||
	    !(pSRB->SRBState & (SRB_DISCONNECT+SRB_COMPLETED)) )
	{	/* Selection time out */
	    pSRB->AdaptStatus = H_SEL_TIMEOUT;
	    pSRB->TargetStatus = 0;
	    goto disc1;
	}
	else if (!(pSRB->SRBState & SRB_DISCONNECT) && (pSRB->SRBState & SRB_COMPLETED))
	{
disc1:
	    /* Command is finished (completed or timed out): release the
	     * tag and hand the SRB to completion processing. */
	    dc390_freetag (pDCB, pSRB);
	    pDCB->pActiveSRB = NULL;
	    pSRB->SRBState = SRB_FREE;
	    dc390_SRBdone( pACB, pDCB, pSRB);
	}
    }
    pACB->MsgLen = 0;
}
1580
1581
/* Handle a reselection interrupt: if we lost arbitration while a command
 * was starting, bounce that command back to the midlayer; then decode the
 * reselecting target ID and LUN from the FIFO, look up the matching DCB,
 * pick (or improvise) an SRB for the nexus, and reprogram the chip's
 * per-target registers before ACKing the identify message. */
static void
dc390_Reselect( struct dc390_acb* pACB )
{
    struct dc390_dcb* pDCB;
    struct dc390_srb* pSRB;
    u8 id, lun;

    DEBUG0(printk(KERN_INFO "RSEL,"));
    pACB->Connected = 1;
    pDCB = pACB->pActiveDCB;
    if( pDCB )
    {	/* Arbitration lost but Reselection won */
	DEBUG0(printk ("DC390: (ActiveDCB != 0: Arb. lost but resel. won)!\n"));
	pSRB = pDCB->pActiveSRB;
	if( !( pACB->scan_devices ) )
	{
	    /* Return the interrupted command for a midlayer retry. */
	    struct scsi_cmnd *pcmd = pSRB->pcmd;
	    scsi_set_resid(pcmd, scsi_bufflen(pcmd));
	    SET_RES_DID(pcmd->result, DID_SOFT_ERROR);
	    dc390_Going_remove(pDCB, pSRB);
	    dc390_Free_insert(pACB, pSRB);
	    pcmd->scsi_done (pcmd);
	    DEBUG0(printk(KERN_DEBUG"DC390: Return SRB %p to free\n", pSRB));
	}
    }
    /* Get ID */
    lun = DC390_read8 (ScsiFifo);	/* bitmap: our ID + reselecting target's ID */
    DEBUG0(printk ("Dev %02x,", lun));
    if (!(lun & (1 << pACB->pScsiHost->this_id)))
	printk (KERN_ERR "DC390: Reselection must select host adapter: %02x!\n", lun);
    else
	lun ^= 1 << pACB->pScsiHost->this_id; /* Mask AdapterID */
    id = 0; while (lun >>= 1) id++;	/* remaining bit -> target ID */
    /* Get LUN */
    lun = DC390_read8 (ScsiFifo);	/* identify message */
    if (!(lun & IDENTIFY_BASE)) printk (KERN_ERR "DC390: Resel: Expect identify message!\n");
    lun &= 7;
    DEBUG0(printk ("(%02i-%i),", id, lun));
    pDCB = dc390_findDCB (pACB, id, lun);
    if (!pDCB)
    {
	printk (KERN_ERR "DC390: Reselect from non existing device (%02i-%i)\n",
		id, lun);
	return;
    }
    pACB->pActiveDCB = pDCB;
    /* TagQ: We expect a message soon, so never mind the exact SRB */
    if( pDCB->SyncMode & EN_TAG_QUEUEING )
    {
	pSRB = pACB->pTmpSRB;
	pDCB->pActiveSRB = pSRB;
    }
    else
    {
	pSRB = pDCB->pActiveSRB;
	if( !pSRB || !(pSRB->SRBState & SRB_DISCONNECT) )
	{
	    /* No disconnected command outstanding — abort this nexus. */
	    pSRB= pACB->pTmpSRB;
	    pSRB->SRBState = SRB_UNEXPECT_RESEL;
	    printk (KERN_ERR "DC390: Reselect without outstanding cmnd (%02i-%i)\n",
		    id, lun);
	    pDCB->pActiveSRB = pSRB;
	    dc390_EnableMsgOut_Abort ( pACB, pSRB );
	}
	else
	{
	    if( pDCB->DCBFlag & ABORT_DEV_ )
	    {
		pSRB->SRBState = SRB_ABORT_SENT;
		printk (KERN_INFO "DC390: Reselect: Abort (%02i-%i)\n",
			id, lun);
		dc390_EnableMsgOut_Abort( pACB, pSRB );
	    }
	    else
		pSRB->SRBState = SRB_DATA_XFER;
	}
    }

    DEBUG1(printk (KERN_DEBUG "Resel SRB(%p): TagNum (%02x)\n", pSRB, pSRB->TagNumber));
    pSRB->ScsiPhase = SCSI_NOP0;
    DC390_write8 (Scsi_Dest_ID, pDCB->TargetID);
    DC390_write8 (Sync_Period, pDCB->SyncPeriod);
    DC390_write8 (Sync_Offset, pDCB->SyncOffset);
    DC390_write8 (CtrlReg1, pDCB->CtrlR1);
    DC390_write8 (CtrlReg3, pDCB->CtrlR3);
    DC390_write8 (CtrlReg4, pDCB->CtrlR4);	/* ; Glitch eater */
    DC390_write8 (ScsiCmd, MSG_ACCEPTED_CMD);	/* ;to release the /ACK signal */
}
1670
/* Kick off an automatic REQUEST SENSE for a command that returned CHECK
 * CONDITION: save the transferred length, reset the SG bookkeeping and
 * restart the (same) SRB with the AUTO_REQSENSE flag set — the sense CDB
 * is synthesized later in dc390_CommandPhase.  Returns the result of
 * dc390_StartSCSI (non-zero on failure to start). */
static int __inline__
dc390_RequestSense(struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb* pSRB)
{
    struct scsi_cmnd *pcmd;

    pcmd = pSRB->pcmd;

    REMOVABLEDEBUG(printk(KERN_INFO "DC390: RequestSense(Cmd %02x, Id %02x, LUN %02x)\n",\
	    pcmd->cmnd[0], pDCB->TargetID, pDCB->TargetLUN));

    pSRB->SRBFlag |= AUTO_REQSENSE;
    pSRB->SavedTotXLen = pSRB->TotalXferredLen;
    pSRB->AdaptStatus = 0;
    pSRB->TargetStatus = 0; /* CHECK_CONDITION<<1; */

    /* We are called from SRBdone, original PCI mapping has been removed
     * already, new one is set up from StartSCSI */
    pSRB->SGIndex = 0;

    pSRB->TotalXferredLen = 0;
    pSRB->SGToBeXferLen = 0;
    return dc390_StartSCSI(pACB, pDCB, pSRB);
}
1694
1695
/* Complete an SRB: unmap its PCI/DMA resources, translate target/adapter
 * status into a midlayer result code (possibly launching an automatic
 * REQUEST SENSE, in which case we return without completing), set the
 * residual, move the SRB back to the free list and call scsi_done. */
static void
dc390_SRBdone( struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb* pSRB )
{
    u8 status;
    struct scsi_cmnd *pcmd;

    pcmd = pSRB->pcmd;
    /* KG: Moved pci_unmap here */
    dc390_pci_unmap(pSRB);

    status = pSRB->TargetStatus;

    DEBUG0(printk (" SRBdone (%02x,%08x), SRB %p\n", status, pcmd->result, pSRB));
    if(pSRB->SRBFlag & AUTO_REQSENSE)
    {	/* Last command was a Request Sense */
	pSRB->SRBFlag &= ~AUTO_REQSENSE;
	pSRB->AdaptStatus = 0;
	pSRB->TargetStatus = SAM_STAT_CHECK_CONDITION;

	//pcmd->result = MK_RES(DRIVER_SENSE,DID_OK,0,status);
	if (status == SAM_STAT_CHECK_CONDITION)
	    /* The REQUEST SENSE itself failed — give up on the target. */
	    pcmd->result = MK_RES_LNX(0, DID_BAD_TARGET, 0, /*CHECK_CONDITION*/0);
	else /* Retry */
	{
	    if( pSRB->pcmd->cmnd[0] == TEST_UNIT_READY /* || pSRB->pcmd->cmnd[0] == START_STOP */)
	    {
		/* Don't retry on TEST_UNIT_READY */
		pcmd->result = MK_RES_LNX(DRIVER_SENSE, DID_OK, 0, SAM_STAT_CHECK_CONDITION);
		REMOVABLEDEBUG(printk(KERN_INFO "Cmd=%02x, Result=%08x, XferL=%08x\n",pSRB->pcmd->cmnd[0],\
			(u32) pcmd->result, (u32) pSRB->TotalXferredLen));
	    } else {
		SET_RES_DRV(pcmd->result, DRIVER_SENSE);
		//pSRB->ScsiCmdLen = (u8) (pSRB->Segment1[0] >> 8);
		DEBUG0 (printk ("DC390: RETRY (%02x), target %02i-%02i\n", pcmd->cmnd[0], pcmd->device->id, (u8)pcmd->device->lun));
		pSRB->TotalXferredLen = 0;
		SET_RES_DID(pcmd->result, DID_SOFT_ERROR);
	    }
	}
	goto cmd_done;
    }
    if( status )
    {
	if (status == SAM_STAT_CHECK_CONDITION)
	{
	    /* Fetch sense data; on success the SRB stays in flight. */
	    if (dc390_RequestSense(pACB, pDCB, pSRB)) {
		SET_RES_DID(pcmd->result, DID_ERROR);
		goto cmd_done;
	    }
	    return;
	}
	else if (status == SAM_STAT_TASK_SET_FULL)
	{
	    /* Queue full: shrink the queue depth and retry softly. */
	    scsi_track_queue_full(pcmd->device, pDCB->GoingSRBCnt - 1);
	    DEBUG0 (printk ("DC390: RETRY (%02x), target %02i-%02i\n", pcmd->cmnd[0], pcmd->device->id, (u8)pcmd->device->lun));
	    pSRB->TotalXferredLen = 0;
	    SET_RES_DID(pcmd->result, DID_SOFT_ERROR);
	}
	else if (status == SAM_STAT_BUSY &&
		 (pcmd->cmnd[0] == TEST_UNIT_READY || pcmd->cmnd[0] == INQUIRY) &&
		 pACB->scan_devices)
	{
	    /* BUSY during scan is tolerated — report success-ish result. */
	    pSRB->AdaptStatus = 0;
	    pSRB->TargetStatus = status;
	    pcmd->result = MK_RES(0,0,pSRB->EndMessage,/*status*/0);
	}
	else
	{	/* Another error */
	    pSRB->TotalXferredLen = 0;
	    SET_RES_DID(pcmd->result, DID_SOFT_ERROR);
	    goto cmd_done;
	}
    }
    else
    {	/* Target status == 0 */
	status = pSRB->AdaptStatus;
	if (status == H_OVER_UNDER_RUN)
	{
	    pSRB->TargetStatus = 0;
	    SET_RES_DID(pcmd->result,DID_OK);
	    SET_RES_MSG(pcmd->result,pSRB->EndMessage);
	}
	else if (status == H_SEL_TIMEOUT)
	{
	    pcmd->result = MK_RES(0, DID_NO_CONNECT, 0, 0);
	    /* Devices are removed below ... */
	}
	else if( pSRB->SRBStatus & PARITY_ERROR)
	{
	    //pcmd->result = MK_RES(0,DID_PARITY,pSRB->EndMessage,0);
	    SET_RES_DID(pcmd->result,DID_PARITY);
	    SET_RES_MSG(pcmd->result,pSRB->EndMessage);
	}
	else /* No error */
	{
	    pSRB->AdaptStatus = 0;
	    pSRB->TargetStatus = 0;
	    SET_RES_DID(pcmd->result,DID_OK);
	}
    }

cmd_done:
    scsi_set_resid(pcmd, scsi_bufflen(pcmd) - pSRB->TotalXferredLen);

    dc390_Going_remove (pDCB, pSRB);
    /* Add to free list */
    dc390_Free_insert (pACB, pSRB);

    DEBUG0(printk (KERN_DEBUG "DC390: SRBdone: done\n"));
    pcmd->scsi_done (pcmd);

    return;
}
1808
1809
1810/* Remove all SRBs from Going list and inform midlevel */
1811static void
1812dc390_DoingSRB_Done(struct dc390_acb* pACB, struct scsi_cmnd *cmd)
1813{
1814 struct dc390_dcb *pDCB, *pdcb;
1815 struct dc390_srb *psrb, *psrb2;
1816 int i;
1817 struct scsi_cmnd *pcmd;
1818
1819 pDCB = pACB->pLinkDCB;
1820 pdcb = pDCB;
1821 if (! pdcb) return;
1822 do
1823 {
1824 psrb = pdcb->pGoingSRB;
1825 for (i = 0; i < pdcb->GoingSRBCnt; i++)
1826 {
1827 psrb2 = psrb->pNextSRB;
1828 pcmd = psrb->pcmd;
1829 dc390_Free_insert (pACB, psrb);
1830 psrb = psrb2;
1831 }
1832 pdcb->GoingSRBCnt = 0;
1833 pdcb->pGoingSRB = NULL;
1834 pdcb->TagMask = 0;
1835 pdcb = pdcb->pNextDCB;
1836 } while( pdcb != pDCB );
1837}
1838
1839
/* Issue a SCSI bus reset: clear the FIFO, idle the DMA engine, then send
 * the reset-bus command.  The resulting reset-detect interrupt is handled
 * in dc390_ScsiRstDetect. */
static void
dc390_ResetSCSIBus( struct dc390_acb* pACB )
{
    //DC390_write8 (ScsiCmd, RST_DEVICE_CMD);
    //udelay (250);
    //DC390_write8 (ScsiCmd, NOP_CMD);

    DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD);
    DC390_write8 (DMA_Cmd, DMA_IDLE_CMD);
    DC390_write8 (ScsiCmd, RST_SCSI_BUS_CMD);
    pACB->Connected = 0;

    return;
}
1854
/* Handle a bus-reset-detected interrupt: quiesce the chip, push out the
 * next-command deadline, and — if the reset was not issued by ourselves —
 * forget all negotiated parameters and flush outstanding SRBs. */
static void
dc390_ScsiRstDetect( struct dc390_acb* pACB )
{
    printk ("DC390: Rst_Detect: laststat = %08x\n", dc390_laststatus);
    //DEBUG0(printk(KERN_INFO "RST_DETECT,"));

    DC390_write8 (DMA_Cmd, DMA_IDLE_CMD);
    /* Unlock before ? */
    /* udelay(1000) is 1 ms, not the "half a second" the old comment
     * claimed — TODO confirm the intended settle time. */
    udelay (1000);
    DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD);
    pACB->last_reset = jiffies + 5*HZ/2
		    + HZ * dc390_eepromBuf[pACB->AdapterIndex][EE_DELAY];
    pACB->Connected = 0;

    if( pACB->ACBFlag & RESET_DEV )
	pACB->ACBFlag |= RESET_DONE;
    else
    {	/* Reset was issued by sb else */
	pACB->ACBFlag |= RESET_DETECT;

	dc390_ResetDevParam( pACB );
	dc390_DoingSRB_Done( pACB, NULL);
	//dc390_RecoverSRB( pACB );
	pACB->pActiveDCB = NULL;
	pACB->ACBFlag = 0;
    }
    return;
}
1884
/* queuecommand (locked variant, wrapped by DEF_SCSI_QCMD below): take a
 * free SRB, initialize it for this command, and start it on the bus.
 * Returns 0 on success, SCSI_MLQUEUE_DEVICE_BUSY when the per-device
 * queue is full, or SCSI_MLQUEUE_HOST_BUSY when the adapter is busy,
 * resetting, or out of SRBs. */
static int DC390_queuecommand_lck(struct scsi_cmnd *cmd,
		void (*done)(struct scsi_cmnd *))
{
    struct scsi_device *sdev = cmd->device;
    struct dc390_acb *acb = (struct dc390_acb *)sdev->host->hostdata;
    struct dc390_dcb *dcb = sdev->hostdata;
    struct dc390_srb *srb;

    if (sdev->queue_depth <= dcb->GoingSRBCnt)
	goto device_busy;
    if (acb->pActiveDCB)
	goto host_busy;
    if (acb->ACBFlag & (RESET_DETECT|RESET_DONE|RESET_DEV))
	goto host_busy;

    srb = acb->pFreeSRB;
    if (unlikely(srb == NULL))
	goto host_busy;

    cmd->scsi_done = done;
    cmd->result = 0;
    acb->Cmds++;

    /* Unlink the SRB from the free list. */
    acb->pFreeSRB = srb->pNextSRB;
    srb->pNextSRB = NULL;

    srb->pSRBDCB = dcb;
    srb->pcmd = cmd;
    cmd->host_scribble = (char *)srb;	/* tag lookups map back via this */

    srb->SGIndex = 0;
    srb->AdaptStatus = 0;
    srb->TargetStatus = 0;
    srb->MsgCnt = 0;

    srb->SRBStatus = 0;
    srb->SRBFlag = 0;
    srb->SRBState = 0;
    srb->TotalXferredLen = 0;
    srb->SGBusAddr = 0;
    srb->SGToBeXferLen = 0;
    srb->ScsiPhase = 0;
    srb->EndMessage = 0;
    srb->TagNumber = SCSI_NO_TAG;

    if (dc390_StartSCSI(acb, dcb, srb)) {
	/* Could not start — return the SRB and push back to the midlayer. */
	dc390_Free_insert(acb, srb);
	goto host_busy;
    }

    dc390_Going_append(dcb, srb);

    return 0;

 host_busy:
    return SCSI_MLQUEUE_HOST_BUSY;

 device_busy:
    return SCSI_MLQUEUE_DEVICE_BUSY;
}
1945
1946static DEF_SCSI_QCMD(DC390_queuecommand)
1947
1948static void dc390_dumpinfo (struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb* pSRB)
1949{
1950 struct pci_dev *pdev;
1951 u16 pstat;
1952
1953 if (!pDCB) pDCB = pACB->pActiveDCB;
1954 if (!pSRB && pDCB) pSRB = pDCB->pActiveSRB;
1955
1956 if (pSRB)
1957 {
1958 printk ("DC390: SRB: Xferred %08lx, Remain %08lx, State %08x, Phase %02x\n",
1959 pSRB->TotalXferredLen, pSRB->SGToBeXferLen, pSRB->SRBState,
1960 pSRB->ScsiPhase);
1961 printk ("DC390: AdpaterStatus: %02x, SRB Status %02x\n", pSRB->AdaptStatus, pSRB->SRBStatus);
1962 }
1963 printk ("DC390: Status of last IRQ (DMA/SC/Int/IRQ): %08x\n", dc390_laststatus);
1964 printk ("DC390: Register dump: SCSI block:\n");
1965 printk ("DC390: XferCnt Cmd Stat IntS IRQS FFIS Ctl1 Ctl2 Ctl3 Ctl4\n");
1966 printk ("DC390: %06x %02x %02x %02x",
1967 DC390_read8(CtcReg_Low) + (DC390_read8(CtcReg_Mid) << 8) + (DC390_read8(CtcReg_High) << 16),
1968 DC390_read8(ScsiCmd), DC390_read8(Scsi_Status), DC390_read8(Intern_State));
1969 printk (" %02x %02x %02x %02x %02x %02x\n",
1970 DC390_read8(INT_Status), DC390_read8(Current_Fifo), DC390_read8(CtrlReg1),
1971 DC390_read8(CtrlReg2), DC390_read8(CtrlReg3), DC390_read8(CtrlReg4));
1972 DC390_write32 (DMA_ScsiBusCtrl, WRT_ERASE_DMA_STAT | EN_INT_ON_PCI_ABORT);
1973 if (DC390_read8(Current_Fifo) & 0x1f)
1974 {
1975 printk ("DC390: FIFO:");
1976 while (DC390_read8(Current_Fifo) & 0x1f) printk (" %02x", DC390_read8(ScsiFifo));
1977 printk ("\n");
1978 }
1979 printk ("DC390: Register dump: DMA engine:\n");
1980 printk ("DC390: Cmd STrCnt SBusA WrkBC WrkAC Stat SBusCtrl\n");
1981 printk ("DC390: %02x %08x %08x %08x %08x %02x %08x\n",
1982 DC390_read8(DMA_Cmd), DC390_read32(DMA_XferCnt), DC390_read32(DMA_XferAddr),
1983 DC390_read32(DMA_Wk_ByteCntr), DC390_read32(DMA_Wk_AddrCntr),
1984 DC390_read8(DMA_Status), DC390_read32(DMA_ScsiBusCtrl));
1985 DC390_write32 (DMA_ScsiBusCtrl, EN_INT_ON_PCI_ABORT);
1986
1987 pdev = pACB->pdev;
1988 pci_read_config_word(pdev, PCI_STATUS, &pstat);
1989 printk ("DC390: Register dump: PCI Status: %04x\n", pstat);
1990 printk ("DC390: In case of driver trouble read Documentation/scsi/tmscsim.txt\n");
1991}
1992
1993
1994static int DC390_abort(struct scsi_cmnd *cmd)
1995{
1996 struct dc390_acb *pACB = (struct dc390_acb*) cmd->device->host->hostdata;
1997 struct dc390_dcb *pDCB = (struct dc390_dcb*) cmd->device->hostdata;
1998
1999 scmd_printk(KERN_WARNING, cmd, "DC390: Abort command\n");
2000
2001 /* abort() is too stupid for already sent commands at the moment.
2002 * If it's called we are in trouble anyway, so let's dump some info
2003 * into the syslog at least. (KG, 98/08/20,99/06/20) */
2004 dc390_dumpinfo(pACB, pDCB, NULL);
2005
2006 pDCB->DCBFlag |= ABORT_DEV_;
2007 printk(KERN_INFO "DC390: Aborted.\n");
2008
2009 return FAILED;
2010}
2011
2012
2013static void dc390_ResetDevParam( struct dc390_acb* pACB )
2014{
2015 struct dc390_dcb *pDCB, *pdcb;
2016
2017 pDCB = pACB->pLinkDCB;
2018 if (! pDCB) return;
2019 pdcb = pDCB;
2020 do
2021 {
2022 pDCB->SyncMode &= ~SYNC_NEGO_DONE;
2023 pDCB->SyncPeriod = 0;
2024 pDCB->SyncOffset = 0;
2025 pDCB->TagMask = 0;
2026 pDCB->CtrlR3 = FAST_CLK;
2027 pDCB->CtrlR4 &= NEGATE_REQACKDATA | CTRL4_RESERVED | NEGATE_REQACK;
2028 pDCB->CtrlR4 |= pACB->glitch_cfg;
2029 pDCB = pDCB->pNextDCB;
2030 }
2031 while( pdcb != pDCB );
2032 pACB->ACBFlag &= ~(RESET_DEV | RESET_DONE | RESET_DETECT);
2033
2034}
2035
/*
 * DC390_bus_reset - SCSI EH bus reset handler.
 *
 * Resets the SCSI bus, wipes all negotiated per-device parameters, fails
 * back outstanding commands and re-arms the chip.  The statement order is
 * significant (IRQ masking around the reset pulse, FIFO/INT clearing after).
 * Always returns SUCCESS.
 */
static int DC390_bus_reset (struct scsi_cmnd *cmd)
{
	struct dc390_acb* pACB = (struct dc390_acb*) cmd->device->host->hostdata;
	u8 bval;

	spin_lock_irq(cmd->device->host->host_lock);

	bval = DC390_read8(CtrlReg1) | DIS_INT_ON_SCSI_RST;
	DC390_write8(CtrlReg1, bval);	/* disable IRQ on bus reset */

	pACB->ACBFlag |= RESET_DEV;
	dc390_ResetSCSIBus(pACB);

	/* Forget all negotiated sync/tag state for every device */
	dc390_ResetDevParam(pACB);
	mdelay(1);
	/* Hold off new activity; EE_DELAY is the user-configured post-reset
	 * delay in seconds */
	pACB->last_reset = jiffies + 3*HZ/2
		+ HZ * dc390_eepromBuf[pACB->AdapterIndex][EE_DELAY];

	DC390_write8(ScsiCmd, CLEAR_FIFO_CMD);
	DC390_read8(INT_Status);	/* Reset Pending INT */

	/* Complete all commands that were in flight on this adapter
	 * (presumably with an error status — see dc390_DoingSRB_Done) */
	dc390_DoingSRB_Done(pACB, cmd);

	pACB->pActiveDCB = NULL;
	pACB->ACBFlag = 0;

	bval = DC390_read8(CtrlReg1) & ~DIS_INT_ON_SCSI_RST;
	DC390_write8(CtrlReg1, bval);	/* re-enable interrupt */

	spin_unlock_irq(cmd->device->host->host_lock);

	return SUCCESS;
}
2069
2070/**
2071 * dc390_slave_alloc - Called by the scsi mid layer to tell us about a new
2072 * scsi device that we need to deal with.
2073 *
2074 * @scsi_device: The new scsi device that we need to handle.
2075 */
2076static int dc390_slave_alloc(struct scsi_device *scsi_device)
2077{
2078 struct dc390_acb *pACB = (struct dc390_acb*) scsi_device->host->hostdata;
2079 struct dc390_dcb *pDCB, *pDCB2 = NULL;
2080 uint id = scsi_device->id;
2081 uint lun = scsi_device->lun;
2082
2083 pDCB = kzalloc(sizeof(struct dc390_dcb), GFP_KERNEL);
2084 if (!pDCB)
2085 return -ENOMEM;
2086
2087 if (!pACB->DCBCnt++) {
2088 pACB->pLinkDCB = pDCB;
2089 pACB->pDCBRunRobin = pDCB;
2090 } else {
2091 pACB->pLastDCB->pNextDCB = pDCB;
2092 }
2093
2094 pDCB->pNextDCB = pACB->pLinkDCB;
2095 pACB->pLastDCB = pDCB;
2096
2097 pDCB->pDCBACB = pACB;
2098 pDCB->TargetID = id;
2099 pDCB->TargetLUN = lun;
2100
2101 /*
2102 * Some values are for all LUNs: Copy them
2103 * In a clean way: We would have an own structure for a SCSI-ID
2104 */
2105 if (lun && (pDCB2 = dc390_findDCB(pACB, id, 0))) {
2106 pDCB->DevMode = pDCB2->DevMode;
2107 pDCB->SyncMode = pDCB2->SyncMode & SYNC_NEGO_DONE;
2108 pDCB->SyncPeriod = pDCB2->SyncPeriod;
2109 pDCB->SyncOffset = pDCB2->SyncOffset;
2110 pDCB->NegoPeriod = pDCB2->NegoPeriod;
2111
2112 pDCB->CtrlR3 = pDCB2->CtrlR3;
2113 pDCB->CtrlR4 = pDCB2->CtrlR4;
2114 } else {
2115 u8 index = pACB->AdapterIndex;
2116 PEEprom prom = (PEEprom) &dc390_eepromBuf[index][id << 2];
2117
2118 pDCB->DevMode = prom->EE_MODE1;
2119 pDCB->NegoPeriod =
2120 (dc390_clock_period1[prom->EE_SPEED] * 25) >> 2;
2121 pDCB->CtrlR3 = FAST_CLK;
2122 pDCB->CtrlR4 = pACB->glitch_cfg | CTRL4_RESERVED;
2123 if (dc390_eepromBuf[index][EE_MODE2] & ACTIVE_NEGATION)
2124 pDCB->CtrlR4 |= NEGATE_REQACKDATA | NEGATE_REQACK;
2125 }
2126
2127 if (pDCB->DevMode & SYNC_NEGO_)
2128 pDCB->SyncMode |= SYNC_ENABLE;
2129 else {
2130 pDCB->SyncMode = 0;
2131 pDCB->SyncOffset &= ~0x0f;
2132 }
2133
2134 pDCB->CtrlR1 = pACB->pScsiHost->this_id;
2135 if (pDCB->DevMode & PARITY_CHK_)
2136 pDCB->CtrlR1 |= PARITY_ERR_REPO;
2137
2138 pACB->scan_devices = 1;
2139 scsi_device->hostdata = pDCB;
2140 return 0;
2141}
2142
2143/**
2144 * dc390_slave_destroy - Called by the scsi mid layer to tell us about a
2145 * device that is going away.
2146 *
2147 * @scsi_device: The scsi device that we need to remove.
2148 */
2149static void dc390_slave_destroy(struct scsi_device *scsi_device)
2150{
2151 struct dc390_acb* pACB = (struct dc390_acb*) scsi_device->host->hostdata;
2152 struct dc390_dcb* pDCB = (struct dc390_dcb*) scsi_device->hostdata;
2153 struct dc390_dcb* pPrevDCB = pACB->pLinkDCB;
2154
2155 pACB->scan_devices = 0;
2156
2157 BUG_ON(pDCB->GoingSRBCnt > 1);
2158
2159 if (pDCB == pACB->pLinkDCB) {
2160 if (pACB->pLastDCB == pDCB) {
2161 pDCB->pNextDCB = NULL;
2162 pACB->pLastDCB = NULL;
2163 }
2164 pACB->pLinkDCB = pDCB->pNextDCB;
2165 } else {
2166 while (pPrevDCB->pNextDCB != pDCB)
2167 pPrevDCB = pPrevDCB->pNextDCB;
2168 pPrevDCB->pNextDCB = pDCB->pNextDCB;
2169 if (pDCB == pACB->pLastDCB)
2170 pACB->pLastDCB = pPrevDCB;
2171 }
2172
2173 if (pDCB == pACB->pActiveDCB)
2174 pACB->pActiveDCB = NULL;
2175 if (pDCB == pACB->pLinkDCB)
2176 pACB->pLinkDCB = pDCB->pNextDCB;
2177 if (pDCB == pACB->pDCBRunRobin)
2178 pACB->pDCBRunRobin = pDCB->pNextDCB;
2179 kfree(pDCB);
2180
2181 pACB->DCBCnt--;
2182}
2183
2184static int dc390_slave_configure(struct scsi_device *sdev)
2185{
2186 struct dc390_acb *acb = (struct dc390_acb *)sdev->host->hostdata;
2187 struct dc390_dcb *dcb = (struct dc390_dcb *)sdev->hostdata;
2188
2189 acb->scan_devices = 0;
2190 if (sdev->tagged_supported && (dcb->DevMode & TAG_QUEUEING_)) {
2191 dcb->SyncMode |= EN_TAG_QUEUEING;
2192 scsi_activate_tcq(sdev, acb->TagMaxNum);
2193 }
2194
2195 return 0;
2196}
2197
/* Host template the midlayer uses for every DC390 adapter we probe. */
static struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.proc_name = "tmscsim",
	.name = DC390_BANNER " V" DC390_VERSION,
	.slave_alloc = dc390_slave_alloc,
	.slave_configure = dc390_slave_configure,
	.slave_destroy = dc390_slave_destroy,
	.queuecommand = DC390_queuecommand,
	.eh_abort_handler = DC390_abort,
	.eh_bus_reset_handler = DC390_bus_reset,
	.can_queue = 1,			/* one command in flight per host */
	.this_id = 7,			/* overridden from EEPROM at probe */
	.sg_tablesize = SG_ALL,
	.cmd_per_lun = 1,
	.use_clustering = ENABLE_CLUSTERING,	/* may be flipped by disable_clustering */
	.max_sectors = 0x4000, /* 8MiB = 16 * 1024 * 512 */
};
2215
2216/***********************************************************************
2217 * Functions for access to DC390 EEPROM
2218 * and some to emulate it
2219 *
2220 **********************************************************************/
2221
2222static void dc390_eeprom_prepare_read(struct pci_dev *pdev, u8 cmd)
2223{
2224 u8 carryFlag = 1, j = 0x80, bval;
2225 int i;
2226
2227 for (i = 0; i < 9; i++) {
2228 if (carryFlag) {
2229 pci_write_config_byte(pdev, 0x80, 0x40);
2230 bval = 0xc0;
2231 } else
2232 bval = 0x80;
2233
2234 udelay(160);
2235 pci_write_config_byte(pdev, 0x80, bval);
2236 udelay(160);
2237 pci_write_config_byte(pdev, 0x80, 0);
2238 udelay(160);
2239
2240 carryFlag = (cmd & j) ? 1 : 0;
2241 j >>= 1;
2242 }
2243}
2244
2245static u16 dc390_eeprom_get_data(struct pci_dev *pdev)
2246{
2247 int i;
2248 u16 wval = 0;
2249 u8 bval;
2250
2251 for (i = 0; i < 16; i++) {
2252 wval <<= 1;
2253
2254 pci_write_config_byte(pdev, 0x80, 0x80);
2255 udelay(160);
2256 pci_write_config_byte(pdev, 0x80, 0x40);
2257 udelay(160);
2258 pci_read_config_byte(pdev, 0x00, &bval);
2259
2260 if (bval == 0x22)
2261 wval |= 1;
2262 }
2263
2264 return wval;
2265}
2266
2267static void dc390_read_eeprom(struct pci_dev *pdev, u16 *ptr)
2268{
2269 u8 cmd = EEPROM_READ, i;
2270
2271 for (i = 0; i < 0x40; i++) {
2272 pci_write_config_byte(pdev, 0xc0, 0);
2273 udelay(160);
2274
2275 dc390_eeprom_prepare_read(pdev, cmd++);
2276 *ptr++ = dc390_eeprom_get_data(pdev);
2277
2278 pci_write_config_byte(pdev, 0x80, 0);
2279 pci_write_config_byte(pdev, 0x80, 0);
2280 udelay(160);
2281 }
2282}
2283
2284/* Override EEprom values with explicitly set values */
2285static void dc390_eeprom_override(u8 index)
2286{
2287 u8 *ptr = (u8 *) dc390_eepromBuf[index], id;
2288
2289 /* Adapter Settings */
2290 if (tmscsim[0] != -2)
2291 ptr[EE_ADAPT_SCSI_ID] = (u8)tmscsim[0]; /* Adapter ID */
2292 if (tmscsim[3] != -2)
2293 ptr[EE_MODE2] = (u8)tmscsim[3];
2294 if (tmscsim[5] != -2)
2295 ptr[EE_DELAY] = tmscsim[5]; /* Reset delay */
2296 if (tmscsim[4] != -2)
2297 ptr[EE_TAG_CMD_NUM] = (u8)tmscsim[4]; /* Tagged Cmds */
2298
2299 /* Device Settings */
2300 for (id = 0; id < MAX_SCSI_ID; id++) {
2301 if (tmscsim[2] != -2)
2302 ptr[id << 2] = (u8)tmscsim[2]; /* EE_MODE1 */
2303 if (tmscsim[1] != -2)
2304 ptr[(id << 2) + 1] = (u8)tmscsim[1]; /* EE_Speed */
2305 }
2306}
2307
/* Default "tmscsim=" parameters, applied per-slot by
 * dc390_fill_with_defaults(): adapter ID, speed index, device mode 1,
 * adapter mode 2, tag-count exponent, post-reset delay. */
static int tmscsim_def[] = {
	7,
	0 /* 10MHz */,
	PARITY_CHK_ | SEND_START_ | EN_DISCONNECT_ | SYNC_NEGO_ | TAG_QUEUEING_,
	MORE2_DRV | GREATER_1G | RST_SCSI_BUS | ACTIVE_NEGATION | LUN_CHECK,
	3 /* 16 Tags per LUN */,
	1 /* s delay after Reset */,
};
2316
2317/* Copy defaults over set values where missing */
2318static void dc390_fill_with_defaults (void)
2319{
2320 int i;
2321
2322 for (i = 0; i < 6; i++) {
2323 if (tmscsim[i] < 0 || tmscsim[i] > 255)
2324 tmscsim[i] = tmscsim_def[i];
2325 }
2326
2327 /* Sanity checks */
2328 if (tmscsim[0] > 7)
2329 tmscsim[0] = 7;
2330 if (tmscsim[1] > 7)
2331 tmscsim[1] = 4;
2332 if (tmscsim[4] > 5)
2333 tmscsim[4] = 4;
2334 if (tmscsim[5] > 180)
2335 tmscsim[5] = 180;
2336}
2337
2338static void dc390_check_eeprom(struct pci_dev *pdev, u8 index)
2339{
2340 u8 interpd[] = {1, 3, 5, 10, 16, 30, 60, 120};
2341 u8 EEbuf[128];
2342 u16 *ptr = (u16 *)EEbuf, wval = 0;
2343 int i;
2344
2345 dc390_read_eeprom(pdev, ptr);
2346 memcpy(dc390_eepromBuf[index], EEbuf, EE_ADAPT_SCSI_ID);
2347 memcpy(&dc390_eepromBuf[index][EE_ADAPT_SCSI_ID],
2348 &EEbuf[REAL_EE_ADAPT_SCSI_ID], EE_LEN - EE_ADAPT_SCSI_ID);
2349
2350 dc390_eepromBuf[index][EE_DELAY] = interpd[dc390_eepromBuf[index][EE_DELAY]];
2351
2352 for (i = 0; i < 0x40; i++, ptr++)
2353 wval += *ptr;
2354
2355 /* no Tekram EEprom found */
2356 if (wval != 0x1234) {
2357 int speed;
2358
2359 printk(KERN_INFO "DC390_init: No EEPROM found! Trying default settings ...\n");
2360
2361 /*
2362 * XXX(hch): bogus, because we might have tekram and
2363 * non-tekram hbas in a single machine.
2364 */
2365 dc390_fill_with_defaults();
2366
2367 speed = dc390_clock_speed[tmscsim[1]];
2368 printk(KERN_INFO "DC390: Used defaults: AdaptID=%i, SpeedIdx=%i (%i.%i MHz), "
2369 "DevMode=0x%02x, AdaptMode=0x%02x, TaggedCmnds=%i (%i), DelayReset=%is\n",
2370 tmscsim[0], tmscsim[1], speed / 10, speed % 10,
2371 (u8)tmscsim[2], (u8)tmscsim[3], tmscsim[4], 2 << (tmscsim[4]), tmscsim[5]);
2372 }
2373}
2374
2375static void dc390_init_hw(struct dc390_acb *pACB, u8 index)
2376{
2377 struct Scsi_Host *shost = pACB->pScsiHost;
2378 u8 dstate;
2379
2380 /* Disable SCSI bus reset interrupt */
2381 DC390_write8(CtrlReg1, DIS_INT_ON_SCSI_RST | shost->this_id);
2382
2383 if (pACB->Gmode2 & RST_SCSI_BUS) {
2384 dc390_ResetSCSIBus(pACB);
2385 udelay(1000);
2386 pACB->last_reset = jiffies + HZ/2 +
2387 HZ * dc390_eepromBuf[pACB->AdapterIndex][EE_DELAY];
2388 }
2389
2390 pACB->ACBFlag = 0;
2391
2392 /* Reset Pending INT */
2393 DC390_read8(INT_Status);
2394
2395 /* 250ms selection timeout */
2396 DC390_write8(Scsi_TimeOut, SEL_TIMEOUT);
2397
2398 /* Conversion factor = 0 , 40MHz clock */
2399 DC390_write8(Clk_Factor, CLK_FREQ_40MHZ);
2400
2401 /* NOP cmd - clear command register */
2402 DC390_write8(ScsiCmd, NOP_CMD);
2403
2404 /* Enable Feature and SCSI-2 */
2405 DC390_write8(CtrlReg2, EN_FEATURE+EN_SCSI2_CMD);
2406
2407 /* Fast clock */
2408 DC390_write8(CtrlReg3, FAST_CLK);
2409
2410 /* Negation */
2411 DC390_write8(CtrlReg4, pACB->glitch_cfg | /* glitch eater */
2412 (dc390_eepromBuf[index][EE_MODE2] & ACTIVE_NEGATION) ?
2413 NEGATE_REQACKDATA : 0);
2414
2415 /* Clear Transfer Count High: ID */
2416 DC390_write8(CtcReg_High, 0);
2417 DC390_write8(DMA_Cmd, DMA_IDLE_CMD);
2418 DC390_write8(ScsiCmd, CLEAR_FIFO_CMD);
2419 DC390_write32(DMA_ScsiBusCtrl, EN_INT_ON_PCI_ABORT);
2420
2421 dstate = DC390_read8(DMA_Status);
2422 DC390_write8(DMA_Status, dstate);
2423}
2424
/*
 * dc390_probe_one - PCI probe: allocate a Scsi_Host + ACB for one adapter,
 * read/override its EEPROM, claim I/O region and IRQ, initialize the chip
 * and register with the SCSI midlayer.  Errors unwind via gotos in reverse
 * order of acquisition.
 */
static int dc390_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct dc390_acb *pACB;
	struct Scsi_Host *shost;
	unsigned long io_port;
	int error = -ENODEV, i;

	if (pci_enable_device(pdev))
		goto out;

	pci_set_master(pdev);

	error = -ENOMEM;
	/* NOTE(review): mutating the shared template affects every adapter
	 * probed afterwards, not just this one — confirm that is intended */
	if (disable_clustering)
		driver_template.use_clustering = DISABLE_CLUSTERING;
	shost = scsi_host_alloc(&driver_template, sizeof(struct dc390_acb));
	if (!shost)
		goto out_disable_device;

	pACB = (struct dc390_acb *)shost->hostdata;
	memset(pACB, 0, sizeof(struct dc390_acb));

	/* Read (or emulate) the EEPROM, then apply "tmscsim=" overrides */
	dc390_check_eeprom(pdev, dc390_adapterCnt);
	dc390_eeprom_override(dc390_adapterCnt);

	io_port = pci_resource_start(pdev, 0);

	shost->this_id = dc390_eepromBuf[dc390_adapterCnt][EE_ADAPT_SCSI_ID];
	shost->io_port = io_port;
	shost->n_io_port = 0x80;
	shost->irq = pdev->irq;
	shost->base = io_port;
	shost->unique_id = io_port;

	pACB->last_reset = jiffies;
	pACB->pScsiHost = shost;
	pACB->IOPortBase = (u16) io_port;
	pACB->IRQLevel = pdev->irq;

	shost->max_id = 8;

	/* Shrink the scan range when the adapter itself holds the top ID */
	if (shost->max_id - 1 ==
	    dc390_eepromBuf[dc390_adapterCnt][EE_ADAPT_SCSI_ID])
		shost->max_id--;

	if (dc390_eepromBuf[dc390_adapterCnt][EE_MODE2] & LUN_CHECK)
		shost->max_lun = 8;
	else
		shost->max_lun = 1;

	/* Chain the statically allocated SRBs into the free list */
	pACB->pFreeSRB = pACB->SRB_array;
	pACB->SRBCount = MAX_SRB_CNT;
	pACB->AdapterIndex = dc390_adapterCnt;
	pACB->TagMaxNum =
		2 << dc390_eepromBuf[dc390_adapterCnt][EE_TAG_CMD_NUM];
	pACB->Gmode2 = dc390_eepromBuf[dc390_adapterCnt][EE_MODE2];

	for (i = 0; i < pACB->SRBCount-1; i++)
		pACB->SRB_array[i].pNextSRB = &pACB->SRB_array[i+1];
	pACB->SRB_array[pACB->SRBCount-1].pNextSRB = NULL;
	pACB->pTmpSRB = &pACB->TmpSRB;

	pACB->sel_timeout = SEL_TIMEOUT;
	pACB->glitch_cfg = EATER_25NS;
	pACB->pdev = pdev;

	if (!request_region(io_port, shost->n_io_port, "tmscsim")) {
		printk(KERN_ERR "DC390: register IO ports error!\n");
		goto out_host_put;
	}

	/* Reset Pending INT */
	DC390_read8_(INT_Status, io_port);

	if (request_irq(pdev->irq, do_DC390_Interrupt, IRQF_SHARED,
			"tmscsim", pACB)) {
		printk(KERN_ERR "DC390: register IRQ error!\n");
		goto out_release_region;
	}

	dc390_init_hw(pACB, dc390_adapterCnt);

	dc390_adapterCnt++;

	pci_set_drvdata(pdev, shost);

	error = scsi_add_host(shost, &pdev->dev);
	if (error)
		goto out_free_irq;
	scsi_scan_host(shost);
	return 0;

	/* Error unwinding, reverse order of acquisition */
 out_free_irq:
	free_irq(pdev->irq, pACB);
 out_release_region:
	release_region(io_port, shost->n_io_port);
 out_host_put:
	scsi_host_put(shost);
 out_disable_device:
	pci_disable_device(pdev);
 out:
	return error;
}
2528
2529/**
2530 * dc390_remove_one - Called to remove a single instance of the adapter.
2531 *
2532 * @dev: The PCI device to remove.
2533 */
2534static void dc390_remove_one(struct pci_dev *dev)
2535{
2536 struct Scsi_Host *scsi_host = pci_get_drvdata(dev);
2537 unsigned long iflags;
2538 struct dc390_acb* pACB = (struct dc390_acb*) scsi_host->hostdata;
2539 u8 bval;
2540
2541 scsi_remove_host(scsi_host);
2542
2543 spin_lock_irqsave(scsi_host->host_lock, iflags);
2544 pACB->ACBFlag = RESET_DEV;
2545 bval = DC390_read8(CtrlReg1) | DIS_INT_ON_SCSI_RST;
2546 DC390_write8 (CtrlReg1, bval); /* disable interrupt */
2547 if (pACB->Gmode2 & RST_SCSI_BUS)
2548 dc390_ResetSCSIBus(pACB);
2549 spin_unlock_irqrestore(scsi_host->host_lock, iflags);
2550
2551 free_irq(scsi_host->irq, pACB);
2552 release_region(scsi_host->io_port, scsi_host->n_io_port);
2553
2554 pci_disable_device(dev);
2555 scsi_host_put(scsi_host);
2556}
2557
/* The AM53C974 is the only chip this driver binds to. */
static struct pci_device_id tmscsim_pci_tbl[] = {
	{ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD53C974,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
	{ }	/* terminator */
};
MODULE_DEVICE_TABLE(pci, tmscsim_pci_tbl);

static struct pci_driver dc390_driver = {
	.name = "tmscsim",
	.id_table = tmscsim_pci_tbl,
	.probe = dc390_probe_one,
	.remove = dc390_remove_one,
};
2571
/*
 * dc390_module_init - module entry point: warn about the clustering default,
 * sanity-check the "tmscsim=" parameters and register the PCI driver.
 */
static int __init dc390_module_init(void)
{
	if (!disable_clustering) {
		printk(KERN_INFO "DC390: clustering now enabled by default. If you get problems load\n");
		printk(KERN_INFO " with \"disable_clustering=1\" and report to maintainers\n");
	}

	/* No (or absurd) parameters given: fall back to a conservative,
	 * known-good configuration */
	if (tmscsim[0] == -1 || tmscsim[0] > 15) {
		tmscsim[0] = 7;
		tmscsim[1] = 4;
		tmscsim[2] = PARITY_CHK_ | TAG_QUEUEING_;
		tmscsim[3] = MORE2_DRV | GREATER_1G | RST_SCSI_BUS | ACTIVE_NEGATION;
		tmscsim[4] = 2;
		tmscsim[5] = 10;
		printk (KERN_INFO "DC390: Using safe settings.\n");
	}

	return pci_register_driver(&dc390_driver);
}
2591
/* Module exit: per-adapter teardown happens via dc390_remove_one(). */
static void __exit dc390_module_exit(void)
{
	pci_unregister_driver(&dc390_driver);
}
2596
2597module_init(dc390_module_init);
2598module_exit(dc390_module_exit);
2599
2600#ifndef MODULE
2601static int __init dc390_setup (char *str)
2602{
2603 int ints[8],i, im;
2604
2605 get_options(str, ARRAY_SIZE(ints), ints);
2606 im = ints[0];
2607
2608 if (im > 6) {
2609 printk (KERN_NOTICE "DC390: ignore extra params!\n");
2610 im = 6;
2611 }
2612
2613 for (i = 0; i < im; i++)
2614 tmscsim[i] = ints[i+1];
2615 /* dc390_checkparams (); */
2616 return 1;
2617}
2618
2619__setup("tmscsim=", dc390_setup);
2620#endif
diff --git a/drivers/scsi/tmscsim.h b/drivers/scsi/tmscsim.h
deleted file mode 100644
index 3d1bb4ad1826..000000000000
--- a/drivers/scsi/tmscsim.h
+++ /dev/null
@@ -1,551 +0,0 @@
1/***********************************************************************
2;* File Name : TMSCSIM.H *
3;* TEKRAM DC-390(T) PCI SCSI Bus Master Host Adapter *
4;* Device Driver *
5;***********************************************************************/
6/* $Id: tmscsim.h,v 2.15.2.3 2000/11/17 20:52:27 garloff Exp $ */
7
8#ifndef _TMSCSIM_H
9#define _TMSCSIM_H
10
11#include <linux/types.h>
12
13#define SCSI_IRQ_NONE 255
14
15#define MAX_ADAPTER_NUM 4
16#define MAX_SG_LIST_BUF 16 /* Not used */
17#define MAX_SCSI_ID 8
18#define MAX_SRB_CNT 50 /* Max number of started commands */
19
20#define SEL_TIMEOUT 153 /* 250 ms selection timeout (@ 40 MHz) */
21
22/*
23;-----------------------------------------------------------------------
24; SCSI Request Block
25;-----------------------------------------------------------------------
26*/
27struct dc390_srb
28{
29//u8 CmdBlock[12];
30
31struct dc390_srb *pNextSRB;
32struct dc390_dcb *pSRBDCB;
33struct scsi_cmnd *pcmd;
34struct scatterlist *pSegmentList;
35
36struct scatterlist Segmentx; /* make a one entry of S/G list table */
37
38unsigned long SGBusAddr; /*;a segment starting address as seen by AM53C974A
39 in CPU endianness. We're only getting 32-bit bus
40 addresses by default */
41unsigned long SGToBeXferLen; /*; to be xfer length */
42unsigned long TotalXferredLen;
43unsigned long SavedTotXLen;
44unsigned long Saved_Ptr;
45u32 SRBState;
46
47u8 SRBStatus;
48u8 SRBFlag; /*; b0-AutoReqSense,b6-Read,b7-write */
49 /*; b4-settimeout,b5-Residual valid */
50u8 AdaptStatus;
51u8 TargetStatus;
52
53u8 ScsiPhase;
54s8 TagNumber;
55u8 SGIndex;
56u8 SGcount;
57
58u8 MsgCnt;
59u8 EndMessage;
60
61u8 MsgInBuf[6];
62u8 MsgOutBuf[6];
63
64//u8 IORBFlag; /*;81h-Reset, 2-retry */
65};
66
67
68/*
69;-----------------------------------------------------------------------
70; Device Control Block
71;-----------------------------------------------------------------------
72*/
73struct dc390_dcb
74{
75struct dc390_dcb *pNextDCB;
76struct dc390_acb *pDCBACB;
77
78/* Queued SRBs */
79struct dc390_srb *pGoingSRB;
80struct dc390_srb *pGoingLast;
81struct dc390_srb *pActiveSRB;
82u8 GoingSRBCnt;
83
84u32 TagMask;
85
86u8 TargetID; /*; SCSI Target ID (SCSI Only) */
87u8 TargetLUN; /*; SCSI Log. Unit (SCSI Only) */
88u8 DevMode;
89u8 DCBFlag;
90
91u8 CtrlR1;
92u8 CtrlR3;
93u8 CtrlR4;
94
95u8 SyncMode; /*; 0:async mode */
96u8 NegoPeriod; /*;for nego. */
97u8 SyncPeriod; /*;for reg. */
98u8 SyncOffset; /*;for reg. and nego.(low nibble) */
99};
100
101
102/*
103;-----------------------------------------------------------------------
104; Adapter Control Block
105;-----------------------------------------------------------------------
106*/
107struct dc390_acb
108{
109struct Scsi_Host *pScsiHost;
110u16 IOPortBase;
111u8 IRQLevel;
112u8 status;
113
114u8 SRBCount;
115u8 AdapterIndex; /*; nth Adapter this driver */
116u8 DCBCnt;
117
118u8 TagMaxNum;
119u8 ACBFlag;
120u8 Gmode2;
121u8 scan_devices;
122
123struct dc390_dcb *pLinkDCB;
124struct dc390_dcb *pLastDCB;
125struct dc390_dcb *pDCBRunRobin;
126
127struct dc390_dcb *pActiveDCB;
128struct dc390_srb *pFreeSRB;
129struct dc390_srb *pTmpSRB;
130
131u8 msgin123[4];
132u8 Connected;
133u8 pad;
134
135#if defined(USE_SPINLOCKS) && USE_SPINLOCKS > 1 && (defined(CONFIG_SMP) || DEBUG_SPINLOCKS > 0)
136spinlock_t lock;
137#endif
138u8 sel_timeout;
139u8 glitch_cfg;
140
141u8 MsgLen;
142u8 Ignore_IRQ; /* Not used */
143
144struct pci_dev *pdev;
145
146unsigned long last_reset;
147unsigned long Cmds;
148u32 SelLost;
149u32 SelConn;
150u32 CmdInQ;
151u32 CmdOutOfSRB;
152
153struct dc390_srb TmpSRB;
154struct dc390_srb SRB_array[MAX_SRB_CNT]; /* 50 SRBs */
155};
156
157
158/*;-----------------------------------------------------------------------*/
159
160
161#define BIT31 0x80000000
162#define BIT30 0x40000000
163#define BIT29 0x20000000
164#define BIT28 0x10000000
165#define BIT27 0x08000000
166#define BIT26 0x04000000
167#define BIT25 0x02000000
168#define BIT24 0x01000000
169#define BIT23 0x00800000
170#define BIT22 0x00400000
171#define BIT21 0x00200000
172#define BIT20 0x00100000
173#define BIT19 0x00080000
174#define BIT18 0x00040000
175#define BIT17 0x00020000
176#define BIT16 0x00010000
177#define BIT15 0x00008000
178#define BIT14 0x00004000
179#define BIT13 0x00002000
180#define BIT12 0x00001000
181#define BIT11 0x00000800
182#define BIT10 0x00000400
183#define BIT9 0x00000200
184#define BIT8 0x00000100
185#define BIT7 0x00000080
186#define BIT6 0x00000040
187#define BIT5 0x00000020
188#define BIT4 0x00000010
189#define BIT3 0x00000008
190#define BIT2 0x00000004
191#define BIT1 0x00000002
192#define BIT0 0x00000001
193
194/*;---UnitCtrlFlag */
195#define UNIT_ALLOCATED BIT0
196#define UNIT_INFO_CHANGED BIT1
197#define FORMATING_MEDIA BIT2
198#define UNIT_RETRY BIT3
199
200/*;---UnitFlags */
201#define DASD_SUPPORT BIT0
202#define SCSI_SUPPORT BIT1
203#define ASPI_SUPPORT BIT2
204
205/*;----SRBState machine definition */
206#define SRB_FREE 0
207#define SRB_WAIT BIT0
208#define SRB_READY BIT1
209#define SRB_MSGOUT BIT2 /*;arbitration+msg_out 1st byte*/
210#define SRB_MSGIN BIT3
211#define SRB_MSGIN_MULTI BIT4
212#define SRB_COMMAND BIT5
213#define SRB_START_ BIT6 /*;arbitration+msg_out+command_out*/
214#define SRB_DISCONNECT BIT7
215#define SRB_DATA_XFER BIT8
216#define SRB_XFERPAD BIT9
217#define SRB_STATUS BIT10
218#define SRB_COMPLETED BIT11
219#define SRB_ABORT_SENT BIT12
220#define DO_SYNC_NEGO BIT13
221#define SRB_UNEXPECT_RESEL BIT14
222
223/*;---SRBstatus */
224#define SRB_OK BIT0
225#define ABORTION BIT1
226#define OVER_RUN BIT2
227#define UNDER_RUN BIT3
228#define PARITY_ERROR BIT4
229#define SRB_ERROR BIT5
230
231/*;---ACBFlag */
232#define RESET_DEV BIT0
233#define RESET_DETECT BIT1
234#define RESET_DONE BIT2
235
236/*;---DCBFlag */
237#define ABORT_DEV_ BIT0
238
239/*;---SRBFlag */
240#define DATAOUT BIT7
241#define DATAIN BIT6
242#define RESIDUAL_VALID BIT5
243#define ENABLE_TIMER BIT4
244#define RESET_DEV0 BIT2
245#define ABORT_DEV BIT1
246#define AUTO_REQSENSE BIT0
247
248/*;---Adapter status */
249#define H_STATUS_GOOD 0
250#define H_SEL_TIMEOUT 0x11
251#define H_OVER_UNDER_RUN 0x12
252#define H_UNEXP_BUS_FREE 0x13
253#define H_TARGET_PHASE_F 0x14
254#define H_INVALID_CCB_OP 0x16
255#define H_LINK_CCB_BAD 0x17
256#define H_BAD_TARGET_DIR 0x18
257#define H_DUPLICATE_CCB 0x19
258#define H_BAD_CCB_OR_SG 0x1A
259#define H_ABORT 0x0FF
260
261/* cmd->result */
262#define RES_TARGET 0x000000FF /* Target State */
263#define RES_TARGET_LNX STATUS_MASK /* Only official ... */
264#define RES_ENDMSG 0x0000FF00 /* End Message */
265#define RES_DID 0x00FF0000 /* DID_ codes */
266#define RES_DRV 0xFF000000 /* DRIVER_ codes */
267
268#define MK_RES(drv,did,msg,tgt) ((int)(drv)<<24 | (int)(did)<<16 | (int)(msg)<<8 | (int)(tgt))
269#define MK_RES_LNX(drv,did,msg,tgt) ((int)(drv)<<24 | (int)(did)<<16 | (int)(msg)<<8 | (int)(tgt))
270
271#define SET_RES_TARGET(who, tgt) do { who &= ~RES_TARGET; who |= (int)(tgt); } while (0)
272#define SET_RES_TARGET_LNX(who, tgt) do { who &= ~RES_TARGET_LNX; who |= (int)(tgt) << 1; } while (0)
273#define SET_RES_MSG(who, msg) do { who &= ~RES_ENDMSG; who |= (int)(msg) << 8; } while (0)
274#define SET_RES_DID(who, did) do { who &= ~RES_DID; who |= (int)(did) << 16; } while (0)
275#define SET_RES_DRV(who, drv) do { who &= ~RES_DRV; who |= (int)(drv) << 24; } while (0)
276
277/*;---Sync_Mode */
278#define SYNC_DISABLE 0
279#define SYNC_ENABLE BIT0
280#define SYNC_NEGO_DONE BIT1
281#define WIDE_ENABLE BIT2 /* Not used ;-) */
282#define WIDE_NEGO_DONE BIT3 /* Not used ;-) */
283#define EN_TAG_QUEUEING BIT4
284#define EN_ATN_STOP BIT5
285
286#define SYNC_NEGO_OFFSET 15
287
288/*;---SCSI bus phase*/
289#define SCSI_DATA_OUT 0
290#define SCSI_DATA_IN 1
291#define SCSI_COMMAND 2
292#define SCSI_STATUS_ 3
293#define SCSI_NOP0 4
294#define SCSI_NOP1 5
295#define SCSI_MSG_OUT 6
296#define SCSI_MSG_IN 7
297
298/*;----SCSI MSG BYTE*/ /* see scsi/scsi.h */ /* One is missing ! */
299#define ABORT_TAG 0x0d
300
301/*
302 * SISC query queue
303 */
304typedef struct {
305 dma_addr_t saved_dma_handle;
306} dc390_cmd_scp_t;
307
308/*
309;==========================================================
310; EEPROM byte offset
311;==========================================================
312*/
313typedef struct _EEprom
314{
315u8 EE_MODE1;
316u8 EE_SPEED;
317u8 xx1;
318u8 xx2;
319} EEprom, *PEEprom;
320
321#define REAL_EE_ADAPT_SCSI_ID 64
322#define REAL_EE_MODE2 65
323#define REAL_EE_DELAY 66
324#define REAL_EE_TAG_CMD_NUM 67
325
326#define EE_ADAPT_SCSI_ID 32
327#define EE_MODE2 33
328#define EE_DELAY 34
329#define EE_TAG_CMD_NUM 35
330
331#define EE_LEN 40
332
333/*; EE_MODE1 bits definition*/
334#define PARITY_CHK_ BIT0
335#define SYNC_NEGO_ BIT1
336#define EN_DISCONNECT_ BIT2
337#define SEND_START_ BIT3
338#define TAG_QUEUEING_ BIT4
339
340/*; EE_MODE2 bits definition*/
341#define MORE2_DRV BIT0
342#define GREATER_1G BIT1
343#define RST_SCSI_BUS BIT2
344#define ACTIVE_NEGATION BIT3
345#define NO_SEEK BIT4
346#define LUN_CHECK BIT5
347
348#define ENABLE_CE 1
349#define DISABLE_CE 0
350#define EEPROM_READ 0x80
351
352/*
353;==========================================================
354; AMD 53C974 Registers bit Definition
355;==========================================================
356*/
357/*
358;====================
359; SCSI Register
360;====================
361*/
362
363/*; Command Reg.(+0CH) (rw) */
364#define DMA_COMMAND BIT7
365#define NOP_CMD 0
366#define CLEAR_FIFO_CMD 1
367#define RST_DEVICE_CMD 2
368#define RST_SCSI_BUS_CMD 3
369
370#define INFO_XFER_CMD 0x10
371#define INITIATOR_CMD_CMPLTE 0x11
372#define MSG_ACCEPTED_CMD 0x12
373#define XFER_PAD_BYTE 0x18
374#define SET_ATN_CMD 0x1A
375#define RESET_ATN_CMD 0x1B
376
377#define SEL_WO_ATN 0x41 /* currently not used */
378#define SEL_W_ATN 0x42
379#define SEL_W_ATN_STOP 0x43
380#define SEL_W_ATN3 0x46
381#define EN_SEL_RESEL 0x44
382#define DIS_SEL_RESEL 0x45 /* currently not used */
383#define RESEL 0x40 /* " */
384#define RESEL_ATN3 0x47 /* " */
385
386#define DATA_XFER_CMD INFO_XFER_CMD
387
388
389/*; SCSI Status Reg.(+10H) (r) */
390#define INTERRUPT BIT7
391#define ILLEGAL_OP_ERR BIT6
392#define PARITY_ERR BIT5
393#define COUNT_2_ZERO BIT4
394#define GROUP_CODE_VALID BIT3
395#define SCSI_PHASE_MASK (BIT2+BIT1+BIT0)
396/* BIT2: MSG phase; BIT1: C/D physe; BIT0: I/O phase */
397
398/*; Interrupt Status Reg.(+14H) (r) */
399#define SCSI_RESET BIT7
400#define INVALID_CMD BIT6
401#define DISCONNECTED BIT5
402#define SERVICE_REQUEST BIT4
403#define SUCCESSFUL_OP BIT3
404#define RESELECTED BIT2
405#define SEL_ATTENTION BIT1
406#define SELECTED BIT0
407
408/*; Internal State Reg.(+18H) (r) */
409#define SYNC_OFFSET_FLAG BIT3
410#define INTRN_STATE_MASK (BIT2+BIT1+BIT0)
411/* 0x04: Sel. successful (w/o stop), 0x01: Sel. successful (w/ stop) */
412
413/*; Clock Factor Reg.(+24H) (w) */
414#define CLK_FREQ_40MHZ 0
415#define CLK_FREQ_35MHZ (BIT2+BIT1+BIT0)
416#define CLK_FREQ_30MHZ (BIT2+BIT1)
417#define CLK_FREQ_25MHZ (BIT2+BIT0)
418#define CLK_FREQ_20MHZ BIT2
419#define CLK_FREQ_15MHZ (BIT1+BIT0)
420#define CLK_FREQ_10MHZ BIT1
421
422/*; Control Reg. 1(+20H) (rw) */
423#define EXTENDED_TIMING BIT7
424#define DIS_INT_ON_SCSI_RST BIT6
425#define PARITY_ERR_REPO BIT4
426#define SCSI_ID_ON_BUS (BIT2+BIT1+BIT0) /* host adapter ID */
427
428/*; Control Reg. 2(+2CH) (rw) */
429#define EN_FEATURE BIT6
430#define EN_SCSI2_CMD BIT3
431
432/*; Control Reg. 3(+30H) (rw) */
433#define ID_MSG_CHECK BIT7
434#define EN_QTAG_MSG BIT6
435#define EN_GRP2_CMD BIT5
436#define FAST_SCSI BIT4 /* ;10MB/SEC */
437#define FAST_CLK BIT3 /* ;25 - 40 MHZ */
438
439/*; Control Reg. 4(+34H) (rw) */
440#define EATER_12NS 0
441#define EATER_25NS BIT7
442#define EATER_35NS BIT6
443#define EATER_0NS (BIT7+BIT6)
444#define REDUCED_POWER BIT5
445#define CTRL4_RESERVED BIT4 /* must be 1 acc. to AM53C974.c */
446#define NEGATE_REQACKDATA BIT2
447#define NEGATE_REQACK BIT3
448
449#define GLITCH_TO_NS(x) (((~x>>6 & 2) >> 1) | ((x>>6 & 1) << 1 ^ (x>>6 & 2)))
450#define NS_TO_GLITCH(y) (((~y<<7) | ~((y<<6) ^ ((y<<5 & 1<<6) | ~0x40))) & 0xc0)
451
452/*
453;====================
454; DMA Register
455;====================
456*/
457/*; DMA Command Reg.(+40H) (rw) */
458#define READ_DIRECTION BIT7
459#define WRITE_DIRECTION 0
460#define EN_DMA_INT BIT6
461#define EN_PAGE_INT BIT5 /* page transfer interrupt enable */
462#define MAP_TO_MDL BIT4
463#define DIAGNOSTIC BIT2
464#define DMA_IDLE_CMD 0
465#define DMA_BLAST_CMD BIT0
466#define DMA_ABORT_CMD BIT1
467#define DMA_START_CMD (BIT1+BIT0)
468
469/*; DMA Status Reg.(+54H) (r) */
470#define PCI_MS_ABORT BIT6
471#define BLAST_COMPLETE BIT5
472#define SCSI_INTERRUPT BIT4
473#define DMA_XFER_DONE BIT3
474#define DMA_XFER_ABORT BIT2
475#define DMA_XFER_ERROR BIT1
476#define POWER_DOWN BIT0
477
478/*; DMA SCSI Bus and Ctrl.(+70H) */
479#define EN_INT_ON_PCI_ABORT BIT25
480#define WRT_ERASE_DMA_STAT BIT24
481#define PW_DOWN_CTRL BIT21
482#define SCSI_BUSY BIT20
483#define SCLK BIT19
484#define SCAM BIT18
485#define SCSI_LINES 0x0003ffff
486
487/*
488;==========================================================
489; SCSI Chip register address offset
490;==========================================================
491;Registers are rw unless declared otherwise
492*/
493#define CtcReg_Low 0x00 /* r curr. transfer count */
494#define CtcReg_Mid 0x04 /* r */
495#define CtcReg_High 0x38 /* r */
496#define ScsiFifo 0x08
497#define ScsiCmd 0x0C
498#define Scsi_Status 0x10 /* r */
499#define INT_Status 0x14 /* r */
500#define Sync_Period 0x18 /* w */
501#define Sync_Offset 0x1C /* w */
502#define Clk_Factor 0x24 /* w */
503#define CtrlReg1 0x20
504#define CtrlReg2 0x2C
505#define CtrlReg3 0x30
506#define CtrlReg4 0x34
507#define DMA_Cmd 0x40
508#define DMA_XferCnt 0x44 /* rw starting transfer count (32 bit) */
509#define DMA_XferAddr 0x48 /* rw starting physical address (32 bit) */
510#define DMA_Wk_ByteCntr 0x4C /* r working byte counter */
511#define DMA_Wk_AddrCntr 0x50 /* r working address counter */
512#define DMA_Status 0x54 /* r */
513#define DMA_MDL_Addr 0x58 /* rw starting MDL address */
514#define DMA_Wk_MDL_Cntr 0x5C /* r working MDL counter */
515#define DMA_ScsiBusCtrl 0x70 /* rw SCSI Bus, PCI/DMA Ctrl */
516
517#define StcReg_Low CtcReg_Low /* w start transfer count */
518#define StcReg_Mid CtcReg_Mid /* w */
519#define StcReg_High CtcReg_High /* w */
520#define Scsi_Dest_ID Scsi_Status /* w */
521#define Scsi_TimeOut INT_Status /* w */
522#define Intern_State Sync_Period /* r */
523#define Current_Fifo Sync_Offset /* r Curr. FIFO / int. state */
524
525
526#define DC390_read8(address) \
527 (inb (pACB->IOPortBase + (address)))
528
529#define DC390_read8_(address, base) \
530 (inb ((u16)(base) + (address)))
531
532#define DC390_read16(address) \
533 (inw (pACB->IOPortBase + (address)))
534
535#define DC390_read32(address) \
536 (inl (pACB->IOPortBase + (address)))
537
538#define DC390_write8(address,value) \
539 outb ((value), pACB->IOPortBase + (address))
540
541#define DC390_write8_(address,value,base) \
542 outb ((value), (u16)(base) + (address))
543
544#define DC390_write16(address,value) \
545 outw ((value), pACB->IOPortBase + (address))
546
547#define DC390_write32(address,value) \
548 outl ((value), pACB->IOPortBase + (address))
549
550
551#endif /* _TMSCSIM_H */
diff --git a/drivers/scsi/u14-34f.c b/drivers/scsi/u14-34f.c
index d8dcf36aed11..14eb50b95a1e 100644
--- a/drivers/scsi/u14-34f.c
+++ b/drivers/scsi/u14-34f.c
@@ -696,25 +696,25 @@ static int u14_34f_slave_configure(struct scsi_device *dev) {
696 if (TLDEV(dev->type) && dev->tagged_supported) 696 if (TLDEV(dev->type) && dev->tagged_supported)
697 697
698 if (tag_mode == TAG_SIMPLE) { 698 if (tag_mode == TAG_SIMPLE) {
699 scsi_adjust_queue_depth(dev, MSG_SIMPLE_TAG, tqd); 699 scsi_change_queue_depth(dev, tqd);
700 tag_suffix = ", simple tags"; 700 tag_suffix = ", simple tags";
701 } 701 }
702 else if (tag_mode == TAG_ORDERED) { 702 else if (tag_mode == TAG_ORDERED) {
703 scsi_adjust_queue_depth(dev, MSG_ORDERED_TAG, tqd); 703 scsi_change_queue_depth(dev, tqd);
704 tag_suffix = ", ordered tags"; 704 tag_suffix = ", ordered tags";
705 } 705 }
706 else { 706 else {
707 scsi_adjust_queue_depth(dev, 0, tqd); 707 scsi_change_queue_depth(dev, tqd);
708 tag_suffix = ", no tags"; 708 tag_suffix = ", no tags";
709 } 709 }
710 710
711 else if (TLDEV(dev->type) && linked_comm) { 711 else if (TLDEV(dev->type) && linked_comm) {
712 scsi_adjust_queue_depth(dev, 0, tqd); 712 scsi_change_queue_depth(dev, tqd);
713 tag_suffix = ", untagged"; 713 tag_suffix = ", untagged";
714 } 714 }
715 715
716 else { 716 else {
717 scsi_adjust_queue_depth(dev, 0, utqd); 717 scsi_change_queue_depth(dev, utqd);
718 tag_suffix = ""; 718 tag_suffix = "";
719 } 719 }
720 720
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
index 8adf067ff019..1c3467b82566 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -102,7 +102,6 @@ static int ufshcd_parse_clock_info(struct ufs_hba *hba)
102 clkfreq = devm_kzalloc(dev, sz * sizeof(*clkfreq), 102 clkfreq = devm_kzalloc(dev, sz * sizeof(*clkfreq),
103 GFP_KERNEL); 103 GFP_KERNEL);
104 if (!clkfreq) { 104 if (!clkfreq) {
105 dev_err(dev, "%s: no memory\n", "freq-table-hz");
106 ret = -ENOMEM; 105 ret = -ENOMEM;
107 goto out; 106 goto out;
108 } 107 }
@@ -112,19 +111,19 @@ static int ufshcd_parse_clock_info(struct ufs_hba *hba)
112 if (ret && (ret != -EINVAL)) { 111 if (ret && (ret != -EINVAL)) {
113 dev_err(dev, "%s: error reading array %d\n", 112 dev_err(dev, "%s: error reading array %d\n",
114 "freq-table-hz", ret); 113 "freq-table-hz", ret);
115 goto free_clkfreq; 114 return ret;
116 } 115 }
117 116
118 for (i = 0; i < sz; i += 2) { 117 for (i = 0; i < sz; i += 2) {
119 ret = of_property_read_string_index(np, 118 ret = of_property_read_string_index(np,
120 "clock-names", i/2, (const char **)&name); 119 "clock-names", i/2, (const char **)&name);
121 if (ret) 120 if (ret)
122 goto free_clkfreq; 121 goto out;
123 122
124 clki = devm_kzalloc(dev, sizeof(*clki), GFP_KERNEL); 123 clki = devm_kzalloc(dev, sizeof(*clki), GFP_KERNEL);
125 if (!clki) { 124 if (!clki) {
126 ret = -ENOMEM; 125 ret = -ENOMEM;
127 goto free_clkfreq; 126 goto out;
128 } 127 }
129 128
130 clki->min_freq = clkfreq[i]; 129 clki->min_freq = clkfreq[i];
@@ -134,8 +133,6 @@ static int ufshcd_parse_clock_info(struct ufs_hba *hba)
134 clki->min_freq, clki->max_freq, clki->name); 133 clki->min_freq, clki->max_freq, clki->name);
135 list_add_tail(&clki->list, &hba->clk_list_head); 134 list_add_tail(&clki->list, &hba->clk_list_head);
136 } 135 }
137free_clkfreq:
138 kfree(clkfreq);
139out: 136out:
140 return ret; 137 return ret;
141} 138}
@@ -162,10 +159,8 @@ static int ufshcd_populate_vreg(struct device *dev, const char *name,
162 } 159 }
163 160
164 vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL); 161 vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
165 if (!vreg) { 162 if (!vreg)
166 dev_err(dev, "No memory for %s regulator\n", name); 163 return -ENOMEM;
167 goto out;
168 }
169 164
170 vreg->name = kstrdup(name, GFP_KERNEL); 165 vreg->name = kstrdup(name, GFP_KERNEL);
171 166
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 497c38a4a866..2e4614b9dddf 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -744,6 +744,8 @@ static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
744 if (!ufshcd_is_clkgating_allowed(hba)) 744 if (!ufshcd_is_clkgating_allowed(hba))
745 return; 745 return;
746 device_remove_file(hba->dev, &hba->clk_gating.delay_attr); 746 device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
747 cancel_work_sync(&hba->clk_gating.ungate_work);
748 cancel_delayed_work_sync(&hba->clk_gating.gate_work);
747} 749}
748 750
749/* Must be called with host lock acquired */ 751/* Must be called with host lock acquired */
@@ -2246,6 +2248,22 @@ static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
2246 return ret; 2248 return ret;
2247} 2249}
2248 2250
2251 /**
2252 * ufshcd_init_pwr_info - setting the POR (power on reset)
2253 * values in hba power info
2254 * @hba: per-adapter instance
2255 */
2256static void ufshcd_init_pwr_info(struct ufs_hba *hba)
2257{
2258 hba->pwr_info.gear_rx = UFS_PWM_G1;
2259 hba->pwr_info.gear_tx = UFS_PWM_G1;
2260 hba->pwr_info.lane_rx = 1;
2261 hba->pwr_info.lane_tx = 1;
2262 hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
2263 hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
2264 hba->pwr_info.hs_rate = 0;
2265}
2266
2249/** 2267/**
2250 * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device 2268 * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
2251 * @hba: per-adapter instance 2269 * @hba: per-adapter instance
@@ -2695,7 +2713,7 @@ static void ufshcd_set_queue_depth(struct scsi_device *sdev)
2695 2713
2696 dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n", 2714 dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
2697 __func__, lun_qdepth); 2715 __func__, lun_qdepth);
2698 scsi_activate_tcq(sdev, lun_qdepth); 2716 scsi_change_queue_depth(sdev, lun_qdepth);
2699} 2717}
2700 2718
2701/* 2719/*
@@ -2765,11 +2783,9 @@ static int ufshcd_slave_alloc(struct scsi_device *sdev)
2765 struct ufs_hba *hba; 2783 struct ufs_hba *hba;
2766 2784
2767 hba = shost_priv(sdev->host); 2785 hba = shost_priv(sdev->host);
2768 sdev->tagged_supported = 1;
2769 2786
2770 /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */ 2787 /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
2771 sdev->use_10_for_ms = 1; 2788 sdev->use_10_for_ms = 1;
2772 scsi_set_tag_type(sdev, MSG_SIMPLE_TAG);
2773 2789
2774 /* allow SCSI layer to restart the device in case of errors */ 2790 /* allow SCSI layer to restart the device in case of errors */
2775 sdev->allow_restart = 1; 2791 sdev->allow_restart = 1;
@@ -2789,34 +2805,16 @@ static int ufshcd_slave_alloc(struct scsi_device *sdev)
2789 * ufshcd_change_queue_depth - change queue depth 2805 * ufshcd_change_queue_depth - change queue depth
2790 * @sdev: pointer to SCSI device 2806 * @sdev: pointer to SCSI device
2791 * @depth: required depth to set 2807 * @depth: required depth to set
2792 * @reason: reason for changing the depth
2793 * 2808 *
2794 * Change queue depth according to the reason and make sure 2809 * Change queue depth and make sure the max. limits are not crossed.
2795 * the max. limits are not crossed.
2796 */ 2810 */
2797static int ufshcd_change_queue_depth(struct scsi_device *sdev, 2811static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
2798 int depth, int reason)
2799{ 2812{
2800 struct ufs_hba *hba = shost_priv(sdev->host); 2813 struct ufs_hba *hba = shost_priv(sdev->host);
2801 2814
2802 if (depth > hba->nutrs) 2815 if (depth > hba->nutrs)
2803 depth = hba->nutrs; 2816 depth = hba->nutrs;
2804 2817 return scsi_change_queue_depth(sdev, depth);
2805 switch (reason) {
2806 case SCSI_QDEPTH_DEFAULT:
2807 case SCSI_QDEPTH_RAMP_UP:
2808 if (!sdev->tagged_supported)
2809 depth = 1;
2810 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
2811 break;
2812 case SCSI_QDEPTH_QFULL:
2813 scsi_track_queue_full(sdev, depth);
2814 break;
2815 default:
2816 return -EOPNOTSUPP;
2817 }
2818
2819 return depth;
2820} 2818}
2821 2819
2822/** 2820/**
@@ -2842,10 +2840,14 @@ static void ufshcd_slave_destroy(struct scsi_device *sdev)
2842 struct ufs_hba *hba; 2840 struct ufs_hba *hba;
2843 2841
2844 hba = shost_priv(sdev->host); 2842 hba = shost_priv(sdev->host);
2845 scsi_deactivate_tcq(sdev, hba->nutrs);
2846 /* Drop the reference as it won't be needed anymore */ 2843 /* Drop the reference as it won't be needed anymore */
2847 if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) 2844 if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
2845 unsigned long flags;
2846
2847 spin_lock_irqsave(hba->host->host_lock, flags);
2848 hba->sdev_ufs_device = NULL; 2848 hba->sdev_ufs_device = NULL;
2849 spin_unlock_irqrestore(hba->host->host_lock, flags);
2850 }
2849} 2851}
2850 2852
2851/** 2853/**
@@ -4062,6 +4064,8 @@ static void ufshcd_init_icc_levels(struct ufs_hba *hba)
4062static int ufshcd_scsi_add_wlus(struct ufs_hba *hba) 4064static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
4063{ 4065{
4064 int ret = 0; 4066 int ret = 0;
4067 struct scsi_device *sdev_rpmb;
4068 struct scsi_device *sdev_boot;
4065 4069
4066 hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0, 4070 hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
4067 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL); 4071 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
@@ -4070,26 +4074,27 @@ static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
4070 hba->sdev_ufs_device = NULL; 4074 hba->sdev_ufs_device = NULL;
4071 goto out; 4075 goto out;
4072 } 4076 }
4077 scsi_device_put(hba->sdev_ufs_device);
4073 4078
4074 hba->sdev_boot = __scsi_add_device(hba->host, 0, 0, 4079 sdev_boot = __scsi_add_device(hba->host, 0, 0,
4075 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL); 4080 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
4076 if (IS_ERR(hba->sdev_boot)) { 4081 if (IS_ERR(sdev_boot)) {
4077 ret = PTR_ERR(hba->sdev_boot); 4082 ret = PTR_ERR(sdev_boot);
4078 hba->sdev_boot = NULL;
4079 goto remove_sdev_ufs_device; 4083 goto remove_sdev_ufs_device;
4080 } 4084 }
4085 scsi_device_put(sdev_boot);
4081 4086
4082 hba->sdev_rpmb = __scsi_add_device(hba->host, 0, 0, 4087 sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
4083 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL); 4088 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
4084 if (IS_ERR(hba->sdev_rpmb)) { 4089 if (IS_ERR(sdev_rpmb)) {
4085 ret = PTR_ERR(hba->sdev_rpmb); 4090 ret = PTR_ERR(sdev_rpmb);
4086 hba->sdev_rpmb = NULL;
4087 goto remove_sdev_boot; 4091 goto remove_sdev_boot;
4088 } 4092 }
4093 scsi_device_put(sdev_rpmb);
4089 goto out; 4094 goto out;
4090 4095
4091remove_sdev_boot: 4096remove_sdev_boot:
4092 scsi_remove_device(hba->sdev_boot); 4097 scsi_remove_device(sdev_boot);
4093remove_sdev_ufs_device: 4098remove_sdev_ufs_device:
4094 scsi_remove_device(hba->sdev_ufs_device); 4099 scsi_remove_device(hba->sdev_ufs_device);
4095out: 4100out:
@@ -4097,30 +4102,6 @@ out:
4097} 4102}
4098 4103
4099/** 4104/**
4100 * ufshcd_scsi_remove_wlus - Removes the W-LUs which were added by
4101 * ufshcd_scsi_add_wlus()
4102 * @hba: per-adapter instance
4103 *
4104 */
4105static void ufshcd_scsi_remove_wlus(struct ufs_hba *hba)
4106{
4107 if (hba->sdev_ufs_device) {
4108 scsi_remove_device(hba->sdev_ufs_device);
4109 hba->sdev_ufs_device = NULL;
4110 }
4111
4112 if (hba->sdev_boot) {
4113 scsi_remove_device(hba->sdev_boot);
4114 hba->sdev_boot = NULL;
4115 }
4116
4117 if (hba->sdev_rpmb) {
4118 scsi_remove_device(hba->sdev_rpmb);
4119 hba->sdev_rpmb = NULL;
4120 }
4121}
4122
4123/**
4124 * ufshcd_probe_hba - probe hba to detect device and initialize 4105 * ufshcd_probe_hba - probe hba to detect device and initialize
4125 * @hba: per-adapter instance 4106 * @hba: per-adapter instance
4126 * 4107 *
@@ -4134,6 +4115,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
4134 if (ret) 4115 if (ret)
4135 goto out; 4116 goto out;
4136 4117
4118 ufshcd_init_pwr_info(hba);
4119
4137 /* UniPro link is active now */ 4120 /* UniPro link is active now */
4138 ufshcd_set_link_active(hba); 4121 ufshcd_set_link_active(hba);
4139 4122
@@ -4235,6 +4218,8 @@ static struct scsi_host_template ufshcd_driver_template = {
4235 .cmd_per_lun = UFSHCD_CMD_PER_LUN, 4218 .cmd_per_lun = UFSHCD_CMD_PER_LUN,
4236 .can_queue = UFSHCD_CAN_QUEUE, 4219 .can_queue = UFSHCD_CAN_QUEUE,
4237 .max_host_blocked = 1, 4220 .max_host_blocked = 1,
4221 .use_blk_tags = 1,
4222 .track_queue_depth = 1,
4238}; 4223};
4239 4224
4240static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg, 4225static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
@@ -4264,12 +4249,18 @@ static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
4264static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba, 4249static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
4265 struct ufs_vreg *vreg) 4250 struct ufs_vreg *vreg)
4266{ 4251{
4252 if (!vreg)
4253 return 0;
4254
4267 return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA); 4255 return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA);
4268} 4256}
4269 4257
4270static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba, 4258static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
4271 struct ufs_vreg *vreg) 4259 struct ufs_vreg *vreg)
4272{ 4260{
4261 if (!vreg)
4262 return 0;
4263
4273 return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA); 4264 return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
4274} 4265}
4275 4266
@@ -4471,7 +4462,7 @@ out:
4471 if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled) 4462 if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
4472 clk_disable_unprepare(clki->clk); 4463 clk_disable_unprepare(clki->clk);
4473 } 4464 }
4474 } else if (!ret && on) { 4465 } else if (on) {
4475 spin_lock_irqsave(hba->host->host_lock, flags); 4466 spin_lock_irqsave(hba->host->host_lock, flags);
4476 hba->clk_gating.state = CLKS_ON; 4467 hba->clk_gating.state = CLKS_ON;
4477 spin_unlock_irqrestore(hba->host->host_lock, flags); 4468 spin_unlock_irqrestore(hba->host->host_lock, flags);
@@ -4675,11 +4666,25 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
4675{ 4666{
4676 unsigned char cmd[6] = { START_STOP }; 4667 unsigned char cmd[6] = { START_STOP };
4677 struct scsi_sense_hdr sshdr; 4668 struct scsi_sense_hdr sshdr;
4678 struct scsi_device *sdp = hba->sdev_ufs_device; 4669 struct scsi_device *sdp;
4670 unsigned long flags;
4679 int ret; 4671 int ret;
4680 4672
4681 if (!sdp || !scsi_device_online(sdp)) 4673 spin_lock_irqsave(hba->host->host_lock, flags);
4682 return -ENODEV; 4674 sdp = hba->sdev_ufs_device;
4675 if (sdp) {
4676 ret = scsi_device_get(sdp);
4677 if (!ret && !scsi_device_online(sdp)) {
4678 ret = -ENODEV;
4679 scsi_device_put(sdp);
4680 }
4681 } else {
4682 ret = -ENODEV;
4683 }
4684 spin_unlock_irqrestore(hba->host->host_lock, flags);
4685
4686 if (ret)
4687 return ret;
4683 4688
4684 /* 4689 /*
4685 * If scsi commands fail, the scsi mid-layer schedules scsi error- 4690 * If scsi commands fail, the scsi mid-layer schedules scsi error-
@@ -4707,17 +4712,18 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
4707 START_STOP_TIMEOUT, 0, NULL, REQ_PM); 4712 START_STOP_TIMEOUT, 0, NULL, REQ_PM);
4708 if (ret) { 4713 if (ret) {
4709 sdev_printk(KERN_WARNING, sdp, 4714 sdev_printk(KERN_WARNING, sdp,
4710 "START_STOP failed for power mode: %d\n", pwr_mode); 4715 "START_STOP failed for power mode: %d, result %x\n",
4711 scsi_show_result(ret); 4716 pwr_mode, ret);
4712 if (driver_byte(ret) & DRIVER_SENSE) { 4717 if (driver_byte(ret) & DRIVER_SENSE) {
4713 scsi_show_sense_hdr(&sshdr); 4718 scsi_show_sense_hdr(sdp, NULL, &sshdr);
4714 scsi_show_extd_sense(sshdr.asc, sshdr.ascq); 4719 scsi_show_extd_sense(sdp, NULL, sshdr.asc, sshdr.ascq);
4715 } 4720 }
4716 } 4721 }
4717 4722
4718 if (!ret) 4723 if (!ret)
4719 hba->curr_dev_pwr_mode = pwr_mode; 4724 hba->curr_dev_pwr_mode = pwr_mode;
4720out: 4725out:
4726 scsi_device_put(sdp);
4721 hba->host->eh_noresume = 0; 4727 hba->host->eh_noresume = 0;
4722 return ret; 4728 return ret;
4723} 4729}
@@ -5087,7 +5093,7 @@ int ufshcd_system_suspend(struct ufs_hba *hba)
5087 int ret = 0; 5093 int ret = 0;
5088 5094
5089 if (!hba || !hba->is_powered) 5095 if (!hba || !hba->is_powered)
5090 goto out; 5096 return 0;
5091 5097
5092 if (pm_runtime_suspended(hba->dev)) { 5098 if (pm_runtime_suspended(hba->dev)) {
5093 if (hba->rpm_lvl == hba->spm_lvl) 5099 if (hba->rpm_lvl == hba->spm_lvl)
@@ -5231,7 +5237,6 @@ EXPORT_SYMBOL(ufshcd_shutdown);
5231void ufshcd_remove(struct ufs_hba *hba) 5237void ufshcd_remove(struct ufs_hba *hba)
5232{ 5238{
5233 scsi_remove_host(hba->host); 5239 scsi_remove_host(hba->host);
5234 ufshcd_scsi_remove_wlus(hba);
5235 /* disable interrupts */ 5240 /* disable interrupts */
5236 ufshcd_disable_intr(hba, hba->intr_mask); 5241 ufshcd_disable_intr(hba, hba->intr_mask);
5237 ufshcd_hba_stop(hba); 5242 ufshcd_hba_stop(hba);
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 58ecdff5065c..4a574aa45855 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -392,8 +392,6 @@ struct ufs_hba {
392 * "UFS device" W-LU. 392 * "UFS device" W-LU.
393 */ 393 */
394 struct scsi_device *sdev_ufs_device; 394 struct scsi_device *sdev_ufs_device;
395 struct scsi_device *sdev_rpmb;
396 struct scsi_device *sdev_boot;
397 395
398 enum ufs_dev_pwr_mode curr_dev_pwr_mode; 396 enum ufs_dev_pwr_mode curr_dev_pwr_mode;
399 enum uic_link_state uic_link_state; 397 enum uic_link_state uic_link_state;
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index b83846fc7859..c52bb5dfaedb 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -158,7 +158,7 @@ static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf)
158 sc, resp->response, resp->status, resp->sense_len); 158 sc, resp->response, resp->status, resp->sense_len);
159 159
160 sc->result = resp->status; 160 sc->result = resp->status;
161 virtscsi_compute_resid(sc, resp->resid); 161 virtscsi_compute_resid(sc, virtio32_to_cpu(vscsi->vdev, resp->resid));
162 switch (resp->response) { 162 switch (resp->response) {
163 case VIRTIO_SCSI_S_OK: 163 case VIRTIO_SCSI_S_OK:
164 set_host_byte(sc, DID_OK); 164 set_host_byte(sc, DID_OK);
@@ -196,10 +196,13 @@ static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf)
196 break; 196 break;
197 } 197 }
198 198
199 WARN_ON(resp->sense_len > VIRTIO_SCSI_SENSE_SIZE); 199 WARN_ON(virtio32_to_cpu(vscsi->vdev, resp->sense_len) >
200 VIRTIO_SCSI_SENSE_SIZE);
200 if (sc->sense_buffer) { 201 if (sc->sense_buffer) {
201 memcpy(sc->sense_buffer, resp->sense, 202 memcpy(sc->sense_buffer, resp->sense,
202 min_t(u32, resp->sense_len, VIRTIO_SCSI_SENSE_SIZE)); 203 min_t(u32,
204 virtio32_to_cpu(vscsi->vdev, resp->sense_len),
205 VIRTIO_SCSI_SENSE_SIZE));
203 if (resp->sense_len) 206 if (resp->sense_len)
204 set_driver_byte(sc, DRIVER_SENSE); 207 set_driver_byte(sc, DRIVER_SENSE);
205 } 208 }
@@ -323,7 +326,7 @@ static void virtscsi_handle_transport_reset(struct virtio_scsi *vscsi,
323 unsigned int target = event->lun[1]; 326 unsigned int target = event->lun[1];
324 unsigned int lun = (event->lun[2] << 8) | event->lun[3]; 327 unsigned int lun = (event->lun[2] << 8) | event->lun[3];
325 328
326 switch (event->reason) { 329 switch (virtio32_to_cpu(vscsi->vdev, event->reason)) {
327 case VIRTIO_SCSI_EVT_RESET_RESCAN: 330 case VIRTIO_SCSI_EVT_RESET_RESCAN:
328 scsi_add_device(shost, 0, target, lun); 331 scsi_add_device(shost, 0, target, lun);
329 break; 332 break;
@@ -349,8 +352,8 @@ static void virtscsi_handle_param_change(struct virtio_scsi *vscsi,
349 struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev); 352 struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
350 unsigned int target = event->lun[1]; 353 unsigned int target = event->lun[1];
351 unsigned int lun = (event->lun[2] << 8) | event->lun[3]; 354 unsigned int lun = (event->lun[2] << 8) | event->lun[3];
352 u8 asc = event->reason & 255; 355 u8 asc = virtio32_to_cpu(vscsi->vdev, event->reason) & 255;
353 u8 ascq = event->reason >> 8; 356 u8 ascq = virtio32_to_cpu(vscsi->vdev, event->reason) >> 8;
354 357
355 sdev = scsi_device_lookup(shost, 0, target, lun); 358 sdev = scsi_device_lookup(shost, 0, target, lun);
356 if (!sdev) { 359 if (!sdev) {
@@ -374,12 +377,14 @@ static void virtscsi_handle_event(struct work_struct *work)
374 struct virtio_scsi *vscsi = event_node->vscsi; 377 struct virtio_scsi *vscsi = event_node->vscsi;
375 struct virtio_scsi_event *event = &event_node->event; 378 struct virtio_scsi_event *event = &event_node->event;
376 379
377 if (event->event & VIRTIO_SCSI_T_EVENTS_MISSED) { 380 if (event->event &
378 event->event &= ~VIRTIO_SCSI_T_EVENTS_MISSED; 381 cpu_to_virtio32(vscsi->vdev, VIRTIO_SCSI_T_EVENTS_MISSED)) {
382 event->event &= ~cpu_to_virtio32(vscsi->vdev,
383 VIRTIO_SCSI_T_EVENTS_MISSED);
379 scsi_scan_host(virtio_scsi_host(vscsi->vdev)); 384 scsi_scan_host(virtio_scsi_host(vscsi->vdev));
380 } 385 }
381 386
382 switch (event->event) { 387 switch (virtio32_to_cpu(vscsi->vdev, event->event)) {
383 case VIRTIO_SCSI_T_NO_EVENT: 388 case VIRTIO_SCSI_T_NO_EVENT:
384 break; 389 break;
385 case VIRTIO_SCSI_T_TRANSPORT_RESET: 390 case VIRTIO_SCSI_T_TRANSPORT_RESET:
@@ -482,26 +487,28 @@ static int virtscsi_kick_cmd(struct virtio_scsi_vq *vq,
482 return err; 487 return err;
483} 488}
484 489
485static void virtio_scsi_init_hdr(struct virtio_scsi_cmd_req *cmd, 490static void virtio_scsi_init_hdr(struct virtio_device *vdev,
491 struct virtio_scsi_cmd_req *cmd,
486 struct scsi_cmnd *sc) 492 struct scsi_cmnd *sc)
487{ 493{
488 cmd->lun[0] = 1; 494 cmd->lun[0] = 1;
489 cmd->lun[1] = sc->device->id; 495 cmd->lun[1] = sc->device->id;
490 cmd->lun[2] = (sc->device->lun >> 8) | 0x40; 496 cmd->lun[2] = (sc->device->lun >> 8) | 0x40;
491 cmd->lun[3] = sc->device->lun & 0xff; 497 cmd->lun[3] = sc->device->lun & 0xff;
492 cmd->tag = (unsigned long)sc; 498 cmd->tag = cpu_to_virtio64(vdev, (unsigned long)sc);
493 cmd->task_attr = VIRTIO_SCSI_S_SIMPLE; 499 cmd->task_attr = VIRTIO_SCSI_S_SIMPLE;
494 cmd->prio = 0; 500 cmd->prio = 0;
495 cmd->crn = 0; 501 cmd->crn = 0;
496} 502}
497 503
498static void virtio_scsi_init_hdr_pi(struct virtio_scsi_cmd_req_pi *cmd_pi, 504static void virtio_scsi_init_hdr_pi(struct virtio_device *vdev,
505 struct virtio_scsi_cmd_req_pi *cmd_pi,
499 struct scsi_cmnd *sc) 506 struct scsi_cmnd *sc)
500{ 507{
501 struct request *rq = sc->request; 508 struct request *rq = sc->request;
502 struct blk_integrity *bi; 509 struct blk_integrity *bi;
503 510
504 virtio_scsi_init_hdr((struct virtio_scsi_cmd_req *)cmd_pi, sc); 511 virtio_scsi_init_hdr(vdev, (struct virtio_scsi_cmd_req *)cmd_pi, sc);
505 512
506 if (!rq || !scsi_prot_sg_count(sc)) 513 if (!rq || !scsi_prot_sg_count(sc))
507 return; 514 return;
@@ -509,9 +516,13 @@ static void virtio_scsi_init_hdr_pi(struct virtio_scsi_cmd_req_pi *cmd_pi,
509 bi = blk_get_integrity(rq->rq_disk); 516 bi = blk_get_integrity(rq->rq_disk);
510 517
511 if (sc->sc_data_direction == DMA_TO_DEVICE) 518 if (sc->sc_data_direction == DMA_TO_DEVICE)
512 cmd_pi->pi_bytesout = blk_rq_sectors(rq) * bi->tuple_size; 519 cmd_pi->pi_bytesout = cpu_to_virtio32(vdev,
520 blk_rq_sectors(rq) *
521 bi->tuple_size);
513 else if (sc->sc_data_direction == DMA_FROM_DEVICE) 522 else if (sc->sc_data_direction == DMA_FROM_DEVICE)
514 cmd_pi->pi_bytesin = blk_rq_sectors(rq) * bi->tuple_size; 523 cmd_pi->pi_bytesin = cpu_to_virtio32(vdev,
524 blk_rq_sectors(rq) *
525 bi->tuple_size);
515} 526}
516 527
517static int virtscsi_queuecommand(struct virtio_scsi *vscsi, 528static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
@@ -536,11 +547,11 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
536 BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE); 547 BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE);
537 548
538 if (virtio_has_feature(vscsi->vdev, VIRTIO_SCSI_F_T10_PI)) { 549 if (virtio_has_feature(vscsi->vdev, VIRTIO_SCSI_F_T10_PI)) {
539 virtio_scsi_init_hdr_pi(&cmd->req.cmd_pi, sc); 550 virtio_scsi_init_hdr_pi(vscsi->vdev, &cmd->req.cmd_pi, sc);
540 memcpy(cmd->req.cmd_pi.cdb, sc->cmnd, sc->cmd_len); 551 memcpy(cmd->req.cmd_pi.cdb, sc->cmnd, sc->cmd_len);
541 req_size = sizeof(cmd->req.cmd_pi); 552 req_size = sizeof(cmd->req.cmd_pi);
542 } else { 553 } else {
543 virtio_scsi_init_hdr(&cmd->req.cmd, sc); 554 virtio_scsi_init_hdr(vscsi->vdev, &cmd->req.cmd, sc);
544 memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len); 555 memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len);
545 req_size = sizeof(cmd->req.cmd); 556 req_size = sizeof(cmd->req.cmd);
546 } 557 }
@@ -561,6 +572,15 @@ static int virtscsi_queuecommand_single(struct Scsi_Host *sh,
561 return virtscsi_queuecommand(vscsi, &vscsi->req_vqs[0], sc); 572 return virtscsi_queuecommand(vscsi, &vscsi->req_vqs[0], sc);
562} 573}
563 574
575static struct virtio_scsi_vq *virtscsi_pick_vq_mq(struct virtio_scsi *vscsi,
576 struct scsi_cmnd *sc)
577{
578 u32 tag = blk_mq_unique_tag(sc->request);
579 u16 hwq = blk_mq_unique_tag_to_hwq(tag);
580
581 return &vscsi->req_vqs[hwq];
582}
583
564static struct virtio_scsi_vq *virtscsi_pick_vq(struct virtio_scsi *vscsi, 584static struct virtio_scsi_vq *virtscsi_pick_vq(struct virtio_scsi *vscsi,
565 struct virtio_scsi_target_state *tgt) 585 struct virtio_scsi_target_state *tgt)
566{ 586{
@@ -604,7 +624,12 @@ static int virtscsi_queuecommand_multi(struct Scsi_Host *sh,
604 struct virtio_scsi *vscsi = shost_priv(sh); 624 struct virtio_scsi *vscsi = shost_priv(sh);
605 struct virtio_scsi_target_state *tgt = 625 struct virtio_scsi_target_state *tgt =
606 scsi_target(sc->device)->hostdata; 626 scsi_target(sc->device)->hostdata;
607 struct virtio_scsi_vq *req_vq = virtscsi_pick_vq(vscsi, tgt); 627 struct virtio_scsi_vq *req_vq;
628
629 if (shost_use_blk_mq(sh))
630 req_vq = virtscsi_pick_vq_mq(vscsi, sc);
631 else
632 req_vq = virtscsi_pick_vq(vscsi, tgt);
608 633
609 return virtscsi_queuecommand(vscsi, req_vq, sc); 634 return virtscsi_queuecommand(vscsi, req_vq, sc);
610} 635}
@@ -655,7 +680,8 @@ static int virtscsi_device_reset(struct scsi_cmnd *sc)
655 cmd->sc = sc; 680 cmd->sc = sc;
656 cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){ 681 cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
657 .type = VIRTIO_SCSI_T_TMF, 682 .type = VIRTIO_SCSI_T_TMF,
658 .subtype = VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET, 683 .subtype = cpu_to_virtio32(vscsi->vdev,
684 VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET),
659 .lun[0] = 1, 685 .lun[0] = 1,
660 .lun[1] = sc->device->id, 686 .lun[1] = sc->device->id,
661 .lun[2] = (sc->device->lun >> 8) | 0x40, 687 .lun[2] = (sc->device->lun >> 8) | 0x40,
@@ -668,30 +694,13 @@ static int virtscsi_device_reset(struct scsi_cmnd *sc)
668 * virtscsi_change_queue_depth() - Change a virtscsi target's queue depth 694 * virtscsi_change_queue_depth() - Change a virtscsi target's queue depth
669 * @sdev: Virtscsi target whose queue depth to change 695 * @sdev: Virtscsi target whose queue depth to change
670 * @qdepth: New queue depth 696 * @qdepth: New queue depth
671 * @reason: Reason for the queue depth change.
672 */ 697 */
673static int virtscsi_change_queue_depth(struct scsi_device *sdev, 698static int virtscsi_change_queue_depth(struct scsi_device *sdev, int qdepth)
674 int qdepth,
675 int reason)
676{ 699{
677 struct Scsi_Host *shost = sdev->host; 700 struct Scsi_Host *shost = sdev->host;
678 int max_depth = shost->cmd_per_lun; 701 int max_depth = shost->cmd_per_lun;
679 702
680 switch (reason) { 703 return scsi_change_queue_depth(sdev, min(max_depth, qdepth));
681 case SCSI_QDEPTH_QFULL: /* Drop qdepth in response to BUSY state */
682 scsi_track_queue_full(sdev, qdepth);
683 break;
684 case SCSI_QDEPTH_RAMP_UP: /* Raise qdepth after BUSY state resolved */
685 case SCSI_QDEPTH_DEFAULT: /* Manual change via sysfs */
686 scsi_adjust_queue_depth(sdev,
687 scsi_get_tag_type(sdev),
688 min(max_depth, qdepth));
689 break;
690 default:
691 return -EOPNOTSUPP;
692 }
693
694 return sdev->queue_depth;
695} 704}
696 705
697static int virtscsi_abort(struct scsi_cmnd *sc) 706static int virtscsi_abort(struct scsi_cmnd *sc)
@@ -713,7 +722,7 @@ static int virtscsi_abort(struct scsi_cmnd *sc)
713 .lun[1] = sc->device->id, 722 .lun[1] = sc->device->id,
714 .lun[2] = (sc->device->lun >> 8) | 0x40, 723 .lun[2] = (sc->device->lun >> 8) | 0x40,
715 .lun[3] = sc->device->lun & 0xff, 724 .lun[3] = sc->device->lun & 0xff,
716 .tag = (unsigned long)sc, 725 .tag = cpu_to_virtio64(vscsi->vdev, (unsigned long)sc),
717 }; 726 };
718 return virtscsi_tmf(vscsi, cmd); 727 return virtscsi_tmf(vscsi, cmd);
719} 728}
@@ -758,6 +767,7 @@ static struct scsi_host_template virtscsi_host_template_single = {
758 .use_clustering = ENABLE_CLUSTERING, 767 .use_clustering = ENABLE_CLUSTERING,
759 .target_alloc = virtscsi_target_alloc, 768 .target_alloc = virtscsi_target_alloc,
760 .target_destroy = virtscsi_target_destroy, 769 .target_destroy = virtscsi_target_destroy,
770 .track_queue_depth = 1,
761}; 771};
762 772
763static struct scsi_host_template virtscsi_host_template_multi = { 773static struct scsi_host_template virtscsi_host_template_multi = {
@@ -776,6 +786,7 @@ static struct scsi_host_template virtscsi_host_template_multi = {
776 .use_clustering = ENABLE_CLUSTERING, 786 .use_clustering = ENABLE_CLUSTERING,
777 .target_alloc = virtscsi_target_alloc, 787 .target_alloc = virtscsi_target_alloc,
778 .target_destroy = virtscsi_target_destroy, 788 .target_destroy = virtscsi_target_destroy,
789 .track_queue_depth = 1,
779}; 790};
780 791
781#define virtscsi_config_get(vdev, fld) \ 792#define virtscsi_config_get(vdev, fld) \
@@ -983,6 +994,7 @@ static int virtscsi_probe(struct virtio_device *vdev)
983 shost->max_id = num_targets; 994 shost->max_id = num_targets;
984 shost->max_channel = 0; 995 shost->max_channel = 0;
985 shost->max_cmd_len = VIRTIO_SCSI_CDB_SIZE; 996 shost->max_cmd_len = VIRTIO_SCSI_CDB_SIZE;
997 shost->nr_hw_queues = num_queues;
986 998
987 if (virtio_has_feature(vdev, VIRTIO_SCSI_F_T10_PI)) { 999 if (virtio_has_feature(vdev, VIRTIO_SCSI_F_T10_PI)) {
988 host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION | 1000 host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index 598f65efaaec..0f133c1817de 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -504,33 +504,11 @@ static void pvscsi_setup_all_rings(const struct pvscsi_adapter *adapter)
504 } 504 }
505} 505}
506 506
507static int pvscsi_change_queue_depth(struct scsi_device *sdev, 507static int pvscsi_change_queue_depth(struct scsi_device *sdev, int qdepth)
508 int qdepth,
509 int reason)
510{ 508{
511 int max_depth;
512 struct Scsi_Host *shost = sdev->host;
513
514 if (reason != SCSI_QDEPTH_DEFAULT)
515 /*
516 * We support only changing default.
517 */
518 return -EOPNOTSUPP;
519
520 max_depth = shost->can_queue;
521 if (!sdev->tagged_supported) 509 if (!sdev->tagged_supported)
522 max_depth = 1; 510 qdepth = 1;
523 if (qdepth > max_depth) 511 return scsi_change_queue_depth(sdev, qdepth);
524 qdepth = max_depth;
525 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
526
527 if (sdev->inquiry_len > 7)
528 sdev_printk(KERN_INFO, sdev,
529 "qdepth(%d), tagged(%d), simple(%d), ordered(%d), scsi_level(%d), cmd_que(%d)\n",
530 sdev->queue_depth, sdev->tagged_supported,
531 sdev->simple_tags, sdev->ordered_tags,
532 sdev->scsi_level, (sdev->inquiry[7] & 2) >> 1);
533 return sdev->queue_depth;
534} 512}
535 513
536/* 514/*
@@ -723,10 +701,6 @@ static int pvscsi_queue_ring(struct pvscsi_adapter *adapter,
723 memcpy(e->cdb, cmd->cmnd, e->cdbLen); 701 memcpy(e->cdb, cmd->cmnd, e->cdbLen);
724 702
725 e->tag = SIMPLE_QUEUE_TAG; 703 e->tag = SIMPLE_QUEUE_TAG;
726 if (sdev->tagged_supported &&
727 (cmd->tag == HEAD_OF_QUEUE_TAG ||
728 cmd->tag == ORDERED_QUEUE_TAG))
729 e->tag = cmd->tag;
730 704
731 if (cmd->sc_data_direction == DMA_FROM_DEVICE) 705 if (cmd->sc_data_direction == DMA_FROM_DEVICE)
732 e->flags = PVSCSI_FLAG_CMD_DIR_TOHOST; 706 e->flags = PVSCSI_FLAG_CMD_DIR_TOHOST;
diff --git a/drivers/scsi/wd7000.c b/drivers/scsi/wd7000.c
index 32674236fec7..f94d73611ab4 100644
--- a/drivers/scsi/wd7000.c
+++ b/drivers/scsi/wd7000.c
@@ -1653,7 +1653,6 @@ static struct scsi_host_template driver_template = {
1653 .can_queue = WD7000_Q, 1653 .can_queue = WD7000_Q,
1654 .this_id = 7, 1654 .this_id = 7,
1655 .sg_tablesize = WD7000_SG, 1655 .sg_tablesize = WD7000_SG,
1656 .cmd_per_lun = 1,
1657 .unchecked_isa_dma = 1, 1656 .unchecked_isa_dma = 1,
1658 .use_clustering = ENABLE_CLUSTERING, 1657 .use_clustering = ENABLE_CLUSTERING,
1659}; 1658};
diff --git a/drivers/scsi/wd719x.c b/drivers/scsi/wd719x.c
new file mode 100644
index 000000000000..7702664d7ed3
--- /dev/null
+++ b/drivers/scsi/wd719x.c
@@ -0,0 +1,996 @@
1/*
2 * Driver for Western Digital WD7193, WD7197 and WD7296 SCSI cards
3 * Copyright 2013 Ondrej Zary
4 *
5 * Original driver by
6 * Aaron Dewell <dewell@woods.net>
7 * Gaerti <Juergen.Gaertner@mbox.si.uni-hannover.de>
8 *
9 * HW documentation available in book:
10 *
11 * SPIDER Command Protocol
12 * by Chandru M. Sippy
13 * SCSI Storage Products (MCP)
14 * Western Digital Corporation
15 * 09-15-95
16 *
17 * http://web.archive.org/web/20070717175254/http://sun1.rrzn.uni-hannover.de/gaertner.juergen/wd719x/Linux/Docu/Spider/
18 */
19
20/*
21 * Driver workflow:
22 * 1. SCSI command is transformed to SCB (Spider Control Block) by the
23 * queuecommand function.
24 * 2. The address of the SCB is stored in a list to be able to access it, if
25 * something goes wrong.
26 * 3. The address of the SCB is written to the Controller, which loads the SCB
27 * via BM-DMA and processes it.
28 * 4. After it has finished, it generates an interrupt, and sets registers.
29 *
30 * flaws:
31 * - abort/reset functions
32 *
33 * ToDo:
34 * - tagged queueing
35 */
36
37#include <linux/interrupt.h>
38#include <linux/module.h>
39#include <linux/delay.h>
40#include <linux/pci.h>
41#include <linux/firmware.h>
42#include <linux/eeprom_93cx6.h>
43#include <scsi/scsi_cmnd.h>
44#include <scsi/scsi_device.h>
45#include <scsi/scsi_host.h>
46#include "wd719x.h"
47
48/* low-level register access */
49static inline u8 wd719x_readb(struct wd719x *wd, u8 reg)
50{
51 return ioread8(wd->base + reg);
52}
53
54static inline u32 wd719x_readl(struct wd719x *wd, u8 reg)
55{
56 return ioread32(wd->base + reg);
57}
58
59static inline void wd719x_writeb(struct wd719x *wd, u8 reg, u8 val)
60{
61 iowrite8(val, wd->base + reg);
62}
63
64static inline void wd719x_writew(struct wd719x *wd, u8 reg, u16 val)
65{
66 iowrite16(val, wd->base + reg);
67}
68
69static inline void wd719x_writel(struct wd719x *wd, u8 reg, u32 val)
70{
71 iowrite32(val, wd->base + reg);
72}
73
74/* wait until the command register is ready */
75static inline int wd719x_wait_ready(struct wd719x *wd)
76{
77 int i = 0;
78
79 do {
80 if (wd719x_readb(wd, WD719X_AMR_COMMAND) == WD719X_CMD_READY)
81 return 0;
82 udelay(1);
83 } while (i++ < WD719X_WAIT_FOR_CMD_READY);
84
85 dev_err(&wd->pdev->dev, "command register is not ready: 0x%02x\n",
86 wd719x_readb(wd, WD719X_AMR_COMMAND));
87
88 return -ETIMEDOUT;
89}
90
91/* poll interrupt status register until command finishes */
92static inline int wd719x_wait_done(struct wd719x *wd, int timeout)
93{
94 u8 status;
95
96 while (timeout > 0) {
97 status = wd719x_readb(wd, WD719X_AMR_INT_STATUS);
98 if (status)
99 break;
100 timeout--;
101 udelay(1);
102 }
103
104 if (timeout <= 0) {
105 dev_err(&wd->pdev->dev, "direct command timed out\n");
106 return -ETIMEDOUT;
107 }
108
109 if (status != WD719X_INT_NOERRORS) {
110 dev_err(&wd->pdev->dev, "direct command failed, status 0x%02x, SUE 0x%02x\n",
111 status, wd719x_readb(wd, WD719X_AMR_SCB_ERROR));
112 return -EIO;
113 }
114
115 return 0;
116}
117
/*
 * Issue a direct (non-SCB) command to the RISC with card interrupts masked.
 *
 * @opcode:  WD719X_CMD_* value written to the command register
 * @dev:     target ID (WD719X_DISABLE_INT is OR-ed in here)
 * @lun:     target LUN
 * @tag:     queue tag byte
 * @data:    optional DMA address written to the SCB-in register (0 = none)
 * @timeout: if non-zero, poll that many microseconds for completion
 *
 * Returns 0 on success, -ETIMEDOUT or -EIO otherwise.  The register writes
 * below form one indivisible sequence; callers appear to serialize via the
 * host lock — NOTE(review): not enforced here, confirm for all call sites.
 */
static int wd719x_direct_cmd(struct wd719x *wd, u8 opcode, u8 dev, u8 lun,
			     u8 tag, dma_addr_t data, int timeout)
{
	int ret = 0;

	/* clear interrupt status register (allow command register to clear) */
	wd719x_writeb(wd, WD719X_AMR_INT_STATUS, WD719X_INT_NONE);

	/* Wait for the Command register to become free */
	if (wd719x_wait_ready(wd))
		return -ETIMEDOUT;

	/* make sure we get NO interrupts */
	dev |= WD719X_DISABLE_INT;
	wd719x_writeb(wd, WD719X_AMR_CMD_PARAM, dev);
	wd719x_writeb(wd, WD719X_AMR_CMD_PARAM_2, lun);
	wd719x_writeb(wd, WD719X_AMR_CMD_PARAM_3, tag);
	if (data)
		wd719x_writel(wd, WD719X_AMR_SCB_IN, data);

	/* clear interrupt status register again */
	wd719x_writeb(wd, WD719X_AMR_INT_STATUS, WD719X_INT_NONE);

	/* Now, write the command */
	wd719x_writeb(wd, WD719X_AMR_COMMAND, opcode);

	if (timeout)	/* wait for the command to complete */
		ret = wd719x_wait_done(wd, timeout);

	/*
	 * Clean up the interrupt status, except for READ_FIRMVER whose
	 * result is read from the SCB-out registers by the caller first.
	 */
	if (opcode != WD719X_CMD_READ_FIRMVER)
		wd719x_writeb(wd, WD719X_AMR_INT_STATUS, WD719X_INT_NONE);

	return ret;
}
153
154static void wd719x_destroy(struct wd719x *wd)
155{
156 struct wd719x_scb *scb;
157
158 /* stop the RISC */
159 if (wd719x_direct_cmd(wd, WD719X_CMD_SLEEP, 0, 0, 0, 0,
160 WD719X_WAIT_FOR_RISC))
161 dev_warn(&wd->pdev->dev, "RISC sleep command failed\n");
162 /* disable RISC */
163 wd719x_writeb(wd, WD719X_PCI_MODE_SELECT, 0);
164
165 /* free all SCBs */
166 list_for_each_entry(scb, &wd->active_scbs, list)
167 pci_free_consistent(wd->pdev, sizeof(struct wd719x_scb), scb,
168 scb->phys);
169 list_for_each_entry(scb, &wd->free_scbs, list)
170 pci_free_consistent(wd->pdev, sizeof(struct wd719x_scb), scb,
171 scb->phys);
172 /* free internal buffers */
173 pci_free_consistent(wd->pdev, wd->fw_size, wd->fw_virt, wd->fw_phys);
174 wd->fw_virt = NULL;
175 pci_free_consistent(wd->pdev, WD719X_HASH_TABLE_SIZE, wd->hash_virt,
176 wd->hash_phys);
177 wd->hash_virt = NULL;
178 pci_free_consistent(wd->pdev, sizeof(struct wd719x_host_param),
179 wd->params, wd->params_phys);
180 wd->params = NULL;
181 free_irq(wd->pdev->irq, wd);
182}
183
/*
 * Finish a SCSI command: move its SCB (if any) to the free list, unmap the
 * sense buffer and data buffers, store @result as the host byte and call
 * the midlayer completion.  @result is a DID_* host status code.
 */
static void wd719x_finish_cmd(struct scsi_cmnd *cmd, int result)
{
	struct wd719x *wd = shost_priv(cmd->device->host);
	struct wd719x_scb *scb = (struct wd719x_scb *) cmd->host_scribble;

	if (scb) {
		/* SCB is recycled, not freed — it goes back to the pool */
		list_move(&scb->list, &wd->free_scbs);
		/* undo the sense-buffer mapping made in queuecommand */
		dma_unmap_single(&wd->pdev->dev, cmd->SCp.dma_handle,
				SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
		scsi_dma_unmap(cmd);
	}
	cmd->result = result << 16;	/* host byte position */
	cmd->scsi_done(cmd);
}
199
/*
 * Build a SCB from the midlayer command and hand it to the card.
 * Returns 0 when the command was accepted (or completed with DID_ERROR via
 * scsi_done) and SCSI_MLQUEUE_HOST_BUSY when the command register is busy.
 * The host lock is taken here; it is dropped around the SCB allocation
 * because pci_alloc_consistent() may sleep.
 */
static int wd719x_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
{
	int i, count_sg;
	unsigned long flags;
	struct wd719x_scb *scb;
	struct wd719x *wd = shost_priv(sh);
	dma_addr_t phys;

	cmd->host_scribble = NULL;

	/* get a free SCB - either from existing ones or allocate a new one */
	spin_lock_irqsave(wd->sh->host_lock, flags);
	scb = list_first_entry_or_null(&wd->free_scbs, struct wd719x_scb, list);
	if (scb) {
		list_del(&scb->list);
		phys = scb->phys;
	} else {
		/* drop the lock: the consistent allocation may sleep */
		spin_unlock_irqrestore(wd->sh->host_lock, flags);
		scb = pci_alloc_consistent(wd->pdev, sizeof(struct wd719x_scb),
					   &phys);
		spin_lock_irqsave(wd->sh->host_lock, flags);
		if (!scb) {
			dev_err(&wd->pdev->dev, "unable to allocate SCB\n");
			wd719x_finish_cmd(cmd, DID_ERROR);
			spin_unlock_irqrestore(wd->sh->host_lock, flags);
			return 0;
		}
	}
	memset(scb, 0, sizeof(struct wd719x_scb));
	list_add(&scb->list, &wd->active_scbs);

	scb->phys = phys;
	scb->cmd = cmd;
	cmd->host_scribble = (char *) scb;

	scb->CDB_tag = 0;	/* Tagged queueing not supported yet */
	scb->devid = cmd->device->id;
	scb->lun = cmd->device->lun;

	/* copy the command */
	memcpy(scb->CDB, cmd->cmnd, cmd->cmd_len);

	/* map sense buffer so the card can deposit autosense data directly */
	scb->sense_buf_length = SCSI_SENSE_BUFFERSIZE;
	cmd->SCp.dma_handle = dma_map_single(&wd->pdev->dev, cmd->sense_buffer,
			SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
	scb->sense_buf = cpu_to_le32(cmd->SCp.dma_handle);

	/* request autosense */
	scb->SCB_options |= WD719X_SCB_FLAGS_AUTO_REQUEST_SENSE;

	/* check direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE)
		scb->SCB_options |= WD719X_SCB_FLAGS_CHECK_DIRECTION
				 | WD719X_SCB_FLAGS_PCI_TO_SCSI;
	else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		scb->SCB_options |= WD719X_SCB_FLAGS_CHECK_DIRECTION;

	/* Scatter/gather: map the data buffers into the in-SCB SG list */
	count_sg = scsi_dma_map(cmd);
	if (count_sg < 0) {
		wd719x_finish_cmd(cmd, DID_ERROR);
		spin_unlock_irqrestore(wd->sh->host_lock, flags);
		return 0;
	}
	BUG_ON(count_sg > WD719X_SG);

	if (count_sg) {
		struct scatterlist *sg;

		scb->data_length = cpu_to_le32(count_sg *
					       sizeof(struct wd719x_sglist));
		scb->data_p = cpu_to_le32(scb->phys +
					  offsetof(struct wd719x_scb, sg_list));

		scsi_for_each_sg(cmd, sg, count_sg, i) {
			scb->sg_list[i].ptr = cpu_to_le32(sg_dma_address(sg));
			scb->sg_list[i].length = cpu_to_le32(sg_dma_len(sg));
		}
		scb->SCB_options |= WD719X_SCB_FLAGS_DO_SCATTER_GATHER;
	} else { /* zero length */
		scb->data_length = 0;
		scb->data_p = 0;
	}

	/* check if the Command register is free */
	if (wd719x_readb(wd, WD719X_AMR_COMMAND) != WD719X_CMD_READY) {
		spin_unlock_irqrestore(wd->sh->host_lock, flags);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	/* write pointer to the AMR */
	wd719x_writel(wd, WD719X_AMR_SCB_IN, scb->phys);
	/* send SCB opcode */
	wd719x_writeb(wd, WD719X_AMR_COMMAND, WD719X_CMD_PROCESS_SCB);

	spin_unlock_irqrestore(wd->sh->host_lock, flags);

	return 0;
}
301
/*
 * Full chip (re-)initialization: load the WCS and RISC firmware images,
 * reset the Spider chip, bootstrap the first 2K words of RISC code via
 * BM-DMA, wake the RISC up, reset the SCSI bus, download the host
 * parameter block and initiate SCAM.  Returns 0 on success or a negative
 * errno.  Called from wd719x_board_found() and wd719x_host_reset().
 * NOTE(review): request_firmware()/pci_alloc_consistent() may sleep, which
 * matters for the host-reset call path — confirm against the caller.
 */
static int wd719x_chip_init(struct wd719x *wd)
{
	int i, ret;
	u32 risc_init[3];
	const struct firmware *fw_wcs, *fw_risc;
	const char fwname_wcs[] = "wd719x-wcs.bin";
	const char fwname_risc[] = "wd719x-risc.bin";

	memset(wd->hash_virt, 0, WD719X_HASH_TABLE_SIZE);

	/* WCS (sequencer) firmware */
	ret = request_firmware(&fw_wcs, fwname_wcs, &wd->pdev->dev);
	if (ret) {
		dev_err(&wd->pdev->dev, "Unable to load firmware %s: %d\n",
			fwname_wcs, ret);
		return ret;
	}
	/* RISC firmware */
	ret = request_firmware(&fw_risc, fwname_risc, &wd->pdev->dev);
	if (ret) {
		dev_err(&wd->pdev->dev, "Unable to load firmware %s: %d\n",
			fwname_risc, ret);
		release_firmware(fw_wcs);
		return ret;
	}
	/* both images live in one buffer, RISC aligned after WCS */
	wd->fw_size = ALIGN(fw_wcs->size, 4) + fw_risc->size;

	/* the buffer is kept across reinitializations (freed in destroy) */
	if (!wd->fw_virt)
		wd->fw_virt = pci_alloc_consistent(wd->pdev, wd->fw_size,
						   &wd->fw_phys);
	if (!wd->fw_virt) {
		ret = -ENOMEM;
		goto wd719x_init_end;
	}

	/* make a fresh copy of WCS and RISC code */
	memcpy(wd->fw_virt, fw_wcs->data, fw_wcs->size);
	memcpy(wd->fw_virt + ALIGN(fw_wcs->size, 4), fw_risc->data,
	       fw_risc->size);

	/* Reset the Spider Chip and adapter itself */
	wd719x_writeb(wd, WD719X_PCI_PORT_RESET, WD719X_PCI_RESET);
	udelay(WD719X_WAIT_FOR_RISC);
	/* Clear PIO mode bits set by BIOS */
	wd719x_writeb(wd, WD719X_AMR_CMD_PARAM, 0);
	/* ensure RISC is not running */
	wd719x_writeb(wd, WD719X_PCI_MODE_SELECT, 0);
	/* ensure command port is ready */
	wd719x_writeb(wd, WD719X_AMR_COMMAND, 0);
	if (wd719x_wait_ready(wd)) {
		ret = -ETIMEDOUT;
		goto wd719x_init_end;
	}

	/* Transfer the first 2K words of RISC code to kick start the uP */
	risc_init[0] = wd->fw_phys;				/* WCS FW */
	risc_init[1] = wd->fw_phys + ALIGN(fw_wcs->size, 4);	/* RISC FW */
	risc_init[2] = wd->hash_phys;				/* hash table */

	/* clear DMA status */
	wd719x_writeb(wd, WD719X_PCI_CHANNEL2_3STATUS, 0);

	/* address to read firmware from */
	wd719x_writel(wd, WD719X_PCI_EXTERNAL_ADDR, risc_init[1]);
	/* base address to write firmware to (on card) */
	wd719x_writew(wd, WD719X_PCI_INTERNAL_ADDR, WD719X_PRAM_BASE_ADDR);
	/* size: first 2K words */
	wd719x_writew(wd, WD719X_PCI_DMA_TRANSFER_SIZE, 2048 * 2);
	/* start DMA */
	wd719x_writeb(wd, WD719X_PCI_CHANNEL2_3CMD, WD719X_START_CHANNEL2_3DMA);

	/* wait for DMA to complete */
	i = WD719X_WAIT_FOR_RISC;
	while (i-- > 0) {
		u8 status = wd719x_readb(wd, WD719X_PCI_CHANNEL2_3STATUS);
		if (status == WD719X_START_CHANNEL2_3DONE)
			break;
		if (status == WD719X_START_CHANNEL2_3ABORT) {
			dev_warn(&wd->pdev->dev, "RISC bootstrap failed: DMA aborted\n");
			ret = -EIO;
			goto wd719x_init_end;
		}
		udelay(1);
	}
	if (i < 1) {
		dev_warn(&wd->pdev->dev, "RISC bootstrap failed: DMA timeout\n");
		ret = -ETIMEDOUT;
		goto wd719x_init_end;
	}

	/* firmware is loaded, now initialize and wake up the RISC */
	/* write RISC initialization long words to Spider */
	wd719x_writel(wd, WD719X_AMR_SCB_IN, risc_init[0]);
	wd719x_writel(wd, WD719X_AMR_SCB_IN + 4, risc_init[1]);
	wd719x_writel(wd, WD719X_AMR_SCB_IN + 8, risc_init[2]);

	/* disable interrupts during initialization of RISC */
	wd719x_writeb(wd, WD719X_AMR_CMD_PARAM, WD719X_DISABLE_INT);

	/* issue INITIALIZE RISC command */
	wd719x_writeb(wd, WD719X_AMR_COMMAND, WD719X_CMD_INIT_RISC);
	/* enable advanced mode (wake up RISC) */
	wd719x_writeb(wd, WD719X_PCI_MODE_SELECT, WD719X_ENABLE_ADVANCE_MODE);
	udelay(WD719X_WAIT_FOR_RISC);

	ret = wd719x_wait_done(wd, WD719X_WAIT_FOR_RISC);
	/* clear interrupt status register */
	wd719x_writeb(wd, WD719X_AMR_INT_STATUS, WD719X_INT_NONE);
	if (ret) {
		dev_warn(&wd->pdev->dev, "Unable to initialize RISC\n");
		goto wd719x_init_end;
	}
	/* RISC is up and running */

	/* Read FW version from RISC */
	ret = wd719x_direct_cmd(wd, WD719X_CMD_READ_FIRMVER, 0, 0, 0, 0,
				WD719X_WAIT_FOR_RISC);
	if (ret) {
		dev_warn(&wd->pdev->dev, "Unable to read firmware version\n");
		goto wd719x_init_end;
	}
	dev_info(&wd->pdev->dev, "RISC initialized with firmware version %.2x.%.2x\n",
		 wd719x_readb(wd, WD719X_AMR_SCB_OUT + 1),
		 wd719x_readb(wd, WD719X_AMR_SCB_OUT));

	/* RESET SCSI bus */
	ret = wd719x_direct_cmd(wd, WD719X_CMD_BUSRESET, 0, 0, 0, 0,
				WD719X_WAIT_FOR_SCSI_RESET);
	if (ret) {
		dev_warn(&wd->pdev->dev, "SCSI bus reset failed\n");
		goto wd719x_init_end;
	}

	/* use HostParameter structure to set Spider's Host Parameter Block */
	ret = wd719x_direct_cmd(wd, WD719X_CMD_SET_PARAM, 0,
				sizeof(struct wd719x_host_param), 0,
				wd->params_phys, WD719X_WAIT_FOR_RISC);
	if (ret) {
		dev_warn(&wd->pdev->dev, "Failed to set HOST PARAMETERS\n");
		goto wd719x_init_end;
	}

	/* initiate SCAM (does nothing if disabled in BIOS) */
	/* bug?: we should pass a mask of static IDs which we don't have */
	ret = wd719x_direct_cmd(wd, WD719X_CMD_INIT_SCAM, 0, 0, 0, 0,
				WD719X_WAIT_FOR_SCSI_RESET);
	if (ret) {
		dev_warn(&wd->pdev->dev, "SCAM initialization failed\n");
		goto wd719x_init_end;
	}

	/* clear AMR_BIOS_SHARE_INT register */
	wd719x_writeb(wd, WD719X_AMR_BIOS_SHARE_INT, 0);

wd719x_init_end:
	release_firmware(fw_wcs);
	release_firmware(fw_risc);

	return ret;
}
462
463static int wd719x_abort(struct scsi_cmnd *cmd)
464{
465 int action, result;
466 unsigned long flags;
467 struct wd719x_scb *scb = (struct wd719x_scb *)cmd->host_scribble;
468 struct wd719x *wd = shost_priv(cmd->device->host);
469
470 dev_info(&wd->pdev->dev, "abort command, tag: %x\n", cmd->tag);
471
472 action = /*cmd->tag ? WD719X_CMD_ABORT_TAG : */WD719X_CMD_ABORT;
473
474 spin_lock_irqsave(wd->sh->host_lock, flags);
475 result = wd719x_direct_cmd(wd, action, cmd->device->id,
476 cmd->device->lun, cmd->tag, scb->phys, 0);
477 spin_unlock_irqrestore(wd->sh->host_lock, flags);
478 if (result)
479 return FAILED;
480
481 return SUCCESS;
482}
483
484static int wd719x_reset(struct scsi_cmnd *cmd, u8 opcode, u8 device)
485{
486 int result;
487 unsigned long flags;
488 struct wd719x *wd = shost_priv(cmd->device->host);
489
490 dev_info(&wd->pdev->dev, "%s reset requested\n",
491 (opcode == WD719X_CMD_BUSRESET) ? "bus" : "device");
492
493 spin_lock_irqsave(wd->sh->host_lock, flags);
494 result = wd719x_direct_cmd(wd, opcode, device, 0, 0, 0,
495 WD719X_WAIT_FOR_SCSI_RESET);
496 spin_unlock_irqrestore(wd->sh->host_lock, flags);
497 if (result)
498 return FAILED;
499
500 return SUCCESS;
501}
502
/* eh_device_reset_handler: reset a single target device */
static int wd719x_dev_reset(struct scsi_cmnd *cmd)
{
	return wd719x_reset(cmd, WD719X_CMD_RESET, cmd->device->id);
}
507
/* eh_bus_reset_handler: reset the whole SCSI bus */
static int wd719x_bus_reset(struct scsi_cmnd *cmd)
{
	return wd719x_reset(cmd, WD719X_CMD_BUSRESET, 0);
}
512
513static int wd719x_host_reset(struct scsi_cmnd *cmd)
514{
515 struct wd719x *wd = shost_priv(cmd->device->host);
516 struct wd719x_scb *scb, *tmp;
517 unsigned long flags;
518 int result;
519
520 dev_info(&wd->pdev->dev, "host reset requested\n");
521 spin_lock_irqsave(wd->sh->host_lock, flags);
522 /* Try to reinit the RISC */
523 if (wd719x_chip_init(wd) == 0)
524 result = SUCCESS;
525 else
526 result = FAILED;
527
528 /* flush all SCBs */
529 list_for_each_entry_safe(scb, tmp, &wd->active_scbs, list) {
530 struct scsi_cmnd *tmp_cmd = scb->cmd;
531 wd719x_finish_cmd(tmp_cmd, result);
532 }
533 spin_unlock_irqrestore(wd->sh->host_lock, flags);
534
535 return result;
536}
537
538static int wd719x_biosparam(struct scsi_device *sdev, struct block_device *bdev,
539 sector_t capacity, int geom[])
540{
541 if (capacity >= 0x200000) {
542 geom[0] = 255; /* heads */
543 geom[1] = 63; /* sectors */
544 } else {
545 geom[0] = 64; /* heads */
546 geom[1] = 32; /* sectors */
547 }
548 geom[2] = sector_div(capacity, geom[0] * geom[1]); /* cylinders */
549
550 return 0;
551}
552
553/* process a SCB-completion interrupt */
554static inline void wd719x_interrupt_SCB(struct wd719x *wd,
555 union wd719x_regs regs,
556 struct wd719x_scb *scb)
557{
558 struct scsi_cmnd *cmd;
559 int result;
560
561 /* now have to find result from card */
562 switch (regs.bytes.SUE) {
563 case WD719X_SUE_NOERRORS:
564 result = DID_OK;
565 break;
566 case WD719X_SUE_REJECTED:
567 dev_err(&wd->pdev->dev, "command rejected\n");
568 result = DID_ERROR;
569 break;
570 case WD719X_SUE_SCBQFULL:
571 dev_err(&wd->pdev->dev, "SCB queue is full\n");
572 result = DID_ERROR;
573 break;
574 case WD719X_SUE_TERM:
575 dev_dbg(&wd->pdev->dev, "SCB terminated by direct command\n");
576 result = DID_ABORT; /* or DID_RESET? */
577 break;
578 case WD719X_SUE_CHAN1ABORT:
579 case WD719X_SUE_CHAN23ABORT:
580 result = DID_ABORT;
581 dev_err(&wd->pdev->dev, "DMA abort\n");
582 break;
583 case WD719X_SUE_CHAN1PAR:
584 case WD719X_SUE_CHAN23PAR:
585 result = DID_PARITY;
586 dev_err(&wd->pdev->dev, "DMA parity error\n");
587 break;
588 case WD719X_SUE_TIMEOUT:
589 result = DID_TIME_OUT;
590 dev_dbg(&wd->pdev->dev, "selection timeout\n");
591 break;
592 case WD719X_SUE_RESET:
593 dev_dbg(&wd->pdev->dev, "bus reset occured\n");
594 result = DID_RESET;
595 break;
596 case WD719X_SUE_BUSERROR:
597 dev_dbg(&wd->pdev->dev, "SCSI bus error\n");
598 result = DID_ERROR;
599 break;
600 case WD719X_SUE_WRONGWAY:
601 dev_err(&wd->pdev->dev, "wrong data transfer direction\n");
602 result = DID_ERROR;
603 break;
604 case WD719X_SUE_BADPHASE:
605 dev_err(&wd->pdev->dev, "invalid SCSI phase\n");
606 result = DID_ERROR;
607 break;
608 case WD719X_SUE_TOOLONG:
609 dev_err(&wd->pdev->dev, "record too long\n");
610 result = DID_ERROR;
611 break;
612 case WD719X_SUE_BUSFREE:
613 dev_err(&wd->pdev->dev, "unexpected bus free\n");
614 result = DID_NO_CONNECT; /* or DID_ERROR ???*/
615 break;
616 case WD719X_SUE_ARSDONE:
617 dev_dbg(&wd->pdev->dev, "auto request sense\n");
618 if (regs.bytes.SCSI == 0)
619 result = DID_OK;
620 else
621 result = DID_PARITY;
622 break;
623 case WD719X_SUE_IGNORED:
624 dev_err(&wd->pdev->dev, "target id %d ignored command\n",
625 scb->cmd->device->id);
626 result = DID_NO_CONNECT;
627 break;
628 case WD719X_SUE_WRONGTAGS:
629 dev_err(&wd->pdev->dev, "reversed tags\n");
630 result = DID_ERROR;
631 break;
632 case WD719X_SUE_BADTAGS:
633 dev_err(&wd->pdev->dev, "tag type not supported by target\n");
634 result = DID_ERROR;
635 break;
636 case WD719X_SUE_NOSCAMID:
637 dev_err(&wd->pdev->dev, "no SCAM soft ID available\n");
638 result = DID_ERROR;
639 break;
640 default:
641 dev_warn(&wd->pdev->dev, "unknown SUE error code: 0x%x\n",
642 regs.bytes.SUE);
643 result = DID_ERROR;
644 break;
645 }
646 cmd = scb->cmd;
647
648 wd719x_finish_cmd(cmd, result);
649}
650
651static irqreturn_t wd719x_interrupt(int irq, void *dev_id)
652{
653 struct wd719x *wd = dev_id;
654 union wd719x_regs regs;
655 unsigned long flags;
656 u32 SCB_out;
657
658 spin_lock_irqsave(wd->sh->host_lock, flags);
659 /* read SCB pointer back from card */
660 SCB_out = wd719x_readl(wd, WD719X_AMR_SCB_OUT);
661 /* read all status info at once */
662 regs.all = cpu_to_le32(wd719x_readl(wd, WD719X_AMR_OP_CODE));
663
664 switch (regs.bytes.INT) {
665 case WD719X_INT_NONE:
666 spin_unlock_irqrestore(wd->sh->host_lock, flags);
667 return IRQ_NONE;
668 case WD719X_INT_LINKNOSTATUS:
669 dev_err(&wd->pdev->dev, "linked command completed with no status\n");
670 break;
671 case WD719X_INT_BADINT:
672 dev_err(&wd->pdev->dev, "unsolicited interrupt\n");
673 break;
674 case WD719X_INT_NOERRORS:
675 case WD719X_INT_LINKNOERRORS:
676 case WD719X_INT_ERRORSLOGGED:
677 case WD719X_INT_SPIDERFAILED:
678 /* was the cmd completed a direct or SCB command? */
679 if (regs.bytes.OPC == WD719X_CMD_PROCESS_SCB) {
680 struct wd719x_scb *scb;
681 list_for_each_entry(scb, &wd->active_scbs, list)
682 if (SCB_out == scb->phys)
683 break;
684 if (SCB_out == scb->phys)
685 wd719x_interrupt_SCB(wd, regs, scb);
686 else
687 dev_err(&wd->pdev->dev, "card returned invalid SCB pointer\n");
688 } else
689 dev_warn(&wd->pdev->dev, "direct command 0x%x completed\n",
690 regs.bytes.OPC);
691 break;
692 case WD719X_INT_PIOREADY:
693 dev_err(&wd->pdev->dev, "card indicates PIO data ready but we never use PIO\n");
694 /* interrupt will not be cleared until all data is read */
695 break;
696 default:
697 dev_err(&wd->pdev->dev, "unknown interrupt reason: %d\n",
698 regs.bytes.INT);
699
700 }
701 /* clear interrupt so another can happen */
702 wd719x_writeb(wd, WD719X_AMR_INT_STATUS, WD719X_INT_NONE);
703 spin_unlock_irqrestore(wd->sh->host_lock, flags);
704
705 return IRQ_HANDLED;
706}
707
708static void wd719x_eeprom_reg_read(struct eeprom_93cx6 *eeprom)
709{
710 struct wd719x *wd = eeprom->data;
711 u8 reg = wd719x_readb(wd, WD719X_PCI_GPIO_DATA);
712
713 eeprom->reg_data_out = reg & WD719X_EE_DO;
714}
715
716static void wd719x_eeprom_reg_write(struct eeprom_93cx6 *eeprom)
717{
718 struct wd719x *wd = eeprom->data;
719 u8 reg = 0;
720
721 if (eeprom->reg_data_in)
722 reg |= WD719X_EE_DI;
723 if (eeprom->reg_data_clock)
724 reg |= WD719X_EE_CLK;
725 if (eeprom->reg_chip_select)
726 reg |= WD719X_EE_CS;
727
728 wd719x_writeb(wd, WD719X_PCI_GPIO_DATA, reg);
729}
730
/*
 * Read the adapter configuration from the 93C46 EEPROM (bit-banged over
 * GPIO) into wd->params, so the RISC can download it on (re-)init.
 * Falls back to hard-coded defaults when the EEPROM signature is invalid.
 */
static void wd719x_read_eeprom(struct wd719x *wd)
{
	struct eeprom_93cx6 eeprom;
	u8 gpio;
	struct wd719x_eeprom_header header;

	eeprom.data = wd;
	eeprom.register_read = wd719x_eeprom_reg_read;
	eeprom.register_write = wd719x_eeprom_reg_write;
	eeprom.width = PCI_EEPROM_WIDTH_93C46;

	/* set all outputs to low */
	wd719x_writeb(wd, WD719X_PCI_GPIO_DATA, 0);
	/* configure GPIO pins */
	gpio = wd719x_readb(wd, WD719X_PCI_GPIO_CONTROL);
	/* GPIO outputs */
	gpio &= (~(WD719X_EE_CLK | WD719X_EE_DI | WD719X_EE_CS));
	/* GPIO input */
	gpio |= WD719X_EE_DO;
	wd719x_writeb(wd, WD719X_PCI_GPIO_CONTROL, gpio);

	/* read EEPROM header */
	eeprom_93cx6_multireadb(&eeprom, 0, (u8 *)&header, sizeof(header));

	if (header.sig1 == 'W' && header.sig2 == 'D')
		eeprom_93cx6_multireadb(&eeprom, header.cfg_offset,
					(u8 *)wd->params,
					sizeof(struct wd719x_host_param));
	else { /* default EEPROM values */
		dev_warn(&wd->pdev->dev, "EEPROM signature is invalid (0x%02x 0x%02x), using default values\n",
			 header.sig1, header.sig2);
		wd->params->ch_1_th	= 0x10;	/* 16 DWs = 64 B */
		wd->params->scsi_conf	= 0x4c;	/* 48ma, spue, parity check */
		wd->params->own_scsi_id	= 0x07;	/* ID 7, SCAM disabled */
		wd->params->sel_timeout	= 0x4d;	/* 250 ms */
		wd->params->sleep_timer	= 0x01;
		wd->params->cdb_size	= cpu_to_le16(0x5555);	/* all 6 B */
		wd->params->scsi_pad	= 0x1b;
		if (wd->type == WD719X_TYPE_7193) /* narrow card - disable */
			wd->params->wide = cpu_to_le32(0x00000000);
		else	/* initiate & respond to WIDE messages */
			wd->params->wide = cpu_to_le32(0xffffffff);
		wd->params->sync	= cpu_to_le32(0xffffffff);
		wd->params->soft_mask	= 0x00;	/* all disabled */
		wd->params->unsol_mask	= 0x00;	/* all disabled */
	}
	/* disable TAGGED messages — tagged queueing not implemented yet */
	wd->params->tag_en = cpu_to_le16(0x0000);
}
781
782/* Read card type from GPIO bits 1 and 3 */
783static enum wd719x_card_type wd719x_detect_type(struct wd719x *wd)
784{
785 u8 card = wd719x_readb(wd, WD719X_PCI_GPIO_CONTROL);
786
787 card |= WD719X_GPIO_ID_BITS;
788 wd719x_writeb(wd, WD719X_PCI_GPIO_CONTROL, card);
789 card = wd719x_readb(wd, WD719X_PCI_GPIO_DATA) & WD719X_GPIO_ID_BITS;
790 switch (card) {
791 case 0x08:
792 return WD719X_TYPE_7193;
793 case 0x02:
794 return WD719X_TYPE_7197;
795 case 0x00:
796 return WD719X_TYPE_7296;
797 default:
798 dev_warn(&wd->pdev->dev, "unknown card type 0x%x\n", card);
799 return WD719X_TYPE_UNKNOWN;
800 }
801}
802
/*
 * One-time adapter setup after PCI resources are mapped: allocate the
 * parameter block and hash table, install the IRQ handler, read the
 * EEPROM configuration and run the full chip init.  Returns 0 or a
 * negative errno; on failure everything allocated here is unwound.
 */
static int wd719x_board_found(struct Scsi_Host *sh)
{
	struct wd719x *wd = shost_priv(sh);
	char *card_types[] = { "Unknown card", "WD7193", "WD7197", "WD7296" };
	int ret;

	INIT_LIST_HEAD(&wd->active_scbs);
	INIT_LIST_HEAD(&wd->free_scbs);

	sh->base = pci_resource_start(wd->pdev, 0);

	wd->type = wd719x_detect_type(wd);

	wd->sh = sh;
	sh->irq = wd->pdev->irq;
	wd->fw_virt = NULL;	/* lazily allocated in wd719x_chip_init() */

	/* memory area for host (EEPROM) parameters */
	wd->params = pci_alloc_consistent(wd->pdev,
					  sizeof(struct wd719x_host_param),
					  &wd->params_phys);
	if (!wd->params) {
		dev_warn(&wd->pdev->dev, "unable to allocate parameter buffer\n");
		return -ENOMEM;
	}

	/* memory area for the RISC for hash table of outstanding requests */
	wd->hash_virt = pci_alloc_consistent(wd->pdev, WD719X_HASH_TABLE_SIZE,
					     &wd->hash_phys);
	if (!wd->hash_virt) {
		dev_warn(&wd->pdev->dev, "unable to allocate hash buffer\n");
		ret = -ENOMEM;
		goto fail_free_params;
	}

	/* shared IRQ: the handler tolerates interrupts that are not ours */
	ret = request_irq(wd->pdev->irq, wd719x_interrupt, IRQF_SHARED,
			  "wd719x", wd);
	if (ret) {
		dev_warn(&wd->pdev->dev, "unable to assign IRQ %d\n",
			 wd->pdev->irq);
		goto fail_free_hash;
	}

	/* read parameters from EEPROM */
	wd719x_read_eeprom(wd);

	ret = wd719x_chip_init(wd);
	if (ret)
		goto fail_free_irq;

	sh->this_id = wd->params->own_scsi_id & WD719X_EE_SCSI_ID_MASK;

	dev_info(&wd->pdev->dev, "%s at I/O 0x%lx, IRQ %u, SCSI ID %d\n",
		 card_types[wd->type], sh->base, sh->irq, sh->this_id);

	return 0;

fail_free_irq:
	free_irq(wd->pdev->irq, wd);
fail_free_hash:
	pci_free_consistent(wd->pdev, WD719X_HASH_TABLE_SIZE, wd->hash_virt,
			    wd->hash_phys);
fail_free_params:
	pci_free_consistent(wd->pdev, sizeof(struct wd719x_host_param),
			    wd->params, wd->params_phys);

	return ret;
}
871
/* SCSI host template; error handling is built on the direct-command
 * abort/reset helpers above.  Tagged queueing is not implemented, hence
 * cmd_per_lun of WD719X_CMD_PER_LUN (1). */
static struct scsi_host_template wd719x_template = {
	.name = "Western Digital 719x",
	.queuecommand = wd719x_queuecommand,
	.eh_abort_handler = wd719x_abort,
	.eh_device_reset_handler = wd719x_dev_reset,
	.eh_bus_reset_handler = wd719x_bus_reset,
	.eh_host_reset_handler = wd719x_host_reset,
	.bios_param = wd719x_biosparam,
	.proc_name = "wd719x",
	.can_queue = 255,
	.this_id = 7,
	.sg_tablesize = WD719X_SG,
	.cmd_per_lun = WD719X_CMD_PER_LUN,
	.use_clustering = ENABLE_CLUSTERING,
};
887
888static int wd719x_pci_probe(struct pci_dev *pdev, const struct pci_device_id *d)
889{
890 int err;
891 struct Scsi_Host *sh;
892 struct wd719x *wd;
893
894 err = pci_enable_device(pdev);
895 if (err)
896 goto fail;
897
898 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
899 dev_warn(&pdev->dev, "Unable to set 32-bit DMA mask\n");
900 goto disable_device;
901 }
902
903 err = pci_request_regions(pdev, "wd719x");
904 if (err)
905 goto disable_device;
906 pci_set_master(pdev);
907
908 err = -ENODEV;
909 if (pci_resource_len(pdev, 0) == 0)
910 goto release_region;
911
912 err = -ENOMEM;
913 sh = scsi_host_alloc(&wd719x_template, sizeof(struct wd719x));
914 if (!sh)
915 goto release_region;
916
917 wd = shost_priv(sh);
918 wd->base = pci_iomap(pdev, 0, 0);
919 if (!wd->base)
920 goto free_host;
921 wd->pdev = pdev;
922
923 err = wd719x_board_found(sh);
924 if (err)
925 goto unmap;
926
927 err = scsi_add_host(sh, &wd->pdev->dev);
928 if (err)
929 goto destroy;
930
931 scsi_scan_host(sh);
932
933 pci_set_drvdata(pdev, sh);
934 return 0;
935
936destroy:
937 wd719x_destroy(wd);
938unmap:
939 pci_iounmap(pdev, wd->base);
940free_host:
941 scsi_host_put(sh);
942release_region:
943 pci_release_regions(pdev);
944disable_device:
945 pci_disable_device(pdev);
946fail:
947 return err;
948}
949
950
/*
 * PCI remove: tear everything down in reverse probe order.  The host is
 * removed from the midlayer before the hardware is destroyed so no new
 * commands can arrive mid-teardown.
 */
static void wd719x_pci_remove(struct pci_dev *pdev)
{
	struct Scsi_Host *sh = pci_get_drvdata(pdev);
	struct wd719x *wd = shost_priv(sh);

	scsi_remove_host(sh);
	wd719x_destroy(wd);
	pci_iounmap(pdev, wd->base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	scsi_host_put(sh);
}
964
965static DEFINE_PCI_DEVICE_TABLE(wd719x_pci_table) = {
966 { PCI_DEVICE(PCI_VENDOR_ID_WD, 0x3296) },
967 {}
968};
969
970MODULE_DEVICE_TABLE(pci, wd719x_pci_table);
971
/* PCI driver glue: probe/remove wired to the functions above */
static struct pci_driver wd719x_pci_driver = {
	.name = "wd719x",
	.id_table = wd719x_pci_table,
	.probe = wd719x_pci_probe,
	.remove = wd719x_pci_remove,
};
978
/* module entry: register the PCI driver */
static int __init wd719x_init(void)
{
	return pci_register_driver(&wd719x_pci_driver);
}
983
/* module exit: unregister the PCI driver */
static void __exit wd719x_exit(void)
{
	pci_unregister_driver(&wd719x_pci_driver);
}
988
989module_init(wd719x_init);
990module_exit(wd719x_exit);
991
992MODULE_DESCRIPTION("Western Digital WD7193/7197/7296 SCSI driver");
993MODULE_AUTHOR("Ondrej Zary, Aaron Dewell, Juergen Gaertner");
994MODULE_LICENSE("GPL");
995MODULE_FIRMWARE("wd719x-wcs.bin");
996MODULE_FIRMWARE("wd719x-risc.bin");
diff --git a/drivers/scsi/wd719x.h b/drivers/scsi/wd719x.h
new file mode 100644
index 000000000000..185e30e4eb93
--- /dev/null
+++ b/drivers/scsi/wd719x.h
@@ -0,0 +1,249 @@
1#ifndef _WD719X_H_
2#define _WD719X_H_
3
#define WD719X_SG 255 /* Scatter/gather list size (number of segments) */
#define WD719X_CMD_PER_LUN 1 /* We should be able to do linked commands, but
			      * this is 1 for now to be safe. */
7
/* One scatter/gather segment descriptor, in the card's little-endian layout. */
struct wd719x_sglist {
	__le32 ptr;	/* segment address */
	__le32 length;	/* segment length in bytes */
} __packed;
12
/* Board variants supported by this driver. */
enum wd719x_card_type {
	WD719X_TYPE_UNKNOWN = 0,
	WD719X_TYPE_7193,
	WD719X_TYPE_7197,
	WD719X_TYPE_7296,
};
19
/*
 * The card's four status bytes, readable either as one 32-bit word
 * or as the individual registers.
 */
union wd719x_regs {
	__le32 all;	/* All Status at once */
	struct {
		u8 OPC;		/* Opcode register */
		u8 SCSI;	/* SCSI Errors */
		u8 SUE;		/* Spider unique Errors */
		u8 INT;		/* Interrupt Status */
	} bytes;
};
29
/*
 * Spider Command Block (SCB): the per-command structure exchanged with the
 * card.  Offsets 00-43 are filled in by the host; 44-63 are returned by the
 * card.  The fields after flags[] are driver-private bookkeeping the card
 * never sees, so the byte offsets in the comments stop there.
 */
struct wd719x_scb {
	__le32 Int_SCB;	/* 00-03 Internal SCB link pointer (must be cleared) */
	u8 SCB_opcode;	/* 04 SCB Command opcode */
	u8 CDB_tag;	/* 05 SCSI Tag byte for CDB queues (0 if untagged) */
	u8 lun;		/* 06 SCSI LUN */
	u8 devid;	/* 07 SCSI Device ID */
	u8 CDB[16];	/* 08-23 SCSI CDB (16 bytes as defined by ANSI spec.) */
	__le32 data_p;	/* 24-27 Data transfer address (or SG list address) */
	__le32 data_length; /* 28-31 Data transfer Length (or SG list length) */
	__le32 CDB_link;	/* 32-35 SCSI CDB Link Ptr */
	__le32 sense_buf;	/* 36-39 Auto request sense buffer address */
	u8 sense_buf_length;	/* 40 Auto request sense transfer length */
	u8 reserved;		/* 41 reserved */
	u8 SCB_options;		/* 42 SCB-options */
	u8 SCB_tag_msg;		/* 43 Tagged messages options */
	/* Not filled in by host */
	__le32 req_ptr;	/* 44-47 Ptr to Host Request returned on interrupt */
	u8 host_opcode;	/* 48 Host Command Opcode (same as AMR_00) */
	u8 scsi_stat;	/* 49 SCSI Status returned */
	u8 ret_error;	/* 50 SPIDER Unique Error Code returned (SUE) */
	u8 int_stat;	/* 51 Message byte / Interrupt Status byte returned */
	__le32 transferred;	/* 52-55 Bytes Transferred */
	u8 last_trans[3];	/* 56-58 Bytes Transferred in last session */
	u8 length;		/* 59 SCSI Messages Length (1-8) */
	u8 sync_offset;		/* 60 Synchronous offset */
	u8 sync_rate;		/* 61 Synchronous rate */
	u8 flags[2];	/* 62-63 SCB specific flags (local to each thread) */
	/* everything below is for driver use (not used by card) */
	dma_addr_t phys;	/* bus address of the SCB */
	struct scsi_cmnd *cmd;	/* a copy of the pointer we were passed */
	struct list_head list;
	struct wd719x_sglist sg_list[WD719X_SG] __aligned(8); /* SG list */
} __packed;
64
/* Per-adapter driver state, stored in the Scsi_Host private data. */
struct wd719x {
	struct Scsi_Host *sh;	/* pointer to host structure */
	struct pci_dev *pdev;
	void __iomem *base;	/* MMIO registers (BAR 0 mapping) */
	enum wd719x_card_type type; /* type of card */
	void *fw_virt;		/* firmware buffer CPU address */
	dma_addr_t fw_phys;	/* firmware buffer bus address */
	size_t fw_size;		/* firmware buffer size */
	struct wd719x_host_param *params; /* host parameters (EEPROM) */
	dma_addr_t params_phys;	/* host parameters bus address */
	void *hash_virt;	/* hash table CPU address */
	dma_addr_t hash_phys;	/* hash table bus address */
	struct list_head active_scbs;	/* SCBs submitted to the card */
	struct list_head free_scbs;	/* SCBs available for reuse */
};
80
/* timeout delays in microsecs */
#define WD719X_WAIT_FOR_CMD_READY	500
#define WD719X_WAIT_FOR_RISC		2000
#define WD719X_WAIT_FOR_SCSI_RESET	3000000

/*
 * Command opcodes written to the command register (AMR_COMMAND).
 * All commands except 0x00 generate an interrupt.
 */
#define WD719X_CMD_READY	0x00 /* Command register ready (or noop) */
#define WD719X_CMD_INIT_RISC	0x01 /* Initialize RISC */
/* 0x02 is reserved */
#define WD719X_CMD_BUSRESET	0x03 /* Assert SCSI bus reset */
#define WD719X_CMD_READ_FIRMVER	0x04 /* Read the Firmware Revision */
#define WD719X_CMD_ECHO_BYTES	0x05 /* Echo command bytes (DW) */
/* 0x06 is reserved */
/* 0x07 is reserved */
#define WD719X_CMD_GET_PARAM	0x08 /* Get programmable parameters */
#define WD719X_CMD_SET_PARAM	0x09 /* Set programmable parameters */
#define WD719X_CMD_SLEEP	0x0a /* Put SPIDER to sleep */
#define WD719X_CMD_READ_INIT	0x0b /* Read initialization parameters */
#define WD719X_CMD_RESTORE_INIT	0x0c /* Restore initialization parameters */
/* 0x0d is reserved */
/* 0x0e is reserved */
/* 0x0f is reserved */
#define WD719X_CMD_ABORT_TAG	0x10 /* Send Abort tag message to target */
#define WD719X_CMD_ABORT	0x11 /* Send Abort message to target */
#define WD719X_CMD_RESET	0x12 /* Send Reset message to target */
#define WD719X_CMD_INIT_SCAM	0x13 /* Initiate SCAM */
#define WD719X_CMD_GET_SYNC	0x14 /* Get synchronous rates */
#define WD719X_CMD_SET_SYNC	0x15 /* Set synchronous rates */
#define WD719X_CMD_GET_WIDTH	0x16 /* Get SCSI bus width */
#define WD719X_CMD_SET_WIDTH	0x17 /* Set SCSI bus width */
#define WD719X_CMD_GET_TAGS	0x18 /* Get tag flags */
#define WD719X_CMD_SET_TAGS	0x19 /* Set tag flags */
#define WD719X_CMD_GET_PARAM2	0x1a /* Get programmable params (format 2) */
#define WD719X_CMD_SET_PARAM2	0x1b /* Set programmable params (format 2) */
/* Commands with request pointers (mailbox) */
#define WD719X_CMD_PROCESS_SCB	0x80 /* Process SCSI Control Block (SCB) */
/* No interrupt generated on acceptance of SCB pointer */
118
/* interrupt status defines (AMR_INT_STATUS) */
#define WD719X_INT_NONE		0x00 /* No interrupt pending */
#define WD719X_INT_NOERRORS	0x01 /* Command completed with no errors */
#define WD719X_INT_LINKNOERRORS	0x02 /* link cmd completed with no errors */
#define WD719X_INT_LINKNOSTATUS	0x03 /* link cmd completed with no flag set */
#define WD719X_INT_ERRORSLOGGED	0x04 /* cmd completed with errors logged */
#define WD719X_INT_SPIDERFAILED	0x05 /* cmd failed without valid SCSI status */
#define WD719X_INT_BADINT	0x80 /* unsolicited interrupt */
#define WD719X_INT_PIOREADY	0xf0 /* data ready for PIO output */

/* Spider Unique Error Codes (SUE) */
#define WD719X_SUE_NOERRORS	0x00 /* No errors detected by SPIDER */
#define WD719X_SUE_REJECTED	0x01 /* Command Rejected (bad opcode/param) */
#define WD719X_SUE_SCBQFULL	0x02 /* SCB queue full */
/* 0x03 is reserved */
#define WD719X_SUE_TERM		0x04 /* Host terminated SCB via primitive cmd */
#define WD719X_SUE_CHAN1PAR	0x05 /* PCI Channel 1 parity error occurred */
#define WD719X_SUE_CHAN1ABORT	0x06 /* PCI Channel 1 system abort occurred */
#define WD719X_SUE_CHAN23PAR	0x07 /* PCI Channel 2/3 parity error occurred */
#define WD719X_SUE_CHAN23ABORT	0x08 /* PCI Channel 2/3 system abort occurred */
#define WD719X_SUE_TIMEOUT	0x10 /* Selection/reselection timeout */
#define WD719X_SUE_RESET	0x11 /* SCSI bus reset occurred */
#define WD719X_SUE_BUSERROR	0x12 /* SCSI bus error */
#define WD719X_SUE_WRONGWAY	0x13 /* Wrong data transfer dir set by target */
#define WD719X_SUE_BADPHASE	0x14 /* SCSI phase illegal or unexpected */
#define WD719X_SUE_TOOLONG	0x15 /* target requested too much data */
#define WD719X_SUE_BUSFREE	0x16 /* Unexpected SCSI bus free */
#define WD719X_SUE_ARSDONE	0x17 /* Auto request sense executed */
#define WD719X_SUE_IGNORED	0x18 /* SCSI message was ignored by target */
#define WD719X_SUE_WRONGTAGS	0x19 /* Tagged SCB & tags off (or vice versa) */
#define WD719X_SUE_BADTAGS	0x1a /* Wrong tag message type for target */
#define WD719X_SUE_NOSCAMID	0x1b /* No SCAM soft ID available */
151
/* code sizes */
#define WD719X_HASH_TABLE_SIZE	4096 /* bytes; see hash_virt in struct wd719x */

/* Advanced Mode Registers */
/* Regs 0x00..0x1f are for Advanced Mode of the card (RISC is running). */
#define WD719X_AMR_COMMAND		0x00
#define WD719X_AMR_CMD_PARAM		0x01
#define WD719X_AMR_CMD_PARAM_2		0x02
#define WD719X_AMR_CMD_PARAM_3		0x03
#define WD719X_AMR_SCB_IN		0x04

#define WD719X_AMR_BIOS_SHARE_INT	0x0f

#define WD719X_AMR_SCB_OUT		0x18
#define WD719X_AMR_OP_CODE		0x1c
#define WD719X_AMR_SCSI_STATUS		0x1d
#define WD719X_AMR_SCB_ERROR		0x1e
#define WD719X_AMR_INT_STATUS		0x1f

#define WD719X_DISABLE_INT	0x80

/* SCB flags */
#define WD719X_SCB_FLAGS_CHECK_DIRECTION	0x01
#define WD719X_SCB_FLAGS_PCI_TO_SCSI		0x02
#define WD719X_SCB_FLAGS_AUTO_REQUEST_SENSE	0x10
#define WD719X_SCB_FLAGS_DO_SCATTER_GATHER	0x20
#define WD719X_SCB_FLAGS_NO_DISCONNECT		0x40

/* PCI Registers used for reset, initial code download */
/* Regs 0x20..0x3f are for Normal (DOS) mode (RISC is asleep). */
#define WD719X_PCI_GPIO_CONTROL		0x3C
#define WD719X_PCI_GPIO_DATA		0x3D
#define WD719X_PCI_PORT_RESET		0x3E
#define WD719X_PCI_MODE_SELECT		0x3F

#define WD719X_PCI_EXTERNAL_ADDR	0x60
#define WD719X_PCI_INTERNAL_ADDR	0x64
#define WD719X_PCI_DMA_TRANSFER_SIZE	0x66
#define WD719X_PCI_CHANNEL2_3CMD	0x68
#define WD719X_PCI_CHANNEL2_3STATUS	0x69

#define WD719X_GPIO_ID_BITS		0x0a
#define WD719X_PRAM_BASE_ADDR		0x00

/* codes written to or read from the card */
#define WD719X_PCI_RESET		0x01
#define WD719X_ENABLE_ADVANCE_MODE	0x01

#define WD719X_START_CHANNEL2_3DMA	0x17
#define WD719X_START_CHANNEL2_3DONE	0x01
#define WD719X_START_CHANNEL2_3ABORT	0x20

/* 33C296 GPIO bits for EEPROM pins (bit-banged serial EEPROM interface) */
#define WD719X_EE_DI	(1 << 1)
#define WD719X_EE_CS	(1 << 2)
#define WD719X_EE_CLK	(1 << 3)
#define WD719X_EE_DO	(1 << 4)
209
210/* EEPROM contents */
/* EEPROM contents: header locating the config and setup blocks. */
struct wd719x_eeprom_header {
	u8 sig1;	/* signature bytes identifying a valid EEPROM */
	u8 sig2;
	u8 version;
	u8 checksum;
	u8 cfg_offset;	/* offset/size of the configuration block */
	u8 cfg_size;
	u8 setup_offset; /* offset/size of the setup block */
	u8 setup_size;
} __packed;
221
/* Byte offsets of the wd719x_eeprom_header fields within the EEPROM. */
#define WD719X_EE_SIG1		0
#define WD719X_EE_SIG2		1
#define WD719X_EE_VERSION	2
#define WD719X_EE_CHECKSUM	3
#define WD719X_EE_CFG_OFFSET	4
#define WD719X_EE_CFG_SIZE	5
#define WD719X_EE_SETUP_OFFSET	6
#define WD719X_EE_SETUP_SIZE	7

#define WD719X_EE_SCSI_ID_MASK	0xf
232
233/* SPIDER Host Parameters Block (=EEPROM configuration block) */
234struct wd719x_host_param {
235 u8 ch_1_th; /* FIFO threshold */
236 u8 scsi_conf; /* SCSI configuration */
237 u8 own_scsi_id; /* controller SCSI ID */
238 u8 sel_timeout; /* selection timeout*/
239 u8 sleep_timer; /* seep timer */
240 __le16 cdb_size;/* CDB size groups */
241 __le16 tag_en; /* Tag msg enables (ID 0-15) */
242 u8 scsi_pad; /* SCSI pad control */
243 __le32 wide; /* WIDE msg options (ID 0-15) */
244 __le32 sync; /* SYNC msg options (ID 0-15) */
245 u8 soft_mask; /* soft error mask */
246 u8 unsol_mask; /* unsolicited error mask */
247} __packed;
248
249#endif /* _WD719X_H_ */
diff --git a/drivers/sh/pm_runtime.c b/drivers/sh/pm_runtime.c
index fe2c2d595f59..f3ee439d6f0e 100644
--- a/drivers/sh/pm_runtime.c
+++ b/drivers/sh/pm_runtime.c
@@ -20,7 +20,7 @@
20#include <linux/bitmap.h> 20#include <linux/bitmap.h>
21#include <linux/slab.h> 21#include <linux/slab.h>
22 22
23#ifdef CONFIG_PM_RUNTIME 23#ifdef CONFIG_PM
24static int sh_pm_runtime_suspend(struct device *dev) 24static int sh_pm_runtime_suspend(struct device *dev)
25{ 25{
26 int ret; 26 int ret;
@@ -68,7 +68,7 @@ static struct dev_pm_domain default_pm_domain = {
68 68
69#define DEFAULT_PM_DOMAIN_PTR NULL 69#define DEFAULT_PM_DOMAIN_PTR NULL
70 70
71#endif /* CONFIG_PM_RUNTIME */ 71#endif /* CONFIG_PM */
72 72
73static struct pm_clk_notifier_block platform_bus_notifier = { 73static struct pm_clk_notifier_block platform_bus_notifier = {
74 .pm_domain = DEFAULT_PM_DOMAIN_PTR, 74 .pm_domain = DEFAULT_PM_DOMAIN_PTR,
diff --git a/drivers/soc/ti/knav_qmss.h b/drivers/soc/ti/knav_qmss.h
index bc9dcc8cc3ce..51da2341280d 100644
--- a/drivers/soc/ti/knav_qmss.h
+++ b/drivers/soc/ti/knav_qmss.h
@@ -348,15 +348,15 @@ struct knav_range_info {
348 list_for_each_entry(region, &kdev->regions, list) 348 list_for_each_entry(region, &kdev->regions, list)
349 349
350#define first_region(kdev) \ 350#define first_region(kdev) \
351 list_first_entry(&kdev->regions, \ 351 list_first_entry_or_null(&kdev->regions, \
352 struct knav_region, list) 352 struct knav_region, list)
353 353
354#define for_each_queue_range(kdev, range) \ 354#define for_each_queue_range(kdev, range) \
355 list_for_each_entry(range, &kdev->queue_ranges, list) 355 list_for_each_entry(range, &kdev->queue_ranges, list)
356 356
357#define first_queue_range(kdev) \ 357#define first_queue_range(kdev) \
358 list_first_entry(&kdev->queue_ranges, \ 358 list_first_entry_or_null(&kdev->queue_ranges, \
359 struct knav_range_info, list) 359 struct knav_range_info, list)
360 360
361#define for_each_pool(kdev, pool) \ 361#define for_each_pool(kdev, pool) \
362 list_for_each_entry(pool, &kdev->pools, list) 362 list_for_each_entry(pool, &kdev->pools, list)
diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c
index 0a2c8634c48b..9b8dd6732681 100644
--- a/drivers/soc/ti/knav_qmss_queue.c
+++ b/drivers/soc/ti/knav_qmss_queue.c
@@ -785,7 +785,7 @@ void *knav_pool_create(const char *name,
785 dev_err(kdev->dev, "out of descs in region(%d) for pool(%s)\n", 785 dev_err(kdev->dev, "out of descs in region(%d) for pool(%s)\n",
786 region_id, name); 786 region_id, name);
787 ret = -ENOMEM; 787 ret = -ENOMEM;
788 goto err; 788 goto err_unlock;
789 } 789 }
790 790
791 /* Region maintains a sorted (by region offset) list of pools 791 /* Region maintains a sorted (by region offset) list of pools
@@ -815,15 +815,16 @@ void *knav_pool_create(const char *name,
815 dev_err(kdev->dev, "pool(%s) create failed: fragmented desc pool in region(%d)\n", 815 dev_err(kdev->dev, "pool(%s) create failed: fragmented desc pool in region(%d)\n",
816 name, region_id); 816 name, region_id);
817 ret = -ENOMEM; 817 ret = -ENOMEM;
818 goto err; 818 goto err_unlock;
819 } 819 }
820 820
821 mutex_unlock(&knav_dev_lock); 821 mutex_unlock(&knav_dev_lock);
822 kdesc_fill_pool(pool); 822 kdesc_fill_pool(pool);
823 return pool; 823 return pool;
824 824
825err: 825err_unlock:
826 mutex_unlock(&knav_dev_lock); 826 mutex_unlock(&knav_dev_lock);
827err:
827 kfree(pool->name); 828 kfree(pool->name);
828 devm_kfree(kdev->dev, pool); 829 devm_kfree(kdev->dev, pool);
829 return ERR_PTR(ret); 830 return ERR_PTR(ret);
@@ -1305,14 +1306,14 @@ static void knav_free_queue_ranges(struct knav_device *kdev)
1305static void knav_queue_free_regions(struct knav_device *kdev) 1306static void knav_queue_free_regions(struct knav_device *kdev)
1306{ 1307{
1307 struct knav_region *region; 1308 struct knav_region *region;
1308 struct knav_pool *pool; 1309 struct knav_pool *pool, *tmp;
1309 unsigned size; 1310 unsigned size;
1310 1311
1311 for (;;) { 1312 for (;;) {
1312 region = first_region(kdev); 1313 region = first_region(kdev);
1313 if (!region) 1314 if (!region)
1314 break; 1315 break;
1315 list_for_each_entry(pool, &region->pools, region_inst) 1316 list_for_each_entry_safe(pool, tmp, &region->pools, region_inst)
1316 knav_pool_destroy(pool); 1317 knav_pool_destroy(pool);
1317 1318
1318 size = region->virt_end - region->virt_start; 1319 size = region->virt_end - region->virt_start;
@@ -1639,7 +1640,7 @@ static int knav_queue_init_queues(struct knav_device *kdev)
1639 size = (1 << kdev->inst_shift) * kdev->num_queues_in_use; 1640 size = (1 << kdev->inst_shift) * kdev->num_queues_in_use;
1640 kdev->instances = devm_kzalloc(kdev->dev, size, GFP_KERNEL); 1641 kdev->instances = devm_kzalloc(kdev->dev, size, GFP_KERNEL);
1641 if (!kdev->instances) 1642 if (!kdev->instances)
1642 return -1; 1643 return -ENOMEM;
1643 1644
1644 for_each_queue_range(kdev, range) { 1645 for_each_queue_range(kdev, range) {
1645 if (range->ops && range->ops->init_range) 1646 if (range->ops && range->ops->init_range)
diff --git a/drivers/soc/versatile/Kconfig b/drivers/soc/versatile/Kconfig
index bf5ee9c85330..a928a7fc6be4 100644
--- a/drivers/soc/versatile/Kconfig
+++ b/drivers/soc/versatile/Kconfig
@@ -1,6 +1,15 @@
1# 1#
2# ARM Versatile SoC drivers 2# ARM Versatile SoC drivers
3# 3#
4config SOC_INTEGRATOR_CM
5 bool "SoC bus device for the ARM Integrator platform core modules"
6 depends on ARCH_INTEGRATOR
7 select SOC_BUS
8 help
9 Include support for the SoC bus on the ARM Integrator platform
10 core modules providing some sysfs information about the ASIC
11 variant.
12
4config SOC_REALVIEW 13config SOC_REALVIEW
5 bool "SoC bus device for the ARM RealView platforms" 14 bool "SoC bus device for the ARM RealView platforms"
6 depends on ARCH_REALVIEW 15 depends on ARCH_REALVIEW
diff --git a/drivers/soc/versatile/Makefile b/drivers/soc/versatile/Makefile
index ad547435648e..cf612fe3a659 100644
--- a/drivers/soc/versatile/Makefile
+++ b/drivers/soc/versatile/Makefile
@@ -1 +1,2 @@
1obj-$(CONFIG_SOC_INTEGRATOR_CM) += soc-integrator.o
1obj-$(CONFIG_SOC_REALVIEW) += soc-realview.o 2obj-$(CONFIG_SOC_REALVIEW) += soc-realview.o
diff --git a/drivers/soc/versatile/soc-integrator.c b/drivers/soc/versatile/soc-integrator.c
new file mode 100644
index 000000000000..a5d7d39ae0ad
--- /dev/null
+++ b/drivers/soc/versatile/soc-integrator.c
@@ -0,0 +1,155 @@
1/*
2 * Copyright (C) 2014 Linaro Ltd.
3 *
4 * Author: Linus Walleij <linus.walleij@linaro.org>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2, as
8 * published by the Free Software Foundation.
9 *
10 */
11#include <linux/init.h>
12#include <linux/io.h>
13#include <linux/slab.h>
14#include <linux/sys_soc.h>
15#include <linux/platform_device.h>
16#include <linux/mfd/syscon.h>
17#include <linux/regmap.h>
18#include <linux/of.h>
19
20#define INTEGRATOR_HDR_ID_OFFSET 0x00
21
22static u32 integrator_coreid;
23
24static const struct of_device_id integrator_cm_match[] = {
25 { .compatible = "arm,core-module-integrator", },
26 { }
27};
28
29static const char *integrator_arch_str(u32 id)
30{
31 switch ((id >> 16) & 0xff) {
32 case 0x00:
33 return "ASB little-endian";
34 case 0x01:
35 return "AHB little-endian";
36 case 0x03:
37 return "AHB-Lite system bus, bi-endian";
38 case 0x04:
39 return "AHB";
40 case 0x08:
41 return "AHB system bus, ASB processor bus";
42 default:
43 return "Unknown";
44 }
45}
46
47static const char *integrator_fpga_str(u32 id)
48{
49 switch ((id >> 12) & 0xf) {
50 case 0x01:
51 return "XC4062";
52 case 0x02:
53 return "XC4085";
54 case 0x03:
55 return "XVC600";
56 case 0x04:
57 return "EPM7256AE (Altera PLD)";
58 default:
59 return "Unknown";
60 }
61}
62
63static ssize_t integrator_get_manf(struct device *dev,
64 struct device_attribute *attr,
65 char *buf)
66{
67 return sprintf(buf, "%02x\n", integrator_coreid >> 24);
68}
69
70static struct device_attribute integrator_manf_attr =
71 __ATTR(manufacturer, S_IRUGO, integrator_get_manf, NULL);
72
73static ssize_t integrator_get_arch(struct device *dev,
74 struct device_attribute *attr,
75 char *buf)
76{
77 return sprintf(buf, "%s\n", integrator_arch_str(integrator_coreid));
78}
79
80static struct device_attribute integrator_arch_attr =
81 __ATTR(arch, S_IRUGO, integrator_get_arch, NULL);
82
83static ssize_t integrator_get_fpga(struct device *dev,
84 struct device_attribute *attr,
85 char *buf)
86{
87 return sprintf(buf, "%s\n", integrator_fpga_str(integrator_coreid));
88}
89
90static struct device_attribute integrator_fpga_attr =
91 __ATTR(fpga, S_IRUGO, integrator_get_fpga, NULL);
92
93static ssize_t integrator_get_build(struct device *dev,
94 struct device_attribute *attr,
95 char *buf)
96{
97 return sprintf(buf, "%02x\n", (integrator_coreid >> 4) & 0xFF);
98}
99
100static struct device_attribute integrator_build_attr =
101 __ATTR(build, S_IRUGO, integrator_get_build, NULL);
102
103static int __init integrator_soc_init(void)
104{
105 static struct regmap *syscon_regmap;
106 struct soc_device *soc_dev;
107 struct soc_device_attribute *soc_dev_attr;
108 struct device_node *np;
109 struct device *dev;
110 u32 val;
111 int ret;
112
113 np = of_find_matching_node(NULL, integrator_cm_match);
114 if (!np)
115 return -ENODEV;
116
117 syscon_regmap = syscon_node_to_regmap(np);
118 if (IS_ERR(syscon_regmap))
119 return PTR_ERR(syscon_regmap);
120
121 ret = regmap_read(syscon_regmap, INTEGRATOR_HDR_ID_OFFSET,
122 &val);
123 if (ret)
124 return -ENODEV;
125 integrator_coreid = val;
126
127 soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
128 if (!soc_dev_attr)
129 return -ENOMEM;
130
131 soc_dev_attr->soc_id = "Integrator";
132 soc_dev_attr->machine = "Integrator";
133 soc_dev_attr->family = "Versatile";
134 soc_dev = soc_device_register(soc_dev_attr);
135 if (IS_ERR(soc_dev)) {
136 kfree(soc_dev_attr);
137 return -ENODEV;
138 }
139 dev = soc_device_to_device(soc_dev);
140
141 device_create_file(dev, &integrator_manf_attr);
142 device_create_file(dev, &integrator_arch_attr);
143 device_create_file(dev, &integrator_fpga_attr);
144 device_create_file(dev, &integrator_build_attr);
145
146 dev_info(dev, "Detected ARM core module:\n");
147 dev_info(dev, " Manufacturer: %02x\n", (val >> 24));
148 dev_info(dev, " Architecture: %s\n", integrator_arch_str(val));
149 dev_info(dev, " FPGA: %s\n", integrator_fpga_str(val));
150 dev_info(dev, " Build: %02x\n", (val >> 4) & 0xFF);
151 dev_info(dev, " Rev: %c\n", ('A' + (val & 0x03)));
152
153 return 0;
154}
155device_initcall(integrator_soc_init);
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 84e7c9e6ccef..99829985c1a1 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -225,6 +225,13 @@ config SPI_GPIO
225 GPIO operations, you should be able to leverage that for better 225 GPIO operations, you should be able to leverage that for better
226 speed with a custom version of this driver; see the source code. 226 speed with a custom version of this driver; see the source code.
227 227
228config SPI_IMG_SPFI
229 tristate "IMG SPFI controller"
230 depends on MIPS || COMPILE_TEST
231 help
232 This enables support for the SPFI master controller found on
233 IMG SoCs.
234
228config SPI_IMX 235config SPI_IMX
229 tristate "Freescale i.MX SPI controllers" 236 tristate "Freescale i.MX SPI controllers"
230 depends on ARCH_MXC || COMPILE_TEST 237 depends on ARCH_MXC || COMPILE_TEST
@@ -301,6 +308,14 @@ config SPI_FSL_ESPI
301 From MPC8536, 85xx platform uses the controller, and all P10xx, 308 From MPC8536, 85xx platform uses the controller, and all P10xx,
302 P20xx, P30xx,P40xx, P50xx uses this controller. 309 P20xx, P30xx,P40xx, P50xx uses this controller.
303 310
311config SPI_MESON_SPIFC
312 tristate "Amlogic Meson SPIFC controller"
313 depends on ARCH_MESON || COMPILE_TEST
314 select REGMAP_MMIO
315 help
316 This enables master mode support for the SPIFC (SPI flash
317 controller) available in Amlogic Meson SoCs.
318
304config SPI_OC_TINY 319config SPI_OC_TINY
305 tristate "OpenCores tiny SPI" 320 tristate "OpenCores tiny SPI"
306 depends on GPIOLIB 321 depends on GPIOLIB
@@ -444,7 +459,7 @@ config SPI_S3C24XX_FIQ
444 459
445config SPI_S3C64XX 460config SPI_S3C64XX
446 tristate "Samsung S3C64XX series type SPI" 461 tristate "Samsung S3C64XX series type SPI"
447 depends on PLAT_SAMSUNG 462 depends on (PLAT_SAMSUNG || ARCH_EXYNOS)
448 select S3C64XX_PL080 if ARCH_S3C64XX 463 select S3C64XX_PL080 if ARCH_S3C64XX
449 help 464 help
450 SPI driver for Samsung S3C64XX and newer SoCs. 465 SPI driver for Samsung S3C64XX and newer SoCs.
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 78f24ca36fcf..6b9d2ac629cc 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -40,8 +40,10 @@ obj-$(CONFIG_SPI_FSL_LIB) += spi-fsl-lib.o
40obj-$(CONFIG_SPI_FSL_ESPI) += spi-fsl-espi.o 40obj-$(CONFIG_SPI_FSL_ESPI) += spi-fsl-espi.o
41obj-$(CONFIG_SPI_FSL_SPI) += spi-fsl-spi.o 41obj-$(CONFIG_SPI_FSL_SPI) += spi-fsl-spi.o
42obj-$(CONFIG_SPI_GPIO) += spi-gpio.o 42obj-$(CONFIG_SPI_GPIO) += spi-gpio.o
43obj-$(CONFIG_SPI_IMG_SPFI) += spi-img-spfi.o
43obj-$(CONFIG_SPI_IMX) += spi-imx.o 44obj-$(CONFIG_SPI_IMX) += spi-imx.o
44obj-$(CONFIG_SPI_LM70_LLP) += spi-lm70llp.o 45obj-$(CONFIG_SPI_LM70_LLP) += spi-lm70llp.o
46obj-$(CONFIG_SPI_MESON_SPIFC) += spi-meson-spifc.o
45obj-$(CONFIG_SPI_MPC512x_PSC) += spi-mpc512x-psc.o 47obj-$(CONFIG_SPI_MPC512x_PSC) += spi-mpc512x-psc.o
46obj-$(CONFIG_SPI_MPC52xx_PSC) += spi-mpc52xx-psc.o 48obj-$(CONFIG_SPI_MPC52xx_PSC) += spi-mpc52xx-psc.o
47obj-$(CONFIG_SPI_MPC52xx) += spi-mpc52xx.o 49obj-$(CONFIG_SPI_MPC52xx) += spi-mpc52xx.o
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index 113c83f44b5c..e4193ccc4970 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -26,6 +26,7 @@
26#include <linux/io.h> 26#include <linux/io.h>
27#include <linux/gpio.h> 27#include <linux/gpio.h>
28#include <linux/pinctrl/consumer.h> 28#include <linux/pinctrl/consumer.h>
29#include <linux/pm_runtime.h>
29 30
30/* SPI register offsets */ 31/* SPI register offsets */
31#define SPI_CR 0x0000 32#define SPI_CR 0x0000
@@ -191,6 +192,8 @@
191 192
192#define SPI_DMA_TIMEOUT (msecs_to_jiffies(1000)) 193#define SPI_DMA_TIMEOUT (msecs_to_jiffies(1000))
193 194
195#define AUTOSUSPEND_TIMEOUT 2000
196
194struct atmel_spi_dma { 197struct atmel_spi_dma {
195 struct dma_chan *chan_rx; 198 struct dma_chan *chan_rx;
196 struct dma_chan *chan_tx; 199 struct dma_chan *chan_tx;
@@ -414,23 +417,6 @@ static int atmel_spi_dma_slave_config(struct atmel_spi *as,
414 return err; 417 return err;
415} 418}
416 419
417static bool filter(struct dma_chan *chan, void *pdata)
418{
419 struct atmel_spi_dma *sl_pdata = pdata;
420 struct at_dma_slave *sl;
421
422 if (!sl_pdata)
423 return false;
424
425 sl = &sl_pdata->dma_slave;
426 if (sl->dma_dev == chan->device->dev) {
427 chan->private = sl;
428 return true;
429 } else {
430 return false;
431 }
432}
433
434static int atmel_spi_configure_dma(struct atmel_spi *as) 420static int atmel_spi_configure_dma(struct atmel_spi *as)
435{ 421{
436 struct dma_slave_config slave_config; 422 struct dma_slave_config slave_config;
@@ -441,19 +427,24 @@ static int atmel_spi_configure_dma(struct atmel_spi *as)
441 dma_cap_zero(mask); 427 dma_cap_zero(mask);
442 dma_cap_set(DMA_SLAVE, mask); 428 dma_cap_set(DMA_SLAVE, mask);
443 429
444 as->dma.chan_tx = dma_request_slave_channel_compat(mask, filter, 430 as->dma.chan_tx = dma_request_slave_channel_reason(dev, "tx");
445 &as->dma, 431 if (IS_ERR(as->dma.chan_tx)) {
446 dev, "tx"); 432 err = PTR_ERR(as->dma.chan_tx);
447 if (!as->dma.chan_tx) { 433 if (err == -EPROBE_DEFER) {
434 dev_warn(dev, "no DMA channel available at the moment\n");
435 return err;
436 }
448 dev_err(dev, 437 dev_err(dev,
449 "DMA TX channel not available, SPI unable to use DMA\n"); 438 "DMA TX channel not available, SPI unable to use DMA\n");
450 err = -EBUSY; 439 err = -EBUSY;
451 goto error; 440 goto error;
452 } 441 }
453 442
454 as->dma.chan_rx = dma_request_slave_channel_compat(mask, filter, 443 /*
455 &as->dma, 444 * No reason to check EPROBE_DEFER here since we have already requested
456 dev, "rx"); 445 * tx channel. If it fails here, it's for another reason.
446 */
447 as->dma.chan_rx = dma_request_slave_channel(dev, "rx");
457 448
458 if (!as->dma.chan_rx) { 449 if (!as->dma.chan_rx) {
459 dev_err(dev, 450 dev_err(dev,
@@ -474,7 +465,7 @@ static int atmel_spi_configure_dma(struct atmel_spi *as)
474error: 465error:
475 if (as->dma.chan_rx) 466 if (as->dma.chan_rx)
476 dma_release_channel(as->dma.chan_rx); 467 dma_release_channel(as->dma.chan_rx);
477 if (as->dma.chan_tx) 468 if (!IS_ERR(as->dma.chan_tx))
478 dma_release_channel(as->dma.chan_tx); 469 dma_release_channel(as->dma.chan_tx);
479 return err; 470 return err;
480} 471}
@@ -482,11 +473,9 @@ error:
482static void atmel_spi_stop_dma(struct atmel_spi *as) 473static void atmel_spi_stop_dma(struct atmel_spi *as)
483{ 474{
484 if (as->dma.chan_rx) 475 if (as->dma.chan_rx)
485 as->dma.chan_rx->device->device_control(as->dma.chan_rx, 476 dmaengine_terminate_all(as->dma.chan_rx);
486 DMA_TERMINATE_ALL, 0);
487 if (as->dma.chan_tx) 477 if (as->dma.chan_tx)
488 as->dma.chan_tx->device->device_control(as->dma.chan_tx, 478 dmaengine_terminate_all(as->dma.chan_tx);
489 DMA_TERMINATE_ALL, 0);
490} 479}
491 480
492static void atmel_spi_release_dma(struct atmel_spi *as) 481static void atmel_spi_release_dma(struct atmel_spi *as)
@@ -1315,6 +1304,7 @@ static int atmel_spi_probe(struct platform_device *pdev)
1315 master->setup = atmel_spi_setup; 1304 master->setup = atmel_spi_setup;
1316 master->transfer_one_message = atmel_spi_transfer_one_message; 1305 master->transfer_one_message = atmel_spi_transfer_one_message;
1317 master->cleanup = atmel_spi_cleanup; 1306 master->cleanup = atmel_spi_cleanup;
1307 master->auto_runtime_pm = true;
1318 platform_set_drvdata(pdev, master); 1308 platform_set_drvdata(pdev, master);
1319 1309
1320 as = spi_master_get_devdata(master); 1310 as = spi_master_get_devdata(master);
@@ -1347,8 +1337,11 @@ static int atmel_spi_probe(struct platform_device *pdev)
1347 as->use_dma = false; 1337 as->use_dma = false;
1348 as->use_pdc = false; 1338 as->use_pdc = false;
1349 if (as->caps.has_dma_support) { 1339 if (as->caps.has_dma_support) {
1350 if (atmel_spi_configure_dma(as) == 0) 1340 ret = atmel_spi_configure_dma(as);
1341 if (ret == 0)
1351 as->use_dma = true; 1342 as->use_dma = true;
1343 else if (ret == -EPROBE_DEFER)
1344 return ret;
1352 } else { 1345 } else {
1353 as->use_pdc = true; 1346 as->use_pdc = true;
1354 } 1347 }
@@ -1387,6 +1380,11 @@ static int atmel_spi_probe(struct platform_device *pdev)
1387 dev_info(&pdev->dev, "Atmel SPI Controller at 0x%08lx (irq %d)\n", 1380 dev_info(&pdev->dev, "Atmel SPI Controller at 0x%08lx (irq %d)\n",
1388 (unsigned long)regs->start, irq); 1381 (unsigned long)regs->start, irq);
1389 1382
1383 pm_runtime_set_autosuspend_delay(&pdev->dev, AUTOSUSPEND_TIMEOUT);
1384 pm_runtime_use_autosuspend(&pdev->dev);
1385 pm_runtime_set_active(&pdev->dev);
1386 pm_runtime_enable(&pdev->dev);
1387
1390 ret = devm_spi_register_master(&pdev->dev, master); 1388 ret = devm_spi_register_master(&pdev->dev, master);
1391 if (ret) 1389 if (ret)
1392 goto out_free_dma; 1390 goto out_free_dma;
@@ -1394,6 +1392,9 @@ static int atmel_spi_probe(struct platform_device *pdev)
1394 return 0; 1392 return 0;
1395 1393
1396out_free_dma: 1394out_free_dma:
1395 pm_runtime_disable(&pdev->dev);
1396 pm_runtime_set_suspended(&pdev->dev);
1397
1397 if (as->use_dma) 1398 if (as->use_dma)
1398 atmel_spi_release_dma(as); 1399 atmel_spi_release_dma(as);
1399 1400
@@ -1415,6 +1416,8 @@ static int atmel_spi_remove(struct platform_device *pdev)
1415 struct spi_master *master = platform_get_drvdata(pdev); 1416 struct spi_master *master = platform_get_drvdata(pdev);
1416 struct atmel_spi *as = spi_master_get_devdata(master); 1417 struct atmel_spi *as = spi_master_get_devdata(master);
1417 1418
1419 pm_runtime_get_sync(&pdev->dev);
1420
1418 /* reset the hardware and block queue progress */ 1421 /* reset the hardware and block queue progress */
1419 spin_lock_irq(&as->lock); 1422 spin_lock_irq(&as->lock);
1420 if (as->use_dma) { 1423 if (as->use_dma) {
@@ -1432,14 +1435,37 @@ static int atmel_spi_remove(struct platform_device *pdev)
1432 1435
1433 clk_disable_unprepare(as->clk); 1436 clk_disable_unprepare(as->clk);
1434 1437
1438 pm_runtime_put_noidle(&pdev->dev);
1439 pm_runtime_disable(&pdev->dev);
1440
1435 return 0; 1441 return 0;
1436} 1442}
1437 1443
1438#ifdef CONFIG_PM_SLEEP 1444#ifdef CONFIG_PM
1445static int atmel_spi_runtime_suspend(struct device *dev)
1446{
1447 struct spi_master *master = dev_get_drvdata(dev);
1448 struct atmel_spi *as = spi_master_get_devdata(master);
1449
1450 clk_disable_unprepare(as->clk);
1451 pinctrl_pm_select_sleep_state(dev);
1452
1453 return 0;
1454}
1455
1456static int atmel_spi_runtime_resume(struct device *dev)
1457{
1458 struct spi_master *master = dev_get_drvdata(dev);
1459 struct atmel_spi *as = spi_master_get_devdata(master);
1460
1461 pinctrl_pm_select_default_state(dev);
1462
1463 return clk_prepare_enable(as->clk);
1464}
1465
1439static int atmel_spi_suspend(struct device *dev) 1466static int atmel_spi_suspend(struct device *dev)
1440{ 1467{
1441 struct spi_master *master = dev_get_drvdata(dev); 1468 struct spi_master *master = dev_get_drvdata(dev);
1442 struct atmel_spi *as = spi_master_get_devdata(master);
1443 int ret; 1469 int ret;
1444 1470
1445 /* Stop the queue running */ 1471 /* Stop the queue running */
@@ -1449,22 +1475,22 @@ static int atmel_spi_suspend(struct device *dev)
1449 return ret; 1475 return ret;
1450 } 1476 }
1451 1477
1452 clk_disable_unprepare(as->clk); 1478 if (!pm_runtime_suspended(dev))
1453 1479 atmel_spi_runtime_suspend(dev);
1454 pinctrl_pm_select_sleep_state(dev);
1455 1480
1456 return 0; 1481 return 0;
1457} 1482}
1458 1483
1459static int atmel_spi_resume(struct device *dev) 1484static int atmel_spi_resume(struct device *dev)
1460{ 1485{
1461 struct spi_master *master = dev_get_drvdata(dev); 1486 struct spi_master *master = dev_get_drvdata(dev);
1462 struct atmel_spi *as = spi_master_get_devdata(master);
1463 int ret; 1487 int ret;
1464 1488
1465 pinctrl_pm_select_default_state(dev); 1489 if (!pm_runtime_suspended(dev)) {
1466 1490 ret = atmel_spi_runtime_resume(dev);
1467 clk_prepare_enable(as->clk); 1491 if (ret)
1492 return ret;
1493 }
1468 1494
1469 /* Start the queue running */ 1495 /* Start the queue running */
1470 ret = spi_master_resume(master); 1496 ret = spi_master_resume(master);
@@ -1474,8 +1500,11 @@ static int atmel_spi_resume(struct device *dev)
1474 return ret; 1500 return ret;
1475} 1501}
1476 1502
1477static SIMPLE_DEV_PM_OPS(atmel_spi_pm_ops, atmel_spi_suspend, atmel_spi_resume); 1503static const struct dev_pm_ops atmel_spi_pm_ops = {
1478 1504 SET_SYSTEM_SLEEP_PM_OPS(atmel_spi_suspend, atmel_spi_resume)
1505 SET_RUNTIME_PM_OPS(atmel_spi_runtime_suspend,
1506 atmel_spi_runtime_resume, NULL)
1507};
1479#define ATMEL_SPI_PM_OPS (&atmel_spi_pm_ops) 1508#define ATMEL_SPI_PM_OPS (&atmel_spi_pm_ops)
1480#else 1509#else
1481#define ATMEL_SPI_PM_OPS NULL 1510#define ATMEL_SPI_PM_OPS NULL
diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c
index 7b811e38c7ad..5a6749881ff9 100644
--- a/drivers/spi/spi-cadence.c
+++ b/drivers/spi/spi-cadence.c
@@ -47,6 +47,7 @@
47#define CDNS_SPI_CR_CPHA_MASK 0x00000004 /* Clock Phase Control */ 47#define CDNS_SPI_CR_CPHA_MASK 0x00000004 /* Clock Phase Control */
48#define CDNS_SPI_CR_CPOL_MASK 0x00000002 /* Clock Polarity Control */ 48#define CDNS_SPI_CR_CPOL_MASK 0x00000002 /* Clock Polarity Control */
49#define CDNS_SPI_CR_SSCTRL_MASK 0x00003C00 /* Slave Select Mask */ 49#define CDNS_SPI_CR_SSCTRL_MASK 0x00003C00 /* Slave Select Mask */
50#define CDNS_SPI_CR_PERI_SEL_MASK 0x00000200 /* Peripheral Select Decode */
50#define CDNS_SPI_CR_BAUD_DIV_MASK 0x00000038 /* Baud Rate Divisor Mask */ 51#define CDNS_SPI_CR_BAUD_DIV_MASK 0x00000038 /* Baud Rate Divisor Mask */
51#define CDNS_SPI_CR_MSTREN_MASK 0x00000001 /* Master Enable Mask */ 52#define CDNS_SPI_CR_MSTREN_MASK 0x00000001 /* Master Enable Mask */
52#define CDNS_SPI_CR_MANSTRTEN_MASK 0x00008000 /* Manual TX Enable Mask */ 53#define CDNS_SPI_CR_MANSTRTEN_MASK 0x00008000 /* Manual TX Enable Mask */
@@ -148,6 +149,11 @@ static inline void cdns_spi_write(struct cdns_spi *xspi, u32 offset, u32 val)
148 */ 149 */
149static void cdns_spi_init_hw(struct cdns_spi *xspi) 150static void cdns_spi_init_hw(struct cdns_spi *xspi)
150{ 151{
152 u32 ctrl_reg = CDNS_SPI_CR_DEFAULT_MASK;
153
154 if (xspi->is_decoded_cs)
155 ctrl_reg |= CDNS_SPI_CR_PERI_SEL_MASK;
156
151 cdns_spi_write(xspi, CDNS_SPI_ER_OFFSET, 157 cdns_spi_write(xspi, CDNS_SPI_ER_OFFSET,
152 CDNS_SPI_ER_DISABLE_MASK); 158 CDNS_SPI_ER_DISABLE_MASK);
153 cdns_spi_write(xspi, CDNS_SPI_IDR_OFFSET, 159 cdns_spi_write(xspi, CDNS_SPI_IDR_OFFSET,
@@ -160,8 +166,7 @@ static void cdns_spi_init_hw(struct cdns_spi *xspi)
160 166
161 cdns_spi_write(xspi, CDNS_SPI_ISR_OFFSET, 167 cdns_spi_write(xspi, CDNS_SPI_ISR_OFFSET,
162 CDNS_SPI_IXR_ALL_MASK); 168 CDNS_SPI_IXR_ALL_MASK);
163 cdns_spi_write(xspi, CDNS_SPI_CR_OFFSET, 169 cdns_spi_write(xspi, CDNS_SPI_CR_OFFSET, ctrl_reg);
164 CDNS_SPI_CR_DEFAULT_MASK);
165 cdns_spi_write(xspi, CDNS_SPI_ER_OFFSET, 170 cdns_spi_write(xspi, CDNS_SPI_ER_OFFSET,
166 CDNS_SPI_ER_ENABLE_MASK); 171 CDNS_SPI_ER_ENABLE_MASK);
167} 172}
@@ -516,6 +521,17 @@ static int cdns_spi_probe(struct platform_device *pdev)
516 goto clk_dis_apb; 521 goto clk_dis_apb;
517 } 522 }
518 523
524 ret = of_property_read_u32(pdev->dev.of_node, "num-cs", &num_cs);
525 if (ret < 0)
526 master->num_chipselect = CDNS_SPI_DEFAULT_NUM_CS;
527 else
528 master->num_chipselect = num_cs;
529
530 ret = of_property_read_u32(pdev->dev.of_node, "is-decoded-cs",
531 &xspi->is_decoded_cs);
532 if (ret < 0)
533 xspi->is_decoded_cs = 0;
534
519 /* SPI controller initializations */ 535 /* SPI controller initializations */
520 cdns_spi_init_hw(xspi); 536 cdns_spi_init_hw(xspi);
521 537
@@ -534,19 +550,6 @@ static int cdns_spi_probe(struct platform_device *pdev)
534 goto remove_master; 550 goto remove_master;
535 } 551 }
536 552
537 ret = of_property_read_u32(pdev->dev.of_node, "num-cs", &num_cs);
538
539 if (ret < 0)
540 master->num_chipselect = CDNS_SPI_DEFAULT_NUM_CS;
541 else
542 master->num_chipselect = num_cs;
543
544 ret = of_property_read_u32(pdev->dev.of_node, "is-decoded-cs",
545 &xspi->is_decoded_cs);
546
547 if (ret < 0)
548 xspi->is_decoded_cs = 0;
549
550 master->prepare_transfer_hardware = cdns_prepare_transfer_hardware; 553 master->prepare_transfer_hardware = cdns_prepare_transfer_hardware;
551 master->prepare_message = cdns_prepare_message; 554 master->prepare_message = cdns_prepare_message;
552 master->transfer_one = cdns_transfer_one; 555 master->transfer_one = cdns_transfer_one;
diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c
index 46c6d58e1fda..7281316a5ecb 100644
--- a/drivers/spi/spi-dw-mid.c
+++ b/drivers/spi/spi-dw-mid.c
@@ -26,6 +26,9 @@
26#include <linux/intel_mid_dma.h> 26#include <linux/intel_mid_dma.h>
27#include <linux/pci.h> 27#include <linux/pci.h>
28 28
29#define RX_BUSY 0
30#define TX_BUSY 1
31
29struct mid_dma { 32struct mid_dma {
30 struct intel_mid_dma_slave dmas_tx; 33 struct intel_mid_dma_slave dmas_tx;
31 struct intel_mid_dma_slave dmas_rx; 34 struct intel_mid_dma_slave dmas_rx;
@@ -98,41 +101,26 @@ static void mid_spi_dma_exit(struct dw_spi *dws)
98} 101}
99 102
100/* 103/*
101 * dws->dma_chan_done is cleared before the dma transfer starts, 104 * dws->dma_chan_busy is set before the dma transfer starts, callback for tx
102 * callback for rx/tx channel will each increment it by 1. 105 * channel will clear a corresponding bit.
103 * Reaching 2 means the whole spi transaction is done.
104 */ 106 */
105static void dw_spi_dma_done(void *arg) 107static void dw_spi_dma_tx_done(void *arg)
106{ 108{
107 struct dw_spi *dws = arg; 109 struct dw_spi *dws = arg;
108 110
109 if (++dws->dma_chan_done != 2) 111 if (test_and_clear_bit(TX_BUSY, &dws->dma_chan_busy) & BIT(RX_BUSY))
110 return; 112 return;
111 dw_spi_xfer_done(dws); 113 dw_spi_xfer_done(dws);
112} 114}
113 115
114static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change) 116static struct dma_async_tx_descriptor *dw_spi_dma_prepare_tx(struct dw_spi *dws)
115{ 117{
116 struct dma_async_tx_descriptor *txdesc, *rxdesc; 118 struct dma_slave_config txconf;
117 struct dma_slave_config txconf, rxconf; 119 struct dma_async_tx_descriptor *txdesc;
118 u16 dma_ctrl = 0;
119
120 /* 1. setup DMA related registers */
121 if (cs_change) {
122 spi_enable_chip(dws, 0);
123 dw_writew(dws, DW_SPI_DMARDLR, 0xf);
124 dw_writew(dws, DW_SPI_DMATDLR, 0x10);
125 if (dws->tx_dma)
126 dma_ctrl |= SPI_DMA_TDMAE;
127 if (dws->rx_dma)
128 dma_ctrl |= SPI_DMA_RDMAE;
129 dw_writew(dws, DW_SPI_DMACR, dma_ctrl);
130 spi_enable_chip(dws, 1);
131 }
132 120
133 dws->dma_chan_done = 0; 121 if (!dws->tx_dma)
122 return NULL;
134 123
135 /* 2. Prepare the TX dma transfer */
136 txconf.direction = DMA_MEM_TO_DEV; 124 txconf.direction = DMA_MEM_TO_DEV;
137 txconf.dst_addr = dws->dma_addr; 125 txconf.dst_addr = dws->dma_addr;
138 txconf.dst_maxburst = LNW_DMA_MSIZE_16; 126 txconf.dst_maxburst = LNW_DMA_MSIZE_16;
@@ -151,10 +139,33 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
151 1, 139 1,
152 DMA_MEM_TO_DEV, 140 DMA_MEM_TO_DEV,
153 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 141 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
154 txdesc->callback = dw_spi_dma_done; 142 txdesc->callback = dw_spi_dma_tx_done;
155 txdesc->callback_param = dws; 143 txdesc->callback_param = dws;
156 144
157 /* 3. Prepare the RX dma transfer */ 145 return txdesc;
146}
147
148/*
149 * dws->dma_chan_busy is set before the dma transfer starts, callback for rx
150 * channel will clear a corresponding bit.
151 */
152static void dw_spi_dma_rx_done(void *arg)
153{
154 struct dw_spi *dws = arg;
155
156 if (test_and_clear_bit(RX_BUSY, &dws->dma_chan_busy) & BIT(TX_BUSY))
157 return;
158 dw_spi_xfer_done(dws);
159}
160
161static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws)
162{
163 struct dma_slave_config rxconf;
164 struct dma_async_tx_descriptor *rxdesc;
165
166 if (!dws->rx_dma)
167 return NULL;
168
158 rxconf.direction = DMA_DEV_TO_MEM; 169 rxconf.direction = DMA_DEV_TO_MEM;
159 rxconf.src_addr = dws->dma_addr; 170 rxconf.src_addr = dws->dma_addr;
160 rxconf.src_maxburst = LNW_DMA_MSIZE_16; 171 rxconf.src_maxburst = LNW_DMA_MSIZE_16;
@@ -173,15 +184,56 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
173 1, 184 1,
174 DMA_DEV_TO_MEM, 185 DMA_DEV_TO_MEM,
175 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 186 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
176 rxdesc->callback = dw_spi_dma_done; 187 rxdesc->callback = dw_spi_dma_rx_done;
177 rxdesc->callback_param = dws; 188 rxdesc->callback_param = dws;
178 189
190 return rxdesc;
191}
192
193static void dw_spi_dma_setup(struct dw_spi *dws)
194{
195 u16 dma_ctrl = 0;
196
197 spi_enable_chip(dws, 0);
198
199 dw_writew(dws, DW_SPI_DMARDLR, 0xf);
200 dw_writew(dws, DW_SPI_DMATDLR, 0x10);
201
202 if (dws->tx_dma)
203 dma_ctrl |= SPI_DMA_TDMAE;
204 if (dws->rx_dma)
205 dma_ctrl |= SPI_DMA_RDMAE;
206 dw_writew(dws, DW_SPI_DMACR, dma_ctrl);
207
208 spi_enable_chip(dws, 1);
209}
210
211static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
212{
213 struct dma_async_tx_descriptor *txdesc, *rxdesc;
214
215 /* 1. setup DMA related registers */
216 if (cs_change)
217 dw_spi_dma_setup(dws);
218
219 /* 2. Prepare the TX dma transfer */
220 txdesc = dw_spi_dma_prepare_tx(dws);
221
222 /* 3. Prepare the RX dma transfer */
223 rxdesc = dw_spi_dma_prepare_rx(dws);
224
179 /* rx must be started before tx due to spi instinct */ 225 /* rx must be started before tx due to spi instinct */
180 dmaengine_submit(rxdesc); 226 if (rxdesc) {
181 dma_async_issue_pending(dws->rxchan); 227 set_bit(RX_BUSY, &dws->dma_chan_busy);
228 dmaengine_submit(rxdesc);
229 dma_async_issue_pending(dws->rxchan);
230 }
182 231
183 dmaengine_submit(txdesc); 232 if (txdesc) {
184 dma_async_issue_pending(dws->txchan); 233 set_bit(TX_BUSY, &dws->dma_chan_busy);
234 dmaengine_submit(txdesc);
235 dma_async_issue_pending(dws->txchan);
236 }
185 237
186 return 0; 238 return 0;
187} 239}
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
index 72e12bad14b9..d0d5542efc06 100644
--- a/drivers/spi/spi-dw.c
+++ b/drivers/spi/spi-dw.c
@@ -376,9 +376,6 @@ static void pump_transfers(unsigned long data)
376 chip = dws->cur_chip; 376 chip = dws->cur_chip;
377 spi = message->spi; 377 spi = message->spi;
378 378
379 if (unlikely(!chip->clk_div))
380 chip->clk_div = dws->max_freq / chip->speed_hz;
381
382 if (message->state == ERROR_STATE) { 379 if (message->state == ERROR_STATE) {
383 message->status = -EIO; 380 message->status = -EIO;
384 goto early_exit; 381 goto early_exit;
@@ -419,7 +416,7 @@ static void pump_transfers(unsigned long data)
419 if (transfer->speed_hz) { 416 if (transfer->speed_hz) {
420 speed = chip->speed_hz; 417 speed = chip->speed_hz;
421 418
422 if (transfer->speed_hz != speed) { 419 if ((transfer->speed_hz != speed) || (!chip->clk_div)) {
423 speed = transfer->speed_hz; 420 speed = transfer->speed_hz;
424 421
425 /* clk_div doesn't support odd number */ 422 /* clk_div doesn't support odd number */
@@ -581,7 +578,6 @@ static int dw_spi_setup(struct spi_device *spi)
581 dev_err(&spi->dev, "No max speed HZ parameter\n"); 578 dev_err(&spi->dev, "No max speed HZ parameter\n");
582 return -EINVAL; 579 return -EINVAL;
583 } 580 }
584 chip->speed_hz = spi->max_speed_hz;
585 581
586 chip->tmode = 0; /* Tx & Rx */ 582 chip->tmode = 0; /* Tx & Rx */
587 /* Default SPI mode is SCPOL = 0, SCPH = 0 */ 583 /* Default SPI mode is SCPOL = 0, SCPH = 0 */
diff --git a/drivers/spi/spi-dw.h b/drivers/spi/spi-dw.h
index 83a103a76481..3d32be68c142 100644
--- a/drivers/spi/spi-dw.h
+++ b/drivers/spi/spi-dw.h
@@ -139,7 +139,7 @@ struct dw_spi {
139 struct scatterlist tx_sgl; 139 struct scatterlist tx_sgl;
140 struct dma_chan *rxchan; 140 struct dma_chan *rxchan;
141 struct scatterlist rx_sgl; 141 struct scatterlist rx_sgl;
142 int dma_chan_done; 142 unsigned long dma_chan_busy;
143 struct device *dma_dev; 143 struct device *dma_dev;
144 dma_addr_t dma_addr; /* phy address of the Data register */ 144 dma_addr_t dma_addr; /* phy address of the Data register */
145 struct dw_spi_dma_ops *dma_ops; 145 struct dw_spi_dma_ops *dma_ops;
diff --git a/drivers/spi/spi-fsl-cpm.c b/drivers/spi/spi-fsl-cpm.c
index c5dd20beee22..e85ab1cb17a2 100644
--- a/drivers/spi/spi-fsl-cpm.c
+++ b/drivers/spi/spi-fsl-cpm.c
@@ -56,12 +56,15 @@ void fsl_spi_cpm_reinit_txrx(struct mpc8xxx_spi *mspi)
56 qe_issue_cmd(QE_INIT_TX_RX, mspi->subblock, 56 qe_issue_cmd(QE_INIT_TX_RX, mspi->subblock,
57 QE_CR_PROTOCOL_UNSPECIFIED, 0); 57 QE_CR_PROTOCOL_UNSPECIFIED, 0);
58 } else { 58 } else {
59 cpm_command(CPM_SPI_CMD, CPM_CR_INIT_TRX);
60 if (mspi->flags & SPI_CPM1) { 59 if (mspi->flags & SPI_CPM1) {
60 out_be32(&mspi->pram->rstate, 0);
61 out_be16(&mspi->pram->rbptr, 61 out_be16(&mspi->pram->rbptr,
62 in_be16(&mspi->pram->rbase)); 62 in_be16(&mspi->pram->rbase));
63 out_be32(&mspi->pram->tstate, 0);
63 out_be16(&mspi->pram->tbptr, 64 out_be16(&mspi->pram->tbptr,
64 in_be16(&mspi->pram->tbase)); 65 in_be16(&mspi->pram->tbase));
66 } else {
67 cpm_command(CPM_SPI_CMD, CPM_CR_INIT_TRX);
65 } 68 }
66 } 69 }
67} 70}
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 831ceb4a91f6..4cda994d3f40 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -438,7 +438,7 @@ static int dspi_resume(struct device *dev)
438 438
439static SIMPLE_DEV_PM_OPS(dspi_pm, dspi_suspend, dspi_resume); 439static SIMPLE_DEV_PM_OPS(dspi_pm, dspi_suspend, dspi_resume);
440 440
441static struct regmap_config dspi_regmap_config = { 441static const struct regmap_config dspi_regmap_config = {
442 .reg_bits = 32, 442 .reg_bits = 32,
443 .val_bits = 32, 443 .val_bits = 32,
444 .reg_stride = 4, 444 .reg_stride = 4,
@@ -492,7 +492,6 @@ static int dspi_probe(struct platform_device *pdev)
492 goto out_master_put; 492 goto out_master_put;
493 } 493 }
494 494
495 dspi_regmap_config.lock_arg = dspi;
496 dspi->regmap = devm_regmap_init_mmio_clk(&pdev->dev, "dspi", base, 495 dspi->regmap = devm_regmap_init_mmio_clk(&pdev->dev, "dspi", base,
497 &dspi_regmap_config); 496 &dspi_regmap_config);
498 if (IS_ERR(dspi->regmap)) { 497 if (IS_ERR(dspi->regmap)) {
diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c
index a7f94b6a9e70..56cadf13519e 100644
--- a/drivers/spi/spi-fsl-espi.c
+++ b/drivers/spi/spi-fsl-espi.c
@@ -411,7 +411,8 @@ static void fsl_espi_rw_trans(struct spi_message *m,
411 kfree(local_buf); 411 kfree(local_buf);
412} 412}
413 413
414static void fsl_espi_do_one_msg(struct spi_message *m) 414static int fsl_espi_do_one_msg(struct spi_master *master,
415 struct spi_message *m)
415{ 416{
416 struct spi_transfer *t; 417 struct spi_transfer *t;
417 u8 *rx_buf = NULL; 418 u8 *rx_buf = NULL;
@@ -441,8 +442,8 @@ static void fsl_espi_do_one_msg(struct spi_message *m)
441 442
442 m->actual_length = espi_trans.actual_length; 443 m->actual_length = espi_trans.actual_length;
443 m->status = espi_trans.status; 444 m->status = espi_trans.status;
444 if (m->complete) 445 spi_finalize_current_message(master);
445 m->complete(m->context); 446 return 0;
446} 447}
447 448
448static int fsl_espi_setup(struct spi_device *spi) 449static int fsl_espi_setup(struct spi_device *spi)
@@ -587,6 +588,38 @@ static void fsl_espi_remove(struct mpc8xxx_spi *mspi)
587 iounmap(mspi->reg_base); 588 iounmap(mspi->reg_base);
588} 589}
589 590
591static int fsl_espi_suspend(struct spi_master *master)
592{
593 struct mpc8xxx_spi *mpc8xxx_spi;
594 struct fsl_espi_reg *reg_base;
595 u32 regval;
596
597 mpc8xxx_spi = spi_master_get_devdata(master);
598 reg_base = mpc8xxx_spi->reg_base;
599
600 regval = mpc8xxx_spi_read_reg(&reg_base->mode);
601 regval &= ~SPMODE_ENABLE;
602 mpc8xxx_spi_write_reg(&reg_base->mode, regval);
603
604 return 0;
605}
606
607static int fsl_espi_resume(struct spi_master *master)
608{
609 struct mpc8xxx_spi *mpc8xxx_spi;
610 struct fsl_espi_reg *reg_base;
611 u32 regval;
612
613 mpc8xxx_spi = spi_master_get_devdata(master);
614 reg_base = mpc8xxx_spi->reg_base;
615
616 regval = mpc8xxx_spi_read_reg(&reg_base->mode);
617 regval |= SPMODE_ENABLE;
618 mpc8xxx_spi_write_reg(&reg_base->mode, regval);
619
620 return 0;
621}
622
590static struct spi_master * fsl_espi_probe(struct device *dev, 623static struct spi_master * fsl_espi_probe(struct device *dev,
591 struct resource *mem, unsigned int irq) 624 struct resource *mem, unsigned int irq)
592{ 625{
@@ -607,16 +640,16 @@ static struct spi_master * fsl_espi_probe(struct device *dev,
607 640
608 dev_set_drvdata(dev, master); 641 dev_set_drvdata(dev, master);
609 642
610 ret = mpc8xxx_spi_probe(dev, mem, irq); 643 mpc8xxx_spi_probe(dev, mem, irq);
611 if (ret)
612 goto err_probe;
613 644
614 master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16); 645 master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
615 master->setup = fsl_espi_setup; 646 master->setup = fsl_espi_setup;
616 master->cleanup = fsl_espi_cleanup; 647 master->cleanup = fsl_espi_cleanup;
648 master->transfer_one_message = fsl_espi_do_one_msg;
649 master->prepare_transfer_hardware = fsl_espi_resume;
650 master->unprepare_transfer_hardware = fsl_espi_suspend;
617 651
618 mpc8xxx_spi = spi_master_get_devdata(master); 652 mpc8xxx_spi = spi_master_get_devdata(master);
619 mpc8xxx_spi->spi_do_one_msg = fsl_espi_do_one_msg;
620 mpc8xxx_spi->spi_remove = fsl_espi_remove; 653 mpc8xxx_spi->spi_remove = fsl_espi_remove;
621 654
622 mpc8xxx_spi->reg_base = ioremap(mem->start, resource_size(mem)); 655 mpc8xxx_spi->reg_base = ioremap(mem->start, resource_size(mem));
@@ -762,25 +795,15 @@ static int of_fsl_espi_remove(struct platform_device *dev)
762static int of_fsl_espi_suspend(struct device *dev) 795static int of_fsl_espi_suspend(struct device *dev)
763{ 796{
764 struct spi_master *master = dev_get_drvdata(dev); 797 struct spi_master *master = dev_get_drvdata(dev);
765 struct mpc8xxx_spi *mpc8xxx_spi;
766 struct fsl_espi_reg *reg_base;
767 u32 regval;
768 int ret; 798 int ret;
769 799
770 mpc8xxx_spi = spi_master_get_devdata(master);
771 reg_base = mpc8xxx_spi->reg_base;
772
773 ret = spi_master_suspend(master); 800 ret = spi_master_suspend(master);
774 if (ret) { 801 if (ret) {
775 dev_warn(dev, "cannot suspend master\n"); 802 dev_warn(dev, "cannot suspend master\n");
776 return ret; 803 return ret;
777 } 804 }
778 805
779 regval = mpc8xxx_spi_read_reg(&reg_base->mode); 806 return fsl_espi_suspend(master);
780 regval &= ~SPMODE_ENABLE;
781 mpc8xxx_spi_write_reg(&reg_base->mode, regval);
782
783 return 0;
784} 807}
785 808
786static int of_fsl_espi_resume(struct device *dev) 809static int of_fsl_espi_resume(struct device *dev)
diff --git a/drivers/spi/spi-fsl-lib.c b/drivers/spi/spi-fsl-lib.c
index 5ddb5b098e4e..446b737e1532 100644
--- a/drivers/spi/spi-fsl-lib.c
+++ b/drivers/spi/spi-fsl-lib.c
@@ -61,44 +61,6 @@ struct mpc8xxx_spi_probe_info *to_of_pinfo(struct fsl_spi_platform_data *pdata)
61 return container_of(pdata, struct mpc8xxx_spi_probe_info, pdata); 61 return container_of(pdata, struct mpc8xxx_spi_probe_info, pdata);
62} 62}
63 63
64static void mpc8xxx_spi_work(struct work_struct *work)
65{
66 struct mpc8xxx_spi *mpc8xxx_spi = container_of(work, struct mpc8xxx_spi,
67 work);
68
69 spin_lock_irq(&mpc8xxx_spi->lock);
70 while (!list_empty(&mpc8xxx_spi->queue)) {
71 struct spi_message *m = container_of(mpc8xxx_spi->queue.next,
72 struct spi_message, queue);
73
74 list_del_init(&m->queue);
75 spin_unlock_irq(&mpc8xxx_spi->lock);
76
77 if (mpc8xxx_spi->spi_do_one_msg)
78 mpc8xxx_spi->spi_do_one_msg(m);
79
80 spin_lock_irq(&mpc8xxx_spi->lock);
81 }
82 spin_unlock_irq(&mpc8xxx_spi->lock);
83}
84
85int mpc8xxx_spi_transfer(struct spi_device *spi,
86 struct spi_message *m)
87{
88 struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
89 unsigned long flags;
90
91 m->actual_length = 0;
92 m->status = -EINPROGRESS;
93
94 spin_lock_irqsave(&mpc8xxx_spi->lock, flags);
95 list_add_tail(&m->queue, &mpc8xxx_spi->queue);
96 queue_work(mpc8xxx_spi->workqueue, &mpc8xxx_spi->work);
97 spin_unlock_irqrestore(&mpc8xxx_spi->lock, flags);
98
99 return 0;
100}
101
102const char *mpc8xxx_spi_strmode(unsigned int flags) 64const char *mpc8xxx_spi_strmode(unsigned int flags)
103{ 65{
104 if (flags & SPI_QE_CPU_MODE) { 66 if (flags & SPI_QE_CPU_MODE) {
@@ -114,13 +76,12 @@ const char *mpc8xxx_spi_strmode(unsigned int flags)
114 return "CPU"; 76 return "CPU";
115} 77}
116 78
117int mpc8xxx_spi_probe(struct device *dev, struct resource *mem, 79void mpc8xxx_spi_probe(struct device *dev, struct resource *mem,
118 unsigned int irq) 80 unsigned int irq)
119{ 81{
120 struct fsl_spi_platform_data *pdata = dev_get_platdata(dev); 82 struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
121 struct spi_master *master; 83 struct spi_master *master;
122 struct mpc8xxx_spi *mpc8xxx_spi; 84 struct mpc8xxx_spi *mpc8xxx_spi;
123 int ret = 0;
124 85
125 master = dev_get_drvdata(dev); 86 master = dev_get_drvdata(dev);
126 87
@@ -128,7 +89,6 @@ int mpc8xxx_spi_probe(struct device *dev, struct resource *mem,
128 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH 89 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH
129 | SPI_LSB_FIRST | SPI_LOOP; 90 | SPI_LSB_FIRST | SPI_LOOP;
130 91
131 master->transfer = mpc8xxx_spi_transfer;
132 master->dev.of_node = dev->of_node; 92 master->dev.of_node = dev->of_node;
133 93
134 mpc8xxx_spi = spi_master_get_devdata(master); 94 mpc8xxx_spi = spi_master_get_devdata(master);
@@ -147,22 +107,7 @@ int mpc8xxx_spi_probe(struct device *dev, struct resource *mem,
147 master->bus_num = pdata->bus_num; 107 master->bus_num = pdata->bus_num;
148 master->num_chipselect = pdata->max_chipselect; 108 master->num_chipselect = pdata->max_chipselect;
149 109
150 spin_lock_init(&mpc8xxx_spi->lock);
151 init_completion(&mpc8xxx_spi->done); 110 init_completion(&mpc8xxx_spi->done);
152 INIT_WORK(&mpc8xxx_spi->work, mpc8xxx_spi_work);
153 INIT_LIST_HEAD(&mpc8xxx_spi->queue);
154
155 mpc8xxx_spi->workqueue = create_singlethread_workqueue(
156 dev_name(master->dev.parent));
157 if (mpc8xxx_spi->workqueue == NULL) {
158 ret = -EBUSY;
159 goto err;
160 }
161
162 return 0;
163
164err:
165 return ret;
166} 111}
167 112
168int mpc8xxx_spi_remove(struct device *dev) 113int mpc8xxx_spi_remove(struct device *dev)
@@ -173,8 +118,6 @@ int mpc8xxx_spi_remove(struct device *dev)
173 master = dev_get_drvdata(dev); 118 master = dev_get_drvdata(dev);
174 mpc8xxx_spi = spi_master_get_devdata(master); 119 mpc8xxx_spi = spi_master_get_devdata(master);
175 120
176 flush_workqueue(mpc8xxx_spi->workqueue);
177 destroy_workqueue(mpc8xxx_spi->workqueue);
178 spi_unregister_master(master); 121 spi_unregister_master(master);
179 122
180 free_irq(mpc8xxx_spi->irq, mpc8xxx_spi); 123 free_irq(mpc8xxx_spi->irq, mpc8xxx_spi);
diff --git a/drivers/spi/spi-fsl-lib.h b/drivers/spi/spi-fsl-lib.h
index 2fcbfd01d109..b4ed04e8862f 100644
--- a/drivers/spi/spi-fsl-lib.h
+++ b/drivers/spi/spi-fsl-lib.h
@@ -55,7 +55,6 @@ struct mpc8xxx_spi {
55 u32(*get_tx) (struct mpc8xxx_spi *); 55 u32(*get_tx) (struct mpc8xxx_spi *);
56 56
57 /* hooks for different controller driver */ 57 /* hooks for different controller driver */
58 void (*spi_do_one_msg) (struct spi_message *m);
59 void (*spi_remove) (struct mpc8xxx_spi *mspi); 58 void (*spi_remove) (struct mpc8xxx_spi *mspi);
60 59
61 unsigned int count; 60 unsigned int count;
@@ -78,12 +77,6 @@ struct mpc8xxx_spi {
78 int bits_per_word, int msb_first); 77 int bits_per_word, int msb_first);
79#endif 78#endif
80 79
81 struct workqueue_struct *workqueue;
82 struct work_struct work;
83
84 struct list_head queue;
85 spinlock_t lock;
86
87 struct completion done; 80 struct completion done;
88}; 81};
89 82
@@ -123,9 +116,8 @@ extern struct mpc8xxx_spi_probe_info *to_of_pinfo(
123 struct fsl_spi_platform_data *pdata); 116 struct fsl_spi_platform_data *pdata);
124extern int mpc8xxx_spi_bufs(struct mpc8xxx_spi *mspi, 117extern int mpc8xxx_spi_bufs(struct mpc8xxx_spi *mspi,
125 struct spi_transfer *t, unsigned int len); 118 struct spi_transfer *t, unsigned int len);
126extern int mpc8xxx_spi_transfer(struct spi_device *spi, struct spi_message *m);
127extern const char *mpc8xxx_spi_strmode(unsigned int flags); 119extern const char *mpc8xxx_spi_strmode(unsigned int flags);
128extern int mpc8xxx_spi_probe(struct device *dev, struct resource *mem, 120extern void mpc8xxx_spi_probe(struct device *dev, struct resource *mem,
129 unsigned int irq); 121 unsigned int irq);
130extern int mpc8xxx_spi_remove(struct device *dev); 122extern int mpc8xxx_spi_remove(struct device *dev);
131extern int of_mpc8xxx_spi_probe(struct platform_device *ofdev); 123extern int of_mpc8xxx_spi_probe(struct platform_device *ofdev);
diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
index ed792880c9d6..3b159364c459 100644
--- a/drivers/spi/spi-fsl-spi.c
+++ b/drivers/spi/spi-fsl-spi.c
@@ -353,7 +353,8 @@ static int fsl_spi_bufs(struct spi_device *spi, struct spi_transfer *t,
353 return mpc8xxx_spi->count; 353 return mpc8xxx_spi->count;
354} 354}
355 355
356static void fsl_spi_do_one_msg(struct spi_message *m) 356static int fsl_spi_do_one_msg(struct spi_master *master,
357 struct spi_message *m)
357{ 358{
358 struct spi_device *spi = m->spi; 359 struct spi_device *spi = m->spi;
359 struct spi_transfer *t, *first; 360 struct spi_transfer *t, *first;
@@ -367,10 +368,9 @@ static void fsl_spi_do_one_msg(struct spi_message *m)
367 list_for_each_entry(t, &m->transfers, transfer_list) { 368 list_for_each_entry(t, &m->transfers, transfer_list) {
368 if ((first->bits_per_word != t->bits_per_word) || 369 if ((first->bits_per_word != t->bits_per_word) ||
369 (first->speed_hz != t->speed_hz)) { 370 (first->speed_hz != t->speed_hz)) {
370 status = -EINVAL;
371 dev_err(&spi->dev, 371 dev_err(&spi->dev,
372 "bits_per_word/speed_hz should be same for the same SPI transfer\n"); 372 "bits_per_word/speed_hz should be same for the same SPI transfer\n");
373 return; 373 return -EINVAL;
374 } 374 }
375 } 375 }
376 376
@@ -408,8 +408,7 @@ static void fsl_spi_do_one_msg(struct spi_message *m)
408 } 408 }
409 409
410 m->status = status; 410 m->status = status;
411 if (m->complete) 411 spi_finalize_current_message(master);
412 m->complete(m->context);
413 412
414 if (status || !cs_change) { 413 if (status || !cs_change) {
415 ndelay(nsecs); 414 ndelay(nsecs);
@@ -417,6 +416,7 @@ static void fsl_spi_do_one_msg(struct spi_message *m)
417 } 416 }
418 417
419 fsl_spi_setup_transfer(spi, NULL); 418 fsl_spi_setup_transfer(spi, NULL);
419 return 0;
420} 420}
421 421
422static int fsl_spi_setup(struct spi_device *spi) 422static int fsl_spi_setup(struct spi_device *spi)
@@ -624,15 +624,13 @@ static struct spi_master * fsl_spi_probe(struct device *dev,
624 624
625 dev_set_drvdata(dev, master); 625 dev_set_drvdata(dev, master);
626 626
627 ret = mpc8xxx_spi_probe(dev, mem, irq); 627 mpc8xxx_spi_probe(dev, mem, irq);
628 if (ret)
629 goto err_probe;
630 628
631 master->setup = fsl_spi_setup; 629 master->setup = fsl_spi_setup;
632 master->cleanup = fsl_spi_cleanup; 630 master->cleanup = fsl_spi_cleanup;
631 master->transfer_one_message = fsl_spi_do_one_msg;
633 632
634 mpc8xxx_spi = spi_master_get_devdata(master); 633 mpc8xxx_spi = spi_master_get_devdata(master);
635 mpc8xxx_spi->spi_do_one_msg = fsl_spi_do_one_msg;
636 mpc8xxx_spi->spi_remove = fsl_spi_remove; 634 mpc8xxx_spi->spi_remove = fsl_spi_remove;
637 mpc8xxx_spi->max_bits_per_word = 32; 635 mpc8xxx_spi->max_bits_per_word = 32;
638 mpc8xxx_spi->type = fsl_spi_get_type(dev); 636 mpc8xxx_spi->type = fsl_spi_get_type(dev);
@@ -704,7 +702,6 @@ free_irq:
704err_ioremap: 702err_ioremap:
705 fsl_spi_cpm_free(mpc8xxx_spi); 703 fsl_spi_cpm_free(mpc8xxx_spi);
706err_cpm_init: 704err_cpm_init:
707err_probe:
708 spi_master_put(master); 705 spi_master_put(master);
709err: 706err:
710 return ERR_PTR(ret); 707 return ERR_PTR(ret);
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
index 9f595535cf27..4b600d4f8548 100644
--- a/drivers/spi/spi-gpio.c
+++ b/drivers/spi/spi-gpio.c
@@ -48,7 +48,7 @@ struct spi_gpio {
48 struct spi_bitbang bitbang; 48 struct spi_bitbang bitbang;
49 struct spi_gpio_platform_data pdata; 49 struct spi_gpio_platform_data pdata;
50 struct platform_device *pdev; 50 struct platform_device *pdev;
51 int cs_gpios[0]; 51 unsigned long cs_gpios[0];
52}; 52};
53 53
54/*----------------------------------------------------------------------*/ 54/*----------------------------------------------------------------------*/
@@ -220,7 +220,7 @@ static u32 spi_gpio_spec_txrx_word_mode3(struct spi_device *spi,
220static void spi_gpio_chipselect(struct spi_device *spi, int is_active) 220static void spi_gpio_chipselect(struct spi_device *spi, int is_active)
221{ 221{
222 struct spi_gpio *spi_gpio = spi_to_spi_gpio(spi); 222 struct spi_gpio *spi_gpio = spi_to_spi_gpio(spi);
223 unsigned int cs = spi_gpio->cs_gpios[spi->chip_select]; 223 unsigned long cs = spi_gpio->cs_gpios[spi->chip_select];
224 224
225 /* set initial clock polarity */ 225 /* set initial clock polarity */
226 if (is_active) 226 if (is_active)
@@ -234,7 +234,7 @@ static void spi_gpio_chipselect(struct spi_device *spi, int is_active)
234 234
235static int spi_gpio_setup(struct spi_device *spi) 235static int spi_gpio_setup(struct spi_device *spi)
236{ 236{
237 unsigned int cs; 237 unsigned long cs;
238 int status = 0; 238 int status = 0;
239 struct spi_gpio *spi_gpio = spi_to_spi_gpio(spi); 239 struct spi_gpio *spi_gpio = spi_to_spi_gpio(spi);
240 struct device_node *np = spi->master->dev.of_node; 240 struct device_node *np = spi->master->dev.of_node;
@@ -249,7 +249,7 @@ static int spi_gpio_setup(struct spi_device *spi)
249 /* 249 /*
250 * ... otherwise, take it from spi->controller_data 250 * ... otherwise, take it from spi->controller_data
251 */ 251 */
252 cs = (unsigned int)(uintptr_t) spi->controller_data; 252 cs = (uintptr_t) spi->controller_data;
253 } 253 }
254 254
255 if (!spi->controller_state) { 255 if (!spi->controller_state) {
@@ -277,7 +277,7 @@ static int spi_gpio_setup(struct spi_device *spi)
277static void spi_gpio_cleanup(struct spi_device *spi) 277static void spi_gpio_cleanup(struct spi_device *spi)
278{ 278{
279 struct spi_gpio *spi_gpio = spi_to_spi_gpio(spi); 279 struct spi_gpio *spi_gpio = spi_to_spi_gpio(spi);
280 unsigned int cs = spi_gpio->cs_gpios[spi->chip_select]; 280 unsigned long cs = spi_gpio->cs_gpios[spi->chip_select];
281 281
282 if (cs != SPI_GPIO_NO_CHIPSELECT) 282 if (cs != SPI_GPIO_NO_CHIPSELECT)
283 gpio_free(cs); 283 gpio_free(cs);
@@ -413,6 +413,7 @@ static int spi_gpio_probe(struct platform_device *pdev)
413 struct spi_gpio_platform_data *pdata; 413 struct spi_gpio_platform_data *pdata;
414 u16 master_flags = 0; 414 u16 master_flags = 0;
415 bool use_of = 0; 415 bool use_of = 0;
416 int num_devices;
416 417
417 status = spi_gpio_probe_dt(pdev); 418 status = spi_gpio_probe_dt(pdev);
418 if (status < 0) 419 if (status < 0)
@@ -422,16 +423,21 @@ static int spi_gpio_probe(struct platform_device *pdev)
422 423
423 pdata = dev_get_platdata(&pdev->dev); 424 pdata = dev_get_platdata(&pdev->dev);
424#ifdef GENERIC_BITBANG 425#ifdef GENERIC_BITBANG
425 if (!pdata || !pdata->num_chipselect) 426 if (!pdata || (!use_of && !pdata->num_chipselect))
426 return -ENODEV; 427 return -ENODEV;
427#endif 428#endif
428 429
430 if (use_of && !SPI_N_CHIPSEL)
431 num_devices = 1;
432 else
433 num_devices = SPI_N_CHIPSEL;
434
429 status = spi_gpio_request(pdata, dev_name(&pdev->dev), &master_flags); 435 status = spi_gpio_request(pdata, dev_name(&pdev->dev), &master_flags);
430 if (status < 0) 436 if (status < 0)
431 return status; 437 return status;
432 438
433 master = spi_alloc_master(&pdev->dev, sizeof(*spi_gpio) + 439 master = spi_alloc_master(&pdev->dev, sizeof(*spi_gpio) +
434 (sizeof(int) * SPI_N_CHIPSEL)); 440 (sizeof(unsigned long) * num_devices));
435 if (!master) { 441 if (!master) {
436 status = -ENOMEM; 442 status = -ENOMEM;
437 goto gpio_free; 443 goto gpio_free;
@@ -446,7 +452,7 @@ static int spi_gpio_probe(struct platform_device *pdev)
446 master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32); 452 master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
447 master->flags = master_flags; 453 master->flags = master_flags;
448 master->bus_num = pdev->id; 454 master->bus_num = pdev->id;
449 master->num_chipselect = SPI_N_CHIPSEL; 455 master->num_chipselect = num_devices;
450 master->setup = spi_gpio_setup; 456 master->setup = spi_gpio_setup;
451 master->cleanup = spi_gpio_cleanup; 457 master->cleanup = spi_gpio_cleanup;
452#ifdef CONFIG_OF 458#ifdef CONFIG_OF
@@ -461,9 +467,18 @@ static int spi_gpio_probe(struct platform_device *pdev)
461 * property of the node. 467 * property of the node.
462 */ 468 */
463 469
464 for (i = 0; i < SPI_N_CHIPSEL; i++) 470 if (!SPI_N_CHIPSEL)
465 spi_gpio->cs_gpios[i] = 471 spi_gpio->cs_gpios[0] = SPI_GPIO_NO_CHIPSELECT;
466 of_get_named_gpio(np, "cs-gpios", i); 472 else
473 for (i = 0; i < SPI_N_CHIPSEL; i++) {
474 status = of_get_named_gpio(np, "cs-gpios", i);
475 if (status < 0) {
476 dev_err(&pdev->dev,
477 "invalid cs-gpios property\n");
478 goto gpio_free;
479 }
480 spi_gpio->cs_gpios[i] = status;
481 }
467 } 482 }
468#endif 483#endif
469 484
diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c
new file mode 100644
index 000000000000..43781c9fe521
--- /dev/null
+++ b/drivers/spi/spi-img-spfi.c
@@ -0,0 +1,746 @@
1/*
2 * IMG SPFI controller driver
3 *
4 * Copyright (C) 2007,2008,2013 Imagination Technologies Ltd.
5 * Copyright (C) 2014 Google, Inc.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 */
11
12#include <linux/clk.h>
13#include <linux/delay.h>
14#include <linux/dmaengine.h>
15#include <linux/interrupt.h>
16#include <linux/io.h>
17#include <linux/irq.h>
18#include <linux/module.h>
19#include <linux/of.h>
20#include <linux/platform_device.h>
21#include <linux/pm_runtime.h>
22#include <linux/scatterlist.h>
23#include <linux/slab.h>
24#include <linux/spi/spi.h>
25#include <linux/spinlock.h>
26
27#define SPFI_DEVICE_PARAMETER(x) (0x00 + 0x4 * (x))
28#define SPFI_DEVICE_PARAMETER_BITCLK_SHIFT 24
29#define SPFI_DEVICE_PARAMETER_BITCLK_MASK 0xff
30#define SPFI_DEVICE_PARAMETER_CSSETUP_SHIFT 16
31#define SPFI_DEVICE_PARAMETER_CSSETUP_MASK 0xff
32#define SPFI_DEVICE_PARAMETER_CSHOLD_SHIFT 8
33#define SPFI_DEVICE_PARAMETER_CSHOLD_MASK 0xff
34#define SPFI_DEVICE_PARAMETER_CSDELAY_SHIFT 0
35#define SPFI_DEVICE_PARAMETER_CSDELAY_MASK 0xff
36
37#define SPFI_CONTROL 0x14
38#define SPFI_CONTROL_CONTINUE BIT(12)
39#define SPFI_CONTROL_SOFT_RESET BIT(11)
40#define SPFI_CONTROL_SEND_DMA BIT(10)
41#define SPFI_CONTROL_GET_DMA BIT(9)
42#define SPFI_CONTROL_TMODE_SHIFT 5
43#define SPFI_CONTROL_TMODE_MASK 0x7
44#define SPFI_CONTROL_TMODE_SINGLE 0
45#define SPFI_CONTROL_TMODE_DUAL 1
46#define SPFI_CONTROL_TMODE_QUAD 2
47#define SPFI_CONTROL_SPFI_EN BIT(0)
48
49#define SPFI_TRANSACTION 0x18
50#define SPFI_TRANSACTION_TSIZE_SHIFT 16
51#define SPFI_TRANSACTION_TSIZE_MASK 0xffff
52
53#define SPFI_PORT_STATE 0x1c
54#define SPFI_PORT_STATE_DEV_SEL_SHIFT 20
55#define SPFI_PORT_STATE_DEV_SEL_MASK 0x7
56#define SPFI_PORT_STATE_CK_POL(x) BIT(19 - (x))
57#define SPFI_PORT_STATE_CK_PHASE(x) BIT(14 - (x))
58
59#define SPFI_TX_32BIT_VALID_DATA 0x20
60#define SPFI_TX_8BIT_VALID_DATA 0x24
61#define SPFI_RX_32BIT_VALID_DATA 0x28
62#define SPFI_RX_8BIT_VALID_DATA 0x2c
63
64#define SPFI_INTERRUPT_STATUS 0x30
65#define SPFI_INTERRUPT_ENABLE 0x34
66#define SPFI_INTERRUPT_CLEAR 0x38
67#define SPFI_INTERRUPT_IACCESS BIT(12)
68#define SPFI_INTERRUPT_GDEX8BIT BIT(11)
69#define SPFI_INTERRUPT_ALLDONETRIG BIT(9)
70#define SPFI_INTERRUPT_GDFUL BIT(8)
71#define SPFI_INTERRUPT_GDHF BIT(7)
72#define SPFI_INTERRUPT_GDEX32BIT BIT(6)
73#define SPFI_INTERRUPT_GDTRIG BIT(5)
74#define SPFI_INTERRUPT_SDFUL BIT(3)
75#define SPFI_INTERRUPT_SDHF BIT(2)
76#define SPFI_INTERRUPT_SDE BIT(1)
77#define SPFI_INTERRUPT_SDTRIG BIT(0)
78
79/*
80 * There are four parallel FIFOs of 16 bytes each. The word buffer
81 * (*_32BIT_VALID_DATA) accesses all four FIFOs at once, resulting in an
82 * effective FIFO size of 64 bytes. The byte buffer (*_8BIT_VALID_DATA)
83 * accesses only a single FIFO, resulting in an effective FIFO size of
84 * 16 bytes.
85 */
86#define SPFI_32BIT_FIFO_SIZE 64
87#define SPFI_8BIT_FIFO_SIZE 16
88
89struct img_spfi {
90 struct device *dev;
91 struct spi_master *master;
92 spinlock_t lock;
93
94 void __iomem *regs;
95 phys_addr_t phys;
96 int irq;
97 struct clk *spfi_clk;
98 struct clk *sys_clk;
99
100 struct dma_chan *rx_ch;
101 struct dma_chan *tx_ch;
102 bool tx_dma_busy;
103 bool rx_dma_busy;
104};
105
106static inline u32 spfi_readl(struct img_spfi *spfi, u32 reg)
107{
108 return readl(spfi->regs + reg);
109}
110
111static inline void spfi_writel(struct img_spfi *spfi, u32 val, u32 reg)
112{
113 writel(val, spfi->regs + reg);
114}
115
116static inline void spfi_start(struct img_spfi *spfi)
117{
118 u32 val;
119
120 val = spfi_readl(spfi, SPFI_CONTROL);
121 val |= SPFI_CONTROL_SPFI_EN;
122 spfi_writel(spfi, val, SPFI_CONTROL);
123}
124
125static inline void spfi_stop(struct img_spfi *spfi)
126{
127 u32 val;
128
129 val = spfi_readl(spfi, SPFI_CONTROL);
130 val &= ~SPFI_CONTROL_SPFI_EN;
131 spfi_writel(spfi, val, SPFI_CONTROL);
132}
133
134static inline void spfi_reset(struct img_spfi *spfi)
135{
136 spfi_writel(spfi, SPFI_CONTROL_SOFT_RESET, SPFI_CONTROL);
137 udelay(1);
138 spfi_writel(spfi, 0, SPFI_CONTROL);
139}
140
141static void spfi_flush_tx_fifo(struct img_spfi *spfi)
142{
143 unsigned long timeout = jiffies + msecs_to_jiffies(10);
144
145 spfi_writel(spfi, SPFI_INTERRUPT_SDE, SPFI_INTERRUPT_CLEAR);
146 while (time_before(jiffies, timeout)) {
147 if (spfi_readl(spfi, SPFI_INTERRUPT_STATUS) &
148 SPFI_INTERRUPT_SDE)
149 return;
150 cpu_relax();
151 }
152
153 dev_err(spfi->dev, "Timed out waiting for FIFO to drain\n");
154 spfi_reset(spfi);
155}
156
157static unsigned int spfi_pio_write32(struct img_spfi *spfi, const u32 *buf,
158 unsigned int max)
159{
160 unsigned int count = 0;
161 u32 status;
162
163 while (count < max) {
164 spfi_writel(spfi, SPFI_INTERRUPT_SDFUL, SPFI_INTERRUPT_CLEAR);
165 status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
166 if (status & SPFI_INTERRUPT_SDFUL)
167 break;
168 spfi_writel(spfi, buf[count / 4], SPFI_TX_32BIT_VALID_DATA);
169 count += 4;
170 }
171
172 return count;
173}
174
175static unsigned int spfi_pio_write8(struct img_spfi *spfi, const u8 *buf,
176 unsigned int max)
177{
178 unsigned int count = 0;
179 u32 status;
180
181 while (count < max) {
182 spfi_writel(spfi, SPFI_INTERRUPT_SDFUL, SPFI_INTERRUPT_CLEAR);
183 status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
184 if (status & SPFI_INTERRUPT_SDFUL)
185 break;
186 spfi_writel(spfi, buf[count], SPFI_TX_8BIT_VALID_DATA);
187 count++;
188 }
189
190 return count;
191}
192
193static unsigned int spfi_pio_read32(struct img_spfi *spfi, u32 *buf,
194 unsigned int max)
195{
196 unsigned int count = 0;
197 u32 status;
198
199 while (count < max) {
200 spfi_writel(spfi, SPFI_INTERRUPT_GDEX32BIT,
201 SPFI_INTERRUPT_CLEAR);
202 status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
203 if (!(status & SPFI_INTERRUPT_GDEX32BIT))
204 break;
205 buf[count / 4] = spfi_readl(spfi, SPFI_RX_32BIT_VALID_DATA);
206 count += 4;
207 }
208
209 return count;
210}
211
212static unsigned int spfi_pio_read8(struct img_spfi *spfi, u8 *buf,
213 unsigned int max)
214{
215 unsigned int count = 0;
216 u32 status;
217
218 while (count < max) {
219 spfi_writel(spfi, SPFI_INTERRUPT_GDEX8BIT,
220 SPFI_INTERRUPT_CLEAR);
221 status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
222 if (!(status & SPFI_INTERRUPT_GDEX8BIT))
223 break;
224 buf[count] = spfi_readl(spfi, SPFI_RX_8BIT_VALID_DATA);
225 count++;
226 }
227
228 return count;
229}
230
231static int img_spfi_start_pio(struct spi_master *master,
232 struct spi_device *spi,
233 struct spi_transfer *xfer)
234{
235 struct img_spfi *spfi = spi_master_get_devdata(spi->master);
236 unsigned int tx_bytes = 0, rx_bytes = 0;
237 const void *tx_buf = xfer->tx_buf;
238 void *rx_buf = xfer->rx_buf;
239 unsigned long timeout;
240
241 if (tx_buf)
242 tx_bytes = xfer->len;
243 if (rx_buf)
244 rx_bytes = xfer->len;
245
246 spfi_start(spfi);
247
248 timeout = jiffies +
249 msecs_to_jiffies(xfer->len * 8 * 1000 / xfer->speed_hz + 100);
250 while ((tx_bytes > 0 || rx_bytes > 0) &&
251 time_before(jiffies, timeout)) {
252 unsigned int tx_count, rx_count;
253
254 switch (xfer->bits_per_word) {
255 case 32:
256 tx_count = spfi_pio_write32(spfi, tx_buf, tx_bytes);
257 rx_count = spfi_pio_read32(spfi, rx_buf, rx_bytes);
258 break;
259 case 8:
260 default:
261 tx_count = spfi_pio_write8(spfi, tx_buf, tx_bytes);
262 rx_count = spfi_pio_read8(spfi, rx_buf, rx_bytes);
263 break;
264 }
265
266 tx_buf += tx_count;
267 rx_buf += rx_count;
268 tx_bytes -= tx_count;
269 rx_bytes -= rx_count;
270
271 cpu_relax();
272 }
273
274 if (rx_bytes > 0 || tx_bytes > 0) {
275 dev_err(spfi->dev, "PIO transfer timed out\n");
276 spfi_reset(spfi);
277 return -ETIMEDOUT;
278 }
279
280 if (tx_buf)
281 spfi_flush_tx_fifo(spfi);
282 spfi_stop(spfi);
283
284 return 0;
285}
286
287static void img_spfi_dma_rx_cb(void *data)
288{
289 struct img_spfi *spfi = data;
290 unsigned long flags;
291
292 spin_lock_irqsave(&spfi->lock, flags);
293
294 spfi->rx_dma_busy = false;
295 if (!spfi->tx_dma_busy) {
296 spfi_stop(spfi);
297 spi_finalize_current_transfer(spfi->master);
298 }
299
300 spin_unlock_irqrestore(&spfi->lock, flags);
301}
302
303static void img_spfi_dma_tx_cb(void *data)
304{
305 struct img_spfi *spfi = data;
306 unsigned long flags;
307
308 spfi_flush_tx_fifo(spfi);
309
310 spin_lock_irqsave(&spfi->lock, flags);
311
312 spfi->tx_dma_busy = false;
313 if (!spfi->rx_dma_busy) {
314 spfi_stop(spfi);
315 spi_finalize_current_transfer(spfi->master);
316 }
317
318 spin_unlock_irqrestore(&spfi->lock, flags);
319}
320
321static int img_spfi_start_dma(struct spi_master *master,
322 struct spi_device *spi,
323 struct spi_transfer *xfer)
324{
325 struct img_spfi *spfi = spi_master_get_devdata(spi->master);
326 struct dma_async_tx_descriptor *rxdesc = NULL, *txdesc = NULL;
327 struct dma_slave_config rxconf, txconf;
328
329 spfi->rx_dma_busy = false;
330 spfi->tx_dma_busy = false;
331
332 if (xfer->rx_buf) {
333 rxconf.direction = DMA_DEV_TO_MEM;
334 switch (xfer->bits_per_word) {
335 case 32:
336 rxconf.src_addr = spfi->phys + SPFI_RX_32BIT_VALID_DATA;
337 rxconf.src_addr_width = 4;
338 rxconf.src_maxburst = 4;
339 break;
340 case 8:
341 default:
342 rxconf.src_addr = spfi->phys + SPFI_RX_8BIT_VALID_DATA;
343 rxconf.src_addr_width = 1;
344 rxconf.src_maxburst = 1;
345 }
346 dmaengine_slave_config(spfi->rx_ch, &rxconf);
347
348 rxdesc = dmaengine_prep_slave_sg(spfi->rx_ch, xfer->rx_sg.sgl,
349 xfer->rx_sg.nents,
350 DMA_DEV_TO_MEM,
351 DMA_PREP_INTERRUPT);
352 if (!rxdesc)
353 goto stop_dma;
354
355 rxdesc->callback = img_spfi_dma_rx_cb;
356 rxdesc->callback_param = spfi;
357 }
358
359 if (xfer->tx_buf) {
360 txconf.direction = DMA_MEM_TO_DEV;
361 switch (xfer->bits_per_word) {
362 case 32:
363 txconf.dst_addr = spfi->phys + SPFI_TX_32BIT_VALID_DATA;
364 txconf.dst_addr_width = 4;
365 txconf.dst_maxburst = 4;
366 break;
367 case 8:
368 default:
369 txconf.dst_addr = spfi->phys + SPFI_TX_8BIT_VALID_DATA;
370 txconf.dst_addr_width = 1;
371 txconf.dst_maxburst = 1;
372 break;
373 }
374 dmaengine_slave_config(spfi->tx_ch, &txconf);
375
376 txdesc = dmaengine_prep_slave_sg(spfi->tx_ch, xfer->tx_sg.sgl,
377 xfer->tx_sg.nents,
378 DMA_MEM_TO_DEV,
379 DMA_PREP_INTERRUPT);
380 if (!txdesc)
381 goto stop_dma;
382
383 txdesc->callback = img_spfi_dma_tx_cb;
384 txdesc->callback_param = spfi;
385 }
386
387 if (xfer->rx_buf) {
388 spfi->rx_dma_busy = true;
389 dmaengine_submit(rxdesc);
390 dma_async_issue_pending(spfi->rx_ch);
391 }
392
393 if (xfer->tx_buf) {
394 spfi->tx_dma_busy = true;
395 dmaengine_submit(txdesc);
396 dma_async_issue_pending(spfi->tx_ch);
397 }
398
399 spfi_start(spfi);
400
401 return 1;
402
403stop_dma:
404 dmaengine_terminate_all(spfi->rx_ch);
405 dmaengine_terminate_all(spfi->tx_ch);
406 return -EIO;
407}
408
409static void img_spfi_config(struct spi_master *master, struct spi_device *spi,
410 struct spi_transfer *xfer)
411{
412 struct img_spfi *spfi = spi_master_get_devdata(spi->master);
413 u32 val, div;
414
415 /*
416 * output = spfi_clk * (BITCLK / 512), where BITCLK must be a
417 * power of 2 up to 256 (where 255 == 256 since BITCLK is 8 bits)
418 */
419 div = DIV_ROUND_UP(master->max_speed_hz, xfer->speed_hz);
420 div = clamp(512 / (1 << get_count_order(div)), 1, 255);
421
422 val = spfi_readl(spfi, SPFI_DEVICE_PARAMETER(spi->chip_select));
423 val &= ~(SPFI_DEVICE_PARAMETER_BITCLK_MASK <<
424 SPFI_DEVICE_PARAMETER_BITCLK_SHIFT);
425 val |= div << SPFI_DEVICE_PARAMETER_BITCLK_SHIFT;
426 spfi_writel(spfi, val, SPFI_DEVICE_PARAMETER(spi->chip_select));
427
428 val = spfi_readl(spfi, SPFI_CONTROL);
429 val &= ~(SPFI_CONTROL_SEND_DMA | SPFI_CONTROL_GET_DMA);
430 if (xfer->tx_buf)
431 val |= SPFI_CONTROL_SEND_DMA;
432 if (xfer->rx_buf)
433 val |= SPFI_CONTROL_GET_DMA;
434 val &= ~(SPFI_CONTROL_TMODE_MASK << SPFI_CONTROL_TMODE_SHIFT);
435 if (xfer->tx_nbits == SPI_NBITS_DUAL &&
436 xfer->rx_nbits == SPI_NBITS_DUAL)
437 val |= SPFI_CONTROL_TMODE_DUAL << SPFI_CONTROL_TMODE_SHIFT;
438 else if (xfer->tx_nbits == SPI_NBITS_QUAD &&
439 xfer->rx_nbits == SPI_NBITS_QUAD)
440 val |= SPFI_CONTROL_TMODE_QUAD << SPFI_CONTROL_TMODE_SHIFT;
441 val &= ~SPFI_CONTROL_CONTINUE;
442 if (!xfer->cs_change && !list_is_last(&xfer->transfer_list,
443 &master->cur_msg->transfers))
444 val |= SPFI_CONTROL_CONTINUE;
445 spfi_writel(spfi, val, SPFI_CONTROL);
446
447 val = spfi_readl(spfi, SPFI_PORT_STATE);
448 if (spi->mode & SPI_CPHA)
449 val |= SPFI_PORT_STATE_CK_PHASE(spi->chip_select);
450 else
451 val &= ~SPFI_PORT_STATE_CK_PHASE(spi->chip_select);
452 if (spi->mode & SPI_CPOL)
453 val |= SPFI_PORT_STATE_CK_POL(spi->chip_select);
454 else
455 val &= ~SPFI_PORT_STATE_CK_POL(spi->chip_select);
456 spfi_writel(spfi, val, SPFI_PORT_STATE);
457
458 spfi_writel(spfi, xfer->len << SPFI_TRANSACTION_TSIZE_SHIFT,
459 SPFI_TRANSACTION);
460}
461
462static int img_spfi_transfer_one(struct spi_master *master,
463 struct spi_device *spi,
464 struct spi_transfer *xfer)
465{
466 struct img_spfi *spfi = spi_master_get_devdata(spi->master);
467 bool dma_reset = false;
468 unsigned long flags;
469 int ret;
470
471 /*
472 * Stop all DMA and reset the controller if the previous transaction
473 * timed-out and never completed it's DMA.
474 */
475 spin_lock_irqsave(&spfi->lock, flags);
476 if (spfi->tx_dma_busy || spfi->rx_dma_busy) {
477 dev_err(spfi->dev, "SPI DMA still busy\n");
478 dma_reset = true;
479 }
480 spin_unlock_irqrestore(&spfi->lock, flags);
481
482 if (dma_reset) {
483 dmaengine_terminate_all(spfi->tx_ch);
484 dmaengine_terminate_all(spfi->rx_ch);
485 spfi_reset(spfi);
486 }
487
488 img_spfi_config(master, spi, xfer);
489 if (master->can_dma && master->can_dma(master, spi, xfer))
490 ret = img_spfi_start_dma(master, spi, xfer);
491 else
492 ret = img_spfi_start_pio(master, spi, xfer);
493
494 return ret;
495}
496
497static void img_spfi_set_cs(struct spi_device *spi, bool enable)
498{
499 struct img_spfi *spfi = spi_master_get_devdata(spi->master);
500 u32 val;
501
502 val = spfi_readl(spfi, SPFI_PORT_STATE);
503 val &= ~(SPFI_PORT_STATE_DEV_SEL_MASK << SPFI_PORT_STATE_DEV_SEL_SHIFT);
504 val |= spi->chip_select << SPFI_PORT_STATE_DEV_SEL_SHIFT;
505 spfi_writel(spfi, val, SPFI_PORT_STATE);
506}
507
508static bool img_spfi_can_dma(struct spi_master *master, struct spi_device *spi,
509 struct spi_transfer *xfer)
510{
511 if (xfer->bits_per_word == 8 && xfer->len > SPFI_8BIT_FIFO_SIZE)
512 return true;
513 if (xfer->bits_per_word == 32 && xfer->len > SPFI_32BIT_FIFO_SIZE)
514 return true;
515 return false;
516}
517
518static irqreturn_t img_spfi_irq(int irq, void *dev_id)
519{
520 struct img_spfi *spfi = (struct img_spfi *)dev_id;
521 u32 status;
522
523 status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
524 if (status & SPFI_INTERRUPT_IACCESS) {
525 spfi_writel(spfi, SPFI_INTERRUPT_IACCESS, SPFI_INTERRUPT_CLEAR);
526 dev_err(spfi->dev, "Illegal access interrupt");
527 return IRQ_HANDLED;
528 }
529
530 return IRQ_NONE;
531}
532
533static int img_spfi_probe(struct platform_device *pdev)
534{
535 struct spi_master *master;
536 struct img_spfi *spfi;
537 struct resource *res;
538 int ret;
539
540 master = spi_alloc_master(&pdev->dev, sizeof(*spfi));
541 if (!master)
542 return -ENOMEM;
543 platform_set_drvdata(pdev, master);
544
545 spfi = spi_master_get_devdata(master);
546 spfi->dev = &pdev->dev;
547 spfi->master = master;
548 spin_lock_init(&spfi->lock);
549
550 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
551 spfi->regs = devm_ioremap_resource(spfi->dev, res);
552 if (IS_ERR(spfi->regs)) {
553 ret = PTR_ERR(spfi->regs);
554 goto put_spi;
555 }
556 spfi->phys = res->start;
557
558 spfi->irq = platform_get_irq(pdev, 0);
559 if (spfi->irq < 0) {
560 ret = spfi->irq;
561 goto put_spi;
562 }
563 ret = devm_request_irq(spfi->dev, spfi->irq, img_spfi_irq,
564 IRQ_TYPE_LEVEL_HIGH, dev_name(spfi->dev), spfi);
565 if (ret)
566 goto put_spi;
567
568 spfi->sys_clk = devm_clk_get(spfi->dev, "sys");
569 if (IS_ERR(spfi->sys_clk)) {
570 ret = PTR_ERR(spfi->sys_clk);
571 goto put_spi;
572 }
573 spfi->spfi_clk = devm_clk_get(spfi->dev, "spfi");
574 if (IS_ERR(spfi->spfi_clk)) {
575 ret = PTR_ERR(spfi->spfi_clk);
576 goto put_spi;
577 }
578
579 ret = clk_prepare_enable(spfi->sys_clk);
580 if (ret)
581 goto put_spi;
582 ret = clk_prepare_enable(spfi->spfi_clk);
583 if (ret)
584 goto disable_pclk;
585
586 spfi_reset(spfi);
587 /*
588 * Only enable the error (IACCESS) interrupt. In PIO mode we'll
589 * poll the status of the FIFOs.
590 */
591 spfi_writel(spfi, SPFI_INTERRUPT_IACCESS, SPFI_INTERRUPT_ENABLE);
592
593 master->auto_runtime_pm = true;
594 master->bus_num = pdev->id;
595 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_TX_DUAL | SPI_RX_DUAL;
596 if (of_property_read_bool(spfi->dev->of_node, "img,supports-quad-mode"))
597 master->mode_bits |= SPI_TX_QUAD | SPI_RX_QUAD;
598 master->num_chipselect = 5;
599 master->dev.of_node = pdev->dev.of_node;
600 master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(8);
601 master->max_speed_hz = clk_get_rate(spfi->spfi_clk);
602 master->min_speed_hz = master->max_speed_hz / 512;
603
604 master->set_cs = img_spfi_set_cs;
605 master->transfer_one = img_spfi_transfer_one;
606
607 spfi->tx_ch = dma_request_slave_channel(spfi->dev, "tx");
608 spfi->rx_ch = dma_request_slave_channel(spfi->dev, "rx");
609 if (!spfi->tx_ch || !spfi->rx_ch) {
610 if (spfi->tx_ch)
611 dma_release_channel(spfi->tx_ch);
612 if (spfi->rx_ch)
613 dma_release_channel(spfi->rx_ch);
614 dev_warn(spfi->dev, "Failed to get DMA channels, falling back to PIO mode\n");
615 } else {
616 master->dma_tx = spfi->tx_ch;
617 master->dma_rx = spfi->rx_ch;
618 master->can_dma = img_spfi_can_dma;
619 }
620
621 pm_runtime_set_active(spfi->dev);
622 pm_runtime_enable(spfi->dev);
623
624 ret = devm_spi_register_master(spfi->dev, master);
625 if (ret)
626 goto disable_pm;
627
628 return 0;
629
630disable_pm:
631 pm_runtime_disable(spfi->dev);
632 if (spfi->rx_ch)
633 dma_release_channel(spfi->rx_ch);
634 if (spfi->tx_ch)
635 dma_release_channel(spfi->tx_ch);
636 clk_disable_unprepare(spfi->spfi_clk);
637disable_pclk:
638 clk_disable_unprepare(spfi->sys_clk);
639put_spi:
640 spi_master_put(master);
641
642 return ret;
643}
644
645static int img_spfi_remove(struct platform_device *pdev)
646{
647 struct spi_master *master = platform_get_drvdata(pdev);
648 struct img_spfi *spfi = spi_master_get_devdata(master);
649
650 if (spfi->tx_ch)
651 dma_release_channel(spfi->tx_ch);
652 if (spfi->rx_ch)
653 dma_release_channel(spfi->rx_ch);
654
655 pm_runtime_disable(spfi->dev);
656 if (!pm_runtime_status_suspended(spfi->dev)) {
657 clk_disable_unprepare(spfi->spfi_clk);
658 clk_disable_unprepare(spfi->sys_clk);
659 }
660
661 spi_master_put(master);
662
663 return 0;
664}
665
666#ifdef CONFIG_PM_RUNTIME
667static int img_spfi_runtime_suspend(struct device *dev)
668{
669 struct spi_master *master = dev_get_drvdata(dev);
670 struct img_spfi *spfi = spi_master_get_devdata(master);
671
672 clk_disable_unprepare(spfi->spfi_clk);
673 clk_disable_unprepare(spfi->sys_clk);
674
675 return 0;
676}
677
678static int img_spfi_runtime_resume(struct device *dev)
679{
680 struct spi_master *master = dev_get_drvdata(dev);
681 struct img_spfi *spfi = spi_master_get_devdata(master);
682 int ret;
683
684 ret = clk_prepare_enable(spfi->sys_clk);
685 if (ret)
686 return ret;
687 ret = clk_prepare_enable(spfi->spfi_clk);
688 if (ret) {
689 clk_disable_unprepare(spfi->sys_clk);
690 return ret;
691 }
692
693 return 0;
694}
695#endif /* CONFIG_PM_RUNTIME */
696
697#ifdef CONFIG_PM_SLEEP
/* System suspend: quiesce the SPI queue via the core helper. */
static int img_spfi_suspend(struct device *dev)
{
	return spi_master_suspend(dev_get_drvdata(dev));
}
704
/*
 * System resume: bring the controller up via runtime PM, reset it, then
 * restart the SPI queue.
 */
static int img_spfi_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct img_spfi *spfi = spi_master_get_devdata(master);
	int ret;

	/*
	 * pm_runtime_get_sync() returns 1 when the device was already
	 * active, so only negative values are errors; also drop the usage
	 * count again on failure so it doesn't stay unbalanced.
	 */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		return ret;
	}
	spfi_reset(spfi);
	pm_runtime_put(dev);

	return spi_master_resume(master);
}
719#endif /* CONFIG_PM_SLEEP */
720
721static const struct dev_pm_ops img_spfi_pm_ops = {
722 SET_RUNTIME_PM_OPS(img_spfi_runtime_suspend, img_spfi_runtime_resume,
723 NULL)
724 SET_SYSTEM_SLEEP_PM_OPS(img_spfi_suspend, img_spfi_resume)
725};
726
727static const struct of_device_id img_spfi_of_match[] = {
728 { .compatible = "img,spfi", },
729 { },
730};
731MODULE_DEVICE_TABLE(of, img_spfi_of_match);
732
733static struct platform_driver img_spfi_driver = {
734 .driver = {
735 .name = "img-spfi",
736 .pm = &img_spfi_pm_ops,
737 .of_match_table = of_match_ptr(img_spfi_of_match),
738 },
739 .probe = img_spfi_probe,
740 .remove = img_spfi_remove,
741};
742module_platform_driver(img_spfi_driver);
743
744MODULE_DESCRIPTION("IMG SPFI controller driver");
745MODULE_AUTHOR("Andrew Bresticker <abrestic@chromium.org>");
746MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-meson-spifc.c b/drivers/spi/spi-meson-spifc.c
new file mode 100644
index 000000000000..0e48f8c2037d
--- /dev/null
+++ b/drivers/spi/spi-meson-spifc.c
@@ -0,0 +1,462 @@
1/*
2 * Driver for Amlogic Meson SPI flash controller (SPIFC)
3 *
4 * Copyright (C) 2014 Beniamino Galvani <b.galvani@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * version 2 as published by the Free Software Foundation.
9 *
10 * You should have received a copy of the GNU General Public License
11 * along with this program. If not, see <http://www.gnu.org/licenses/>.
12 */
13
14#include <linux/clk.h>
15#include <linux/delay.h>
16#include <linux/device.h>
17#include <linux/io.h>
18#include <linux/kernel.h>
19#include <linux/module.h>
20#include <linux/of.h>
21#include <linux/platform_device.h>
22#include <linux/pm_runtime.h>
23#include <linux/regmap.h>
24#include <linux/spi/spi.h>
25#include <linux/types.h>
26
27/* register map */
28#define REG_CMD 0x00
29#define REG_ADDR 0x04
30#define REG_CTRL 0x08
31#define REG_CTRL1 0x0c
32#define REG_STATUS 0x10
33#define REG_CTRL2 0x14
34#define REG_CLOCK 0x18
35#define REG_USER 0x1c
36#define REG_USER1 0x20
37#define REG_USER2 0x24
38#define REG_USER3 0x28
39#define REG_USER4 0x2c
40#define REG_SLAVE 0x30
41#define REG_SLAVE1 0x34
42#define REG_SLAVE2 0x38
43#define REG_SLAVE3 0x3c
44#define REG_C0 0x40
45#define REG_B8 0x60
46#define REG_MAX 0x7c
47
48/* register fields */
49#define CMD_USER BIT(18)
50#define CTRL_ENABLE_AHB BIT(17)
51#define CLOCK_SOURCE BIT(31)
52#define CLOCK_DIV_SHIFT 12
53#define CLOCK_DIV_MASK (0x3f << CLOCK_DIV_SHIFT)
54#define CLOCK_CNT_HIGH_SHIFT 6
55#define CLOCK_CNT_HIGH_MASK (0x3f << CLOCK_CNT_HIGH_SHIFT)
56#define CLOCK_CNT_LOW_SHIFT 0
57#define CLOCK_CNT_LOW_MASK (0x3f << CLOCK_CNT_LOW_SHIFT)
58#define USER_DIN_EN_MS BIT(0)
59#define USER_CMP_MODE BIT(2)
60#define USER_UC_DOUT_SEL BIT(27)
61#define USER_UC_DIN_SEL BIT(28)
62#define USER_UC_MASK ((BIT(5) - 1) << 27)
63#define USER1_BN_UC_DOUT_SHIFT 17
64#define USER1_BN_UC_DOUT_MASK (0xff << 16)
65#define USER1_BN_UC_DIN_SHIFT 8
66#define USER1_BN_UC_DIN_MASK (0xff << 8)
67#define USER4_CS_ACT BIT(30)
68#define SLAVE_TRST_DONE BIT(4)
69#define SLAVE_OP_MODE BIT(30)
70#define SLAVE_SW_RST BIT(31)
71
72#define SPIFC_BUFFER_SIZE 64
73
74/**
75 * struct meson_spifc
76 * @master: the SPI master
77 * @regmap: regmap for device registers
78 * @clk: input clock of the built-in baud rate generator
 79 * @dev: the device structure
80 */
81struct meson_spifc {
82 struct spi_master *master;
83 struct regmap *regmap;
84 struct clk *clk;
85 struct device *dev;
86};
87
88static struct regmap_config spifc_regmap_config = {
89 .reg_bits = 32,
90 .val_bits = 32,
91 .reg_stride = 4,
92 .max_register = REG_MAX,
93};
94
95/**
96 * meson_spifc_wait_ready() - wait for the current operation to terminate
97 * @spifc: the Meson SPI device
98 * Return: 0 on success, a negative value on error
99 */
100static int meson_spifc_wait_ready(struct meson_spifc *spifc)
101{
102 unsigned long deadline = jiffies + msecs_to_jiffies(5);
103 u32 data;
104
105 do {
106 regmap_read(spifc->regmap, REG_SLAVE, &data);
107 if (data & SLAVE_TRST_DONE)
108 return 0;
109 cond_resched();
110 } while (!time_after(jiffies, deadline));
111
112 return -ETIMEDOUT;
113}
114
115/**
116 * meson_spifc_drain_buffer() - copy data from device buffer to memory
117 * @spifc: the Meson SPI device
118 * @buf: the destination buffer
119 * @len: number of bytes to copy
120 */
121static void meson_spifc_drain_buffer(struct meson_spifc *spifc, u8 *buf,
122 int len)
123{
124 u32 data;
125 int i = 0;
126
127 while (i < len) {
128 regmap_read(spifc->regmap, REG_C0 + i, &data);
129
130 if (len - i >= 4) {
131 *((u32 *)buf) = data;
132 buf += 4;
133 } else {
134 memcpy(buf, &data, len - i);
135 break;
136 }
137 i += 4;
138 }
139}
140
141/**
142 * meson_spifc_fill_buffer() - copy data from memory to device buffer
143 * @spifc: the Meson SPI device
144 * @buf: the source buffer
145 * @len: number of bytes to copy
146 */
147static void meson_spifc_fill_buffer(struct meson_spifc *spifc, const u8 *buf,
148 int len)
149{
150 u32 data;
151 int i = 0;
152
153 while (i < len) {
154 if (len - i >= 4)
155 data = *(u32 *)buf;
156 else
157 memcpy(&data, buf, len - i);
158
159 regmap_write(spifc->regmap, REG_C0 + i, data);
160
161 buf += 4;
162 i += 4;
163 }
164}
165
166/**
167 * meson_spifc_setup_speed() - program the clock divider
168 * @spifc: the Meson SPI device
169 * @speed: desired speed in Hz
170 */
171static void meson_spifc_setup_speed(struct meson_spifc *spifc, u32 speed)
172{
173 unsigned long parent, value;
174 int n;
175
176 parent = clk_get_rate(spifc->clk);
177 n = max_t(int, parent / speed - 1, 1);
178
179 dev_dbg(spifc->dev, "parent %lu, speed %u, n %d\n", parent,
180 speed, n);
181
182 value = (n << CLOCK_DIV_SHIFT) & CLOCK_DIV_MASK;
183 value |= (n << CLOCK_CNT_LOW_SHIFT) & CLOCK_CNT_LOW_MASK;
184 value |= (((n + 1) / 2 - 1) << CLOCK_CNT_HIGH_SHIFT) &
185 CLOCK_CNT_HIGH_MASK;
186
187 regmap_write(spifc->regmap, REG_CLOCK, value);
188}
189
190/**
191 * meson_spifc_txrx() - transfer a chunk of data
192 * @spifc: the Meson SPI device
193 * @xfer: the current SPI transfer
194 * @offset: offset of the data to transfer
195 * @len: length of the data to transfer
196 * @last_xfer: whether this is the last transfer of the message
197 * @last_chunk: whether this is the last chunk of the transfer
198 * Return: 0 on success, a negative value on error
199 */
200static int meson_spifc_txrx(struct meson_spifc *spifc,
201 struct spi_transfer *xfer,
202 int offset, int len, bool last_xfer,
203 bool last_chunk)
204{
205 bool keep_cs = true;
206 int ret;
207
208 if (xfer->tx_buf)
209 meson_spifc_fill_buffer(spifc, xfer->tx_buf + offset, len);
210
211 /* enable DOUT stage */
212 regmap_update_bits(spifc->regmap, REG_USER, USER_UC_MASK,
213 USER_UC_DOUT_SEL);
214 regmap_write(spifc->regmap, REG_USER1,
215 (8 * len - 1) << USER1_BN_UC_DOUT_SHIFT);
216
217 /* enable data input during DOUT */
218 regmap_update_bits(spifc->regmap, REG_USER, USER_DIN_EN_MS,
219 USER_DIN_EN_MS);
220
221 if (last_chunk) {
222 if (last_xfer)
223 keep_cs = xfer->cs_change;
224 else
225 keep_cs = !xfer->cs_change;
226 }
227
228 regmap_update_bits(spifc->regmap, REG_USER4, USER4_CS_ACT,
229 keep_cs ? USER4_CS_ACT : 0);
230
231 /* clear transition done bit */
232 regmap_update_bits(spifc->regmap, REG_SLAVE, SLAVE_TRST_DONE, 0);
233 /* start transfer */
234 regmap_update_bits(spifc->regmap, REG_CMD, CMD_USER, CMD_USER);
235
236 ret = meson_spifc_wait_ready(spifc);
237
238 if (!ret && xfer->rx_buf)
239 meson_spifc_drain_buffer(spifc, xfer->rx_buf + offset, len);
240
241 return ret;
242}
243
244/**
245 * meson_spifc_transfer_one() - perform a single transfer
246 * @master: the SPI master
247 * @spi: the SPI device
248 * @xfer: the current SPI transfer
249 * Return: 0 on success, a negative value on error
250 */
251static int meson_spifc_transfer_one(struct spi_master *master,
252 struct spi_device *spi,
253 struct spi_transfer *xfer)
254{
255 struct meson_spifc *spifc = spi_master_get_devdata(master);
256 int len, done = 0, ret = 0;
257
258 meson_spifc_setup_speed(spifc, xfer->speed_hz);
259
260 regmap_update_bits(spifc->regmap, REG_CTRL, CTRL_ENABLE_AHB, 0);
261
262 while (done < xfer->len && !ret) {
263 len = min_t(int, xfer->len - done, SPIFC_BUFFER_SIZE);
264 ret = meson_spifc_txrx(spifc, xfer, done, len,
265 spi_transfer_is_last(master, xfer),
266 done + len >= xfer->len);
267 done += len;
268 }
269
270 regmap_update_bits(spifc->regmap, REG_CTRL, CTRL_ENABLE_AHB,
271 CTRL_ENABLE_AHB);
272
273 return ret;
274}
275
276/**
277 * meson_spifc_hw_init() - reset and initialize the SPI controller
278 * @spifc: the Meson SPI device
279 */
280static void meson_spifc_hw_init(struct meson_spifc *spifc)
281{
282 /* reset device */
283 regmap_update_bits(spifc->regmap, REG_SLAVE, SLAVE_SW_RST,
284 SLAVE_SW_RST);
285 /* disable compatible mode */
286 regmap_update_bits(spifc->regmap, REG_USER, USER_CMP_MODE, 0);
287 /* set master mode */
288 regmap_update_bits(spifc->regmap, REG_SLAVE, SLAVE_OP_MODE, 0);
289}
290
291static int meson_spifc_probe(struct platform_device *pdev)
292{
293 struct spi_master *master;
294 struct meson_spifc *spifc;
295 struct resource *res;
296 void __iomem *base;
297 unsigned int rate;
298 int ret = 0;
299
300 master = spi_alloc_master(&pdev->dev, sizeof(struct meson_spifc));
301 if (!master)
302 return -ENOMEM;
303
304 platform_set_drvdata(pdev, master);
305
306 spifc = spi_master_get_devdata(master);
307 spifc->dev = &pdev->dev;
308
309 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
310 base = devm_ioremap_resource(spifc->dev, res);
311 if (IS_ERR(base)) {
312 ret = PTR_ERR(base);
313 goto out_err;
314 }
315
316 spifc->regmap = devm_regmap_init_mmio(spifc->dev, base,
317 &spifc_regmap_config);
318 if (IS_ERR(spifc->regmap)) {
319 ret = PTR_ERR(spifc->regmap);
320 goto out_err;
321 }
322
323 spifc->clk = devm_clk_get(spifc->dev, NULL);
324 if (IS_ERR(spifc->clk)) {
325 dev_err(spifc->dev, "missing clock\n");
326 ret = PTR_ERR(spifc->clk);
327 goto out_err;
328 }
329
330 ret = clk_prepare_enable(spifc->clk);
331 if (ret) {
332 dev_err(spifc->dev, "can't prepare clock\n");
333 goto out_err;
334 }
335
336 rate = clk_get_rate(spifc->clk);
337
338 master->num_chipselect = 1;
339 master->dev.of_node = pdev->dev.of_node;
340 master->bits_per_word_mask = SPI_BPW_MASK(8);
341 master->auto_runtime_pm = true;
342 master->transfer_one = meson_spifc_transfer_one;
343 master->min_speed_hz = rate >> 6;
344 master->max_speed_hz = rate >> 1;
345
346 meson_spifc_hw_init(spifc);
347
348 pm_runtime_set_active(spifc->dev);
349 pm_runtime_enable(spifc->dev);
350
351 ret = devm_spi_register_master(spifc->dev, master);
352 if (ret) {
353 dev_err(spifc->dev, "failed to register spi master\n");
354 goto out_clk;
355 }
356
357 return 0;
358out_clk:
359 clk_disable_unprepare(spifc->clk);
360out_err:
361 spi_master_put(master);
362 return ret;
363}
364
/**
 * meson_spifc_remove() - remove the SPIFC platform device
 * @pdev: the platform device
 *
 * Return: always 0
 */
static int meson_spifc_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct meson_spifc *spifc = spi_master_get_devdata(master);

	/* force a runtime resume so the clock is certainly prepared and
	 * enabled before we gate it for good below */
	pm_runtime_get_sync(&pdev->dev);
	clk_disable_unprepare(spifc->clk);
	pm_runtime_disable(&pdev->dev);

	return 0;
}
376
377#ifdef CONFIG_PM_SLEEP
378static int meson_spifc_suspend(struct device *dev)
379{
380 struct spi_master *master = dev_get_drvdata(dev);
381 struct meson_spifc *spifc = spi_master_get_devdata(master);
382 int ret;
383
384 ret = spi_master_suspend(master);
385 if (ret)
386 return ret;
387
388 if (!pm_runtime_suspended(dev))
389 clk_disable_unprepare(spifc->clk);
390
391 return 0;
392}
393
394static int meson_spifc_resume(struct device *dev)
395{
396 struct spi_master *master = dev_get_drvdata(dev);
397 struct meson_spifc *spifc = spi_master_get_devdata(master);
398 int ret;
399
400 if (!pm_runtime_suspended(dev)) {
401 ret = clk_prepare_enable(spifc->clk);
402 if (ret)
403 return ret;
404 }
405
406 meson_spifc_hw_init(spifc);
407
408 ret = spi_master_resume(master);
409 if (ret)
410 clk_disable_unprepare(spifc->clk);
411
412 return ret;
413}
414#endif /* CONFIG_PM_SLEEP */
415
/*
 * Runtime PM callbacks. Guarded by CONFIG_PM rather than the removed
 * CONFIG_PM_RUNTIME symbol: once CONFIG_PM_RUNTIME is gone these
 * functions would be compiled out while SET_RUNTIME_PM_OPS (enabled
 * under CONFIG_PM) still references them, breaking the build.
 */
#ifdef CONFIG_PM
/**
 * meson_spifc_runtime_suspend() - runtime PM suspend callback
 * @dev: the controller device
 *
 * Gates the controller clock while the device is idle.
 *
 * Return: always 0
 */
static int meson_spifc_runtime_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct meson_spifc *spifc = spi_master_get_devdata(master);

	clk_disable_unprepare(spifc->clk);

	return 0;
}

/**
 * meson_spifc_runtime_resume() - runtime PM resume callback
 * @dev: the controller device
 *
 * Return: 0 on success, a negative errno on failure
 */
static int meson_spifc_runtime_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct meson_spifc *spifc = spi_master_get_devdata(master);

	return clk_prepare_enable(spifc->clk);
}
#endif /* CONFIG_PM */
435
/* system sleep and runtime PM hooks; the SET_* macros expand to empty
 * initializers when the corresponding PM support is not configured */
static const struct dev_pm_ops meson_spifc_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(meson_spifc_suspend, meson_spifc_resume)
	SET_RUNTIME_PM_OPS(meson_spifc_runtime_suspend,
			   meson_spifc_runtime_resume,
			   NULL)
};
442
443static const struct of_device_id meson_spifc_dt_match[] = {
444 { .compatible = "amlogic,meson6-spifc", },
445 { },
446};
447
/* platform driver glue: probe/remove plus PM ops and the OF match table */
static struct platform_driver meson_spifc_driver = {
	.probe	= meson_spifc_probe,
	.remove	= meson_spifc_remove,
	.driver	= {
		.name		= "meson-spifc",
		.of_match_table	= of_match_ptr(meson_spifc_dt_match),
		.pm		= &meson_spifc_pm_ops,
	},
};
457
/* registers the driver and generates module init/exit boilerplate */
module_platform_driver(meson_spifc_driver);

MODULE_AUTHOR("Beniamino Galvani <b.galvani@gmail.com>");
MODULE_DESCRIPTION("Amlogic Meson SPIFC driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c
index 51460878af04..06a11546a1a7 100644
--- a/drivers/spi/spi-mxs.c
+++ b/drivers/spi/spi-mxs.c
@@ -182,7 +182,6 @@ static int mxs_spi_txrx_dma(struct mxs_spi *spi,
182 int min, ret; 182 int min, ret;
183 u32 ctrl0; 183 u32 ctrl0;
184 struct page *vm_page; 184 struct page *vm_page;
185 void *sg_buf;
186 struct { 185 struct {
187 u32 pio[4]; 186 u32 pio[4];
188 struct scatterlist sg; 187 struct scatterlist sg;
@@ -232,13 +231,14 @@ static int mxs_spi_txrx_dma(struct mxs_spi *spi,
232 ret = -ENOMEM; 231 ret = -ENOMEM;
233 goto err_vmalloc; 232 goto err_vmalloc;
234 } 233 }
235 sg_buf = page_address(vm_page) + 234
236 ((size_t)buf & ~PAGE_MASK); 235 sg_init_table(&dma_xfer[sg_count].sg, 1);
236 sg_set_page(&dma_xfer[sg_count].sg, vm_page,
237 min, offset_in_page(buf));
237 } else { 238 } else {
238 sg_buf = buf; 239 sg_init_one(&dma_xfer[sg_count].sg, buf, min);
239 } 240 }
240 241
241 sg_init_one(&dma_xfer[sg_count].sg, sg_buf, min);
242 ret = dma_map_sg(ssp->dev, &dma_xfer[sg_count].sg, 1, 242 ret = dma_map_sg(ssp->dev, &dma_xfer[sg_count].sg, 1,
243 (flags & TXRX_WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE); 243 (flags & TXRX_WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
244 244
@@ -511,7 +511,7 @@ static int mxs_spi_probe(struct platform_device *pdev)
511 init_completion(&spi->c); 511 init_completion(&spi->c);
512 512
513 ret = devm_request_irq(&pdev->dev, irq_err, mxs_ssp_irq_handler, 0, 513 ret = devm_request_irq(&pdev->dev, irq_err, mxs_ssp_irq_handler, 0,
514 DRIVER_NAME, ssp); 514 dev_name(&pdev->dev), ssp);
515 if (ret) 515 if (ret)
516 goto out_master_free; 516 goto out_master_free;
517 517
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index fc2dd8441608..89ca162801da 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -2377,7 +2377,7 @@ static int pl022_runtime_resume(struct device *dev)
2377 2377
2378static const struct dev_pm_ops pl022_dev_pm_ops = { 2378static const struct dev_pm_ops pl022_dev_pm_ops = {
2379 SET_SYSTEM_SLEEP_PM_OPS(pl022_suspend, pl022_resume) 2379 SET_SYSTEM_SLEEP_PM_OPS(pl022_suspend, pl022_resume)
2380 SET_PM_RUNTIME_PM_OPS(pl022_runtime_suspend, pl022_runtime_resume, NULL) 2380 SET_RUNTIME_PM_OPS(pl022_runtime_suspend, pl022_runtime_resume, NULL)
2381}; 2381};
2382 2382
2383static struct vendor_data vendor_arm = { 2383static struct vendor_data vendor_arm = {
diff --git a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c
index 6beee8ce2d68..fa7399e84bbb 100644
--- a/drivers/spi/spi-pxa2xx-pci.c
+++ b/drivers/spi/spi-pxa2xx-pci.c
@@ -19,6 +19,7 @@ enum {
19 PORT_BSW0, 19 PORT_BSW0,
20 PORT_BSW1, 20 PORT_BSW1,
21 PORT_BSW2, 21 PORT_BSW2,
22 PORT_QUARK_X1000,
22}; 23};
23 24
24struct pxa_spi_info { 25struct pxa_spi_info {
@@ -92,6 +93,12 @@ static struct pxa_spi_info spi_info_configs[] = {
92 .tx_param = &bsw2_tx_param, 93 .tx_param = &bsw2_tx_param,
93 .rx_param = &bsw2_rx_param, 94 .rx_param = &bsw2_rx_param,
94 }, 95 },
96 [PORT_QUARK_X1000] = {
97 .type = QUARK_X1000_SSP,
98 .port_id = -1,
99 .num_chipselect = 1,
100 .max_clk_rate = 50000000,
101 },
95}; 102};
96 103
97static int pxa2xx_spi_pci_probe(struct pci_dev *dev, 104static int pxa2xx_spi_pci_probe(struct pci_dev *dev,
@@ -191,6 +198,7 @@ static void pxa2xx_spi_pci_remove(struct pci_dev *dev)
191 198
192static const struct pci_device_id pxa2xx_spi_pci_devices[] = { 199static const struct pci_device_id pxa2xx_spi_pci_devices[] = {
193 { PCI_VDEVICE(INTEL, 0x2e6a), PORT_CE4100 }, 200 { PCI_VDEVICE(INTEL, 0x2e6a), PORT_CE4100 },
201 { PCI_VDEVICE(INTEL, 0x0935), PORT_QUARK_X1000 },
194 { PCI_VDEVICE(INTEL, 0x0f0e), PORT_BYT }, 202 { PCI_VDEVICE(INTEL, 0x0f0e), PORT_BYT },
195 { PCI_VDEVICE(INTEL, 0x228e), PORT_BSW0 }, 203 { PCI_VDEVICE(INTEL, 0x228e), PORT_BSW0 },
196 { PCI_VDEVICE(INTEL, 0x2290), PORT_BSW1 }, 204 { PCI_VDEVICE(INTEL, 0x2290), PORT_BSW1 },
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 9e9e0f971e6c..1a1df5092aca 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -63,10 +63,64 @@ MODULE_ALIAS("platform:pxa2xx-spi");
63 | SSCR1_RFT | SSCR1_TFT | SSCR1_MWDS \ 63 | SSCR1_RFT | SSCR1_TFT | SSCR1_MWDS \
64 | SSCR1_SPH | SSCR1_SPO | SSCR1_LBM) 64 | SSCR1_SPH | SSCR1_SPO | SSCR1_LBM)
65 65
66#define QUARK_X1000_SSCR1_CHANGE_MASK (QUARK_X1000_SSCR1_STRF \
67 | QUARK_X1000_SSCR1_EFWR \
68 | QUARK_X1000_SSCR1_RFT \
69 | QUARK_X1000_SSCR1_TFT \
70 | SSCR1_SPH | SSCR1_SPO | SSCR1_LBM)
71
66#define LPSS_RX_THRESH_DFLT 64 72#define LPSS_RX_THRESH_DFLT 64
67#define LPSS_TX_LOTHRESH_DFLT 160 73#define LPSS_TX_LOTHRESH_DFLT 160
68#define LPSS_TX_HITHRESH_DFLT 224 74#define LPSS_TX_HITHRESH_DFLT 224
69 75
76struct quark_spi_rate {
77 u32 bitrate;
78 u32 dds_clk_rate;
79 u32 clk_div;
80};
81
82/*
83 * 'rate', 'dds', 'clk_div' lookup table, which is defined in
84 * the Quark SPI datasheet.
85 */
86static const struct quark_spi_rate quark_spi_rate_table[] = {
87/* bitrate, dds_clk_rate, clk_div */
88 {50000000, 0x800000, 0},
89 {40000000, 0x666666, 0},
90 {25000000, 0x400000, 0},
91 {20000000, 0x666666, 1},
92 {16667000, 0x800000, 2},
93 {13333000, 0x666666, 2},
94 {12500000, 0x200000, 0},
95 {10000000, 0x800000, 4},
96 {8000000, 0x666666, 4},
97 {6250000, 0x400000, 3},
98 {5000000, 0x400000, 4},
99 {4000000, 0x666666, 9},
100 {3125000, 0x80000, 0},
101 {2500000, 0x400000, 9},
102 {2000000, 0x666666, 19},
103 {1563000, 0x40000, 0},
104 {1250000, 0x200000, 9},
105 {1000000, 0x400000, 24},
106 {800000, 0x666666, 49},
107 {781250, 0x20000, 0},
108 {625000, 0x200000, 19},
109 {500000, 0x400000, 49},
110 {400000, 0x666666, 99},
111 {390625, 0x10000, 0},
112 {250000, 0x400000, 99},
113 {200000, 0x666666, 199},
114 {195313, 0x8000, 0},
115 {125000, 0x100000, 49},
116 {100000, 0x200000, 124},
117 {50000, 0x100000, 124},
118 {25000, 0x80000, 124},
119 {10016, 0x20000, 77},
120 {5040, 0x20000, 154},
121 {1002, 0x8000, 194},
122};
123
70/* Offset from drv_data->lpss_base */ 124/* Offset from drv_data->lpss_base */
71#define GENERAL_REG 0x08 125#define GENERAL_REG 0x08
72#define GENERAL_REG_RXTO_HOLDOFF_DISABLE BIT(24) 126#define GENERAL_REG_RXTO_HOLDOFF_DISABLE BIT(24)
@@ -80,6 +134,96 @@ static bool is_lpss_ssp(const struct driver_data *drv_data)
80 return drv_data->ssp_type == LPSS_SSP; 134 return drv_data->ssp_type == LPSS_SSP;
81} 135}
82 136
137static bool is_quark_x1000_ssp(const struct driver_data *drv_data)
138{
139 return drv_data->ssp_type == QUARK_X1000_SSP;
140}
141
142static u32 pxa2xx_spi_get_ssrc1_change_mask(const struct driver_data *drv_data)
143{
144 switch (drv_data->ssp_type) {
145 case QUARK_X1000_SSP:
146 return QUARK_X1000_SSCR1_CHANGE_MASK;
147 default:
148 return SSCR1_CHANGE_MASK;
149 }
150}
151
152static u32
153pxa2xx_spi_get_rx_default_thre(const struct driver_data *drv_data)
154{
155 switch (drv_data->ssp_type) {
156 case QUARK_X1000_SSP:
157 return RX_THRESH_QUARK_X1000_DFLT;
158 default:
159 return RX_THRESH_DFLT;
160 }
161}
162
163static bool pxa2xx_spi_txfifo_full(const struct driver_data *drv_data)
164{
165 void __iomem *reg = drv_data->ioaddr;
166 u32 mask;
167
168 switch (drv_data->ssp_type) {
169 case QUARK_X1000_SSP:
170 mask = QUARK_X1000_SSSR_TFL_MASK;
171 break;
172 default:
173 mask = SSSR_TFL_MASK;
174 break;
175 }
176
177 return (read_SSSR(reg) & mask) == mask;
178}
179
180static void pxa2xx_spi_clear_rx_thre(const struct driver_data *drv_data,
181 u32 *sccr1_reg)
182{
183 u32 mask;
184
185 switch (drv_data->ssp_type) {
186 case QUARK_X1000_SSP:
187 mask = QUARK_X1000_SSCR1_RFT;
188 break;
189 default:
190 mask = SSCR1_RFT;
191 break;
192 }
193 *sccr1_reg &= ~mask;
194}
195
196static void pxa2xx_spi_set_rx_thre(const struct driver_data *drv_data,
197 u32 *sccr1_reg, u32 threshold)
198{
199 switch (drv_data->ssp_type) {
200 case QUARK_X1000_SSP:
201 *sccr1_reg |= QUARK_X1000_SSCR1_RxTresh(threshold);
202 break;
203 default:
204 *sccr1_reg |= SSCR1_RxTresh(threshold);
205 break;
206 }
207}
208
209static u32 pxa2xx_configure_sscr0(const struct driver_data *drv_data,
210 u32 clk_div, u8 bits)
211{
212 switch (drv_data->ssp_type) {
213 case QUARK_X1000_SSP:
214 return clk_div
215 | QUARK_X1000_SSCR0_Motorola
216 | QUARK_X1000_SSCR0_DataSize(bits > 32 ? 8 : bits)
217 | SSCR0_SSE;
218 default:
219 return clk_div
220 | SSCR0_Motorola
221 | SSCR0_DataSize(bits > 16 ? bits - 16 : bits)
222 | SSCR0_SSE
223 | (bits > 16 ? SSCR0_EDSS : 0);
224 }
225}
226
83/* 227/*
84 * Read and write LPSS SSP private registers. Caller must first check that 228 * Read and write LPSS SSP private registers. Caller must first check that
85 * is_lpss_ssp() returns true before these can be called. 229 * is_lpss_ssp() returns true before these can be called.
@@ -234,7 +378,7 @@ static int null_writer(struct driver_data *drv_data)
234 void __iomem *reg = drv_data->ioaddr; 378 void __iomem *reg = drv_data->ioaddr;
235 u8 n_bytes = drv_data->n_bytes; 379 u8 n_bytes = drv_data->n_bytes;
236 380
237 if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK) 381 if (pxa2xx_spi_txfifo_full(drv_data)
238 || (drv_data->tx == drv_data->tx_end)) 382 || (drv_data->tx == drv_data->tx_end))
239 return 0; 383 return 0;
240 384
@@ -262,7 +406,7 @@ static int u8_writer(struct driver_data *drv_data)
262{ 406{
263 void __iomem *reg = drv_data->ioaddr; 407 void __iomem *reg = drv_data->ioaddr;
264 408
265 if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK) 409 if (pxa2xx_spi_txfifo_full(drv_data)
266 || (drv_data->tx == drv_data->tx_end)) 410 || (drv_data->tx == drv_data->tx_end))
267 return 0; 411 return 0;
268 412
@@ -289,7 +433,7 @@ static int u16_writer(struct driver_data *drv_data)
289{ 433{
290 void __iomem *reg = drv_data->ioaddr; 434 void __iomem *reg = drv_data->ioaddr;
291 435
292 if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK) 436 if (pxa2xx_spi_txfifo_full(drv_data)
293 || (drv_data->tx == drv_data->tx_end)) 437 || (drv_data->tx == drv_data->tx_end))
294 return 0; 438 return 0;
295 439
@@ -316,7 +460,7 @@ static int u32_writer(struct driver_data *drv_data)
316{ 460{
317 void __iomem *reg = drv_data->ioaddr; 461 void __iomem *reg = drv_data->ioaddr;
318 462
319 if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK) 463 if (pxa2xx_spi_txfifo_full(drv_data)
320 || (drv_data->tx == drv_data->tx_end)) 464 || (drv_data->tx == drv_data->tx_end))
321 return 0; 465 return 0;
322 466
@@ -508,8 +652,9 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
508 * remaining RX bytes. 652 * remaining RX bytes.
509 */ 653 */
510 if (pxa25x_ssp_comp(drv_data)) { 654 if (pxa25x_ssp_comp(drv_data)) {
655 u32 rx_thre;
511 656
512 sccr1_reg &= ~SSCR1_RFT; 657 pxa2xx_spi_clear_rx_thre(drv_data, &sccr1_reg);
513 658
514 bytes_left = drv_data->rx_end - drv_data->rx; 659 bytes_left = drv_data->rx_end - drv_data->rx;
515 switch (drv_data->n_bytes) { 660 switch (drv_data->n_bytes) {
@@ -519,10 +664,11 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
519 bytes_left >>= 1; 664 bytes_left >>= 1;
520 } 665 }
521 666
522 if (bytes_left > RX_THRESH_DFLT) 667 rx_thre = pxa2xx_spi_get_rx_default_thre(drv_data);
523 bytes_left = RX_THRESH_DFLT; 668 if (rx_thre > bytes_left)
669 rx_thre = bytes_left;
524 670
525 sccr1_reg |= SSCR1_RxTresh(bytes_left); 671 pxa2xx_spi_set_rx_thre(drv_data, &sccr1_reg, rx_thre);
526 } 672 }
527 write_SSCR1(sccr1_reg, reg); 673 write_SSCR1(sccr1_reg, reg);
528 } 674 }
@@ -585,6 +731,28 @@ static irqreturn_t ssp_int(int irq, void *dev_id)
585 return drv_data->transfer_handler(drv_data); 731 return drv_data->transfer_handler(drv_data);
586} 732}
587 733
734/*
735 * The Quark SPI data sheet gives a table, and for the given 'rate',
736 * the 'dds' and 'clk_div' can be found in the table.
737 */
738static u32 quark_x1000_set_clk_regvals(u32 rate, u32 *dds, u32 *clk_div)
739{
740 unsigned int i;
741
742 for (i = 0; i < ARRAY_SIZE(quark_spi_rate_table); i++) {
743 if (rate >= quark_spi_rate_table[i].bitrate) {
744 *dds = quark_spi_rate_table[i].dds_clk_rate;
745 *clk_div = quark_spi_rate_table[i].clk_div;
746 return quark_spi_rate_table[i].bitrate;
747 }
748 }
749
750 *dds = quark_spi_rate_table[i-1].dds_clk_rate;
751 *clk_div = quark_spi_rate_table[i-1].clk_div;
752
753 return quark_spi_rate_table[i-1].bitrate;
754}
755
588static unsigned int ssp_get_clk_div(struct driver_data *drv_data, int rate) 756static unsigned int ssp_get_clk_div(struct driver_data *drv_data, int rate)
589{ 757{
590 unsigned long ssp_clk = drv_data->max_clk_rate; 758 unsigned long ssp_clk = drv_data->max_clk_rate;
@@ -598,6 +766,20 @@ static unsigned int ssp_get_clk_div(struct driver_data *drv_data, int rate)
598 return ((ssp_clk / rate - 1) & 0xfff) << 8; 766 return ((ssp_clk / rate - 1) & 0xfff) << 8;
599} 767}
600 768
769static unsigned int pxa2xx_ssp_get_clk_div(struct driver_data *drv_data,
770 struct chip_data *chip, int rate)
771{
772 u32 clk_div;
773
774 switch (drv_data->ssp_type) {
775 case QUARK_X1000_SSP:
776 quark_x1000_set_clk_regvals(rate, &chip->dds_rate, &clk_div);
777 return clk_div << 8;
778 default:
779 return ssp_get_clk_div(drv_data, rate);
780 }
781}
782
601static void pump_transfers(unsigned long data) 783static void pump_transfers(unsigned long data)
602{ 784{
603 struct driver_data *drv_data = (struct driver_data *)data; 785 struct driver_data *drv_data = (struct driver_data *)data;
@@ -613,6 +795,7 @@ static void pump_transfers(unsigned long data)
613 u32 cr1; 795 u32 cr1;
614 u32 dma_thresh = drv_data->cur_chip->dma_threshold; 796 u32 dma_thresh = drv_data->cur_chip->dma_threshold;
615 u32 dma_burst = drv_data->cur_chip->dma_burst_size; 797 u32 dma_burst = drv_data->cur_chip->dma_burst_size;
798 u32 change_mask = pxa2xx_spi_get_ssrc1_change_mask(drv_data);
616 799
617 /* Get current state information */ 800 /* Get current state information */
618 message = drv_data->cur_msg; 801 message = drv_data->cur_msg;
@@ -699,7 +882,7 @@ static void pump_transfers(unsigned long data)
699 if (transfer->bits_per_word) 882 if (transfer->bits_per_word)
700 bits = transfer->bits_per_word; 883 bits = transfer->bits_per_word;
701 884
702 clk_div = ssp_get_clk_div(drv_data, speed); 885 clk_div = pxa2xx_ssp_get_clk_div(drv_data, chip, speed);
703 886
704 if (bits <= 8) { 887 if (bits <= 8) {
705 drv_data->n_bytes = 1; 888 drv_data->n_bytes = 1;
@@ -731,11 +914,7 @@ static void pump_transfers(unsigned long data)
731 "pump_transfers: DMA burst size reduced to match bits_per_word\n"); 914 "pump_transfers: DMA burst size reduced to match bits_per_word\n");
732 } 915 }
733 916
734 cr0 = clk_div 917 cr0 = pxa2xx_configure_sscr0(drv_data, clk_div, bits);
735 | SSCR0_Motorola
736 | SSCR0_DataSize(bits > 16 ? bits - 16 : bits)
737 | SSCR0_SSE
738 | (bits > 16 ? SSCR0_EDSS : 0);
739 } 918 }
740 919
741 message->state = RUNNING_STATE; 920 message->state = RUNNING_STATE;
@@ -771,17 +950,20 @@ static void pump_transfers(unsigned long data)
771 write_SSITF(chip->lpss_tx_threshold, reg); 950 write_SSITF(chip->lpss_tx_threshold, reg);
772 } 951 }
773 952
953 if (is_quark_x1000_ssp(drv_data) &&
954 (read_DDS_RATE(reg) != chip->dds_rate))
955 write_DDS_RATE(chip->dds_rate, reg);
956
774 /* see if we need to reload the config registers */ 957 /* see if we need to reload the config registers */
775 if ((read_SSCR0(reg) != cr0) 958 if ((read_SSCR0(reg) != cr0) ||
776 || (read_SSCR1(reg) & SSCR1_CHANGE_MASK) != 959 (read_SSCR1(reg) & change_mask) != (cr1 & change_mask)) {
777 (cr1 & SSCR1_CHANGE_MASK)) {
778 960
779 /* stop the SSP, and update the other bits */ 961 /* stop the SSP, and update the other bits */
780 write_SSCR0(cr0 & ~SSCR0_SSE, reg); 962 write_SSCR0(cr0 & ~SSCR0_SSE, reg);
781 if (!pxa25x_ssp_comp(drv_data)) 963 if (!pxa25x_ssp_comp(drv_data))
782 write_SSTO(chip->timeout, reg); 964 write_SSTO(chip->timeout, reg);
783 /* first set CR1 without interrupt and service enables */ 965 /* first set CR1 without interrupt and service enables */
784 write_SSCR1(cr1 & SSCR1_CHANGE_MASK, reg); 966 write_SSCR1(cr1 & change_mask, reg);
785 /* restart the SSP */ 967 /* restart the SSP */
786 write_SSCR0(cr0, reg); 968 write_SSCR0(cr0, reg);
787 969
@@ -875,14 +1057,22 @@ static int setup(struct spi_device *spi)
875 unsigned int clk_div; 1057 unsigned int clk_div;
876 uint tx_thres, tx_hi_thres, rx_thres; 1058 uint tx_thres, tx_hi_thres, rx_thres;
877 1059
878 if (is_lpss_ssp(drv_data)) { 1060 switch (drv_data->ssp_type) {
1061 case QUARK_X1000_SSP:
1062 tx_thres = TX_THRESH_QUARK_X1000_DFLT;
1063 tx_hi_thres = 0;
1064 rx_thres = RX_THRESH_QUARK_X1000_DFLT;
1065 break;
1066 case LPSS_SSP:
879 tx_thres = LPSS_TX_LOTHRESH_DFLT; 1067 tx_thres = LPSS_TX_LOTHRESH_DFLT;
880 tx_hi_thres = LPSS_TX_HITHRESH_DFLT; 1068 tx_hi_thres = LPSS_TX_HITHRESH_DFLT;
881 rx_thres = LPSS_RX_THRESH_DFLT; 1069 rx_thres = LPSS_RX_THRESH_DFLT;
882 } else { 1070 break;
1071 default:
883 tx_thres = TX_THRESH_DFLT; 1072 tx_thres = TX_THRESH_DFLT;
884 tx_hi_thres = 0; 1073 tx_hi_thres = 0;
885 rx_thres = RX_THRESH_DFLT; 1074 rx_thres = RX_THRESH_DFLT;
1075 break;
886 } 1076 }
887 1077
888 /* Only alloc on first setup */ 1078 /* Only alloc on first setup */
@@ -935,9 +1125,6 @@ static int setup(struct spi_device *spi)
935 chip->enable_dma = drv_data->master_info->enable_dma; 1125 chip->enable_dma = drv_data->master_info->enable_dma;
936 } 1126 }
937 1127
938 chip->threshold = (SSCR1_RxTresh(rx_thres) & SSCR1_RFT) |
939 (SSCR1_TxTresh(tx_thres) & SSCR1_TFT);
940
941 chip->lpss_rx_threshold = SSIRF_RxThresh(rx_thres); 1128 chip->lpss_rx_threshold = SSIRF_RxThresh(rx_thres);
942 chip->lpss_tx_threshold = SSITF_TxLoThresh(tx_thres) 1129 chip->lpss_tx_threshold = SSITF_TxLoThresh(tx_thres)
943 | SSITF_TxHiThresh(tx_hi_thres); 1130 | SSITF_TxHiThresh(tx_hi_thres);
@@ -956,15 +1143,24 @@ static int setup(struct spi_device *spi)
956 } 1143 }
957 } 1144 }
958 1145
959 clk_div = ssp_get_clk_div(drv_data, spi->max_speed_hz); 1146 clk_div = pxa2xx_ssp_get_clk_div(drv_data, chip, spi->max_speed_hz);
960 chip->speed_hz = spi->max_speed_hz; 1147 chip->speed_hz = spi->max_speed_hz;
961 1148
962 chip->cr0 = clk_div 1149 chip->cr0 = pxa2xx_configure_sscr0(drv_data, clk_div,
963 | SSCR0_Motorola 1150 spi->bits_per_word);
964 | SSCR0_DataSize(spi->bits_per_word > 16 ? 1151 switch (drv_data->ssp_type) {
965 spi->bits_per_word - 16 : spi->bits_per_word) 1152 case QUARK_X1000_SSP:
966 | SSCR0_SSE 1153 chip->threshold = (QUARK_X1000_SSCR1_RxTresh(rx_thres)
967 | (spi->bits_per_word > 16 ? SSCR0_EDSS : 0); 1154 & QUARK_X1000_SSCR1_RFT)
1155 | (QUARK_X1000_SSCR1_TxTresh(tx_thres)
1156 & QUARK_X1000_SSCR1_TFT);
1157 break;
1158 default:
1159 chip->threshold = (SSCR1_RxTresh(rx_thres) & SSCR1_RFT) |
1160 (SSCR1_TxTresh(tx_thres) & SSCR1_TFT);
1161 break;
1162 }
1163
968 chip->cr1 &= ~(SSCR1_SPO | SSCR1_SPH); 1164 chip->cr1 &= ~(SSCR1_SPO | SSCR1_SPH);
969 chip->cr1 |= (((spi->mode & SPI_CPHA) != 0) ? SSCR1_SPH : 0) 1165 chip->cr1 |= (((spi->mode & SPI_CPHA) != 0) ? SSCR1_SPH : 0)
970 | (((spi->mode & SPI_CPOL) != 0) ? SSCR1_SPO : 0); 1166 | (((spi->mode & SPI_CPOL) != 0) ? SSCR1_SPO : 0);
@@ -993,7 +1189,8 @@ static int setup(struct spi_device *spi)
993 chip->read = u16_reader; 1189 chip->read = u16_reader;
994 chip->write = u16_writer; 1190 chip->write = u16_writer;
995 } else if (spi->bits_per_word <= 32) { 1191 } else if (spi->bits_per_word <= 32) {
996 chip->cr0 |= SSCR0_EDSS; 1192 if (!is_quark_x1000_ssp(drv_data))
1193 chip->cr0 |= SSCR0_EDSS;
997 chip->n_bytes = 4; 1194 chip->n_bytes = 4;
998 chip->read = u32_reader; 1195 chip->read = u32_reader;
999 chip->write = u32_writer; 1196 chip->write = u32_writer;
@@ -1144,7 +1341,15 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
1144 drv_data->ioaddr = ssp->mmio_base; 1341 drv_data->ioaddr = ssp->mmio_base;
1145 drv_data->ssdr_physical = ssp->phys_base + SSDR; 1342 drv_data->ssdr_physical = ssp->phys_base + SSDR;
1146 if (pxa25x_ssp_comp(drv_data)) { 1343 if (pxa25x_ssp_comp(drv_data)) {
1147 master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16); 1344 switch (drv_data->ssp_type) {
1345 case QUARK_X1000_SSP:
1346 master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
1347 break;
1348 default:
1349 master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
1350 break;
1351 }
1352
1148 drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE; 1353 drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE;
1149 drv_data->dma_cr1 = 0; 1354 drv_data->dma_cr1 = 0;
1150 drv_data->clear_sr = SSSR_ROR; 1355 drv_data->clear_sr = SSSR_ROR;
@@ -1182,16 +1387,35 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
1182 1387
1183 /* Load default SSP configuration */ 1388 /* Load default SSP configuration */
1184 write_SSCR0(0, drv_data->ioaddr); 1389 write_SSCR0(0, drv_data->ioaddr);
1185 write_SSCR1(SSCR1_RxTresh(RX_THRESH_DFLT) | 1390 switch (drv_data->ssp_type) {
1186 SSCR1_TxTresh(TX_THRESH_DFLT), 1391 case QUARK_X1000_SSP:
1187 drv_data->ioaddr); 1392 write_SSCR1(QUARK_X1000_SSCR1_RxTresh(
1188 write_SSCR0(SSCR0_SCR(2) 1393 RX_THRESH_QUARK_X1000_DFLT) |
1189 | SSCR0_Motorola 1394 QUARK_X1000_SSCR1_TxTresh(
1190 | SSCR0_DataSize(8), 1395 TX_THRESH_QUARK_X1000_DFLT),
1191 drv_data->ioaddr); 1396 drv_data->ioaddr);
1397
1398 /* using the Motorola SPI protocol and use 8 bit frame */
1399 write_SSCR0(QUARK_X1000_SSCR0_Motorola
1400 | QUARK_X1000_SSCR0_DataSize(8),
1401 drv_data->ioaddr);
1402 break;
1403 default:
1404 write_SSCR1(SSCR1_RxTresh(RX_THRESH_DFLT) |
1405 SSCR1_TxTresh(TX_THRESH_DFLT),
1406 drv_data->ioaddr);
1407 write_SSCR0(SSCR0_SCR(2)
1408 | SSCR0_Motorola
1409 | SSCR0_DataSize(8),
1410 drv_data->ioaddr);
1411 break;
1412 }
1413
1192 if (!pxa25x_ssp_comp(drv_data)) 1414 if (!pxa25x_ssp_comp(drv_data))
1193 write_SSTO(0, drv_data->ioaddr); 1415 write_SSTO(0, drv_data->ioaddr);
1194 write_SSPSP(0, drv_data->ioaddr); 1416
1417 if (!is_quark_x1000_ssp(drv_data))
1418 write_SSPSP(0, drv_data->ioaddr);
1195 1419
1196 lpss_ssp_setup(drv_data); 1420 lpss_ssp_setup(drv_data);
1197 1421
diff --git a/drivers/spi/spi-pxa2xx.h b/drivers/spi/spi-pxa2xx.h
index 5adc2a11c7bc..6bec59c90cd4 100644
--- a/drivers/spi/spi-pxa2xx.h
+++ b/drivers/spi/spi-pxa2xx.h
@@ -93,6 +93,7 @@ struct driver_data {
93struct chip_data { 93struct chip_data {
94 u32 cr0; 94 u32 cr0;
95 u32 cr1; 95 u32 cr1;
96 u32 dds_rate;
96 u32 psp; 97 u32 psp;
97 u32 timeout; 98 u32 timeout;
98 u8 n_bytes; 99 u8 n_bytes;
@@ -126,6 +127,7 @@ DEFINE_SSP_REG(SSCR1, 0x04)
126DEFINE_SSP_REG(SSSR, 0x08) 127DEFINE_SSP_REG(SSSR, 0x08)
127DEFINE_SSP_REG(SSITR, 0x0c) 128DEFINE_SSP_REG(SSITR, 0x0c)
128DEFINE_SSP_REG(SSDR, 0x10) 129DEFINE_SSP_REG(SSDR, 0x10)
130DEFINE_SSP_REG(DDS_RATE, 0x28) /* DDS Clock Rate */
129DEFINE_SSP_REG(SSTO, 0x28) 131DEFINE_SSP_REG(SSTO, 0x28)
130DEFINE_SSP_REG(SSPSP, 0x2c) 132DEFINE_SSP_REG(SSPSP, 0x2c)
131DEFINE_SSP_REG(SSITF, SSITF) 133DEFINE_SSP_REG(SSITF, SSITF)
@@ -141,18 +143,22 @@ DEFINE_SSP_REG(SSIRF, SSIRF)
141 143
142static inline int pxa25x_ssp_comp(struct driver_data *drv_data) 144static inline int pxa25x_ssp_comp(struct driver_data *drv_data)
143{ 145{
144 if (drv_data->ssp_type == PXA25x_SSP) 146 switch (drv_data->ssp_type) {
147 case PXA25x_SSP:
148 case CE4100_SSP:
149 case QUARK_X1000_SSP:
145 return 1; 150 return 1;
146 if (drv_data->ssp_type == CE4100_SSP) 151 default:
147 return 1; 152 return 0;
148 return 0; 153 }
149} 154}
150 155
151static inline void write_SSSR_CS(struct driver_data *drv_data, u32 val) 156static inline void write_SSSR_CS(struct driver_data *drv_data, u32 val)
152{ 157{
153 void __iomem *reg = drv_data->ioaddr; 158 void __iomem *reg = drv_data->ioaddr;
154 159
155 if (drv_data->ssp_type == CE4100_SSP) 160 if (drv_data->ssp_type == CE4100_SSP ||
161 drv_data->ssp_type == QUARK_X1000_SSP)
156 val |= read_SSSR(reg) & SSSR_ALT_FRM_MASK; 162 val |= read_SSSR(reg) & SSSR_ALT_FRM_MASK;
157 163
158 write_SSSR(val, reg); 164 write_SSSR(val, reg);
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index 87bc16f491f0..80637e9406e2 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -749,8 +749,6 @@ static int rockchip_spi_remove(struct platform_device *pdev)
749 if (rs->dma_rx.ch) 749 if (rs->dma_rx.ch)
750 dma_release_channel(rs->dma_rx.ch); 750 dma_release_channel(rs->dma_rx.ch);
751 751
752 spi_master_put(master);
753
754 return 0; 752 return 0;
755} 753}
756 754
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 480133ee1eb3..0f602cba1989 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -33,8 +33,9 @@
33 33
34#include <linux/platform_data/spi-s3c64xx.h> 34#include <linux/platform_data/spi-s3c64xx.h>
35 35
36#define MAX_SPI_PORTS 3 36#define MAX_SPI_PORTS 6
37#define S3C64XX_SPI_QUIRK_POLL (1 << 0) 37#define S3C64XX_SPI_QUIRK_POLL (1 << 0)
38#define S3C64XX_SPI_QUIRK_CS_AUTO (1 << 1)
38 39
39/* Registers and bit-fields */ 40/* Registers and bit-fields */
40 41
@@ -78,6 +79,7 @@
78 79
79#define S3C64XX_SPI_SLAVE_AUTO (1<<1) 80#define S3C64XX_SPI_SLAVE_AUTO (1<<1)
80#define S3C64XX_SPI_SLAVE_SIG_INACT (1<<0) 81#define S3C64XX_SPI_SLAVE_SIG_INACT (1<<0)
82#define S3C64XX_SPI_SLAVE_NSC_CNT_2 (2<<4)
81 83
82#define S3C64XX_SPI_INT_TRAILING_EN (1<<6) 84#define S3C64XX_SPI_INT_TRAILING_EN (1<<6)
83#define S3C64XX_SPI_INT_RX_OVERRUN_EN (1<<5) 85#define S3C64XX_SPI_INT_RX_OVERRUN_EN (1<<5)
@@ -344,16 +346,8 @@ static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
344 spi->dma_tx = sdd->tx_dma.ch; 346 spi->dma_tx = sdd->tx_dma.ch;
345 } 347 }
346 348
347 ret = pm_runtime_get_sync(&sdd->pdev->dev);
348 if (ret < 0) {
349 dev_err(dev, "Failed to enable device: %d\n", ret);
350 goto out_tx;
351 }
352
353 return 0; 349 return 0;
354 350
355out_tx:
356 dma_release_channel(sdd->tx_dma.ch);
357out_rx: 351out_rx:
358 dma_release_channel(sdd->rx_dma.ch); 352 dma_release_channel(sdd->rx_dma.ch);
359out: 353out:
@@ -370,7 +364,6 @@ static int s3c64xx_spi_unprepare_transfer(struct spi_master *spi)
370 dma_release_channel(sdd->tx_dma.ch); 364 dma_release_channel(sdd->tx_dma.ch);
371 } 365 }
372 366
373 pm_runtime_put(&sdd->pdev->dev);
374 return 0; 367 return 0;
375} 368}
376 369
@@ -717,7 +710,12 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
717 enable_datapath(sdd, spi, xfer, use_dma); 710 enable_datapath(sdd, spi, xfer, use_dma);
718 711
719 /* Start the signals */ 712 /* Start the signals */
720 writel(0, sdd->regs + S3C64XX_SPI_SLAVE_SEL); 713 if (!(sdd->port_conf->quirks & S3C64XX_SPI_QUIRK_CS_AUTO))
714 writel(0, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
715 else
716 writel(readl(sdd->regs + S3C64XX_SPI_SLAVE_SEL)
717 | S3C64XX_SPI_SLAVE_AUTO | S3C64XX_SPI_SLAVE_NSC_CNT_2,
718 sdd->regs + S3C64XX_SPI_SLAVE_SEL);
721 719
722 spin_unlock_irqrestore(&sdd->lock, flags); 720 spin_unlock_irqrestore(&sdd->lock, flags);
723 721
@@ -866,13 +864,15 @@ static int s3c64xx_spi_setup(struct spi_device *spi)
866 } 864 }
867 865
868 pm_runtime_put(&sdd->pdev->dev); 866 pm_runtime_put(&sdd->pdev->dev);
869 writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL); 867 if (!(sdd->port_conf->quirks & S3C64XX_SPI_QUIRK_CS_AUTO))
868 writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
870 return 0; 869 return 0;
871 870
872setup_exit: 871setup_exit:
873 pm_runtime_put(&sdd->pdev->dev); 872 pm_runtime_put(&sdd->pdev->dev);
874 /* setup() returns with device de-selected */ 873 /* setup() returns with device de-selected */
875 writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL); 874 if (!(sdd->port_conf->quirks & S3C64XX_SPI_QUIRK_CS_AUTO))
875 writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
876 876
877 if (gpio_is_valid(spi->cs_gpio)) 877 if (gpio_is_valid(spi->cs_gpio))
878 gpio_free(spi->cs_gpio); 878 gpio_free(spi->cs_gpio);
@@ -946,7 +946,8 @@ static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel)
946 946
947 sdd->cur_speed = 0; 947 sdd->cur_speed = 0;
948 948
949 writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL); 949 if (!(sdd->port_conf->quirks & S3C64XX_SPI_QUIRK_CS_AUTO))
950 writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
950 951
951 /* Disable Interrupts - we use Polling if not DMA mode */ 952 /* Disable Interrupts - we use Polling if not DMA mode */
952 writel(0, regs + S3C64XX_SPI_INT_EN); 953 writel(0, regs + S3C64XX_SPI_INT_EN);
@@ -1341,6 +1342,15 @@ static struct s3c64xx_spi_port_config exynos5440_spi_port_config = {
1341 .quirks = S3C64XX_SPI_QUIRK_POLL, 1342 .quirks = S3C64XX_SPI_QUIRK_POLL,
1342}; 1343};
1343 1344
1345static struct s3c64xx_spi_port_config exynos7_spi_port_config = {
1346 .fifo_lvl_mask = { 0x1ff, 0x7F, 0x7F, 0x7F, 0x7F, 0x1ff},
1347 .rx_lvl_offset = 15,
1348 .tx_st_done = 25,
1349 .high_speed = true,
1350 .clk_from_cmu = true,
1351 .quirks = S3C64XX_SPI_QUIRK_CS_AUTO,
1352};
1353
1344static struct platform_device_id s3c64xx_spi_driver_ids[] = { 1354static struct platform_device_id s3c64xx_spi_driver_ids[] = {
1345 { 1355 {
1346 .name = "s3c2443-spi", 1356 .name = "s3c2443-spi",
@@ -1374,6 +1384,9 @@ static const struct of_device_id s3c64xx_spi_dt_match[] = {
1374 { .compatible = "samsung,exynos5440-spi", 1384 { .compatible = "samsung,exynos5440-spi",
1375 .data = (void *)&exynos5440_spi_port_config, 1385 .data = (void *)&exynos5440_spi_port_config,
1376 }, 1386 },
1387 { .compatible = "samsung,exynos7-spi",
1388 .data = (void *)&exynos7_spi_port_config,
1389 },
1377 { }, 1390 { },
1378}; 1391};
1379MODULE_DEVICE_TABLE(of, s3c64xx_spi_dt_match); 1392MODULE_DEVICE_TABLE(of, s3c64xx_spi_dt_match);
diff --git a/drivers/spi/spi-sirf.c b/drivers/spi/spi-sirf.c
index 39e2c0a55a28..73a88b33f810 100644
--- a/drivers/spi/spi-sirf.c
+++ b/drivers/spi/spi-sirf.c
@@ -23,6 +23,7 @@
23#include <linux/dmaengine.h> 23#include <linux/dmaengine.h>
24#include <linux/dma-direction.h> 24#include <linux/dma-direction.h>
25#include <linux/dma-mapping.h> 25#include <linux/dma-mapping.h>
26#include <linux/reset.h>
26 27
27#define DRIVER_NAME "sirfsoc_spi" 28#define DRIVER_NAME "sirfsoc_spi"
28 29
@@ -134,6 +135,7 @@
134 ALIGNED(x->len) && (x->len < 2 * PAGE_SIZE)) 135 ALIGNED(x->len) && (x->len < 2 * PAGE_SIZE))
135 136
136#define SIRFSOC_MAX_CMD_BYTES 4 137#define SIRFSOC_MAX_CMD_BYTES 4
138#define SIRFSOC_SPI_DEFAULT_FRQ 1000000
137 139
138struct sirfsoc_spi { 140struct sirfsoc_spi {
139 struct spi_bitbang bitbang; 141 struct spi_bitbang bitbang;
@@ -562,9 +564,9 @@ spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
562 564
563 sspi->word_width = DIV_ROUND_UP(bits_per_word, 8); 565 sspi->word_width = DIV_ROUND_UP(bits_per_word, 8);
564 txfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) | 566 txfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
565 sspi->word_width; 567 (sspi->word_width >> 1);
566 rxfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) | 568 rxfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
567 sspi->word_width; 569 (sspi->word_width >> 1);
568 570
569 if (!(spi->mode & SPI_CS_HIGH)) 571 if (!(spi->mode & SPI_CS_HIGH))
570 regval |= SIRFSOC_SPI_CS_IDLE_STAT; 572 regval |= SIRFSOC_SPI_CS_IDLE_STAT;
@@ -629,9 +631,6 @@ static int spi_sirfsoc_setup(struct spi_device *spi)
629{ 631{
630 struct sirfsoc_spi *sspi; 632 struct sirfsoc_spi *sspi;
631 633
632 if (!spi->max_speed_hz)
633 return -EINVAL;
634
635 sspi = spi_master_get_devdata(spi->master); 634 sspi = spi_master_get_devdata(spi->master);
636 635
637 if (spi->cs_gpio == -ENOENT) 636 if (spi->cs_gpio == -ENOENT)
@@ -649,6 +648,12 @@ static int spi_sirfsoc_probe(struct platform_device *pdev)
649 int irq; 648 int irq;
650 int i, ret; 649 int i, ret;
651 650
651 ret = device_reset(&pdev->dev);
652 if (ret) {
653 dev_err(&pdev->dev, "SPI reset failed!\n");
654 return ret;
655 }
656
652 master = spi_alloc_master(&pdev->dev, sizeof(*sspi)); 657 master = spi_alloc_master(&pdev->dev, sizeof(*sspi));
653 if (!master) { 658 if (!master) {
654 dev_err(&pdev->dev, "Unable to allocate SPI master\n"); 659 dev_err(&pdev->dev, "Unable to allocate SPI master\n");
@@ -683,6 +688,7 @@ static int spi_sirfsoc_probe(struct platform_device *pdev)
683 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_CS_HIGH; 688 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_CS_HIGH;
684 master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(12) | 689 master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(12) |
685 SPI_BPW_MASK(16) | SPI_BPW_MASK(32); 690 SPI_BPW_MASK(16) | SPI_BPW_MASK(32);
691 master->max_speed_hz = SIRFSOC_SPI_DEFAULT_FRQ;
686 sspi->bitbang.master->dev.of_node = pdev->dev.of_node; 692 sspi->bitbang.master->dev.of_node = pdev->dev.of_node;
687 693
688 /* request DMA channels */ 694 /* request DMA channels */
diff --git a/drivers/spi/spi-txx9.c b/drivers/spi/spi-txx9.c
index 2501a8373e89..f2ab827c81bb 100644
--- a/drivers/spi/spi-txx9.c
+++ b/drivers/spi/spi-txx9.c
@@ -402,8 +402,7 @@ exit_busy:
402exit: 402exit:
403 if (c->workqueue) 403 if (c->workqueue)
404 destroy_workqueue(c->workqueue); 404 destroy_workqueue(c->workqueue);
405 if (c->clk) 405 clk_disable(c->clk);
406 clk_disable(c->clk);
407 spi_master_put(master); 406 spi_master_put(master);
408 return ret; 407 return ret;
409} 408}
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index cf4ed29c7ee6..66a70e9bc743 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -615,13 +615,13 @@ static int spi_map_buf(struct spi_master *master, struct device *dev,
615 sg_free_table(sgt); 615 sg_free_table(sgt);
616 return -ENOMEM; 616 return -ENOMEM;
617 } 617 }
618 sg_buf = page_address(vm_page) + 618 sg_set_page(&sgt->sgl[i], vm_page,
619 ((size_t)buf & ~PAGE_MASK); 619 min, offset_in_page(buf));
620 } else { 620 } else {
621 sg_buf = buf; 621 sg_buf = buf;
622 sg_set_buf(&sgt->sgl[i], sg_buf, min);
622 } 623 }
623 624
624 sg_set_buf(&sgt->sgl[i], sg_buf, min);
625 625
626 buf += min; 626 buf += min;
627 len -= min; 627 len -= min;
@@ -1001,7 +1001,7 @@ static int spi_init_queue(struct spi_master *master)
1001 dev_name(&master->dev)); 1001 dev_name(&master->dev));
1002 if (IS_ERR(master->kworker_task)) { 1002 if (IS_ERR(master->kworker_task)) {
1003 dev_err(&master->dev, "failed to create message pump task\n"); 1003 dev_err(&master->dev, "failed to create message pump task\n");
1004 return -ENOMEM; 1004 return PTR_ERR(master->kworker_task);
1005 } 1005 }
1006 init_kthread_work(&master->pump_messages, spi_pump_messages); 1006 init_kthread_work(&master->pump_messages, spi_pump_messages);
1007 1007
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index e50039fb1474..6941e04afb8c 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -87,6 +87,7 @@ struct spidev_data {
87 unsigned users; 87 unsigned users;
88 u8 *tx_buffer; 88 u8 *tx_buffer;
89 u8 *rx_buffer; 89 u8 *rx_buffer;
90 u32 speed_hz;
90}; 91};
91 92
92static LIST_HEAD(device_list); 93static LIST_HEAD(device_list);
@@ -138,6 +139,7 @@ spidev_sync_write(struct spidev_data *spidev, size_t len)
138 struct spi_transfer t = { 139 struct spi_transfer t = {
139 .tx_buf = spidev->tx_buffer, 140 .tx_buf = spidev->tx_buffer,
140 .len = len, 141 .len = len,
142 .speed_hz = spidev->speed_hz,
141 }; 143 };
142 struct spi_message m; 144 struct spi_message m;
143 145
@@ -152,6 +154,7 @@ spidev_sync_read(struct spidev_data *spidev, size_t len)
152 struct spi_transfer t = { 154 struct spi_transfer t = {
153 .rx_buf = spidev->rx_buffer, 155 .rx_buf = spidev->rx_buffer,
154 .len = len, 156 .len = len,
157 .speed_hz = spidev->speed_hz,
155 }; 158 };
156 struct spi_message m; 159 struct spi_message m;
157 160
@@ -274,6 +277,8 @@ static int spidev_message(struct spidev_data *spidev,
274 k_tmp->bits_per_word = u_tmp->bits_per_word; 277 k_tmp->bits_per_word = u_tmp->bits_per_word;
275 k_tmp->delay_usecs = u_tmp->delay_usecs; 278 k_tmp->delay_usecs = u_tmp->delay_usecs;
276 k_tmp->speed_hz = u_tmp->speed_hz; 279 k_tmp->speed_hz = u_tmp->speed_hz;
280 if (!k_tmp->speed_hz)
281 k_tmp->speed_hz = spidev->speed_hz;
277#ifdef VERBOSE 282#ifdef VERBOSE
278 dev_dbg(&spidev->spi->dev, 283 dev_dbg(&spidev->spi->dev,
279 " xfer len %zd %s%s%s%dbits %u usec %uHz\n", 284 " xfer len %zd %s%s%s%dbits %u usec %uHz\n",
@@ -377,7 +382,7 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
377 retval = __put_user(spi->bits_per_word, (__u8 __user *)arg); 382 retval = __put_user(spi->bits_per_word, (__u8 __user *)arg);
378 break; 383 break;
379 case SPI_IOC_RD_MAX_SPEED_HZ: 384 case SPI_IOC_RD_MAX_SPEED_HZ:
380 retval = __put_user(spi->max_speed_hz, (__u32 __user *)arg); 385 retval = __put_user(spidev->speed_hz, (__u32 __user *)arg);
381 break; 386 break;
382 387
383 /* write requests */ 388 /* write requests */
@@ -441,10 +446,11 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
441 446
442 spi->max_speed_hz = tmp; 447 spi->max_speed_hz = tmp;
443 retval = spi_setup(spi); 448 retval = spi_setup(spi);
444 if (retval < 0) 449 if (retval >= 0)
445 spi->max_speed_hz = save; 450 spidev->speed_hz = tmp;
446 else 451 else
447 dev_dbg(&spi->dev, "%d Hz (max)\n", tmp); 452 dev_dbg(&spi->dev, "%d Hz (max)\n", tmp);
453 spi->max_speed_hz = save;
448 } 454 }
449 break; 455 break;
450 456
@@ -570,6 +576,8 @@ static int spidev_release(struct inode *inode, struct file *filp)
570 kfree(spidev->rx_buffer); 576 kfree(spidev->rx_buffer);
571 spidev->rx_buffer = NULL; 577 spidev->rx_buffer = NULL;
572 578
579 spidev->speed_hz = spidev->spi->max_speed_hz;
580
573 /* ... after we unbound from the underlying device? */ 581 /* ... after we unbound from the underlying device? */
574 spin_lock_irq(&spidev->spi_lock); 582 spin_lock_irq(&spidev->spi_lock);
575 dofree = (spidev->spi == NULL); 583 dofree = (spidev->spi == NULL);
@@ -650,6 +658,8 @@ static int spidev_probe(struct spi_device *spi)
650 } 658 }
651 mutex_unlock(&device_list_lock); 659 mutex_unlock(&device_list_lock);
652 660
661 spidev->speed_hz = spi->max_speed_hz;
662
653 if (status == 0) 663 if (status == 0)
654 spi_set_drvdata(spi, spidev); 664 spi_set_drvdata(spi, spidev);
655 else 665 else
diff --git a/drivers/staging/iio/light/isl29028.c b/drivers/staging/iio/light/isl29028.c
index 6014625920b0..e969107ddb47 100644
--- a/drivers/staging/iio/light/isl29028.c
+++ b/drivers/staging/iio/light/isl29028.c
@@ -537,7 +537,8 @@ static const struct i2c_device_id isl29028_id[] = {
537MODULE_DEVICE_TABLE(i2c, isl29028_id); 537MODULE_DEVICE_TABLE(i2c, isl29028_id);
538 538
539static const struct of_device_id isl29028_of_match[] = { 539static const struct of_device_id isl29028_of_match[] = {
540 { .compatible = "isil,isl29028", }, 540 { .compatible = "isl,isl29028", },
541 { .compatible = "isil,isl29028", },/* deprecated, don't use */
541 { }, 542 { },
542}; 543};
543MODULE_DEVICE_TABLE(of, isl29028_of_match); 544MODULE_DEVICE_TABLE(of, isl29028_of_match);
diff --git a/drivers/staging/lustre/lustre/libcfs/tracefile.c b/drivers/staging/lustre/lustre/libcfs/tracefile.c
index 7e3f6a45da00..7561030c96e6 100644
--- a/drivers/staging/lustre/lustre/libcfs/tracefile.c
+++ b/drivers/staging/lustre/lustre/libcfs/tracefile.c
@@ -1025,8 +1025,8 @@ static int tracefiled(void *arg)
1025 1025
1026 if (f_pos >= (off_t)cfs_tracefile_size) 1026 if (f_pos >= (off_t)cfs_tracefile_size)
1027 f_pos = 0; 1027 f_pos = 0;
1028 else if (f_pos > i_size_read(filp->f_dentry->d_inode)) 1028 else if (f_pos > i_size_read(file_inode(filp)))
1029 f_pos = i_size_read(filp->f_dentry->d_inode); 1029 f_pos = i_size_read(file_inode(filp));
1030 1030
1031 buf = kmap(tage->page); 1031 buf = kmap(tage->page);
1032 rc = vfs_write(filp, (__force const char __user *)buf, 1032 rc = vfs_write(filp, (__force const char __user *)buf,
diff --git a/drivers/staging/lustre/lustre/llite/dcache.c b/drivers/staging/lustre/lustre/llite/dcache.c
index 439e4875b05c..f692261e9b5c 100644
--- a/drivers/staging/lustre/lustre/llite/dcache.c
+++ b/drivers/staging/lustre/lustre/llite/dcache.c
@@ -151,10 +151,10 @@ static int ll_ddelete(const struct dentry *de)
151{ 151{
152 LASSERT(de); 152 LASSERT(de);
153 153
154 CDEBUG(D_DENTRY, "%s dentry %.*s (%p, parent %p, inode %p) %s%s\n", 154 CDEBUG(D_DENTRY, "%s dentry %pd (%p, parent %p, inode %p) %s%s\n",
155 d_lustre_invalid((struct dentry *)de) ? "deleting" : "keeping", 155 d_lustre_invalid((struct dentry *)de) ? "deleting" : "keeping",
156 de->d_name.len, de->d_name.name, de, de->d_parent, de->d_inode, 156 de, de, de->d_parent, de->d_inode,
157 d_unhashed((struct dentry *)de) ? "" : "hashed,", 157 d_unhashed(de) ? "" : "hashed,",
158 list_empty(&de->d_subdirs) ? "" : "subdirs"); 158 list_empty(&de->d_subdirs) ? "" : "subdirs");
159 159
160 /* kernel >= 2.6.38 last refcount is decreased after this function. */ 160 /* kernel >= 2.6.38 last refcount is decreased after this function. */
@@ -180,8 +180,8 @@ int ll_d_init(struct dentry *de)
180{ 180{
181 LASSERT(de != NULL); 181 LASSERT(de != NULL);
182 182
183 CDEBUG(D_DENTRY, "ldd on dentry %.*s (%p) parent %p inode %p refc %d\n", 183 CDEBUG(D_DENTRY, "ldd on dentry %pd (%p) parent %p inode %p refc %d\n",
184 de->d_name.len, de->d_name.name, de, de->d_parent, de->d_inode, 184 de, de, de->d_parent, de->d_inode,
185 d_count(de)); 185 d_count(de));
186 186
187 if (de->d_fsdata == NULL) { 187 if (de->d_fsdata == NULL) {
@@ -258,10 +258,9 @@ void ll_invalidate_aliases(struct inode *inode)
258 inode->i_ino, inode->i_generation, inode); 258 inode->i_ino, inode->i_generation, inode);
259 259
260 ll_lock_dcache(inode); 260 ll_lock_dcache(inode);
261 ll_d_hlist_for_each_entry(dentry, p, &inode->i_dentry, d_alias) { 261 ll_d_hlist_for_each_entry(dentry, p, &inode->i_dentry, d_u.d_alias) {
262 CDEBUG(D_DENTRY, "dentry in drop %.*s (%p) parent %p " 262 CDEBUG(D_DENTRY, "dentry in drop %pd (%p) parent %p "
263 "inode %p flags %d\n", dentry->d_name.len, 263 "inode %p flags %d\n", dentry, dentry, dentry->d_parent,
264 dentry->d_name.name, dentry, dentry->d_parent,
265 dentry->d_inode, dentry->d_flags); 264 dentry->d_inode, dentry->d_flags);
266 265
267 if (unlikely(dentry == dentry->d_sb->s_root)) { 266 if (unlikely(dentry == dentry->d_sb->s_root)) {
@@ -352,8 +351,8 @@ static int ll_revalidate_nd(struct dentry *dentry, unsigned int flags)
352{ 351{
353 int rc; 352 int rc;
354 353
355 CDEBUG(D_VFSTRACE, "VFS Op:name=%s, flags=%u\n", 354 CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, flags=%u\n",
356 dentry->d_name.name, flags); 355 dentry, flags);
357 356
358 rc = ll_revalidate_dentry(dentry, flags); 357 rc = ll_revalidate_dentry(dentry, flags);
359 return rc; 358 return rc;
diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c
index b0bb7095dde5..a79fd65ec4c6 100644
--- a/drivers/staging/lustre/lustre/llite/dir.c
+++ b/drivers/staging/lustre/lustre/llite/dir.c
@@ -593,7 +593,7 @@ int ll_dir_read(struct inode *inode, struct dir_context *ctx)
593 593
594static int ll_readdir(struct file *filp, struct dir_context *ctx) 594static int ll_readdir(struct file *filp, struct dir_context *ctx)
595{ 595{
596 struct inode *inode = filp->f_dentry->d_inode; 596 struct inode *inode = file_inode(filp);
597 struct ll_file_data *lfd = LUSTRE_FPRIVATE(filp); 597 struct ll_file_data *lfd = LUSTRE_FPRIVATE(filp);
598 struct ll_sb_info *sbi = ll_i2sbi(inode); 598 struct ll_sb_info *sbi = ll_i2sbi(inode);
599 int hash64 = sbi->ll_flags & LL_SBI_64BIT_HASH; 599 int hash64 = sbi->ll_flags & LL_SBI_64BIT_HASH;
@@ -1242,7 +1242,7 @@ ll_getname(const char __user *filename)
1242 1242
1243static long ll_dir_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 1243static long ll_dir_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1244{ 1244{
1245 struct inode *inode = file->f_dentry->d_inode; 1245 struct inode *inode = file_inode(file);
1246 struct ll_sb_info *sbi = ll_i2sbi(inode); 1246 struct ll_sb_info *sbi = ll_i2sbi(inode);
1247 struct obd_ioctl_data *data; 1247 struct obd_ioctl_data *data;
1248 int rc = 0; 1248 int rc = 0;
@@ -1389,7 +1389,7 @@ lmv_out_free:
1389 return -EFAULT; 1389 return -EFAULT;
1390 } 1390 }
1391 1391
1392 if (inode->i_sb->s_root == file->f_dentry) 1392 if (is_root_inode(inode))
1393 set_default = 1; 1393 set_default = 1;
1394 1394
1395 /* in v1 and v3 cases lumv1 points to data */ 1395 /* in v1 and v3 cases lumv1 points to data */
@@ -1780,8 +1780,7 @@ out_quotactl:
1780 return ll_flush_ctx(inode); 1780 return ll_flush_ctx(inode);
1781#ifdef CONFIG_FS_POSIX_ACL 1781#ifdef CONFIG_FS_POSIX_ACL
1782 case LL_IOC_RMTACL: { 1782 case LL_IOC_RMTACL: {
1783 if (sbi->ll_flags & LL_SBI_RMT_CLIENT && 1783 if (sbi->ll_flags & LL_SBI_RMT_CLIENT && is_root_inode(inode)) {
1784 inode == inode->i_sb->s_root->d_inode) {
1785 struct ll_file_data *fd = LUSTRE_FPRIVATE(file); 1784 struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
1786 1785
1787 LASSERT(fd != NULL); 1786 LASSERT(fd != NULL);
diff --git a/drivers/staging/lustre/lustre/llite/file.c b/drivers/staging/lustre/lustre/llite/file.c
index c99b74117152..a2ae9a68a9a0 100644
--- a/drivers/staging/lustre/lustre/llite/file.c
+++ b/drivers/staging/lustre/lustre/llite/file.c
@@ -266,6 +266,10 @@ static int ll_md_close(struct obd_export *md_exp, struct inode *inode,
266{ 266{
267 struct ll_file_data *fd = LUSTRE_FPRIVATE(file); 267 struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
268 struct ll_inode_info *lli = ll_i2info(inode); 268 struct ll_inode_info *lli = ll_i2info(inode);
269 int lockmode;
270 __u64 flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_TEST_LOCK;
271 struct lustre_handle lockh;
272 ldlm_policy_data_t policy = {.l_inodebits={MDS_INODELOCK_OPEN}};
269 int rc = 0; 273 int rc = 0;
270 274
271 /* clear group lock, if present */ 275 /* clear group lock, if present */
@@ -292,39 +296,26 @@ static int ll_md_close(struct obd_export *md_exp, struct inode *inode,
292 296
293 /* Let's see if we have good enough OPEN lock on the file and if 297 /* Let's see if we have good enough OPEN lock on the file and if
294 we can skip talking to MDS */ 298 we can skip talking to MDS */
295 if (file->f_dentry->d_inode) { /* Can this ever be false? */
296 int lockmode;
297 __u64 flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_TEST_LOCK;
298 struct lustre_handle lockh;
299 struct inode *inode = file->f_dentry->d_inode;
300 ldlm_policy_data_t policy = {.l_inodebits={MDS_INODELOCK_OPEN}};
301
302 mutex_lock(&lli->lli_och_mutex);
303 if (fd->fd_omode & FMODE_WRITE) {
304 lockmode = LCK_CW;
305 LASSERT(lli->lli_open_fd_write_count);
306 lli->lli_open_fd_write_count--;
307 } else if (fd->fd_omode & FMODE_EXEC) {
308 lockmode = LCK_PR;
309 LASSERT(lli->lli_open_fd_exec_count);
310 lli->lli_open_fd_exec_count--;
311 } else {
312 lockmode = LCK_CR;
313 LASSERT(lli->lli_open_fd_read_count);
314 lli->lli_open_fd_read_count--;
315 }
316 mutex_unlock(&lli->lli_och_mutex);
317 299
318 if (!md_lock_match(md_exp, flags, ll_inode2fid(inode), 300 mutex_lock(&lli->lli_och_mutex);
319 LDLM_IBITS, &policy, lockmode, 301 if (fd->fd_omode & FMODE_WRITE) {
320 &lockh)) { 302 lockmode = LCK_CW;
321 rc = ll_md_real_close(file->f_dentry->d_inode, 303 LASSERT(lli->lli_open_fd_write_count);
322 fd->fd_omode); 304 lli->lli_open_fd_write_count--;
323 } 305 } else if (fd->fd_omode & FMODE_EXEC) {
306 lockmode = LCK_PR;
307 LASSERT(lli->lli_open_fd_exec_count);
308 lli->lli_open_fd_exec_count--;
324 } else { 309 } else {
325 CERROR("Releasing a file %p with negative dentry %p. Name %s", 310 lockmode = LCK_CR;
326 file, file->f_dentry, file->f_dentry->d_name.name); 311 LASSERT(lli->lli_open_fd_read_count);
312 lli->lli_open_fd_read_count--;
327 } 313 }
314 mutex_unlock(&lli->lli_och_mutex);
315
316 if (!md_lock_match(md_exp, flags, ll_inode2fid(inode),
317 LDLM_IBITS, &policy, lockmode, &lockh))
318 rc = ll_md_real_close(inode, fd->fd_omode);
328 319
329out: 320out:
330 LUSTRE_FPRIVATE(file) = NULL; 321 LUSTRE_FPRIVATE(file) = NULL;
@@ -350,8 +341,7 @@ int ll_file_release(struct inode *inode, struct file *file)
350 inode->i_generation, inode); 341 inode->i_generation, inode);
351 342
352#ifdef CONFIG_FS_POSIX_ACL 343#ifdef CONFIG_FS_POSIX_ACL
353 if (sbi->ll_flags & LL_SBI_RMT_CLIENT && 344 if (sbi->ll_flags & LL_SBI_RMT_CLIENT && is_root_inode(inode)) {
354 inode == inode->i_sb->s_root->d_inode) {
355 struct ll_file_data *fd = LUSTRE_FPRIVATE(file); 345 struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
356 346
357 LASSERT(fd != NULL); 347 LASSERT(fd != NULL);
@@ -363,7 +353,7 @@ int ll_file_release(struct inode *inode, struct file *file)
363 } 353 }
364#endif 354#endif
365 355
366 if (inode->i_sb->s_root != file->f_dentry) 356 if (!is_root_inode(inode))
367 ll_stats_ops_tally(sbi, LPROC_LL_RELEASE, 1); 357 ll_stats_ops_tally(sbi, LPROC_LL_RELEASE, 1);
368 fd = LUSTRE_FPRIVATE(file); 358 fd = LUSTRE_FPRIVATE(file);
369 LASSERT(fd != NULL); 359 LASSERT(fd != NULL);
@@ -375,7 +365,7 @@ int ll_file_release(struct inode *inode, struct file *file)
375 lli->lli_opendir_pid != 0) 365 lli->lli_opendir_pid != 0)
376 ll_stop_statahead(inode, lli->lli_opendir_key); 366 ll_stop_statahead(inode, lli->lli_opendir_key);
377 367
378 if (inode->i_sb->s_root == file->f_dentry) { 368 if (is_root_inode(inode)) {
379 LUSTRE_FPRIVATE(file) = NULL; 369 LUSTRE_FPRIVATE(file) = NULL;
380 ll_file_data_put(fd); 370 ll_file_data_put(fd);
381 return 0; 371 return 0;
@@ -394,21 +384,19 @@ int ll_file_release(struct inode *inode, struct file *file)
394 return rc; 384 return rc;
395} 385}
396 386
397static int ll_intent_file_open(struct file *file, void *lmm, 387static int ll_intent_file_open(struct dentry *dentry, void *lmm,
398 int lmmsize, struct lookup_intent *itp) 388 int lmmsize, struct lookup_intent *itp)
399{ 389{
400 struct ll_sb_info *sbi = ll_i2sbi(file->f_dentry->d_inode); 390 struct inode *inode = dentry->d_inode;
401 struct dentry *parent = file->f_dentry->d_parent; 391 struct ll_sb_info *sbi = ll_i2sbi(inode);
402 const char *name = file->f_dentry->d_name.name; 392 struct dentry *parent = dentry->d_parent;
403 const int len = file->f_dentry->d_name.len; 393 const char *name = dentry->d_name.name;
394 const int len = dentry->d_name.len;
404 struct md_op_data *op_data; 395 struct md_op_data *op_data;
405 struct ptlrpc_request *req; 396 struct ptlrpc_request *req;
406 __u32 opc = LUSTRE_OPC_ANY; 397 __u32 opc = LUSTRE_OPC_ANY;
407 int rc; 398 int rc;
408 399
409 if (!parent)
410 return -ENOENT;
411
412 /* Usually we come here only for NFSD, and we want open lock. 400 /* Usually we come here only for NFSD, and we want open lock.
413 But we can also get here with pre 2.6.15 patchless kernels, and in 401 But we can also get here with pre 2.6.15 patchless kernels, and in
414 that case that lock is also ok */ 402 that case that lock is also ok */
@@ -425,7 +413,7 @@ static int ll_intent_file_open(struct file *file, void *lmm,
425 } 413 }
426 414
427 op_data = ll_prep_md_op_data(NULL, parent->d_inode, 415 op_data = ll_prep_md_op_data(NULL, parent->d_inode,
428 file->f_dentry->d_inode, name, len, 416 inode, name, len,
429 O_RDWR, opc, NULL); 417 O_RDWR, opc, NULL);
430 if (IS_ERR(op_data)) 418 if (IS_ERR(op_data))
431 return PTR_ERR(op_data); 419 return PTR_ERR(op_data);
@@ -441,7 +429,7 @@ static int ll_intent_file_open(struct file *file, void *lmm,
441 if (!it_disposition(itp, DISP_OPEN_OPEN) || 429 if (!it_disposition(itp, DISP_OPEN_OPEN) ||
442 it_open_error(DISP_OPEN_OPEN, itp)) 430 it_open_error(DISP_OPEN_OPEN, itp))
443 goto out; 431 goto out;
444 ll_release_openhandle(file->f_dentry, itp); 432 ll_release_openhandle(inode, itp);
445 goto out; 433 goto out;
446 } 434 }
447 435
@@ -456,10 +444,9 @@ static int ll_intent_file_open(struct file *file, void *lmm,
456 goto out; 444 goto out;
457 } 445 }
458 446
459 rc = ll_prep_inode(&file->f_dentry->d_inode, req, NULL, itp); 447 rc = ll_prep_inode(&inode, req, NULL, itp);
460 if (!rc && itp->d.lustre.it_lock_mode) 448 if (!rc && itp->d.lustre.it_lock_mode)
461 ll_set_lock_data(sbi->ll_md_exp, file->f_dentry->d_inode, 449 ll_set_lock_data(sbi->ll_md_exp, inode, itp, NULL);
462 itp, NULL);
463 450
464out: 451out:
465 ptlrpc_req_finished(req); 452 ptlrpc_req_finished(req);
@@ -501,7 +488,7 @@ static int ll_och_fill(struct obd_export *md_exp, struct lookup_intent *it,
501static int ll_local_open(struct file *file, struct lookup_intent *it, 488static int ll_local_open(struct file *file, struct lookup_intent *it,
502 struct ll_file_data *fd, struct obd_client_handle *och) 489 struct ll_file_data *fd, struct obd_client_handle *och)
503{ 490{
504 struct inode *inode = file->f_dentry->d_inode; 491 struct inode *inode = file_inode(file);
505 struct ll_inode_info *lli = ll_i2info(inode); 492 struct ll_inode_info *lli = ll_i2info(inode);
506 493
507 LASSERT(!LUSTRE_FPRIVATE(file)); 494 LASSERT(!LUSTRE_FPRIVATE(file));
@@ -574,7 +561,7 @@ int ll_file_open(struct inode *inode, struct file *file)
574 spin_unlock(&lli->lli_sa_lock); 561 spin_unlock(&lli->lli_sa_lock);
575 } 562 }
576 563
577 if (inode->i_sb->s_root == file->f_dentry) { 564 if (is_root_inode(inode)) {
578 LUSTRE_FPRIVATE(file) = fd; 565 LUSTRE_FPRIVATE(file) = fd;
579 return 0; 566 return 0;
580 } 567 }
@@ -632,7 +619,7 @@ restart:
632 goto out_openerr; 619 goto out_openerr;
633 } 620 }
634 621
635 ll_release_openhandle(file->f_dentry, it); 622 ll_release_openhandle(inode, it);
636 } 623 }
637 (*och_usecount)++; 624 (*och_usecount)++;
638 625
@@ -652,7 +639,7 @@ restart:
652 result in a deadlock */ 639 result in a deadlock */
653 mutex_unlock(&lli->lli_och_mutex); 640 mutex_unlock(&lli->lli_och_mutex);
654 it->it_create_mode |= M_CHECK_STALE; 641 it->it_create_mode |= M_CHECK_STALE;
655 rc = ll_intent_file_open(file, NULL, 0, it); 642 rc = ll_intent_file_open(file->f_path.dentry, NULL, 0, it);
656 it->it_create_mode &= ~M_CHECK_STALE; 643 it->it_create_mode &= ~M_CHECK_STALE;
657 if (rc) 644 if (rc)
658 goto out_openerr; 645 goto out_openerr;
@@ -1065,7 +1052,7 @@ int ll_glimpse_ioctl(struct ll_sb_info *sbi, struct lov_stripe_md *lsm,
1065static bool file_is_noatime(const struct file *file) 1052static bool file_is_noatime(const struct file *file)
1066{ 1053{
1067 const struct vfsmount *mnt = file->f_path.mnt; 1054 const struct vfsmount *mnt = file->f_path.mnt;
1068 const struct inode *inode = file->f_path.dentry->d_inode; 1055 const struct inode *inode = file_inode(file);
1069 1056
1070 /* Adapted from file_accessed() and touch_atime().*/ 1057 /* Adapted from file_accessed() and touch_atime().*/
1071 if (file->f_flags & O_NOATIME) 1058 if (file->f_flags & O_NOATIME)
@@ -1091,7 +1078,7 @@ static bool file_is_noatime(const struct file *file)
1091 1078
1092void ll_io_init(struct cl_io *io, const struct file *file, int write) 1079void ll_io_init(struct cl_io *io, const struct file *file, int write)
1093{ 1080{
1094 struct inode *inode = file->f_dentry->d_inode; 1081 struct inode *inode = file_inode(file);
1095 1082
1096 io->u.ci_rw.crw_nonblock = file->f_flags & O_NONBLOCK; 1083 io->u.ci_rw.crw_nonblock = file->f_flags & O_NONBLOCK;
1097 if (write) { 1084 if (write) {
@@ -1117,7 +1104,7 @@ ll_file_io_generic(const struct lu_env *env, struct vvp_io_args *args,
1117 struct file *file, enum cl_io_type iot, 1104 struct file *file, enum cl_io_type iot,
1118 loff_t *ppos, size_t count) 1105 loff_t *ppos, size_t count)
1119{ 1106{
1120 struct ll_inode_info *lli = ll_i2info(file->f_dentry->d_inode); 1107 struct ll_inode_info *lli = ll_i2info(file_inode(file));
1121 struct ll_file_data *fd = LUSTRE_FPRIVATE(file); 1108 struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
1122 struct cl_io *io; 1109 struct cl_io *io;
1123 ssize_t result; 1110 ssize_t result;
@@ -1178,20 +1165,20 @@ out:
1178 /* If any bit been read/written (result != 0), we just return 1165 /* If any bit been read/written (result != 0), we just return
1179 * short read/write instead of restart io. */ 1166 * short read/write instead of restart io. */
1180 if ((result == 0 || result == -ENODATA) && io->ci_need_restart) { 1167 if ((result == 0 || result == -ENODATA) && io->ci_need_restart) {
1181 CDEBUG(D_VFSTRACE, "Restart %s on %s from %lld, count:%zd\n", 1168 CDEBUG(D_VFSTRACE, "Restart %s on %pD from %lld, count:%zd\n",
1182 iot == CIT_READ ? "read" : "write", 1169 iot == CIT_READ ? "read" : "write",
1183 file->f_dentry->d_name.name, *ppos, count); 1170 file, *ppos, count);
1184 LASSERTF(io->ci_nob == 0, "%zd", io->ci_nob); 1171 LASSERTF(io->ci_nob == 0, "%zd", io->ci_nob);
1185 goto restart; 1172 goto restart;
1186 } 1173 }
1187 1174
1188 if (iot == CIT_READ) { 1175 if (iot == CIT_READ) {
1189 if (result >= 0) 1176 if (result >= 0)
1190 ll_stats_ops_tally(ll_i2sbi(file->f_dentry->d_inode), 1177 ll_stats_ops_tally(ll_i2sbi(file_inode(file)),
1191 LPROC_LL_READ_BYTES, result); 1178 LPROC_LL_READ_BYTES, result);
1192 } else if (iot == CIT_WRITE) { 1179 } else if (iot == CIT_WRITE) {
1193 if (result >= 0) { 1180 if (result >= 0) {
1194 ll_stats_ops_tally(ll_i2sbi(file->f_dentry->d_inode), 1181 ll_stats_ops_tally(ll_i2sbi(file_inode(file)),
1195 LPROC_LL_WRITE_BYTES, result); 1182 LPROC_LL_WRITE_BYTES, result);
1196 fd->fd_write_failed = false; 1183 fd->fd_write_failed = false;
1197 } else if (result != -ERESTARTSYS) { 1184 } else if (result != -ERESTARTSYS) {
@@ -1354,7 +1341,7 @@ static int ll_lov_recreate_fid(struct inode *inode, unsigned long arg)
1354 return ll_lov_recreate(inode, &oi, ost_idx); 1341 return ll_lov_recreate(inode, &oi, ost_idx);
1355} 1342}
1356 1343
1357int ll_lov_setstripe_ea_info(struct inode *inode, struct file *file, 1344int ll_lov_setstripe_ea_info(struct inode *inode, struct dentry *dentry,
1358 int flags, struct lov_user_md *lum, int lum_size) 1345 int flags, struct lov_user_md *lum, int lum_size)
1359{ 1346{
1360 struct lov_stripe_md *lsm = NULL; 1347 struct lov_stripe_md *lsm = NULL;
@@ -1371,21 +1358,20 @@ int ll_lov_setstripe_ea_info(struct inode *inode, struct file *file,
1371 } 1358 }
1372 1359
1373 ll_inode_size_lock(inode); 1360 ll_inode_size_lock(inode);
1374 rc = ll_intent_file_open(file, lum, lum_size, &oit); 1361 rc = ll_intent_file_open(dentry, lum, lum_size, &oit);
1375 if (rc) 1362 if (rc)
1376 goto out_unlock; 1363 goto out_unlock;
1377 rc = oit.d.lustre.it_status; 1364 rc = oit.d.lustre.it_status;
1378 if (rc < 0) 1365 if (rc < 0)
1379 goto out_req_free; 1366 goto out_req_free;
1380 1367
1381 ll_release_openhandle(file->f_dentry, &oit); 1368 ll_release_openhandle(inode, &oit);
1382 1369
1383out_unlock: 1370out_unlock:
1384 ll_inode_size_unlock(inode); 1371 ll_inode_size_unlock(inode);
1385 ll_intent_release(&oit); 1372 ll_intent_release(&oit);
1386 ccc_inode_lsm_put(inode, lsm); 1373 ccc_inode_lsm_put(inode, lsm);
1387out: 1374out:
1388 cl_lov_delay_create_clear(&file->f_flags);
1389 return rc; 1375 return rc;
1390out_req_free: 1376out_req_free:
1391 ptlrpc_req_finished((struct ptlrpc_request *) oit.d.lustre.it_data); 1377 ptlrpc_req_finished((struct ptlrpc_request *) oit.d.lustre.it_data);
@@ -1499,7 +1485,9 @@ static int ll_lov_setea(struct inode *inode, struct file *file,
1499 return -EFAULT; 1485 return -EFAULT;
1500 } 1486 }
1501 1487
1502 rc = ll_lov_setstripe_ea_info(inode, file, flags, lump, lum_size); 1488 rc = ll_lov_setstripe_ea_info(inode, file->f_path.dentry, flags, lump,
1489 lum_size);
1490 cl_lov_delay_create_clear(&file->f_flags);
1503 1491
1504 OBD_FREE_LARGE(lump, lum_size); 1492 OBD_FREE_LARGE(lump, lum_size);
1505 return rc; 1493 return rc;
@@ -1526,7 +1514,9 @@ static int ll_lov_setstripe(struct inode *inode, struct file *file,
1526 return -EFAULT; 1514 return -EFAULT;
1527 } 1515 }
1528 1516
1529 rc = ll_lov_setstripe_ea_info(inode, file, flags, lumv1, lum_size); 1517 rc = ll_lov_setstripe_ea_info(inode, file->f_path.dentry, flags, lumv1,
1518 lum_size);
1519 cl_lov_delay_create_clear(&file->f_flags);
1530 if (rc == 0) { 1520 if (rc == 0) {
1531 struct lov_stripe_md *lsm; 1521 struct lov_stripe_md *lsm;
1532 __u32 gen; 1522 __u32 gen;
@@ -1631,22 +1621,21 @@ int ll_put_grouplock(struct inode *inode, struct file *file, unsigned long arg)
1631/** 1621/**
1632 * Close inode open handle 1622 * Close inode open handle
1633 * 1623 *
1634 * \param dentry [in] dentry which contains the inode 1624 * \param inode [in] inode in question
1635 * \param it [in,out] intent which contains open info and result 1625 * \param it [in,out] intent which contains open info and result
1636 * 1626 *
1637 * \retval 0 success 1627 * \retval 0 success
1638 * \retval <0 failure 1628 * \retval <0 failure
1639 */ 1629 */
1640int ll_release_openhandle(struct dentry *dentry, struct lookup_intent *it) 1630int ll_release_openhandle(struct inode *inode, struct lookup_intent *it)
1641{ 1631{
1642 struct inode *inode = dentry->d_inode;
1643 struct obd_client_handle *och; 1632 struct obd_client_handle *och;
1644 int rc; 1633 int rc;
1645 1634
1646 LASSERT(inode); 1635 LASSERT(inode);
1647 1636
1648 /* Root ? Do nothing. */ 1637 /* Root ? Do nothing. */
1649 if (dentry->d_inode->i_sb->s_root == dentry) 1638 if (is_root_inode(inode))
1650 return 0; 1639 return 0;
1651 1640
1652 /* No open handle to close? Move away */ 1641 /* No open handle to close? Move away */
@@ -1959,8 +1948,8 @@ static int ll_swap_layouts(struct file *file1, struct file *file2,
1959 if (!llss) 1948 if (!llss)
1960 return -ENOMEM; 1949 return -ENOMEM;
1961 1950
1962 llss->inode1 = file1->f_dentry->d_inode; 1951 llss->inode1 = file_inode(file1);
1963 llss->inode2 = file2->f_dentry->d_inode; 1952 llss->inode2 = file_inode(file2);
1964 1953
1965 if (!S_ISREG(llss->inode2->i_mode)) { 1954 if (!S_ISREG(llss->inode2->i_mode)) {
1966 rc = -EINVAL; 1955 rc = -EINVAL;
@@ -2092,7 +2081,7 @@ putgl:
2092 rc = 0; 2081 rc = 0;
2093 if (llss->ia2.ia_valid != 0) { 2082 if (llss->ia2.ia_valid != 0) {
2094 mutex_lock(&llss->inode1->i_mutex); 2083 mutex_lock(&llss->inode1->i_mutex);
2095 rc = ll_setattr(file1->f_dentry, &llss->ia2); 2084 rc = ll_setattr(file1->f_path.dentry, &llss->ia2);
2096 mutex_unlock(&llss->inode1->i_mutex); 2085 mutex_unlock(&llss->inode1->i_mutex);
2097 } 2086 }
2098 2087
@@ -2100,7 +2089,7 @@ putgl:
2100 int rc1; 2089 int rc1;
2101 2090
2102 mutex_lock(&llss->inode2->i_mutex); 2091 mutex_lock(&llss->inode2->i_mutex);
2103 rc1 = ll_setattr(file2->f_dentry, &llss->ia1); 2092 rc1 = ll_setattr(file2->f_path.dentry, &llss->ia1);
2104 mutex_unlock(&llss->inode2->i_mutex); 2093 mutex_unlock(&llss->inode2->i_mutex);
2105 if (rc == 0) 2094 if (rc == 0)
2106 rc = rc1; 2095 rc = rc1;
@@ -2185,7 +2174,7 @@ static int ll_hsm_import(struct inode *inode, struct file *file,
2185 2174
2186 mutex_lock(&inode->i_mutex); 2175 mutex_lock(&inode->i_mutex);
2187 2176
2188 rc = ll_setattr_raw(file->f_dentry, attr, true); 2177 rc = ll_setattr_raw(file->f_path.dentry, attr, true);
2189 if (rc == -ENODATA) 2178 if (rc == -ENODATA)
2190 rc = 0; 2179 rc = 0;
2191 2180
@@ -2204,7 +2193,7 @@ out:
2204static long 2193static long
2205ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 2194ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2206{ 2195{
2207 struct inode *inode = file->f_dentry->d_inode; 2196 struct inode *inode = file_inode(file);
2208 struct ll_file_data *fd = LUSTRE_FPRIVATE(file); 2197 struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
2209 int flags, rc; 2198 int flags, rc;
2210 2199
@@ -2523,7 +2512,7 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2523 2512
2524static loff_t ll_file_seek(struct file *file, loff_t offset, int origin) 2513static loff_t ll_file_seek(struct file *file, loff_t offset, int origin)
2525{ 2514{
2526 struct inode *inode = file->f_dentry->d_inode; 2515 struct inode *inode = file_inode(file);
2527 loff_t retval, eof = 0; 2516 loff_t retval, eof = 0;
2528 2517
2529 retval = offset + ((origin == SEEK_END) ? i_size_read(inode) : 2518 retval = offset + ((origin == SEEK_END) ? i_size_read(inode) :
@@ -2547,7 +2536,7 @@ static loff_t ll_file_seek(struct file *file, loff_t offset, int origin)
2547 2536
2548static int ll_flush(struct file *file, fl_owner_t id) 2537static int ll_flush(struct file *file, fl_owner_t id)
2549{ 2538{
2550 struct inode *inode = file->f_dentry->d_inode; 2539 struct inode *inode = file_inode(file);
2551 struct ll_inode_info *lli = ll_i2info(inode); 2540 struct ll_inode_info *lli = ll_i2info(inode);
2552 struct ll_file_data *fd = LUSTRE_FPRIVATE(file); 2541 struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
2553 int rc, err; 2542 int rc, err;
@@ -2622,16 +2611,9 @@ int cl_sync_file_range(struct inode *inode, loff_t start, loff_t end,
2622 return result; 2611 return result;
2623} 2612}
2624 2613
2625/*
2626 * When dentry is provided (the 'else' case), *file->f_dentry may be
2627 * null and dentry must be used directly rather than pulled from
2628 * *file->f_dentry as is done otherwise.
2629 */
2630
2631int ll_fsync(struct file *file, loff_t start, loff_t end, int datasync) 2614int ll_fsync(struct file *file, loff_t start, loff_t end, int datasync)
2632{ 2615{
2633 struct dentry *dentry = file->f_dentry; 2616 struct inode *inode = file_inode(file);
2634 struct inode *inode = dentry->d_inode;
2635 struct ll_inode_info *lli = ll_i2info(inode); 2617 struct ll_inode_info *lli = ll_i2info(inode);
2636 struct ptlrpc_request *req; 2618 struct ptlrpc_request *req;
2637 struct obd_capa *oc; 2619 struct obd_capa *oc;
@@ -2684,7 +2666,7 @@ int ll_fsync(struct file *file, loff_t start, loff_t end, int datasync)
2684static int 2666static int
2685ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock) 2667ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock)
2686{ 2668{
2687 struct inode *inode = file->f_dentry->d_inode; 2669 struct inode *inode = file_inode(file);
2688 struct ll_sb_info *sbi = ll_i2sbi(inode); 2670 struct ll_sb_info *sbi = ll_i2sbi(inode);
2689 struct ldlm_enqueue_info einfo = { 2671 struct ldlm_enqueue_info einfo = {
2690 .ei_type = LDLM_FLOCK, 2672 .ei_type = LDLM_FLOCK,
@@ -2908,8 +2890,8 @@ static int __ll_inode_revalidate(struct dentry *dentry, __u64 ibits)
2908 2890
2909 LASSERT(inode != NULL); 2891 LASSERT(inode != NULL);
2910 2892
2911 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),name=%s\n", 2893 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),name=%pd\n",
2912 inode->i_ino, inode->i_generation, inode, dentry->d_name.name); 2894 inode->i_ino, inode->i_generation, inode, dentry);
2913 2895
2914 exp = ll_i2mdexp(inode); 2896 exp = ll_i2mdexp(inode);
2915 2897
@@ -3119,7 +3101,7 @@ int ll_inode_permission(struct inode *inode, int mask)
3119 /* as root inode are NOT getting validated in lookup operation, 3101 /* as root inode are NOT getting validated in lookup operation,
3120 * need to do it before permission check. */ 3102 * need to do it before permission check. */
3121 3103
3122 if (inode == inode->i_sb->s_root->d_inode) { 3104 if (is_root_inode(inode)) {
3123 rc = __ll_inode_revalidate(inode->i_sb->s_root, 3105 rc = __ll_inode_revalidate(inode->i_sb->s_root,
3124 MDS_INODELOCK_LOOKUP); 3106 MDS_INODELOCK_LOOKUP);
3125 if (rc) 3107 if (rc)
diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h
index 36aa0fd147f2..77d1c12704b4 100644
--- a/drivers/staging/lustre/lustre/llite/llite_internal.h
+++ b/drivers/staging/lustre/lustre/llite/llite_internal.h
@@ -748,7 +748,7 @@ int ll_file_release(struct inode *inode, struct file *file);
748int ll_glimpse_ioctl(struct ll_sb_info *sbi, 748int ll_glimpse_ioctl(struct ll_sb_info *sbi,
749 struct lov_stripe_md *lsm, lstat_t *st); 749 struct lov_stripe_md *lsm, lstat_t *st);
750void ll_ioepoch_open(struct ll_inode_info *lli, __u64 ioepoch); 750void ll_ioepoch_open(struct ll_inode_info *lli, __u64 ioepoch);
751int ll_release_openhandle(struct dentry *, struct lookup_intent *); 751int ll_release_openhandle(struct inode *, struct lookup_intent *);
752int ll_md_real_close(struct inode *inode, fmode_t fmode); 752int ll_md_real_close(struct inode *inode, fmode_t fmode);
753void ll_ioepoch_close(struct inode *inode, struct md_op_data *op_data, 753void ll_ioepoch_close(struct inode *inode, struct md_op_data *op_data,
754 struct obd_client_handle **och, unsigned long flags); 754 struct obd_client_handle **och, unsigned long flags);
@@ -763,7 +763,7 @@ struct posix_acl *ll_get_acl(struct inode *inode, int type);
763 763
764int ll_inode_permission(struct inode *inode, int mask); 764int ll_inode_permission(struct inode *inode, int mask);
765 765
766int ll_lov_setstripe_ea_info(struct inode *inode, struct file *file, 766int ll_lov_setstripe_ea_info(struct inode *inode, struct dentry *dentry,
767 int flags, struct lov_user_md *lum, 767 int flags, struct lov_user_md *lum,
768 int lum_size); 768 int lum_size);
769int ll_lov_getstripe_ea_info(struct inode *inode, const char *filename, 769int ll_lov_getstripe_ea_info(struct inode *inode, const char *filename,
@@ -1413,7 +1413,7 @@ extern ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io,
1413static inline int ll_file_nolock(const struct file *file) 1413static inline int ll_file_nolock(const struct file *file)
1414{ 1414{
1415 struct ll_file_data *fd = LUSTRE_FPRIVATE(file); 1415 struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
1416 struct inode *inode = file->f_dentry->d_inode; 1416 struct inode *inode = file_inode(file);
1417 1417
1418 LASSERT(fd != NULL); 1418 LASSERT(fd != NULL);
1419 return ((fd->fd_flags & LL_FILE_IGNORE_LOCK) || 1419 return ((fd->fd_flags & LL_FILE_IGNORE_LOCK) ||
@@ -1489,8 +1489,8 @@ static inline void __d_lustre_invalidate(struct dentry *dentry)
1489 */ 1489 */
1490static inline void d_lustre_invalidate(struct dentry *dentry, int nested) 1490static inline void d_lustre_invalidate(struct dentry *dentry, int nested)
1491{ 1491{
1492 CDEBUG(D_DENTRY, "invalidate dentry %.*s (%p) parent %p inode %p " 1492 CDEBUG(D_DENTRY, "invalidate dentry %pd (%p) parent %p inode %p "
1493 "refc %d\n", dentry->d_name.len, dentry->d_name.name, dentry, 1493 "refc %d\n", dentry, dentry,
1494 dentry->d_parent, dentry->d_inode, d_count(dentry)); 1494 dentry->d_parent, dentry->d_inode, d_count(dentry));
1495 1495
1496 spin_lock_nested(&dentry->d_lock, 1496 spin_lock_nested(&dentry->d_lock,
diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c
index a8bcc51057f1..7b6b9e2e0102 100644
--- a/drivers/staging/lustre/lustre/llite/llite_lib.c
+++ b/drivers/staging/lustre/lustre/llite/llite_lib.c
@@ -698,10 +698,8 @@ void lustre_dump_dentry(struct dentry *dentry, int recur)
698 list_for_each(tmp, &dentry->d_subdirs) 698 list_for_each(tmp, &dentry->d_subdirs)
699 subdirs++; 699 subdirs++;
700 700
701 CERROR("dentry %p dump: name=%.*s parent=%.*s (%p), inode=%p, count=%u," 701 CERROR("dentry %p dump: name=%pd parent=%p, inode=%p, count=%u,"
702 " flags=0x%x, fsdata=%p, %d subdirs\n", dentry, 702 " flags=0x%x, fsdata=%p, %d subdirs\n", dentry, dentry,
703 dentry->d_name.len, dentry->d_name.name,
704 dentry->d_parent->d_name.len, dentry->d_parent->d_name.name,
705 dentry->d_parent, dentry->d_inode, d_count(dentry), 703 dentry->d_parent, dentry->d_inode, d_count(dentry),
706 dentry->d_flags, dentry->d_fsdata, subdirs); 704 dentry->d_flags, dentry->d_fsdata, subdirs);
707 if (dentry->d_inode != NULL) 705 if (dentry->d_inode != NULL)
@@ -711,7 +709,7 @@ void lustre_dump_dentry(struct dentry *dentry, int recur)
711 return; 709 return;
712 710
713 list_for_each(tmp, &dentry->d_subdirs) { 711 list_for_each(tmp, &dentry->d_subdirs) {
714 struct dentry *d = list_entry(tmp, struct dentry, d_u.d_child); 712 struct dentry *d = list_entry(tmp, struct dentry, d_child);
715 lustre_dump_dentry(d, recur - 1); 713 lustre_dump_dentry(d, recur - 1);
716 } 714 }
717} 715}
diff --git a/drivers/staging/lustre/lustre/llite/llite_mmap.c b/drivers/staging/lustre/lustre/llite/llite_mmap.c
index ae605a6d9dc2..ba1c047ae927 100644
--- a/drivers/staging/lustre/lustre/llite/llite_mmap.c
+++ b/drivers/staging/lustre/lustre/llite/llite_mmap.c
@@ -100,7 +100,7 @@ ll_fault_io_init(struct vm_area_struct *vma, struct lu_env **env_ret,
100 unsigned long *ra_flags) 100 unsigned long *ra_flags)
101{ 101{
102 struct file *file = vma->vm_file; 102 struct file *file = vma->vm_file;
103 struct inode *inode = file->f_dentry->d_inode; 103 struct inode *inode = file_inode(file);
104 struct cl_io *io; 104 struct cl_io *io;
105 struct cl_fault_io *fio; 105 struct cl_fault_io *fio;
106 struct lu_env *env; 106 struct lu_env *env;
@@ -213,7 +213,7 @@ static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
213 cfs_restore_sigs(set); 213 cfs_restore_sigs(set);
214 214
215 if (result == 0) { 215 if (result == 0) {
216 struct inode *inode = vma->vm_file->f_dentry->d_inode; 216 struct inode *inode = file_inode(vma->vm_file);
217 struct ll_inode_info *lli = ll_i2info(inode); 217 struct ll_inode_info *lli = ll_i2info(inode);
218 218
219 lock_page(vmpage); 219 lock_page(vmpage);
@@ -396,7 +396,7 @@ static int ll_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
396 CWARN("app(%s): the page %lu of file %lu is under heavy" 396 CWARN("app(%s): the page %lu of file %lu is under heavy"
397 " contention.\n", 397 " contention.\n",
398 current->comm, vmf->pgoff, 398 current->comm, vmf->pgoff,
399 vma->vm_file->f_dentry->d_inode->i_ino); 399 file_inode(vma->vm_file)->i_ino);
400 printed = true; 400 printed = true;
401 } 401 }
402 } while (retry); 402 } while (retry);
@@ -430,7 +430,7 @@ static int ll_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
430 */ 430 */
431static void ll_vm_open(struct vm_area_struct *vma) 431static void ll_vm_open(struct vm_area_struct *vma)
432{ 432{
433 struct inode *inode = vma->vm_file->f_dentry->d_inode; 433 struct inode *inode = file_inode(vma->vm_file);
434 struct ccc_object *vob = cl_inode2ccc(inode); 434 struct ccc_object *vob = cl_inode2ccc(inode);
435 435
436 LASSERT(vma->vm_file); 436 LASSERT(vma->vm_file);
@@ -443,7 +443,7 @@ static void ll_vm_open(struct vm_area_struct *vma)
443 */ 443 */
444static void ll_vm_close(struct vm_area_struct *vma) 444static void ll_vm_close(struct vm_area_struct *vma)
445{ 445{
446 struct inode *inode = vma->vm_file->f_dentry->d_inode; 446 struct inode *inode = file_inode(vma->vm_file);
447 struct ccc_object *vob = cl_inode2ccc(inode); 447 struct ccc_object *vob = cl_inode2ccc(inode);
448 448
449 LASSERT(vma->vm_file); 449 LASSERT(vma->vm_file);
@@ -476,7 +476,7 @@ static const struct vm_operations_struct ll_file_vm_ops = {
476 476
477int ll_file_mmap(struct file *file, struct vm_area_struct *vma) 477int ll_file_mmap(struct file *file, struct vm_area_struct *vma)
478{ 478{
479 struct inode *inode = file->f_dentry->d_inode; 479 struct inode *inode = file_inode(file);
480 int rc; 480 int rc;
481 481
482 if (ll_file_nolock(file)) 482 if (ll_file_nolock(file))
diff --git a/drivers/staging/lustre/lustre/llite/llite_nfs.c b/drivers/staging/lustre/lustre/llite/llite_nfs.c
index ae3a12ab7fa1..243a7840457f 100644
--- a/drivers/staging/lustre/lustre/llite/llite_nfs.c
+++ b/drivers/staging/lustre/lustre/llite/llite_nfs.c
@@ -207,13 +207,15 @@ static int ll_encode_fh(struct inode *inode, __u32 *fh, int *plen,
207 return LUSTRE_NFS_FID; 207 return LUSTRE_NFS_FID;
208} 208}
209 209
210static int ll_nfs_get_name_filldir(void *cookie, const char *name, int namelen, 210static int ll_nfs_get_name_filldir(struct dir_context *ctx, const char *name,
211 loff_t hash, u64 ino, unsigned type) 211 int namelen, loff_t hash, u64 ino,
212 unsigned type)
212{ 213{
213 /* It is hack to access lde_fid for comparison with lgd_fid. 214 /* It is hack to access lde_fid for comparison with lgd_fid.
214 * So the input 'name' must be part of the 'lu_dirent'. */ 215 * So the input 'name' must be part of the 'lu_dirent'. */
215 struct lu_dirent *lde = container_of0(name, struct lu_dirent, lde_name); 216 struct lu_dirent *lde = container_of0(name, struct lu_dirent, lde_name);
216 struct ll_getname_data *lgd = cookie; 217 struct ll_getname_data *lgd =
218 container_of(ctx, struct ll_getname_data, ctx);
217 struct lu_fid fid; 219 struct lu_fid fid;
218 220
219 fid_le_to_cpu(&fid, &lde->lde_fid); 221 fid_le_to_cpu(&fid, &lde->lde_fid);
diff --git a/drivers/staging/lustre/lustre/llite/lloop.c b/drivers/staging/lustre/lustre/llite/lloop.c
index 264e5ec3fed6..9e31b789b790 100644
--- a/drivers/staging/lustre/lustre/llite/lloop.c
+++ b/drivers/staging/lustre/lustre/llite/lloop.c
@@ -187,7 +187,7 @@ static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head)
187{ 187{
188 const struct lu_env *env = lo->lo_env; 188 const struct lu_env *env = lo->lo_env;
189 struct cl_io *io = &lo->lo_io; 189 struct cl_io *io = &lo->lo_io;
190 struct inode *inode = lo->lo_backing_file->f_dentry->d_inode; 190 struct inode *inode = file_inode(lo->lo_backing_file);
191 struct cl_object *obj = ll_i2info(inode)->lli_clob; 191 struct cl_object *obj = ll_i2info(inode)->lli_clob;
192 pgoff_t offset; 192 pgoff_t offset;
193 int ret; 193 int ret;
@@ -626,7 +626,7 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
626 break; 626 break;
627 } 627 }
628 if (inode == NULL) 628 if (inode == NULL)
629 inode = lo->lo_backing_file->f_dentry->d_inode; 629 inode = file_inode(lo->lo_backing_file);
630 if (lo->lo_state == LLOOP_BOUND) 630 if (lo->lo_state == LLOOP_BOUND)
631 fid = ll_i2info(inode)->lli_fid; 631 fid = ll_i2info(inode)->lli_fid;
632 else 632 else
@@ -692,8 +692,7 @@ static enum llioc_iter lloop_ioctl(struct inode *unused, struct file *file,
692 lo_free = lo; 692 lo_free = lo;
693 continue; 693 continue;
694 } 694 }
695 if (lo->lo_backing_file->f_dentry->d_inode == 695 if (file_inode(lo->lo_backing_file) == file_inode(file))
696 file->f_dentry->d_inode)
697 break; 696 break;
698 } 697 }
699 if (lo || !lo_free) { 698 if (lo || !lo_free) {
diff --git a/drivers/staging/lustre/lustre/llite/namei.c b/drivers/staging/lustre/lustre/llite/namei.c
index 7a68c1e027e0..8e926b385a60 100644
--- a/drivers/staging/lustre/lustre/llite/namei.c
+++ b/drivers/staging/lustre/lustre/llite/namei.c
@@ -54,27 +54,6 @@
54static int ll_create_it(struct inode *, struct dentry *, 54static int ll_create_it(struct inode *, struct dentry *,
55 int, struct lookup_intent *); 55 int, struct lookup_intent *);
56 56
57/*
58 * Check if we have something mounted at the named dchild.
59 * In such a case there would always be dentry present.
60 */
61static int ll_d_mountpoint(struct dentry *dparent, struct dentry *dchild,
62 struct qstr *name)
63{
64 int mounted = 0;
65
66 if (unlikely(dchild)) {
67 mounted = d_mountpoint(dchild);
68 } else if (dparent) {
69 dchild = d_lookup(dparent, name);
70 if (dchild) {
71 mounted = d_mountpoint(dchild);
72 dput(dchild);
73 }
74 }
75 return mounted;
76}
77
78/* called from iget5_locked->find_inode() under inode_hash_lock spinlock */ 57/* called from iget5_locked->find_inode() under inode_hash_lock spinlock */
79static int ll_test_inode(struct inode *inode, void *opaque) 58static int ll_test_inode(struct inode *inode, void *opaque)
80{ 59{
@@ -167,14 +146,14 @@ static void ll_invalidate_negative_children(struct inode *dir)
167 struct ll_d_hlist_node *p; 146 struct ll_d_hlist_node *p;
168 147
169 ll_lock_dcache(dir); 148 ll_lock_dcache(dir);
170 ll_d_hlist_for_each_entry(dentry, p, &dir->i_dentry, d_alias) { 149 ll_d_hlist_for_each_entry(dentry, p, &dir->i_dentry, d_u.d_alias) {
171 spin_lock(&dentry->d_lock); 150 spin_lock(&dentry->d_lock);
172 if (!list_empty(&dentry->d_subdirs)) { 151 if (!list_empty(&dentry->d_subdirs)) {
173 struct dentry *child; 152 struct dentry *child;
174 153
175 list_for_each_entry_safe(child, tmp_subdir, 154 list_for_each_entry_safe(child, tmp_subdir,
176 &dentry->d_subdirs, 155 &dentry->d_subdirs,
177 d_u.d_child) { 156 d_child) {
178 if (child->d_inode == NULL) 157 if (child->d_inode == NULL)
179 d_lustre_invalidate(child, 1); 158 d_lustre_invalidate(child, 1);
180 } 159 }
@@ -285,7 +264,7 @@ int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
285 264
286 if ((bits & (MDS_INODELOCK_LOOKUP | MDS_INODELOCK_PERM)) && 265 if ((bits & (MDS_INODELOCK_LOOKUP | MDS_INODELOCK_PERM)) &&
287 inode->i_sb->s_root != NULL && 266 inode->i_sb->s_root != NULL &&
288 inode != inode->i_sb->s_root->d_inode) 267 is_root_inode(inode))
289 ll_invalidate_aliases(inode); 268 ll_invalidate_aliases(inode);
290 269
291 iput(inode); 270 iput(inode);
@@ -362,7 +341,7 @@ static struct dentry *ll_find_alias(struct inode *inode, struct dentry *dentry)
362 discon_alias = invalid_alias = NULL; 341 discon_alias = invalid_alias = NULL;
363 342
364 ll_lock_dcache(inode); 343 ll_lock_dcache(inode);
365 ll_d_hlist_for_each_entry(alias, p, &inode->i_dentry, d_alias) { 344 ll_d_hlist_for_each_entry(alias, p, &inode->i_dentry, d_u.d_alias) {
366 LASSERT(alias != dentry); 345 LASSERT(alias != dentry);
367 346
368 spin_lock(&alias->d_lock); 347 spin_lock(&alias->d_lock);
@@ -509,8 +488,8 @@ static struct dentry *ll_lookup_it(struct inode *parent, struct dentry *dentry,
509 if (dentry->d_name.len > ll_i2sbi(parent)->ll_namelen) 488 if (dentry->d_name.len > ll_i2sbi(parent)->ll_namelen)
510 return ERR_PTR(-ENAMETOOLONG); 489 return ERR_PTR(-ENAMETOOLONG);
511 490
512 CDEBUG(D_VFSTRACE, "VFS Op:name=%.*s,dir=%lu/%u(%p),intent=%s\n", 491 CDEBUG(D_VFSTRACE, "VFS Op:name=%pd,dir=%lu/%u(%p),intent=%s\n",
513 dentry->d_name.len, dentry->d_name.name, parent->i_ino, 492 dentry, parent->i_ino,
514 parent->i_generation, parent, LL_IT2STR(it)); 493 parent->i_generation, parent, LL_IT2STR(it));
515 494
516 if (d_mountpoint(dentry)) 495 if (d_mountpoint(dentry))
@@ -563,7 +542,7 @@ static struct dentry *ll_lookup_it(struct inode *parent, struct dentry *dentry,
563 if ((it->it_op & IT_OPEN) && dentry->d_inode && 542 if ((it->it_op & IT_OPEN) && dentry->d_inode &&
564 !S_ISREG(dentry->d_inode->i_mode) && 543 !S_ISREG(dentry->d_inode->i_mode) &&
565 !S_ISDIR(dentry->d_inode->i_mode)) { 544 !S_ISDIR(dentry->d_inode->i_mode)) {
566 ll_release_openhandle(dentry, it); 545 ll_release_openhandle(dentry->d_inode, it);
567 } 546 }
568 ll_lookup_finish_locks(it, dentry); 547 ll_lookup_finish_locks(it, dentry);
569 548
@@ -586,8 +565,8 @@ static struct dentry *ll_lookup_nd(struct inode *parent, struct dentry *dentry,
586 struct lookup_intent *itp, it = { .it_op = IT_GETATTR }; 565 struct lookup_intent *itp, it = { .it_op = IT_GETATTR };
587 struct dentry *de; 566 struct dentry *de;
588 567
589 CDEBUG(D_VFSTRACE, "VFS Op:name=%.*s,dir=%lu/%u(%p),flags=%u\n", 568 CDEBUG(D_VFSTRACE, "VFS Op:name=%pd,dir=%lu/%u(%p),flags=%u\n",
590 dentry->d_name.len, dentry->d_name.name, parent->i_ino, 569 dentry, parent->i_ino,
591 parent->i_generation, parent, flags); 570 parent->i_generation, parent, flags);
592 571
593 /* Optimize away (CREATE && !OPEN). Let .create handle the race. */ 572 /* Optimize away (CREATE && !OPEN). Let .create handle the race. */
@@ -619,9 +598,9 @@ static int ll_atomic_open(struct inode *dir, struct dentry *dentry,
619 long long lookup_flags = LOOKUP_OPEN; 598 long long lookup_flags = LOOKUP_OPEN;
620 int rc = 0; 599 int rc = 0;
621 600
622 CDEBUG(D_VFSTRACE, "VFS Op:name=%.*s,dir=%lu/%u(%p),file %p," 601 CDEBUG(D_VFSTRACE, "VFS Op:name=%pd,dir=%lu/%u(%p),file %p,"
623 "open_flags %x,mode %x opened %d\n", 602 "open_flags %x,mode %x opened %d\n",
624 dentry->d_name.len, dentry->d_name.name, dir->i_ino, 603 dentry, dir->i_ino,
625 dir->i_generation, dir, file, open_flags, mode, *opened); 604 dir->i_generation, dir, file, open_flags, mode, *opened);
626 605
627 it = kzalloc(sizeof(*it), GFP_NOFS); 606 it = kzalloc(sizeof(*it), GFP_NOFS);
@@ -741,8 +720,8 @@ static int ll_create_it(struct inode *dir, struct dentry *dentry, int mode,
741 struct inode *inode; 720 struct inode *inode;
742 int rc = 0; 721 int rc = 0;
743 722
744 CDEBUG(D_VFSTRACE, "VFS Op:name=%.*s,dir=%lu/%u(%p),intent=%s\n", 723 CDEBUG(D_VFSTRACE, "VFS Op:name=%pd,dir=%lu/%u(%p),intent=%s\n",
745 dentry->d_name.len, dentry->d_name.name, dir->i_ino, 724 dentry, dir->i_ino,
746 dir->i_generation, dir, LL_IT2STR(it)); 725 dir->i_generation, dir, LL_IT2STR(it));
747 726
748 rc = it_open_error(DISP_OPEN_CREATE, it); 727 rc = it_open_error(DISP_OPEN_CREATE, it);
@@ -775,9 +754,9 @@ static void ll_update_times(struct ptlrpc_request *request,
775 LTIME_S(inode->i_ctime) = body->ctime; 754 LTIME_S(inode->i_ctime) = body->ctime;
776} 755}
777 756
778static int ll_new_node(struct inode *dir, struct qstr *name, 757static int ll_new_node(struct inode *dir, struct dentry *dentry,
779 const char *tgt, int mode, int rdev, 758 const char *tgt, int mode, int rdev,
780 struct dentry *dchild, __u32 opc) 759 __u32 opc)
781{ 760{
782 struct ptlrpc_request *request = NULL; 761 struct ptlrpc_request *request = NULL;
783 struct md_op_data *op_data; 762 struct md_op_data *op_data;
@@ -789,8 +768,10 @@ static int ll_new_node(struct inode *dir, struct qstr *name,
789 if (unlikely(tgt != NULL)) 768 if (unlikely(tgt != NULL))
790 tgt_len = strlen(tgt) + 1; 769 tgt_len = strlen(tgt) + 1;
791 770
792 op_data = ll_prep_md_op_data(NULL, dir, NULL, name->name, 771 op_data = ll_prep_md_op_data(NULL, dir, NULL,
793 name->len, 0, opc, NULL); 772 dentry->d_name.name,
773 dentry->d_name.len,
774 0, opc, NULL);
794 if (IS_ERR(op_data)) { 775 if (IS_ERR(op_data)) {
795 err = PTR_ERR(op_data); 776 err = PTR_ERR(op_data);
796 goto err_exit; 777 goto err_exit;
@@ -806,27 +787,25 @@ static int ll_new_node(struct inode *dir, struct qstr *name,
806 787
807 ll_update_times(request, dir); 788 ll_update_times(request, dir);
808 789
809 if (dchild) { 790 err = ll_prep_inode(&inode, request, dir->i_sb, NULL);
810 err = ll_prep_inode(&inode, request, dchild->d_sb, NULL); 791 if (err)
811 if (err) 792 goto err_exit;
812 goto err_exit;
813 793
814 d_instantiate(dchild, inode); 794 d_instantiate(dentry, inode);
815 }
816err_exit: 795err_exit:
817 ptlrpc_req_finished(request); 796 ptlrpc_req_finished(request);
818 797
819 return err; 798 return err;
820} 799}
821 800
822static int ll_mknod_generic(struct inode *dir, struct qstr *name, int mode, 801static int ll_mknod(struct inode *dir, struct dentry *dchild,
823 unsigned rdev, struct dentry *dchild) 802 umode_t mode, dev_t rdev)
824{ 803{
825 int err; 804 int err;
826 805
827 CDEBUG(D_VFSTRACE, "VFS Op:name=%.*s,dir=%lu/%u(%p) mode %o dev %x\n", 806 CDEBUG(D_VFSTRACE, "VFS Op:name=%pd,dir=%lu/%u(%p) mode %o dev %x\n",
828 name->len, name->name, dir->i_ino, dir->i_generation, dir, 807 dchild, dir->i_ino, dir->i_generation, dir,
829 mode, rdev); 808 mode, old_encode_dev(rdev));
830 809
831 if (!IS_POSIXACL(dir) || !exp_connect_umask(ll_i2mdexp(dir))) 810 if (!IS_POSIXACL(dir) || !exp_connect_umask(ll_i2mdexp(dir)))
832 mode &= ~current_umask(); 811 mode &= ~current_umask();
@@ -839,7 +818,8 @@ static int ll_mknod_generic(struct inode *dir, struct qstr *name, int mode,
839 case S_IFBLK: 818 case S_IFBLK:
840 case S_IFIFO: 819 case S_IFIFO:
841 case S_IFSOCK: 820 case S_IFSOCK:
842 err = ll_new_node(dir, name, NULL, mode, rdev, dchild, 821 err = ll_new_node(dir, dchild, NULL, mode,
822 old_encode_dev(rdev),
843 LUSTRE_OPC_MKNOD); 823 LUSTRE_OPC_MKNOD);
844 break; 824 break;
845 case S_IFDIR: 825 case S_IFDIR:
@@ -863,134 +843,25 @@ static int ll_create_nd(struct inode *dir, struct dentry *dentry,
863{ 843{
864 int rc; 844 int rc;
865 845
866 CDEBUG(D_VFSTRACE, "VFS Op:name=%.*s,dir=%lu/%u(%p)," 846 CDEBUG(D_VFSTRACE, "VFS Op:name=%pd,dir=%lu/%u(%p),"
867 "flags=%u, excl=%d\n", 847 "flags=%u, excl=%d\n",
868 dentry->d_name.len, dentry->d_name.name, dir->i_ino, 848 dentry, dir->i_ino,
869 dir->i_generation, dir, mode, want_excl); 849 dir->i_generation, dir, mode, want_excl);
870 850
871 rc = ll_mknod_generic(dir, &dentry->d_name, mode, 0, dentry); 851 rc = ll_mknod(dir, dentry, mode, 0);
872 852
873 ll_stats_ops_tally(ll_i2sbi(dir), LPROC_LL_CREATE, 1); 853 ll_stats_ops_tally(ll_i2sbi(dir), LPROC_LL_CREATE, 1);
874 854
875 CDEBUG(D_VFSTRACE, "VFS Op:name=%.*s, unhashed %d\n", 855 CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, unhashed %d\n",
876 dentry->d_name.len, dentry->d_name.name, d_unhashed(dentry)); 856 dentry, d_unhashed(dentry));
877 857
878 return rc; 858 return rc;
879} 859}
880 860
881static int ll_symlink_generic(struct inode *dir, struct qstr *name, 861static inline void ll_get_child_fid(struct dentry *child, struct lu_fid *fid)
882 const char *tgt, struct dentry *dchild)
883{ 862{
884 int err; 863 if (child->d_inode)
885 864 *fid = *ll_inode2fid(child->d_inode);
886 CDEBUG(D_VFSTRACE, "VFS Op:name=%.*s,dir=%lu/%u(%p),target=%.*s\n",
887 name->len, name->name, dir->i_ino, dir->i_generation,
888 dir, 3000, tgt);
889
890 err = ll_new_node(dir, name, (char *)tgt, S_IFLNK | S_IRWXUGO,
891 0, dchild, LUSTRE_OPC_SYMLINK);
892
893 if (!err)
894 ll_stats_ops_tally(ll_i2sbi(dir), LPROC_LL_SYMLINK, 1);
895
896 return err;
897}
898
899static int ll_link_generic(struct inode *src, struct inode *dir,
900 struct qstr *name, struct dentry *dchild)
901{
902 struct ll_sb_info *sbi = ll_i2sbi(dir);
903 struct ptlrpc_request *request = NULL;
904 struct md_op_data *op_data;
905 int err;
906
907 CDEBUG(D_VFSTRACE,
908 "VFS Op: inode=%lu/%u(%p), dir=%lu/%u(%p), target=%.*s\n",
909 src->i_ino, src->i_generation, src, dir->i_ino,
910 dir->i_generation, dir, name->len, name->name);
911
912 op_data = ll_prep_md_op_data(NULL, src, dir, name->name, name->len,
913 0, LUSTRE_OPC_ANY, NULL);
914 if (IS_ERR(op_data))
915 return PTR_ERR(op_data);
916
917 err = md_link(sbi->ll_md_exp, op_data, &request);
918 ll_finish_md_op_data(op_data);
919 if (err)
920 goto out;
921
922 ll_update_times(request, dir);
923 ll_stats_ops_tally(sbi, LPROC_LL_LINK, 1);
924out:
925 ptlrpc_req_finished(request);
926 return err;
927}
928
929static int ll_mkdir_generic(struct inode *dir, struct qstr *name,
930 int mode, struct dentry *dchild)
931
932{
933 int err;
934
935 CDEBUG(D_VFSTRACE, "VFS Op:name=%.*s,dir=%lu/%u(%p)\n",
936 name->len, name->name, dir->i_ino, dir->i_generation, dir);
937
938 if (!IS_POSIXACL(dir) || !exp_connect_umask(ll_i2mdexp(dir)))
939 mode &= ~current_umask();
940 mode = (mode & (S_IRWXUGO|S_ISVTX)) | S_IFDIR;
941 err = ll_new_node(dir, name, NULL, mode, 0, dchild, LUSTRE_OPC_MKDIR);
942
943 if (!err)
944 ll_stats_ops_tally(ll_i2sbi(dir), LPROC_LL_MKDIR, 1);
945
946 return err;
947}
948
949/* Try to find the child dentry by its name.
950 If found, put the result fid into @fid. */
951static void ll_get_child_fid(struct inode * dir, struct qstr *name,
952 struct lu_fid *fid)
953{
954 struct dentry *parent, *child;
955
956 parent = ll_d_hlist_entry(dir->i_dentry, struct dentry, d_alias);
957 child = d_lookup(parent, name);
958 if (child) {
959 if (child->d_inode)
960 *fid = *ll_inode2fid(child->d_inode);
961 dput(child);
962 }
963}
964
965static int ll_rmdir_generic(struct inode *dir, struct dentry *dparent,
966 struct dentry *dchild, struct qstr *name)
967{
968 struct ptlrpc_request *request = NULL;
969 struct md_op_data *op_data;
970 int rc;
971
972 CDEBUG(D_VFSTRACE, "VFS Op:name=%.*s,dir=%lu/%u(%p)\n",
973 name->len, name->name, dir->i_ino, dir->i_generation, dir);
974
975 if (unlikely(ll_d_mountpoint(dparent, dchild, name)))
976 return -EBUSY;
977
978 op_data = ll_prep_md_op_data(NULL, dir, NULL, name->name, name->len,
979 S_IFDIR, LUSTRE_OPC_ANY, NULL);
980 if (IS_ERR(op_data))
981 return PTR_ERR(op_data);
982
983 ll_get_child_fid(dir, name, &op_data->op_fid3);
984 op_data->op_fid2 = op_data->op_fid3;
985 rc = md_unlink(ll_i2sbi(dir)->ll_md_exp, op_data, &request);
986 ll_finish_md_op_data(op_data);
987 if (rc == 0) {
988 ll_update_times(request, dir);
989 ll_stats_ops_tally(ll_i2sbi(dir), LPROC_LL_RMDIR, 1);
990 }
991
992 ptlrpc_req_finished(request);
993 return rc;
994} 865}
995 866
996/** 867/**
@@ -1099,32 +970,26 @@ out:
1099 return rc; 970 return rc;
1100} 971}
1101 972
1102/* ll_unlink_generic() doesn't update the inode with the new link count. 973/* ll_unlink() doesn't update the inode with the new link count.
1103 * Instead, ll_ddelete() and ll_d_iput() will update it based upon if there 974 * Instead, ll_ddelete() and ll_d_iput() will update it based upon if there
1104 * is any lock existing. They will recycle dentries and inodes based upon locks 975 * is any lock existing. They will recycle dentries and inodes based upon locks
1105 * too. b=20433 */ 976 * too. b=20433 */
1106static int ll_unlink_generic(struct inode *dir, struct dentry *dparent, 977static int ll_unlink(struct inode * dir, struct dentry *dentry)
1107 struct dentry *dchild, struct qstr *name)
1108{ 978{
1109 struct ptlrpc_request *request = NULL; 979 struct ptlrpc_request *request = NULL;
1110 struct md_op_data *op_data; 980 struct md_op_data *op_data;
1111 int rc; 981 int rc;
1112 CDEBUG(D_VFSTRACE, "VFS Op:name=%.*s,dir=%lu/%u(%p)\n", 982 CDEBUG(D_VFSTRACE, "VFS Op:name=%pd,dir=%lu/%u(%p)\n",
1113 name->len, name->name, dir->i_ino, dir->i_generation, dir); 983 dentry, dir->i_ino, dir->i_generation, dir);
1114
1115 /*
1116 * XXX: unlink bind mountpoint maybe call to here,
1117 * just check it as vfs_unlink does.
1118 */
1119 if (unlikely(ll_d_mountpoint(dparent, dchild, name)))
1120 return -EBUSY;
1121 984
1122 op_data = ll_prep_md_op_data(NULL, dir, NULL, name->name, 985 op_data = ll_prep_md_op_data(NULL, dir, NULL,
1123 name->len, 0, LUSTRE_OPC_ANY, NULL); 986 dentry->d_name.name,
987 dentry->d_name.len,
988 0, LUSTRE_OPC_ANY, NULL);
1124 if (IS_ERR(op_data)) 989 if (IS_ERR(op_data))
1125 return PTR_ERR(op_data); 990 return PTR_ERR(op_data);
1126 991
1127 ll_get_child_fid(dir, name, &op_data->op_fid3); 992 ll_get_child_fid(dentry, &op_data->op_fid3);
1128 op_data->op_fid2 = op_data->op_fid3; 993 op_data->op_fid2 = op_data->op_fid3;
1129 rc = md_unlink(ll_i2sbi(dir)->ll_md_exp, op_data, &request); 994 rc = md_unlink(ll_i2sbi(dir)->ll_md_exp, op_data, &request);
1130 ll_finish_md_op_data(op_data); 995 ll_finish_md_op_data(op_data);
@@ -1140,95 +1005,140 @@ static int ll_unlink_generic(struct inode *dir, struct dentry *dparent,
1140 return rc; 1005 return rc;
1141} 1006}
1142 1007
1143static int ll_rename_generic(struct inode *src, struct dentry *src_dparent, 1008static int ll_mkdir(struct inode *dir, struct dentry *dentry, ll_umode_t mode)
1144 struct dentry *src_dchild, struct qstr *src_name,
1145 struct inode *tgt, struct dentry *tgt_dparent,
1146 struct dentry *tgt_dchild, struct qstr *tgt_name)
1147{ 1009{
1148 struct ptlrpc_request *request = NULL;
1149 struct ll_sb_info *sbi = ll_i2sbi(src);
1150 struct md_op_data *op_data;
1151 int err; 1010 int err;
1152 1011
1153 CDEBUG(D_VFSTRACE, 1012 CDEBUG(D_VFSTRACE, "VFS Op:name=%pd,dir=%lu/%u(%p)\n",
1154 "VFS Op:oldname=%.*s,src_dir=%lu/%u(%p),newname=%.*s," 1013 dentry, dir->i_ino, dir->i_generation, dir);
1155 "tgt_dir=%lu/%u(%p)\n", src_name->len, src_name->name,
1156 src->i_ino, src->i_generation, src, tgt_name->len,
1157 tgt_name->name, tgt->i_ino, tgt->i_generation, tgt);
1158 1014
1159 if (unlikely(ll_d_mountpoint(src_dparent, src_dchild, src_name) || 1015 if (!IS_POSIXACL(dir) || !exp_connect_umask(ll_i2mdexp(dir)))
1160 ll_d_mountpoint(tgt_dparent, tgt_dchild, tgt_name))) 1016 mode &= ~current_umask();
1161 return -EBUSY; 1017 mode = (mode & (S_IRWXUGO|S_ISVTX)) | S_IFDIR;
1018 err = ll_new_node(dir, dentry, NULL, mode, 0, LUSTRE_OPC_MKDIR);
1162 1019
1163 op_data = ll_prep_md_op_data(NULL, src, tgt, NULL, 0, 0, 1020 if (!err)
1164 LUSTRE_OPC_ANY, NULL); 1021 ll_stats_ops_tally(ll_i2sbi(dir), LPROC_LL_MKDIR, 1);
1022
1023 return err;
1024}
1025
1026static int ll_rmdir(struct inode *dir, struct dentry *dentry)
1027{
1028 struct ptlrpc_request *request = NULL;
1029 struct md_op_data *op_data;
1030 int rc;
1031
1032 CDEBUG(D_VFSTRACE, "VFS Op:name=%pd,dir=%lu/%u(%p)\n",
1033 dentry, dir->i_ino, dir->i_generation, dir);
1034
1035 op_data = ll_prep_md_op_data(NULL, dir, NULL,
1036 dentry->d_name.name,
1037 dentry->d_name.len,
1038 S_IFDIR, LUSTRE_OPC_ANY, NULL);
1165 if (IS_ERR(op_data)) 1039 if (IS_ERR(op_data))
1166 return PTR_ERR(op_data); 1040 return PTR_ERR(op_data);
1167 1041
1168 ll_get_child_fid(src, src_name, &op_data->op_fid3); 1042 ll_get_child_fid(dentry, &op_data->op_fid3);
1169 ll_get_child_fid(tgt, tgt_name, &op_data->op_fid4); 1043 op_data->op_fid2 = op_data->op_fid3;
1170 err = md_rename(sbi->ll_md_exp, op_data, 1044 rc = md_unlink(ll_i2sbi(dir)->ll_md_exp, op_data, &request);
1171 src_name->name, src_name->len,
1172 tgt_name->name, tgt_name->len, &request);
1173 ll_finish_md_op_data(op_data); 1045 ll_finish_md_op_data(op_data);
1174 if (!err) { 1046 if (rc == 0) {
1175 ll_update_times(request, src); 1047 ll_update_times(request, dir);
1176 ll_update_times(request, tgt); 1048 ll_stats_ops_tally(ll_i2sbi(dir), LPROC_LL_RMDIR, 1);
1177 ll_stats_ops_tally(sbi, LPROC_LL_RENAME, 1);
1178 err = ll_objects_destroy(request, src);
1179 } 1049 }
1180 1050
1181 ptlrpc_req_finished(request); 1051 ptlrpc_req_finished(request);
1182 1052 return rc;
1183 return err;
1184} 1053}
1185 1054
1186static int ll_mknod(struct inode *dir, struct dentry *dchild, ll_umode_t mode, 1055static int ll_symlink(struct inode *dir, struct dentry *dentry,
1187 dev_t rdev) 1056 const char *oldname)
1188{ 1057{
1189 return ll_mknod_generic(dir, &dchild->d_name, mode, 1058 int err;
1190 old_encode_dev(rdev), dchild);
1191}
1192 1059
1193static int ll_unlink(struct inode * dir, struct dentry *dentry) 1060 CDEBUG(D_VFSTRACE, "VFS Op:name=%pd,dir=%lu/%u(%p),target=%.*s\n",
1194{ 1061 dentry, dir->i_ino, dir->i_generation,
1195 return ll_unlink_generic(dir, NULL, dentry, &dentry->d_name); 1062 dir, 3000, oldname);
1196}
1197 1063
1198static int ll_mkdir(struct inode *dir, struct dentry *dentry, ll_umode_t mode) 1064 err = ll_new_node(dir, dentry, oldname, S_IFLNK | S_IRWXUGO,
1199{ 1065 0, LUSTRE_OPC_SYMLINK);
1200 return ll_mkdir_generic(dir, &dentry->d_name, mode, dentry);
1201}
1202 1066
1203static int ll_rmdir(struct inode *dir, struct dentry *dentry) 1067 if (!err)
1204{ 1068 ll_stats_ops_tally(ll_i2sbi(dir), LPROC_LL_SYMLINK, 1);
1205 return ll_rmdir_generic(dir, NULL, dentry, &dentry->d_name);
1206}
1207 1069
1208static int ll_symlink(struct inode *dir, struct dentry *dentry, 1070 return err;
1209 const char *oldname)
1210{
1211 return ll_symlink_generic(dir, &dentry->d_name, oldname, dentry);
1212} 1071}
1213 1072
1214static int ll_link(struct dentry *old_dentry, struct inode *dir, 1073static int ll_link(struct dentry *old_dentry, struct inode *dir,
1215 struct dentry *new_dentry) 1074 struct dentry *new_dentry)
1216{ 1075{
1217 return ll_link_generic(old_dentry->d_inode, dir, &new_dentry->d_name, 1076 struct inode *src = old_dentry->d_inode;
1218 new_dentry); 1077 struct ll_sb_info *sbi = ll_i2sbi(dir);
1078 struct ptlrpc_request *request = NULL;
1079 struct md_op_data *op_data;
1080 int err;
1081
1082 CDEBUG(D_VFSTRACE,
1083 "VFS Op: inode=%lu/%u(%p), dir=%lu/%u(%p), target=%pd\n",
1084 src->i_ino, src->i_generation, src, dir->i_ino,
1085 dir->i_generation, dir, new_dentry);
1086
1087 op_data = ll_prep_md_op_data(NULL, src, dir, new_dentry->d_name.name,
1088 new_dentry->d_name.len,
1089 0, LUSTRE_OPC_ANY, NULL);
1090 if (IS_ERR(op_data))
1091 return PTR_ERR(op_data);
1092
1093 err = md_link(sbi->ll_md_exp, op_data, &request);
1094 ll_finish_md_op_data(op_data);
1095 if (err)
1096 goto out;
1097
1098 ll_update_times(request, dir);
1099 ll_stats_ops_tally(sbi, LPROC_LL_LINK, 1);
1100out:
1101 ptlrpc_req_finished(request);
1102 return err;
1219} 1103}
1220 1104
1221static int ll_rename(struct inode *old_dir, struct dentry *old_dentry, 1105static int ll_rename(struct inode *old_dir, struct dentry *old_dentry,
1222 struct inode *new_dir, struct dentry *new_dentry) 1106 struct inode *new_dir, struct dentry *new_dentry)
1223{ 1107{
1108 struct ptlrpc_request *request = NULL;
1109 struct ll_sb_info *sbi = ll_i2sbi(old_dir);
1110 struct md_op_data *op_data;
1224 int err; 1111 int err;
1225 err = ll_rename_generic(old_dir, NULL, 1112
1226 old_dentry, &old_dentry->d_name, 1113 CDEBUG(D_VFSTRACE,
1227 new_dir, NULL, new_dentry, 1114 "VFS Op:oldname=%pd,src_dir=%lu/%u(%p),newname=%pd,"
1228 &new_dentry->d_name); 1115 "tgt_dir=%lu/%u(%p)\n", old_dentry,
1116 old_dir->i_ino, old_dir->i_generation, old_dir, new_dentry,
1117 new_dir->i_ino, new_dir->i_generation, new_dir);
1118
1119 op_data = ll_prep_md_op_data(NULL, old_dir, new_dir, NULL, 0, 0,
1120 LUSTRE_OPC_ANY, NULL);
1121 if (IS_ERR(op_data))
1122 return PTR_ERR(op_data);
1123
1124 ll_get_child_fid(old_dentry, &op_data->op_fid3);
1125 ll_get_child_fid(new_dentry, &op_data->op_fid4);
1126 err = md_rename(sbi->ll_md_exp, op_data,
1127 old_dentry->d_name.name,
1128 old_dentry->d_name.len,
1129 new_dentry->d_name.name,
1130 new_dentry->d_name.len, &request);
1131 ll_finish_md_op_data(op_data);
1229 if (!err) { 1132 if (!err) {
1230 d_move(old_dentry, new_dentry); 1133 ll_update_times(request, old_dir);
1134 ll_update_times(request, new_dir);
1135 ll_stats_ops_tally(sbi, LPROC_LL_RENAME, 1);
1136 err = ll_objects_destroy(request, old_dir);
1231 } 1137 }
1138
1139 ptlrpc_req_finished(request);
1140 if (!err)
1141 d_move(old_dentry, new_dentry);
1232 return err; 1142 return err;
1233} 1143}
1234 1144
diff --git a/drivers/staging/lustre/lustre/llite/statahead.c b/drivers/staging/lustre/lustre/llite/statahead.c
index 06b71bcf97a7..09d965e76842 100644
--- a/drivers/staging/lustre/lustre/llite/statahead.c
+++ b/drivers/staging/lustre/lustre/llite/statahead.c
@@ -969,8 +969,8 @@ static int ll_agl_thread(void *arg)
969 struct l_wait_info lwi = { 0 }; 969 struct l_wait_info lwi = { 0 };
970 970
971 thread->t_pid = current_pid(); 971 thread->t_pid = current_pid();
972 CDEBUG(D_READA, "agl thread started: sai %p, parent %.*s\n", 972 CDEBUG(D_READA, "agl thread started: sai %p, parent %pd\n",
973 sai, parent->d_name.len, parent->d_name.name); 973 sai, parent);
974 974
975 atomic_inc(&sbi->ll_agl_total); 975 atomic_inc(&sbi->ll_agl_total);
976 spin_lock(&plli->lli_agl_lock); 976 spin_lock(&plli->lli_agl_lock);
@@ -1019,8 +1019,8 @@ static int ll_agl_thread(void *arg)
1019 spin_unlock(&plli->lli_agl_lock); 1019 spin_unlock(&plli->lli_agl_lock);
1020 wake_up(&thread->t_ctl_waitq); 1020 wake_up(&thread->t_ctl_waitq);
1021 ll_sai_put(sai); 1021 ll_sai_put(sai);
1022 CDEBUG(D_READA, "agl thread stopped: sai %p, parent %.*s\n", 1022 CDEBUG(D_READA, "agl thread stopped: sai %p, parent %pd\n",
1023 sai, parent->d_name.len, parent->d_name.name); 1023 sai, parent);
1024 return 0; 1024 return 0;
1025} 1025}
1026 1026
@@ -1031,8 +1031,8 @@ static void ll_start_agl(struct dentry *parent, struct ll_statahead_info *sai)
1031 struct ll_inode_info *plli; 1031 struct ll_inode_info *plli;
1032 struct task_struct *task; 1032 struct task_struct *task;
1033 1033
1034 CDEBUG(D_READA, "start agl thread: sai %p, parent %.*s\n", 1034 CDEBUG(D_READA, "start agl thread: sai %p, parent %pd\n",
1035 sai, parent->d_name.len, parent->d_name.name); 1035 sai, parent);
1036 1036
1037 plli = ll_i2info(parent->d_inode); 1037 plli = ll_i2info(parent->d_inode);
1038 task = kthread_run(ll_agl_thread, parent, 1038 task = kthread_run(ll_agl_thread, parent,
@@ -1066,8 +1066,8 @@ static int ll_statahead_thread(void *arg)
1066 struct l_wait_info lwi = { 0 }; 1066 struct l_wait_info lwi = { 0 };
1067 1067
1068 thread->t_pid = current_pid(); 1068 thread->t_pid = current_pid();
1069 CDEBUG(D_READA, "statahead thread starting: sai %p, parent %.*s\n", 1069 CDEBUG(D_READA, "statahead thread starting: sai %p, parent %pd\n",
1070 sai, parent->d_name.len, parent->d_name.name); 1070 sai, parent);
1071 1071
1072 if (sbi->ll_flags & LL_SBI_AGL_ENABLED) 1072 if (sbi->ll_flags & LL_SBI_AGL_ENABLED)
1073 ll_start_agl(parent, sai); 1073 ll_start_agl(parent, sai);
@@ -1288,8 +1288,8 @@ out:
1288 wake_up(&thread->t_ctl_waitq); 1288 wake_up(&thread->t_ctl_waitq);
1289 ll_sai_put(sai); 1289 ll_sai_put(sai);
1290 dput(parent); 1290 dput(parent);
1291 CDEBUG(D_READA, "statahead thread stopped: sai %p, parent %.*s\n", 1291 CDEBUG(D_READA, "statahead thread stopped: sai %p, parent %pd\n",
1292 sai, parent->d_name.len, parent->d_name.name); 1292 sai, parent);
1293 return rc; 1293 return rc;
1294} 1294}
1295 1295
@@ -1612,10 +1612,9 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentryp,
1612 } else if ((*dentryp)->d_inode != inode) { 1612 } else if ((*dentryp)->d_inode != inode) {
1613 /* revalidate, but inode is recreated */ 1613 /* revalidate, but inode is recreated */
1614 CDEBUG(D_READA, 1614 CDEBUG(D_READA,
1615 "stale dentry %.*s inode %lu/%u, " 1615 "stale dentry %pd inode %lu/%u, "
1616 "statahead inode %lu/%u\n", 1616 "statahead inode %lu/%u\n",
1617 (*dentryp)->d_name.len, 1617 *dentryp,
1618 (*dentryp)->d_name.name,
1619 (*dentryp)->d_inode->i_ino, 1618 (*dentryp)->d_inode->i_ino,
1620 (*dentryp)->d_inode->i_generation, 1619 (*dentryp)->d_inode->i_generation,
1621 inode->i_ino, 1620 inode->i_ino,
@@ -1666,9 +1665,9 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentryp,
1666 if (unlikely(sai->sai_inode != parent->d_inode)) { 1665 if (unlikely(sai->sai_inode != parent->d_inode)) {
1667 struct ll_inode_info *nlli = ll_i2info(parent->d_inode); 1666 struct ll_inode_info *nlli = ll_i2info(parent->d_inode);
1668 1667
1669 CWARN("Race condition, someone changed %.*s just now: " 1668 CWARN("Race condition, someone changed %pd just now: "
1670 "old parent "DFID", new parent "DFID"\n", 1669 "old parent "DFID", new parent "DFID"\n",
1671 (*dentryp)->d_name.len, (*dentryp)->d_name.name, 1670 *dentryp,
1672 PFID(&lli->lli_fid), PFID(&nlli->lli_fid)); 1671 PFID(&lli->lli_fid), PFID(&nlli->lli_fid));
1673 dput(parent); 1672 dput(parent);
1674 iput(sai->sai_inode); 1673 iput(sai->sai_inode);
@@ -1676,8 +1675,8 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentryp,
1676 goto out; 1675 goto out;
1677 } 1676 }
1678 1677
1679 CDEBUG(D_READA, "start statahead thread: sai %p, parent %.*s\n", 1678 CDEBUG(D_READA, "start statahead thread: sai %p, parent %pd\n",
1680 sai, parent->d_name.len, parent->d_name.name); 1679 sai, parent);
1681 1680
1682 /* The sai buffer already has one reference taken at allocation time, 1681 /* The sai buffer already has one reference taken at allocation time,
1683 * but as soon as we expose the sai by attaching it to the lli that 1682 * but as soon as we expose the sai by attaching it to the lli that
diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c
index d3f967a78138..e540a6d286f8 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_io.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_io.c
@@ -108,7 +108,7 @@ static int vvp_io_fault_iter_init(const struct lu_env *env,
108 struct inode *inode = ccc_object_inode(ios->cis_obj); 108 struct inode *inode = ccc_object_inode(ios->cis_obj);
109 109
110 LASSERT(inode == 110 LASSERT(inode ==
111 cl2ccc_io(env, ios)->cui_fd->fd_file->f_dentry->d_inode); 111 file_inode(cl2ccc_io(env, ios)->cui_fd->fd_file));
112 vio->u.fault.ft_mtime = LTIME_S(inode->i_mtime); 112 vio->u.fault.ft_mtime = LTIME_S(inode->i_mtime);
113 return 0; 113 return 0;
114} 114}
@@ -239,7 +239,7 @@ static int vvp_mmap_locks(const struct lu_env *env,
239 239
240 down_read(&mm->mmap_sem); 240 down_read(&mm->mmap_sem);
241 while ((vma = our_vma(mm, addr, count)) != NULL) { 241 while ((vma = our_vma(mm, addr, count)) != NULL) {
242 struct inode *inode = vma->vm_file->f_dentry->d_inode; 242 struct inode *inode = file_inode(vma->vm_file);
243 int flags = CEF_MUST; 243 int flags = CEF_MUST;
244 244
245 if (ll_file_nolock(vma->vm_file)) { 245 if (ll_file_nolock(vma->vm_file)) {
diff --git a/drivers/staging/lustre/lustre/llite/xattr.c b/drivers/staging/lustre/lustre/llite/xattr.c
index 252a6194ed9b..3151baf5585c 100644
--- a/drivers/staging/lustre/lustre/llite/xattr.c
+++ b/drivers/staging/lustre/lustre/llite/xattr.c
@@ -241,14 +241,11 @@ int ll_setxattr(struct dentry *dentry, const char *name,
241 lump->lmm_stripe_offset = -1; 241 lump->lmm_stripe_offset = -1;
242 242
243 if (lump != NULL && S_ISREG(inode->i_mode)) { 243 if (lump != NULL && S_ISREG(inode->i_mode)) {
244 struct file f;
245 int flags = FMODE_WRITE; 244 int flags = FMODE_WRITE;
246 int lum_size = (lump->lmm_magic == LOV_USER_MAGIC_V1) ? 245 int lum_size = (lump->lmm_magic == LOV_USER_MAGIC_V1) ?
247 sizeof(*lump) : sizeof(struct lov_user_md_v3); 246 sizeof(*lump) : sizeof(struct lov_user_md_v3);
248 247
249 memset(&f, 0, sizeof(f)); /* f.f_flags is used below */ 248 rc = ll_lov_setstripe_ea_info(inode, dentry, flags, lump,
250 f.f_dentry = dentry;
251 rc = ll_lov_setstripe_ea_info(inode, &f, flags, lump,
252 lum_size); 249 lum_size);
253 /* b10667: rc always be 0 here for now */ 250 /* b10667: rc always be 0 here for now */
254 rc = 0; 251 rc = 0;
@@ -519,8 +516,8 @@ ssize_t ll_getxattr(struct dentry *dentry, const char *name,
519 } 516 }
520 517
521 if (size < lmmsize) { 518 if (size < lmmsize) {
522 CERROR("server bug: replied size %d > %d for %s (%s)\n", 519 CERROR("server bug: replied size %d > %d for %pd (%s)\n",
523 lmmsize, (int)size, dentry->d_name.name, name); 520 lmmsize, (int)size, dentry, name);
524 rc = -ERANGE; 521 rc = -ERANGE;
525 goto out; 522 goto out;
526 } 523 }
diff --git a/drivers/staging/media/Kconfig b/drivers/staging/media/Kconfig
index 655cf5037b0b..96498b7fc20e 100644
--- a/drivers/staging/media/Kconfig
+++ b/drivers/staging/media/Kconfig
@@ -27,7 +27,9 @@ source "drivers/staging/media/davinci_vpfe/Kconfig"
27 27
28source "drivers/staging/media/dt3155v4l/Kconfig" 28source "drivers/staging/media/dt3155v4l/Kconfig"
29 29
30source "drivers/staging/media/omap24xx/Kconfig" 30source "drivers/staging/media/mn88472/Kconfig"
31
32source "drivers/staging/media/mn88473/Kconfig"
31 33
32source "drivers/staging/media/omap4iss/Kconfig" 34source "drivers/staging/media/omap4iss/Kconfig"
33 35
diff --git a/drivers/staging/media/Makefile b/drivers/staging/media/Makefile
index 6dbe578178cd..30fb352fc4a9 100644
--- a/drivers/staging/media/Makefile
+++ b/drivers/staging/media/Makefile
@@ -4,6 +4,6 @@ obj-$(CONFIG_LIRC_STAGING) += lirc/
4obj-$(CONFIG_VIDEO_DT3155) += dt3155v4l/ 4obj-$(CONFIG_VIDEO_DT3155) += dt3155v4l/
5obj-$(CONFIG_VIDEO_DM365_VPFE) += davinci_vpfe/ 5obj-$(CONFIG_VIDEO_DM365_VPFE) += davinci_vpfe/
6obj-$(CONFIG_VIDEO_OMAP4) += omap4iss/ 6obj-$(CONFIG_VIDEO_OMAP4) += omap4iss/
7obj-$(CONFIG_VIDEO_OMAP2) += omap24xx/ 7obj-$(CONFIG_DVB_MN88472) += mn88472/
8obj-$(CONFIG_VIDEO_TCM825X) += omap24xx/ 8obj-$(CONFIG_DVB_MN88473) += mn88473/
9 9
diff --git a/drivers/staging/media/bcm2048/radio-bcm2048.c b/drivers/staging/media/bcm2048/radio-bcm2048.c
index 2bba370a47ca..60a57b2a8fb2 100644
--- a/drivers/staging/media/bcm2048/radio-bcm2048.c
+++ b/drivers/staging/media/bcm2048/radio-bcm2048.c
@@ -2327,9 +2327,10 @@ static int bcm2048_vidioc_querycap(struct file *file, void *priv,
2327 strlcpy(capability->card, BCM2048_DRIVER_CARD, 2327 strlcpy(capability->card, BCM2048_DRIVER_CARD,
2328 sizeof(capability->card)); 2328 sizeof(capability->card));
2329 snprintf(capability->bus_info, 32, "I2C: 0x%X", bdev->client->addr); 2329 snprintf(capability->bus_info, 32, "I2C: 0x%X", bdev->client->addr);
2330 capability->version = BCM2048_DRIVER_VERSION; 2330 capability->device_caps = V4L2_CAP_TUNER | V4L2_CAP_RADIO |
2331 capability->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO |
2332 V4L2_CAP_HW_FREQ_SEEK; 2331 V4L2_CAP_HW_FREQ_SEEK;
2332 capability->capabilities = capability->device_caps |
2333 V4L2_CAP_DEVICE_CAPS;
2333 2334
2334 return 0; 2335 return 0;
2335} 2336}
@@ -2707,7 +2708,7 @@ static int __exit bcm2048_i2c_driver_remove(struct i2c_client *client)
2707 * bcm2048_i2c_driver - i2c driver interface 2708 * bcm2048_i2c_driver - i2c driver interface
2708 */ 2709 */
2709static const struct i2c_device_id bcm2048_id[] = { 2710static const struct i2c_device_id bcm2048_id[] = {
2710 { "bcm2048" , 0 }, 2711 { "bcm2048", 0 },
2711 { }, 2712 { },
2712}; 2713};
2713MODULE_DEVICE_TABLE(i2c, bcm2048_id); 2714MODULE_DEVICE_TABLE(i2c, bcm2048_id);
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipe.c b/drivers/staging/media/davinci_vpfe/dm365_ipipe.c
index bdc7f005b3ba..704fa202ee18 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_ipipe.c
+++ b/drivers/staging/media/davinci_vpfe/dm365_ipipe.c
@@ -37,15 +37,15 @@
37 37
38/* ipipe input format's */ 38/* ipipe input format's */
39static const unsigned int ipipe_input_fmts[] = { 39static const unsigned int ipipe_input_fmts[] = {
40 V4L2_MBUS_FMT_UYVY8_2X8, 40 MEDIA_BUS_FMT_UYVY8_2X8,
41 V4L2_MBUS_FMT_SGRBG12_1X12, 41 MEDIA_BUS_FMT_SGRBG12_1X12,
42 V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8, 42 MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8,
43 V4L2_MBUS_FMT_SGRBG10_ALAW8_1X8, 43 MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8,
44}; 44};
45 45
46/* ipipe output format's */ 46/* ipipe output format's */
47static const unsigned int ipipe_output_fmts[] = { 47static const unsigned int ipipe_output_fmts[] = {
48 V4L2_MBUS_FMT_UYVY8_2X8, 48 MEDIA_BUS_FMT_UYVY8_2X8,
49}; 49};
50 50
51static int ipipe_validate_lutdpc_params(struct vpfe_ipipe_lutdpc *lutdpc) 51static int ipipe_validate_lutdpc_params(struct vpfe_ipipe_lutdpc *lutdpc)
@@ -1457,7 +1457,7 @@ ipipe_try_format(struct vpfe_ipipe_device *ipipe,
1457 1457
1458 /* If not found, use SBGGR10 as default */ 1458 /* If not found, use SBGGR10 as default */
1459 if (i >= ARRAY_SIZE(ipipe_input_fmts)) 1459 if (i >= ARRAY_SIZE(ipipe_input_fmts))
1460 fmt->code = V4L2_MBUS_FMT_SGRBG12_1X12; 1460 fmt->code = MEDIA_BUS_FMT_SGRBG12_1X12;
1461 } else if (pad == IPIPE_PAD_SOURCE) { 1461 } else if (pad == IPIPE_PAD_SOURCE) {
1462 for (i = 0; i < ARRAY_SIZE(ipipe_output_fmts); i++) 1462 for (i = 0; i < ARRAY_SIZE(ipipe_output_fmts); i++)
1463 if (fmt->code == ipipe_output_fmts[i]) 1463 if (fmt->code == ipipe_output_fmts[i])
@@ -1465,7 +1465,7 @@ ipipe_try_format(struct vpfe_ipipe_device *ipipe,
1465 1465
1466 /* If not found, use UYVY as default */ 1466 /* If not found, use UYVY as default */
1467 if (i >= ARRAY_SIZE(ipipe_output_fmts)) 1467 if (i >= ARRAY_SIZE(ipipe_output_fmts))
1468 fmt->code = V4L2_MBUS_FMT_UYVY8_2X8; 1468 fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
1469 } 1469 }
1470 1470
1471 fmt->width = clamp_t(u32, fmt->width, MIN_OUT_HEIGHT, max_out_width); 1471 fmt->width = clamp_t(u32, fmt->width, MIN_OUT_HEIGHT, max_out_width);
@@ -1642,7 +1642,7 @@ ipipe_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
1642 memset(&format, 0, sizeof(format)); 1642 memset(&format, 0, sizeof(format));
1643 format.pad = IPIPE_PAD_SINK; 1643 format.pad = IPIPE_PAD_SINK;
1644 format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE; 1644 format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
1645 format.format.code = V4L2_MBUS_FMT_SGRBG12_1X12; 1645 format.format.code = MEDIA_BUS_FMT_SGRBG12_1X12;
1646 format.format.width = IPIPE_MAX_OUTPUT_WIDTH_A; 1646 format.format.width = IPIPE_MAX_OUTPUT_WIDTH_A;
1647 format.format.height = IPIPE_MAX_OUTPUT_HEIGHT_A; 1647 format.format.height = IPIPE_MAX_OUTPUT_HEIGHT_A;
1648 ipipe_set_format(sd, fh, &format); 1648 ipipe_set_format(sd, fh, &format);
@@ -1650,7 +1650,7 @@ ipipe_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
1650 memset(&format, 0, sizeof(format)); 1650 memset(&format, 0, sizeof(format));
1651 format.pad = IPIPE_PAD_SOURCE; 1651 format.pad = IPIPE_PAD_SOURCE;
1652 format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE; 1652 format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
1653 format.format.code = V4L2_MBUS_FMT_UYVY8_2X8; 1653 format.format.code = MEDIA_BUS_FMT_UYVY8_2X8;
1654 format.format.width = IPIPE_MAX_OUTPUT_WIDTH_A; 1654 format.format.width = IPIPE_MAX_OUTPUT_WIDTH_A;
1655 format.format.height = IPIPE_MAX_OUTPUT_HEIGHT_A; 1655 format.format.height = IPIPE_MAX_OUTPUT_HEIGHT_A;
1656 ipipe_set_format(sd, fh, &format); 1656 ipipe_set_format(sd, fh, &format);
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c b/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c
index b2daf5e63f88..6461de1a61fd 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c
+++ b/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c
@@ -196,12 +196,12 @@ ipipe_setup_resizer(void *__iomem rsz_base, struct resizer_params *params)
196 rsz_set_rsz_regs(rsz_base, RSZ_B, params); 196 rsz_set_rsz_regs(rsz_base, RSZ_B, params);
197} 197}
198 198
199static u32 ipipe_get_color_pat(enum v4l2_mbus_pixelcode pix) 199static u32 ipipe_get_color_pat(u32 pix)
200{ 200{
201 switch (pix) { 201 switch (pix) {
202 case V4L2_MBUS_FMT_SGRBG10_ALAW8_1X8: 202 case MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8:
203 case V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8: 203 case MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8:
204 case V4L2_MBUS_FMT_SGRBG12_1X12: 204 case MEDIA_BUS_FMT_SGRBG12_1X12:
205 return ipipe_sgrbg_pattern; 205 return ipipe_sgrbg_pattern;
206 206
207 default: 207 default:
@@ -211,23 +211,23 @@ static u32 ipipe_get_color_pat(enum v4l2_mbus_pixelcode pix)
211 211
212static int ipipe_get_data_path(struct vpfe_ipipe_device *ipipe) 212static int ipipe_get_data_path(struct vpfe_ipipe_device *ipipe)
213{ 213{
214 enum v4l2_mbus_pixelcode temp_pix_fmt; 214 u32 temp_pix_fmt;
215 215
216 switch (ipipe->formats[IPIPE_PAD_SINK].code) { 216 switch (ipipe->formats[IPIPE_PAD_SINK].code) {
217 case V4L2_MBUS_FMT_SBGGR8_1X8: 217 case MEDIA_BUS_FMT_SBGGR8_1X8:
218 case V4L2_MBUS_FMT_SGRBG10_ALAW8_1X8: 218 case MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8:
219 case V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8: 219 case MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8:
220 case V4L2_MBUS_FMT_SGRBG12_1X12: 220 case MEDIA_BUS_FMT_SGRBG12_1X12:
221 temp_pix_fmt = V4L2_MBUS_FMT_SGRBG12_1X12; 221 temp_pix_fmt = MEDIA_BUS_FMT_SGRBG12_1X12;
222 break; 222 break;
223 223
224 default: 224 default:
225 temp_pix_fmt = V4L2_MBUS_FMT_UYVY8_2X8; 225 temp_pix_fmt = MEDIA_BUS_FMT_UYVY8_2X8;
226 } 226 }
227 227
228 if (temp_pix_fmt == V4L2_MBUS_FMT_SGRBG12_1X12) { 228 if (temp_pix_fmt == MEDIA_BUS_FMT_SGRBG12_1X12) {
229 if (ipipe->formats[IPIPE_PAD_SOURCE].code == 229 if (ipipe->formats[IPIPE_PAD_SOURCE].code ==
230 V4L2_MBUS_FMT_SGRBG12_1X12) 230 MEDIA_BUS_FMT_SGRBG12_1X12)
231 return IPIPE_RAW2RAW; 231 return IPIPE_RAW2RAW;
232 return IPIPE_RAW2YUV; 232 return IPIPE_RAW2YUV;
233 } 233 }
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipeif.c b/drivers/staging/media/davinci_vpfe/dm365_ipipeif.c
index 6d4893b44c1f..a86f16ff5818 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_ipipeif.c
+++ b/drivers/staging/media/davinci_vpfe/dm365_ipipeif.c
@@ -23,42 +23,42 @@
23#include "vpfe_mc_capture.h" 23#include "vpfe_mc_capture.h"
24 24
25static const unsigned int ipipeif_input_fmts[] = { 25static const unsigned int ipipeif_input_fmts[] = {
26 V4L2_MBUS_FMT_UYVY8_2X8, 26 MEDIA_BUS_FMT_UYVY8_2X8,
27 V4L2_MBUS_FMT_SGRBG12_1X12, 27 MEDIA_BUS_FMT_SGRBG12_1X12,
28 V4L2_MBUS_FMT_Y8_1X8, 28 MEDIA_BUS_FMT_Y8_1X8,
29 V4L2_MBUS_FMT_UV8_1X8, 29 MEDIA_BUS_FMT_UV8_1X8,
30 V4L2_MBUS_FMT_YDYUYDYV8_1X16, 30 MEDIA_BUS_FMT_YDYUYDYV8_1X16,
31 V4L2_MBUS_FMT_SBGGR8_1X8, 31 MEDIA_BUS_FMT_SBGGR8_1X8,
32}; 32};
33 33
34static const unsigned int ipipeif_output_fmts[] = { 34static const unsigned int ipipeif_output_fmts[] = {
35 V4L2_MBUS_FMT_UYVY8_2X8, 35 MEDIA_BUS_FMT_UYVY8_2X8,
36 V4L2_MBUS_FMT_SGRBG12_1X12, 36 MEDIA_BUS_FMT_SGRBG12_1X12,
37 V4L2_MBUS_FMT_Y8_1X8, 37 MEDIA_BUS_FMT_Y8_1X8,
38 V4L2_MBUS_FMT_UV8_1X8, 38 MEDIA_BUS_FMT_UV8_1X8,
39 V4L2_MBUS_FMT_YDYUYDYV8_1X16, 39 MEDIA_BUS_FMT_YDYUYDYV8_1X16,
40 V4L2_MBUS_FMT_SBGGR8_1X8, 40 MEDIA_BUS_FMT_SBGGR8_1X8,
41 V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8, 41 MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8,
42 V4L2_MBUS_FMT_SGRBG10_ALAW8_1X8, 42 MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8,
43}; 43};
44 44
45static int 45static int
46ipipeif_get_pack_mode(enum v4l2_mbus_pixelcode in_pix_fmt) 46ipipeif_get_pack_mode(u32 in_pix_fmt)
47{ 47{
48 switch (in_pix_fmt) { 48 switch (in_pix_fmt) {
49 case V4L2_MBUS_FMT_SBGGR8_1X8: 49 case MEDIA_BUS_FMT_SBGGR8_1X8:
50 case V4L2_MBUS_FMT_Y8_1X8: 50 case MEDIA_BUS_FMT_Y8_1X8:
51 case V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8: 51 case MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8:
52 case V4L2_MBUS_FMT_UV8_1X8: 52 case MEDIA_BUS_FMT_UV8_1X8:
53 return IPIPEIF_5_1_PACK_8_BIT; 53 return IPIPEIF_5_1_PACK_8_BIT;
54 54
55 case V4L2_MBUS_FMT_SGRBG10_ALAW8_1X8: 55 case MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8:
56 return IPIPEIF_5_1_PACK_8_BIT_A_LAW; 56 return IPIPEIF_5_1_PACK_8_BIT_A_LAW;
57 57
58 case V4L2_MBUS_FMT_SGRBG12_1X12: 58 case MEDIA_BUS_FMT_SGRBG12_1X12:
59 return IPIPEIF_5_1_PACK_16_BIT; 59 return IPIPEIF_5_1_PACK_16_BIT;
60 60
61 case V4L2_MBUS_FMT_SBGGR12_1X12: 61 case MEDIA_BUS_FMT_SBGGR12_1X12:
62 return IPIPEIF_5_1_PACK_12_BIT; 62 return IPIPEIF_5_1_PACK_12_BIT;
63 63
64 default: 64 default:
@@ -107,8 +107,8 @@ ipipeif_get_cfg_src1(struct vpfe_ipipeif_device *ipipeif)
107 107
108 informat = &ipipeif->formats[IPIPEIF_PAD_SINK]; 108 informat = &ipipeif->formats[IPIPEIF_PAD_SINK];
109 if (ipipeif->input == IPIPEIF_INPUT_MEMORY && 109 if (ipipeif->input == IPIPEIF_INPUT_MEMORY &&
110 (informat->code == V4L2_MBUS_FMT_Y8_1X8 || 110 (informat->code == MEDIA_BUS_FMT_Y8_1X8 ||
111 informat->code == V4L2_MBUS_FMT_UV8_1X8)) 111 informat->code == MEDIA_BUS_FMT_UV8_1X8))
112 return IPIPEIF_CCDC; 112 return IPIPEIF_CCDC;
113 113
114 return IPIPEIF_SRC1_PARALLEL_PORT; 114 return IPIPEIF_SRC1_PARALLEL_PORT;
@@ -122,11 +122,11 @@ ipipeif_get_data_shift(struct vpfe_ipipeif_device *ipipeif)
122 informat = &ipipeif->formats[IPIPEIF_PAD_SINK]; 122 informat = &ipipeif->formats[IPIPEIF_PAD_SINK];
123 123
124 switch (informat->code) { 124 switch (informat->code) {
125 case V4L2_MBUS_FMT_SGRBG12_1X12: 125 case MEDIA_BUS_FMT_SGRBG12_1X12:
126 return IPIPEIF_5_1_BITS11_0; 126 return IPIPEIF_5_1_BITS11_0;
127 127
128 case V4L2_MBUS_FMT_Y8_1X8: 128 case MEDIA_BUS_FMT_Y8_1X8:
129 case V4L2_MBUS_FMT_UV8_1X8: 129 case MEDIA_BUS_FMT_UV8_1X8:
130 return IPIPEIF_5_1_BITS11_0; 130 return IPIPEIF_5_1_BITS11_0;
131 131
132 default: 132 default:
@@ -143,7 +143,7 @@ ipipeif_get_source(struct vpfe_ipipeif_device *ipipeif)
143 if (ipipeif->input == IPIPEIF_INPUT_ISIF) 143 if (ipipeif->input == IPIPEIF_INPUT_ISIF)
144 return IPIPEIF_CCDC; 144 return IPIPEIF_CCDC;
145 145
146 if (informat->code == V4L2_MBUS_FMT_UYVY8_2X8) 146 if (informat->code == MEDIA_BUS_FMT_UYVY8_2X8)
147 return IPIPEIF_SDRAM_YUV; 147 return IPIPEIF_SDRAM_YUV;
148 148
149 return IPIPEIF_SDRAM_RAW; 149 return IPIPEIF_SDRAM_RAW;
@@ -190,7 +190,7 @@ static int ipipeif_hw_setup(struct v4l2_subdev *sd)
190 struct v4l2_mbus_framefmt *informat, *outformat; 190 struct v4l2_mbus_framefmt *informat, *outformat;
191 struct ipipeif_params params = ipipeif->config; 191 struct ipipeif_params params = ipipeif->config;
192 enum ipipeif_input_source ipipeif_source; 192 enum ipipeif_input_source ipipeif_source;
193 enum v4l2_mbus_pixelcode isif_port_if; 193 u32 isif_port_if;
194 void *ipipeif_base_addr; 194 void *ipipeif_base_addr;
195 unsigned int val; 195 unsigned int val;
196 int data_shift; 196 int data_shift;
@@ -268,16 +268,16 @@ static int ipipeif_hw_setup(struct v4l2_subdev *sd)
268 ipipeif_write(val, ipipeif_base_addr, IPIPEIF_INIRSZ); 268 ipipeif_write(val, ipipeif_base_addr, IPIPEIF_INIRSZ);
269 isif_port_if = informat->code; 269 isif_port_if = informat->code;
270 270
271 if (isif_port_if == V4L2_MBUS_FMT_Y8_1X8) 271 if (isif_port_if == MEDIA_BUS_FMT_Y8_1X8)
272 isif_port_if = V4L2_MBUS_FMT_YUYV8_1X16; 272 isif_port_if = MEDIA_BUS_FMT_YUYV8_1X16;
273 else if (isif_port_if == V4L2_MBUS_FMT_UV8_1X8) 273 else if (isif_port_if == MEDIA_BUS_FMT_UV8_1X8)
274 isif_port_if = V4L2_MBUS_FMT_SGRBG12_1X12; 274 isif_port_if = MEDIA_BUS_FMT_SGRBG12_1X12;
275 275
276 /* Enable DPCM decompression */ 276 /* Enable DPCM decompression */
277 switch (ipipeif_source) { 277 switch (ipipeif_source) {
278 case IPIPEIF_SDRAM_RAW: 278 case IPIPEIF_SDRAM_RAW:
279 val = 0; 279 val = 0;
280 if (outformat->code == V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8) { 280 if (outformat->code == MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8) {
281 val = 1; 281 val = 1;
282 val |= (IPIPEIF_DPCM_8BIT_10BIT & 1) << 282 val |= (IPIPEIF_DPCM_8BIT_10BIT & 1) <<
283 IPIPEIF_DPCM_BITS_SHIFT; 283 IPIPEIF_DPCM_BITS_SHIFT;
@@ -296,9 +296,9 @@ static int ipipeif_hw_setup(struct v4l2_subdev *sd)
296 /* configure CFG2 */ 296 /* configure CFG2 */
297 val = ipipeif_read(ipipeif_base_addr, IPIPEIF_CFG2); 297 val = ipipeif_read(ipipeif_base_addr, IPIPEIF_CFG2);
298 switch (isif_port_if) { 298 switch (isif_port_if) {
299 case V4L2_MBUS_FMT_YUYV8_1X16: 299 case MEDIA_BUS_FMT_YUYV8_1X16:
300 case V4L2_MBUS_FMT_UYVY8_2X8: 300 case MEDIA_BUS_FMT_UYVY8_2X8:
301 case V4L2_MBUS_FMT_Y8_1X8: 301 case MEDIA_BUS_FMT_Y8_1X8:
302 RESETBIT(val, IPIPEIF_CFG2_YUV8_SHIFT); 302 RESETBIT(val, IPIPEIF_CFG2_YUV8_SHIFT);
303 SETBIT(val, IPIPEIF_CFG2_YUV16_SHIFT); 303 SETBIT(val, IPIPEIF_CFG2_YUV16_SHIFT);
304 ipipeif_write(val, ipipeif_base_addr, IPIPEIF_CFG2); 304 ipipeif_write(val, ipipeif_base_addr, IPIPEIF_CFG2);
@@ -344,16 +344,16 @@ static int ipipeif_hw_setup(struct v4l2_subdev *sd)
344 val |= VPFE_PINPOL_POSITIVE << IPIPEIF_CFG2_VDPOL_SHIFT; 344 val |= VPFE_PINPOL_POSITIVE << IPIPEIF_CFG2_VDPOL_SHIFT;
345 345
346 switch (isif_port_if) { 346 switch (isif_port_if) {
347 case V4L2_MBUS_FMT_YUYV8_1X16: 347 case MEDIA_BUS_FMT_YUYV8_1X16:
348 case V4L2_MBUS_FMT_YUYV10_1X20: 348 case MEDIA_BUS_FMT_YUYV10_1X20:
349 RESETBIT(val, IPIPEIF_CFG2_YUV8_SHIFT); 349 RESETBIT(val, IPIPEIF_CFG2_YUV8_SHIFT);
350 SETBIT(val, IPIPEIF_CFG2_YUV16_SHIFT); 350 SETBIT(val, IPIPEIF_CFG2_YUV16_SHIFT);
351 break; 351 break;
352 352
353 case V4L2_MBUS_FMT_YUYV8_2X8: 353 case MEDIA_BUS_FMT_YUYV8_2X8:
354 case V4L2_MBUS_FMT_UYVY8_2X8: 354 case MEDIA_BUS_FMT_UYVY8_2X8:
355 case V4L2_MBUS_FMT_Y8_1X8: 355 case MEDIA_BUS_FMT_Y8_1X8:
356 case V4L2_MBUS_FMT_YUYV10_2X10: 356 case MEDIA_BUS_FMT_YUYV10_2X10:
357 SETBIT(val, IPIPEIF_CFG2_YUV8_SHIFT); 357 SETBIT(val, IPIPEIF_CFG2_YUV8_SHIFT);
358 SETBIT(val, IPIPEIF_CFG2_YUV16_SHIFT); 358 SETBIT(val, IPIPEIF_CFG2_YUV16_SHIFT);
359 val |= IPIPEIF_CBCR_Y << IPIPEIF_CFG2_YUV8P_SHIFT; 359 val |= IPIPEIF_CBCR_Y << IPIPEIF_CFG2_YUV8P_SHIFT;
@@ -625,7 +625,7 @@ ipipeif_try_format(struct vpfe_ipipeif_device *ipipeif,
625 625
626 /* If not found, use SBGGR10 as default */ 626 /* If not found, use SBGGR10 as default */
627 if (i >= ARRAY_SIZE(ipipeif_input_fmts)) 627 if (i >= ARRAY_SIZE(ipipeif_input_fmts))
628 fmt->code = V4L2_MBUS_FMT_SGRBG12_1X12; 628 fmt->code = MEDIA_BUS_FMT_SGRBG12_1X12;
629 } else if (pad == IPIPEIF_PAD_SOURCE) { 629 } else if (pad == IPIPEIF_PAD_SOURCE) {
630 for (i = 0; i < ARRAY_SIZE(ipipeif_output_fmts); i++) 630 for (i = 0; i < ARRAY_SIZE(ipipeif_output_fmts); i++)
631 if (fmt->code == ipipeif_output_fmts[i]) 631 if (fmt->code == ipipeif_output_fmts[i])
@@ -633,7 +633,7 @@ ipipeif_try_format(struct vpfe_ipipeif_device *ipipeif,
633 633
634 /* If not found, use UYVY as default */ 634 /* If not found, use UYVY as default */
635 if (i >= ARRAY_SIZE(ipipeif_output_fmts)) 635 if (i >= ARRAY_SIZE(ipipeif_output_fmts))
636 fmt->code = V4L2_MBUS_FMT_UYVY8_2X8; 636 fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
637 } 637 }
638 638
639 fmt->width = clamp_t(u32, fmt->width, MIN_OUT_HEIGHT, max_out_width); 639 fmt->width = clamp_t(u32, fmt->width, MIN_OUT_HEIGHT, max_out_width);
@@ -770,7 +770,7 @@ ipipeif_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
770 memset(&format, 0, sizeof(format)); 770 memset(&format, 0, sizeof(format));
771 format.pad = IPIPEIF_PAD_SINK; 771 format.pad = IPIPEIF_PAD_SINK;
772 format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE; 772 format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
773 format.format.code = V4L2_MBUS_FMT_SGRBG12_1X12; 773 format.format.code = MEDIA_BUS_FMT_SGRBG12_1X12;
774 format.format.width = IPIPE_MAX_OUTPUT_WIDTH_A; 774 format.format.width = IPIPE_MAX_OUTPUT_WIDTH_A;
775 format.format.height = IPIPE_MAX_OUTPUT_HEIGHT_A; 775 format.format.height = IPIPE_MAX_OUTPUT_HEIGHT_A;
776 ipipeif_set_format(sd, fh, &format); 776 ipipeif_set_format(sd, fh, &format);
@@ -778,7 +778,7 @@ ipipeif_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
778 memset(&format, 0, sizeof(format)); 778 memset(&format, 0, sizeof(format));
779 format.pad = IPIPEIF_PAD_SOURCE; 779 format.pad = IPIPEIF_PAD_SOURCE;
780 format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE; 780 format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
781 format.format.code = V4L2_MBUS_FMT_UYVY8_2X8; 781 format.format.code = MEDIA_BUS_FMT_UYVY8_2X8;
782 format.format.width = IPIPE_MAX_OUTPUT_WIDTH_A; 782 format.format.width = IPIPE_MAX_OUTPUT_WIDTH_A;
783 format.format.height = IPIPE_MAX_OUTPUT_HEIGHT_A; 783 format.format.height = IPIPE_MAX_OUTPUT_HEIGHT_A;
784 ipipeif_set_format(sd, fh, &format); 784 ipipeif_set_format(sd, fh, &format);
@@ -805,9 +805,9 @@ ipipeif_video_in_queue(struct vpfe_device *vpfe_dev, unsigned long addr)
805 return -EINVAL; 805 return -EINVAL;
806 806
807 switch (ipipeif->formats[IPIPEIF_PAD_SINK].code) { 807 switch (ipipeif->formats[IPIPEIF_PAD_SINK].code) {
808 case V4L2_MBUS_FMT_Y8_1X8: 808 case MEDIA_BUS_FMT_Y8_1X8:
809 case V4L2_MBUS_FMT_UV8_1X8: 809 case MEDIA_BUS_FMT_UV8_1X8:
810 case V4L2_MBUS_FMT_YDYUYDYV8_1X16: 810 case MEDIA_BUS_FMT_YDYUYDYV8_1X16:
811 adofs = ipipeif->formats[IPIPEIF_PAD_SINK].width; 811 adofs = ipipeif->formats[IPIPEIF_PAD_SINK].width;
812 break; 812 break;
813 813
diff --git a/drivers/staging/media/davinci_vpfe/dm365_isif.c b/drivers/staging/media/davinci_vpfe/dm365_isif.c
index 0d535b062e4e..fa26f63831b7 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_isif.c
+++ b/drivers/staging/media/davinci_vpfe/dm365_isif.c
@@ -27,13 +27,13 @@
27#define MAX_HEIGHT 4096 27#define MAX_HEIGHT 4096
28 28
29static const unsigned int isif_fmts[] = { 29static const unsigned int isif_fmts[] = {
30 V4L2_MBUS_FMT_YUYV8_2X8, 30 MEDIA_BUS_FMT_YUYV8_2X8,
31 V4L2_MBUS_FMT_UYVY8_2X8, 31 MEDIA_BUS_FMT_UYVY8_2X8,
32 V4L2_MBUS_FMT_YUYV8_1X16, 32 MEDIA_BUS_FMT_YUYV8_1X16,
33 V4L2_MBUS_FMT_YUYV10_1X20, 33 MEDIA_BUS_FMT_YUYV10_1X20,
34 V4L2_MBUS_FMT_SGRBG12_1X12, 34 MEDIA_BUS_FMT_SGRBG12_1X12,
35 V4L2_MBUS_FMT_SGRBG10_ALAW8_1X8, 35 MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8,
36 V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8, 36 MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8,
37}; 37};
38 38
39#define ISIF_COLPTN_R_Ye 0x0 39#define ISIF_COLPTN_R_Ye 0x0
@@ -154,7 +154,7 @@ enum v4l2_field vpfe_isif_get_fid(struct vpfe_device *vpfe_dev)
154static int 154static int
155isif_set_pixel_format(struct vpfe_isif_device *isif, unsigned int pixfmt) 155isif_set_pixel_format(struct vpfe_isif_device *isif, unsigned int pixfmt)
156{ 156{
157 if (isif->formats[ISIF_PAD_SINK].code == V4L2_MBUS_FMT_SGRBG12_1X12) { 157 if (isif->formats[ISIF_PAD_SINK].code == MEDIA_BUS_FMT_SGRBG12_1X12) {
158 if (pixfmt == V4L2_PIX_FMT_SBGGR16) 158 if (pixfmt == V4L2_PIX_FMT_SBGGR16)
159 isif->isif_cfg.data_pack = ISIF_PACK_16BIT; 159 isif->isif_cfg.data_pack = ISIF_PACK_16BIT;
160 else if ((pixfmt == V4L2_PIX_FMT_SGRBG10DPCM8) || 160 else if ((pixfmt == V4L2_PIX_FMT_SGRBG10DPCM8) ||
@@ -184,7 +184,7 @@ static int
184isif_set_frame_format(struct vpfe_isif_device *isif, 184isif_set_frame_format(struct vpfe_isif_device *isif,
185 enum isif_frmfmt frm_fmt) 185 enum isif_frmfmt frm_fmt)
186{ 186{
187 if (isif->formats[ISIF_PAD_SINK].code == V4L2_MBUS_FMT_SGRBG12_1X12) 187 if (isif->formats[ISIF_PAD_SINK].code == MEDIA_BUS_FMT_SGRBG12_1X12)
188 isif->isif_cfg.bayer.frm_fmt = frm_fmt; 188 isif->isif_cfg.bayer.frm_fmt = frm_fmt;
189 else 189 else
190 isif->isif_cfg.ycbcr.frm_fmt = frm_fmt; 190 isif->isif_cfg.ycbcr.frm_fmt = frm_fmt;
@@ -196,7 +196,7 @@ static int isif_set_image_window(struct vpfe_isif_device *isif)
196{ 196{
197 struct v4l2_rect *win = &isif->crop; 197 struct v4l2_rect *win = &isif->crop;
198 198
199 if (isif->formats[ISIF_PAD_SINK].code == V4L2_MBUS_FMT_SGRBG12_1X12) { 199 if (isif->formats[ISIF_PAD_SINK].code == MEDIA_BUS_FMT_SGRBG12_1X12) {
200 isif->isif_cfg.bayer.win.top = win->top; 200 isif->isif_cfg.bayer.win.top = win->top;
201 isif->isif_cfg.bayer.win.left = win->left; 201 isif->isif_cfg.bayer.win.left = win->left;
202 isif->isif_cfg.bayer.win.width = win->width; 202 isif->isif_cfg.bayer.win.width = win->width;
@@ -214,7 +214,7 @@ static int isif_set_image_window(struct vpfe_isif_device *isif)
214static int 214static int
215isif_set_buftype(struct vpfe_isif_device *isif, enum isif_buftype buf_type) 215isif_set_buftype(struct vpfe_isif_device *isif, enum isif_buftype buf_type)
216{ 216{
217 if (isif->formats[ISIF_PAD_SINK].code == V4L2_MBUS_FMT_SGRBG12_1X12) 217 if (isif->formats[ISIF_PAD_SINK].code == MEDIA_BUS_FMT_SGRBG12_1X12)
218 isif->isif_cfg.bayer.buf_type = buf_type; 218 isif->isif_cfg.bayer.buf_type = buf_type;
219 else 219 else
220 isif->isif_cfg.ycbcr.buf_type = buf_type; 220 isif->isif_cfg.ycbcr.buf_type = buf_type;
@@ -296,7 +296,7 @@ isif_try_format(struct vpfe_isif_device *isif, struct v4l2_subdev_fh *fh,
296 296
297 /* If not found, use YUYV8_2x8 as default */ 297 /* If not found, use YUYV8_2x8 as default */
298 if (i >= ARRAY_SIZE(isif_fmts)) 298 if (i >= ARRAY_SIZE(isif_fmts))
299 fmt->format.code = V4L2_MBUS_FMT_YUYV8_2X8; 299 fmt->format.code = MEDIA_BUS_FMT_YUYV8_2X8;
300 300
301 /* Clamp the size. */ 301 /* Clamp the size. */
302 fmt->format.width = clamp_t(u32, width, 32, MAX_WIDTH); 302 fmt->format.width = clamp_t(u32, width, 32, MAX_WIDTH);
@@ -429,7 +429,7 @@ static int isif_get_params(struct v4l2_subdev *sd, void *params)
429 struct vpfe_isif_device *isif = v4l2_get_subdevdata(sd); 429 struct vpfe_isif_device *isif = v4l2_get_subdevdata(sd);
430 430
431 /* only raw module parameters can be set through the IOCTL */ 431 /* only raw module parameters can be set through the IOCTL */
432 if (isif->formats[ISIF_PAD_SINK].code != V4L2_MBUS_FMT_SGRBG12_1X12) 432 if (isif->formats[ISIF_PAD_SINK].code != MEDIA_BUS_FMT_SGRBG12_1X12)
433 return -EINVAL; 433 return -EINVAL;
434 memcpy(params, &isif->isif_cfg.bayer.config_params, 434 memcpy(params, &isif->isif_cfg.bayer.config_params,
435 sizeof(isif->isif_cfg.bayer.config_params)); 435 sizeof(isif->isif_cfg.bayer.config_params));
@@ -604,7 +604,7 @@ static int isif_set_params(struct v4l2_subdev *sd, void *params)
604 int ret = -EINVAL; 604 int ret = -EINVAL;
605 605
606 /* only raw module parameters can be set through the IOCTL */ 606 /* only raw module parameters can be set through the IOCTL */
607 if (isif->formats[ISIF_PAD_SINK].code != V4L2_MBUS_FMT_SGRBG12_1X12) 607 if (isif->formats[ISIF_PAD_SINK].code != MEDIA_BUS_FMT_SGRBG12_1X12)
608 return ret; 608 return ret;
609 609
610 memcpy(&isif_raw_params, params, sizeof(isif_raw_params)); 610 memcpy(&isif_raw_params, params, sizeof(isif_raw_params));
@@ -1041,19 +1041,19 @@ isif_config_culling(struct vpfe_isif_device *isif, struct vpfe_isif_cul *cul)
1041static int isif_get_pix_fmt(u32 mbus_code) 1041static int isif_get_pix_fmt(u32 mbus_code)
1042{ 1042{
1043 switch (mbus_code) { 1043 switch (mbus_code) {
1044 case V4L2_MBUS_FMT_SGRBG10_ALAW8_1X8: 1044 case MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8:
1045 case V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8: 1045 case MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8:
1046 case V4L2_MBUS_FMT_SGRBG12_1X12: 1046 case MEDIA_BUS_FMT_SGRBG12_1X12:
1047 return ISIF_PIXFMT_RAW; 1047 return ISIF_PIXFMT_RAW;
1048 1048
1049 case V4L2_MBUS_FMT_YUYV8_2X8: 1049 case MEDIA_BUS_FMT_YUYV8_2X8:
1050 case V4L2_MBUS_FMT_UYVY8_2X8: 1050 case MEDIA_BUS_FMT_UYVY8_2X8:
1051 case V4L2_MBUS_FMT_YUYV10_2X10: 1051 case MEDIA_BUS_FMT_YUYV10_2X10:
1052 case V4L2_MBUS_FMT_Y8_1X8: 1052 case MEDIA_BUS_FMT_Y8_1X8:
1053 return ISIF_PIXFMT_YCBCR_8BIT; 1053 return ISIF_PIXFMT_YCBCR_8BIT;
1054 1054
1055 case V4L2_MBUS_FMT_YUYV8_1X16: 1055 case MEDIA_BUS_FMT_YUYV8_1X16:
1056 case V4L2_MBUS_FMT_YUYV10_1X20: 1056 case MEDIA_BUS_FMT_YUYV10_1X20:
1057 return ISIF_PIXFMT_YCBCR_16BIT; 1057 return ISIF_PIXFMT_YCBCR_16BIT;
1058 1058
1059 default: 1059 default:
@@ -1121,11 +1121,11 @@ static int isif_config_raw(struct v4l2_subdev *sd, int mode)
1121 ISIF_FRM_FMT_MASK) << ISIF_FRM_FMT_SHIFT) | ((pix_fmt & 1121 ISIF_FRM_FMT_MASK) << ISIF_FRM_FMT_SHIFT) | ((pix_fmt &
1122 ISIF_INPUT_MASK) << ISIF_INPUT_SHIFT); 1122 ISIF_INPUT_MASK) << ISIF_INPUT_SHIFT);
1123 1123
1124 /* currently only V4L2_MBUS_FMT_SGRBG12_1X12 is 1124 /* currently only MEDIA_BUS_FMT_SGRBG12_1X12 is
1125 * supported. shift appropriately depending on 1125 * supported. shift appropriately depending on
1126 * different MBUS fmt's added 1126 * different MBUS fmt's added
1127 */ 1127 */
1128 if (format->code == V4L2_MBUS_FMT_SGRBG12_1X12) 1128 if (format->code == MEDIA_BUS_FMT_SGRBG12_1X12)
1129 val |= ((VPFE_ISIF_NO_SHIFT & 1129 val |= ((VPFE_ISIF_NO_SHIFT &
1130 ISIF_DATASFT_MASK) << ISIF_DATASFT_SHIFT); 1130 ISIF_DATASFT_MASK) << ISIF_DATASFT_SHIFT);
1131 1131
@@ -1154,7 +1154,7 @@ static int isif_config_raw(struct v4l2_subdev *sd, int mode)
1154 /* Configure Gain & Offset */ 1154 /* Configure Gain & Offset */
1155 isif_config_gain_offset(isif); 1155 isif_config_gain_offset(isif);
1156 /* Configure Color pattern */ 1156 /* Configure Color pattern */
1157 if (format->code == V4L2_MBUS_FMT_SGRBG12_1X12) 1157 if (format->code == MEDIA_BUS_FMT_SGRBG12_1X12)
1158 val = isif_sgrbg_pattern; 1158 val = isif_sgrbg_pattern;
1159 else 1159 else
1160 /* default set to rggb */ 1160 /* default set to rggb */
@@ -1254,8 +1254,8 @@ static int isif_config_ycbcr(struct v4l2_subdev *sd, int mode)
1254 (((params->vd_pol & ISIF_VD_POL_MASK) << ISIF_VD_POL_SHIFT)); 1254 (((params->vd_pol & ISIF_VD_POL_MASK) << ISIF_VD_POL_SHIFT));
1255 /* pack the data to 8-bit CCDCCFG */ 1255 /* pack the data to 8-bit CCDCCFG */
1256 switch (format->code) { 1256 switch (format->code) {
1257 case V4L2_MBUS_FMT_YUYV8_2X8: 1257 case MEDIA_BUS_FMT_YUYV8_2X8:
1258 case V4L2_MBUS_FMT_UYVY8_2X8: 1258 case MEDIA_BUS_FMT_UYVY8_2X8:
1259 if (pix_fmt != ISIF_PIXFMT_YCBCR_8BIT) { 1259 if (pix_fmt != ISIF_PIXFMT_YCBCR_8BIT) {
1260 pr_debug("Invalid pix_fmt(input mode)\n"); 1260 pr_debug("Invalid pix_fmt(input mode)\n");
1261 return -EINVAL; 1261 return -EINVAL;
@@ -1266,7 +1266,7 @@ static int isif_config_ycbcr(struct v4l2_subdev *sd, int mode)
1266 ccdcfg = ccdcfg | ISIF_PACK_8BIT | ISIF_YCINSWP_YCBCR; 1266 ccdcfg = ccdcfg | ISIF_PACK_8BIT | ISIF_YCINSWP_YCBCR;
1267 break; 1267 break;
1268 1268
1269 case V4L2_MBUS_FMT_YUYV10_2X10: 1269 case MEDIA_BUS_FMT_YUYV10_2X10:
1270 if (pix_fmt != ISIF_PIXFMT_YCBCR_8BIT) { 1270 if (pix_fmt != ISIF_PIXFMT_YCBCR_8BIT) {
1271 pr_debug("Invalid pix_fmt(input mode)\n"); 1271 pr_debug("Invalid pix_fmt(input mode)\n");
1272 return -EINVAL; 1272 return -EINVAL;
@@ -1278,7 +1278,7 @@ static int isif_config_ycbcr(struct v4l2_subdev *sd, int mode)
1278 ISIF_BW656_ENABLE; 1278 ISIF_BW656_ENABLE;
1279 break; 1279 break;
1280 1280
1281 case V4L2_MBUS_FMT_YUYV10_1X20: 1281 case MEDIA_BUS_FMT_YUYV10_1X20:
1282 if (pix_fmt != ISIF_PIXFMT_YCBCR_16BIT) { 1282 if (pix_fmt != ISIF_PIXFMT_YCBCR_16BIT) {
1283 pr_debug("Invalid pix_fmt(input mode)\n"); 1283 pr_debug("Invalid pix_fmt(input mode)\n");
1284 return -EINVAL; 1284 return -EINVAL;
@@ -1286,7 +1286,7 @@ static int isif_config_ycbcr(struct v4l2_subdev *sd, int mode)
1286 isif_write(isif->isif_cfg.base_addr, 3, REC656IF); 1286 isif_write(isif->isif_cfg.base_addr, 3, REC656IF);
1287 break; 1287 break;
1288 1288
1289 case V4L2_MBUS_FMT_Y8_1X8: 1289 case MEDIA_BUS_FMT_Y8_1X8:
1290 ccdcfg |= ISIF_PACK_8BIT; 1290 ccdcfg |= ISIF_PACK_8BIT;
1291 ccdcfg |= ISIF_YCINSWP_YCBCR; 1291 ccdcfg |= ISIF_YCINSWP_YCBCR;
1292 if (pix_fmt != ISIF_PIXFMT_YCBCR_8BIT) { 1292 if (pix_fmt != ISIF_PIXFMT_YCBCR_8BIT) {
@@ -1295,7 +1295,7 @@ static int isif_config_ycbcr(struct v4l2_subdev *sd, int mode)
1295 } 1295 }
1296 break; 1296 break;
1297 1297
1298 case V4L2_MBUS_FMT_YUYV8_1X16: 1298 case MEDIA_BUS_FMT_YUYV8_1X16:
1299 if (pix_fmt != ISIF_PIXFMT_YCBCR_16BIT) { 1299 if (pix_fmt != ISIF_PIXFMT_YCBCR_16BIT) {
1300 pr_debug("Invalid pix_fmt(input mode)\n"); 1300 pr_debug("Invalid pix_fmt(input mode)\n");
1301 return -EINVAL; 1301 return -EINVAL;
@@ -1313,8 +1313,8 @@ static int isif_config_ycbcr(struct v4l2_subdev *sd, int mode)
1313 ISIF_PIX_ORDER_SHIFT; 1313 ISIF_PIX_ORDER_SHIFT;
1314 isif_write(isif->isif_cfg.base_addr, ccdcfg, CCDCFG); 1314 isif_write(isif->isif_cfg.base_addr, ccdcfg, CCDCFG);
1315 /* configure video window */ 1315 /* configure video window */
1316 if (format->code == V4L2_MBUS_FMT_YUYV10_1X20 || 1316 if (format->code == MEDIA_BUS_FMT_YUYV10_1X20 ||
1317 format->code == V4L2_MBUS_FMT_YUYV8_1X16) 1317 format->code == MEDIA_BUS_FMT_YUYV8_1X16)
1318 isif_setwin(isif, &params->win, params->frm_fmt, 1, mode); 1318 isif_setwin(isif, &params->win, params->frm_fmt, 1, mode);
1319 else 1319 else
1320 isif_setwin(isif, &params->win, params->frm_fmt, 2, mode); 1320 isif_setwin(isif, &params->win, params->frm_fmt, 2, mode);
@@ -1345,17 +1345,17 @@ static int isif_configure(struct v4l2_subdev *sd, int mode)
1345 format = &isif->formats[ISIF_PAD_SINK]; 1345 format = &isif->formats[ISIF_PAD_SINK];
1346 1346
1347 switch (format->code) { 1347 switch (format->code) {
1348 case V4L2_MBUS_FMT_SGRBG10_ALAW8_1X8: 1348 case MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8:
1349 case V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8: 1349 case MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8:
1350 case V4L2_MBUS_FMT_SGRBG12_1X12: 1350 case MEDIA_BUS_FMT_SGRBG12_1X12:
1351 return isif_config_raw(sd, mode); 1351 return isif_config_raw(sd, mode);
1352 1352
1353 case V4L2_MBUS_FMT_YUYV8_2X8: 1353 case MEDIA_BUS_FMT_YUYV8_2X8:
1354 case V4L2_MBUS_FMT_UYVY8_2X8: 1354 case MEDIA_BUS_FMT_UYVY8_2X8:
1355 case V4L2_MBUS_FMT_YUYV10_2X10: 1355 case MEDIA_BUS_FMT_YUYV10_2X10:
1356 case V4L2_MBUS_FMT_Y8_1X8: 1356 case MEDIA_BUS_FMT_Y8_1X8:
1357 case V4L2_MBUS_FMT_YUYV8_1X16: 1357 case MEDIA_BUS_FMT_YUYV8_1X16:
1358 case V4L2_MBUS_FMT_YUYV10_1X20: 1358 case MEDIA_BUS_FMT_YUYV10_1X20:
1359 return isif_config_ycbcr(sd, mode); 1359 return isif_config_ycbcr(sd, mode);
1360 1360
1361 default: 1361 default:
@@ -1630,7 +1630,7 @@ isif_init_formats(struct v4l2_subdev *sd,
1630 memset(&format, 0, sizeof(format)); 1630 memset(&format, 0, sizeof(format));
1631 format.pad = ISIF_PAD_SINK; 1631 format.pad = ISIF_PAD_SINK;
1632 format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE; 1632 format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
1633 format.format.code = V4L2_MBUS_FMT_SGRBG12_1X12; 1633 format.format.code = MEDIA_BUS_FMT_SGRBG12_1X12;
1634 format.format.width = MAX_WIDTH; 1634 format.format.width = MAX_WIDTH;
1635 format.format.height = MAX_HEIGHT; 1635 format.format.height = MAX_HEIGHT;
1636 isif_set_format(sd, fh, &format); 1636 isif_set_format(sd, fh, &format);
@@ -1638,7 +1638,7 @@ isif_init_formats(struct v4l2_subdev *sd,
1638 memset(&format, 0, sizeof(format)); 1638 memset(&format, 0, sizeof(format));
1639 format.pad = ISIF_PAD_SOURCE; 1639 format.pad = ISIF_PAD_SOURCE;
1640 format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE; 1640 format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
1641 format.format.code = V4L2_MBUS_FMT_SGRBG12_1X12; 1641 format.format.code = MEDIA_BUS_FMT_SGRBG12_1X12;
1642 format.format.width = MAX_WIDTH; 1642 format.format.width = MAX_WIDTH;
1643 format.format.height = MAX_HEIGHT; 1643 format.format.height = MAX_HEIGHT;
1644 isif_set_format(sd, fh, &format); 1644 isif_set_format(sd, fh, &format);
diff --git a/drivers/staging/media/davinci_vpfe/dm365_resizer.c b/drivers/staging/media/davinci_vpfe/dm365_resizer.c
index 8828d6c2aab1..e0b29c8ca221 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_resizer.c
+++ b/drivers/staging/media/davinci_vpfe/dm365_resizer.c
@@ -35,18 +35,18 @@
35#define MIN_OUT_HEIGHT 2 35#define MIN_OUT_HEIGHT 2
36 36
37static const unsigned int resizer_input_formats[] = { 37static const unsigned int resizer_input_formats[] = {
38 V4L2_MBUS_FMT_UYVY8_2X8, 38 MEDIA_BUS_FMT_UYVY8_2X8,
39 V4L2_MBUS_FMT_Y8_1X8, 39 MEDIA_BUS_FMT_Y8_1X8,
40 V4L2_MBUS_FMT_UV8_1X8, 40 MEDIA_BUS_FMT_UV8_1X8,
41 V4L2_MBUS_FMT_SGRBG12_1X12, 41 MEDIA_BUS_FMT_SGRBG12_1X12,
42}; 42};
43 43
44static const unsigned int resizer_output_formats[] = { 44static const unsigned int resizer_output_formats[] = {
45 V4L2_MBUS_FMT_UYVY8_2X8, 45 MEDIA_BUS_FMT_UYVY8_2X8,
46 V4L2_MBUS_FMT_Y8_1X8, 46 MEDIA_BUS_FMT_Y8_1X8,
47 V4L2_MBUS_FMT_UV8_1X8, 47 MEDIA_BUS_FMT_UV8_1X8,
48 V4L2_MBUS_FMT_YDYUYDYV8_1X16, 48 MEDIA_BUS_FMT_YDYUYDYV8_1X16,
49 V4L2_MBUS_FMT_SGRBG12_1X12, 49 MEDIA_BUS_FMT_SGRBG12_1X12,
50}; 50};
51 51
52/* resizer_calculate_line_length() - This function calculates the line length of 52/* resizer_calculate_line_length() - This function calculates the line length of
@@ -54,17 +54,17 @@ static const unsigned int resizer_output_formats[] = {
54 * output. 54 * output.
55 */ 55 */
56static void 56static void
57resizer_calculate_line_length(enum v4l2_mbus_pixelcode pix, int width, 57resizer_calculate_line_length(u32 pix, int width, int height,
58 int height, int *line_len, int *line_len_c) 58 int *line_len, int *line_len_c)
59{ 59{
60 *line_len = 0; 60 *line_len = 0;
61 *line_len_c = 0; 61 *line_len_c = 0;
62 62
63 if (pix == V4L2_MBUS_FMT_UYVY8_2X8 || 63 if (pix == MEDIA_BUS_FMT_UYVY8_2X8 ||
64 pix == V4L2_MBUS_FMT_SGRBG12_1X12) { 64 pix == MEDIA_BUS_FMT_SGRBG12_1X12) {
65 *line_len = width << 1; 65 *line_len = width << 1;
66 } else if (pix == V4L2_MBUS_FMT_Y8_1X8 || 66 } else if (pix == MEDIA_BUS_FMT_Y8_1X8 ||
67 pix == V4L2_MBUS_FMT_UV8_1X8) { 67 pix == MEDIA_BUS_FMT_UV8_1X8) {
68 *line_len = width; 68 *line_len = width;
69 *line_len_c = width; 69 *line_len_c = width;
70 } else { 70 } else {
@@ -85,11 +85,11 @@ resizer_validate_output_image_format(struct device *dev,
85 struct v4l2_mbus_framefmt *format, 85 struct v4l2_mbus_framefmt *format,
86 int *in_line_len, int *in_line_len_c) 86 int *in_line_len, int *in_line_len_c)
87{ 87{
88 if (format->code != V4L2_MBUS_FMT_UYVY8_2X8 && 88 if (format->code != MEDIA_BUS_FMT_UYVY8_2X8 &&
89 format->code != V4L2_MBUS_FMT_Y8_1X8 && 89 format->code != MEDIA_BUS_FMT_Y8_1X8 &&
90 format->code != V4L2_MBUS_FMT_UV8_1X8 && 90 format->code != MEDIA_BUS_FMT_UV8_1X8 &&
91 format->code != V4L2_MBUS_FMT_YDYUYDYV8_1X16 && 91 format->code != MEDIA_BUS_FMT_YDYUYDYV8_1X16 &&
92 format->code != V4L2_MBUS_FMT_SGRBG12_1X12) { 92 format->code != MEDIA_BUS_FMT_SGRBG12_1X12) {
93 dev_err(dev, "Invalid Mbus format, %d\n", format->code); 93 dev_err(dev, "Invalid Mbus format, %d\n", format->code);
94 return -EINVAL; 94 return -EINVAL;
95 } 95 }
@@ -281,7 +281,7 @@ resizer_calculate_sdram_offsets(struct vpfe_resizer_device *resizer, int index)
281 param->ext_mem_param[index].c_offset = 0; 281 param->ext_mem_param[index].c_offset = 0;
282 param->ext_mem_param[index].flip_ofst_y = 0; 282 param->ext_mem_param[index].flip_ofst_y = 0;
283 param->ext_mem_param[index].flip_ofst_c = 0; 283 param->ext_mem_param[index].flip_ofst_c = 0;
284 if (outformat->code == V4L2_MBUS_FMT_YDYUYDYV8_1X16) { 284 if (outformat->code == MEDIA_BUS_FMT_YDYUYDYV8_1X16) {
285 /* YUV 420 */ 285 /* YUV 420 */
286 yuv_420 = 1; 286 yuv_420 = 1;
287 bytesperpixel = 1; 287 bytesperpixel = 1;
@@ -322,7 +322,7 @@ static int resizer_configure_output_win(struct vpfe_resizer_device *resizer)
322 outformat = &resizer->resizer_a.formats[RESIZER_PAD_SOURCE]; 322 outformat = &resizer->resizer_a.formats[RESIZER_PAD_SOURCE];
323 323
324 output_specs.vst_y = param->user_config.vst; 324 output_specs.vst_y = param->user_config.vst;
325 if (outformat->code == V4L2_MBUS_FMT_YDYUYDYV8_1X16) 325 if (outformat->code == MEDIA_BUS_FMT_YDYUYDYV8_1X16)
326 output_specs.vst_c = param->user_config.vst; 326 output_specs.vst_c = param->user_config.vst;
327 327
328 configure_resizer_out_params(resizer, RSZ_A, &output_specs, 0, 0); 328 configure_resizer_out_params(resizer, RSZ_A, &output_specs, 0, 0);
@@ -336,7 +336,7 @@ static int resizer_configure_output_win(struct vpfe_resizer_device *resizer)
336 if (param->rsz_en[RSZ_B]) 336 if (param->rsz_en[RSZ_B])
337 resizer_calculate_resize_ratios(resizer, RSZ_B); 337 resizer_calculate_resize_ratios(resizer, RSZ_B);
338 338
339 if (outformat->code == V4L2_MBUS_FMT_YDYUYDYV8_1X16) 339 if (outformat->code == MEDIA_BUS_FMT_YDYUYDYV8_1X16)
340 resizer_enable_422_420_conversion(param, RSZ_A, ENABLE); 340 resizer_enable_422_420_conversion(param, RSZ_A, ENABLE);
341 else 341 else
342 resizer_enable_422_420_conversion(param, RSZ_A, DISABLE); 342 resizer_enable_422_420_conversion(param, RSZ_A, DISABLE);
@@ -447,26 +447,26 @@ resizer_configure_common_in_params(struct vpfe_resizer_device *resizer)
447 param->rsz_common.source = IPIPE_DATA; 447 param->rsz_common.source = IPIPE_DATA;
448 448
449 switch (informat->code) { 449 switch (informat->code) {
450 case V4L2_MBUS_FMT_UYVY8_2X8: 450 case MEDIA_BUS_FMT_UYVY8_2X8:
451 param->rsz_common.src_img_fmt = RSZ_IMG_422; 451 param->rsz_common.src_img_fmt = RSZ_IMG_422;
452 param->rsz_common.raw_flip = 0; 452 param->rsz_common.raw_flip = 0;
453 break; 453 break;
454 454
455 case V4L2_MBUS_FMT_Y8_1X8: 455 case MEDIA_BUS_FMT_Y8_1X8:
456 param->rsz_common.src_img_fmt = RSZ_IMG_420; 456 param->rsz_common.src_img_fmt = RSZ_IMG_420;
457 /* Select y */ 457 /* Select y */
458 param->rsz_common.y_c = 0; 458 param->rsz_common.y_c = 0;
459 param->rsz_common.raw_flip = 0; 459 param->rsz_common.raw_flip = 0;
460 break; 460 break;
461 461
462 case V4L2_MBUS_FMT_UV8_1X8: 462 case MEDIA_BUS_FMT_UV8_1X8:
463 param->rsz_common.src_img_fmt = RSZ_IMG_420; 463 param->rsz_common.src_img_fmt = RSZ_IMG_420;
464 /* Select y */ 464 /* Select y */
465 param->rsz_common.y_c = 1; 465 param->rsz_common.y_c = 1;
466 param->rsz_common.raw_flip = 0; 466 param->rsz_common.raw_flip = 0;
467 break; 467 break;
468 468
469 case V4L2_MBUS_FMT_SGRBG12_1X12: 469 case MEDIA_BUS_FMT_SGRBG12_1X12:
470 param->rsz_common.raw_flip = 1; 470 param->rsz_common.raw_flip = 1;
471 break; 471 break;
472 472
@@ -519,7 +519,7 @@ resizer_configure_in_continious_mode(struct vpfe_resizer_device *resizer)
519 param->ext_mem_param[RSZ_B].rsz_sdr_oft_c = line_len_c; 519 param->ext_mem_param[RSZ_B].rsz_sdr_oft_c = line_len_c;
520 configure_resizer_out_params(resizer, RSZ_B, 520 configure_resizer_out_params(resizer, RSZ_B,
521 &cont_config->output2, 0, 1); 521 &cont_config->output2, 0, 1);
522 if (outformat2->code == V4L2_MBUS_FMT_YDYUYDYV8_1X16) 522 if (outformat2->code == MEDIA_BUS_FMT_YDYUYDYV8_1X16)
523 resizer_enable_422_420_conversion(param, 523 resizer_enable_422_420_conversion(param,
524 RSZ_B, ENABLE); 524 RSZ_B, ENABLE);
525 else 525 else
@@ -540,15 +540,15 @@ resizer_configure_in_continious_mode(struct vpfe_resizer_device *resizer)
540 540
541static inline int 541static inline int
542resizer_validate_input_image_format(struct device *dev, 542resizer_validate_input_image_format(struct device *dev,
543 enum v4l2_mbus_pixelcode pix, 543 u32 pix,
544 int width, int height, int *line_len) 544 int width, int height, int *line_len)
545{ 545{
546 int val; 546 int val;
547 547
548 if (pix != V4L2_MBUS_FMT_UYVY8_2X8 && 548 if (pix != MEDIA_BUS_FMT_UYVY8_2X8 &&
549 pix != V4L2_MBUS_FMT_Y8_1X8 && 549 pix != MEDIA_BUS_FMT_Y8_1X8 &&
550 pix != V4L2_MBUS_FMT_UV8_1X8 && 550 pix != MEDIA_BUS_FMT_UV8_1X8 &&
551 pix != V4L2_MBUS_FMT_SGRBG12_1X12) { 551 pix != MEDIA_BUS_FMT_SGRBG12_1X12) {
552 dev_err(dev, 552 dev_err(dev,
553 "resizer validate output: pix format not supported, %d\n", pix); 553 "resizer validate output: pix format not supported, %d\n", pix);
554 return -EINVAL; 554 return -EINVAL;
@@ -560,7 +560,7 @@ resizer_validate_input_image_format(struct device *dev,
560 return -EINVAL; 560 return -EINVAL;
561 } 561 }
562 562
563 if (pix == V4L2_MBUS_FMT_UV8_1X8) 563 if (pix == MEDIA_BUS_FMT_UV8_1X8)
564 resizer_calculate_line_length(pix, width, 564 resizer_calculate_line_length(pix, width,
565 height, &val, line_len); 565 height, &val, line_len);
566 else 566 else
@@ -709,12 +709,12 @@ resizer_configure_in_single_shot_mode(struct vpfe_resizer_device *resizer)
709 configure_resizer_out_params(resizer, RSZ_A, 709 configure_resizer_out_params(resizer, RSZ_A,
710 &param->user_config.output1, 0, 1); 710 &param->user_config.output1, 0, 1);
711 711
712 if (outformat1->code == V4L2_MBUS_FMT_SGRBG12_1X12) 712 if (outformat1->code == MEDIA_BUS_FMT_SGRBG12_1X12)
713 param->rsz_common.raw_flip = 1; 713 param->rsz_common.raw_flip = 1;
714 else 714 else
715 param->rsz_common.raw_flip = 0; 715 param->rsz_common.raw_flip = 0;
716 716
717 if (outformat1->code == V4L2_MBUS_FMT_YDYUYDYV8_1X16) 717 if (outformat1->code == MEDIA_BUS_FMT_YDYUYDYV8_1X16)
718 resizer_enable_422_420_conversion(param, 718 resizer_enable_422_420_conversion(param,
719 RSZ_A, ENABLE); 719 RSZ_A, ENABLE);
720 else 720 else
@@ -732,7 +732,7 @@ resizer_configure_in_single_shot_mode(struct vpfe_resizer_device *resizer)
732 param->ext_mem_param[RSZ_B].rsz_sdr_oft_c = line_len_c; 732 param->ext_mem_param[RSZ_B].rsz_sdr_oft_c = line_len_c;
733 configure_resizer_out_params(resizer, RSZ_B, 733 configure_resizer_out_params(resizer, RSZ_B,
734 &param->user_config.output2, 0, 1); 734 &param->user_config.output2, 0, 1);
735 if (outformat2->code == V4L2_MBUS_FMT_YDYUYDYV8_1X16) 735 if (outformat2->code == MEDIA_BUS_FMT_YDYUYDYV8_1X16)
736 resizer_enable_422_420_conversion(param, 736 resizer_enable_422_420_conversion(param,
737 RSZ_B, ENABLE); 737 RSZ_B, ENABLE);
738 else 738 else
@@ -745,7 +745,7 @@ resizer_configure_in_single_shot_mode(struct vpfe_resizer_device *resizer)
745 resizer_calculate_resize_ratios(resizer, RSZ_A); 745 resizer_calculate_resize_ratios(resizer, RSZ_A);
746 resizer_calculate_sdram_offsets(resizer, RSZ_A); 746 resizer_calculate_sdram_offsets(resizer, RSZ_A);
747 /* Overriding resize ratio calculation */ 747 /* Overriding resize ratio calculation */
748 if (informat->code == V4L2_MBUS_FMT_UV8_1X8) { 748 if (informat->code == MEDIA_BUS_FMT_UV8_1X8) {
749 param->rsz_rsc_param[RSZ_A].v_dif = 749 param->rsz_rsc_param[RSZ_A].v_dif =
750 (((informat->height + 1) * 2) * 256) / 750 (((informat->height + 1) * 2) * 256) /
751 (param->rsz_rsc_param[RSZ_A].o_vsz + 1); 751 (param->rsz_rsc_param[RSZ_A].o_vsz + 1);
@@ -756,7 +756,7 @@ resizer_configure_in_single_shot_mode(struct vpfe_resizer_device *resizer)
756 resizer_calculate_resize_ratios(resizer, RSZ_B); 756 resizer_calculate_resize_ratios(resizer, RSZ_B);
757 resizer_calculate_sdram_offsets(resizer, RSZ_B); 757 resizer_calculate_sdram_offsets(resizer, RSZ_B);
758 /* Overriding resize ratio calculation */ 758 /* Overriding resize ratio calculation */
759 if (informat->code == V4L2_MBUS_FMT_UV8_1X8) { 759 if (informat->code == MEDIA_BUS_FMT_UV8_1X8) {
760 param->rsz_rsc_param[RSZ_B].v_dif = 760 param->rsz_rsc_param[RSZ_B].v_dif =
761 (((informat->height + 1) * 2) * 256) / 761 (((informat->height + 1) * 2) * 256) /
762 (param->rsz_rsc_param[RSZ_B].o_vsz + 1); 762 (param->rsz_rsc_param[RSZ_B].o_vsz + 1);
@@ -1340,7 +1340,7 @@ resizer_try_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
1340 } 1340 }
1341 /* If not found, use UYVY as default */ 1341 /* If not found, use UYVY as default */
1342 if (i >= ARRAY_SIZE(resizer_input_formats)) 1342 if (i >= ARRAY_SIZE(resizer_input_formats))
1343 fmt->code = V4L2_MBUS_FMT_UYVY8_2X8; 1343 fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
1344 1344
1345 fmt->width = clamp_t(u32, fmt->width, MIN_IN_WIDTH, 1345 fmt->width = clamp_t(u32, fmt->width, MIN_IN_WIDTH,
1346 MAX_IN_WIDTH); 1346 MAX_IN_WIDTH);
@@ -1357,7 +1357,7 @@ resizer_try_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
1357 } 1357 }
1358 /* If not found, use UYVY as default */ 1358 /* If not found, use UYVY as default */
1359 if (i >= ARRAY_SIZE(resizer_output_formats)) 1359 if (i >= ARRAY_SIZE(resizer_output_formats))
1360 fmt->code = V4L2_MBUS_FMT_UYVY8_2X8; 1360 fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
1361 1361
1362 fmt->width = clamp_t(u32, fmt->width, MIN_OUT_WIDTH, 1362 fmt->width = clamp_t(u32, fmt->width, MIN_OUT_WIDTH,
1363 max_out_width); 1363 max_out_width);
@@ -1375,7 +1375,7 @@ resizer_try_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
1375 } 1375 }
1376 /* If not found, use UYVY as default */ 1376 /* If not found, use UYVY as default */
1377 if (i >= ARRAY_SIZE(resizer_output_formats)) 1377 if (i >= ARRAY_SIZE(resizer_output_formats))
1378 fmt->code = V4L2_MBUS_FMT_UYVY8_2X8; 1378 fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
1379 1379
1380 fmt->width = clamp_t(u32, fmt->width, MIN_OUT_WIDTH, 1380 fmt->width = clamp_t(u32, fmt->width, MIN_OUT_WIDTH,
1381 max_out_width); 1381 max_out_width);
@@ -1548,7 +1548,7 @@ static int resizer_init_formats(struct v4l2_subdev *sd,
1548 memset(&format, 0, sizeof(format)); 1548 memset(&format, 0, sizeof(format));
1549 format.pad = RESIZER_CROP_PAD_SINK; 1549 format.pad = RESIZER_CROP_PAD_SINK;
1550 format.which = which; 1550 format.which = which;
1551 format.format.code = V4L2_MBUS_FMT_YUYV8_2X8; 1551 format.format.code = MEDIA_BUS_FMT_YUYV8_2X8;
1552 format.format.width = MAX_IN_WIDTH; 1552 format.format.width = MAX_IN_WIDTH;
1553 format.format.height = MAX_IN_HEIGHT; 1553 format.format.height = MAX_IN_HEIGHT;
1554 resizer_set_format(sd, fh, &format); 1554 resizer_set_format(sd, fh, &format);
@@ -1556,7 +1556,7 @@ static int resizer_init_formats(struct v4l2_subdev *sd,
1556 memset(&format, 0, sizeof(format)); 1556 memset(&format, 0, sizeof(format));
1557 format.pad = RESIZER_CROP_PAD_SOURCE; 1557 format.pad = RESIZER_CROP_PAD_SOURCE;
1558 format.which = which; 1558 format.which = which;
1559 format.format.code = V4L2_MBUS_FMT_UYVY8_2X8; 1559 format.format.code = MEDIA_BUS_FMT_UYVY8_2X8;
1560 format.format.width = MAX_IN_WIDTH; 1560 format.format.width = MAX_IN_WIDTH;
1561 format.format.height = MAX_IN_WIDTH; 1561 format.format.height = MAX_IN_WIDTH;
1562 resizer_set_format(sd, fh, &format); 1562 resizer_set_format(sd, fh, &format);
@@ -1564,7 +1564,7 @@ static int resizer_init_formats(struct v4l2_subdev *sd,
1564 memset(&format, 0, sizeof(format)); 1564 memset(&format, 0, sizeof(format));
1565 format.pad = RESIZER_CROP_PAD_SOURCE2; 1565 format.pad = RESIZER_CROP_PAD_SOURCE2;
1566 format.which = which; 1566 format.which = which;
1567 format.format.code = V4L2_MBUS_FMT_UYVY8_2X8; 1567 format.format.code = MEDIA_BUS_FMT_UYVY8_2X8;
1568 format.format.width = MAX_IN_WIDTH; 1568 format.format.width = MAX_IN_WIDTH;
1569 format.format.height = MAX_IN_WIDTH; 1569 format.format.height = MAX_IN_WIDTH;
1570 resizer_set_format(sd, fh, &format); 1570 resizer_set_format(sd, fh, &format);
@@ -1572,7 +1572,7 @@ static int resizer_init_formats(struct v4l2_subdev *sd,
1572 memset(&format, 0, sizeof(format)); 1572 memset(&format, 0, sizeof(format));
1573 format.pad = RESIZER_PAD_SINK; 1573 format.pad = RESIZER_PAD_SINK;
1574 format.which = which; 1574 format.which = which;
1575 format.format.code = V4L2_MBUS_FMT_YUYV8_2X8; 1575 format.format.code = MEDIA_BUS_FMT_YUYV8_2X8;
1576 format.format.width = MAX_IN_WIDTH; 1576 format.format.width = MAX_IN_WIDTH;
1577 format.format.height = MAX_IN_HEIGHT; 1577 format.format.height = MAX_IN_HEIGHT;
1578 resizer_set_format(sd, fh, &format); 1578 resizer_set_format(sd, fh, &format);
@@ -1580,7 +1580,7 @@ static int resizer_init_formats(struct v4l2_subdev *sd,
1580 memset(&format, 0, sizeof(format)); 1580 memset(&format, 0, sizeof(format));
1581 format.pad = RESIZER_PAD_SOURCE; 1581 format.pad = RESIZER_PAD_SOURCE;
1582 format.which = which; 1582 format.which = which;
1583 format.format.code = V4L2_MBUS_FMT_UYVY8_2X8; 1583 format.format.code = MEDIA_BUS_FMT_UYVY8_2X8;
1584 format.format.width = IPIPE_MAX_OUTPUT_WIDTH_A; 1584 format.format.width = IPIPE_MAX_OUTPUT_WIDTH_A;
1585 format.format.height = IPIPE_MAX_OUTPUT_HEIGHT_A; 1585 format.format.height = IPIPE_MAX_OUTPUT_HEIGHT_A;
1586 resizer_set_format(sd, fh, &format); 1586 resizer_set_format(sd, fh, &format);
@@ -1588,7 +1588,7 @@ static int resizer_init_formats(struct v4l2_subdev *sd,
1588 memset(&format, 0, sizeof(format)); 1588 memset(&format, 0, sizeof(format));
1589 format.pad = RESIZER_PAD_SINK; 1589 format.pad = RESIZER_PAD_SINK;
1590 format.which = which; 1590 format.which = which;
1591 format.format.code = V4L2_MBUS_FMT_YUYV8_2X8; 1591 format.format.code = MEDIA_BUS_FMT_YUYV8_2X8;
1592 format.format.width = MAX_IN_WIDTH; 1592 format.format.width = MAX_IN_WIDTH;
1593 format.format.height = MAX_IN_HEIGHT; 1593 format.format.height = MAX_IN_HEIGHT;
1594 resizer_set_format(sd, fh, &format); 1594 resizer_set_format(sd, fh, &format);
@@ -1596,7 +1596,7 @@ static int resizer_init_formats(struct v4l2_subdev *sd,
1596 memset(&format, 0, sizeof(format)); 1596 memset(&format, 0, sizeof(format));
1597 format.pad = RESIZER_PAD_SOURCE; 1597 format.pad = RESIZER_PAD_SOURCE;
1598 format.which = which; 1598 format.which = which;
1599 format.format.code = V4L2_MBUS_FMT_UYVY8_2X8; 1599 format.format.code = MEDIA_BUS_FMT_UYVY8_2X8;
1600 format.format.width = IPIPE_MAX_OUTPUT_WIDTH_B; 1600 format.format.width = IPIPE_MAX_OUTPUT_WIDTH_B;
1601 format.format.height = IPIPE_MAX_OUTPUT_HEIGHT_B; 1601 format.format.height = IPIPE_MAX_OUTPUT_HEIGHT_B;
1602 resizer_set_format(sd, fh, &format); 1602 resizer_set_format(sd, fh, &format);
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c b/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c
index a862b28092e4..bf45d2cc5965 100644
--- a/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c
+++ b/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c
@@ -99,47 +99,47 @@ void mbus_to_pix(const struct v4l2_mbus_framefmt *mbus,
99 struct v4l2_pix_format *pix) 99 struct v4l2_pix_format *pix)
100{ 100{
101 switch (mbus->code) { 101 switch (mbus->code) {
102 case V4L2_MBUS_FMT_UYVY8_2X8: 102 case MEDIA_BUS_FMT_UYVY8_2X8:
103 pix->pixelformat = V4L2_PIX_FMT_UYVY; 103 pix->pixelformat = V4L2_PIX_FMT_UYVY;
104 pix->bytesperline = pix->width * 2; 104 pix->bytesperline = pix->width * 2;
105 break; 105 break;
106 106
107 case V4L2_MBUS_FMT_YUYV8_2X8: 107 case MEDIA_BUS_FMT_YUYV8_2X8:
108 pix->pixelformat = V4L2_PIX_FMT_YUYV; 108 pix->pixelformat = V4L2_PIX_FMT_YUYV;
109 pix->bytesperline = pix->width * 2; 109 pix->bytesperline = pix->width * 2;
110 break; 110 break;
111 111
112 case V4L2_MBUS_FMT_YUYV10_1X20: 112 case MEDIA_BUS_FMT_YUYV10_1X20:
113 pix->pixelformat = V4L2_PIX_FMT_UYVY; 113 pix->pixelformat = V4L2_PIX_FMT_UYVY;
114 pix->bytesperline = pix->width * 2; 114 pix->bytesperline = pix->width * 2;
115 break; 115 break;
116 116
117 case V4L2_MBUS_FMT_SGRBG12_1X12: 117 case MEDIA_BUS_FMT_SGRBG12_1X12:
118 pix->pixelformat = V4L2_PIX_FMT_SBGGR16; 118 pix->pixelformat = V4L2_PIX_FMT_SBGGR16;
119 pix->bytesperline = pix->width * 2; 119 pix->bytesperline = pix->width * 2;
120 break; 120 break;
121 121
122 case V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8: 122 case MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8:
123 pix->pixelformat = V4L2_PIX_FMT_SGRBG10DPCM8; 123 pix->pixelformat = V4L2_PIX_FMT_SGRBG10DPCM8;
124 pix->bytesperline = pix->width; 124 pix->bytesperline = pix->width;
125 break; 125 break;
126 126
127 case V4L2_MBUS_FMT_SGRBG10_ALAW8_1X8: 127 case MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8:
128 pix->pixelformat = V4L2_PIX_FMT_SGRBG10ALAW8; 128 pix->pixelformat = V4L2_PIX_FMT_SGRBG10ALAW8;
129 pix->bytesperline = pix->width; 129 pix->bytesperline = pix->width;
130 break; 130 break;
131 131
132 case V4L2_MBUS_FMT_YDYUYDYV8_1X16: 132 case MEDIA_BUS_FMT_YDYUYDYV8_1X16:
133 pix->pixelformat = V4L2_PIX_FMT_NV12; 133 pix->pixelformat = V4L2_PIX_FMT_NV12;
134 pix->bytesperline = pix->width; 134 pix->bytesperline = pix->width;
135 break; 135 break;
136 136
137 case V4L2_MBUS_FMT_Y8_1X8: 137 case MEDIA_BUS_FMT_Y8_1X8:
138 pix->pixelformat = V4L2_PIX_FMT_GREY; 138 pix->pixelformat = V4L2_PIX_FMT_GREY;
139 pix->bytesperline = pix->width; 139 pix->bytesperline = pix->width;
140 break; 140 break;
141 141
142 case V4L2_MBUS_FMT_UV8_1X8: 142 case MEDIA_BUS_FMT_UV8_1X8:
143 pix->pixelformat = V4L2_PIX_FMT_UV8; 143 pix->pixelformat = V4L2_PIX_FMT_UV8;
144 pix->bytesperline = pix->width; 144 pix->bytesperline = pix->width;
145 break; 145 break;
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.c b/drivers/staging/media/davinci_vpfe/vpfe_video.c
index 6f9171c39bdc..06d48d5eb0a0 100644
--- a/drivers/staging/media/davinci_vpfe/vpfe_video.c
+++ b/drivers/staging/media/davinci_vpfe/vpfe_video.c
@@ -600,11 +600,11 @@ static int vpfe_querycap(struct file *file, void *priv,
600 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_querycap\n"); 600 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_querycap\n");
601 601
602 if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) 602 if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
603 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; 603 cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
604 else 604 else
605 cap->capabilities = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING; 605 cap->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
606 cap->device_caps = cap->capabilities; 606 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT |
607 cap->version = VPFE_CAPTURE_VERSION_CODE; 607 V4L2_CAP_STREAMING | V4L2_CAP_DEVICE_CAPS;
608 strlcpy(cap->driver, CAPTURE_DRV_NAME, sizeof(cap->driver)); 608 strlcpy(cap->driver, CAPTURE_DRV_NAME, sizeof(cap->driver));
609 strlcpy(cap->bus_info, "VPFE", sizeof(cap->bus_info)); 609 strlcpy(cap->bus_info, "VPFE", sizeof(cap->bus_info));
610 strlcpy(cap->card, vpfe_dev->cfg->card_name, sizeof(cap->card)); 610 strlcpy(cap->card, vpfe_dev->cfg->card_name, sizeof(cap->card));
diff --git a/drivers/staging/media/dt3155v4l/dt3155v4l.c b/drivers/staging/media/dt3155v4l/dt3155v4l.c
index 40580228a6c7..293ffda503e0 100644
--- a/drivers/staging/media/dt3155v4l/dt3155v4l.c
+++ b/drivers/staging/media/dt3155v4l/dt3155v4l.c
@@ -512,10 +512,9 @@ dt3155_ioc_querycap(struct file *filp, void *p, struct v4l2_capability *cap)
512 strcpy(cap->driver, DT3155_NAME); 512 strcpy(cap->driver, DT3155_NAME);
513 strcpy(cap->card, DT3155_NAME " frame grabber"); 513 strcpy(cap->card, DT3155_NAME " frame grabber");
514 sprintf(cap->bus_info, "PCI:%s", pci_name(pd->pdev)); 514 sprintf(cap->bus_info, "PCI:%s", pci_name(pd->pdev));
515 cap->version = 515 cap->device_caps = V4L2_CAP_VIDEO_CAPTURE |
516 KERNEL_VERSION(DT3155_VER_MAJ, DT3155_VER_MIN, DT3155_VER_EXT);
517 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE |
518 DT3155_CAPTURE_METHOD; 516 DT3155_CAPTURE_METHOD;
517 cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
519 return 0; 518 return 0;
520} 519}
521 520
diff --git a/drivers/staging/media/lirc/Kconfig b/drivers/staging/media/lirc/Kconfig
index e60a59fc252b..6879c4651b46 100644
--- a/drivers/staging/media/lirc/Kconfig
+++ b/drivers/staging/media/lirc/Kconfig
@@ -18,12 +18,6 @@ config LIRC_BT829
18 help 18 help
19 Driver for the IR interface on BT829-based hardware 19 Driver for the IR interface on BT829-based hardware
20 20
21config LIRC_IGORPLUGUSB
22 tristate "Igor Cesko's USB IR Receiver"
23 depends on LIRC && USB
24 help
25 Driver for Igor Cesko's USB IR Receiver
26
27config LIRC_IMON 21config LIRC_IMON
28 tristate "Legacy SoundGraph iMON Receiver and Display" 22 tristate "Legacy SoundGraph iMON Receiver and Display"
29 depends on LIRC && USB 23 depends on LIRC && USB
diff --git a/drivers/staging/media/lirc/Makefile b/drivers/staging/media/lirc/Makefile
index b90fcabddab6..5430adf0475d 100644
--- a/drivers/staging/media/lirc/Makefile
+++ b/drivers/staging/media/lirc/Makefile
@@ -4,7 +4,6 @@
4# Each configuration option enables a list of files. 4# Each configuration option enables a list of files.
5 5
6obj-$(CONFIG_LIRC_BT829) += lirc_bt829.o 6obj-$(CONFIG_LIRC_BT829) += lirc_bt829.o
7obj-$(CONFIG_LIRC_IGORPLUGUSB) += lirc_igorplugusb.o
8obj-$(CONFIG_LIRC_IMON) += lirc_imon.o 7obj-$(CONFIG_LIRC_IMON) += lirc_imon.o
9obj-$(CONFIG_LIRC_PARALLEL) += lirc_parallel.o 8obj-$(CONFIG_LIRC_PARALLEL) += lirc_parallel.o
10obj-$(CONFIG_LIRC_SASEM) += lirc_sasem.o 9obj-$(CONFIG_LIRC_SASEM) += lirc_sasem.o
diff --git a/drivers/staging/media/lirc/lirc_igorplugusb.c b/drivers/staging/media/lirc/lirc_igorplugusb.c
deleted file mode 100644
index 431d1e86ebf9..000000000000
--- a/drivers/staging/media/lirc/lirc_igorplugusb.c
+++ /dev/null
@@ -1,508 +0,0 @@
1/*
2 * lirc_igorplugusb - USB remote support for LIRC
3 *
4 * Supports the standard homebrew IgorPlugUSB receiver with Igor's firmware.
5 * See http://www.cesko.host.sk/IgorPlugUSB/IgorPlug-USB%20(AVR)_eng.htm
6 *
7 * The device can only record bursts of up to 36 pulses/spaces.
8 * Works fine with RC5. Longer commands lead to device buffer overrun.
9 * (Maybe a better firmware or a microcontroller with more ram can help?)
10 *
11 * Version 0.1 [beta status]
12 *
13 * Copyright (C) 2004 Jan M. Hochstein
14 * <hochstein@algo.informatik.tu-darmstadt.de>
15 *
16 * This driver was derived from:
17 * Paul Miller <pmiller9@users.sourceforge.net>
18 * "lirc_atiusb" module
19 * Vladimir Dergachev <volodya@minspring.com>'s 2002
20 * "USB ATI Remote support" (input device)
21 * Adrian Dewhurst <sailor-lk@sailorfrag.net>'s 2002
22 * "USB StreamZap remote driver" (LIRC)
23 * Artur Lipowski <alipowski@kki.net.pl>'s 2002
24 * "lirc_dev" and "lirc_gpio" LIRC modules
25 */
26
27/*
28 * This program is free software; you can redistribute it and/or modify
29 * it under the terms of the GNU General Public License as published by
30 * the Free Software Foundation; either version 2 of the License, or
31 * (at your option) any later version.
32 *
33 * This program is distributed in the hope that it will be useful,
34 * but WITHOUT ANY WARRANTY; without even the implied warranty of
35 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
36 * GNU General Public License for more details.
37 *
38 * You should have received a copy of the GNU General Public License
39 * along with this program; if not, write to the Free Software
40 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
41 */
42
43#include <linux/module.h>
44#include <linux/kernel.h>
45#include <linux/kmod.h>
46#include <linux/sched.h>
47#include <linux/errno.h>
48#include <linux/fs.h>
49#include <linux/usb.h>
50#include <linux/time.h>
51
52#include <media/lirc.h>
53#include <media/lirc_dev.h>
54
55
56/* module identification */
57#define DRIVER_VERSION "0.2"
58#define DRIVER_AUTHOR \
59 "Jan M. Hochstein <hochstein@algo.informatik.tu-darmstadt.de>"
60#define DRIVER_DESC "Igorplug USB remote driver for LIRC"
61#define DRIVER_NAME "lirc_igorplugusb"
62
63/* One mode2 pulse/space has 4 bytes. */
64#define CODE_LENGTH sizeof(int)
65
66/* Igor's firmware cannot record bursts longer than 36. */
67#define DEVICE_BUFLEN 36
68
69/*
70 * Header at the beginning of the device's buffer:
71 * unsigned char data_length
72 * unsigned char data_start (!=0 means ring-buffer overrun)
73 * unsigned char counter (incremented by each burst)
74 */
75#define DEVICE_HEADERLEN 3
76
77/* This is for the gap */
78#define ADDITIONAL_LIRC_BYTES 2
79
80/* times to poll per second */
81#define SAMPLE_RATE 100
82static int sample_rate = SAMPLE_RATE;
83
84
85/**** Igor's USB Request Codes */
86
87#define SET_INFRABUFFER_EMPTY 1
88/**
89 * Params: none
90 * Answer: empty
91 */
92
93#define GET_INFRACODE 2
94/**
95 * Params:
96 * wValue: offset to begin reading infra buffer
97 *
98 * Answer: infra data
99 */
100
101#define SET_DATAPORT_DIRECTION 3
102/**
103 * Params:
104 * wValue: (byte) 1 bit for each data port pin (0=in, 1=out)
105 *
106 * Answer: empty
107 */
108
109#define GET_DATAPORT_DIRECTION 4
110/**
111 * Params: none
112 *
113 * Answer: (byte) 1 bit for each data port pin (0=in, 1=out)
114 */
115
116#define SET_OUT_DATAPORT 5
117/**
118 * Params:
119 * wValue: byte to write to output data port
120 *
121 * Answer: empty
122 */
123
124#define GET_OUT_DATAPORT 6
125/**
126 * Params: none
127 *
128 * Answer: least significant 3 bits read from output data port
129 */
130
131#define GET_IN_DATAPORT 7
132/**
133 * Params: none
134 *
135 * Answer: least significant 3 bits read from input data port
136 */
137
138#define READ_EEPROM 8
139/**
140 * Params:
141 * wValue: offset to begin reading EEPROM
142 *
143 * Answer: EEPROM bytes
144 */
145
146#define WRITE_EEPROM 9
147/**
148 * Params:
149 * wValue: offset to EEPROM byte
150 * wIndex: byte to write
151 *
152 * Answer: empty
153 */
154
155#define SEND_RS232 10
156/**
157 * Params:
158 * wValue: byte to send
159 *
160 * Answer: empty
161 */
162
163#define RECV_RS232 11
164/**
165 * Params: none
166 *
167 * Answer: byte received
168 */
169
170#define SET_RS232_BAUD 12
171/**
172 * Params:
173 * wValue: byte to write to UART bit rate register (UBRR)
174 *
175 * Answer: empty
176 */
177
178#define GET_RS232_BAUD 13
179/**
180 * Params: none
181 *
182 * Answer: byte read from UART bit rate register (UBRR)
183 */
184
185
186/* data structure for each usb remote */
187struct igorplug {
188
189 /* usb */
190 struct usb_device *usbdev;
191 int devnum;
192
193 unsigned char *buf_in;
194 unsigned int len_in;
195 int in_space;
196 struct timeval last_time;
197
198 dma_addr_t dma_in;
199
200 /* lirc */
201 struct lirc_driver *d;
202
203 /* handle sending (init strings) */
204 int send_flags;
205};
206
207static int unregister_from_lirc(struct igorplug *ir)
208{
209 struct lirc_driver *d;
210 int devnum;
211
212 devnum = ir->devnum;
213 d = ir->d;
214
215 if (!d) {
216 dev_err(&ir->usbdev->dev,
217 "%s: called with NULL lirc driver struct!\n", __func__);
218 return -EINVAL;
219 }
220
221 dev_dbg(&ir->usbdev->dev, "calling lirc_unregister_driver\n");
222 lirc_unregister_driver(d->minor);
223
224 return devnum;
225}
226
227static int set_use_inc(void *data)
228{
229 struct igorplug *ir = data;
230
231 if (!ir) {
232 printk(DRIVER_NAME "[?]: set_use_inc called with no context\n");
233 return -EIO;
234 }
235
236 dev_dbg(&ir->usbdev->dev, "set use inc\n");
237
238 if (!ir->usbdev)
239 return -ENODEV;
240
241 return 0;
242}
243
244static void set_use_dec(void *data)
245{
246 struct igorplug *ir = data;
247
248 if (!ir) {
249 printk(DRIVER_NAME "[?]: set_use_dec called with no context\n");
250 return;
251 }
252
253 dev_dbg(&ir->usbdev->dev, "set use dec\n");
254}
255
256static void send_fragment(struct igorplug *ir, struct lirc_buffer *buf,
257 int i, int max)
258{
259 int code;
260
261 /* MODE2: pulse/space (PULSE_BIT) in 1us units */
262 while (i < max) {
263 /* 1 Igor-tick = 85.333333 us */
264 code = (unsigned int)ir->buf_in[i] * 85 +
265 (unsigned int)ir->buf_in[i] / 3;
266 ir->last_time.tv_usec += code;
267 if (ir->in_space)
268 code |= PULSE_BIT;
269 lirc_buffer_write(buf, (unsigned char *)&code);
270 /* 1 chunk = CODE_LENGTH bytes */
271 ir->in_space ^= 1;
272 ++i;
273 }
274}
275
276/**
277 * Called in user context.
278 * return 0 if data was added to the buffer and
279 * -ENODATA if none was available. This should add some number of bits
280 * evenly divisible by code_length to the buffer
281 */
282static int igorplugusb_remote_poll(void *data, struct lirc_buffer *buf)
283{
284 int ret;
285 struct igorplug *ir = (struct igorplug *)data;
286
287 if (!ir || !ir->usbdev) /* Has the device been removed? */
288 return -ENODEV;
289
290 memset(ir->buf_in, 0, ir->len_in);
291
292 ret = usb_control_msg(ir->usbdev, usb_rcvctrlpipe(ir->usbdev, 0),
293 GET_INFRACODE, USB_TYPE_VENDOR | USB_DIR_IN,
294 0/* offset */, /*unused*/0,
295 ir->buf_in, ir->len_in,
296 /*timeout*/HZ * USB_CTRL_GET_TIMEOUT);
297 if (ret > 0) {
298 int code, timediff;
299 struct timeval now;
300
301 /* ACK packet has 1 byte --> ignore */
302 if (ret < DEVICE_HEADERLEN)
303 return -ENODATA;
304
305 dev_dbg(&ir->usbdev->dev, "Got %d bytes. Header: %*ph\n",
306 ret, 3, ir->buf_in);
307
308 do_gettimeofday(&now);
309 timediff = now.tv_sec - ir->last_time.tv_sec;
310 if (timediff + 1 > PULSE_MASK / 1000000)
311 timediff = PULSE_MASK;
312 else {
313 timediff *= 1000000;
314 timediff += now.tv_usec - ir->last_time.tv_usec;
315 }
316 ir->last_time.tv_sec = now.tv_sec;
317 ir->last_time.tv_usec = now.tv_usec;
318
319 /* create leading gap */
320 code = timediff;
321 lirc_buffer_write(buf, (unsigned char *)&code);
322 ir->in_space = 1; /* next comes a pulse */
323
324 if (ir->buf_in[2] == 0)
325 send_fragment(ir, buf, DEVICE_HEADERLEN, ret);
326 else {
327 dev_warn(&ir->usbdev->dev,
328 "[%d]: Device buffer overrun.\n", ir->devnum);
329 /* HHHNNNNNNNNNNNOOOOOOOO H = header
330 <---[2]---> N = newer
331 <---------ret--------> O = older */
332 ir->buf_in[2] %= ret - DEVICE_HEADERLEN; /* sanitize */
333 /* keep even-ness to not desync pulse/pause */
334 send_fragment(ir, buf, DEVICE_HEADERLEN +
335 ir->buf_in[2] - (ir->buf_in[2] & 1), ret);
336 send_fragment(ir, buf, DEVICE_HEADERLEN,
337 DEVICE_HEADERLEN + ir->buf_in[2]);
338 }
339
340 ret = usb_control_msg(
341 ir->usbdev, usb_rcvctrlpipe(ir->usbdev, 0),
342 SET_INFRABUFFER_EMPTY, USB_TYPE_VENDOR|USB_DIR_IN,
343 /*unused*/0, /*unused*/0,
344 /*dummy*/ir->buf_in, /*dummy*/ir->len_in,
345 /*timeout*/HZ * USB_CTRL_GET_TIMEOUT);
346 if (ret < 0)
347 printk(DRIVER_NAME "[%d]: SET_INFRABUFFER_EMPTY: error %d\n",
348 ir->devnum, ret);
349 return 0;
350 } else if (ret < 0)
351 printk(DRIVER_NAME "[%d]: GET_INFRACODE: error %d\n",
352 ir->devnum, ret);
353
354 return -ENODATA;
355}
356
357static int igorplugusb_remote_probe(struct usb_interface *intf,
358 const struct usb_device_id *id)
359{
360 struct usb_device *dev;
361 struct usb_host_interface *idesc = NULL;
362 struct usb_endpoint_descriptor *ep;
363 struct igorplug *ir = NULL;
364 struct lirc_driver *driver = NULL;
365 int devnum, pipe, maxp;
366 char buf[63], name[128] = "";
367 int ret;
368
369 dev_dbg(&intf->dev, "%s: usb probe called.\n", __func__);
370
371 dev = interface_to_usbdev(intf);
372
373 idesc = intf->cur_altsetting;
374
375 if (idesc->desc.bNumEndpoints != 1)
376 return -ENODEV;
377
378 ep = &idesc->endpoint->desc;
379 if (((ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK)
380 != USB_DIR_IN)
381 || (ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
382 != USB_ENDPOINT_XFER_CONTROL)
383 return -ENODEV;
384
385 pipe = usb_rcvctrlpipe(dev, ep->bEndpointAddress);
386 devnum = dev->devnum;
387 maxp = usb_maxpacket(dev, pipe, usb_pipeout(pipe));
388
389 dev_dbg(&intf->dev, "%s: bytes_in_key=%zu maxp=%d\n",
390 __func__, CODE_LENGTH, maxp);
391
392 ir = devm_kzalloc(&intf->dev, sizeof(*ir), GFP_KERNEL);
393 if (!ir)
394 return -ENOMEM;
395
396 driver = devm_kzalloc(&intf->dev, sizeof(*driver), GFP_KERNEL);
397 if (!driver)
398 return -ENOMEM;
399
400 ir->buf_in = usb_alloc_coherent(dev, DEVICE_BUFLEN + DEVICE_HEADERLEN,
401 GFP_ATOMIC, &ir->dma_in);
402 if (!ir->buf_in)
403 return -ENOMEM;
404
405 strcpy(driver->name, DRIVER_NAME " ");
406 driver->minor = -1;
407 driver->code_length = CODE_LENGTH * 8; /* in bits */
408 driver->features = LIRC_CAN_REC_MODE2;
409 driver->data = ir;
410 driver->chunk_size = CODE_LENGTH;
411 driver->buffer_size = DEVICE_BUFLEN + ADDITIONAL_LIRC_BYTES;
412 driver->set_use_inc = &set_use_inc;
413 driver->set_use_dec = &set_use_dec;
414 driver->sample_rate = sample_rate; /* per second */
415 driver->add_to_buf = &igorplugusb_remote_poll;
416 driver->dev = &intf->dev;
417 driver->owner = THIS_MODULE;
418
419 ret = lirc_register_driver(driver);
420 if (ret < 0) {
421 usb_free_coherent(dev, DEVICE_BUFLEN + DEVICE_HEADERLEN,
422 ir->buf_in, ir->dma_in);
423 return ret;
424 }
425
426 driver->minor = ret;
427 ir->d = driver;
428 ir->devnum = devnum;
429 ir->usbdev = dev;
430 ir->len_in = DEVICE_BUFLEN + DEVICE_HEADERLEN;
431 ir->in_space = 1; /* First mode2 event is a space. */
432 do_gettimeofday(&ir->last_time);
433
434 if (dev->descriptor.iManufacturer
435 && usb_string(dev, dev->descriptor.iManufacturer,
436 buf, sizeof(buf)) > 0)
437 strlcpy(name, buf, sizeof(name));
438 if (dev->descriptor.iProduct
439 && usb_string(dev, dev->descriptor.iProduct, buf, sizeof(buf)) > 0)
440 snprintf(name + strlen(name), sizeof(name) - strlen(name),
441 " %s", buf);
442 printk(DRIVER_NAME "[%d]: %s on usb%d:%d\n", devnum, name,
443 dev->bus->busnum, devnum);
444
445 /* clear device buffer */
446 ret = usb_control_msg(ir->usbdev, usb_rcvctrlpipe(ir->usbdev, 0),
447 SET_INFRABUFFER_EMPTY, USB_TYPE_VENDOR|USB_DIR_IN,
448 /*unused*/0, /*unused*/0,
449 /*dummy*/ir->buf_in, /*dummy*/ir->len_in,
450 /*timeout*/HZ * USB_CTRL_GET_TIMEOUT);
451 if (ret < 0)
452 printk(DRIVER_NAME "[%d]: SET_INFRABUFFER_EMPTY: error %d\n",
453 devnum, ret);
454
455 usb_set_intfdata(intf, ir);
456 return 0;
457}
458
459static void igorplugusb_remote_disconnect(struct usb_interface *intf)
460{
461 struct usb_device *usbdev = interface_to_usbdev(intf);
462 struct igorplug *ir = usb_get_intfdata(intf);
463 struct device *dev = &intf->dev;
464 int devnum;
465
466 usb_set_intfdata(intf, NULL);
467
468 if (!ir || !ir->d)
469 return;
470
471 ir->usbdev = NULL;
472
473 usb_free_coherent(usbdev, ir->len_in, ir->buf_in, ir->dma_in);
474
475 devnum = unregister_from_lirc(ir);
476
477 dev_info(dev, DRIVER_NAME "[%d]: %s done\n", devnum, __func__);
478}
479
480static struct usb_device_id igorplugusb_remote_id_table[] = {
481 /* Igor Plug USB (Atmel's Manufact. ID) */
482 { USB_DEVICE(0x03eb, 0x0002) },
483 /* Fit PC2 Infrared Adapter */
484 { USB_DEVICE(0x03eb, 0x21fe) },
485
486 /* Terminating entry */
487 { }
488};
489
490static struct usb_driver igorplugusb_remote_driver = {
491 .name = DRIVER_NAME,
492 .probe = igorplugusb_remote_probe,
493 .disconnect = igorplugusb_remote_disconnect,
494 .id_table = igorplugusb_remote_id_table
495};
496
497module_usb_driver(igorplugusb_remote_driver);
498
499#include <linux/vermagic.h>
500MODULE_INFO(vermagic, VERMAGIC_STRING);
501
502MODULE_DESCRIPTION(DRIVER_DESC);
503MODULE_AUTHOR(DRIVER_AUTHOR);
504MODULE_LICENSE("GPL");
505MODULE_DEVICE_TABLE(usb, igorplugusb_remote_id_table);
506
507module_param(sample_rate, int, S_IRUGO | S_IWUSR);
508MODULE_PARM_DESC(sample_rate, "Sampling rate in Hz (default: 100)");
diff --git a/drivers/staging/media/lirc/lirc_imon.c b/drivers/staging/media/lirc/lirc_imon.c
index 7aca44f28c5a..232edd5b1742 100644
--- a/drivers/staging/media/lirc/lirc_imon.c
+++ b/drivers/staging/media/lirc/lirc_imon.c
@@ -606,7 +606,6 @@ static void imon_incoming_packet(struct imon_context *context,
606 struct device *dev = context->driver->dev; 606 struct device *dev = context->driver->dev;
607 int octet, bit; 607 int octet, bit;
608 unsigned char mask; 608 unsigned char mask;
609 int i;
610 609
611 /* 610 /*
612 * just bail out if no listening IR client 611 * just bail out if no listening IR client
@@ -620,13 +619,8 @@ static void imon_incoming_packet(struct imon_context *context,
620 return; 619 return;
621 } 620 }
622 621
623 if (debug) { 622 if (debug)
624 dev_info(dev, "raw packet: "); 623 dev_info(dev, "raw packet: %*ph\n", len, buf);
625 for (i = 0; i < len; ++i)
626 printk("%02x ", buf[i]);
627 printk("\n");
628 }
629
630 /* 624 /*
631 * Translate received data to pulse and space lengths. 625 * Translate received data to pulse and space lengths.
632 * Received data is active low, i.e. pulses are 0 and 626 * Received data is active low, i.e. pulses are 0 and
diff --git a/drivers/staging/media/lirc/lirc_sasem.c b/drivers/staging/media/lirc/lirc_sasem.c
index c20ef56202bf..2f0463eb9887 100644
--- a/drivers/staging/media/lirc/lirc_sasem.c
+++ b/drivers/staging/media/lirc/lirc_sasem.c
@@ -573,7 +573,6 @@ static void incoming_packet(struct sasem_context *context,
573 unsigned char *buf = urb->transfer_buffer; 573 unsigned char *buf = urb->transfer_buffer;
574 long ms; 574 long ms;
575 struct timeval tv; 575 struct timeval tv;
576 int i;
577 576
578 if (len != 8) { 577 if (len != 8) {
579 dev_warn(&context->dev->dev, 578 dev_warn(&context->dev->dev,
@@ -582,13 +581,8 @@ static void incoming_packet(struct sasem_context *context,
582 return; 581 return;
583 } 582 }
584 583
585 if (debug) { 584 if (debug)
586 printk(KERN_INFO "Incoming data: "); 585 dev_info(&context->dev->dev, "Incoming data: %*ph\n", len, buf);
587 for (i = 0; i < 8; ++i)
588 printk(KERN_CONT "%02x ", buf[i]);
589 printk(KERN_CONT "\n");
590 }
591
592 /* 586 /*
593 * Lirc could deal with the repeat code, but we really need to block it 587 * Lirc could deal with the repeat code, but we really need to block it
594 * if it arrives too late. Otherwise we could repeat the wrong code. 588 * if it arrives too late. Otherwise we could repeat the wrong code.
diff --git a/drivers/staging/media/lirc/lirc_zilog.c b/drivers/staging/media/lirc/lirc_zilog.c
index 567feba0011c..1e15d2cab727 100644
--- a/drivers/staging/media/lirc/lirc_zilog.c
+++ b/drivers/staging/media/lirc/lirc_zilog.c
@@ -199,7 +199,7 @@ static void release_ir_device(struct kref *ref)
199 lirc_unregister_driver(ir->l.minor); 199 lirc_unregister_driver(ir->l.minor);
200 ir->l.minor = MAX_IRCTL_DEVICES; 200 ir->l.minor = MAX_IRCTL_DEVICES;
201 } 201 }
202 if (ir->rbuf.fifo_initialized) 202 if (kfifo_initialized(&ir->rbuf.fifo))
203 lirc_buffer_free(&ir->rbuf); 203 lirc_buffer_free(&ir->rbuf);
204 list_del(&ir->list); 204 list_del(&ir->list);
205 kfree(ir); 205 kfree(ir);
@@ -730,11 +730,9 @@ static int send_boot_data(struct IR_tx *tx)
730static void fw_unload_locked(void) 730static void fw_unload_locked(void)
731{ 731{
732 if (tx_data) { 732 if (tx_data) {
733 if (tx_data->code_sets) 733 vfree(tx_data->code_sets);
734 vfree(tx_data->code_sets);
735 734
736 if (tx_data->datap) 735 vfree(tx_data->datap);
737 vfree(tx_data->datap);
738 736
739 vfree(tx_data); 737 vfree(tx_data);
740 tx_data = NULL; 738 tx_data = NULL;
diff --git a/drivers/staging/media/mn88472/Kconfig b/drivers/staging/media/mn88472/Kconfig
new file mode 100644
index 000000000000..a85c90a60bce
--- /dev/null
+++ b/drivers/staging/media/mn88472/Kconfig
@@ -0,0 +1,7 @@
1config DVB_MN88472
2 tristate "Panasonic MN88472"
3 depends on DVB_CORE && I2C
4 select REGMAP_I2C
5 default m if !MEDIA_SUBDRV_AUTOSELECT
6 help
7 Say Y when you want to support this frontend.
diff --git a/drivers/staging/media/mn88472/Makefile b/drivers/staging/media/mn88472/Makefile
new file mode 100644
index 000000000000..5987b7e6d82a
--- /dev/null
+++ b/drivers/staging/media/mn88472/Makefile
@@ -0,0 +1,5 @@
1obj-$(CONFIG_DVB_MN88472) += mn88472.o
2
3ccflags-y += -Idrivers/media/dvb-core/
4ccflags-y += -Idrivers/media/dvb-frontends/
5ccflags-y += -Idrivers/media/tuners/
diff --git a/drivers/staging/media/mn88472/TODO b/drivers/staging/media/mn88472/TODO
new file mode 100644
index 000000000000..b90a14be3beb
--- /dev/null
+++ b/drivers/staging/media/mn88472/TODO
@@ -0,0 +1,21 @@
1Driver general quality is not good enough for mainline. Also, other
2device drivers (USB-bridge, tuner) needed for Astrometa receiver in
3question could need some changes. However, if that driver is mainlined
4due to some other device than Astrometa, unrelated TODOs could be
5skipped. In that case rtl28xxu driver needs module parameter to prevent
6driver loading.
7
8Required TODOs:
9* missing lock flags
10* I2C errors
11* tuner sensitivity
12
13*Do not* send any patch fixing checkpatch.pl issues. Currently it passes
14checkpatch.pl tests. I don't want waste my time to review this kind of
15trivial stuff. *Do not* add missing register I/O error checks. Those are
16missing for the reason it is much easier to compare I2C data sniffs when
17there is less lines. Those error checks are about the last thing to be added.
18
19Patches should be submitted to:
20linux-media@vger.kernel.org and Antti Palosaari <crope@iki.fi>
21
diff --git a/drivers/staging/media/mn88472/mn88472.c b/drivers/staging/media/mn88472/mn88472.c
new file mode 100644
index 000000000000..52de8f85d36c
--- /dev/null
+++ b/drivers/staging/media/mn88472/mn88472.c
@@ -0,0 +1,523 @@
1/*
2 * Panasonic MN88472 DVB-T/T2/C demodulator driver
3 *
4 * Copyright (C) 2013 Antti Palosaari <crope@iki.fi>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16
17#include "mn88472_priv.h"
18
19static int mn88472_get_tune_settings(struct dvb_frontend *fe,
20 struct dvb_frontend_tune_settings *s)
21{
22 s->min_delay_ms = 400;
23 return 0;
24}
25
/*
 * Configure the demodulator for the delivery system, bandwidth and IF
 * selected in the DVB property cache, then program the attached tuner.
 *
 * Returns 0 on success, -EAGAIN when firmware is not yet running (see
 * mn88472_init()), -EINVAL for unsupported parameters/IF, or a regmap
 * I/O error code.
 */
static int mn88472_set_frontend(struct dvb_frontend *fe)
{
	struct i2c_client *client = fe->demodulator_priv;
	struct mn88472_dev *dev = i2c_get_clientdata(client);
	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
	int ret, i;
	u32 if_frequency = 0;
	u8 delivery_system_val, if_val[3], bw_val[7], bw_val2;

	dev_dbg(&client->dev,
			"delivery_system=%d modulation=%d frequency=%d symbol_rate=%d inversion=%d\n",
			c->delivery_system, c->modulation,
			c->frequency, c->symbol_rate, c->inversion);

	/* Tuning is meaningless until the firmware has been downloaded. */
	if (!dev->warm) {
		ret = -EAGAIN;
		goto err;
	}

	/* Map the requested delivery system to the chip's system selector. */
	switch (c->delivery_system) {
	case SYS_DVBT:
		delivery_system_val = 0x02;
		break;
	case SYS_DVBT2:
		delivery_system_val = 0x03;
		break;
	case SYS_DVBC_ANNEX_A:
		delivery_system_val = 0x04;
		break;
	default:
		ret = -EINVAL;
		goto err;
	}

	/*
	 * Select IF and bandwidth coefficient tables. The byte strings are
	 * opaque chip register values taken from vendor data; the IF chosen
	 * here must match what the tuner reports below.
	 */
	switch (c->delivery_system) {
	case SYS_DVBT:
	case SYS_DVBT2:
		if (c->bandwidth_hz <= 6000000) {
			/* IF 3570000 Hz, BW 6000000 Hz */
			memcpy(if_val, "\x2c\x94\xdb", 3);
			memcpy(bw_val, "\xbf\x55\x55\x15\x6b\x15\x6b", 7);
			bw_val2 = 0x02;
		} else if (c->bandwidth_hz <= 7000000) {
			/* IF 4570000 Hz, BW 7000000 Hz */
			memcpy(if_val, "\x39\x11\xbc", 3);
			memcpy(bw_val, "\xa4\x00\x00\x0f\x2c\x0f\x2c", 7);
			bw_val2 = 0x01;
		} else if (c->bandwidth_hz <= 8000000) {
			/* IF 4570000 Hz, BW 8000000 Hz */
			memcpy(if_val, "\x39\x11\xbc", 3);
			memcpy(bw_val, "\x8f\x80\x00\x08\xee\x08\xee", 7);
			bw_val2 = 0x00;
		} else {
			ret = -EINVAL;
			goto err;
		}
		break;
	case SYS_DVBC_ANNEX_A:
		/* IF 5070000 Hz, BW 8000000 Hz */
		memcpy(if_val, "\x3f\x50\x2c", 3);
		memcpy(bw_val, "\x8f\x80\x00\x08\xee\x08\xee", 7);
		bw_val2 = 0x00;
		break;
	default:
		ret = -EINVAL;
		goto err;
	}

	/* program tuner */
	if (fe->ops.tuner_ops.set_params) {
		ret = fe->ops.tuner_ops.set_params(fe);
		if (ret)
			goto err;
	}

	if (fe->ops.tuner_ops.get_if_frequency) {
		ret = fe->ops.tuner_ops.get_if_frequency(fe, &if_frequency);
		if (ret)
			goto err;

		dev_dbg(&client->dev, "get_if_frequency=%d\n", if_frequency);
	}

	/*
	 * Only the three IFs from the tables above are usable. Note that a
	 * tuner without get_if_frequency() leaves if_frequency at 0 and
	 * therefore fails here.
	 */
	switch (if_frequency) {
	case 3570000:
	case 4570000:
	case 5070000:
		break;
	default:
		dev_err(&client->dev, "IF frequency %d not supported\n",
				if_frequency);
		ret = -EINVAL;
		goto err;
	}

	/*
	 * NOTE(review): only the last write's status is checked here; earlier
	 * failures are lost. The driver TODO deliberately defers register I/O
	 * error checks to keep the sequence comparable against I2C sniffs.
	 */
	ret = regmap_write(dev->regmap[2], 0xfb, 0x13);
	ret = regmap_write(dev->regmap[2], 0xef, 0x13);
	ret = regmap_write(dev->regmap[2], 0xf9, 0x13);
	if (ret)
		goto err;

	/* Common tuning setup: system selector and bandwidth mode. */
	ret = regmap_write(dev->regmap[2], 0x00, 0x66);
	if (ret)
		goto err;
	ret = regmap_write(dev->regmap[2], 0x01, 0x00);
	if (ret)
		goto err;
	ret = regmap_write(dev->regmap[2], 0x02, 0x01);
	if (ret)
		goto err;
	ret = regmap_write(dev->regmap[2], 0x03, delivery_system_val);
	if (ret)
		goto err;
	ret = regmap_write(dev->regmap[2], 0x04, bw_val2);
	if (ret)
		goto err;

	/* IF coefficients at registers 0x10..0x12 */
	for (i = 0; i < sizeof(if_val); i++) {
		ret = regmap_write(dev->regmap[2], 0x10 + i, if_val[i]);
		if (ret)
			goto err;
	}

	/* Bandwidth coefficients at registers 0x13..0x19 */
	for (i = 0; i < sizeof(bw_val); i++) {
		ret = regmap_write(dev->regmap[2], 0x13 + i, bw_val[i]);
		if (ret)
			goto err;
	}

	/*
	 * Per-system register setup. As above, only the final write of each
	 * run is error-checked (intentional, see driver TODO).
	 */
	switch (c->delivery_system) {
	case SYS_DVBT:
		ret = regmap_write(dev->regmap[0], 0x07, 0x26);
		ret = regmap_write(dev->regmap[0], 0xb0, 0x0a);
		ret = regmap_write(dev->regmap[0], 0xb4, 0x00);
		ret = regmap_write(dev->regmap[0], 0xcd, 0x1f);
		ret = regmap_write(dev->regmap[0], 0xd4, 0x0a);
		ret = regmap_write(dev->regmap[0], 0xd6, 0x48);
		ret = regmap_write(dev->regmap[0], 0x00, 0xba);
		ret = regmap_write(dev->regmap[0], 0x01, 0x13);
		if (ret)
			goto err;
		break;
	case SYS_DVBT2:
		ret = regmap_write(dev->regmap[2], 0x2b, 0x13);
		ret = regmap_write(dev->regmap[2], 0x4f, 0x05);
		ret = regmap_write(dev->regmap[1], 0xf6, 0x05);
		ret = regmap_write(dev->regmap[0], 0xb0, 0x0a);
		ret = regmap_write(dev->regmap[0], 0xb4, 0xf6);
		ret = regmap_write(dev->regmap[0], 0xcd, 0x01);
		ret = regmap_write(dev->regmap[0], 0xd4, 0x09);
		ret = regmap_write(dev->regmap[0], 0xd6, 0x46);
		ret = regmap_write(dev->regmap[2], 0x30, 0x80);
		ret = regmap_write(dev->regmap[2], 0x32, 0x00);
		if (ret)
			goto err;
		break;
	case SYS_DVBC_ANNEX_A:
		ret = regmap_write(dev->regmap[0], 0xb0, 0x0b);
		ret = regmap_write(dev->regmap[0], 0xb4, 0x00);
		ret = regmap_write(dev->regmap[0], 0xcd, 0x17);
		ret = regmap_write(dev->regmap[0], 0xd4, 0x09);
		ret = regmap_write(dev->regmap[0], 0xd6, 0x48);
		ret = regmap_write(dev->regmap[1], 0x00, 0xb0);
		if (ret)
			goto err;
		break;
	default:
		ret = -EINVAL;
		goto err;
	}

	/* Finalize: kick the demodulator to apply the new settings. */
	ret = regmap_write(dev->regmap[0], 0x46, 0x00);
	ret = regmap_write(dev->regmap[0], 0xae, 0x00);
	ret = regmap_write(dev->regmap[2], 0x08, 0x1d);
	ret = regmap_write(dev->regmap[0], 0xd9, 0xe3);
	ret = regmap_write(dev->regmap[2], 0xf8, 0x9f);
	if (ret)
		goto err;

	dev->delivery_system = c->delivery_system;

	return 0;
err:
	dev_dbg(&client->dev, "failed=%d\n", ret);
	return ret;
}
212
213static int mn88472_read_status(struct dvb_frontend *fe, fe_status_t *status)
214{
215 struct i2c_client *client = fe->demodulator_priv;
216 struct mn88472_dev *dev = i2c_get_clientdata(client);
217 struct dtv_frontend_properties *c = &fe->dtv_property_cache;
218 int ret;
219 unsigned int utmp;
220
221 *status = 0;
222
223 if (!dev->warm) {
224 ret = -EAGAIN;
225 goto err;
226 }
227
228 switch (c->delivery_system) {
229 case SYS_DVBT:
230 case SYS_DVBT2:
231 /* FIXME: implement me */
232 utmp = 0x08; /* DVB-C lock value */
233 break;
234 case SYS_DVBC_ANNEX_A:
235 ret = regmap_read(dev->regmap[1], 0x84, &utmp);
236 if (ret)
237 goto err;
238 break;
239 default:
240 ret = -EINVAL;
241 goto err;
242 }
243
244 if (utmp == 0x08)
245 *status = FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_VITERBI |
246 FE_HAS_SYNC | FE_HAS_LOCK;
247
248 return 0;
249err:
250 dev_dbg(&client->dev, "failed=%d\n", ret);
251 return ret;
252}
253
254static int mn88472_init(struct dvb_frontend *fe)
255{
256 struct i2c_client *client = fe->demodulator_priv;
257 struct mn88472_dev *dev = i2c_get_clientdata(client);
258 int ret, len, remaining;
259 const struct firmware *fw = NULL;
260 u8 *fw_file = MN88472_FIRMWARE;
261
262 dev_dbg(&client->dev, "\n");
263
264 /* set cold state by default */
265 dev->warm = false;
266
267 /* power on */
268 ret = regmap_write(dev->regmap[2], 0x05, 0x00);
269 if (ret)
270 goto err;
271
272 ret = regmap_bulk_write(dev->regmap[2], 0x0b, "\x00\x00", 2);
273 if (ret)
274 goto err;
275
276 /* request the firmware, this will block and timeout */
277 ret = request_firmware(&fw, fw_file, &client->dev);
278 if (ret) {
279 dev_err(&client->dev, "firmare file '%s' not found\n",
280 fw_file);
281 goto err;
282 }
283
284 dev_info(&client->dev, "downloading firmware from file '%s'\n",
285 fw_file);
286
287 ret = regmap_write(dev->regmap[0], 0xf5, 0x03);
288 if (ret)
289 goto err;
290
291 for (remaining = fw->size; remaining > 0;
292 remaining -= (dev->i2c_wr_max - 1)) {
293 len = remaining;
294 if (len > (dev->i2c_wr_max - 1))
295 len = (dev->i2c_wr_max - 1);
296
297 ret = regmap_bulk_write(dev->regmap[0], 0xf6,
298 &fw->data[fw->size - remaining], len);
299 if (ret) {
300 dev_err(&client->dev,
301 "firmware download failed=%d\n", ret);
302 goto err;
303 }
304 }
305
306 ret = regmap_write(dev->regmap[0], 0xf5, 0x00);
307 if (ret)
308 goto err;
309
310 release_firmware(fw);
311 fw = NULL;
312
313 /* warm state */
314 dev->warm = true;
315
316 return 0;
317err:
318 if (fw)
319 release_firmware(fw);
320
321 dev_dbg(&client->dev, "failed=%d\n", ret);
322 return ret;
323}
324
325static int mn88472_sleep(struct dvb_frontend *fe)
326{
327 struct i2c_client *client = fe->demodulator_priv;
328 struct mn88472_dev *dev = i2c_get_clientdata(client);
329 int ret;
330
331 dev_dbg(&client->dev, "\n");
332
333 /* power off */
334 ret = regmap_write(dev->regmap[2], 0x0b, 0x30);
335
336 if (ret)
337 goto err;
338
339 ret = regmap_write(dev->regmap[2], 0x05, 0x3e);
340 if (ret)
341 goto err;
342
343 dev->delivery_system = SYS_UNDEFINED;
344
345 return 0;
346err:
347 dev_dbg(&client->dev, "failed=%d\n", ret);
348 return ret;
349}
350
/* Frontend operations and capabilities exported to the DVB core. */
static struct dvb_frontend_ops mn88472_ops = {
	.delsys = {SYS_DVBT, SYS_DVBT2, SYS_DVBC_ANNEX_A},
	.info = {
		.name = "Panasonic MN88472",
		/* Capability mask advertised to userspace via FE_GET_INFO. */
		.caps = FE_CAN_FEC_1_2 |
			FE_CAN_FEC_2_3 |
			FE_CAN_FEC_3_4 |
			FE_CAN_FEC_5_6 |
			FE_CAN_FEC_7_8 |
			FE_CAN_FEC_AUTO |
			FE_CAN_QPSK |
			FE_CAN_QAM_16 |
			FE_CAN_QAM_32 |
			FE_CAN_QAM_64 |
			FE_CAN_QAM_128 |
			FE_CAN_QAM_256 |
			FE_CAN_QAM_AUTO |
			FE_CAN_TRANSMISSION_MODE_AUTO |
			FE_CAN_GUARD_INTERVAL_AUTO |
			FE_CAN_HIERARCHY_AUTO |
			FE_CAN_MUTE_TS |
			FE_CAN_2G_MODULATION |
			FE_CAN_MULTISTREAM
	},

	.get_tune_settings = mn88472_get_tune_settings,

	.init = mn88472_init,
	.sleep = mn88472_sleep,

	.set_frontend = mn88472_set_frontend,

	.read_status = mn88472_read_status,
};
385
386static int mn88472_probe(struct i2c_client *client,
387 const struct i2c_device_id *id)
388{
389 struct mn88472_config *config = client->dev.platform_data;
390 struct mn88472_dev *dev;
391 int ret;
392 unsigned int utmp;
393 static const struct regmap_config regmap_config = {
394 .reg_bits = 8,
395 .val_bits = 8,
396 };
397
398 dev_dbg(&client->dev, "\n");
399
400 /* Caller really need to provide pointer for frontend we create. */
401 if (config->fe == NULL) {
402 dev_err(&client->dev, "frontend pointer not defined\n");
403 ret = -EINVAL;
404 goto err;
405 }
406
407 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
408 if (dev == NULL) {
409 ret = -ENOMEM;
410 goto err;
411 }
412
413 dev->i2c_wr_max = config->i2c_wr_max;
414 dev->client[0] = client;
415 dev->regmap[0] = regmap_init_i2c(dev->client[0], &regmap_config);
416 if (IS_ERR(dev->regmap[0])) {
417 ret = PTR_ERR(dev->regmap[0]);
418 goto err_kfree;
419 }
420
421 /* check demod answers to I2C */
422 ret = regmap_read(dev->regmap[0], 0x00, &utmp);
423 if (ret)
424 goto err_regmap_0_regmap_exit;
425
426 /*
427 * Chip has three I2C addresses for different register pages. Used
428 * addresses are 0x18, 0x1a and 0x1c. We register two dummy clients,
429 * 0x1a and 0x1c, in order to get own I2C client for each register page.
430 */
431 dev->client[1] = i2c_new_dummy(client->adapter, 0x1a);
432 if (dev->client[1] == NULL) {
433 ret = -ENODEV;
434 dev_err(&client->dev, "I2C registration failed\n");
435 if (ret)
436 goto err_regmap_0_regmap_exit;
437 }
438 dev->regmap[1] = regmap_init_i2c(dev->client[1], &regmap_config);
439 if (IS_ERR(dev->regmap[1])) {
440 ret = PTR_ERR(dev->regmap[1]);
441 goto err_client_1_i2c_unregister_device;
442 }
443 i2c_set_clientdata(dev->client[1], dev);
444
445 dev->client[2] = i2c_new_dummy(client->adapter, 0x1c);
446 if (dev->client[2] == NULL) {
447 ret = -ENODEV;
448 dev_err(&client->dev, "2nd I2C registration failed\n");
449 if (ret)
450 goto err_regmap_1_regmap_exit;
451 }
452 dev->regmap[2] = regmap_init_i2c(dev->client[2], &regmap_config);
453 if (IS_ERR(dev->regmap[2])) {
454 ret = PTR_ERR(dev->regmap[2]);
455 goto err_client_2_i2c_unregister_device;
456 }
457 i2c_set_clientdata(dev->client[2], dev);
458
459 /* create dvb_frontend */
460 memcpy(&dev->fe.ops, &mn88472_ops, sizeof(struct dvb_frontend_ops));
461 dev->fe.demodulator_priv = client;
462 *config->fe = &dev->fe;
463 i2c_set_clientdata(client, dev);
464
465 dev_info(&client->dev, "Panasonic MN88472 successfully attached\n");
466 return 0;
467
468err_client_2_i2c_unregister_device:
469 i2c_unregister_device(dev->client[2]);
470err_regmap_1_regmap_exit:
471 regmap_exit(dev->regmap[1]);
472err_client_1_i2c_unregister_device:
473 i2c_unregister_device(dev->client[1]);
474err_regmap_0_regmap_exit:
475 regmap_exit(dev->regmap[0]);
476err_kfree:
477 kfree(dev);
478err:
479 dev_dbg(&client->dev, "failed=%d\n", ret);
480 return ret;
481}
482
483static int mn88472_remove(struct i2c_client *client)
484{
485 struct mn88472_dev *dev = i2c_get_clientdata(client);
486
487 dev_dbg(&client->dev, "\n");
488
489 regmap_exit(dev->regmap[2]);
490 i2c_unregister_device(dev->client[2]);
491
492 regmap_exit(dev->regmap[1]);
493 i2c_unregister_device(dev->client[1]);
494
495 regmap_exit(dev->regmap[0]);
496
497 kfree(dev);
498
499 return 0;
500}
501
/* I2C device IDs this driver binds to. */
static const struct i2c_device_id mn88472_id_table[] = {
	{"mn88472", 0},
	{}
};
MODULE_DEVICE_TABLE(i2c, mn88472_id_table);

static struct i2c_driver mn88472_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name = "mn88472",
	},
	.probe = mn88472_probe,
	.remove = mn88472_remove,
	.id_table = mn88472_id_table,
};

/* Registers/unregisters the driver at module load/unload. */
module_i2c_driver(mn88472_driver);

MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
MODULE_DESCRIPTION("Panasonic MN88472 DVB-T/T2/C demodulator driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(MN88472_FIRMWARE);
diff --git a/drivers/staging/media/mn88472/mn88472_priv.h b/drivers/staging/media/mn88472/mn88472_priv.h
new file mode 100644
index 000000000000..1095949f040d
--- /dev/null
+++ b/drivers/staging/media/mn88472/mn88472_priv.h
@@ -0,0 +1,36 @@
1/*
2 * Panasonic MN88472 DVB-T/T2/C demodulator driver
3 *
4 * Copyright (C) 2013 Antti Palosaari <crope@iki.fi>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16
17#ifndef MN88472_PRIV_H
18#define MN88472_PRIV_H
19
20#include "dvb_frontend.h"
21#include "mn88472.h"
22#include <linux/firmware.h>
23#include <linux/regmap.h>
24
25#define MN88472_FIRMWARE "dvb-demod-mn88472-02.fw"
26
/* Per-device state shared by the three register-page I2C clients. */
struct mn88472_dev {
	struct i2c_client *client[3];	/* register pages 0x18, 0x1a, 0x1c */
	struct regmap *regmap[3];	/* one regmap per register page */
	struct dvb_frontend fe;		/* embedded frontend handed to caller */
	u16 i2c_wr_max;			/* chunk limit for firmware download writes */
	fe_delivery_system_t delivery_system;	/* currently tuned system */
	bool warm; /* FW running */
};
35
36#endif
diff --git a/drivers/staging/media/mn88473/Kconfig b/drivers/staging/media/mn88473/Kconfig
new file mode 100644
index 000000000000..6c9ebf51c2c7
--- /dev/null
+++ b/drivers/staging/media/mn88473/Kconfig
@@ -0,0 +1,7 @@
1config DVB_MN88473
2 tristate "Panasonic MN88473"
3 depends on DVB_CORE && I2C
4 select REGMAP_I2C
5 default m if !MEDIA_SUBDRV_AUTOSELECT
6 help
7 Say Y when you want to support this frontend.
diff --git a/drivers/staging/media/mn88473/Makefile b/drivers/staging/media/mn88473/Makefile
new file mode 100644
index 000000000000..fac55410ce55
--- /dev/null
+++ b/drivers/staging/media/mn88473/Makefile
@@ -0,0 +1,5 @@
1obj-$(CONFIG_DVB_MN88473) += mn88473.o
2
3ccflags-y += -Idrivers/media/dvb-core/
4ccflags-y += -Idrivers/media/dvb-frontends/
5ccflags-y += -Idrivers/media/tuners/
diff --git a/drivers/staging/media/mn88473/TODO b/drivers/staging/media/mn88473/TODO
new file mode 100644
index 000000000000..b90a14be3beb
--- /dev/null
+++ b/drivers/staging/media/mn88473/TODO
@@ -0,0 +1,21 @@
1Driver general quality is not good enough for mainline. Also, other
2device drivers (USB-bridge, tuner) needed for Astrometa receiver in
3question could need some changes. However, if that driver is mainlined
4due to some other device than Astrometa, unrelated TODOs could be
5skipped. In that case rtl28xxu driver needs module parameter to prevent
6driver loading.
7
8Required TODOs:
9* missing lock flags
10* I2C errors
11* tuner sensitivity
12
*Do not* send any patch fixing checkpatch.pl issues. Currently it passes
checkpatch.pl tests. I don't want to waste my time reviewing this kind of
trivial stuff. *Do not* add missing register I/O error checks. Those are
16missing for the reason it is much easier to compare I2C data sniffs when
17there is less lines. Those error checks are about the last thing to be added.
18
19Patches should be submitted to:
20linux-media@vger.kernel.org and Antti Palosaari <crope@iki.fi>
21
diff --git a/drivers/staging/media/mn88473/mn88473.c b/drivers/staging/media/mn88473/mn88473.c
new file mode 100644
index 000000000000..a333744b76b9
--- /dev/null
+++ b/drivers/staging/media/mn88473/mn88473.c
@@ -0,0 +1,464 @@
1/*
2 * Panasonic MN88473 DVB-T/T2/C demodulator driver
3 *
4 * Copyright (C) 2014 Antti Palosaari <crope@iki.fi>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16
17#include "mn88473_priv.h"
18
19static int mn88473_get_tune_settings(struct dvb_frontend *fe,
20 struct dvb_frontend_tune_settings *s)
21{
22 s->min_delay_ms = 1000;
23 return 0;
24}
25
/*
 * Configure the demodulator for the delivery system, bandwidth and IF
 * selected in the DVB property cache, then program the attached tuner.
 *
 * Returns 0 on success, -EAGAIN when firmware is not yet running (see
 * mn88473_init()), -EINVAL for unsupported parameters/IF, or a regmap
 * I/O error code.
 */
static int mn88473_set_frontend(struct dvb_frontend *fe)
{
	struct i2c_client *client = fe->demodulator_priv;
	struct mn88473_dev *dev = i2c_get_clientdata(client);
	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
	int ret, i;
	u32 if_frequency;
	u8 delivery_system_val, if_val[3], bw_val[7];

	dev_dbg(&client->dev,
			"delivery_system=%u modulation=%u frequency=%u bandwidth_hz=%u symbol_rate=%u inversion=%d stream_id=%d\n",
			c->delivery_system, c->modulation,
			c->frequency, c->bandwidth_hz, c->symbol_rate,
			c->inversion, c->stream_id);

	/* Tuning is meaningless until the firmware has been downloaded. */
	if (!dev->warm) {
		ret = -EAGAIN;
		goto err;
	}

	/* Map the requested delivery system to the chip's system selector. */
	switch (c->delivery_system) {
	case SYS_DVBT:
		delivery_system_val = 0x02;
		break;
	case SYS_DVBT2:
		delivery_system_val = 0x03;
		break;
	case SYS_DVBC_ANNEX_A:
		delivery_system_val = 0x04;
		break;
	default:
		ret = -EINVAL;
		goto err;
	}

	/*
	 * Select IF and bandwidth coefficient tables. The byte strings are
	 * opaque chip register values taken from vendor data; the IF chosen
	 * here must match what the tuner reports below.
	 */
	switch (c->delivery_system) {
	case SYS_DVBT:
	case SYS_DVBT2:
		if (c->bandwidth_hz <= 6000000) {
			/* IF 3570000 Hz, BW 6000000 Hz */
			memcpy(if_val, "\x24\x8e\x8a", 3);
			memcpy(bw_val, "\xe9\x55\x55\x1c\x29\x1c\x29", 7);
		} else if (c->bandwidth_hz <= 7000000) {
			/* IF 4570000 Hz, BW 7000000 Hz */
			memcpy(if_val, "\x2e\xcb\xfb", 3);
			memcpy(bw_val, "\xc8\x00\x00\x17\x0a\x17\x0a", 7);
		} else if (c->bandwidth_hz <= 8000000) {
			/* IF 4570000 Hz, BW 8000000 Hz */
			memcpy(if_val, "\x2e\xcb\xfb", 3);
			memcpy(bw_val, "\xaf\x00\x00\x11\xec\x11\xec", 7);
		} else {
			ret = -EINVAL;
			goto err;
		}
		break;
	case SYS_DVBC_ANNEX_A:
		/* IF 5070000 Hz, BW 8000000 Hz */
		memcpy(if_val, "\x33\xea\xb3", 3);
		memcpy(bw_val, "\xaf\x00\x00\x11\xec\x11\xec", 7);
		break;
	default:
		ret = -EINVAL;
		goto err;
	}

	/* program tuner */
	if (fe->ops.tuner_ops.set_params) {
		ret = fe->ops.tuner_ops.set_params(fe);
		if (ret)
			goto err;
	}

	if (fe->ops.tuner_ops.get_if_frequency) {
		ret = fe->ops.tuner_ops.get_if_frequency(fe, &if_frequency);
		if (ret)
			goto err;

		dev_dbg(&client->dev, "get_if_frequency=%d\n", if_frequency);
	} else {
		/* No IF reported -> will be rejected by the switch below. */
		if_frequency = 0;
	}

	/* Only the three IFs from the tables above are usable. */
	switch (if_frequency) {
	case 3570000:
	case 4570000:
	case 5070000:
		break;
	default:
		dev_err(&client->dev, "IF frequency %d not supported\n",
				if_frequency);
		ret = -EINVAL;
		goto err;
	}

	/*
	 * NOTE(review): in the register runs below only the last write's
	 * status before each check is inspected; earlier failures are lost.
	 * The driver TODO deliberately defers register I/O error checks to
	 * keep the sequence comparable against I2C sniffs.
	 */
	ret = regmap_write(dev->regmap[2], 0x05, 0x00);
	ret = regmap_write(dev->regmap[2], 0xfb, 0x13);
	ret = regmap_write(dev->regmap[2], 0xef, 0x13);
	ret = regmap_write(dev->regmap[2], 0xf9, 0x13);
	ret = regmap_write(dev->regmap[2], 0x00, 0x18);
	ret = regmap_write(dev->regmap[2], 0x01, 0x01);
	ret = regmap_write(dev->regmap[2], 0x02, 0x21);
	ret = regmap_write(dev->regmap[2], 0x03, delivery_system_val);
	ret = regmap_write(dev->regmap[2], 0x0b, 0x00);

	/* IF coefficients at registers 0x10..0x12 */
	for (i = 0; i < sizeof(if_val); i++) {
		ret = regmap_write(dev->regmap[2], 0x10 + i, if_val[i]);
		if (ret)
			goto err;
	}

	/* Bandwidth coefficients at registers 0x13..0x19 */
	for (i = 0; i < sizeof(bw_val); i++) {
		ret = regmap_write(dev->regmap[2], 0x13 + i, bw_val[i]);
		if (ret)
			goto err;
	}

	/* Remaining vendor-provided initialization sequence. */
	ret = regmap_write(dev->regmap[2], 0x2d, 0x3b);
	ret = regmap_write(dev->regmap[2], 0x2e, 0x00);
	ret = regmap_write(dev->regmap[2], 0x56, 0x0d);
	ret = regmap_write(dev->regmap[0], 0x01, 0xba);
	ret = regmap_write(dev->regmap[0], 0x02, 0x13);
	ret = regmap_write(dev->regmap[0], 0x03, 0x80);
	ret = regmap_write(dev->regmap[0], 0x04, 0xba);
	ret = regmap_write(dev->regmap[0], 0x05, 0x91);
	ret = regmap_write(dev->regmap[0], 0x07, 0xe7);
	ret = regmap_write(dev->regmap[0], 0x08, 0x28);
	ret = regmap_write(dev->regmap[0], 0x0a, 0x1a);
	ret = regmap_write(dev->regmap[0], 0x13, 0x1f);
	ret = regmap_write(dev->regmap[0], 0x19, 0x03);
	ret = regmap_write(dev->regmap[0], 0x1d, 0xb0);
	ret = regmap_write(dev->regmap[0], 0x2a, 0x72);
	ret = regmap_write(dev->regmap[0], 0x2d, 0x00);
	ret = regmap_write(dev->regmap[0], 0x3c, 0x00);
	ret = regmap_write(dev->regmap[0], 0x3f, 0xf8);
	ret = regmap_write(dev->regmap[0], 0x40, 0xf4);
	ret = regmap_write(dev->regmap[0], 0x41, 0x08);
	ret = regmap_write(dev->regmap[0], 0xd2, 0x29);
	ret = regmap_write(dev->regmap[0], 0xd4, 0x55);
	ret = regmap_write(dev->regmap[1], 0x10, 0x10);
	ret = regmap_write(dev->regmap[1], 0x11, 0xab);
	ret = regmap_write(dev->regmap[1], 0x12, 0x0d);
	ret = regmap_write(dev->regmap[1], 0x13, 0xae);
	ret = regmap_write(dev->regmap[1], 0x14, 0x1d);
	ret = regmap_write(dev->regmap[1], 0x15, 0x9d);
	ret = regmap_write(dev->regmap[1], 0xbe, 0x08);
	ret = regmap_write(dev->regmap[2], 0x09, 0x08);
	ret = regmap_write(dev->regmap[2], 0x08, 0x1d);
	ret = regmap_write(dev->regmap[0], 0xb2, 0x37);
	ret = regmap_write(dev->regmap[0], 0xd7, 0x04);
	ret = regmap_write(dev->regmap[2], 0x32, 0x80);
	ret = regmap_write(dev->regmap[2], 0x36, 0x00);
	ret = regmap_write(dev->regmap[2], 0xf8, 0x9f);
	if (ret)
		goto err;

	dev->delivery_system = c->delivery_system;

	return 0;
err:
	dev_dbg(&client->dev, "failed=%d\n", ret);
	return ret;
}
188
/*
 * Report frontend status.
 *
 * No real lock detection is implemented yet: once the firmware is
 * running this unconditionally reports full lock, regardless of the
 * actual signal state. Returns -EAGAIN while the firmware is not
 * loaded.
 */
static int mn88473_read_status(struct dvb_frontend *fe, fe_status_t *status)
{
	struct i2c_client *client = fe->demodulator_priv;
	struct mn88473_dev *dev = i2c_get_clientdata(client);
	int ret;

	*status = 0;

	if (!dev->warm) {
		ret = -EAGAIN;
		goto err;
	}

	/* Faked: always claim full lock (see driver TODO). */
	*status = FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_VITERBI |
			FE_HAS_SYNC | FE_HAS_LOCK;

	return 0;
err:
	dev_dbg(&client->dev, "failed=%d\n", ret);
	return ret;
}
210
211static int mn88473_init(struct dvb_frontend *fe)
212{
213 struct i2c_client *client = fe->demodulator_priv;
214 struct mn88473_dev *dev = i2c_get_clientdata(client);
215 int ret, len, remaining;
216 const struct firmware *fw = NULL;
217 u8 *fw_file = MN88473_FIRMWARE;
218
219 dev_dbg(&client->dev, "\n");
220
221 if (dev->warm)
222 return 0;
223
224 /* request the firmware, this will block and timeout */
225 ret = request_firmware(&fw, fw_file, &client->dev);
226 if (ret) {
227 dev_err(&client->dev, "firmare file '%s' not found\n", fw_file);
228 goto err_request_firmware;
229 }
230
231 dev_info(&client->dev, "downloading firmware from file '%s'\n",
232 fw_file);
233
234 ret = regmap_write(dev->regmap[0], 0xf5, 0x03);
235 if (ret)
236 goto err;
237
238 for (remaining = fw->size; remaining > 0;
239 remaining -= (dev->i2c_wr_max - 1)) {
240 len = remaining;
241 if (len > (dev->i2c_wr_max - 1))
242 len = (dev->i2c_wr_max - 1);
243
244 ret = regmap_bulk_write(dev->regmap[0], 0xf6,
245 &fw->data[fw->size - remaining], len);
246 if (ret) {
247 dev_err(&client->dev, "firmware download failed=%d\n",
248 ret);
249 goto err;
250 }
251 }
252
253 ret = regmap_write(dev->regmap[0], 0xf5, 0x00);
254 if (ret)
255 goto err;
256
257 release_firmware(fw);
258 fw = NULL;
259
260 /* warm state */
261 dev->warm = true;
262
263 return 0;
264
265err:
266 release_firmware(fw);
267err_request_firmware:
268 dev_dbg(&client->dev, "failed=%d\n", ret);
269 return ret;
270}
271
272static int mn88473_sleep(struct dvb_frontend *fe)
273{
274 struct i2c_client *client = fe->demodulator_priv;
275 struct mn88473_dev *dev = i2c_get_clientdata(client);
276 int ret;
277
278 dev_dbg(&client->dev, "\n");
279
280 ret = regmap_write(dev->regmap[2], 0x05, 0x3e);
281 if (ret)
282 goto err;
283
284 dev->delivery_system = SYS_UNDEFINED;
285
286 return 0;
287err:
288 dev_dbg(&client->dev, "failed=%d\n", ret);
289 return ret;
290}
291
/* Frontend operations and capabilities exported to the DVB core. */
static struct dvb_frontend_ops mn88473_ops = {
	.delsys = {SYS_DVBT, SYS_DVBT2, SYS_DVBC_ANNEX_AC},
	.info = {
		.name = "Panasonic MN88473",
		/* Capability mask advertised to userspace via FE_GET_INFO. */
		.caps = FE_CAN_FEC_1_2 |
			FE_CAN_FEC_2_3 |
			FE_CAN_FEC_3_4 |
			FE_CAN_FEC_5_6 |
			FE_CAN_FEC_7_8 |
			FE_CAN_FEC_AUTO |
			FE_CAN_QPSK |
			FE_CAN_QAM_16 |
			FE_CAN_QAM_32 |
			FE_CAN_QAM_64 |
			FE_CAN_QAM_128 |
			FE_CAN_QAM_256 |
			FE_CAN_QAM_AUTO |
			FE_CAN_TRANSMISSION_MODE_AUTO |
			FE_CAN_GUARD_INTERVAL_AUTO |
			FE_CAN_HIERARCHY_AUTO |
			FE_CAN_MUTE_TS |
			FE_CAN_2G_MODULATION |
			FE_CAN_MULTISTREAM
	},

	.get_tune_settings = mn88473_get_tune_settings,

	.init = mn88473_init,
	.sleep = mn88473_sleep,

	.set_frontend = mn88473_set_frontend,

	.read_status = mn88473_read_status,
};
326
327static int mn88473_probe(struct i2c_client *client,
328 const struct i2c_device_id *id)
329{
330 struct mn88473_config *config = client->dev.platform_data;
331 struct mn88473_dev *dev;
332 int ret;
333 unsigned int utmp;
334 static const struct regmap_config regmap_config = {
335 .reg_bits = 8,
336 .val_bits = 8,
337 };
338
339 dev_dbg(&client->dev, "\n");
340
341 /* Caller really need to provide pointer for frontend we create. */
342 if (config->fe == NULL) {
343 dev_err(&client->dev, "frontend pointer not defined\n");
344 ret = -EINVAL;
345 goto err;
346 }
347
348 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
349 if (dev == NULL) {
350 ret = -ENOMEM;
351 goto err;
352 }
353
354 dev->i2c_wr_max = config->i2c_wr_max;
355 dev->client[0] = client;
356 dev->regmap[0] = regmap_init_i2c(dev->client[0], &regmap_config);
357 if (IS_ERR(dev->regmap[0])) {
358 ret = PTR_ERR(dev->regmap[0]);
359 goto err_kfree;
360 }
361
362 /* check demod answers to I2C */
363 ret = regmap_read(dev->regmap[0], 0x00, &utmp);
364 if (ret)
365 goto err_regmap_0_regmap_exit;
366
367 /*
368 * Chip has three I2C addresses for different register pages. Used
369 * addresses are 0x18, 0x1a and 0x1c. We register two dummy clients,
370 * 0x1a and 0x1c, in order to get own I2C client for each register page.
371 */
372 dev->client[1] = i2c_new_dummy(client->adapter, 0x1a);
373 if (dev->client[1] == NULL) {
374 ret = -ENODEV;
375 dev_err(&client->dev, "I2C registration failed\n");
376 if (ret)
377 goto err_regmap_0_regmap_exit;
378 }
379 dev->regmap[1] = regmap_init_i2c(dev->client[1], &regmap_config);
380 if (IS_ERR(dev->regmap[1])) {
381 ret = PTR_ERR(dev->regmap[1]);
382 goto err_client_1_i2c_unregister_device;
383 }
384 i2c_set_clientdata(dev->client[1], dev);
385
386 dev->client[2] = i2c_new_dummy(client->adapter, 0x1c);
387 if (dev->client[2] == NULL) {
388 ret = -ENODEV;
389 dev_err(&client->dev, "2nd I2C registration failed\n");
390 if (ret)
391 goto err_regmap_1_regmap_exit;
392 }
393 dev->regmap[2] = regmap_init_i2c(dev->client[2], &regmap_config);
394 if (IS_ERR(dev->regmap[2])) {
395 ret = PTR_ERR(dev->regmap[2]);
396 goto err_client_2_i2c_unregister_device;
397 }
398 i2c_set_clientdata(dev->client[2], dev);
399
400 /* create dvb_frontend */
401 memcpy(&dev->fe.ops, &mn88473_ops, sizeof(struct dvb_frontend_ops));
402 dev->fe.demodulator_priv = client;
403 *config->fe = &dev->fe;
404 i2c_set_clientdata(client, dev);
405
406 dev_info(&dev->client[0]->dev, "Panasonic MN88473 successfully attached\n");
407 return 0;
408
409err_client_2_i2c_unregister_device:
410 i2c_unregister_device(dev->client[2]);
411err_regmap_1_regmap_exit:
412 regmap_exit(dev->regmap[1]);
413err_client_1_i2c_unregister_device:
414 i2c_unregister_device(dev->client[1]);
415err_regmap_0_regmap_exit:
416 regmap_exit(dev->regmap[0]);
417err_kfree:
418 kfree(dev);
419err:
420 dev_dbg(&client->dev, "failed=%d\n", ret);
421 return ret;
422}
423
424static int mn88473_remove(struct i2c_client *client)
425{
426 struct mn88473_dev *dev = i2c_get_clientdata(client);
427
428 dev_dbg(&client->dev, "\n");
429
430 regmap_exit(dev->regmap[2]);
431 i2c_unregister_device(dev->client[2]);
432
433 regmap_exit(dev->regmap[1]);
434 i2c_unregister_device(dev->client[1]);
435
436 regmap_exit(dev->regmap[0]);
437
438 kfree(dev);
439
440 return 0;
441}
442
/* I2C device IDs this driver binds to. */
static const struct i2c_device_id mn88473_id_table[] = {
	{"mn88473", 0},
	{}
};
MODULE_DEVICE_TABLE(i2c, mn88473_id_table);

static struct i2c_driver mn88473_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name = "mn88473",
	},
	.probe = mn88473_probe,
	.remove = mn88473_remove,
	.id_table = mn88473_id_table,
};

/* Registers/unregisters the driver at module load/unload. */
module_i2c_driver(mn88473_driver);

MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
MODULE_DESCRIPTION("Panasonic MN88473 DVB-T/T2/C demodulator driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(MN88473_FIRMWARE);
diff --git a/drivers/staging/media/mn88473/mn88473_priv.h b/drivers/staging/media/mn88473/mn88473_priv.h
new file mode 100644
index 000000000000..78af112fb41d
--- /dev/null
+++ b/drivers/staging/media/mn88473/mn88473_priv.h
@@ -0,0 +1,36 @@
1/*
2 * Panasonic MN88473 DVB-T/T2/C demodulator driver
3 *
4 * Copyright (C) 2014 Antti Palosaari <crope@iki.fi>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16
17#ifndef MN88473_PRIV_H
18#define MN88473_PRIV_H
19
20#include "dvb_frontend.h"
21#include "mn88473.h"
22#include <linux/firmware.h>
23#include <linux/regmap.h>
24
25#define MN88473_FIRMWARE "dvb-demod-mn88473-01.fw"
26
/* Per-device state shared by the three register-page I2C clients. */
struct mn88473_dev {
	struct i2c_client *client[3];	/* register pages 0x18, 0x1a, 0x1c */
	struct regmap *regmap[3];	/* one regmap per register page */
	struct dvb_frontend fe;		/* embedded frontend handed to caller */
	u16 i2c_wr_max;			/* chunk limit for firmware download writes */
	fe_delivery_system_t delivery_system;	/* currently tuned system */
	bool warm; /* FW running */
};
35
36#endif
diff --git a/drivers/staging/media/omap24xx/Kconfig b/drivers/staging/media/omap24xx/Kconfig
deleted file mode 100644
index 82e569a21c46..000000000000
--- a/drivers/staging/media/omap24xx/Kconfig
+++ /dev/null
@@ -1,35 +0,0 @@
1config VIDEO_V4L2_INT_DEVICE
2 tristate
3
4config VIDEO_OMAP2
5 tristate "OMAP2 Camera Capture Interface driver (DEPRECATED)"
6 depends on VIDEO_DEV && ARCH_OMAP2
7 select VIDEOBUF_DMA_SG
8 select VIDEO_V4L2_INT_DEVICE
9 ---help---
10 This is a v4l2 driver for the TI OMAP2 camera capture interface
11
12 It uses the deprecated int-device API. Since this driver is no
13 longer actively maintained and nobody is interested in converting
14 it to the subdev API, this driver will be removed soon.
15
16 If you do want to keep this driver in the kernel, and are willing
17 to convert it to the subdev API, then please contact the linux-media
18 mailinglist.
19
20config VIDEO_TCM825X
21 tristate "TCM825x camera sensor support (DEPRECATED)"
22 depends on I2C && VIDEO_V4L2
23 depends on MEDIA_CAMERA_SUPPORT
24 select VIDEO_V4L2_INT_DEVICE
25 ---help---
26 This is a driver for the Toshiba TCM825x VGA camera sensor.
27 It is used for example in Nokia N800.
28
29 It uses the deprecated int-device API. Since this driver is no
30 longer actively maintained and nobody is interested in converting
31 it to the subdev API, this driver will be removed soon.
32
33 If you do want to keep this driver in the kernel, and are willing
34 to convert it to the subdev API, then please contact the linux-media
35 mailinglist.
diff --git a/drivers/staging/media/omap24xx/Makefile b/drivers/staging/media/omap24xx/Makefile
deleted file mode 100644
index c2e7175599c2..000000000000
--- a/drivers/staging/media/omap24xx/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
1omap2cam-objs := omap24xxcam.o omap24xxcam-dma.o
2
3obj-$(CONFIG_VIDEO_OMAP2) += omap2cam.o
4obj-$(CONFIG_VIDEO_TCM825X) += tcm825x.o
5obj-$(CONFIG_VIDEO_V4L2_INT_DEVICE) += v4l2-int-device.o
diff --git a/drivers/staging/media/omap24xx/omap24xxcam-dma.c b/drivers/staging/media/omap24xx/omap24xxcam-dma.c
deleted file mode 100644
index c427eb94ea66..000000000000
--- a/drivers/staging/media/omap24xx/omap24xxcam-dma.c
+++ /dev/null
@@ -1,598 +0,0 @@
1/*
2 * drivers/media/platform/omap24xxcam-dma.c
3 *
4 * Copyright (C) 2004 MontaVista Software, Inc.
5 * Copyright (C) 2004 Texas Instruments.
6 * Copyright (C) 2007 Nokia Corporation.
7 *
8 * Contact: Sakari Ailus <sakari.ailus@nokia.com>
9 *
10 * Based on code from Andy Lowe <source@mvista.com> and
11 * David Cohen <david.cohen@indt.org.br>.
12 *
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License
15 * version 2 as published by the Free Software Foundation.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
25 * 02110-1301 USA
26 */
27
28#include <linux/kernel.h>
29#include <linux/io.h>
30#include <linux/scatterlist.h>
31
32#include "omap24xxcam.h"
33
34/*
35 *
36 * DMA hardware.
37 *
38 */
39
40/* Ack all interrupt on CSR and IRQSTATUS_L0 */
41static void omap24xxcam_dmahw_ack_all(void __iomem *base)
42{
43 u32 csr;
44 int i;
45
46 for (i = 0; i < NUM_CAMDMA_CHANNELS; ++i) {
47 csr = omap24xxcam_reg_in(base, CAMDMA_CSR(i));
48 /* ack interrupt in CSR */
49 omap24xxcam_reg_out(base, CAMDMA_CSR(i), csr);
50 }
51 omap24xxcam_reg_out(base, CAMDMA_IRQSTATUS_L0, 0xf);
52}
53
54/* Ack dmach on CSR and IRQSTATUS_L0 */
55static u32 omap24xxcam_dmahw_ack_ch(void __iomem *base, int dmach)
56{
57 u32 csr;
58
59 csr = omap24xxcam_reg_in(base, CAMDMA_CSR(dmach));
60 /* ack interrupt in CSR */
61 omap24xxcam_reg_out(base, CAMDMA_CSR(dmach), csr);
62 /* ack interrupt in IRQSTATUS */
63 omap24xxcam_reg_out(base, CAMDMA_IRQSTATUS_L0, (1 << dmach));
64
65 return csr;
66}
67
68static int omap24xxcam_dmahw_running(void __iomem *base, int dmach)
69{
70 return omap24xxcam_reg_in(base, CAMDMA_CCR(dmach)) & CAMDMA_CCR_ENABLE;
71}
72
73static void omap24xxcam_dmahw_transfer_setup(void __iomem *base, int dmach,
74 dma_addr_t start, u32 len)
75{
76 omap24xxcam_reg_out(base, CAMDMA_CCR(dmach),
77 CAMDMA_CCR_SEL_SRC_DST_SYNC
78 | CAMDMA_CCR_BS
79 | CAMDMA_CCR_DST_AMODE_POST_INC
80 | CAMDMA_CCR_SRC_AMODE_POST_INC
81 | CAMDMA_CCR_FS
82 | CAMDMA_CCR_WR_ACTIVE
83 | CAMDMA_CCR_RD_ACTIVE
84 | CAMDMA_CCR_SYNCHRO_CAMERA);
85 omap24xxcam_reg_out(base, CAMDMA_CLNK_CTRL(dmach), 0);
86 omap24xxcam_reg_out(base, CAMDMA_CEN(dmach), len);
87 omap24xxcam_reg_out(base, CAMDMA_CFN(dmach), 1);
88 omap24xxcam_reg_out(base, CAMDMA_CSDP(dmach),
89 CAMDMA_CSDP_WRITE_MODE_POSTED
90 | CAMDMA_CSDP_DST_BURST_EN_32
91 | CAMDMA_CSDP_DST_PACKED
92 | CAMDMA_CSDP_SRC_BURST_EN_32
93 | CAMDMA_CSDP_SRC_PACKED
94 | CAMDMA_CSDP_DATA_TYPE_8BITS);
95 omap24xxcam_reg_out(base, CAMDMA_CSSA(dmach), 0);
96 omap24xxcam_reg_out(base, CAMDMA_CDSA(dmach), start);
97 omap24xxcam_reg_out(base, CAMDMA_CSEI(dmach), 0);
98 omap24xxcam_reg_out(base, CAMDMA_CSFI(dmach), DMA_THRESHOLD);
99 omap24xxcam_reg_out(base, CAMDMA_CDEI(dmach), 0);
100 omap24xxcam_reg_out(base, CAMDMA_CDFI(dmach), 0);
101 omap24xxcam_reg_out(base, CAMDMA_CSR(dmach),
102 CAMDMA_CSR_MISALIGNED_ERR
103 | CAMDMA_CSR_SECURE_ERR
104 | CAMDMA_CSR_TRANS_ERR
105 | CAMDMA_CSR_BLOCK
106 | CAMDMA_CSR_DROP);
107 omap24xxcam_reg_out(base, CAMDMA_CICR(dmach),
108 CAMDMA_CICR_MISALIGNED_ERR_IE
109 | CAMDMA_CICR_SECURE_ERR_IE
110 | CAMDMA_CICR_TRANS_ERR_IE
111 | CAMDMA_CICR_BLOCK_IE
112 | CAMDMA_CICR_DROP_IE);
113}
114
115static void omap24xxcam_dmahw_transfer_start(void __iomem *base, int dmach)
116{
117 omap24xxcam_reg_out(base, CAMDMA_CCR(dmach),
118 CAMDMA_CCR_SEL_SRC_DST_SYNC
119 | CAMDMA_CCR_BS
120 | CAMDMA_CCR_DST_AMODE_POST_INC
121 | CAMDMA_CCR_SRC_AMODE_POST_INC
122 | CAMDMA_CCR_ENABLE
123 | CAMDMA_CCR_FS
124 | CAMDMA_CCR_SYNCHRO_CAMERA);
125}
126
127static void omap24xxcam_dmahw_transfer_chain(void __iomem *base, int dmach,
128 int free_dmach)
129{
130 int prev_dmach, ch;
131
132 if (dmach == 0)
133 prev_dmach = NUM_CAMDMA_CHANNELS - 1;
134 else
135 prev_dmach = dmach - 1;
136 omap24xxcam_reg_out(base, CAMDMA_CLNK_CTRL(prev_dmach),
137 CAMDMA_CLNK_CTRL_ENABLE_LNK | dmach);
138 /* Did we chain the DMA transfer before the previous one
139 * finished?
140 */
141 ch = (dmach + free_dmach) % NUM_CAMDMA_CHANNELS;
142 while (!(omap24xxcam_reg_in(base, CAMDMA_CCR(ch))
143 & CAMDMA_CCR_ENABLE)) {
144 if (ch == dmach) {
145 /* The previous transfer has ended and this one
146 * hasn't started, so we must not have chained
147 * to the previous one in time. We'll have to
148 * start it now.
149 */
150 omap24xxcam_dmahw_transfer_start(base, dmach);
151 break;
152 }
153 ch = (ch + 1) % NUM_CAMDMA_CHANNELS;
154 }
155}
156
157/* Abort all chained DMA transfers. After all transfers have been
158 * aborted and the DMA controller is idle, the completion routines for
159 * any aborted transfers will be called in sequence. The DMA
160 * controller may not be idle after this routine completes, because
161 * the completion routines might start new transfers.
162 */
163static void omap24xxcam_dmahw_abort_ch(void __iomem *base, int dmach)
164{
165 /* mask all interrupts from this channel */
166 omap24xxcam_reg_out(base, CAMDMA_CICR(dmach), 0);
167 /* unlink this channel */
168 omap24xxcam_reg_merge(base, CAMDMA_CLNK_CTRL(dmach), 0,
169 CAMDMA_CLNK_CTRL_ENABLE_LNK);
170 /* disable this channel */
171 omap24xxcam_reg_merge(base, CAMDMA_CCR(dmach), 0, CAMDMA_CCR_ENABLE);
172}
173
174static void omap24xxcam_dmahw_init(void __iomem *base)
175{
176 omap24xxcam_reg_out(base, CAMDMA_OCP_SYSCONFIG,
177 CAMDMA_OCP_SYSCONFIG_MIDLEMODE_FSTANDBY
178 | CAMDMA_OCP_SYSCONFIG_SIDLEMODE_FIDLE
179 | CAMDMA_OCP_SYSCONFIG_AUTOIDLE);
180
181 omap24xxcam_reg_merge(base, CAMDMA_GCR, 0x10,
182 CAMDMA_GCR_MAX_CHANNEL_FIFO_DEPTH);
183
184 omap24xxcam_reg_out(base, CAMDMA_IRQENABLE_L0, 0xf);
185}
186
187/*
188 *
189 * Individual DMA channel handling.
190 *
191 */
192
193/* Start a DMA transfer from the camera to memory.
194 * Returns zero if the transfer was successfully started, or non-zero if all
195 * DMA channels are already in use or starting is currently inhibited.
196 */
197static int omap24xxcam_dma_start(struct omap24xxcam_dma *dma, dma_addr_t start,
198 u32 len, dma_callback_t callback, void *arg)
199{
200 unsigned long flags;
201 int dmach;
202
203 spin_lock_irqsave(&dma->lock, flags);
204
205 if (!dma->free_dmach || atomic_read(&dma->dma_stop)) {
206 spin_unlock_irqrestore(&dma->lock, flags);
207 return -EBUSY;
208 }
209
210 dmach = dma->next_dmach;
211
212 dma->ch_state[dmach].callback = callback;
213 dma->ch_state[dmach].arg = arg;
214
215 omap24xxcam_dmahw_transfer_setup(dma->base, dmach, start, len);
216
217 /* We're ready to start the DMA transfer. */
218
219 if (dma->free_dmach < NUM_CAMDMA_CHANNELS) {
220 /* A transfer is already in progress, so try to chain to it. */
221 omap24xxcam_dmahw_transfer_chain(dma->base, dmach,
222 dma->free_dmach);
223 } else {
224 /* No transfer is in progress, so we'll just start this one
225 * now.
226 */
227 omap24xxcam_dmahw_transfer_start(dma->base, dmach);
228 }
229
230 dma->next_dmach = (dma->next_dmach + 1) % NUM_CAMDMA_CHANNELS;
231 dma->free_dmach--;
232
233 spin_unlock_irqrestore(&dma->lock, flags);
234
235 return 0;
236}
237
238/* Abort all chained DMA transfers. After all transfers have been
239 * aborted and the DMA controller is idle, the completion routines for
240 * any aborted transfers will be called in sequence. The DMA
241 * controller may not be idle after this routine completes, because
242 * the completion routines might start new transfers.
243 */
244static void omap24xxcam_dma_abort(struct omap24xxcam_dma *dma, u32 csr)
245{
246 unsigned long flags;
247 int dmach, i, free_dmach;
248 dma_callback_t callback;
249 void *arg;
250
251 spin_lock_irqsave(&dma->lock, flags);
252
253 /* stop any DMA transfers in progress */
254 dmach = (dma->next_dmach + dma->free_dmach) % NUM_CAMDMA_CHANNELS;
255 for (i = 0; i < NUM_CAMDMA_CHANNELS; i++) {
256 omap24xxcam_dmahw_abort_ch(dma->base, dmach);
257 dmach = (dmach + 1) % NUM_CAMDMA_CHANNELS;
258 }
259
260 /* We have to be careful here because the callback routine
261 * might start a new DMA transfer, and we only want to abort
262 * transfers that were started before this routine was called.
263 */
264 free_dmach = dma->free_dmach;
265 while ((dma->free_dmach < NUM_CAMDMA_CHANNELS) &&
266 (free_dmach < NUM_CAMDMA_CHANNELS)) {
267 dmach = (dma->next_dmach + dma->free_dmach)
268 % NUM_CAMDMA_CHANNELS;
269 callback = dma->ch_state[dmach].callback;
270 arg = dma->ch_state[dmach].arg;
271 dma->free_dmach++;
272 free_dmach++;
273 if (callback) {
274 /* leave interrupts disabled during callback */
275 spin_unlock(&dma->lock);
276 (*callback) (dma, csr, arg);
277 spin_lock(&dma->lock);
278 }
279 }
280
281 spin_unlock_irqrestore(&dma->lock, flags);
282}
283
284/* Abort all chained DMA transfers. After all transfers have been
285 * aborted and the DMA controller is idle, the completion routines for
286 * any aborted transfers will be called in sequence. If the completion
287 * routines attempt to start a new DMA transfer it will fail, so the
288 * DMA controller will be idle after this routine completes.
289 */
290static void omap24xxcam_dma_stop(struct omap24xxcam_dma *dma, u32 csr)
291{
292 atomic_inc(&dma->dma_stop);
293 omap24xxcam_dma_abort(dma, csr);
294 atomic_dec(&dma->dma_stop);
295}
296
297/* Camera DMA interrupt service routine. */
298void omap24xxcam_dma_isr(struct omap24xxcam_dma *dma)
299{
300 int dmach;
301 dma_callback_t callback;
302 void *arg;
303 u32 csr;
304 const u32 csr_error = CAMDMA_CSR_MISALIGNED_ERR
305 | CAMDMA_CSR_SUPERVISOR_ERR | CAMDMA_CSR_SECURE_ERR
306 | CAMDMA_CSR_TRANS_ERR | CAMDMA_CSR_DROP;
307
308 spin_lock(&dma->lock);
309
310 if (dma->free_dmach == NUM_CAMDMA_CHANNELS) {
311 /* A camera DMA interrupt occurred while all channels
312 * are idle, so we'll acknowledge the interrupt in the
313 * IRQSTATUS register and exit.
314 */
315 omap24xxcam_dmahw_ack_all(dma->base);
316 spin_unlock(&dma->lock);
317 return;
318 }
319
320 while (dma->free_dmach < NUM_CAMDMA_CHANNELS) {
321 dmach = (dma->next_dmach + dma->free_dmach)
322 % NUM_CAMDMA_CHANNELS;
323 if (omap24xxcam_dmahw_running(dma->base, dmach)) {
324 /* This buffer hasn't finished yet, so we're done. */
325 break;
326 }
327 csr = omap24xxcam_dmahw_ack_ch(dma->base, dmach);
328 if (csr & csr_error) {
329 /* A DMA error occurred, so stop all DMA
330 * transfers in progress.
331 */
332 spin_unlock(&dma->lock);
333 omap24xxcam_dma_stop(dma, csr);
334 return;
335 }
336 callback = dma->ch_state[dmach].callback;
337 arg = dma->ch_state[dmach].arg;
338 dma->free_dmach++;
339 if (callback) {
340 spin_unlock(&dma->lock);
341 (*callback) (dma, csr, arg);
342 spin_lock(&dma->lock);
343 }
344 }
345
346 spin_unlock(&dma->lock);
347
348 omap24xxcam_sgdma_process(
349 container_of(dma, struct omap24xxcam_sgdma, dma));
350}
351
352void omap24xxcam_dma_hwinit(struct omap24xxcam_dma *dma)
353{
354 unsigned long flags;
355
356 spin_lock_irqsave(&dma->lock, flags);
357
358 omap24xxcam_dmahw_init(dma->base);
359
360 spin_unlock_irqrestore(&dma->lock, flags);
361}
362
363static void omap24xxcam_dma_init(struct omap24xxcam_dma *dma,
364 void __iomem *base)
365{
366 int ch;
367
368 /* group all channels on DMA IRQ0 and unmask irq */
369 spin_lock_init(&dma->lock);
370 dma->base = base;
371 dma->free_dmach = NUM_CAMDMA_CHANNELS;
372 dma->next_dmach = 0;
373 for (ch = 0; ch < NUM_CAMDMA_CHANNELS; ch++) {
374 dma->ch_state[ch].callback = NULL;
375 dma->ch_state[ch].arg = NULL;
376 }
377}
378
379/*
380 *
381 * Scatter-gather DMA.
382 *
383 * High-level DMA construct for transferring whole picture frames to
384 * memory that is discontinuous.
385 *
386 */
387
388/* DMA completion routine for the scatter-gather DMA fragments. */
389static void omap24xxcam_sgdma_callback(struct omap24xxcam_dma *dma, u32 csr,
390 void *arg)
391{
392 struct omap24xxcam_sgdma *sgdma =
393 container_of(dma, struct omap24xxcam_sgdma, dma);
394 int sgslot = (int)arg;
395 struct sgdma_state *sg_state;
396 const u32 csr_error = CAMDMA_CSR_MISALIGNED_ERR
397 | CAMDMA_CSR_SUPERVISOR_ERR | CAMDMA_CSR_SECURE_ERR
398 | CAMDMA_CSR_TRANS_ERR | CAMDMA_CSR_DROP;
399
400 spin_lock(&sgdma->lock);
401
402 /* We got an interrupt, we can remove the timer */
403 del_timer(&sgdma->reset_timer);
404
405 sg_state = sgdma->sg_state + sgslot;
406 if (!sg_state->queued_sglist) {
407 spin_unlock(&sgdma->lock);
408 printk(KERN_ERR "%s: sgdma completed when none queued!\n",
409 __func__);
410 return;
411 }
412
413 sg_state->csr |= csr;
414 if (!--sg_state->queued_sglist) {
415 /* Queue for this sglist is empty, so check to see if we're
416 * done.
417 */
418 if ((sg_state->next_sglist == sg_state->sglen)
419 || (sg_state->csr & csr_error)) {
420 sgdma_callback_t callback = sg_state->callback;
421 void *arg = sg_state->arg;
422 u32 sg_csr = sg_state->csr;
423 /* All done with this sglist */
424 sgdma->free_sgdma++;
425 if (callback) {
426 spin_unlock(&sgdma->lock);
427 (*callback) (sgdma, sg_csr, arg);
428 return;
429 }
430 }
431 }
432
433 spin_unlock(&sgdma->lock);
434}
435
436/* Start queued scatter-gather DMA transfers. */
437void omap24xxcam_sgdma_process(struct omap24xxcam_sgdma *sgdma)
438{
439 unsigned long flags;
440 int queued_sgdma, sgslot;
441 struct sgdma_state *sg_state;
442 const u32 csr_error = CAMDMA_CSR_MISALIGNED_ERR
443 | CAMDMA_CSR_SUPERVISOR_ERR | CAMDMA_CSR_SECURE_ERR
444 | CAMDMA_CSR_TRANS_ERR | CAMDMA_CSR_DROP;
445
446 spin_lock_irqsave(&sgdma->lock, flags);
447
448 queued_sgdma = NUM_SG_DMA - sgdma->free_sgdma;
449 sgslot = (sgdma->next_sgdma + sgdma->free_sgdma) % NUM_SG_DMA;
450 while (queued_sgdma > 0) {
451 sg_state = sgdma->sg_state + sgslot;
452 while ((sg_state->next_sglist < sg_state->sglen) &&
453 !(sg_state->csr & csr_error)) {
454 const struct scatterlist *sglist;
455 unsigned int len;
456
457 sglist = sg_state->sglist + sg_state->next_sglist;
458 /* try to start the next DMA transfer */
459 if (sg_state->next_sglist + 1 == sg_state->sglen) {
460 /*
461 * On the last sg, we handle the case where
462 * cam->img.pix.sizeimage % PAGE_ALIGN != 0
463 */
464 len = sg_state->len - sg_state->bytes_read;
465 } else {
466 len = sg_dma_len(sglist);
467 }
468
469 if (omap24xxcam_dma_start(&sgdma->dma,
470 sg_dma_address(sglist),
471 len,
472 omap24xxcam_sgdma_callback,
473 (void *)sgslot)) {
474 /* DMA start failed */
475 spin_unlock_irqrestore(&sgdma->lock, flags);
476 return;
477 }
478 /* DMA start was successful */
479 sg_state->next_sglist++;
480 sg_state->bytes_read += len;
481 sg_state->queued_sglist++;
482
483 /* We start the reset timer */
484 mod_timer(&sgdma->reset_timer, jiffies + HZ);
485 }
486 queued_sgdma--;
487 sgslot = (sgslot + 1) % NUM_SG_DMA;
488 }
489
490 spin_unlock_irqrestore(&sgdma->lock, flags);
491}
492
493/*
494 * Queue a scatter-gather DMA transfer from the camera to memory.
495 * Returns zero if the transfer was successfully queued, or non-zero
496 * if all of the scatter-gather slots are already in use.
497 */
498int omap24xxcam_sgdma_queue(struct omap24xxcam_sgdma *sgdma,
499 const struct scatterlist *sglist, int sglen,
500 int len, sgdma_callback_t callback, void *arg)
501{
502 unsigned long flags;
503 struct sgdma_state *sg_state;
504
505 if ((sglen < 0) || ((sglen > 0) && !sglist))
506 return -EINVAL;
507
508 spin_lock_irqsave(&sgdma->lock, flags);
509
510 if (!sgdma->free_sgdma) {
511 spin_unlock_irqrestore(&sgdma->lock, flags);
512 return -EBUSY;
513 }
514
515 sg_state = sgdma->sg_state + sgdma->next_sgdma;
516
517 sg_state->sglist = sglist;
518 sg_state->sglen = sglen;
519 sg_state->next_sglist = 0;
520 sg_state->bytes_read = 0;
521 sg_state->len = len;
522 sg_state->queued_sglist = 0;
523 sg_state->csr = 0;
524 sg_state->callback = callback;
525 sg_state->arg = arg;
526
527 sgdma->next_sgdma = (sgdma->next_sgdma + 1) % NUM_SG_DMA;
528 sgdma->free_sgdma--;
529
530 spin_unlock_irqrestore(&sgdma->lock, flags);
531
532 omap24xxcam_sgdma_process(sgdma);
533
534 return 0;
535}
536
537/* Sync scatter-gather DMA by aborting any DMA transfers currently in progress.
538 * Any queued scatter-gather DMA transactions that have not yet been started
539 * will remain queued. The DMA controller will be idle after this routine
540 * completes. When the scatter-gather queue is restarted, the next
541 * scatter-gather DMA transfer will begin at the start of a new transaction.
542 */
543void omap24xxcam_sgdma_sync(struct omap24xxcam_sgdma *sgdma)
544{
545 unsigned long flags;
546 int sgslot;
547 struct sgdma_state *sg_state;
548 u32 csr = CAMDMA_CSR_TRANS_ERR;
549
550 /* stop any DMA transfers in progress */
551 omap24xxcam_dma_stop(&sgdma->dma, csr);
552
553 spin_lock_irqsave(&sgdma->lock, flags);
554
555 if (sgdma->free_sgdma < NUM_SG_DMA) {
556 sgslot = (sgdma->next_sgdma + sgdma->free_sgdma) % NUM_SG_DMA;
557 sg_state = sgdma->sg_state + sgslot;
558 if (sg_state->next_sglist != 0) {
559 /* This DMA transfer was in progress, so abort it. */
560 sgdma_callback_t callback = sg_state->callback;
561 void *arg = sg_state->arg;
562
563 sgdma->free_sgdma++;
564 if (callback) {
565 /* leave interrupts masked */
566 spin_unlock(&sgdma->lock);
567 (*callback) (sgdma, csr, arg);
568 spin_lock(&sgdma->lock);
569 }
570 }
571 }
572
573 spin_unlock_irqrestore(&sgdma->lock, flags);
574}
575
576void omap24xxcam_sgdma_init(struct omap24xxcam_sgdma *sgdma,
577 void __iomem *base,
578 void (*reset_callback)(unsigned long data),
579 unsigned long reset_callback_data)
580{
581 int sg;
582
583 spin_lock_init(&sgdma->lock);
584 sgdma->free_sgdma = NUM_SG_DMA;
585 sgdma->next_sgdma = 0;
586 for (sg = 0; sg < NUM_SG_DMA; sg++) {
587 sgdma->sg_state[sg].sglen = 0;
588 sgdma->sg_state[sg].next_sglist = 0;
589 sgdma->sg_state[sg].bytes_read = 0;
590 sgdma->sg_state[sg].queued_sglist = 0;
591 sgdma->sg_state[sg].csr = 0;
592 sgdma->sg_state[sg].callback = NULL;
593 sgdma->sg_state[sg].arg = NULL;
594 }
595
596 omap24xxcam_dma_init(&sgdma->dma, base);
597 setup_timer(&sgdma->reset_timer, reset_callback, reset_callback_data);
598}
diff --git a/drivers/staging/media/omap24xx/omap24xxcam.c b/drivers/staging/media/omap24xx/omap24xxcam.c
deleted file mode 100644
index d590b3e8b70c..000000000000
--- a/drivers/staging/media/omap24xx/omap24xxcam.c
+++ /dev/null
@@ -1,1882 +0,0 @@
1/*
2 * drivers/media/platform/omap24xxcam.c
3 *
4 * OMAP 2 camera block driver.
5 *
6 * Copyright (C) 2004 MontaVista Software, Inc.
7 * Copyright (C) 2004 Texas Instruments.
8 * Copyright (C) 2007-2008 Nokia Corporation.
9 *
10 * Contact: Sakari Ailus <sakari.ailus@nokia.com>
11 *
12 * Based on code from Andy Lowe <source@mvista.com>
13 *
14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License
16 * version 2 as published by the Free Software Foundation.
17 *
18 * This program is distributed in the hope that it will be useful, but
19 * WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 * General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
26 * 02110-1301 USA
27 */
28
29#include <linux/delay.h>
30#include <linux/kernel.h>
31#include <linux/interrupt.h>
32#include <linux/videodev2.h>
33#include <linux/pci.h> /* needed for videobufs */
34#include <linux/platform_device.h>
35#include <linux/clk.h>
36#include <linux/io.h>
37#include <linux/slab.h>
38#include <linux/sched.h>
39#include <linux/module.h>
40
41#include <media/v4l2-common.h>
42#include <media/v4l2-ioctl.h>
43
44#include "omap24xxcam.h"
45
46#define OMAP24XXCAM_VERSION "0.0.1"
47
48#define RESET_TIMEOUT_NS 10000
49
50static void omap24xxcam_reset(struct omap24xxcam_device *cam);
51static int omap24xxcam_sensor_if_enable(struct omap24xxcam_device *cam);
52static void omap24xxcam_device_unregister(struct v4l2_int_device *s);
53static int omap24xxcam_remove(struct platform_device *pdev);
54
55/* module parameters */
56static int video_nr = -1; /* video device minor (-1 ==> auto assign) */
57/*
58 * Maximum amount of memory to use for capture buffers.
59 * Default is 4800KB, enough to double-buffer SXGA.
60 */
61static int capture_mem = 1280 * 960 * 2 * 2;
62
63static struct v4l2_int_device omap24xxcam;
64
65/*
66 *
67 * Clocks.
68 *
69 */
70
71static void omap24xxcam_clock_put(struct omap24xxcam_device *cam)
72{
73 if (cam->ick != NULL && !IS_ERR(cam->ick))
74 clk_put(cam->ick);
75 if (cam->fck != NULL && !IS_ERR(cam->fck))
76 clk_put(cam->fck);
77
78 cam->ick = cam->fck = NULL;
79}
80
81static int omap24xxcam_clock_get(struct omap24xxcam_device *cam)
82{
83 int rval = 0;
84
85 cam->fck = clk_get(cam->dev, "fck");
86 if (IS_ERR(cam->fck)) {
87 dev_err(cam->dev, "can't get camera fck");
88 rval = PTR_ERR(cam->fck);
89 omap24xxcam_clock_put(cam);
90 return rval;
91 }
92
93 cam->ick = clk_get(cam->dev, "ick");
94 if (IS_ERR(cam->ick)) {
95 dev_err(cam->dev, "can't get camera ick");
96 rval = PTR_ERR(cam->ick);
97 omap24xxcam_clock_put(cam);
98 }
99
100 return rval;
101}
102
103static void omap24xxcam_clock_on(struct omap24xxcam_device *cam)
104{
105 clk_enable(cam->fck);
106 clk_enable(cam->ick);
107}
108
109static void omap24xxcam_clock_off(struct omap24xxcam_device *cam)
110{
111 clk_disable(cam->fck);
112 clk_disable(cam->ick);
113}
114
115/*
116 *
117 * Camera core
118 *
119 */
120
121/*
122 * Set xclk.
123 *
124 * To disable xclk, use value zero.
125 */
126static void omap24xxcam_core_xclk_set(const struct omap24xxcam_device *cam,
127 u32 xclk)
128{
129 if (xclk) {
130 u32 divisor = CAM_MCLK / xclk;
131
132 if (divisor == 1)
133 omap24xxcam_reg_out(cam->mmio_base + CC_REG_OFFSET,
134 CC_CTRL_XCLK,
135 CC_CTRL_XCLK_DIV_BYPASS);
136 else
137 omap24xxcam_reg_out(cam->mmio_base + CC_REG_OFFSET,
138 CC_CTRL_XCLK, divisor);
139 } else
140 omap24xxcam_reg_out(cam->mmio_base + CC_REG_OFFSET,
141 CC_CTRL_XCLK, CC_CTRL_XCLK_DIV_STABLE_LOW);
142}
143
144static void omap24xxcam_core_hwinit(const struct omap24xxcam_device *cam)
145{
146 /*
147 * Setting the camera core AUTOIDLE bit causes problems with frame
148 * synchronization, so we will clear the AUTOIDLE bit instead.
149 */
150 omap24xxcam_reg_out(cam->mmio_base + CC_REG_OFFSET, CC_SYSCONFIG,
151 CC_SYSCONFIG_AUTOIDLE);
152
153 /* program the camera interface DMA packet size */
154 omap24xxcam_reg_out(cam->mmio_base + CC_REG_OFFSET, CC_CTRL_DMA,
155 CC_CTRL_DMA_EN | (DMA_THRESHOLD / 4 - 1));
156
157 /* enable camera core error interrupts */
158 omap24xxcam_reg_out(cam->mmio_base + CC_REG_OFFSET, CC_IRQENABLE,
159 CC_IRQENABLE_FW_ERR_IRQ
160 | CC_IRQENABLE_FSC_ERR_IRQ
161 | CC_IRQENABLE_SSC_ERR_IRQ
162 | CC_IRQENABLE_FIFO_OF_IRQ);
163}
164
165/*
166 * Enable the camera core.
167 *
168 * Data transfer to the camera DMA starts from next starting frame.
169 */
170static void omap24xxcam_core_enable(const struct omap24xxcam_device *cam)
171{
172
173 omap24xxcam_reg_out(cam->mmio_base + CC_REG_OFFSET, CC_CTRL,
174 cam->cc_ctrl);
175}
176
177/*
178 * Disable camera core.
179 *
180 * The data transfer will be stopped immediately (CC_CTRL_CC_RST). The
181 * core internal state machines will be reset. Use
182 * CC_CTRL_CC_FRAME_TRIG instead if you want to transfer the current
183 * frame completely.
184 */
185static void omap24xxcam_core_disable(const struct omap24xxcam_device *cam)
186{
187 omap24xxcam_reg_out(cam->mmio_base + CC_REG_OFFSET, CC_CTRL,
188 CC_CTRL_CC_RST);
189}
190
191/* Interrupt service routine for camera core interrupts. */
192static void omap24xxcam_core_isr(struct omap24xxcam_device *cam)
193{
194 u32 cc_irqstatus;
195 const u32 cc_irqstatus_err =
196 CC_IRQSTATUS_FW_ERR_IRQ
197 | CC_IRQSTATUS_FSC_ERR_IRQ
198 | CC_IRQSTATUS_SSC_ERR_IRQ
199 | CC_IRQSTATUS_FIFO_UF_IRQ
200 | CC_IRQSTATUS_FIFO_OF_IRQ;
201
202 cc_irqstatus = omap24xxcam_reg_in(cam->mmio_base + CC_REG_OFFSET,
203 CC_IRQSTATUS);
204 omap24xxcam_reg_out(cam->mmio_base + CC_REG_OFFSET, CC_IRQSTATUS,
205 cc_irqstatus);
206
207 if (cc_irqstatus & cc_irqstatus_err
208 && !atomic_read(&cam->in_reset)) {
209 dev_dbg(cam->dev, "resetting camera, cc_irqstatus 0x%x\n",
210 cc_irqstatus);
211 omap24xxcam_reset(cam);
212 }
213}
214
215/*
216 *
217 * videobuf_buffer handling.
218 *
219 * Memory for mmapped videobuf_buffers is not allocated
220 * conventionally, but by several kmalloc allocations and then
221 * creating the scatterlist on our own. User-space buffers are handled
222 * normally.
223 *
224 */
225
226/*
227 * Free the memory-mapped buffer memory allocated for a
228 * videobuf_buffer and the associated scatterlist.
229 */
230static void omap24xxcam_vbq_free_mmap_buffer(struct videobuf_buffer *vb)
231{
232 struct videobuf_dmabuf *dma = videobuf_to_dma(vb);
233 size_t alloc_size;
234 struct page *page;
235 int i;
236
237 if (dma->sglist == NULL)
238 return;
239
240 i = dma->sglen;
241 while (i) {
242 i--;
243 alloc_size = sg_dma_len(&dma->sglist[i]);
244 page = sg_page(&dma->sglist[i]);
245 do {
246 ClearPageReserved(page++);
247 } while (alloc_size -= PAGE_SIZE);
248 __free_pages(sg_page(&dma->sglist[i]),
249 get_order(sg_dma_len(&dma->sglist[i])));
250 }
251
252 kfree(dma->sglist);
253 dma->sglist = NULL;
254}
255
256/* Release all memory related to the videobuf_queue. */
257static void omap24xxcam_vbq_free_mmap_buffers(struct videobuf_queue *vbq)
258{
259 int i;
260
261 mutex_lock(&vbq->vb_lock);
262
263 for (i = 0; i < VIDEO_MAX_FRAME; i++) {
264 if (NULL == vbq->bufs[i])
265 continue;
266 if (V4L2_MEMORY_MMAP != vbq->bufs[i]->memory)
267 continue;
268 vbq->ops->buf_release(vbq, vbq->bufs[i]);
269 omap24xxcam_vbq_free_mmap_buffer(vbq->bufs[i]);
270 kfree(vbq->bufs[i]);
271 vbq->bufs[i] = NULL;
272 }
273
274 mutex_unlock(&vbq->vb_lock);
275
276 videobuf_mmap_free(vbq);
277}
278
279/*
280 * Allocate physically as contiguous as possible buffer for video
281 * frame and allocate and build DMA scatter-gather list for it.
282 */
283static int omap24xxcam_vbq_alloc_mmap_buffer(struct videobuf_buffer *vb)
284{
285 unsigned int order;
286 size_t alloc_size, size = vb->bsize; /* vb->bsize is page aligned */
287 struct page *page;
288 int max_pages, err = 0, i = 0;
289 struct videobuf_dmabuf *dma = videobuf_to_dma(vb);
290
291 /*
292 * allocate maximum size scatter-gather list. Note this is
293 * overhead. We may not use as many entries as we allocate
294 */
295 max_pages = vb->bsize >> PAGE_SHIFT;
296 dma->sglist = kcalloc(max_pages, sizeof(*dma->sglist), GFP_KERNEL);
297 if (dma->sglist == NULL) {
298 err = -ENOMEM;
299 goto out;
300 }
301
302 while (size) {
303 order = get_order(size);
304 /*
305 * do not over-allocate even if we would get larger
306 * contiguous chunk that way
307 */
308 if ((PAGE_SIZE << order) > size)
309 order--;
310
311 /* try to allocate as many contiguous pages as possible */
312 page = alloc_pages(GFP_KERNEL, order);
313 /* if allocation fails, try to allocate smaller amount */
314 while (page == NULL) {
315 order--;
316 page = alloc_pages(GFP_KERNEL, order);
317 if (page == NULL && !order) {
318 err = -ENOMEM;
319 goto out;
320 }
321 }
322 size -= (PAGE_SIZE << order);
323
324 /* append allocated chunk of pages into scatter-gather list */
325 sg_set_page(&dma->sglist[i], page, PAGE_SIZE << order, 0);
326 dma->sglen++;
327 i++;
328
329 alloc_size = (PAGE_SIZE << order);
330
331 /* clear pages before giving them to user space */
332 memset(page_address(page), 0, alloc_size);
333
334 /* mark allocated pages reserved */
335 do {
336 SetPageReserved(page++);
337 } while (alloc_size -= PAGE_SIZE);
338 }
339 /*
340 * REVISIT: not fully correct to assign nr_pages == sglen but
341 * video-buf is passing nr_pages for e.g. unmap_sg calls
342 */
343 dma->nr_pages = dma->sglen;
344 dma->direction = PCI_DMA_FROMDEVICE;
345
346 return 0;
347
348out:
349 omap24xxcam_vbq_free_mmap_buffer(vb);
350 return err;
351}
352
353static int omap24xxcam_vbq_alloc_mmap_buffers(struct videobuf_queue *vbq,
354 unsigned int count)
355{
356 int i, err = 0;
357 struct omap24xxcam_fh *fh =
358 container_of(vbq, struct omap24xxcam_fh, vbq);
359
360 mutex_lock(&vbq->vb_lock);
361
362 for (i = 0; i < count; i++) {
363 err = omap24xxcam_vbq_alloc_mmap_buffer(vbq->bufs[i]);
364 if (err)
365 goto out;
366 dev_dbg(fh->cam->dev, "sglen is %d for buffer %d\n",
367 videobuf_to_dma(vbq->bufs[i])->sglen, i);
368 }
369
370 mutex_unlock(&vbq->vb_lock);
371
372 return 0;
373out:
374 while (i) {
375 i--;
376 omap24xxcam_vbq_free_mmap_buffer(vbq->bufs[i]);
377 }
378
379 mutex_unlock(&vbq->vb_lock);
380
381 return err;
382}
383
384/*
385 * This routine is called from interrupt context when a scatter-gather DMA
386 * transfer of a videobuf_buffer completes.
387 */
static void omap24xxcam_vbq_complete(struct omap24xxcam_sgdma *sgdma,
				     u32 csr, void *arg)
{
	struct omap24xxcam_device *cam =
		container_of(sgdma, struct omap24xxcam_device, sgdma);
	struct omap24xxcam_fh *fh = cam->streaming->private_data;
	struct videobuf_buffer *vb = (struct videobuf_buffer *)arg;
	/* Any of these CSR bits means the transfer went wrong. */
	const u32 csr_error = CAMDMA_CSR_MISALIGNED_ERR
		| CAMDMA_CSR_SUPERVISOR_ERR | CAMDMA_CSR_SECURE_ERR
		| CAMDMA_CSR_TRANS_ERR | CAMDMA_CSR_DROP;
	unsigned long flags;

	/* Last transfer out of the queue: stop the camera core. */
	spin_lock_irqsave(&cam->core_enable_disable_lock, flags);
	if (--cam->sgdma_in_queue == 0)
		omap24xxcam_core_disable(cam);
	spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);

	v4l2_get_timestamp(&vb->ts);
	/* Two fields per frame; the count is per-filehandle. */
	vb->field_count = atomic_add_return(2, &fh->field_count);
	if (csr & csr_error) {
		vb->state = VIDEOBUF_ERROR;
		/* Reset only if a reset is not already in progress. */
		if (!atomic_read(&fh->cam->in_reset)) {
			dev_dbg(cam->dev, "resetting camera, csr 0x%x\n", csr);
			omap24xxcam_reset(cam);
		}
	} else
		vb->state = VIDEOBUF_DONE;
	/* Wake anyone sleeping on this buffer (dqbuf / poll). */
	wake_up(&vb->done);
}
417
/*
 * videobuf buf_release callback: wait until the buffer is no longer in
 * use, undo its DMA mapping and return it to the NEEDS_INIT state.
 */
static void omap24xxcam_vbq_release(struct videobuf_queue *vbq,
				    struct videobuf_buffer *vb)
{
	struct videobuf_dmabuf *dma = videobuf_to_dma(vb);

	/* wait for buffer, especially to get out of the sgdma queue */
	videobuf_waiton(vbq, vb, 0, 0);
	if (vb->memory == V4L2_MEMORY_MMAP) {
		/*
		 * Kernel bounce buffer: we created the mapping
		 * ourselves (omap24xxcam_dma_iolock), so only the
		 * mapping is undone here.
		 */
		dma_unmap_sg(vbq->dev, dma->sglist, dma->sglen,
			     dma->direction);
		dma->direction = DMA_NONE;
	} else {
		/* Userspace buffer: videobuf owns map and pages. */
		videobuf_dma_unmap(vbq->dev, videobuf_to_dma(vb));
		videobuf_dma_free(videobuf_to_dma(vb));
	}

	vb->state = VIDEOBUF_NEEDS_INIT;
}
436
437/*
438 * Limit the number of available kernel image capture buffers based on the
439 * number requested, the currently selected image size, and the maximum
440 * amount of memory permitted for kernel capture buffers.
441 */
442static int omap24xxcam_vbq_setup(struct videobuf_queue *vbq, unsigned int *cnt,
443 unsigned int *size)
444{
445 struct omap24xxcam_fh *fh = vbq->priv_data;
446
447 if (*cnt <= 0)
448 *cnt = VIDEO_MAX_FRAME; /* supply a default number of buffers */
449
450 if (*cnt > VIDEO_MAX_FRAME)
451 *cnt = VIDEO_MAX_FRAME;
452
453 *size = fh->pix.sizeimage;
454
455 /* accessing fh->cam->capture_mem is ok, it's constant */
456 if (*size * *cnt > fh->cam->capture_mem)
457 *cnt = fh->cam->capture_mem / *size;
458
459 return 0;
460}
461
462static int omap24xxcam_dma_iolock(struct videobuf_queue *vbq,
463 struct videobuf_dmabuf *dma)
464{
465 int err = 0;
466
467 dma->direction = PCI_DMA_FROMDEVICE;
468 if (!dma_map_sg(vbq->dev, dma->sglist, dma->sglen, dma->direction)) {
469 kfree(dma->sglist);
470 dma->sglist = NULL;
471 dma->sglen = 0;
472 err = -EIO;
473 }
474
475 return err;
476}
477
/*
 * videobuf buf_prepare callback: check the buffer against the current
 * image size and make sure it is iolocked/DMA-mapped before it may be
 * queued. Buffers in the NEEDS_INIT state get (re)mapped here.
 */
static int omap24xxcam_vbq_prepare(struct videobuf_queue *vbq,
				   struct videobuf_buffer *vb,
				   enum v4l2_field field)
{
	struct omap24xxcam_fh *fh = vbq->priv_data;
	int err = 0;

	/*
	 * Accessing pix here is okay since it's constant while
	 * streaming is on (and we only get called then).
	 */
	if (vb->baddr) {
		/* This is a userspace buffer. */
		if (fh->pix.sizeimage > vb->bsize) {
			/* The buffer isn't big enough. */
			err = -EINVAL;
		} else
			vb->size = fh->pix.sizeimage;
	} else {
		if (vb->state != VIDEOBUF_NEEDS_INIT) {
			/*
			 * We have a kernel bounce buffer that has
			 * already been allocated.
			 */
			if (fh->pix.sizeimage > vb->size) {
				/*
				 * The image size has been changed to
				 * a larger size since this buffer was
				 * allocated, so we need to free and
				 * reallocate it.
				 */
				omap24xxcam_vbq_release(vbq, vb);
				vb->size = fh->pix.sizeimage;
			}
		} else {
			/* We need to allocate a new kernel bounce buffer. */
			vb->size = fh->pix.sizeimage;
		}
	}

	if (err)
		return err;

	vb->width = fh->pix.width;
	vb->height = fh->pix.height;
	vb->field = field;

	if (vb->state == VIDEOBUF_NEEDS_INIT) {
		if (vb->memory == V4L2_MEMORY_MMAP)
			/*
			 * we have built the scatter-gather list by ourself so
			 * do the scatter-gather mapping as well
			 */
			err = omap24xxcam_dma_iolock(vbq, videobuf_to_dma(vb));
		else
			err = videobuf_iolock(vbq, vb, NULL);
	}

	/* Release the buffer again if the mapping failed. */
	if (!err)
		vb->state = VIDEOBUF_PREPARED;
	else
		omap24xxcam_vbq_release(vbq, vb);

	return err;
}
543
/*
 * videobuf buf_queue callback: hand the buffer's scatter-gather list
 * to the sgdma layer and enable the camera core when the first
 * transfer enters an empty queue (unless a reset is in progress).
 */
static void omap24xxcam_vbq_queue(struct videobuf_queue *vbq,
				  struct videobuf_buffer *vb)
{
	struct omap24xxcam_fh *fh = vbq->priv_data;
	struct omap24xxcam_device *cam = fh->cam;
	enum videobuf_state state = vb->state;
	unsigned long flags;
	int err;

	/*
	 * FIXME: We're marking the buffer active since we have no
	 * pretty way of marking it active exactly when the
	 * scatter-gather transfer starts.
	 */
	vb->state = VIDEOBUF_ACTIVE;

	err = omap24xxcam_sgdma_queue(&fh->cam->sgdma,
				      videobuf_to_dma(vb)->sglist,
				      videobuf_to_dma(vb)->sglen, vb->size,
				      omap24xxcam_vbq_complete, vb);

	if (!err) {
		/* First buffer in the queue: start the core. */
		spin_lock_irqsave(&cam->core_enable_disable_lock, flags);
		if (++cam->sgdma_in_queue == 1
		    && !atomic_read(&cam->in_reset))
			omap24xxcam_core_enable(cam);
		spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
	} else {
		/*
		 * Oops. We're not supposed to get any errors here.
		 * The only way we could get an error is if we ran out
		 * of scatter-gather DMA slots, but we are supposed to
		 * have at least as many scatter-gather DMA slots as
		 * video buffers so that can't happen.
		 */
		dev_err(cam->dev, "failed to queue a video buffer for dma!\n");
		dev_err(cam->dev, "likely a bug in the driver!\n");
		/* Restore the state the buffer had before we touched it. */
		vb->state = state;
	}
}
584
/* videobuf callbacks for the capture queue. */
static struct videobuf_queue_ops omap24xxcam_vbq_ops = {
	.buf_setup = omap24xxcam_vbq_setup,
	.buf_prepare = omap24xxcam_vbq_prepare,
	.buf_queue = omap24xxcam_vbq_queue,
	.buf_release = omap24xxcam_vbq_release,
};
591
592/*
593 *
594 * OMAP main camera system
595 *
596 */
597
598/*
599 * Reset camera block to power-on state.
600 */
601static void omap24xxcam_poweron_reset(struct omap24xxcam_device *cam)
602{
603 int max_loop = RESET_TIMEOUT_NS;
604
605 /* Reset whole camera subsystem */
606 omap24xxcam_reg_out(cam->mmio_base,
607 CAM_SYSCONFIG,
608 CAM_SYSCONFIG_SOFTRESET);
609
610 /* Wait till it's finished */
611 while (!(omap24xxcam_reg_in(cam->mmio_base, CAM_SYSSTATUS)
612 & CAM_SYSSTATUS_RESETDONE)
613 && --max_loop) {
614 ndelay(1);
615 }
616
617 if (!(omap24xxcam_reg_in(cam->mmio_base, CAM_SYSSTATUS)
618 & CAM_SYSSTATUS_RESETDONE))
619 dev_err(cam->dev, "camera soft reset timeout\n");
620}
621
622/*
623 * (Re)initialise the camera block.
624 */
static void omap24xxcam_hwinit(struct omap24xxcam_device *cam)
{
	/* Start from a clean power-on state. */
	omap24xxcam_poweron_reset(cam);

	/* set the camera subsystem autoidle bit */
	omap24xxcam_reg_out(cam->mmio_base, CAM_SYSCONFIG,
			    CAM_SYSCONFIG_AUTOIDLE);

	/* set the camera MMU autoidle bit */
	omap24xxcam_reg_out(cam->mmio_base,
			    CAMMMU_REG_OFFSET + CAMMMU_SYSCONFIG,
			    CAMMMU_SYSCONFIG_AUTOIDLE);

	/* Bring up the camera core, then its DMA engine. */
	omap24xxcam_core_hwinit(cam);

	omap24xxcam_dma_hwinit(&cam->sgdma.dma);
}
642
643/*
644 * Callback for dma transfer stalling.
645 */
646static void omap24xxcam_stalled_dma_reset(unsigned long data)
647{
648 struct omap24xxcam_device *cam = (struct omap24xxcam_device *)data;
649
650 if (!atomic_read(&cam->in_reset)) {
651 dev_dbg(cam->dev, "dma stalled, resetting camera\n");
652 omap24xxcam_reset(cam);
653 }
654}
655
656/*
657 * Stop capture. Mark we're doing a reset, stop DMA transfers and
658 * core. (No new scatter-gather transfers will be queued whilst
659 * in_reset is non-zero.)
660 *
661 * If omap24xxcam_capture_stop is called from several places at
662 * once, only the first call will have an effect. Similarly, the last
663 * call omap24xxcam_streaming_cont will have effect.
664 *
665 * Serialisation is ensured by using cam->core_enable_disable_lock.
666 */
static void omap24xxcam_capture_stop(struct omap24xxcam_device *cam)
{
	unsigned long flags;

	spin_lock_irqsave(&cam->core_enable_disable_lock, flags);

	/* Only the first (outermost) caller actually stops capture. */
	if (atomic_inc_return(&cam->in_reset) != 1) {
		spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
		return;
	}

	omap24xxcam_core_disable(cam);

	spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);

	/* Wait for in-flight scatter-gather transfers to drain. */
	omap24xxcam_sgdma_sync(&cam->sgdma);
}
684
685/*
686 * Reset and continue streaming.
687 *
688 * Note: Resetting the camera FIFO via the CC_RST bit in the CC_CTRL
689 * register is supposed to be sufficient to recover from a camera
690 * interface error, but it doesn't seem to be enough. If we only do
691 * that then subsequent image captures are out of sync by either one
692 * or two times DMA_THRESHOLD bytes. Resetting and re-initializing the
693 * entire camera subsystem prevents the problem with frame
694 * synchronization.
695 */
static void omap24xxcam_capture_cont(struct omap24xxcam_device *cam)
{
	unsigned long flags;

	spin_lock_irqsave(&cam->core_enable_disable_lock, flags);

	/* Only the last (outermost) stop/cont pair restarts capture. */
	if (atomic_read(&cam->in_reset) != 1)
		goto out;

	/* Full re-init; see the comment above on why CC_RST alone fails. */
	omap24xxcam_hwinit(cam);

	omap24xxcam_sensor_if_enable(cam);

	/* Re-queue any pending scatter-gather transfers... */
	omap24xxcam_sgdma_process(&cam->sgdma);

	/* ...and re-enable the core if there is work queued. */
	if (cam->sgdma_in_queue)
		omap24xxcam_core_enable(cam);

out:
	atomic_dec(&cam->in_reset);
	spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
}
718
719static ssize_t
720omap24xxcam_streaming_show(struct device *dev, struct device_attribute *attr,
721 char *buf)
722{
723 struct omap24xxcam_device *cam = dev_get_drvdata(dev);
724
725 return sprintf(buf, "%s\n", cam->streaming ? "active" : "inactive");
726}
727static DEVICE_ATTR(streaming, S_IRUGO, omap24xxcam_streaming_show, NULL);
728
729/*
730 * Stop capture and restart it. I.e. reset the camera during use.
731 */
static void omap24xxcam_reset(struct omap24xxcam_device *cam)
{
	/* Stop capture, then re-initialise the hardware and restart. */
	omap24xxcam_capture_stop(cam);
	omap24xxcam_capture_cont(cam);
}
737
738/*
739 * The main interrupt handler.
740 */
741static irqreturn_t omap24xxcam_isr(int irq, void *arg)
742{
743 struct omap24xxcam_device *cam = (struct omap24xxcam_device *)arg;
744 u32 irqstatus;
745 unsigned int irqhandled = 0;
746
747 irqstatus = omap24xxcam_reg_in(cam->mmio_base, CAM_IRQSTATUS);
748
749 if (irqstatus &
750 (CAM_IRQSTATUS_DMA_IRQ2 | CAM_IRQSTATUS_DMA_IRQ1
751 | CAM_IRQSTATUS_DMA_IRQ0)) {
752 omap24xxcam_dma_isr(&cam->sgdma.dma);
753 irqhandled = 1;
754 }
755 if (irqstatus & CAM_IRQSTATUS_CC_IRQ) {
756 omap24xxcam_core_isr(cam);
757 irqhandled = 1;
758 }
759 if (irqstatus & CAM_IRQSTATUS_MMU_IRQ)
760 dev_err(cam->dev, "unhandled camera MMU interrupt!\n");
761
762 return IRQ_RETVAL(irqhandled);
763}
764
765/*
766 *
767 * Sensor handling.
768 *
769 */
770
771/*
772 * Enable the external sensor interface. Try to negotiate interface
773 * parameters with the sensor and start using the new ones. The calls
774 * to sensor_if_enable and sensor_if_disable need not to be balanced.
775 */
static int omap24xxcam_sensor_if_enable(struct omap24xxcam_device *cam)
{
	int rval;
	struct v4l2_ifparm p;

	/* Ask the sensor driver for its interface parameters. */
	rval = vidioc_int_g_ifparm(cam->sdev, &p);
	if (rval) {
		dev_err(cam->dev, "vidioc_int_g_ifparm failed with %d\n", rval);
		return rval;
	}

	cam->if_type = p.if_type;

	cam->cc_ctrl = CC_CTRL_CC_EN;

	switch (p.if_type) {
	case V4L2_IF_TYPE_BT656:
		/* Translate the BT.656 parameters into CC_CTRL bits. */
		if (p.u.bt656.frame_start_on_rising_vs)
			cam->cc_ctrl |= CC_CTRL_NOBT_SYNCHRO;
		if (p.u.bt656.bt_sync_correct)
			cam->cc_ctrl |= CC_CTRL_BT_CORRECT;
		if (p.u.bt656.swap)
			cam->cc_ctrl |= CC_CTRL_PAR_ORDERCAM;
		if (p.u.bt656.latch_clk_inv)
			cam->cc_ctrl |= CC_CTRL_PAR_CLK_POL;
		if (p.u.bt656.nobt_hs_inv)
			cam->cc_ctrl |= CC_CTRL_NOBT_HS_POL;
		if (p.u.bt656.nobt_vs_inv)
			cam->cc_ctrl |= CC_CTRL_NOBT_VS_POL;

		/* Select the parallel bus mode / width. */
		switch (p.u.bt656.mode) {
		case V4L2_IF_TYPE_BT656_MODE_NOBT_8BIT:
			cam->cc_ctrl |= CC_CTRL_PAR_MODE_NOBT8;
			break;
		case V4L2_IF_TYPE_BT656_MODE_NOBT_10BIT:
			cam->cc_ctrl |= CC_CTRL_PAR_MODE_NOBT10;
			break;
		case V4L2_IF_TYPE_BT656_MODE_NOBT_12BIT:
			cam->cc_ctrl |= CC_CTRL_PAR_MODE_NOBT12;
			break;
		case V4L2_IF_TYPE_BT656_MODE_BT_8BIT:
			cam->cc_ctrl |= CC_CTRL_PAR_MODE_BT8;
			break;
		case V4L2_IF_TYPE_BT656_MODE_BT_10BIT:
			cam->cc_ctrl |= CC_CTRL_PAR_MODE_BT10;
			break;
		default:
			dev_err(cam->dev,
				"bt656 interface mode %d not supported\n",
				p.u.bt656.mode);
			return -EINVAL;
		}
		/*
		 * The clock rate that the sensor wants has changed.
		 * We have to adjust the xclk from OMAP 2 side to
		 * match the sensor's wish as closely as possible.
		 */
		if (p.u.bt656.clock_curr != cam->if_u.bt656.xclk) {
			u32 xclk = p.u.bt656.clock_curr;
			u32 divisor;

			if (xclk == 0)
				return -EINVAL;

			if (xclk > CAM_MCLK)
				xclk = CAM_MCLK;

			/*
			 * Pick the smallest divisor of CAM_MCLK that
			 * does not exceed the requested rate, then
			 * back off one step if that would fall below
			 * the sensor's minimum. Hardware limit: 30.
			 */
			divisor = CAM_MCLK / xclk;
			if (divisor * xclk < CAM_MCLK)
				divisor++;
			if (CAM_MCLK / divisor < p.u.bt656.clock_min
			    && divisor > 1)
				divisor--;
			if (divisor > 30)
				divisor = 30;

			xclk = CAM_MCLK / divisor;

			/* Fail if no divisor satisfies the sensor's range. */
			if (xclk < p.u.bt656.clock_min
			    || xclk > p.u.bt656.clock_max)
				return -EINVAL;

			cam->if_u.bt656.xclk = xclk;
		}
		omap24xxcam_core_xclk_set(cam, cam->if_u.bt656.xclk);
		break;
	default:
		/* FIXME: how about other interfaces? */
		dev_err(cam->dev, "interface type %d not supported\n",
			p.if_type);
		return -EINVAL;
	}

	return 0;
}
871
872static void omap24xxcam_sensor_if_disable(const struct omap24xxcam_device *cam)
873{
874 switch (cam->if_type) {
875 case V4L2_IF_TYPE_BT656:
876 omap24xxcam_core_xclk_set(cam, 0);
877 break;
878 }
879}
880
881/*
882 * Initialise the sensor hardware.
883 */
static int omap24xxcam_sensor_init(struct omap24xxcam_device *cam)
{
	int err = 0;
	struct v4l2_int_device *sdev = cam->sdev;

	/* Clocks and interface must be up before talking to the sensor. */
	omap24xxcam_clock_on(cam);
	err = omap24xxcam_sensor_if_enable(cam);
	if (err) {
		dev_err(cam->dev, "sensor interface could not be enabled at "
			"initialisation, %d\n", err);
		/* Treat the sensor as absent from now on. */
		cam->sdev = NULL;
		goto out;
	}

	/* power up sensor during sensor initialization */
	vidioc_int_s_power(sdev, 1);

	err = vidioc_int_dev_init(sdev);
	if (err) {
		dev_err(cam->dev, "cannot initialize sensor, error %d\n", err);
		/* Sensor init failed --- it's nonexistent to us! */
		cam->sdev = NULL;
		goto out;
	}

	dev_info(cam->dev, "sensor is %s\n", sdev->name);

out:
	/* Power everything back down; capture is not running yet. */
	omap24xxcam_sensor_if_disable(cam);
	omap24xxcam_clock_off(cam);

	vidioc_int_s_power(sdev, 0);

	return err;
}
919
920static void omap24xxcam_sensor_exit(struct omap24xxcam_device *cam)
921{
922 if (cam->sdev)
923 vidioc_int_dev_exit(cam->sdev);
924}
925
/* Power down the sensor: interface off, clocks off, then sensor power. */
static void omap24xxcam_sensor_disable(struct omap24xxcam_device *cam)
{
	omap24xxcam_sensor_if_disable(cam);
	omap24xxcam_clock_off(cam);
	vidioc_int_s_power(cam->sdev, 0);
}
932
933/*
934 * Power-up and configure camera sensor. It's ready for capturing now.
935 */
936static int omap24xxcam_sensor_enable(struct omap24xxcam_device *cam)
937{
938 int rval;
939
940 omap24xxcam_clock_on(cam);
941
942 omap24xxcam_sensor_if_enable(cam);
943
944 rval = vidioc_int_s_power(cam->sdev, 1);
945 if (rval)
946 goto out;
947
948 rval = vidioc_int_init(cam->sdev);
949 if (rval)
950 goto out;
951
952 return 0;
953
954out:
955 omap24xxcam_sensor_disable(cam);
956
957 return rval;
958}
959
/*
 * Workqueue handler: reset the sensor while capture is stopped,
 * preferring the slave's own reset op and falling back to a full
 * power cycle. Does nothing while resets are disabled (streamoff /
 * release bump cam->reset_disable).
 */
static void omap24xxcam_sensor_reset_work(struct work_struct *work)
{
	struct omap24xxcam_device *cam =
		container_of(work, struct omap24xxcam_device,
			     sensor_reset_work);

	if (atomic_read(&cam->reset_disable))
		return;

	omap24xxcam_capture_stop(cam);

	if (vidioc_int_reset(cam->sdev) == 0) {
		vidioc_int_init(cam->sdev);
	} else {
		/* Can't reset it by vidioc_int_reset. */
		omap24xxcam_sensor_disable(cam);
		omap24xxcam_sensor_enable(cam);
	}

	omap24xxcam_capture_cont(cam);
}
981
982/*
983 *
984 * IOCTL interface.
985 *
986 */
987
/* VIDIOC_QUERYCAP: identify the driver; capture + streaming I/O. */
static int vidioc_querycap(struct file *file, void *fh,
			   struct v4l2_capability *cap)
{
	struct omap24xxcam_fh *ofh = fh;
	struct omap24xxcam_device *cam = ofh->cam;

	strlcpy(cap->driver, CAM_NAME, sizeof(cap->driver));
	strlcpy(cap->card, cam->vfd->name, sizeof(cap->card));
	cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;

	return 0;
}
1000
1001static int vidioc_enum_fmt_vid_cap(struct file *file, void *fh,
1002 struct v4l2_fmtdesc *f)
1003{
1004 struct omap24xxcam_fh *ofh = fh;
1005 struct omap24xxcam_device *cam = ofh->cam;
1006
1007 return vidioc_int_enum_fmt_cap(cam->sdev, f);
1008}
1009
1010static int vidioc_g_fmt_vid_cap(struct file *file, void *fh,
1011 struct v4l2_format *f)
1012{
1013 struct omap24xxcam_fh *ofh = fh;
1014 struct omap24xxcam_device *cam = ofh->cam;
1015 int rval;
1016
1017 mutex_lock(&cam->mutex);
1018 rval = vidioc_int_g_fmt_cap(cam->sdev, f);
1019 mutex_unlock(&cam->mutex);
1020
1021 return rval;
1022}
1023
/*
 * VIDIOC_S_FMT: set the capture format on the sensor (rejected with
 * -EBUSY while streaming) and cache the result in the file handle.
 * The caller's struct is always refilled with the sensor's current
 * format, even on failure.
 */
static int vidioc_s_fmt_vid_cap(struct file *file, void *fh,
				struct v4l2_format *f)
{
	struct omap24xxcam_fh *ofh = fh;
	struct omap24xxcam_device *cam = ofh->cam;
	int rval;

	mutex_lock(&cam->mutex);
	if (cam->streaming) {
		rval = -EBUSY;
		goto out;
	}

	rval = vidioc_int_s_fmt_cap(cam->sdev, f);

out:
	mutex_unlock(&cam->mutex);

	if (!rval) {
		/* Cache the accepted pixel format for buffer sizing. */
		mutex_lock(&ofh->vbq.vb_lock);
		ofh->pix = f->fmt.pix;
		mutex_unlock(&ofh->vbq.vb_lock);
	}

	/* Report back whatever format is actually in effect. */
	memset(f, 0, sizeof(*f));
	vidioc_g_fmt_vid_cap(file, fh, f);

	return rval;
}
1053
1054static int vidioc_try_fmt_vid_cap(struct file *file, void *fh,
1055 struct v4l2_format *f)
1056{
1057 struct omap24xxcam_fh *ofh = fh;
1058 struct omap24xxcam_device *cam = ofh->cam;
1059 int rval;
1060
1061 mutex_lock(&cam->mutex);
1062 rval = vidioc_int_try_fmt_cap(cam->sdev, f);
1063 mutex_unlock(&cam->mutex);
1064
1065 return rval;
1066}
1067
/*
 * VIDIOC_REQBUFS: free any previous mmap bounce buffers, let videobuf
 * set up the new set, then allocate bounce buffers for mmap queues.
 * Rejected with -EBUSY while streaming.
 */
static int vidioc_reqbufs(struct file *file, void *fh,
			  struct v4l2_requestbuffers *b)
{
	struct omap24xxcam_fh *ofh = fh;
	struct omap24xxcam_device *cam = ofh->cam;
	int rval;

	mutex_lock(&cam->mutex);
	if (cam->streaming) {
		mutex_unlock(&cam->mutex);
		return -EBUSY;
	}

	omap24xxcam_vbq_free_mmap_buffers(&ofh->vbq);
	mutex_unlock(&cam->mutex);

	/* On success, rval is the number of buffers videobuf set up. */
	rval = videobuf_reqbufs(&ofh->vbq, b);

	/*
	 * Either videobuf_reqbufs failed or the buffers are not
	 * memory-mapped (which would need special attention).
	 */
	if (rval < 0 || b->memory != V4L2_MEMORY_MMAP)
		goto out;

	rval = omap24xxcam_vbq_alloc_mmap_buffers(&ofh->vbq, rval);
	if (rval)
		omap24xxcam_vbq_free_mmap_buffers(&ofh->vbq);

out:
	return rval;
}
1100
1101static int vidioc_querybuf(struct file *file, void *fh,
1102 struct v4l2_buffer *b)
1103{
1104 struct omap24xxcam_fh *ofh = fh;
1105
1106 return videobuf_querybuf(&ofh->vbq, b);
1107}
1108
1109static int vidioc_qbuf(struct file *file, void *fh, struct v4l2_buffer *b)
1110{
1111 struct omap24xxcam_fh *ofh = fh;
1112
1113 return videobuf_qbuf(&ofh->vbq, b);
1114}
1115
/*
 * VIDIOC_DQBUF: dequeue a completed buffer. If the sensor reports that
 * it needs a reset (-EIO), schedule the reset work; in blocking mode
 * the buffer is silently requeued and the dequeue retried so userspace
 * never sees -EIO.
 */
static int vidioc_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
{
	struct omap24xxcam_fh *ofh = fh;
	struct omap24xxcam_device *cam = ofh->cam;
	struct videobuf_buffer *vb;
	int rval;

videobuf_dqbuf_again:
	rval = videobuf_dqbuf(&ofh->vbq, b, file->f_flags & O_NONBLOCK);
	if (rval)
		goto out;

	vb = ofh->vbq.bufs[b->index];

	mutex_lock(&cam->mutex);
	/* _needs_reset returns -EIO if reset is required. */
	rval = vidioc_int_g_needs_reset(cam->sdev, (void *)vb->baddr);
	mutex_unlock(&cam->mutex);
	if (rval == -EIO)
		schedule_work(&cam->sensor_reset_work);
	else
		rval = 0;

out:
	/*
	 * This is a hack. We don't want to show -EIO to the user
	 * space. Requeue the buffer and try again if we're not doing
	 * this in non-blocking mode.
	 */
	if (rval == -EIO) {
		videobuf_qbuf(&ofh->vbq, b);
		if (!(file->f_flags & O_NONBLOCK))
			goto videobuf_dqbuf_again;
		/*
		 * We don't have a videobuf_buffer now --- maybe next
		 * time...
		 */
		rval = -EAGAIN;
	}

	return rval;
}
1158
/*
 * VIDIOC_STREAMON: enable the sensor interface and start streaming on
 * this file handle. Only one handle may stream at a time (-EBUSY
 * otherwise); cam->streaming records the owning file.
 */
static int vidioc_streamon(struct file *file, void *fh, enum v4l2_buf_type i)
{
	struct omap24xxcam_fh *ofh = fh;
	struct omap24xxcam_device *cam = ofh->cam;
	int rval;

	mutex_lock(&cam->mutex);
	if (cam->streaming) {
		rval = -EBUSY;
		goto out;
	}

	rval = omap24xxcam_sensor_if_enable(cam);
	if (rval) {
		dev_dbg(cam->dev, "vidioc_int_g_ifparm failed\n");
		goto out;
	}

	rval = videobuf_streamon(&ofh->vbq);
	if (!rval) {
		cam->streaming = file;
		/* Tell sysfs pollers the "streaming" attribute changed. */
		sysfs_notify(&cam->dev->kobj, NULL, "streaming");
	}

out:
	mutex_unlock(&cam->mutex);

	return rval;
}
1188
/*
 * VIDIOC_STREAMOFF: stop streaming on this handle. Sensor resets are
 * temporarily disabled (and any pending reset work flushed) so a reset
 * cannot race with the teardown.
 */
static int vidioc_streamoff(struct file *file, void *fh, enum v4l2_buf_type i)
{
	struct omap24xxcam_fh *ofh = fh;
	struct omap24xxcam_device *cam = ofh->cam;
	struct videobuf_queue *q = &ofh->vbq;
	int rval;

	atomic_inc(&cam->reset_disable);

	flush_work(&cam->sensor_reset_work);

	rval = videobuf_streamoff(q);
	if (!rval) {
		mutex_lock(&cam->mutex);
		cam->streaming = NULL;
		mutex_unlock(&cam->mutex);
		/* Tell sysfs pollers the "streaming" attribute changed. */
		sysfs_notify(&cam->dev->kobj, NULL, "streaming");
	}

	atomic_dec(&cam->reset_disable);

	return rval;
}
1212
1213static int vidioc_enum_input(struct file *file, void *fh,
1214 struct v4l2_input *inp)
1215{
1216 if (inp->index > 0)
1217 return -EINVAL;
1218
1219 strlcpy(inp->name, "camera", sizeof(inp->name));
1220 inp->type = V4L2_INPUT_TYPE_CAMERA;
1221
1222 return 0;
1223}
1224
/* VIDIOC_G_INPUT: input 0 is the only one, so it is always selected. */
static int vidioc_g_input(struct file *file, void *fh, unsigned int *i)
{
	*i = 0;
	return 0;
}
1231
1232static int vidioc_s_input(struct file *file, void *fh, unsigned int i)
1233{
1234 if (i > 0)
1235 return -EINVAL;
1236
1237 return 0;
1238}
1239
1240static int vidioc_queryctrl(struct file *file, void *fh,
1241 struct v4l2_queryctrl *a)
1242{
1243 struct omap24xxcam_fh *ofh = fh;
1244 struct omap24xxcam_device *cam = ofh->cam;
1245
1246 return vidioc_int_queryctrl(cam->sdev, a);
1247}
1248
1249static int vidioc_g_ctrl(struct file *file, void *fh,
1250 struct v4l2_control *a)
1251{
1252 struct omap24xxcam_fh *ofh = fh;
1253 struct omap24xxcam_device *cam = ofh->cam;
1254 int rval;
1255
1256 mutex_lock(&cam->mutex);
1257 rval = vidioc_int_g_ctrl(cam->sdev, a);
1258 mutex_unlock(&cam->mutex);
1259
1260 return rval;
1261}
1262
1263static int vidioc_s_ctrl(struct file *file, void *fh,
1264 struct v4l2_control *a)
1265{
1266 struct omap24xxcam_fh *ofh = fh;
1267 struct omap24xxcam_device *cam = ofh->cam;
1268 int rval;
1269
1270 mutex_lock(&cam->mutex);
1271 rval = vidioc_int_s_ctrl(cam->sdev, a);
1272 mutex_unlock(&cam->mutex);
1273
1274 return rval;
1275}
1276
1277static int vidioc_g_parm(struct file *file, void *fh,
1278 struct v4l2_streamparm *a) {
1279 struct omap24xxcam_fh *ofh = fh;
1280 struct omap24xxcam_device *cam = ofh->cam;
1281 int rval;
1282
1283 mutex_lock(&cam->mutex);
1284 rval = vidioc_int_g_parm(cam->sdev, a);
1285 mutex_unlock(&cam->mutex);
1286
1287 return rval;
1288}
1289
/*
 * VIDIOC_S_PARM: set the streaming parameters on the sensor (rejected
 * with -EBUSY while streaming). The old parameters are saved first and
 * restored if the sensor interface cannot be enabled with the new ones.
 */
static int vidioc_s_parm(struct file *file, void *fh,
			 struct v4l2_streamparm *a)
{
	struct omap24xxcam_fh *ofh = fh;
	struct omap24xxcam_device *cam = ofh->cam;
	struct v4l2_streamparm old_streamparm;
	int rval;

	mutex_lock(&cam->mutex);
	if (cam->streaming) {
		rval = -EBUSY;
		goto out;
	}

	/* Snapshot the current parameters so we can roll back. */
	old_streamparm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	rval = vidioc_int_g_parm(cam->sdev, &old_streamparm);
	if (rval)
		goto out;

	rval = vidioc_int_s_parm(cam->sdev, a);
	if (rval)
		goto out;

	rval = omap24xxcam_sensor_if_enable(cam);
	/*
	 * Revert to old streaming parameters if enabling sensor
	 * interface with the new ones failed.
	 */
	if (rval)
		vidioc_int_s_parm(cam->sdev, &old_streamparm);

out:
	mutex_unlock(&cam->mutex);

	return rval;
}
1326
1327/*
1328 *
1329 * File operations.
1330 *
1331 */
1332
/*
 * poll(): only the streaming file handle may poll; wait on the first
 * buffer in the stream queue and report readability once it is done
 * (or completed with an error).
 */
static unsigned int omap24xxcam_poll(struct file *file,
				     struct poll_table_struct *wait)
{
	struct omap24xxcam_fh *fh = file->private_data;
	struct omap24xxcam_device *cam = fh->cam;
	struct videobuf_buffer *vb;

	mutex_lock(&cam->mutex);
	if (cam->streaming != file) {
		mutex_unlock(&cam->mutex);
		return POLLERR;
	}
	mutex_unlock(&cam->mutex);

	/* Grab the head of the stream queue, if there is one. */
	mutex_lock(&fh->vbq.vb_lock);
	if (list_empty(&fh->vbq.stream)) {
		mutex_unlock(&fh->vbq.vb_lock);
		return POLLERR;
	}
	vb = list_entry(fh->vbq.stream.next, struct videobuf_buffer, stream);
	mutex_unlock(&fh->vbq.vb_lock);

	poll_wait(file, &vb->done, wait);

	if (vb->state == VIDEOBUF_DONE || vb->state == VIDEOBUF_ERROR)
		return POLLIN | POLLRDNORM;

	return 0;
}
1362
/*
 * Map the kernel bounce buffers backing the VMA into userspace. The
 * VMA's pgoff selects the first buffer; consecutive buffers are mapped
 * until their combined size fills the VMA. Rejected while streaming.
 */
static int omap24xxcam_mmap_buffers(struct file *file,
				    struct vm_area_struct *vma)
{
	struct omap24xxcam_fh *fh = file->private_data;
	struct omap24xxcam_device *cam = fh->cam;
	struct videobuf_queue *vbq = &fh->vbq;
	unsigned int first, last, size, i, j;
	int err = 0;

	mutex_lock(&cam->mutex);
	if (cam->streaming) {
		mutex_unlock(&cam->mutex);
		return -EBUSY;
	}
	mutex_unlock(&cam->mutex);
	mutex_lock(&vbq->vb_lock);

	/* look for first buffer to map */
	for (first = 0; first < VIDEO_MAX_FRAME; first++) {
		if (NULL == vbq->bufs[first])
			continue;
		if (V4L2_MEMORY_MMAP != vbq->bufs[first]->memory)
			continue;
		if (vbq->bufs[first]->boff == (vma->vm_pgoff << PAGE_SHIFT))
			break;
	}

	/* look for last buffer to map */
	for (size = 0, last = first; last < VIDEO_MAX_FRAME; last++) {
		if (NULL == vbq->bufs[last])
			continue;
		if (V4L2_MEMORY_MMAP != vbq->bufs[last]->memory)
			continue;
		size += vbq->bufs[last]->bsize;
		if (size == (vma->vm_end - vma->vm_start))
			break;
	}

	/* Remap each buffer's scatterlist chunks into the VMA. */
	size = 0;
	for (i = first; i <= last && i < VIDEO_MAX_FRAME; i++) {
		struct videobuf_dmabuf *dma = videobuf_to_dma(vbq->bufs[i]);

		for (j = 0; j < dma->sglen; j++) {
			err = remap_pfn_range(
				vma, vma->vm_start + size,
				page_to_pfn(sg_page(&dma->sglist[j])),
				sg_dma_len(&dma->sglist[j]), vma->vm_page_prot);
			if (err)
				goto out;
			size += sg_dma_len(&dma->sglist[j]);
		}
	}

out:
	mutex_unlock(&vbq->vb_lock);

	return err;
}
1421
/*
 * mmap(): validate the request via videobuf, mark the mapping
 * noncached, then remap our own bounce buffers into the VMA.
 */
static int omap24xxcam_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct omap24xxcam_fh *fh = file->private_data;
	int rval;

	/* let the video-buf mapper check arguments and set-up structures */
	rval = videobuf_mmap_mapper(&fh->vbq, vma);
	if (rval)
		return rval;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	/* do mapping to our allocated buffers */
	rval = omap24xxcam_mmap_buffers(file, vma);
	/*
	 * In case of error, free vma->vm_private_data allocated by
	 * videobuf_mmap_mapper.
	 */
	if (rval)
		kfree(vma->vm_private_data);

	return rval;
}
1445
/*
 * open(): allocate a per-handle structure, pin the sensor driver's
 * module, and — for the first user — initialise the hardware and
 * enable the sensor. The handle caches the sensor's current pixel
 * format and gets its own videobuf queue.
 */
static int omap24xxcam_open(struct file *file)
{
	struct omap24xxcam_device *cam = omap24xxcam.priv;
	struct omap24xxcam_fh *fh;
	struct v4l2_format format;

	if (!cam || !cam->vfd)
		return -ENODEV;

	fh = kzalloc(sizeof(*fh), GFP_KERNEL);
	if (fh == NULL)
		return -ENOMEM;

	/* Keep the sensor module loaded while we hold this handle. */
	mutex_lock(&cam->mutex);
	if (cam->sdev == NULL || !try_module_get(cam->sdev->module)) {
		mutex_unlock(&cam->mutex);
		goto out_try_module_get;
	}

	/* First user powers up the hardware and the sensor. */
	if (atomic_inc_return(&cam->users) == 1) {
		omap24xxcam_hwinit(cam);
		if (omap24xxcam_sensor_enable(cam)) {
			mutex_unlock(&cam->mutex);
			goto out_omap24xxcam_sensor_enable;
		}
	}
	mutex_unlock(&cam->mutex);

	fh->cam = cam;
	mutex_lock(&cam->mutex);
	vidioc_int_g_fmt_cap(cam->sdev, &format);
	mutex_unlock(&cam->mutex);
	/* FIXME: how about fh->pix when there are more users? */
	fh->pix = format.fmt.pix;

	file->private_data = fh;

	spin_lock_init(&fh->vbq_lock);

	videobuf_queue_sg_init(&fh->vbq, &omap24xxcam_vbq_ops, NULL,
				&fh->vbq_lock, V4L2_BUF_TYPE_VIDEO_CAPTURE,
				V4L2_FIELD_NONE,
				sizeof(struct videobuf_buffer), fh, NULL);

	return 0;

out_omap24xxcam_sensor_enable:
	omap24xxcam_poweron_reset(cam);
	module_put(cam->sdev->module);

out_try_module_get:
	kfree(fh);

	return -ENODEV;
}
1501
/*
 * release(): stop streaming on this handle, free its bounce buffers,
 * and — for the last user — power down the sensor and reset the
 * hardware. Sensor resets are disabled around the teardown so the
 * reset work cannot race with it.
 */
static int omap24xxcam_release(struct file *file)
{
	struct omap24xxcam_fh *fh = file->private_data;
	struct omap24xxcam_device *cam = fh->cam;

	atomic_inc(&cam->reset_disable);

	flush_work(&cam->sensor_reset_work);

	/* stop streaming capture */
	videobuf_streamoff(&fh->vbq);

	mutex_lock(&cam->mutex);
	if (cam->streaming == file) {
		cam->streaming = NULL;
		mutex_unlock(&cam->mutex);
		sysfs_notify(&cam->dev->kobj, NULL, "streaming");
	} else {
		mutex_unlock(&cam->mutex);
	}

	atomic_dec(&cam->reset_disable);

	omap24xxcam_vbq_free_mmap_buffers(&fh->vbq);

	/*
	 * Make sure the reset work we might have scheduled is not
	 * pending! It may be run *only* if we have users. (And it may
	 * not be scheduled anymore since streaming is already
	 * disabled.)
	 */
	flush_work(&cam->sensor_reset_work);

	/* Last user powers everything down. */
	mutex_lock(&cam->mutex);
	if (atomic_dec_return(&cam->users) == 0) {
		omap24xxcam_sensor_disable(cam);
		omap24xxcam_poweron_reset(cam);
	}
	mutex_unlock(&cam->mutex);

	file->private_data = NULL;

	module_put(cam->sdev->module);
	kfree(fh);

	return 0;
}
1549
/* V4L2 file operations for the /dev/video node. */
static struct v4l2_file_operations omap24xxcam_fops = {
	.ioctl = video_ioctl2,
	.poll = omap24xxcam_poll,
	.mmap = omap24xxcam_mmap,
	.open = omap24xxcam_open,
	.release = omap24xxcam_release,
};
1557
1558/*
1559 *
1560 * Power management.
1561 *
1562 */
1563
1564#ifdef CONFIG_PM
/*
 * Suspend: nothing to do without users; otherwise stop capture
 * (unless resets are disabled), power the sensor down and reset the
 * hardware.
 */
static int omap24xxcam_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct omap24xxcam_device *cam = platform_get_drvdata(pdev);

	if (atomic_read(&cam->users) == 0)
		return 0;

	if (!atomic_read(&cam->reset_disable))
		omap24xxcam_capture_stop(cam);

	omap24xxcam_sensor_disable(cam);
	omap24xxcam_poweron_reset(cam);

	return 0;
}
1580
/*
 * Resume: mirror of suspend — re-initialise the hardware, re-enable
 * the sensor and continue capture if it was stopped by suspend.
 */
static int omap24xxcam_resume(struct platform_device *pdev)
{
	struct omap24xxcam_device *cam = platform_get_drvdata(pdev);

	if (atomic_read(&cam->users) == 0)
		return 0;

	omap24xxcam_hwinit(cam);
	omap24xxcam_sensor_enable(cam);

	if (!atomic_read(&cam->reset_disable))
		omap24xxcam_capture_cont(cam);

	return 0;
}
1596#endif /* CONFIG_PM */
1597
/* V4L2 ioctl dispatch table; see the vidioc_* handlers above. */
static const struct v4l2_ioctl_ops omap24xxcam_ioctl_fops = {
	.vidioc_querycap = vidioc_querycap,
	.vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
	.vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
	.vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
	.vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
	.vidioc_reqbufs = vidioc_reqbufs,
	.vidioc_querybuf = vidioc_querybuf,
	.vidioc_qbuf = vidioc_qbuf,
	.vidioc_dqbuf = vidioc_dqbuf,
	.vidioc_streamon = vidioc_streamon,
	.vidioc_streamoff = vidioc_streamoff,
	.vidioc_enum_input = vidioc_enum_input,
	.vidioc_g_input = vidioc_g_input,
	.vidioc_s_input = vidioc_s_input,
	.vidioc_queryctrl = vidioc_queryctrl,
	.vidioc_g_ctrl = vidioc_g_ctrl,
	.vidioc_s_ctrl = vidioc_s_ctrl,
	.vidioc_g_parm = vidioc_g_parm,
	.vidioc_s_parm = vidioc_s_parm,
};
1619
1620/*
1621 *
1622 * Camera device (i.e. /dev/video).
1623 *
1624 */
1625
/*
 * v4l2-int-device master attach callback: bind the slave sensor,
 * create the sysfs "streaming" attribute, allocate and register the
 * video device and initialise the sensor. On any failure the partial
 * setup is torn down via omap24xxcam_device_unregister().
 */
static int omap24xxcam_device_register(struct v4l2_int_device *s)
{
	struct omap24xxcam_device *cam = s->u.slave->master->priv;
	struct video_device *vfd;
	int rval;

	/* We already have a slave. */
	if (cam->sdev)
		return -EBUSY;

	cam->sdev = s;

	if (device_create_file(cam->dev, &dev_attr_streaming) != 0) {
		dev_err(cam->dev, "could not register sysfs entry\n");
		rval = -EBUSY;
		goto err;
	}

	/* initialize the video_device struct */
	vfd = cam->vfd = video_device_alloc();
	if (!vfd) {
		dev_err(cam->dev, "could not allocate video device struct\n");
		rval = -ENOMEM;
		goto err;
	}
	vfd->release = video_device_release;

	vfd->v4l2_dev = &cam->v4l2_dev;

	strlcpy(vfd->name, CAM_NAME, sizeof(vfd->name));
	vfd->fops = &omap24xxcam_fops;
	vfd->ioctl_ops = &omap24xxcam_ioctl_fops;

	/* Hardware must be up to probe the sensor. */
	omap24xxcam_hwinit(cam);

	rval = omap24xxcam_sensor_init(cam);
	if (rval)
		goto err;

	if (video_register_device(vfd, VFL_TYPE_GRABBER, video_nr) < 0) {
		dev_err(cam->dev, "could not register V4L device\n");
		rval = -EBUSY;
		goto err;
	}

	/* Leave the hardware powered down until it is opened. */
	omap24xxcam_poweron_reset(cam);

	dev_info(cam->dev, "registered device %s\n",
		 video_device_node_name(vfd));

	return 0;

err:
	omap24xxcam_device_unregister(s);

	return rval;
}
1683
1684static void omap24xxcam_device_unregister(struct v4l2_int_device *s)
1685{
1686 struct omap24xxcam_device *cam = s->u.slave->master->priv;
1687
1688 omap24xxcam_sensor_exit(cam);
1689
1690 if (cam->vfd) {
1691 if (!video_is_registered(cam->vfd)) {
1692 /*
1693 * The device was never registered, so release the
1694 * video_device struct directly.
1695 */
1696 video_device_release(cam->vfd);
1697 } else {
1698 /*
1699 * The unregister function will release the
1700 * video_device struct as well as
1701 * unregistering it.
1702 */
1703 video_unregister_device(cam->vfd);
1704 }
1705 cam->vfd = NULL;
1706 }
1707
1708 device_remove_file(cam->dev, &dev_attr_streaming);
1709
1710 cam->sdev = NULL;
1711}
1712
/* Master callbacks invoked when a sensor slave attaches/detaches. */
static struct v4l2_int_master omap24xxcam_master = {
	.attach = omap24xxcam_device_register,
	.detach = omap24xxcam_device_unregister,
};
1717
/*
 * The v4l2-int-device master instance for this driver; .priv is set
 * to the per-device structure in omap24xxcam_probe().
 */
static struct v4l2_int_device omap24xxcam = {
	.module = THIS_MODULE,
	.name = CAM_NAME,
	.type = v4l2_int_type_master,
	.u = {
		.master = &omap24xxcam_master
	},
};
1726
1727/*
1728 *
1729 * Driver initialisation and deinitialisation.
1730 *
1731 */
1732
/*
 * Probe: allocate the per-device structure, map the camera register
 * region, hook the interrupt, get the clocks, initialise the SG-DMA
 * bookkeeping, and register the v4l2-int-device master.
 *
 * Error handling relies on omap24xxcam_remove(), which checks each
 * resource before releasing it, so a single "goto err" works at any
 * point after platform_set_drvdata().
 *
 * Returns 0 on success, -ENODEV on any failure.
 */
static int omap24xxcam_probe(struct platform_device *pdev)
{
	struct omap24xxcam_device *cam;
	struct resource *mem;
	int irq;

	cam = kzalloc(sizeof(*cam), GFP_KERNEL);
	if (!cam) {
		dev_err(&pdev->dev, "could not allocate memory\n");
		goto err;
	}

	/* Must happen before the first goto err: remove() reads this. */
	platform_set_drvdata(pdev, cam);

	cam->dev = &pdev->dev;

	if (v4l2_device_register(&pdev->dev, &cam->v4l2_dev)) {
		dev_err(&pdev->dev, "v4l2_device_register failed\n");
		goto err;
	}

	/*
	 * Impose a lower limit on the amount of memory allocated for
	 * capture. We require at least enough memory to double-buffer
	 * QVGA (300KB).
	 */
	if (capture_mem < 320 * 240 * 2 * 2)
		capture_mem = 320 * 240 * 2 * 2;
	cam->capture_mem = capture_mem;

	/* request the mem region for the camera registers */
	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem) {
		dev_err(cam->dev, "no mem resource?\n");
		goto err;
	}
	if (!request_mem_region(mem->start, resource_size(mem), pdev->name)) {
		dev_err(cam->dev,
			"cannot reserve camera register I/O region\n");
		goto err;
	}
	cam->mmio_base_phys = mem->start;
	cam->mmio_size = resource_size(mem);

	/* map the region */
	cam->mmio_base = ioremap_nocache(cam->mmio_base_phys, cam->mmio_size);
	if (!cam->mmio_base) {
		dev_err(cam->dev, "cannot map camera register I/O region\n");
		goto err;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		dev_err(cam->dev, "no irq for camera?\n");
		goto err;
	}

	/* install the interrupt service routine */
	if (request_irq(irq, omap24xxcam_isr, 0, CAM_NAME, cam)) {
		dev_err(cam->dev,
			"could not install interrupt service routine\n");
		goto err;
	}
	/* Recorded only after request_irq() succeeds, so remove() knows
	 * whether to free it. */
	cam->irq = irq;

	if (omap24xxcam_clock_get(cam))
		goto err;

	INIT_WORK(&cam->sensor_reset_work, omap24xxcam_sensor_reset_work);

	mutex_init(&cam->mutex);
	spin_lock_init(&cam->core_enable_disable_lock);

	/* Camera DMA registers live at a fixed offset inside the region. */
	omap24xxcam_sgdma_init(&cam->sgdma,
			       cam->mmio_base + CAMDMA_REG_OFFSET,
			       omap24xxcam_stalled_dma_reset,
			       (unsigned long)cam);

	omap24xxcam.priv = cam;

	if (v4l2_int_device_register(&omap24xxcam))
		goto err;

	return 0;

err:
	/* remove() releases exactly what was initialised above. */
	omap24xxcam_remove(pdev);
	return -ENODEV;
}
1822
1823static int omap24xxcam_remove(struct platform_device *pdev)
1824{
1825 struct omap24xxcam_device *cam = platform_get_drvdata(pdev);
1826
1827 if (!cam)
1828 return 0;
1829
1830 if (omap24xxcam.priv != NULL)
1831 v4l2_int_device_unregister(&omap24xxcam);
1832 omap24xxcam.priv = NULL;
1833
1834 omap24xxcam_clock_put(cam);
1835
1836 if (cam->irq) {
1837 free_irq(cam->irq, cam);
1838 cam->irq = 0;
1839 }
1840
1841 if (cam->mmio_base) {
1842 iounmap((void *)cam->mmio_base);
1843 cam->mmio_base = 0;
1844 }
1845
1846 if (cam->mmio_base_phys) {
1847 release_mem_region(cam->mmio_base_phys, cam->mmio_size);
1848 cam->mmio_base_phys = 0;
1849 }
1850
1851 v4l2_device_unregister(&cam->v4l2_dev);
1852
1853 kfree(cam);
1854
1855 return 0;
1856}
1857
/*
 * Platform driver glue.  The legacy suspend/resume callbacks are
 * compiled in only when power management support is enabled.
 */
static struct platform_driver omap24xxcam_driver = {
	.probe = omap24xxcam_probe,
	.remove = omap24xxcam_remove,
#ifdef CONFIG_PM
	.suspend = omap24xxcam_suspend,
	.resume = omap24xxcam_resume,
#endif
	.driver = {
		.name = CAM_NAME,
		.owner = THIS_MODULE,
	},
};
1870
/* Standard module init/exit generated from the platform driver. */
module_platform_driver(omap24xxcam_driver);

MODULE_AUTHOR("Sakari Ailus <sakari.ailus@nokia.com>");
MODULE_DESCRIPTION("OMAP24xx Video for Linux camera driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(OMAP24XXCAM_VERSION);
/* Load-time tunables (read-only via sysfs: permissions are 0). */
module_param(video_nr, int, 0);
MODULE_PARM_DESC(video_nr,
		 "Minor number for video device (-1 ==> auto assign)");
module_param(capture_mem, int, 0);
MODULE_PARM_DESC(capture_mem, "Maximum amount of memory for capture "
		 "buffers (default 4800kiB)");
diff --git a/drivers/staging/media/omap24xx/omap24xxcam.h b/drivers/staging/media/omap24xx/omap24xxcam.h
deleted file mode 100644
index 233bb40cfec3..000000000000
--- a/drivers/staging/media/omap24xx/omap24xxcam.h
+++ /dev/null
@@ -1,596 +0,0 @@
1/*
2 * drivers/media/platform/omap24xxcam.h
3 *
4 * Copyright (C) 2004 MontaVista Software, Inc.
5 * Copyright (C) 2004 Texas Instruments.
6 * Copyright (C) 2007 Nokia Corporation.
7 *
8 * Contact: Sakari Ailus <sakari.ailus@nokia.com>
9 *
10 * Based on code from Andy Lowe <source@mvista.com>.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * version 2 as published by the Free Software Foundation.
15 *
16 * This program is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
24 * 02110-1301 USA
25 */
26
27#ifndef OMAP24XXCAM_H
28#define OMAP24XXCAM_H
29
30#include <media/videobuf-dma-sg.h>
31#include <media/v4l2-device.h>
32#include "v4l2-int-device.h"
33
34/*
35 *
36 * General driver related definitions.
37 *
38 */
39
40#define CAM_NAME "omap24xxcam"
41
42#define CAM_MCLK 96000000
43
44/* number of bytes transferred per DMA request */
45#define DMA_THRESHOLD 32
46
47/*
48 * NUM_CAMDMA_CHANNELS is the number of logical channels provided by
49 * the camera DMA controller.
50 */
51#define NUM_CAMDMA_CHANNELS 4
52
53/*
54 * NUM_SG_DMA is the number of scatter-gather DMA transfers that can
55 * be queued. (We don't have any overlay sglists now.)
56 */
57#define NUM_SG_DMA (VIDEO_MAX_FRAME)
58
59/*
60 *
61 * Register definitions.
62 *
63 */
64
65/* subsystem register block offsets */
66#define CC_REG_OFFSET 0x00000400
67#define CAMDMA_REG_OFFSET 0x00000800
68#define CAMMMU_REG_OFFSET 0x00000C00
69
70/* define camera subsystem register offsets */
71#define CAM_REVISION 0x000
72#define CAM_SYSCONFIG 0x010
73#define CAM_SYSSTATUS 0x014
74#define CAM_IRQSTATUS 0x018
75#define CAM_GPO 0x040
76#define CAM_GPI 0x050
77
78/* define camera core register offsets */
79#define CC_REVISION 0x000
80#define CC_SYSCONFIG 0x010
81#define CC_SYSSTATUS 0x014
82#define CC_IRQSTATUS 0x018
83#define CC_IRQENABLE 0x01C
84#define CC_CTRL 0x040
85#define CC_CTRL_DMA 0x044
86#define CC_CTRL_XCLK 0x048
87#define CC_FIFODATA 0x04C
88#define CC_TEST 0x050
89#define CC_GENPAR 0x054
90#define CC_CCPFSCR 0x058
91#define CC_CCPFECR 0x05C
92#define CC_CCPLSCR 0x060
93#define CC_CCPLECR 0x064
94#define CC_CCPDFR 0x068
95
96/* define camera dma register offsets */
97#define CAMDMA_REVISION 0x000
98#define CAMDMA_IRQSTATUS_L0 0x008
99#define CAMDMA_IRQSTATUS_L1 0x00C
100#define CAMDMA_IRQSTATUS_L2 0x010
101#define CAMDMA_IRQSTATUS_L3 0x014
102#define CAMDMA_IRQENABLE_L0 0x018
103#define CAMDMA_IRQENABLE_L1 0x01C
104#define CAMDMA_IRQENABLE_L2 0x020
105#define CAMDMA_IRQENABLE_L3 0x024
106#define CAMDMA_SYSSTATUS 0x028
107#define CAMDMA_OCP_SYSCONFIG 0x02C
108#define CAMDMA_CAPS_0 0x064
109#define CAMDMA_CAPS_2 0x06C
110#define CAMDMA_CAPS_3 0x070
111#define CAMDMA_CAPS_4 0x074
112#define CAMDMA_GCR 0x078
113#define CAMDMA_CCR(n) (0x080 + (n)*0x60)
114#define CAMDMA_CLNK_CTRL(n) (0x084 + (n)*0x60)
115#define CAMDMA_CICR(n) (0x088 + (n)*0x60)
116#define CAMDMA_CSR(n) (0x08C + (n)*0x60)
117#define CAMDMA_CSDP(n) (0x090 + (n)*0x60)
118#define CAMDMA_CEN(n) (0x094 + (n)*0x60)
119#define CAMDMA_CFN(n) (0x098 + (n)*0x60)
120#define CAMDMA_CSSA(n) (0x09C + (n)*0x60)
121#define CAMDMA_CDSA(n) (0x0A0 + (n)*0x60)
122#define CAMDMA_CSEI(n) (0x0A4 + (n)*0x60)
123#define CAMDMA_CSFI(n) (0x0A8 + (n)*0x60)
124#define CAMDMA_CDEI(n) (0x0AC + (n)*0x60)
125#define CAMDMA_CDFI(n) (0x0B0 + (n)*0x60)
126#define CAMDMA_CSAC(n) (0x0B4 + (n)*0x60)
127#define CAMDMA_CDAC(n) (0x0B8 + (n)*0x60)
128#define CAMDMA_CCEN(n) (0x0BC + (n)*0x60)
129#define CAMDMA_CCFN(n) (0x0C0 + (n)*0x60)
130#define CAMDMA_COLOR(n) (0x0C4 + (n)*0x60)
131
132/* define camera mmu register offsets */
133#define CAMMMU_REVISION 0x000
134#define CAMMMU_SYSCONFIG 0x010
135#define CAMMMU_SYSSTATUS 0x014
136#define CAMMMU_IRQSTATUS 0x018
137#define CAMMMU_IRQENABLE 0x01C
138#define CAMMMU_WALKING_ST 0x040
139#define CAMMMU_CNTL 0x044
140#define CAMMMU_FAULT_AD 0x048
141#define CAMMMU_TTB 0x04C
142#define CAMMMU_LOCK 0x050
143#define CAMMMU_LD_TLB 0x054
144#define CAMMMU_CAM 0x058
145#define CAMMMU_RAM 0x05C
146#define CAMMMU_GFLUSH 0x060
147#define CAMMMU_FLUSH_ENTRY 0x064
148#define CAMMMU_READ_CAM 0x068
149#define CAMMMU_READ_RAM 0x06C
150#define CAMMMU_EMU_FAULT_AD 0x070
151
152/* Define bit fields within selected registers */
153#define CAM_REVISION_MAJOR (15 << 4)
154#define CAM_REVISION_MAJOR_SHIFT 4
155#define CAM_REVISION_MINOR (15 << 0)
156#define CAM_REVISION_MINOR_SHIFT 0
157
158#define CAM_SYSCONFIG_SOFTRESET (1 << 1)
159#define CAM_SYSCONFIG_AUTOIDLE (1 << 0)
160
161#define CAM_SYSSTATUS_RESETDONE (1 << 0)
162
163#define CAM_IRQSTATUS_CC_IRQ (1 << 4)
164#define CAM_IRQSTATUS_MMU_IRQ (1 << 3)
165#define CAM_IRQSTATUS_DMA_IRQ2 (1 << 2)
166#define CAM_IRQSTATUS_DMA_IRQ1 (1 << 1)
167#define CAM_IRQSTATUS_DMA_IRQ0 (1 << 0)
168
169#define CAM_GPO_CAM_S_P_EN (1 << 1)
170#define CAM_GPO_CAM_CCP_MODE (1 << 0)
171
172#define CAM_GPI_CC_DMA_REQ1 (1 << 24)
173#define CAP_GPI_CC_DMA_REQ0 (1 << 23)
174#define CAP_GPI_CAM_MSTANDBY (1 << 21)
175#define CAP_GPI_CAM_WAIT (1 << 20)
176#define CAP_GPI_CAM_S_DATA (1 << 17)
177#define CAP_GPI_CAM_S_CLK (1 << 16)
178#define CAP_GPI_CAM_P_DATA (0xFFF << 3)
179#define CAP_GPI_CAM_P_DATA_SHIFT 3
180#define CAP_GPI_CAM_P_VS (1 << 2)
181#define CAP_GPI_CAM_P_HS (1 << 1)
182#define CAP_GPI_CAM_P_CLK (1 << 0)
183
184#define CC_REVISION_MAJOR (15 << 4)
185#define CC_REVISION_MAJOR_SHIFT 4
186#define CC_REVISION_MINOR (15 << 0)
187#define CC_REVISION_MINOR_SHIFT 0
188
189#define CC_SYSCONFIG_SIDLEMODE (3 << 3)
190#define CC_SYSCONFIG_SIDLEMODE_FIDLE (0 << 3)
191#define CC_SYSCONFIG_SIDLEMODE_NIDLE (1 << 3)
192#define CC_SYSCONFIG_SOFTRESET (1 << 1)
193#define CC_SYSCONFIG_AUTOIDLE (1 << 0)
194
195#define CC_SYSSTATUS_RESETDONE (1 << 0)
196
197#define CC_IRQSTATUS_FS_IRQ (1 << 19)
198#define CC_IRQSTATUS_LE_IRQ (1 << 18)
199#define CC_IRQSTATUS_LS_IRQ (1 << 17)
200#define CC_IRQSTATUS_FE_IRQ (1 << 16)
201#define CC_IRQSTATUS_FW_ERR_IRQ (1 << 10)
202#define CC_IRQSTATUS_FSC_ERR_IRQ (1 << 9)
203#define CC_IRQSTATUS_SSC_ERR_IRQ (1 << 8)
204#define CC_IRQSTATUS_FIFO_NOEMPTY_IRQ (1 << 4)
205#define CC_IRQSTATUS_FIFO_FULL_IRQ (1 << 3)
206#define CC_IRQSTATUS_FIFO_THR_IRQ (1 << 2)
207#define CC_IRQSTATUS_FIFO_OF_IRQ (1 << 1)
208#define CC_IRQSTATUS_FIFO_UF_IRQ (1 << 0)
209
210#define CC_IRQENABLE_FS_IRQ (1 << 19)
211#define CC_IRQENABLE_LE_IRQ (1 << 18)
212#define CC_IRQENABLE_LS_IRQ (1 << 17)
213#define CC_IRQENABLE_FE_IRQ (1 << 16)
214#define CC_IRQENABLE_FW_ERR_IRQ (1 << 10)
215#define CC_IRQENABLE_FSC_ERR_IRQ (1 << 9)
216#define CC_IRQENABLE_SSC_ERR_IRQ (1 << 8)
217#define CC_IRQENABLE_FIFO_NOEMPTY_IRQ (1 << 4)
218#define CC_IRQENABLE_FIFO_FULL_IRQ (1 << 3)
219#define CC_IRQENABLE_FIFO_THR_IRQ (1 << 2)
220#define CC_IRQENABLE_FIFO_OF_IRQ (1 << 1)
221#define CC_IRQENABLE_FIFO_UF_IRQ (1 << 0)
222
223#define CC_CTRL_CC_ONE_SHOT (1 << 20)
224#define CC_CTRL_CC_IF_SYNCHRO (1 << 19)
225#define CC_CTRL_CC_RST (1 << 18)
226#define CC_CTRL_CC_FRAME_TRIG (1 << 17)
227#define CC_CTRL_CC_EN (1 << 16)
228#define CC_CTRL_NOBT_SYNCHRO (1 << 13)
229#define CC_CTRL_BT_CORRECT (1 << 12)
230#define CC_CTRL_PAR_ORDERCAM (1 << 11)
231#define CC_CTRL_PAR_CLK_POL (1 << 10)
232#define CC_CTRL_NOBT_HS_POL (1 << 9)
233#define CC_CTRL_NOBT_VS_POL (1 << 8)
234#define CC_CTRL_PAR_MODE (7 << 1)
235#define CC_CTRL_PAR_MODE_SHIFT 1
236#define CC_CTRL_PAR_MODE_NOBT8 (0 << 1)
237#define CC_CTRL_PAR_MODE_NOBT10 (1 << 1)
238#define CC_CTRL_PAR_MODE_NOBT12 (2 << 1)
239#define CC_CTRL_PAR_MODE_BT8 (4 << 1)
240#define CC_CTRL_PAR_MODE_BT10 (5 << 1)
241#define CC_CTRL_PAR_MODE_FIFOTEST (7 << 1)
242#define CC_CTRL_CCP_MODE (1 << 0)
243
244#define CC_CTRL_DMA_EN (1 << 8)
245#define CC_CTRL_DMA_FIFO_THRESHOLD (0x7F << 0)
246#define CC_CTRL_DMA_FIFO_THRESHOLD_SHIFT 0
247
248#define CC_CTRL_XCLK_DIV (0x1F << 0)
249#define CC_CTRL_XCLK_DIV_SHIFT 0
250#define CC_CTRL_XCLK_DIV_STABLE_LOW (0 << 0)
251#define CC_CTRL_XCLK_DIV_STABLE_HIGH (1 << 0)
252#define CC_CTRL_XCLK_DIV_BYPASS (31 << 0)
253
254#define CC_TEST_FIFO_RD_POINTER (0xFF << 24)
255#define CC_TEST_FIFO_RD_POINTER_SHIFT 24
256#define CC_TEST_FIFO_WR_POINTER (0xFF << 16)
257#define CC_TEST_FIFO_WR_POINTER_SHIFT 16
258#define CC_TEST_FIFO_LEVEL (0xFF << 8)
259#define CC_TEST_FIFO_LEVEL_SHIFT 8
260#define CC_TEST_FIFO_LEVEL_PEAK (0xFF << 0)
261#define CC_TEST_FIFO_LEVEL_PEAK_SHIFT 0
262
263#define CC_GENPAR_FIFO_DEPTH (7 << 0)
264#define CC_GENPAR_FIFO_DEPTH_SHIFT 0
265
266#define CC_CCPDFR_ALPHA (0xFF << 8)
267#define CC_CCPDFR_ALPHA_SHIFT 8
268#define CC_CCPDFR_DATAFORMAT (15 << 0)
269#define CC_CCPDFR_DATAFORMAT_SHIFT 0
270#define CC_CCPDFR_DATAFORMAT_YUV422BE (0 << 0)
271#define CC_CCPDFR_DATAFORMAT_YUV422 (1 << 0)
272#define CC_CCPDFR_DATAFORMAT_YUV420 (2 << 0)
273#define CC_CCPDFR_DATAFORMAT_RGB444 (4 << 0)
274#define CC_CCPDFR_DATAFORMAT_RGB565 (5 << 0)
275#define CC_CCPDFR_DATAFORMAT_RGB888NDE (6 << 0)
276#define CC_CCPDFR_DATAFORMAT_RGB888 (7 << 0)
277#define CC_CCPDFR_DATAFORMAT_RAW8NDE (8 << 0)
278#define CC_CCPDFR_DATAFORMAT_RAW8 (9 << 0)
279#define CC_CCPDFR_DATAFORMAT_RAW10NDE (10 << 0)
280#define CC_CCPDFR_DATAFORMAT_RAW10 (11 << 0)
281#define CC_CCPDFR_DATAFORMAT_RAW12NDE (12 << 0)
282#define CC_CCPDFR_DATAFORMAT_RAW12 (13 << 0)
283#define CC_CCPDFR_DATAFORMAT_JPEG8 (15 << 0)
284
285#define CAMDMA_REVISION_MAJOR (15 << 4)
286#define CAMDMA_REVISION_MAJOR_SHIFT 4
287#define CAMDMA_REVISION_MINOR (15 << 0)
288#define CAMDMA_REVISION_MINOR_SHIFT 0
289
290#define CAMDMA_OCP_SYSCONFIG_MIDLEMODE (3 << 12)
291#define CAMDMA_OCP_SYSCONFIG_MIDLEMODE_FSTANDBY (0 << 12)
292#define CAMDMA_OCP_SYSCONFIG_MIDLEMODE_NSTANDBY (1 << 12)
293#define CAMDMA_OCP_SYSCONFIG_MIDLEMODE_SSTANDBY (2 << 12)
294#define CAMDMA_OCP_SYSCONFIG_FUNC_CLOCK (1 << 9)
295#define CAMDMA_OCP_SYSCONFIG_OCP_CLOCK (1 << 8)
296#define CAMDMA_OCP_SYSCONFIG_EMUFREE (1 << 5)
297#define CAMDMA_OCP_SYSCONFIG_SIDLEMODE (3 << 3)
298#define CAMDMA_OCP_SYSCONFIG_SIDLEMODE_FIDLE (0 << 3)
299#define CAMDMA_OCP_SYSCONFIG_SIDLEMODE_NIDLE (1 << 3)
300#define CAMDMA_OCP_SYSCONFIG_SIDLEMODE_SIDLE (2 << 3)
301#define CAMDMA_OCP_SYSCONFIG_SOFTRESET (1 << 1)
302#define CAMDMA_OCP_SYSCONFIG_AUTOIDLE (1 << 0)
303
304#define CAMDMA_SYSSTATUS_RESETDONE (1 << 0)
305
306#define CAMDMA_GCR_ARBITRATION_RATE (0xFF << 16)
307#define CAMDMA_GCR_ARBITRATION_RATE_SHIFT 16
308#define CAMDMA_GCR_MAX_CHANNEL_FIFO_DEPTH (0xFF << 0)
309#define CAMDMA_GCR_MAX_CHANNEL_FIFO_DEPTH_SHIFT 0
310
311#define CAMDMA_CCR_SEL_SRC_DST_SYNC (1 << 24)
312#define CAMDMA_CCR_PREFETCH (1 << 23)
313#define CAMDMA_CCR_SUPERVISOR (1 << 22)
314#define CAMDMA_CCR_SECURE (1 << 21)
315#define CAMDMA_CCR_BS (1 << 18)
316#define CAMDMA_CCR_TRANSPARENT_COPY_ENABLE (1 << 17)
317#define CAMDMA_CCR_CONSTANT_FILL_ENABLE (1 << 16)
318#define CAMDMA_CCR_DST_AMODE (3 << 14)
319#define CAMDMA_CCR_DST_AMODE_CONST_ADDR (0 << 14)
320#define CAMDMA_CCR_DST_AMODE_POST_INC (1 << 14)
321#define CAMDMA_CCR_DST_AMODE_SGL_IDX (2 << 14)
322#define CAMDMA_CCR_DST_AMODE_DBL_IDX (3 << 14)
323#define CAMDMA_CCR_SRC_AMODE (3 << 12)
324#define CAMDMA_CCR_SRC_AMODE_CONST_ADDR (0 << 12)
325#define CAMDMA_CCR_SRC_AMODE_POST_INC (1 << 12)
326#define CAMDMA_CCR_SRC_AMODE_SGL_IDX (2 << 12)
327#define CAMDMA_CCR_SRC_AMODE_DBL_IDX (3 << 12)
328#define CAMDMA_CCR_WR_ACTIVE (1 << 10)
329#define CAMDMA_CCR_RD_ACTIVE (1 << 9)
330#define CAMDMA_CCR_SUSPEND_SENSITIVE (1 << 8)
331#define CAMDMA_CCR_ENABLE (1 << 7)
332#define CAMDMA_CCR_PRIO (1 << 6)
333#define CAMDMA_CCR_FS (1 << 5)
334#define CAMDMA_CCR_SYNCHRO ((3 << 19) | (31 << 0))
335#define CAMDMA_CCR_SYNCHRO_CAMERA 0x01
336
337#define CAMDMA_CLNK_CTRL_ENABLE_LNK (1 << 15)
338#define CAMDMA_CLNK_CTRL_NEXTLCH_ID (0x1F << 0)
339#define CAMDMA_CLNK_CTRL_NEXTLCH_ID_SHIFT 0
340
341#define CAMDMA_CICR_MISALIGNED_ERR_IE (1 << 11)
342#define CAMDMA_CICR_SUPERVISOR_ERR_IE (1 << 10)
343#define CAMDMA_CICR_SECURE_ERR_IE (1 << 9)
344#define CAMDMA_CICR_TRANS_ERR_IE (1 << 8)
345#define CAMDMA_CICR_PACKET_IE (1 << 7)
346#define CAMDMA_CICR_BLOCK_IE (1 << 5)
347#define CAMDMA_CICR_LAST_IE (1 << 4)
348#define CAMDMA_CICR_FRAME_IE (1 << 3)
349#define CAMDMA_CICR_HALF_IE (1 << 2)
350#define CAMDMA_CICR_DROP_IE (1 << 1)
351
352#define CAMDMA_CSR_MISALIGNED_ERR (1 << 11)
353#define CAMDMA_CSR_SUPERVISOR_ERR (1 << 10)
354#define CAMDMA_CSR_SECURE_ERR (1 << 9)
355#define CAMDMA_CSR_TRANS_ERR (1 << 8)
356#define CAMDMA_CSR_PACKET (1 << 7)
357#define CAMDMA_CSR_SYNC (1 << 6)
358#define CAMDMA_CSR_BLOCK (1 << 5)
359#define CAMDMA_CSR_LAST (1 << 4)
360#define CAMDMA_CSR_FRAME (1 << 3)
361#define CAMDMA_CSR_HALF (1 << 2)
362#define CAMDMA_CSR_DROP (1 << 1)
363
364#define CAMDMA_CSDP_SRC_ENDIANNESS (1 << 21)
365#define CAMDMA_CSDP_SRC_ENDIANNESS_LOCK (1 << 20)
366#define CAMDMA_CSDP_DST_ENDIANNESS (1 << 19)
367#define CAMDMA_CSDP_DST_ENDIANNESS_LOCK (1 << 18)
368#define CAMDMA_CSDP_WRITE_MODE (3 << 16)
369#define CAMDMA_CSDP_WRITE_MODE_WRNP (0 << 16)
370#define CAMDMA_CSDP_WRITE_MODE_POSTED (1 << 16)
371#define CAMDMA_CSDP_WRITE_MODE_POSTED_LAST_WRNP (2 << 16)
372#define CAMDMA_CSDP_DST_BURST_EN (3 << 14)
373#define CAMDMA_CSDP_DST_BURST_EN_1 (0 << 14)
374#define CAMDMA_CSDP_DST_BURST_EN_16 (1 << 14)
375#define CAMDMA_CSDP_DST_BURST_EN_32 (2 << 14)
376#define CAMDMA_CSDP_DST_BURST_EN_64 (3 << 14)
377#define CAMDMA_CSDP_DST_PACKED (1 << 13)
378#define CAMDMA_CSDP_WR_ADD_TRSLT (15 << 9)
379#define CAMDMA_CSDP_WR_ADD_TRSLT_ENABLE_MREQADD (3 << 9)
380#define CAMDMA_CSDP_SRC_BURST_EN (3 << 7)
381#define CAMDMA_CSDP_SRC_BURST_EN_1 (0 << 7)
382#define CAMDMA_CSDP_SRC_BURST_EN_16 (1 << 7)
383#define CAMDMA_CSDP_SRC_BURST_EN_32 (2 << 7)
384#define CAMDMA_CSDP_SRC_BURST_EN_64 (3 << 7)
385#define CAMDMA_CSDP_SRC_PACKED (1 << 6)
386#define CAMDMA_CSDP_RD_ADD_TRSLT (15 << 2)
387#define CAMDMA_CSDP_RD_ADD_TRSLT_ENABLE_MREQADD (3 << 2)
388#define CAMDMA_CSDP_DATA_TYPE (3 << 0)
389#define CAMDMA_CSDP_DATA_TYPE_8BITS (0 << 0)
390#define CAMDMA_CSDP_DATA_TYPE_16BITS (1 << 0)
391#define CAMDMA_CSDP_DATA_TYPE_32BITS (2 << 0)
392
393#define CAMMMU_SYSCONFIG_AUTOIDLE (1 << 0)
394
395/*
396 *
397 * Declarations.
398 *
399 */
400
/* forward declarations */
struct omap24xxcam_sgdma;
struct omap24xxcam_dma;

/* Completion callback types; 'status' carries the DMA CSR value. */
typedef void (*sgdma_callback_t)(struct omap24xxcam_sgdma *cam,
				 u32 status, void *arg);
typedef void (*dma_callback_t)(struct omap24xxcam_dma *cam,
			       u32 status, void *arg);

/* Per-physical-DMA-channel completion callback and its argument. */
struct channel_state {
	dma_callback_t callback;
	void *arg;
};
414
/* sgdma state for each of the possible videobuf_buffers + 2 overlays */
struct sgdma_state {
	const struct scatterlist *sglist;
	int sglen;		 /* number of sglist entries */
	int next_sglist;	 /* index of next sglist entry to process */
	unsigned int bytes_read; /* number of bytes read */
	unsigned int len;	 /* total length of sglist (excluding
				  * bytes due to page alignment) */
	int queued_sglist;	 /* number of sglist entries queued for DMA */
	u32 csr;		 /* DMA return code */
	sgdma_callback_t callback; /* called when the whole list is done */
	void *arg;		 /* opaque argument for callback */
};
428
/* physical DMA channel management */
struct omap24xxcam_dma {
	spinlock_t lock;	/* Lock for the whole structure. */

	void __iomem *base;	/* base address for dma controller */

	/* While dma_stop!=0, an attempt to start a new DMA transfer will
	 * fail.
	 */
	atomic_t dma_stop;
	int free_dmach;		/* number of dma channels free */
	int next_dmach;		/* index of next dma channel to use */
	/* Per-channel callback state, round-robin allocated. */
	struct channel_state ch_state[NUM_CAMDMA_CHANNELS];
};
443
/* scatter-gather DMA (scatterlist stuff) management */
struct omap24xxcam_sgdma {
	struct omap24xxcam_dma dma;	/* underlying physical channels */

	spinlock_t lock;	/* Lock for the fields below. */
	int free_sgdma;		/* number of free sg dma slots */
	int next_sgdma;		/* index of next sg dma slot to use */
	struct sgdma_state sg_state[NUM_SG_DMA];

	/* Reset timer data */
	struct timer_list reset_timer;
};
456
/* per-device data structure */
struct omap24xxcam_device {
	/*** mutex ***/
	/*
	 * mutex serialises access to this structure. Also camera
	 * opening and releasing is synchronised by this.
	 */
	struct mutex mutex;

	struct v4l2_device v4l2_dev;

	/*** general driver state information ***/
	/* Open count; suspend/resume are no-ops while this is zero. */
	atomic_t users;
	/*
	 * Lock to serialise core enabling and disabling and access to
	 * sgdma_in_queue.
	 */
	spinlock_t core_enable_disable_lock;
	/*
	 * Number of sgdma requests in scatter-gather queue, protected
	 * by the lock above.
	 */
	int sgdma_in_queue;
	/*
	 * Sensor interface parameters: interface type, CC_CTRL
	 * register value and interface specific data.
	 */
	int if_type;
	union {
		struct parallel {
			u32 xclk;
		} bt656;
	} if_u;
	u32 cc_ctrl;

	/*** subsystem structures ***/
	struct omap24xxcam_sgdma sgdma;

	/*** hardware resources ***/
	unsigned int irq;
	void __iomem *mmio_base;
	unsigned long mmio_base_phys;
	unsigned long mmio_size;

	/*** interfaces and device ***/
	struct v4l2_int_device *sdev;
	struct device *dev;
	struct video_device *vfd;

	/*** camera and sensor reset related stuff ***/
	struct work_struct sensor_reset_work;
	/*
	 * We're in the middle of a reset. Don't enable core if this
	 * is non-zero! This exists to help decisionmaking in a case
	 * where videobuf_qbuf is called while we are in the middle of
	 * a reset.
	 */
	atomic_t in_reset;
	/*
	 * Non-zero if we don't want any resets for now. Used to
	 * prevent reset work to run when we're about to stop
	 * streaming.
	 */
	atomic_t reset_disable;

	/*** video device parameters ***/
	int capture_mem;	/* capture buffer memory budget, bytes */

	/*** camera module clocks ***/
	struct clk *fck;
	struct clk *ick;

	/*** capture data ***/
	/* file handle, if streaming is on */
	struct file *streaming;
};
533
/* Per-file handle data. */
struct omap24xxcam_fh {
	spinlock_t vbq_lock;		/* spinlock for the videobuf queue */
	struct videobuf_queue vbq;
	struct v4l2_pix_format pix;	/* serialise pix by vbq->lock */
	atomic_t field_count;		/* field counter for videobuf_buffer */
	/* accessing cam here doesn't need serialisation: it's constant */
	struct omap24xxcam_device *cam;
};
543
544/*
545 *
546 * Register I/O functions.
547 *
548 */
549
/*
 * Read a 32-bit camera subsystem register.
 *
 * NOTE(review): 'base' is a u32 pointer, so "base + offset" scales the
 * offset by sizeof(u32); callers must therefore pass the offset in
 * 32-bit words, not bytes -- TODO confirm against the call sites, since
 * the register offsets defined above look like byte offsets.
 */
static inline u32 omap24xxcam_reg_in(u32 __iomem *base, u32 offset)
{
	return readl(base + offset);
}
554
/*
 * Write a 32-bit camera subsystem register; returns the value written.
 * NOTE(review): same word-vs-byte offset caveat as omap24xxcam_reg_in()
 * -- pointer arithmetic on u32 * scales 'offset' by 4; verify callers.
 */
static inline u32 omap24xxcam_reg_out(u32 __iomem *base, u32 offset,
				      u32 val)
{
	writel(val, base + offset);
	return val;
}
561
/*
 * Read-modify-write a register: replace the bits selected by 'mask'
 * with the corresponding bits of 'val'; returns the new register value.
 * NOTE(review): same word-vs-byte offset caveat as omap24xxcam_reg_in().
 */
static inline u32 omap24xxcam_reg_merge(u32 __iomem *base, u32 offset,
					u32 val, u32 mask)
{
	u32 __iomem *addr = base + offset;
	u32 new_val = (readl(addr) & ~mask) | (val & mask);

	writel(new_val, addr);
	return new_val;
}
571
572/*
573 *
574 * Function prototypes.
575 *
576 */
577
578/* dma prototypes */
579
580void omap24xxcam_dma_hwinit(struct omap24xxcam_dma *dma);
581void omap24xxcam_dma_isr(struct omap24xxcam_dma *dma);
582
583/* sgdma prototypes */
584
585void omap24xxcam_sgdma_process(struct omap24xxcam_sgdma *sgdma);
586int omap24xxcam_sgdma_queue(struct omap24xxcam_sgdma *sgdma,
587 const struct scatterlist *sglist, int sglen,
588 int len, sgdma_callback_t callback, void *arg);
589void omap24xxcam_sgdma_sync(struct omap24xxcam_sgdma *sgdma);
590void omap24xxcam_sgdma_init(struct omap24xxcam_sgdma *sgdma,
591 void __iomem *base,
592 void (*reset_callback)(unsigned long data),
593 unsigned long reset_callback_data);
594void omap24xxcam_sgdma_exit(struct omap24xxcam_sgdma *sgdma);
595
596#endif
diff --git a/drivers/staging/media/omap24xx/tcm825x.c b/drivers/staging/media/omap24xx/tcm825x.c
deleted file mode 100644
index 9d9ecf1fc4ae..000000000000
--- a/drivers/staging/media/omap24xx/tcm825x.c
+++ /dev/null
@@ -1,938 +0,0 @@
1/*
2 * drivers/media/i2c/tcm825x.c
3 *
4 * TCM825X camera sensor driver.
5 *
6 * Copyright (C) 2007 Nokia Corporation.
7 *
8 * Contact: Sakari Ailus <sakari.ailus@nokia.com>
9 *
10 * Based on code from David Cohen <david.cohen@indt.org.br>
11 *
12 * This driver was based on ov9640 sensor driver from MontaVista
13 *
14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License
16 * version 2 as published by the Free Software Foundation.
17 *
18 * This program is distributed in the hope that it will be useful, but
19 * WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 * General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
26 * 02110-1301 USA
27 */
28
29#include <linux/i2c.h>
30#include <linux/module.h>
31#include "v4l2-int-device.h"
32
33#include "tcm825x.h"
34
35/*
36 * The sensor has two fps modes: the lower one just gives half the fps
37 * at the same xclk than the high one.
38 */
39#define MAX_FPS 30
40#define MIN_FPS 8
41#define MAX_HALF_FPS (MAX_FPS / 2)
42#define HIGH_FPS_MODE_LOWER_LIMIT 14
43#define DEFAULT_FPS MAX_HALF_FPS
44
/* Per-sensor state: board glue, v4l2-int-device handle, i2c client,
 * and the currently configured pixel format and frame interval. */
struct tcm825x_sensor {
	const struct tcm825x_platform_data *platform_data;
	struct v4l2_int_device *v4l2_int_device;
	struct i2c_client *i2c_client;
	struct v4l2_pix_format pix;
	struct v4l2_fract timeperframe;
};
52
/* list of image formats supported by TCM825X sensor */
/* NOTE(review): the first description says YUYV but the fourcc is
 * V4L2_PIX_FMT_UYVY -- confirm which byte order the sensor emits. */
static const struct v4l2_fmtdesc tcm825x_formats[] = {
	{
		.description = "YUYV (YUV 4:2:2), packed",
		.pixelformat = V4L2_PIX_FMT_UYVY,
	}, {
		/* Note: V4L2 defines RGB565 as:
		 *
		 *	Byte 0			  Byte 1
		 *	g2 g1 g0 r4 r3 r2 r1 r0	  b4 b3 b2 b1 b0 g5 g4 g3
		 *
		 * We interpret RGB565 as:
		 *
		 *	Byte 0			  Byte 1
		 *	g2 g1 g0 b4 b3 b2 b1 b0	  r4 r3 r2 r1 r0 g5 g4 g3
		 */
		.description = "RGB565, le",
		.pixelformat = V4L2_PIX_FMT_RGB565,
	},
};
73
74#define TCM825X_NUM_CAPTURE_FORMATS ARRAY_SIZE(tcm825x_formats)
75
/*
 * TCM825X register configuration for all combinations of pixel format and
 * image size
 */
/* Picture-size selections written to the TCM825X_PICSIZ register. */
static const struct tcm825x_reg subqcif = { 0x20, TCM825X_PICSIZ };
static const struct tcm825x_reg qcif = { 0x18, TCM825X_PICSIZ };
static const struct tcm825x_reg cif = { 0x14, TCM825X_PICSIZ };
static const struct tcm825x_reg qqvga = { 0x0c, TCM825X_PICSIZ };
static const struct tcm825x_reg qvga = { 0x04, TCM825X_PICSIZ };
static const struct tcm825x_reg vga = { 0x00, TCM825X_PICSIZ };

/* Pixel-format selections written to the TCM825X_PICFMT register. */
static const struct tcm825x_reg yuv422 = { 0x00, TCM825X_PICFMT };
static const struct tcm825x_reg rgb565 = { 0x02, TCM825X_PICFMT };
89
90/* Our own specific controls */
91#define V4L2_CID_ALC V4L2_CID_PRIVATE_BASE
92#define V4L2_CID_H_EDGE_EN (V4L2_CID_PRIVATE_BASE + 1)
93#define V4L2_CID_V_EDGE_EN (V4L2_CID_PRIVATE_BASE + 2)
94#define V4L2_CID_LENS (V4L2_CID_PRIVATE_BASE + 3)
95#define V4L2_CID_MAX_EXPOSURE_TIME (V4L2_CID_PRIVATE_BASE + 4)
96#define V4L2_CID_LAST_PRIV V4L2_CID_MAX_EXPOSURE_TIME
97
/* Video controls */
/*
 * Table mapping V4L2 control IDs onto TCM825X registers.  Each entry
 * pairs a v4l2_queryctrl description with the sensor register (reg)
 * holding the control and the bit position (start_bit) of its value
 * within that register.  Private controls use the V4L2_CID_* values
 * defined above.
 */
static struct vcontrol {
	struct v4l2_queryctrl qc;
	u16 reg;
	u16 start_bit;
} video_control[] = {
	{
		{
			.id = V4L2_CID_GAIN,
			.type = V4L2_CTRL_TYPE_INTEGER,
			.name = "Gain",
			.minimum = 0,
			.maximum = 63,
			.step = 1,
		},
		.reg = TCM825X_AG,
		.start_bit = 0,
	},
	{
		{
			.id = V4L2_CID_RED_BALANCE,
			.type = V4L2_CTRL_TYPE_INTEGER,
			.name = "Red Balance",
			.minimum = 0,
			.maximum = 255,
			.step = 1,
		},
		.reg = TCM825X_MRG,
		.start_bit = 0,
	},
	{
		{
			.id = V4L2_CID_BLUE_BALANCE,
			.type = V4L2_CTRL_TYPE_INTEGER,
			.name = "Blue Balance",
			.minimum = 0,
			.maximum = 255,
			.step = 1,
		},
		.reg = TCM825X_MBG,
		.start_bit = 0,
	},
	{
		{
			.id = V4L2_CID_AUTO_WHITE_BALANCE,
			.type = V4L2_CTRL_TYPE_BOOLEAN,
			.name = "Auto White Balance",
			.minimum = 0,
			.maximum = 1,
			.step = 0,
		},
		.reg = TCM825X_AWBSW,
		.start_bit = 7,
	},
	{
		{
			.id = V4L2_CID_EXPOSURE,
			.type = V4L2_CTRL_TYPE_INTEGER,
			.name = "Exposure Time",
			.minimum = 0,
			.maximum = 0x1fff,
			.step = 1,
		},
		.reg = TCM825X_ESRSPD_U,
		.start_bit = 0,
	},
	{
		{
			.id = V4L2_CID_HFLIP,
			.type = V4L2_CTRL_TYPE_BOOLEAN,
			.name = "Mirror Image",
			.minimum = 0,
			.maximum = 1,
			.step = 0,
		},
		.reg = TCM825X_H_INV,
		.start_bit = 6,
	},
	{
		{
			.id = V4L2_CID_VFLIP,
			.type = V4L2_CTRL_TYPE_BOOLEAN,
			.name = "Vertical Flip",
			.minimum = 0,
			.maximum = 1,
			.step = 0,
		},
		.reg = TCM825X_V_INV,
		.start_bit = 7,
	},
	/* Private controls */
	{
		{
			.id = V4L2_CID_ALC,
			.type = V4L2_CTRL_TYPE_BOOLEAN,
			.name = "Auto Luminance Control",
			.minimum = 0,
			.maximum = 1,
			.step = 0,
		},
		.reg = TCM825X_ALCSW,
		.start_bit = 7,
	},
	{
		{
			.id = V4L2_CID_H_EDGE_EN,
			.type = V4L2_CTRL_TYPE_INTEGER,
			.name = "Horizontal Edge Enhancement",
			.minimum = 0,
			.maximum = 0xff,
			.step = 1,
		},
		.reg = TCM825X_HDTG,
		.start_bit = 0,
	},
	{
		{
			.id = V4L2_CID_V_EDGE_EN,
			.type = V4L2_CTRL_TYPE_INTEGER,
			.name = "Vertical Edge Enhancement",
			.minimum = 0,
			.maximum = 0xff,
			.step = 1,
		},
		.reg = TCM825X_VDTG,
		.start_bit = 0,
	},
	{
		{
			.id = V4L2_CID_LENS,
			.type = V4L2_CTRL_TYPE_INTEGER,
			.name = "Lens Shading Compensation",
			.minimum = 0,
			.maximum = 0x3f,
			.step = 1,
		},
		.reg = TCM825X_LENS,
		.start_bit = 0,
	},
	{
		{
			.id = V4L2_CID_MAX_EXPOSURE_TIME,
			.type = V4L2_CTRL_TYPE_INTEGER,
			.name = "Maximum Exposure Time",
			.minimum = 0,
			.maximum = 0x3,
			.step = 1,
		},
		.reg = TCM825X_ESRLIM,
		.start_bit = 5,
	},
};
250
251
/* Per-size register writes, indexed by enum image_size. */
static const struct tcm825x_reg *tcm825x_siz_reg[NUM_IMAGE_SIZES] = {
	&subqcif, &qqvga, &qcif, &qvga, &cif, &vga };

/* Per-format register writes, indexed by enum pixel_format. */
static const struct tcm825x_reg *tcm825x_fmt_reg[NUM_PIXEL_FORMATS] = {
	&yuv422, &rgb565 };
257
/*
 * Read a value from a register in a TCM825X sensor device.
 *
 * Performs a one-byte address write followed by a one-byte read in a
 * single i2c_transfer().
 *
 * Returns the (non-negative) register value on success, or a negative
 * error code otherwise.  (The previous comment claimed a zero return
 * with the value in 'val'; the code has always returned the value.)
 */
static int tcm825x_read_reg(struct i2c_client *client, int reg)
{
	int err;
	struct i2c_msg msg[2];
	u8 reg_buf, data_buf = 0;

	if (!client->adapter)
		return -ENODEV;

	/* msg[0]: write the register address; msg[1]: read one byte back */
	msg[0].addr = client->addr;
	msg[0].flags = 0;
	msg[0].len = 1;
	msg[0].buf = &reg_buf;
	msg[1].addr = client->addr;
	msg[1].flags = I2C_M_RD;
	msg[1].len = 1;
	msg[1].buf = &data_buf;

	reg_buf = reg;

	err = i2c_transfer(client->adapter, msg, 2);
	if (err < 0)
		return err;
	return data_buf;
}
288
289/*
290 * Write a value to a register in an TCM825X sensor device.
291 * Returns zero if successful, or non-zero otherwise.
292 */
293static int tcm825x_write_reg(struct i2c_client *client, u8 reg, u8 val)
294{
295 int err;
296 struct i2c_msg msg[1];
297 unsigned char data[2];
298
299 if (!client->adapter)
300 return -ENODEV;
301
302 msg->addr = client->addr;
303 msg->flags = 0;
304 msg->len = 2;
305 msg->buf = data;
306 data[0] = reg;
307 data[1] = val;
308 err = i2c_transfer(client->adapter, msg, 1);
309 if (err >= 0)
310 return 0;
311 return err;
312}
313
314static int __tcm825x_write_reg_mask(struct i2c_client *client,
315 u8 reg, u8 val, u8 mask)
316{
317 int rc;
318
319 /* need to do read - modify - write */
320 rc = tcm825x_read_reg(client, reg);
321 if (rc < 0)
322 return rc;
323
324 rc &= (~mask); /* Clear the masked bits */
325 val &= mask; /* Enforce mask on value */
326 val |= rc;
327
328 /* write the new value to the register */
329 rc = tcm825x_write_reg(client, reg, val);
330 if (rc)
331 return rc;
332
333 return 0;
334}
335
/*
 * Convenience wrapper: @regmask is one of the TCM825X_* constants that
 * encode the register address in the high byte and the field's bit mask
 * in the low byte (see TCM825X_ADDR()/TCM825X_MASK()).
 */
#define tcm825x_write_reg_mask(client, regmask, val) \
	__tcm825x_write_reg_mask(client, TCM825X_ADDR((regmask)), val, \
				 TCM825X_MASK((regmask)))
339
340
341/*
342 * Initialize a list of TCM825X registers.
343 * The list of registers is terminated by the pair of values
344 * { TCM825X_REG_TERM, TCM825X_VAL_TERM }.
345 * Returns zero if successful, or non-zero otherwise.
346 */
347static int tcm825x_write_default_regs(struct i2c_client *client,
348 const struct tcm825x_reg *reglist)
349{
350 int err;
351 const struct tcm825x_reg *next = reglist;
352
353 while (!((next->reg == TCM825X_REG_TERM)
354 && (next->val == TCM825X_VAL_TERM))) {
355 err = tcm825x_write_reg(client, next->reg, next->val);
356 if (err) {
357 dev_err(&client->dev, "register writing failed\n");
358 return err;
359 }
360 next++;
361 }
362
363 return 0;
364}
365
366static struct vcontrol *find_vctrl(int id)
367{
368 int i;
369
370 if (id < V4L2_CID_BASE)
371 return NULL;
372
373 for (i = 0; i < ARRAY_SIZE(video_control); i++)
374 if (video_control[i].qc.id == id)
375 return &video_control[i];
376
377 return NULL;
378}
379
/*
 * Find the best match for a requested image capture size. The best match
 * is chosen as the nearest match that has the same number or fewer pixels
 * as the requested size, or the smallest image size if the requested size
 * has fewer pixels than the smallest image.
 *
 * Relies on tcm825x_sizes[] being ordered smallest-to-largest; returns
 * VGA (the largest) when nothing bigger fits the "fewer pixels" rule.
 */
static enum image_size tcm825x_find_size(struct v4l2_int_device *s,
					 unsigned int width,
					 unsigned int height)
{
	enum image_size isize;
	unsigned long pixels = width * height;
	struct tcm825x_sensor *sensor = s->priv;

	/* Stop at the last size whose successor would exceed the request. */
	for (isize = subQCIF; isize < VGA; isize++) {
		if (tcm825x_sizes[isize + 1].height
		    * tcm825x_sizes[isize + 1].width > pixels) {
			dev_dbg(&sensor->i2c_client->dev, "size %d\n", isize);

			return isize;
		}
	}

	dev_dbg(&sensor->i2c_client->dev, "format default VGA\n");

	return VGA;
}
407
408/*
409 * Configure the TCM825X for current image size, pixel format, and
410 * frame period. fper is the frame period (in seconds) expressed as a
411 * fraction. Returns zero if successful, or non-zero otherwise. The
412 * actual frame period is returned in fper.
413 */
414static int tcm825x_configure(struct v4l2_int_device *s)
415{
416 struct tcm825x_sensor *sensor = s->priv;
417 struct v4l2_pix_format *pix = &sensor->pix;
418 enum image_size isize = tcm825x_find_size(s, pix->width, pix->height);
419 struct v4l2_fract *fper = &sensor->timeperframe;
420 enum pixel_format pfmt;
421 int err;
422 u32 tgt_fps;
423 u8 val;
424
425 /* common register initialization */
426 err = tcm825x_write_default_regs(
427 sensor->i2c_client, sensor->platform_data->default_regs());
428 if (err)
429 return err;
430
431 /* configure image size */
432 val = tcm825x_siz_reg[isize]->val;
433 dev_dbg(&sensor->i2c_client->dev,
434 "configuring image size %d\n", isize);
435 err = tcm825x_write_reg_mask(sensor->i2c_client,
436 tcm825x_siz_reg[isize]->reg, val);
437 if (err)
438 return err;
439
440 /* configure pixel format */
441 switch (pix->pixelformat) {
442 default:
443 case V4L2_PIX_FMT_RGB565:
444 pfmt = RGB565;
445 break;
446 case V4L2_PIX_FMT_UYVY:
447 pfmt = YUV422;
448 break;
449 }
450
451 dev_dbg(&sensor->i2c_client->dev,
452 "configuring pixel format %d\n", pfmt);
453 val = tcm825x_fmt_reg[pfmt]->val;
454
455 err = tcm825x_write_reg_mask(sensor->i2c_client,
456 tcm825x_fmt_reg[pfmt]->reg, val);
457 if (err)
458 return err;
459
460 /*
461 * For frame rate < 15, the FPS reg (addr 0x02, bit 7) must be
462 * set. Frame rate will be halved from the normal.
463 */
464 tgt_fps = fper->denominator / fper->numerator;
465 if (tgt_fps <= HIGH_FPS_MODE_LOWER_LIMIT) {
466 val = tcm825x_read_reg(sensor->i2c_client, 0x02);
467 val |= 0x80;
468 tcm825x_write_reg(sensor->i2c_client, 0x02, val);
469 }
470
471 return 0;
472}
473
474static int ioctl_queryctrl(struct v4l2_int_device *s,
475 struct v4l2_queryctrl *qc)
476{
477 struct vcontrol *control;
478
479 control = find_vctrl(qc->id);
480
481 if (control == NULL)
482 return -EINVAL;
483
484 *qc = control->qc;
485
486 return 0;
487}
488
489static int ioctl_g_ctrl(struct v4l2_int_device *s,
490 struct v4l2_control *vc)
491{
492 struct tcm825x_sensor *sensor = s->priv;
493 struct i2c_client *client = sensor->i2c_client;
494 int val, r;
495 struct vcontrol *lvc;
496
497 /* exposure time is special, spread across 2 registers */
498 if (vc->id == V4L2_CID_EXPOSURE) {
499 int val_lower, val_upper;
500
501 val_upper = tcm825x_read_reg(client,
502 TCM825X_ADDR(TCM825X_ESRSPD_U));
503 if (val_upper < 0)
504 return val_upper;
505 val_lower = tcm825x_read_reg(client,
506 TCM825X_ADDR(TCM825X_ESRSPD_L));
507 if (val_lower < 0)
508 return val_lower;
509
510 vc->value = ((val_upper & 0x1f) << 8) | (val_lower);
511 return 0;
512 }
513
514 lvc = find_vctrl(vc->id);
515 if (lvc == NULL)
516 return -EINVAL;
517
518 r = tcm825x_read_reg(client, TCM825X_ADDR(lvc->reg));
519 if (r < 0)
520 return r;
521 val = r & TCM825X_MASK(lvc->reg);
522 val >>= lvc->start_bit;
523
524 if (val < 0)
525 return val;
526
527 if (vc->id == V4L2_CID_HFLIP || vc->id == V4L2_CID_VFLIP)
528 val ^= sensor->platform_data->is_upside_down();
529
530 vc->value = val;
531 return 0;
532}
533
/*
 * VIDIOC_S_CTRL handler: write @vc->value to the sensor register that
 * backs control @vc->id.  Returns zero on success, -EINVAL for unknown
 * ids, -EIO on a failed register write.
 */
static int ioctl_s_ctrl(struct v4l2_int_device *s,
			struct v4l2_control *vc)
{
	struct tcm825x_sensor *sensor = s->priv;
	struct i2c_client *client = sensor->i2c_client;
	struct vcontrol *lvc;
	int val = vc->value;

	/* exposure time is special, spread across 2 registers */
	if (vc->id == V4L2_CID_EXPOSURE) {
		int val_lower, val_upper;

		/* Low 8 bits go to ESRSPD_L, bits 8..12 to ESRSPD_U. */
		val_lower = val & TCM825X_MASK(TCM825X_ESRSPD_L);
		val_upper = (val >> 8) & TCM825X_MASK(TCM825X_ESRSPD_U);

		if (tcm825x_write_reg_mask(client,
					   TCM825X_ESRSPD_U, val_upper))
			return -EIO;

		if (tcm825x_write_reg_mask(client,
					   TCM825X_ESRSPD_L, val_lower))
			return -EIO;

		return 0;
	}

	lvc = find_vctrl(vc->id);
	if (lvc == NULL)
		return -EINVAL;

	/* Flip controls are interpreted relative to sensor mounting. */
	if (vc->id == V4L2_CID_HFLIP || vc->id == V4L2_CID_VFLIP)
		val ^= sensor->platform_data->is_upside_down();

	/* Shift the value into position; the reg's mask trims excess bits. */
	val = val << lvc->start_bit;
	if (tcm825x_write_reg_mask(client, lvc->reg, val))
		return -EIO;

	return 0;
}
573
574static int ioctl_enum_fmt_cap(struct v4l2_int_device *s,
575 struct v4l2_fmtdesc *fmt)
576{
577 int index = fmt->index;
578
579 switch (fmt->type) {
580 case V4L2_BUF_TYPE_VIDEO_CAPTURE:
581 if (index >= TCM825X_NUM_CAPTURE_FORMATS)
582 return -EINVAL;
583 break;
584
585 default:
586 return -EINVAL;
587 }
588
589 fmt->flags = tcm825x_formats[index].flags;
590 strlcpy(fmt->description, tcm825x_formats[index].description,
591 sizeof(fmt->description));
592 fmt->pixelformat = tcm825x_formats[index].pixelformat;
593
594 return 0;
595}
596
/*
 * VIDIOC_TRY_FMT handler: adjust the requested pixel format in @f to
 * the nearest supported size/format without touching the hardware.
 * Always succeeds; unsupported pixel formats fall back to UYVY.
 */
static int ioctl_try_fmt_cap(struct v4l2_int_device *s,
			     struct v4l2_format *f)
{
	struct tcm825x_sensor *sensor = s->priv;
	enum image_size isize;
	int ifmt;
	struct v4l2_pix_format *pix = &f->fmt.pix;

	/* Snap the requested dimensions to a supported image size. */
	isize = tcm825x_find_size(s, pix->width, pix->height);
	dev_dbg(&sensor->i2c_client->dev, "isize = %d num_capture = %lu\n",
		isize, (unsigned long)TCM825X_NUM_CAPTURE_FORMATS);

	pix->width = tcm825x_sizes[isize].width;
	pix->height = tcm825x_sizes[isize].height;

	for (ifmt = 0; ifmt < TCM825X_NUM_CAPTURE_FORMATS; ifmt++)
		if (pix->pixelformat == tcm825x_formats[ifmt].pixelformat)
			break;

	if (ifmt == TCM825X_NUM_CAPTURE_FORMATS)
		ifmt = 0;	/* Default = YUV 4:2:2 */

	pix->pixelformat = tcm825x_formats[ifmt].pixelformat;
	pix->field = V4L2_FIELD_NONE;
	pix->bytesperline = pix->width * TCM825X_BYTES_PER_PIXEL;
	pix->sizeimage = pix->bytesperline * pix->height;
	pix->priv = 0;
	dev_dbg(&sensor->i2c_client->dev, "format = 0x%08x\n",
		pix->pixelformat);

	/* RGB565 is sRGB; everything else is reported as JPEG colorspace. */
	switch (pix->pixelformat) {
	case V4L2_PIX_FMT_UYVY:
	default:
		pix->colorspace = V4L2_COLORSPACE_JPEG;
		break;
	case V4L2_PIX_FMT_RGB565:
		pix->colorspace = V4L2_COLORSPACE_SRGB;
		break;
	}

	return 0;
}
639
640static int ioctl_s_fmt_cap(struct v4l2_int_device *s,
641 struct v4l2_format *f)
642{
643 struct tcm825x_sensor *sensor = s->priv;
644 struct v4l2_pix_format *pix = &f->fmt.pix;
645 int rval;
646
647 rval = ioctl_try_fmt_cap(s, f);
648 if (rval)
649 return rval;
650
651 rval = tcm825x_configure(s);
652
653 sensor->pix = *pix;
654
655 return rval;
656}
657
/*
 * VIDIOC_G_FMT handler: return the cached capture format.
 */
static int ioctl_g_fmt_cap(struct v4l2_int_device *s,
			   struct v4l2_format *f)
{
	struct tcm825x_sensor *sensor = s->priv;

	f->fmt.pix = sensor->pix;

	return 0;
}
667
668static int ioctl_g_parm(struct v4l2_int_device *s,
669 struct v4l2_streamparm *a)
670{
671 struct tcm825x_sensor *sensor = s->priv;
672 struct v4l2_captureparm *cparm = &a->parm.capture;
673
674 if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
675 return -EINVAL;
676
677 memset(a, 0, sizeof(*a));
678 a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
679
680 cparm->capability = V4L2_CAP_TIMEPERFRAME;
681 cparm->timeperframe = sensor->timeperframe;
682
683 return 0;
684}
685
686static int ioctl_s_parm(struct v4l2_int_device *s,
687 struct v4l2_streamparm *a)
688{
689 struct tcm825x_sensor *sensor = s->priv;
690 struct v4l2_fract *timeperframe = &a->parm.capture.timeperframe;
691 u32 tgt_fps; /* target frames per secound */
692 int rval;
693
694 if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
695 return -EINVAL;
696
697 if ((timeperframe->numerator == 0)
698 || (timeperframe->denominator == 0)) {
699 timeperframe->denominator = DEFAULT_FPS;
700 timeperframe->numerator = 1;
701 }
702
703 tgt_fps = timeperframe->denominator / timeperframe->numerator;
704
705 if (tgt_fps > MAX_FPS) {
706 timeperframe->denominator = MAX_FPS;
707 timeperframe->numerator = 1;
708 } else if (tgt_fps < MIN_FPS) {
709 timeperframe->denominator = MIN_FPS;
710 timeperframe->numerator = 1;
711 }
712
713 sensor->timeperframe = *timeperframe;
714
715 rval = tcm825x_configure(s);
716
717 return rval;
718}
719
/*
 * VIDIOC_INT_S_POWER handler: delegate power control to the board code
 * (zero is off, non-zero is on).
 */
static int ioctl_s_power(struct v4l2_int_device *s, int on)
{
	struct tcm825x_sensor *sensor = s->priv;

	return sensor->platform_data->power_set(on);
}
726
/*
 * Given the image capture format in pix, the nominal frame period in
 * timeperframe, calculate the required xclk frequency.
 *
 * TCM825X input frequency characteristics are:
 * Minimum 11.9 MHz, Typical 24.57 MHz and maximum 25/27 MHz
 */

static int ioctl_g_ifparm(struct v4l2_int_device *s, struct v4l2_ifparm *p)
{
	struct tcm825x_sensor *sensor = s->priv;
	struct v4l2_fract *timeperframe = &sensor->timeperframe;
	u32 tgt_xclk;	/* target xclk */
	u32 tgt_fps;	/* target frames per second */
	int rval;

	/* Let the board code fill in the static interface parameters. */
	rval = sensor->platform_data->ifparm(p);
	if (rval)
		return rval;

	tgt_fps = timeperframe->denominator / timeperframe->numerator;

	/*
	 * Scale the typical 24.57 MHz clock by the target rate relative
	 * to the relevant maximum (half-rate mode below the limit).
	 */
	tgt_xclk = (tgt_fps <= HIGH_FPS_MODE_LOWER_LIMIT) ?
		(2457 * tgt_fps) / MAX_HALF_FPS :
		(2457 * tgt_fps) / MAX_FPS;
	tgt_xclk *= 10000;

	tgt_xclk = min(tgt_xclk, (u32)TCM825X_XCLK_MAX);
	tgt_xclk = max(tgt_xclk, (u32)TCM825X_XCLK_MIN);

	p->u.bt656.clock_curr = tgt_xclk;

	return 0;
}
761
/*
 * VIDIOC_INT_G_NEEDS_RESET handler: delegate to the board hook with the
 * currently cached pixel format.
 */
static int ioctl_g_needs_reset(struct v4l2_int_device *s, void *buf)
{
	struct tcm825x_sensor *sensor = s->priv;

	return sensor->platform_data->needs_reset(s, buf, &sensor->pix);
}
768
/*
 * VIDIOC_INT_RESET handler: an in-band reset is not supported by this
 * driver; always refuse with -EBUSY.
 */
static int ioctl_reset(struct v4l2_int_device *s)
{
	return -EBUSY;
}
773
/*
 * VIDIOC_INT_INIT handler: (re)program the sensor from the cached state.
 */
static int ioctl_init(struct v4l2_int_device *s)
{
	return tcm825x_configure(s);
}
778
/*
 * VIDIOC_INT_DEV_EXIT handler: nothing to tear down.
 */
static int ioctl_dev_exit(struct v4l2_int_device *s)
{
	return 0;
}
783
784static int ioctl_dev_init(struct v4l2_int_device *s)
785{
786 struct tcm825x_sensor *sensor = s->priv;
787 int r;
788
789 r = tcm825x_read_reg(sensor->i2c_client, 0x01);
790 if (r < 0)
791 return r;
792 if (r == 0) {
793 dev_err(&sensor->i2c_client->dev, "device not detected\n");
794 return -EIO;
795 }
796 return 0;
797}
798
/*
 * Ioctl dispatch table handed to the v4l2-int-device core; sorted by
 * command number at registration time.
 */
static struct v4l2_int_ioctl_desc tcm825x_ioctl_desc[] = {
	{ vidioc_int_dev_init_num,
	  (v4l2_int_ioctl_func *)ioctl_dev_init },
	{ vidioc_int_dev_exit_num,
	  (v4l2_int_ioctl_func *)ioctl_dev_exit },
	{ vidioc_int_s_power_num,
	  (v4l2_int_ioctl_func *)ioctl_s_power },
	{ vidioc_int_g_ifparm_num,
	  (v4l2_int_ioctl_func *)ioctl_g_ifparm },
	{ vidioc_int_g_needs_reset_num,
	  (v4l2_int_ioctl_func *)ioctl_g_needs_reset },
	{ vidioc_int_reset_num,
	  (v4l2_int_ioctl_func *)ioctl_reset },
	{ vidioc_int_init_num,
	  (v4l2_int_ioctl_func *)ioctl_init },
	{ vidioc_int_enum_fmt_cap_num,
	  (v4l2_int_ioctl_func *)ioctl_enum_fmt_cap },
	{ vidioc_int_try_fmt_cap_num,
	  (v4l2_int_ioctl_func *)ioctl_try_fmt_cap },
	{ vidioc_int_g_fmt_cap_num,
	  (v4l2_int_ioctl_func *)ioctl_g_fmt_cap },
	{ vidioc_int_s_fmt_cap_num,
	  (v4l2_int_ioctl_func *)ioctl_s_fmt_cap },
	{ vidioc_int_g_parm_num,
	  (v4l2_int_ioctl_func *)ioctl_g_parm },
	{ vidioc_int_s_parm_num,
	  (v4l2_int_ioctl_func *)ioctl_s_parm },
	{ vidioc_int_queryctrl_num,
	  (v4l2_int_ioctl_func *)ioctl_queryctrl },
	{ vidioc_int_g_ctrl_num,
	  (v4l2_int_ioctl_func *)ioctl_g_ctrl },
	{ vidioc_int_s_ctrl_num,
	  (v4l2_int_ioctl_func *)ioctl_s_ctrl },
};
833
/* Slave descriptor handed to the v4l2-int-device core. */
static struct v4l2_int_slave tcm825x_slave = {
	.ioctls = tcm825x_ioctl_desc,
	.num_ioctls = ARRAY_SIZE(tcm825x_ioctl_desc),
};

/* Single driver-wide sensor state; its initializer appears further down. */
static struct tcm825x_sensor tcm825x;

/* v4l2-int-device registration record for this slave. */
static struct v4l2_int_device tcm825x_int_device = {
	.module = THIS_MODULE,
	.name = TCM825X_NAME,
	.priv = &tcm825x,
	.type = v4l2_int_type_slave,
	.u = {
		.slave = &tcm825x_slave,
	},
};
850
851static int tcm825x_probe(struct i2c_client *client,
852 const struct i2c_device_id *did)
853{
854 struct tcm825x_sensor *sensor = &tcm825x;
855
856 if (i2c_get_clientdata(client))
857 return -EBUSY;
858
859 sensor->platform_data = client->dev.platform_data;
860
861 if (sensor->platform_data == NULL
862 || !sensor->platform_data->is_okay())
863 return -ENODEV;
864
865 sensor->v4l2_int_device = &tcm825x_int_device;
866
867 sensor->i2c_client = client;
868 i2c_set_clientdata(client, sensor);
869
870 /* Make the default capture format QVGA RGB565 */
871 sensor->pix.width = tcm825x_sizes[QVGA].width;
872 sensor->pix.height = tcm825x_sizes[QVGA].height;
873 sensor->pix.pixelformat = V4L2_PIX_FMT_RGB565;
874
875 return v4l2_int_device_register(sensor->v4l2_int_device);
876}
877
/*
 * I2C remove: unregister from the v4l2-int-device core.
 */
static int tcm825x_remove(struct i2c_client *client)
{
	struct tcm825x_sensor *sensor = i2c_get_clientdata(client);

	if (!client->adapter)
		return -ENODEV;	/* our client isn't attached */

	v4l2_int_device_unregister(sensor->v4l2_int_device);

	return 0;
}
889
/* I2C device ids this driver binds to. */
static const struct i2c_device_id tcm825x_id[] = {
	{ "tcm825x", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, tcm825x_id);

static struct i2c_driver tcm825x_i2c_driver = {
	.driver = {
		.name = TCM825X_NAME,
	},
	.probe = tcm825x_probe,
	.remove = tcm825x_remove,
	.id_table = tcm825x_id,
};

/* Initializer for the driver-wide sensor state declared earlier:
 * default frame period of 1/DEFAULT_FPS. */
static struct tcm825x_sensor tcm825x = {
	.timeperframe = {
		.numerator = 1,
		.denominator = DEFAULT_FPS,
	},
};
911
912static int __init tcm825x_init(void)
913{
914 int rval;
915
916 rval = i2c_add_driver(&tcm825x_i2c_driver);
917 if (rval)
918 pr_info("%s: failed registering " TCM825X_NAME "\n",
919 __func__);
920
921 return rval;
922}
923
/*
 * Module exit: unregister the I2C driver.
 */
static void __exit tcm825x_exit(void)
{
	i2c_del_driver(&tcm825x_i2c_driver);
}
928
929/*
930 * FIXME: Menelaus isn't ready (?) at module_init stage, so use
931 * late_initcall for now.
932 */
933late_initcall(tcm825x_init);
934module_exit(tcm825x_exit);
935
936MODULE_AUTHOR("Sakari Ailus <sakari.ailus@nokia.com>");
937MODULE_DESCRIPTION("TCM825x camera sensor driver");
938MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/omap24xx/tcm825x.h b/drivers/staging/media/omap24xx/tcm825x.h
deleted file mode 100644
index 8a29636d1ad4..000000000000
--- a/drivers/staging/media/omap24xx/tcm825x.h
+++ /dev/null
@@ -1,200 +0,0 @@
1/*
2 * drivers/media/i2c/tcm825x.h
3 *
4 * Register definitions for the TCM825X CameraChip.
5 *
6 * Author: David Cohen (david.cohen@indt.org.br)
7 *
8 * This file is licensed under the terms of the GNU General Public License
9 * version 2. This program is licensed "as is" without any warranty of any
10 * kind, whether express or implied.
11 *
12 * This file was based on ov9640.h from MontaVista
13 */
14
15#ifndef TCM825X_H
16#define TCM825X_H
17
18#include <linux/videodev2.h>
19
20#include "v4l2-int-device.h"
21
22#define TCM825X_NAME "tcm825x"
23
/*
 * The TCM825X_* register constants below encode the 8-bit register
 * address in the high byte and the field's bit mask in the low byte;
 * these two macros split a combined constant back apart.
 */
#define TCM825X_MASK(x) (x & 0x00ff)
#define TCM825X_ADDR(x) ((x & 0xff00) >> 8)
26
27/* The TCM825X I2C sensor chip has a fixed slave address of 0x3d. */
28#define TCM825X_I2C_ADDR 0x3d
29
30/*
31 * define register offsets for the TCM825X sensor chip
32 * OFFSET(8 bits) + MASK(8 bits)
33 * MASK bit 4 and 3 are used when the register uses more than one address
34 */
35#define TCM825X_FPS 0x0280
36#define TCM825X_ACF 0x0240
37#define TCM825X_DOUTBUF 0x020C
38#define TCM825X_DCLKP 0x0202
39#define TCM825X_ACFDET 0x0201
40#define TCM825X_DOUTSW 0x0380
41#define TCM825X_DATAHZ 0x0340
42#define TCM825X_PICSIZ 0x033c
43#define TCM825X_PICFMT 0x0302
44#define TCM825X_V_INV 0x0480
45#define TCM825X_H_INV 0x0440
46#define TCM825X_ESRLSW 0x0430
47#define TCM825X_V_LENGTH 0x040F
48#define TCM825X_ALCSW 0x0580
49#define TCM825X_ESRLIM 0x0560
50#define TCM825X_ESRSPD_U 0x051F
51#define TCM825X_ESRSPD_L 0x06FF
52#define TCM825X_AG 0x07FF
53#define TCM825X_ESRSPD2 0x06FF
54#define TCM825X_ALCMODE 0x0830
55#define TCM825X_ALCH 0x080F
56#define TCM825X_ALCL 0x09FF
57#define TCM825X_AWBSW 0x0A80
58#define TCM825X_MRG 0x0BFF
59#define TCM825X_MBG 0x0CFF
60#define TCM825X_GAMSW 0x0D80
61#define TCM825X_HDTG 0x0EFF
62#define TCM825X_VDTG 0x0FFF
63#define TCM825X_HDTCORE 0x10F0
64#define TCM825X_VDTCORE 0x100F
65#define TCM825X_CONT 0x11FF
66#define TCM825X_BRIGHT 0x12FF
67#define TCM825X_VHUE 0x137F
68#define TCM825X_UHUE 0x147F
69#define TCM825X_VGAIN 0x153F
70#define TCM825X_UGAIN 0x163F
71#define TCM825X_UVCORE 0x170F
72#define TCM825X_SATU 0x187F
73#define TCM825X_MHMODE 0x1980
74#define TCM825X_MHLPFSEL 0x1940
75#define TCM825X_YMODE 0x1930
76#define TCM825X_MIXHG 0x1907
77#define TCM825X_LENS 0x1A3F
78#define TCM825X_AGLIM 0x1BE0
79#define TCM825X_LENSRPOL 0x1B10
80#define TCM825X_LENSRGAIN 0x1B0F
81#define TCM825X_ES100S 0x1CFF
82#define TCM825X_ES120S 0x1DFF
83#define TCM825X_DMASK 0x1EC0
84#define TCM825X_CODESW 0x1E20
85#define TCM825X_CODESEL 0x1E10
86#define TCM825X_TESPIC 0x1E04
87#define TCM825X_PICSEL 0x1E03
88#define TCM825X_HNUM 0x20FF
89#define TCM825X_VOUTPH 0x287F
90#define TCM825X_ESROUT 0x327F
91#define TCM825X_ESROUT2 0x33FF
92#define TCM825X_AGOUT 0x34FF
93#define TCM825X_DGOUT 0x353F
94#define TCM825X_AGSLOW1 0x39C0
95#define TCM825X_FLLSMODE 0x3930
96#define TCM825X_FLLSLIM 0x390F
97#define TCM825X_DETSEL 0x3AF0
98#define TCM825X_ACDETNC 0x3A0F
99#define TCM825X_AGSLOW2 0x3BC0
100#define TCM825X_DG 0x3B3F
101#define TCM825X_REJHLEV 0x3CFF
102#define TCM825X_ALCLOCK 0x3D80
103#define TCM825X_FPSLNKSW 0x3D40
104#define TCM825X_ALCSPD 0x3D30
105#define TCM825X_REJH 0x3D03
106#define TCM825X_SHESRSW 0x3E80
107#define TCM825X_ESLIMSEL 0x3E40
108#define TCM825X_SHESRSPD 0x3E30
109#define TCM825X_ELSTEP 0x3E0C
110#define TCM825X_ELSTART 0x3E03
111#define TCM825X_AGMIN 0x3FFF
112#define TCM825X_PREGRG 0x423F
113#define TCM825X_PREGBG 0x433F
114#define TCM825X_PRERG 0x443F
115#define TCM825X_PREBG 0x453F
116#define TCM825X_MSKBR 0x477F
117#define TCM825X_MSKGR 0x487F
118#define TCM825X_MSKRB 0x497F
119#define TCM825X_MSKGB 0x4A7F
120#define TCM825X_MSKRG 0x4B7F
121#define TCM825X_MSKBG 0x4C7F
122#define TCM825X_HDTCSW 0x4D80
123#define TCM825X_VDTCSW 0x4D40
124#define TCM825X_DTCYL 0x4D3F
125#define TCM825X_HDTPSW 0x4E80
126#define TCM825X_VDTPSW 0x4E40
127#define TCM825X_DTCGAIN 0x4E3F
128#define TCM825X_DTLLIMSW 0x4F10
129#define TCM825X_DTLYLIM 0x4F0F
130#define TCM825X_YLCUTLMSK 0x5080
131#define TCM825X_YLCUTL 0x503F
132#define TCM825X_YLCUTHMSK 0x5180
133#define TCM825X_YLCUTH 0x513F
134#define TCM825X_UVSKNC 0x527F
135#define TCM825X_UVLJ 0x537F
136#define TCM825X_WBGMIN 0x54FF
137#define TCM825X_WBGMAX 0x55FF
138#define TCM825X_WBSPDUP 0x5603
139#define TCM825X_ALLAREA 0x5820
140#define TCM825X_WBLOCK 0x5810
141#define TCM825X_WB2SP 0x580F
142#define TCM825X_KIZUSW 0x5920
143#define TCM825X_PBRSW 0x5910
144#define TCM825X_ABCSW 0x5903
145#define TCM825X_PBDLV 0x5AFF
146#define TCM825X_PBC1LV 0x5BFF
147
148#define TCM825X_NUM_REGS (TCM825X_ADDR(TCM825X_PBC1LV) + 1)
149
150#define TCM825X_BYTES_PER_PIXEL 2
151
152#define TCM825X_REG_TERM 0xff /* terminating list entry for reg */
153#define TCM825X_VAL_TERM 0xff /* terminating list entry for val */
154
/* One register-initialization entry: write @val to register @reg.
 * Lists of these are terminated by { TCM825X_VAL_TERM, TCM825X_REG_TERM }. */
struct tcm825x_reg {
	u8 val;
	u16 reg;
};
160
161enum image_size { subQCIF = 0, QQVGA, QCIF, QVGA, CIF, VGA };
162enum pixel_format { YUV422 = 0, RGB565 };
163#define NUM_IMAGE_SIZES 6
164#define NUM_PIXEL_FORMATS 2
165
166#define TCM825X_XCLK_MIN 11900000
167#define TCM825X_XCLK_MAX 25000000
168
169struct capture_size {
170 unsigned long width;
171 unsigned long height;
172};
173
/* Board-specific hooks supplied to the driver via I2C platform data. */
struct tcm825x_platform_data {
	/* Is the sensor usable? Doesn't yet mean it's there, but you
	 * can try! */
	int (*is_okay)(void);
	/* Set power state, zero is off, non-zero is on. */
	int (*power_set)(int power);
	/* Default registers written after power-on or reset. */
	const struct tcm825x_reg * (*default_regs)(void);
	/* Called for VIDIOC_INT_G_NEEDS_RESET with the active pixel
	 * format in @fmt. */
	int (*needs_reset)(struct v4l2_int_device *s, void *buf,
			   struct v4l2_pix_format *fmt);
	/* Fill in static interface parameters (bus type, clocking). */
	int (*ifparm)(struct v4l2_ifparm *p);
	/* Non-zero if the sensor is mounted upside down. */
	int (*is_upside_down)(void);
};
187
/* Array of image sizes supported by TCM825X. These must be ordered from
 * smallest image size to largest.  Indexed by enum image_size.
 */
static const struct capture_size tcm825x_sizes[] = {
	{ 128, 96 },	/* subQCIF */
	{ 160, 120 },	/* QQVGA */
	{ 176, 144 },	/* QCIF */
	{ 320, 240 },	/* QVGA */
	{ 352, 288 },	/* CIF */
	{ 640, 480 },	/* VGA */
};
199
200#endif /* ifndef TCM825X_H */
diff --git a/drivers/staging/media/omap24xx/v4l2-int-device.c b/drivers/staging/media/omap24xx/v4l2-int-device.c
deleted file mode 100644
index 427a89033a1d..000000000000
--- a/drivers/staging/media/omap24xx/v4l2-int-device.c
+++ /dev/null
@@ -1,164 +0,0 @@
1/*
2 * drivers/media/video/v4l2-int-device.c
3 *
4 * V4L2 internal ioctl interface.
5 *
6 * Copyright (C) 2007 Nokia Corporation.
7 *
8 * Contact: Sakari Ailus <sakari.ailus@nokia.com>
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * version 2 as published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
22 * 02110-1301 USA
23 */
24
25#include <linux/kernel.h>
26#include <linux/list.h>
27#include <linux/sort.h>
28#include <linux/string.h>
29#include <linux/module.h>
30
31#include "v4l2-int-device.h"
32
33static DEFINE_MUTEX(mutex);
34static LIST_HEAD(int_list);
35
/*
 * Try to wire every unattached slave to a willing master.
 *
 * For each master on the global list, offer it every slave that is not
 * yet connected and either names this master in attach_to[] or leaves
 * attach_to[] empty (matches any master).  The master's module refcount
 * is taken before attach() and dropped again if attach() refuses.
 *
 * NOTE(review): the list is walked without taking 'mutex'; the caller
 * in v4l2_int_device_register() holds it -- confirm any external
 * callers do the same.
 */
void v4l2_int_device_try_attach_all(void)
{
	struct v4l2_int_device *m, *s;

	list_for_each_entry(m, &int_list, head) {
		if (m->type != v4l2_int_type_master)
			continue;

		list_for_each_entry(s, &int_list, head) {
			if (s->type != v4l2_int_type_slave)
				continue;

			/* Slave is connected? */
			if (s->u.slave->master)
				continue;

			/* Slave wants to attach to master? */
			if (s->u.slave->attach_to[0] != 0
			    && strncmp(m->name, s->u.slave->attach_to,
				       V4L2NAMESIZE))
				continue;

			if (!try_module_get(m->module))
				continue;

			s->u.slave->master = m;
			if (m->u.master->attach(s)) {
				s->u.slave->master = NULL;
				module_put(m->module);
				continue;
			}
		}
	}
}
EXPORT_SYMBOL_GPL(v4l2_int_device_try_attach_all);
71
72static int ioctl_sort_cmp(const void *a, const void *b)
73{
74 const struct v4l2_int_ioctl_desc *d1 = a, *d2 = b;
75
76 if (d1->num > d2->num)
77 return 1;
78
79 if (d1->num < d2->num)
80 return -1;
81
82 return 0;
83}
84
/*
 * Register a master or slave on the global device list.
 *
 * A slave's ioctl table is sorted by command number first so that
 * find_ioctl() can binary-search it.  Registration immediately attempts
 * to attach all unattached slaves to masters.  Always returns 0.
 */
int v4l2_int_device_register(struct v4l2_int_device *d)
{
	if (d->type == v4l2_int_type_slave)
		sort(d->u.slave->ioctls, d->u.slave->num_ioctls,
		     sizeof(struct v4l2_int_ioctl_desc),
		     &ioctl_sort_cmp, NULL);
	mutex_lock(&mutex);
	list_add(&d->head, &int_list);
	v4l2_int_device_try_attach_all();
	mutex_unlock(&mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_int_device_register);
99
/*
 * Remove @d from the global list; a still-attached slave is detached
 * from its master and the master module's refcount is dropped.
 */
void v4l2_int_device_unregister(struct v4l2_int_device *d)
{
	mutex_lock(&mutex);
	list_del(&d->head);
	if (d->type == v4l2_int_type_slave
	    && d->u.slave->master != NULL) {
		d->u.slave->master->u.master->detach(d);
		module_put(d->u.slave->master->module);
		d->u.slave->master = NULL;
	}
	mutex_unlock(&mutex);
}
EXPORT_SYMBOL_GPL(v4l2_int_device_unregister);
113
/*
 * Adapted from search_extable in extable.c.
 *
 * Binary-search @slave's ioctl table (pre-sorted by num at registration
 * time) for command @cmd; return its handler, or @no_such_ioctl when
 * the slave does not implement the command.
 */
static v4l2_int_ioctl_func *find_ioctl(struct v4l2_int_slave *slave, int cmd,
				       v4l2_int_ioctl_func *no_such_ioctl)
{
	const struct v4l2_int_ioctl_desc *first = slave->ioctls;
	const struct v4l2_int_ioctl_desc *last =
		first + slave->num_ioctls - 1;

	while (first <= last) {
		const struct v4l2_int_ioctl_desc *mid;

		mid = (last - first) / 2 + first;

		if (mid->num < cmd)
			first = mid + 1;
		else if (mid->num > cmd)
			last = mid - 1;
		else
			return mid->func;
	}

	return no_such_ioctl;
}
137
/* Fallback for unimplemented no-argument ioctls. */
static int no_such_ioctl_0(struct v4l2_int_device *d)
{
	return -ENOIOCTLCMD;
}

/* Dispatch a no-argument ioctl @cmd to slave @d. */
int v4l2_int_ioctl_0(struct v4l2_int_device *d, int cmd)
{
	return ((v4l2_int_ioctl_func_0 *)
		find_ioctl(d->u.slave, cmd,
			   (v4l2_int_ioctl_func *)no_such_ioctl_0))(d);
}
EXPORT_SYMBOL_GPL(v4l2_int_ioctl_0);
150
/* Fallback for unimplemented one-argument ioctls. */
static int no_such_ioctl_1(struct v4l2_int_device *d, void *arg)
{
	return -ENOIOCTLCMD;
}

/* Dispatch a one-argument ioctl @cmd with @arg to slave @d. */
int v4l2_int_ioctl_1(struct v4l2_int_device *d, int cmd, void *arg)
{
	return ((v4l2_int_ioctl_func_1 *)
		find_ioctl(d->u.slave, cmd,
			   (v4l2_int_ioctl_func *)no_such_ioctl_1))(d, arg);
}
EXPORT_SYMBOL_GPL(v4l2_int_ioctl_1);
163
164MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/omap24xx/v4l2-int-device.h b/drivers/staging/media/omap24xx/v4l2-int-device.h
deleted file mode 100644
index 0286c95814ff..000000000000
--- a/drivers/staging/media/omap24xx/v4l2-int-device.h
+++ /dev/null
@@ -1,305 +0,0 @@
1/*
2 * include/media/v4l2-int-device.h
3 *
4 * V4L2 internal ioctl interface.
5 *
6 * Copyright (C) 2007 Nokia Corporation.
7 *
8 * Contact: Sakari Ailus <sakari.ailus@nokia.com>
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * version 2 as published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
22 * 02110-1301 USA
23 */
24
25#ifndef V4L2_INT_DEVICE_H
26#define V4L2_INT_DEVICE_H
27
28#include <media/v4l2-common.h>
29
30#define V4L2NAMESIZE 32
31
32/*
33 *
34 * The internal V4L2 device interface core.
35 *
36 */
37
38enum v4l2_int_type {
39 v4l2_int_type_master = 1,
40 v4l2_int_type_slave
41};
42
43struct module;
44
45struct v4l2_int_device;
46
47struct v4l2_int_master {
48 int (*attach)(struct v4l2_int_device *slave);
49 void (*detach)(struct v4l2_int_device *slave);
50};
51
52typedef int (v4l2_int_ioctl_func)(struct v4l2_int_device *);
53typedef int (v4l2_int_ioctl_func_0)(struct v4l2_int_device *);
54typedef int (v4l2_int_ioctl_func_1)(struct v4l2_int_device *, void *);
55
56struct v4l2_int_ioctl_desc {
57 int num;
58 v4l2_int_ioctl_func *func;
59};
60
61struct v4l2_int_slave {
62 /* Don't touch master. */
63 struct v4l2_int_device *master;
64
65 char attach_to[V4L2NAMESIZE];
66
67 int num_ioctls;
68 struct v4l2_int_ioctl_desc *ioctls;
69};
70
71struct v4l2_int_device {
72 /* Don't touch head. */
73 struct list_head head;
74
75 struct module *module;
76
77 char name[V4L2NAMESIZE];
78
79 enum v4l2_int_type type;
80 union {
81 struct v4l2_int_master *master;
82 struct v4l2_int_slave *slave;
83 } u;
84
85 void *priv;
86};
87
88void v4l2_int_device_try_attach_all(void);
89
90int v4l2_int_device_register(struct v4l2_int_device *d);
91void v4l2_int_device_unregister(struct v4l2_int_device *d);
92
93int v4l2_int_ioctl_0(struct v4l2_int_device *d, int cmd);
94int v4l2_int_ioctl_1(struct v4l2_int_device *d, int cmd, void *arg);
95
96/*
97 *
98 * Types and definitions for IOCTL commands.
99 *
100 */
101
102enum v4l2_power {
103 V4L2_POWER_OFF = 0,
104 V4L2_POWER_ON,
105 V4L2_POWER_STANDBY,
106};
107
108/* Slave interface type. */
109enum v4l2_if_type {
110 /*
111 * Parallel 8-, 10- or 12-bit interface, used by for example
112 * on certain image sensors.
113 */
114 V4L2_IF_TYPE_BT656,
115};
116
117enum v4l2_if_type_bt656_mode {
118 /*
119 * Modes without Bt synchronisation codes. Separate
120 * synchronisation signal lines are used.
121 */
122 V4L2_IF_TYPE_BT656_MODE_NOBT_8BIT,
123 V4L2_IF_TYPE_BT656_MODE_NOBT_10BIT,
124 V4L2_IF_TYPE_BT656_MODE_NOBT_12BIT,
125 /*
126 * Use Bt synchronisation codes. The vertical and horizontal
127 * synchronisation is done based on synchronisation codes.
128 */
129 V4L2_IF_TYPE_BT656_MODE_BT_8BIT,
130 V4L2_IF_TYPE_BT656_MODE_BT_10BIT,
131};
132
133struct v4l2_if_type_bt656 {
134 /*
135 * 0: Frame begins when vsync is high.
136 * 1: Frame begins when vsync changes from low to high.
137 */
138 unsigned frame_start_on_rising_vs:1;
139 /* Use Bt synchronisation codes for sync correction. */
140 unsigned bt_sync_correct:1;
141 /* Swap every two adjacent image data elements. */
142 unsigned swap:1;
143 /* Inverted latch clock polarity from slave. */
144 unsigned latch_clk_inv:1;
145 /* Hs polarity. 0 is active high, 1 active low. */
146 unsigned nobt_hs_inv:1;
147 /* Vs polarity. 0 is active high, 1 active low. */
148 unsigned nobt_vs_inv:1;
149 enum v4l2_if_type_bt656_mode mode;
150 /* Minimum accepted bus clock for slave (in Hz). */
151 u32 clock_min;
152 /* Maximum accepted bus clock for slave. */
153 u32 clock_max;
154 /*
155 * Current wish of the slave. May only change in response to
156 * ioctls that affect image capture.
157 */
158 u32 clock_curr;
159};
160
161struct v4l2_ifparm {
162 enum v4l2_if_type if_type;
163 union {
164 struct v4l2_if_type_bt656 bt656;
165 } u;
166};
167
168/* IOCTL command numbers. */
169enum v4l2_int_ioctl_num {
170 /*
171 *
172 * "Proper" V4L ioctls, as in struct video_device.
173 *
174 */
175 vidioc_int_enum_fmt_cap_num = 1,
176 vidioc_int_g_fmt_cap_num,
177 vidioc_int_s_fmt_cap_num,
178 vidioc_int_try_fmt_cap_num,
179 vidioc_int_queryctrl_num,
180 vidioc_int_g_ctrl_num,
181 vidioc_int_s_ctrl_num,
182 vidioc_int_cropcap_num,
183 vidioc_int_g_crop_num,
184 vidioc_int_s_crop_num,
185 vidioc_int_g_parm_num,
186 vidioc_int_s_parm_num,
187 vidioc_int_querystd_num,
188 vidioc_int_s_std_num,
189 vidioc_int_s_video_routing_num,
190
191 /*
192 *
193 * Strictly internal ioctls.
194 *
195 */
196 /* Initialise the device when slave attaches to the master. */
197 vidioc_int_dev_init_num = 1000,
198 /* Delinitialise the device at slave detach. */
199 vidioc_int_dev_exit_num,
200 /* Set device power state. */
201 vidioc_int_s_power_num,
202 /*
203 * Get slave private data, e.g. platform-specific slave
204 * configuration used by the master.
205 */
206 vidioc_int_g_priv_num,
207 /* Get slave interface parameters. */
208 vidioc_int_g_ifparm_num,
209 /* Does the slave need to be reset after VIDIOC_DQBUF? */
210 vidioc_int_g_needs_reset_num,
211 vidioc_int_enum_framesizes_num,
212 vidioc_int_enum_frameintervals_num,
213
214 /*
215 *
216 * VIDIOC_INT_* ioctls.
217 *
218 */
219 /* VIDIOC_INT_RESET */
220 vidioc_int_reset_num,
221 /* VIDIOC_INT_INIT */
222 vidioc_int_init_num,
223
224 /*
225 *
226 * Start of private ioctls.
227 *
228 */
229 vidioc_int_priv_start_num = 2000,
230};
231
232/*
233 *
234 * IOCTL wrapper functions for better type checking.
235 *
236 */
237
238#define V4L2_INT_WRAPPER_0(name) \
239 static inline int vidioc_int_##name(struct v4l2_int_device *d) \
240 { \
241 return v4l2_int_ioctl_0(d, vidioc_int_##name##_num); \
242 } \
243 \
244 static inline struct v4l2_int_ioctl_desc \
245 vidioc_int_##name##_cb(int (*func) \
246 (struct v4l2_int_device *)) \
247 { \
248 struct v4l2_int_ioctl_desc desc; \
249 \
250 desc.num = vidioc_int_##name##_num; \
251 desc.func = (v4l2_int_ioctl_func *)func; \
252 \
253 return desc; \
254 }
255
256#define V4L2_INT_WRAPPER_1(name, arg_type, asterisk) \
257 static inline int vidioc_int_##name(struct v4l2_int_device *d, \
258 arg_type asterisk arg) \
259 { \
260 return v4l2_int_ioctl_1(d, vidioc_int_##name##_num, \
261 (void *)(unsigned long)arg); \
262 } \
263 \
264 static inline struct v4l2_int_ioctl_desc \
265 vidioc_int_##name##_cb(int (*func) \
266 (struct v4l2_int_device *, \
267 arg_type asterisk)) \
268 { \
269 struct v4l2_int_ioctl_desc desc; \
270 \
271 desc.num = vidioc_int_##name##_num; \
272 desc.func = (v4l2_int_ioctl_func *)func; \
273 \
274 return desc; \
275 }
276
277V4L2_INT_WRAPPER_1(enum_fmt_cap, struct v4l2_fmtdesc, *);
278V4L2_INT_WRAPPER_1(g_fmt_cap, struct v4l2_format, *);
279V4L2_INT_WRAPPER_1(s_fmt_cap, struct v4l2_format, *);
280V4L2_INT_WRAPPER_1(try_fmt_cap, struct v4l2_format, *);
281V4L2_INT_WRAPPER_1(queryctrl, struct v4l2_queryctrl, *);
282V4L2_INT_WRAPPER_1(g_ctrl, struct v4l2_control, *);
283V4L2_INT_WRAPPER_1(s_ctrl, struct v4l2_control, *);
284V4L2_INT_WRAPPER_1(cropcap, struct v4l2_cropcap, *);
285V4L2_INT_WRAPPER_1(g_crop, struct v4l2_crop, *);
286V4L2_INT_WRAPPER_1(s_crop, struct v4l2_crop, *);
287V4L2_INT_WRAPPER_1(g_parm, struct v4l2_streamparm, *);
288V4L2_INT_WRAPPER_1(s_parm, struct v4l2_streamparm, *);
289V4L2_INT_WRAPPER_1(querystd, v4l2_std_id, *);
290V4L2_INT_WRAPPER_1(s_std, v4l2_std_id, *);
291V4L2_INT_WRAPPER_1(s_video_routing, struct v4l2_routing, *);
292
293V4L2_INT_WRAPPER_0(dev_init);
294V4L2_INT_WRAPPER_0(dev_exit);
295V4L2_INT_WRAPPER_1(s_power, enum v4l2_power, );
296V4L2_INT_WRAPPER_1(g_priv, void, *);
297V4L2_INT_WRAPPER_1(g_ifparm, struct v4l2_ifparm, *);
298V4L2_INT_WRAPPER_1(g_needs_reset, void, *);
299V4L2_INT_WRAPPER_1(enum_framesizes, struct v4l2_frmsizeenum, *);
300V4L2_INT_WRAPPER_1(enum_frameintervals, struct v4l2_frmivalenum, *);
301
302V4L2_INT_WRAPPER_0(reset);
303V4L2_INT_WRAPPER_0(init);
304
305#endif
diff --git a/drivers/staging/media/omap4iss/iss_csi2.c b/drivers/staging/media/omap4iss/iss_csi2.c
index 92c2d5b743c7..7dbf68cd3566 100644
--- a/drivers/staging/media/omap4iss/iss_csi2.c
+++ b/drivers/staging/media/omap4iss/iss_csi2.c
@@ -93,20 +93,20 @@ static void csi2_recv_config(struct iss_csi2_device *csi2,
93} 93}
94 94
95static const unsigned int csi2_input_fmts[] = { 95static const unsigned int csi2_input_fmts[] = {
96 V4L2_MBUS_FMT_SGRBG10_1X10, 96 MEDIA_BUS_FMT_SGRBG10_1X10,
97 V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8, 97 MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8,
98 V4L2_MBUS_FMT_SRGGB10_1X10, 98 MEDIA_BUS_FMT_SRGGB10_1X10,
99 V4L2_MBUS_FMT_SRGGB10_DPCM8_1X8, 99 MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8,
100 V4L2_MBUS_FMT_SBGGR10_1X10, 100 MEDIA_BUS_FMT_SBGGR10_1X10,
101 V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8, 101 MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8,
102 V4L2_MBUS_FMT_SGBRG10_1X10, 102 MEDIA_BUS_FMT_SGBRG10_1X10,
103 V4L2_MBUS_FMT_SGBRG10_DPCM8_1X8, 103 MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8,
104 V4L2_MBUS_FMT_SBGGR8_1X8, 104 MEDIA_BUS_FMT_SBGGR8_1X8,
105 V4L2_MBUS_FMT_SGBRG8_1X8, 105 MEDIA_BUS_FMT_SGBRG8_1X8,
106 V4L2_MBUS_FMT_SGRBG8_1X8, 106 MEDIA_BUS_FMT_SGRBG8_1X8,
107 V4L2_MBUS_FMT_SRGGB8_1X8, 107 MEDIA_BUS_FMT_SRGGB8_1X8,
108 V4L2_MBUS_FMT_UYVY8_1X16, 108 MEDIA_BUS_FMT_UYVY8_1X16,
109 V4L2_MBUS_FMT_YUYV8_1X16, 109 MEDIA_BUS_FMT_YUYV8_1X16,
110}; 110};
111 111
112/* To set the format on the CSI2 requires a mapping function that takes 112/* To set the format on the CSI2 requires a mapping function that takes
@@ -201,26 +201,26 @@ static u16 csi2_ctx_map_format(struct iss_csi2_device *csi2)
201 int fmtidx, destidx; 201 int fmtidx, destidx;
202 202
203 switch (fmt->code) { 203 switch (fmt->code) {
204 case V4L2_MBUS_FMT_SGRBG10_1X10: 204 case MEDIA_BUS_FMT_SGRBG10_1X10:
205 case V4L2_MBUS_FMT_SRGGB10_1X10: 205 case MEDIA_BUS_FMT_SRGGB10_1X10:
206 case V4L2_MBUS_FMT_SBGGR10_1X10: 206 case MEDIA_BUS_FMT_SBGGR10_1X10:
207 case V4L2_MBUS_FMT_SGBRG10_1X10: 207 case MEDIA_BUS_FMT_SGBRG10_1X10:
208 fmtidx = 0; 208 fmtidx = 0;
209 break; 209 break;
210 case V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8: 210 case MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8:
211 case V4L2_MBUS_FMT_SRGGB10_DPCM8_1X8: 211 case MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8:
212 case V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8: 212 case MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8:
213 case V4L2_MBUS_FMT_SGBRG10_DPCM8_1X8: 213 case MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8:
214 fmtidx = 1; 214 fmtidx = 1;
215 break; 215 break;
216 case V4L2_MBUS_FMT_SBGGR8_1X8: 216 case MEDIA_BUS_FMT_SBGGR8_1X8:
217 case V4L2_MBUS_FMT_SGBRG8_1X8: 217 case MEDIA_BUS_FMT_SGBRG8_1X8:
218 case V4L2_MBUS_FMT_SGRBG8_1X8: 218 case MEDIA_BUS_FMT_SGRBG8_1X8:
219 case V4L2_MBUS_FMT_SRGGB8_1X8: 219 case MEDIA_BUS_FMT_SRGGB8_1X8:
220 fmtidx = 2; 220 fmtidx = 2;
221 break; 221 break;
222 case V4L2_MBUS_FMT_UYVY8_1X16: 222 case MEDIA_BUS_FMT_UYVY8_1X16:
223 case V4L2_MBUS_FMT_YUYV8_1X16: 223 case MEDIA_BUS_FMT_YUYV8_1X16:
224 fmtidx = 3; 224 fmtidx = 3;
225 break; 225 break;
226 default: 226 default:
@@ -817,7 +817,7 @@ csi2_try_format(struct iss_csi2_device *csi2, struct v4l2_subdev_fh *fh,
817 unsigned int pad, struct v4l2_mbus_framefmt *fmt, 817 unsigned int pad, struct v4l2_mbus_framefmt *fmt,
818 enum v4l2_subdev_format_whence which) 818 enum v4l2_subdev_format_whence which)
819{ 819{
820 enum v4l2_mbus_pixelcode pixelcode; 820 u32 pixelcode;
821 struct v4l2_mbus_framefmt *format; 821 struct v4l2_mbus_framefmt *format;
822 const struct iss_format_info *info; 822 const struct iss_format_info *info;
823 unsigned int i; 823 unsigned int i;
@@ -832,7 +832,7 @@ csi2_try_format(struct iss_csi2_device *csi2, struct v4l2_subdev_fh *fh,
832 832
833 /* If not found, use SGRBG10 as default */ 833 /* If not found, use SGRBG10 as default */
834 if (i >= ARRAY_SIZE(csi2_input_fmts)) 834 if (i >= ARRAY_SIZE(csi2_input_fmts))
835 fmt->code = V4L2_MBUS_FMT_SGRBG10_1X10; 835 fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
836 836
837 fmt->width = clamp_t(u32, fmt->width, 1, 8191); 837 fmt->width = clamp_t(u32, fmt->width, 1, 8191);
838 fmt->height = clamp_t(u32, fmt->height, 1, 8191); 838 fmt->height = clamp_t(u32, fmt->height, 1, 8191);
@@ -1020,7 +1020,7 @@ static int csi2_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
1020 memset(&format, 0, sizeof(format)); 1020 memset(&format, 0, sizeof(format));
1021 format.pad = CSI2_PAD_SINK; 1021 format.pad = CSI2_PAD_SINK;
1022 format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE; 1022 format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
1023 format.format.code = V4L2_MBUS_FMT_SGRBG10_1X10; 1023 format.format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
1024 format.format.width = 4096; 1024 format.format.width = 4096;
1025 format.format.height = 4096; 1025 format.format.height = 4096;
1026 csi2_set_format(sd, fh, &format); 1026 csi2_set_format(sd, fh, &format);
diff --git a/drivers/staging/media/omap4iss/iss_ipipe.c b/drivers/staging/media/omap4iss/iss_ipipe.c
index 54042008154c..a1a46ef8319b 100644
--- a/drivers/staging/media/omap4iss/iss_ipipe.c
+++ b/drivers/staging/media/omap4iss/iss_ipipe.c
@@ -28,10 +28,10 @@ __ipipe_get_format(struct iss_ipipe_device *ipipe, struct v4l2_subdev_fh *fh,
28 unsigned int pad, enum v4l2_subdev_format_whence which); 28 unsigned int pad, enum v4l2_subdev_format_whence which);
29 29
30static const unsigned int ipipe_fmts[] = { 30static const unsigned int ipipe_fmts[] = {
31 V4L2_MBUS_FMT_SGRBG10_1X10, 31 MEDIA_BUS_FMT_SGRBG10_1X10,
32 V4L2_MBUS_FMT_SRGGB10_1X10, 32 MEDIA_BUS_FMT_SRGGB10_1X10,
33 V4L2_MBUS_FMT_SBGGR10_1X10, 33 MEDIA_BUS_FMT_SBGGR10_1X10,
34 V4L2_MBUS_FMT_SGBRG10_1X10, 34 MEDIA_BUS_FMT_SGBRG10_1X10,
35}; 35};
36 36
37/* 37/*
@@ -211,7 +211,7 @@ ipipe_try_format(struct iss_ipipe_device *ipipe, struct v4l2_subdev_fh *fh,
211 211
212 /* If not found, use SGRBG10 as default */ 212 /* If not found, use SGRBG10 as default */
213 if (i >= ARRAY_SIZE(ipipe_fmts)) 213 if (i >= ARRAY_SIZE(ipipe_fmts))
214 fmt->code = V4L2_MBUS_FMT_SGRBG10_1X10; 214 fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
215 215
216 /* Clamp the input size. */ 216 /* Clamp the input size. */
217 fmt->width = clamp_t(u32, width, 1, 8192); 217 fmt->width = clamp_t(u32, width, 1, 8192);
@@ -223,7 +223,7 @@ ipipe_try_format(struct iss_ipipe_device *ipipe, struct v4l2_subdev_fh *fh,
223 format = __ipipe_get_format(ipipe, fh, IPIPE_PAD_SINK, which); 223 format = __ipipe_get_format(ipipe, fh, IPIPE_PAD_SINK, which);
224 memcpy(fmt, format, sizeof(*fmt)); 224 memcpy(fmt, format, sizeof(*fmt));
225 225
226 fmt->code = V4L2_MBUS_FMT_UYVY8_1X16; 226 fmt->code = MEDIA_BUS_FMT_UYVY8_1X16;
227 fmt->width = clamp_t(u32, width, 32, fmt->width); 227 fmt->width = clamp_t(u32, width, 32, fmt->width);
228 fmt->height = clamp_t(u32, height, 32, fmt->height); 228 fmt->height = clamp_t(u32, height, 32, fmt->height);
229 fmt->colorspace = V4L2_COLORSPACE_JPEG; 229 fmt->colorspace = V4L2_COLORSPACE_JPEG;
@@ -257,7 +257,7 @@ static int ipipe_enum_mbus_code(struct v4l2_subdev *sd,
257 if (code->index != 0) 257 if (code->index != 0)
258 return -EINVAL; 258 return -EINVAL;
259 259
260 code->code = V4L2_MBUS_FMT_UYVY8_1X16; 260 code->code = MEDIA_BUS_FMT_UYVY8_1X16;
261 break; 261 break;
262 262
263 default: 263 default:
@@ -385,7 +385,7 @@ static int ipipe_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
385 memset(&format, 0, sizeof(format)); 385 memset(&format, 0, sizeof(format));
386 format.pad = IPIPE_PAD_SINK; 386 format.pad = IPIPE_PAD_SINK;
387 format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE; 387 format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
388 format.format.code = V4L2_MBUS_FMT_SGRBG10_1X10; 388 format.format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
389 format.format.width = 4096; 389 format.format.width = 4096;
390 format.format.height = 4096; 390 format.format.height = 4096;
391 ipipe_set_format(sd, fh, &format); 391 ipipe_set_format(sd, fh, &format);
diff --git a/drivers/staging/media/omap4iss/iss_ipipeif.c b/drivers/staging/media/omap4iss/iss_ipipeif.c
index 75f6a15ad202..32a748398ced 100644
--- a/drivers/staging/media/omap4iss/iss_ipipeif.c
+++ b/drivers/staging/media/omap4iss/iss_ipipeif.c
@@ -24,12 +24,12 @@
24#include "iss_ipipeif.h" 24#include "iss_ipipeif.h"
25 25
26static const unsigned int ipipeif_fmts[] = { 26static const unsigned int ipipeif_fmts[] = {
27 V4L2_MBUS_FMT_SGRBG10_1X10, 27 MEDIA_BUS_FMT_SGRBG10_1X10,
28 V4L2_MBUS_FMT_SRGGB10_1X10, 28 MEDIA_BUS_FMT_SRGGB10_1X10,
29 V4L2_MBUS_FMT_SBGGR10_1X10, 29 MEDIA_BUS_FMT_SBGGR10_1X10,
30 V4L2_MBUS_FMT_SGBRG10_1X10, 30 MEDIA_BUS_FMT_SGBRG10_1X10,
31 V4L2_MBUS_FMT_UYVY8_1X16, 31 MEDIA_BUS_FMT_UYVY8_1X16,
32 V4L2_MBUS_FMT_YUYV8_1X16, 32 MEDIA_BUS_FMT_YUYV8_1X16,
33}; 33};
34 34
35/* 35/*
@@ -140,8 +140,8 @@ static void ipipeif_configure(struct iss_ipipeif_device *ipipeif)
140 140
141 /* Select ISIF/IPIPEIF input format */ 141 /* Select ISIF/IPIPEIF input format */
142 switch (format->code) { 142 switch (format->code) {
143 case V4L2_MBUS_FMT_UYVY8_1X16: 143 case MEDIA_BUS_FMT_UYVY8_1X16:
144 case V4L2_MBUS_FMT_YUYV8_1X16: 144 case MEDIA_BUS_FMT_YUYV8_1X16:
145 iss_reg_update(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_MODESET, 145 iss_reg_update(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_MODESET,
146 ISIF_MODESET_CCDMD | ISIF_MODESET_INPMOD_MASK | 146 ISIF_MODESET_CCDMD | ISIF_MODESET_INPMOD_MASK |
147 ISIF_MODESET_CCDW_MASK, 147 ISIF_MODESET_CCDW_MASK,
@@ -151,25 +151,25 @@ static void ipipeif_configure(struct iss_ipipeif_device *ipipeif)
151 IPIPEIF_CFG2_YUV8, IPIPEIF_CFG2_YUV16); 151 IPIPEIF_CFG2_YUV8, IPIPEIF_CFG2_YUV16);
152 152
153 break; 153 break;
154 case V4L2_MBUS_FMT_SGRBG10_1X10: 154 case MEDIA_BUS_FMT_SGRBG10_1X10:
155 isif_ccolp = ISIF_CCOLP_CP0_F0_GR | 155 isif_ccolp = ISIF_CCOLP_CP0_F0_GR |
156 ISIF_CCOLP_CP1_F0_R | 156 ISIF_CCOLP_CP1_F0_R |
157 ISIF_CCOLP_CP2_F0_B | 157 ISIF_CCOLP_CP2_F0_B |
158 ISIF_CCOLP_CP3_F0_GB; 158 ISIF_CCOLP_CP3_F0_GB;
159 goto cont_raw; 159 goto cont_raw;
160 case V4L2_MBUS_FMT_SRGGB10_1X10: 160 case MEDIA_BUS_FMT_SRGGB10_1X10:
161 isif_ccolp = ISIF_CCOLP_CP0_F0_R | 161 isif_ccolp = ISIF_CCOLP_CP0_F0_R |
162 ISIF_CCOLP_CP1_F0_GR | 162 ISIF_CCOLP_CP1_F0_GR |
163 ISIF_CCOLP_CP2_F0_GB | 163 ISIF_CCOLP_CP2_F0_GB |
164 ISIF_CCOLP_CP3_F0_B; 164 ISIF_CCOLP_CP3_F0_B;
165 goto cont_raw; 165 goto cont_raw;
166 case V4L2_MBUS_FMT_SBGGR10_1X10: 166 case MEDIA_BUS_FMT_SBGGR10_1X10:
167 isif_ccolp = ISIF_CCOLP_CP0_F0_B | 167 isif_ccolp = ISIF_CCOLP_CP0_F0_B |
168 ISIF_CCOLP_CP1_F0_GB | 168 ISIF_CCOLP_CP1_F0_GB |
169 ISIF_CCOLP_CP2_F0_GR | 169 ISIF_CCOLP_CP2_F0_GR |
170 ISIF_CCOLP_CP3_F0_R; 170 ISIF_CCOLP_CP3_F0_R;
171 goto cont_raw; 171 goto cont_raw;
172 case V4L2_MBUS_FMT_SGBRG10_1X10: 172 case MEDIA_BUS_FMT_SGBRG10_1X10:
173 isif_ccolp = ISIF_CCOLP_CP0_F0_GB | 173 isif_ccolp = ISIF_CCOLP_CP0_F0_GB |
174 ISIF_CCOLP_CP1_F0_B | 174 ISIF_CCOLP_CP1_F0_B |
175 ISIF_CCOLP_CP2_F0_R | 175 ISIF_CCOLP_CP2_F0_R |
@@ -415,7 +415,7 @@ ipipeif_try_format(struct iss_ipipeif_device *ipipeif,
415 415
416 /* If not found, use SGRBG10 as default */ 416 /* If not found, use SGRBG10 as default */
417 if (i >= ARRAY_SIZE(ipipeif_fmts)) 417 if (i >= ARRAY_SIZE(ipipeif_fmts))
418 fmt->code = V4L2_MBUS_FMT_SGRBG10_1X10; 418 fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
419 419
420 /* Clamp the input size. */ 420 /* Clamp the input size. */
421 fmt->width = clamp_t(u32, width, 1, 8192); 421 fmt->width = clamp_t(u32, width, 1, 8192);
@@ -625,7 +625,7 @@ static int ipipeif_init_formats(struct v4l2_subdev *sd,
625 memset(&format, 0, sizeof(format)); 625 memset(&format, 0, sizeof(format));
626 format.pad = IPIPEIF_PAD_SINK; 626 format.pad = IPIPEIF_PAD_SINK;
627 format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE; 627 format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
628 format.format.code = V4L2_MBUS_FMT_SGRBG10_1X10; 628 format.format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
629 format.format.width = 4096; 629 format.format.width = 4096;
630 format.format.height = 4096; 630 format.format.height = 4096;
631 ipipeif_set_format(sd, fh, &format); 631 ipipeif_set_format(sd, fh, &format);
diff --git a/drivers/staging/media/omap4iss/iss_resizer.c b/drivers/staging/media/omap4iss/iss_resizer.c
index a21e356cce3a..88522a8cdf56 100644
--- a/drivers/staging/media/omap4iss/iss_resizer.c
+++ b/drivers/staging/media/omap4iss/iss_resizer.c
@@ -24,8 +24,8 @@
24#include "iss_resizer.h" 24#include "iss_resizer.h"
25 25
26static const unsigned int resizer_fmts[] = { 26static const unsigned int resizer_fmts[] = {
27 V4L2_MBUS_FMT_UYVY8_1X16, 27 MEDIA_BUS_FMT_UYVY8_1X16,
28 V4L2_MBUS_FMT_YUYV8_1X16, 28 MEDIA_BUS_FMT_YUYV8_1X16,
29}; 29};
30 30
31/* 31/*
@@ -156,8 +156,8 @@ static void resizer_set_outaddr(struct iss_resizer_device *resizer, u32 addr)
156 addr & 0xffff); 156 addr & 0xffff);
157 157
158 /* Program UV buffer address... Hardcoded to be contiguous! */ 158 /* Program UV buffer address... Hardcoded to be contiguous! */
159 if ((informat->code == V4L2_MBUS_FMT_UYVY8_1X16) && 159 if ((informat->code == MEDIA_BUS_FMT_UYVY8_1X16) &&
160 (outformat->code == V4L2_MBUS_FMT_YUYV8_1_5X8)) { 160 (outformat->code == MEDIA_BUS_FMT_YUYV8_1_5X8)) {
161 u32 c_addr = addr + (resizer->video_out.bpl_value * 161 u32 c_addr = addr + (resizer->video_out.bpl_value *
162 (outformat->height - 1)); 162 (outformat->height - 1));
163 163
@@ -242,8 +242,8 @@ static void resizer_configure(struct iss_resizer_device *resizer)
242 resizer->video_out.bpl_value); 242 resizer->video_out.bpl_value);
243 243
244 /* UYVY -> NV12 conversion */ 244 /* UYVY -> NV12 conversion */
245 if ((informat->code == V4L2_MBUS_FMT_UYVY8_1X16) && 245 if ((informat->code == MEDIA_BUS_FMT_UYVY8_1X16) &&
246 (outformat->code == V4L2_MBUS_FMT_YUYV8_1_5X8)) { 246 (outformat->code == MEDIA_BUS_FMT_YUYV8_1_5X8)) {
247 iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_420, 247 iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_420,
248 RSZ_420_CEN | RSZ_420_YEN); 248 RSZ_420_CEN | RSZ_420_YEN);
249 249
@@ -457,7 +457,7 @@ resizer_try_format(struct iss_resizer_device *resizer,
457 struct v4l2_mbus_framefmt *fmt, 457 struct v4l2_mbus_framefmt *fmt,
458 enum v4l2_subdev_format_whence which) 458 enum v4l2_subdev_format_whence which)
459{ 459{
460 enum v4l2_mbus_pixelcode pixelcode; 460 u32 pixelcode;
461 struct v4l2_mbus_framefmt *format; 461 struct v4l2_mbus_framefmt *format;
462 unsigned int width = fmt->width; 462 unsigned int width = fmt->width;
463 unsigned int height = fmt->height; 463 unsigned int height = fmt->height;
@@ -472,7 +472,7 @@ resizer_try_format(struct iss_resizer_device *resizer,
472 472
473 /* If not found, use UYVY as default */ 473 /* If not found, use UYVY as default */
474 if (i >= ARRAY_SIZE(resizer_fmts)) 474 if (i >= ARRAY_SIZE(resizer_fmts))
475 fmt->code = V4L2_MBUS_FMT_UYVY8_1X16; 475 fmt->code = MEDIA_BUS_FMT_UYVY8_1X16;
476 476
477 /* Clamp the input size. */ 477 /* Clamp the input size. */
478 fmt->width = clamp_t(u32, width, 1, 8192); 478 fmt->width = clamp_t(u32, width, 1, 8192);
@@ -485,8 +485,8 @@ resizer_try_format(struct iss_resizer_device *resizer,
485 which); 485 which);
486 memcpy(fmt, format, sizeof(*fmt)); 486 memcpy(fmt, format, sizeof(*fmt));
487 487
488 if ((pixelcode == V4L2_MBUS_FMT_YUYV8_1_5X8) && 488 if ((pixelcode == MEDIA_BUS_FMT_YUYV8_1_5X8) &&
489 (fmt->code == V4L2_MBUS_FMT_UYVY8_1X16)) 489 (fmt->code == MEDIA_BUS_FMT_UYVY8_1X16))
490 fmt->code = pixelcode; 490 fmt->code = pixelcode;
491 491
492 /* The data formatter truncates the number of horizontal output 492 /* The data formatter truncates the number of horizontal output
@@ -537,9 +537,9 @@ static int resizer_enum_mbus_code(struct v4l2_subdev *sd,
537 } 537 }
538 538
539 switch (format->code) { 539 switch (format->code) {
540 case V4L2_MBUS_FMT_UYVY8_1X16: 540 case MEDIA_BUS_FMT_UYVY8_1X16:
541 if (code->index == 1) 541 if (code->index == 1)
542 code->code = V4L2_MBUS_FMT_YUYV8_1_5X8; 542 code->code = MEDIA_BUS_FMT_YUYV8_1_5X8;
543 else 543 else
544 return -EINVAL; 544 return -EINVAL;
545 break; 545 break;
@@ -680,7 +680,7 @@ static int resizer_init_formats(struct v4l2_subdev *sd,
680 memset(&format, 0, sizeof(format)); 680 memset(&format, 0, sizeof(format));
681 format.pad = RESIZER_PAD_SINK; 681 format.pad = RESIZER_PAD_SINK;
682 format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE; 682 format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
683 format.format.code = V4L2_MBUS_FMT_UYVY8_1X16; 683 format.format.code = MEDIA_BUS_FMT_UYVY8_1X16;
684 format.format.width = 4096; 684 format.format.width = 4096;
685 format.format.height = 4096; 685 format.format.height = 4096;
686 resizer_set_format(sd, fh, &format); 686 resizer_set_format(sd, fh, &format);
diff --git a/drivers/staging/media/omap4iss/iss_video.c b/drivers/staging/media/omap4iss/iss_video.c
index 5d6250337fec..cdee5966cbca 100644
--- a/drivers/staging/media/omap4iss/iss_video.c
+++ b/drivers/staging/media/omap4iss/iss_video.c
@@ -34,67 +34,67 @@ MODULE_PARM_DESC(debug, "activates debug info");
34 */ 34 */
35 35
36static struct iss_format_info formats[] = { 36static struct iss_format_info formats[] = {
37 { V4L2_MBUS_FMT_Y8_1X8, V4L2_MBUS_FMT_Y8_1X8, 37 { MEDIA_BUS_FMT_Y8_1X8, MEDIA_BUS_FMT_Y8_1X8,
38 V4L2_MBUS_FMT_Y8_1X8, V4L2_MBUS_FMT_Y8_1X8, 38 MEDIA_BUS_FMT_Y8_1X8, MEDIA_BUS_FMT_Y8_1X8,
39 V4L2_PIX_FMT_GREY, 8, "Greyscale 8 bpp", }, 39 V4L2_PIX_FMT_GREY, 8, "Greyscale 8 bpp", },
40 { V4L2_MBUS_FMT_Y10_1X10, V4L2_MBUS_FMT_Y10_1X10, 40 { MEDIA_BUS_FMT_Y10_1X10, MEDIA_BUS_FMT_Y10_1X10,
41 V4L2_MBUS_FMT_Y10_1X10, V4L2_MBUS_FMT_Y8_1X8, 41 MEDIA_BUS_FMT_Y10_1X10, MEDIA_BUS_FMT_Y8_1X8,
42 V4L2_PIX_FMT_Y10, 10, "Greyscale 10 bpp", }, 42 V4L2_PIX_FMT_Y10, 10, "Greyscale 10 bpp", },
43 { V4L2_MBUS_FMT_Y12_1X12, V4L2_MBUS_FMT_Y10_1X10, 43 { MEDIA_BUS_FMT_Y12_1X12, MEDIA_BUS_FMT_Y10_1X10,
44 V4L2_MBUS_FMT_Y12_1X12, V4L2_MBUS_FMT_Y8_1X8, 44 MEDIA_BUS_FMT_Y12_1X12, MEDIA_BUS_FMT_Y8_1X8,
45 V4L2_PIX_FMT_Y12, 12, "Greyscale 12 bpp", }, 45 V4L2_PIX_FMT_Y12, 12, "Greyscale 12 bpp", },
46 { V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_MBUS_FMT_SBGGR8_1X8, 46 { MEDIA_BUS_FMT_SBGGR8_1X8, MEDIA_BUS_FMT_SBGGR8_1X8,
47 V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_MBUS_FMT_SBGGR8_1X8, 47 MEDIA_BUS_FMT_SBGGR8_1X8, MEDIA_BUS_FMT_SBGGR8_1X8,
48 V4L2_PIX_FMT_SBGGR8, 8, "BGGR Bayer 8 bpp", }, 48 V4L2_PIX_FMT_SBGGR8, 8, "BGGR Bayer 8 bpp", },
49 { V4L2_MBUS_FMT_SGBRG8_1X8, V4L2_MBUS_FMT_SGBRG8_1X8, 49 { MEDIA_BUS_FMT_SGBRG8_1X8, MEDIA_BUS_FMT_SGBRG8_1X8,
50 V4L2_MBUS_FMT_SGBRG8_1X8, V4L2_MBUS_FMT_SGBRG8_1X8, 50 MEDIA_BUS_FMT_SGBRG8_1X8, MEDIA_BUS_FMT_SGBRG8_1X8,
51 V4L2_PIX_FMT_SGBRG8, 8, "GBRG Bayer 8 bpp", }, 51 V4L2_PIX_FMT_SGBRG8, 8, "GBRG Bayer 8 bpp", },
52 { V4L2_MBUS_FMT_SGRBG8_1X8, V4L2_MBUS_FMT_SGRBG8_1X8, 52 { MEDIA_BUS_FMT_SGRBG8_1X8, MEDIA_BUS_FMT_SGRBG8_1X8,
53 V4L2_MBUS_FMT_SGRBG8_1X8, V4L2_MBUS_FMT_SGRBG8_1X8, 53 MEDIA_BUS_FMT_SGRBG8_1X8, MEDIA_BUS_FMT_SGRBG8_1X8,
54 V4L2_PIX_FMT_SGRBG8, 8, "GRBG Bayer 8 bpp", }, 54 V4L2_PIX_FMT_SGRBG8, 8, "GRBG Bayer 8 bpp", },
55 { V4L2_MBUS_FMT_SRGGB8_1X8, V4L2_MBUS_FMT_SRGGB8_1X8, 55 { MEDIA_BUS_FMT_SRGGB8_1X8, MEDIA_BUS_FMT_SRGGB8_1X8,
56 V4L2_MBUS_FMT_SRGGB8_1X8, V4L2_MBUS_FMT_SRGGB8_1X8, 56 MEDIA_BUS_FMT_SRGGB8_1X8, MEDIA_BUS_FMT_SRGGB8_1X8,
57 V4L2_PIX_FMT_SRGGB8, 8, "RGGB Bayer 8 bpp", }, 57 V4L2_PIX_FMT_SRGGB8, 8, "RGGB Bayer 8 bpp", },
58 { V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8, V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8, 58 { MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8, MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8,
59 V4L2_MBUS_FMT_SGRBG10_1X10, 0, 59 MEDIA_BUS_FMT_SGRBG10_1X10, 0,
60 V4L2_PIX_FMT_SGRBG10DPCM8, 8, "GRBG Bayer 10 bpp DPCM8", }, 60 V4L2_PIX_FMT_SGRBG10DPCM8, 8, "GRBG Bayer 10 bpp DPCM8", },
61 { V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_MBUS_FMT_SBGGR10_1X10, 61 { MEDIA_BUS_FMT_SBGGR10_1X10, MEDIA_BUS_FMT_SBGGR10_1X10,
62 V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_MBUS_FMT_SBGGR8_1X8, 62 MEDIA_BUS_FMT_SBGGR10_1X10, MEDIA_BUS_FMT_SBGGR8_1X8,
63 V4L2_PIX_FMT_SBGGR10, 10, "BGGR Bayer 10 bpp", }, 63 V4L2_PIX_FMT_SBGGR10, 10, "BGGR Bayer 10 bpp", },
64 { V4L2_MBUS_FMT_SGBRG10_1X10, V4L2_MBUS_FMT_SGBRG10_1X10, 64 { MEDIA_BUS_FMT_SGBRG10_1X10, MEDIA_BUS_FMT_SGBRG10_1X10,
65 V4L2_MBUS_FMT_SGBRG10_1X10, V4L2_MBUS_FMT_SGBRG8_1X8, 65 MEDIA_BUS_FMT_SGBRG10_1X10, MEDIA_BUS_FMT_SGBRG8_1X8,
66 V4L2_PIX_FMT_SGBRG10, 10, "GBRG Bayer 10 bpp", }, 66 V4L2_PIX_FMT_SGBRG10, 10, "GBRG Bayer 10 bpp", },
67 { V4L2_MBUS_FMT_SGRBG10_1X10, V4L2_MBUS_FMT_SGRBG10_1X10, 67 { MEDIA_BUS_FMT_SGRBG10_1X10, MEDIA_BUS_FMT_SGRBG10_1X10,
68 V4L2_MBUS_FMT_SGRBG10_1X10, V4L2_MBUS_FMT_SGRBG8_1X8, 68 MEDIA_BUS_FMT_SGRBG10_1X10, MEDIA_BUS_FMT_SGRBG8_1X8,
69 V4L2_PIX_FMT_SGRBG10, 10, "GRBG Bayer 10 bpp", }, 69 V4L2_PIX_FMT_SGRBG10, 10, "GRBG Bayer 10 bpp", },
70 { V4L2_MBUS_FMT_SRGGB10_1X10, V4L2_MBUS_FMT_SRGGB10_1X10, 70 { MEDIA_BUS_FMT_SRGGB10_1X10, MEDIA_BUS_FMT_SRGGB10_1X10,
71 V4L2_MBUS_FMT_SRGGB10_1X10, V4L2_MBUS_FMT_SRGGB8_1X8, 71 MEDIA_BUS_FMT_SRGGB10_1X10, MEDIA_BUS_FMT_SRGGB8_1X8,
72 V4L2_PIX_FMT_SRGGB10, 10, "RGGB Bayer 10 bpp", }, 72 V4L2_PIX_FMT_SRGGB10, 10, "RGGB Bayer 10 bpp", },
73 { V4L2_MBUS_FMT_SBGGR12_1X12, V4L2_MBUS_FMT_SBGGR10_1X10, 73 { MEDIA_BUS_FMT_SBGGR12_1X12, MEDIA_BUS_FMT_SBGGR10_1X10,
74 V4L2_MBUS_FMT_SBGGR12_1X12, V4L2_MBUS_FMT_SBGGR8_1X8, 74 MEDIA_BUS_FMT_SBGGR12_1X12, MEDIA_BUS_FMT_SBGGR8_1X8,
75 V4L2_PIX_FMT_SBGGR12, 12, "BGGR Bayer 12 bpp", }, 75 V4L2_PIX_FMT_SBGGR12, 12, "BGGR Bayer 12 bpp", },
76 { V4L2_MBUS_FMT_SGBRG12_1X12, V4L2_MBUS_FMT_SGBRG10_1X10, 76 { MEDIA_BUS_FMT_SGBRG12_1X12, MEDIA_BUS_FMT_SGBRG10_1X10,
77 V4L2_MBUS_FMT_SGBRG12_1X12, V4L2_MBUS_FMT_SGBRG8_1X8, 77 MEDIA_BUS_FMT_SGBRG12_1X12, MEDIA_BUS_FMT_SGBRG8_1X8,
78 V4L2_PIX_FMT_SGBRG12, 12, "GBRG Bayer 12 bpp", }, 78 V4L2_PIX_FMT_SGBRG12, 12, "GBRG Bayer 12 bpp", },
79 { V4L2_MBUS_FMT_SGRBG12_1X12, V4L2_MBUS_FMT_SGRBG10_1X10, 79 { MEDIA_BUS_FMT_SGRBG12_1X12, MEDIA_BUS_FMT_SGRBG10_1X10,
80 V4L2_MBUS_FMT_SGRBG12_1X12, V4L2_MBUS_FMT_SGRBG8_1X8, 80 MEDIA_BUS_FMT_SGRBG12_1X12, MEDIA_BUS_FMT_SGRBG8_1X8,
81 V4L2_PIX_FMT_SGRBG12, 12, "GRBG Bayer 12 bpp", }, 81 V4L2_PIX_FMT_SGRBG12, 12, "GRBG Bayer 12 bpp", },
82 { V4L2_MBUS_FMT_SRGGB12_1X12, V4L2_MBUS_FMT_SRGGB10_1X10, 82 { MEDIA_BUS_FMT_SRGGB12_1X12, MEDIA_BUS_FMT_SRGGB10_1X10,
83 V4L2_MBUS_FMT_SRGGB12_1X12, V4L2_MBUS_FMT_SRGGB8_1X8, 83 MEDIA_BUS_FMT_SRGGB12_1X12, MEDIA_BUS_FMT_SRGGB8_1X8,
84 V4L2_PIX_FMT_SRGGB12, 12, "RGGB Bayer 12 bpp", }, 84 V4L2_PIX_FMT_SRGGB12, 12, "RGGB Bayer 12 bpp", },
85 { V4L2_MBUS_FMT_UYVY8_1X16, V4L2_MBUS_FMT_UYVY8_1X16, 85 { MEDIA_BUS_FMT_UYVY8_1X16, MEDIA_BUS_FMT_UYVY8_1X16,
86 V4L2_MBUS_FMT_UYVY8_1X16, 0, 86 MEDIA_BUS_FMT_UYVY8_1X16, 0,
87 V4L2_PIX_FMT_UYVY, 16, "YUV 4:2:2 (UYVY)", }, 87 V4L2_PIX_FMT_UYVY, 16, "YUV 4:2:2 (UYVY)", },
88 { V4L2_MBUS_FMT_YUYV8_1X16, V4L2_MBUS_FMT_YUYV8_1X16, 88 { MEDIA_BUS_FMT_YUYV8_1X16, MEDIA_BUS_FMT_YUYV8_1X16,
89 V4L2_MBUS_FMT_YUYV8_1X16, 0, 89 MEDIA_BUS_FMT_YUYV8_1X16, 0,
90 V4L2_PIX_FMT_YUYV, 16, "YUV 4:2:2 (YUYV)", }, 90 V4L2_PIX_FMT_YUYV, 16, "YUV 4:2:2 (YUYV)", },
91 { V4L2_MBUS_FMT_YUYV8_1_5X8, V4L2_MBUS_FMT_YUYV8_1_5X8, 91 { MEDIA_BUS_FMT_YUYV8_1_5X8, MEDIA_BUS_FMT_YUYV8_1_5X8,
92 V4L2_MBUS_FMT_YUYV8_1_5X8, 0, 92 MEDIA_BUS_FMT_YUYV8_1_5X8, 0,
93 V4L2_PIX_FMT_NV12, 8, "YUV 4:2:0 (NV12)", }, 93 V4L2_PIX_FMT_NV12, 8, "YUV 4:2:0 (NV12)", },
94}; 94};
95 95
96const struct iss_format_info * 96const struct iss_format_info *
97omap4iss_video_format_info(enum v4l2_mbus_pixelcode code) 97omap4iss_video_format_info(u32 code)
98{ 98{
99 unsigned int i; 99 unsigned int i;
100 100
diff --git a/drivers/staging/media/omap4iss/iss_video.h b/drivers/staging/media/omap4iss/iss_video.h
index 9dccdb154e1a..f11fce2cb977 100644
--- a/drivers/staging/media/omap4iss/iss_video.h
+++ b/drivers/staging/media/omap4iss/iss_video.h
@@ -43,10 +43,10 @@ struct v4l2_pix_format;
43 * @description: Human-readable format description 43 * @description: Human-readable format description
44 */ 44 */
45struct iss_format_info { 45struct iss_format_info {
46 enum v4l2_mbus_pixelcode code; 46 u32 code;
47 enum v4l2_mbus_pixelcode truncated; 47 u32 truncated;
48 enum v4l2_mbus_pixelcode uncompressed; 48 u32 uncompressed;
49 enum v4l2_mbus_pixelcode flavor; 49 u32 flavor;
50 u32 pixelformat; 50 u32 pixelformat;
51 unsigned int bpp; 51 unsigned int bpp;
52 const char *description; 52 const char *description;
@@ -199,6 +199,6 @@ void omap4iss_video_cancel_stream(struct iss_video *video);
199struct media_pad *omap4iss_video_remote_pad(struct iss_video *video); 199struct media_pad *omap4iss_video_remote_pad(struct iss_video *video);
200 200
201const struct iss_format_info * 201const struct iss_format_info *
202omap4iss_video_format_info(enum v4l2_mbus_pixelcode code); 202omap4iss_video_format_info(u32 code);
203 203
204#endif /* OMAP4_ISS_VIDEO_H */ 204#endif /* OMAP4_ISS_VIDEO_H */
diff --git a/drivers/staging/rtl8188eu/core/rtw_cmd.c b/drivers/staging/rtl8188eu/core/rtw_cmd.c
index 9935e66935af..eddef9cd2e16 100644
--- a/drivers/staging/rtl8188eu/core/rtw_cmd.c
+++ b/drivers/staging/rtl8188eu/core/rtw_cmd.c
@@ -275,11 +275,11 @@ u8 rtw_sitesurvey_cmd(struct adapter *padapter, struct ndis_802_11_ssid *ssid,
275 if (check_fwstate(pmlmepriv, _FW_LINKED) == true) 275 if (check_fwstate(pmlmepriv, _FW_LINKED) == true)
276 rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_SCAN, 1); 276 rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_SCAN, 1);
277 277
278 ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL); 278 ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
279 if (ph2c == NULL) 279 if (ph2c == NULL)
280 return _FAIL; 280 return _FAIL;
281 281
282 psurveyPara = kzalloc(sizeof(struct sitesurvey_parm), GFP_KERNEL); 282 psurveyPara = kzalloc(sizeof(struct sitesurvey_parm), GFP_ATOMIC);
283 if (psurveyPara == NULL) { 283 if (psurveyPara == NULL) {
284 kfree(ph2c); 284 kfree(ph2c);
285 return _FAIL; 285 return _FAIL;
@@ -405,7 +405,7 @@ u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network *pnetwork)
405 else 405 else
406 RT_TRACE(_module_rtl871x_cmd_c_, _drv_notice_, ("+Join cmd: SSid =[%s]\n", pmlmepriv->assoc_ssid.Ssid)); 406 RT_TRACE(_module_rtl871x_cmd_c_, _drv_notice_, ("+Join cmd: SSid =[%s]\n", pmlmepriv->assoc_ssid.Ssid));
407 407
408 pcmd = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL); 408 pcmd = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
409 if (pcmd == NULL) { 409 if (pcmd == NULL) {
410 res = _FAIL; 410 res = _FAIL;
411 RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("rtw_joinbss_cmd: memory allocate for cmd_obj fail!!!\n")); 411 RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("rtw_joinbss_cmd: memory allocate for cmd_obj fail!!!\n"));
@@ -755,13 +755,13 @@ u8 rtw_dynamic_chk_wk_cmd(struct adapter *padapter)
755 u8 res = _SUCCESS; 755 u8 res = _SUCCESS;
756 756
757 757
758 ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL); 758 ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
759 if (ph2c == NULL) { 759 if (ph2c == NULL) {
760 res = _FAIL; 760 res = _FAIL;
761 goto exit; 761 goto exit;
762 } 762 }
763 763
764 pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_KERNEL); 764 pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_ATOMIC);
765 if (pdrvextra_cmd_parm == NULL) { 765 if (pdrvextra_cmd_parm == NULL) {
766 kfree(ph2c); 766 kfree(ph2c);
767 res = _FAIL; 767 res = _FAIL;
@@ -967,13 +967,13 @@ u8 rtw_lps_ctrl_wk_cmd(struct adapter *padapter, u8 lps_ctrl_type, u8 enqueue)
967 u8 res = _SUCCESS; 967 u8 res = _SUCCESS;
968 968
969 if (enqueue) { 969 if (enqueue) {
970 ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL); 970 ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
971 if (ph2c == NULL) { 971 if (ph2c == NULL) {
972 res = _FAIL; 972 res = _FAIL;
973 goto exit; 973 goto exit;
974 } 974 }
975 975
976 pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_KERNEL); 976 pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_ATOMIC);
977 if (pdrvextra_cmd_parm == NULL) { 977 if (pdrvextra_cmd_parm == NULL) {
978 kfree(ph2c); 978 kfree(ph2c);
979 res = _FAIL; 979 res = _FAIL;
@@ -1010,13 +1010,13 @@ u8 rtw_rpt_timer_cfg_cmd(struct adapter *padapter, u16 min_time)
1010 1010
1011 u8 res = _SUCCESS; 1011 u8 res = _SUCCESS;
1012 1012
1013 ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL); 1013 ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
1014 if (ph2c == NULL) { 1014 if (ph2c == NULL) {
1015 res = _FAIL; 1015 res = _FAIL;
1016 goto exit; 1016 goto exit;
1017 } 1017 }
1018 1018
1019 pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_KERNEL); 1019 pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_ATOMIC);
1020 if (pdrvextra_cmd_parm == NULL) { 1020 if (pdrvextra_cmd_parm == NULL) {
1021 kfree(ph2c); 1021 kfree(ph2c);
1022 res = _FAIL; 1022 res = _FAIL;
@@ -1088,13 +1088,13 @@ u8 rtw_ps_cmd(struct adapter *padapter)
1088 1088
1089 u8 res = _SUCCESS; 1089 u8 res = _SUCCESS;
1090 1090
1091 ppscmd = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL); 1091 ppscmd = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
1092 if (ppscmd == NULL) { 1092 if (ppscmd == NULL) {
1093 res = _FAIL; 1093 res = _FAIL;
1094 goto exit; 1094 goto exit;
1095 } 1095 }
1096 1096
1097 pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_KERNEL); 1097 pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_ATOMIC);
1098 if (pdrvextra_cmd_parm == NULL) { 1098 if (pdrvextra_cmd_parm == NULL) {
1099 kfree(ppscmd); 1099 kfree(ppscmd);
1100 res = _FAIL; 1100 res = _FAIL;
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
index 5ba5099ec20d..70b1bc3e0e63 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
@@ -4241,12 +4241,12 @@ void report_survey_event(struct adapter *padapter,
4241 pcmdpriv = &padapter->cmdpriv; 4241 pcmdpriv = &padapter->cmdpriv;
4242 4242
4243 4243
4244 pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL); 4244 pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
4245 if (pcmd_obj == NULL) 4245 if (pcmd_obj == NULL)
4246 return; 4246 return;
4247 4247
4248 cmdsz = (sizeof(struct survey_event) + sizeof(struct C2HEvent_Header)); 4248 cmdsz = (sizeof(struct survey_event) + sizeof(struct C2HEvent_Header));
4249 pevtcmd = kzalloc(cmdsz, GFP_KERNEL); 4249 pevtcmd = kzalloc(cmdsz, GFP_ATOMIC);
4250 if (pevtcmd == NULL) { 4250 if (pevtcmd == NULL) {
4251 kfree(pcmd_obj); 4251 kfree(pcmd_obj);
4252 return; 4252 return;
@@ -4339,12 +4339,12 @@ void report_join_res(struct adapter *padapter, int res)
4339 struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info); 4339 struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
4340 struct cmd_priv *pcmdpriv = &padapter->cmdpriv; 4340 struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
4341 4341
4342 pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL); 4342 pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
4343 if (pcmd_obj == NULL) 4343 if (pcmd_obj == NULL)
4344 return; 4344 return;
4345 4345
4346 cmdsz = (sizeof(struct joinbss_event) + sizeof(struct C2HEvent_Header)); 4346 cmdsz = (sizeof(struct joinbss_event) + sizeof(struct C2HEvent_Header));
4347 pevtcmd = kzalloc(cmdsz, GFP_KERNEL); 4347 pevtcmd = kzalloc(cmdsz, GFP_ATOMIC);
4348 if (pevtcmd == NULL) { 4348 if (pevtcmd == NULL) {
4349 kfree(pcmd_obj); 4349 kfree(pcmd_obj);
4350 return; 4350 return;
@@ -4854,11 +4854,11 @@ void survey_timer_hdl(void *function_context)
4854 pmlmeext->scan_abort = false;/* reset */ 4854 pmlmeext->scan_abort = false;/* reset */
4855 } 4855 }
4856 4856
4857 ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL); 4857 ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
4858 if (ph2c == NULL) 4858 if (ph2c == NULL)
4859 goto exit_survey_timer_hdl; 4859 goto exit_survey_timer_hdl;
4860 4860
4861 psurveyPara = kzalloc(sizeof(struct sitesurvey_parm), GFP_KERNEL); 4861 psurveyPara = kzalloc(sizeof(struct sitesurvey_parm), GFP_ATOMIC);
4862 if (psurveyPara == NULL) { 4862 if (psurveyPara == NULL) {
4863 kfree(ph2c); 4863 kfree(ph2c);
4864 goto exit_survey_timer_hdl; 4864 goto exit_survey_timer_hdl;
diff --git a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
index 33ccbbbd8ed6..d300369977fa 100644
--- a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
+++ b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
@@ -935,7 +935,7 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len)
935 return true; 935 return true;
936 } 936 }
937 937
938 bssid = kzalloc(sizeof(struct wlan_bssid_ex), GFP_KERNEL); 938 bssid = kzalloc(sizeof(struct wlan_bssid_ex), GFP_ATOMIC);
939 939
940 subtype = GetFrameSubType(pframe) >> 4; 940 subtype = GetFrameSubType(pframe) >> 4;
941 941
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
index 407a318b09db..2f87150a21b7 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
@@ -47,6 +47,7 @@ static struct usb_device_id rtw_usb_id_tbl[] = {
47 {USB_DEVICE(0x07b8, 0x8179)}, /* Abocom - Abocom */ 47 {USB_DEVICE(0x07b8, 0x8179)}, /* Abocom - Abocom */
48 {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */ 48 {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */
49 {USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */ 49 {USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */
50 {USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */
50 {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */ 51 {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */
51 {} /* Terminating entry */ 52 {} /* Terminating entry */
52}; 53};
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index ab3ab27d49b7..4d1b7224a7f2 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -110,58 +110,6 @@ static struct device_driver tcm_loop_driverfs = {
110 */ 110 */
111struct device *tcm_loop_primary; 111struct device *tcm_loop_primary;
112 112
113/*
114 * Copied from drivers/scsi/libfc/fc_fcp.c:fc_change_queue_depth() and
115 * drivers/scsi/libiscsi.c:iscsi_change_queue_depth()
116 */
117static int tcm_loop_change_queue_depth(
118 struct scsi_device *sdev,
119 int depth,
120 int reason)
121{
122 switch (reason) {
123 case SCSI_QDEPTH_DEFAULT:
124 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
125 break;
126 case SCSI_QDEPTH_QFULL:
127 scsi_track_queue_full(sdev, depth);
128 break;
129 case SCSI_QDEPTH_RAMP_UP:
130 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
131 break;
132 default:
133 return -EOPNOTSUPP;
134 }
135 return sdev->queue_depth;
136}
137
138static int tcm_loop_change_queue_type(struct scsi_device *sdev, int tag)
139{
140 if (sdev->tagged_supported) {
141 scsi_set_tag_type(sdev, tag);
142
143 if (tag)
144 scsi_activate_tcq(sdev, sdev->queue_depth);
145 else
146 scsi_deactivate_tcq(sdev, sdev->queue_depth);
147 } else
148 tag = 0;
149
150 return tag;
151}
152
153/*
154 * Locate the SAM Task Attr from struct scsi_cmnd *
155 */
156static int tcm_loop_sam_attr(struct scsi_cmnd *sc, int tag)
157{
158 if (sc->device->tagged_supported &&
159 sc->device->ordered_tags && tag >= 0)
160 return MSG_ORDERED_TAG;
161
162 return MSG_SIMPLE_TAG;
163}
164
165static void tcm_loop_submission_work(struct work_struct *work) 113static void tcm_loop_submission_work(struct work_struct *work)
166{ 114{
167 struct tcm_loop_cmd *tl_cmd = 115 struct tcm_loop_cmd *tl_cmd =
@@ -220,7 +168,7 @@ static void tcm_loop_submission_work(struct work_struct *work)
220 168
221 rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd, 169 rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd,
222 &tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun, 170 &tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun,
223 transfer_length, tcm_loop_sam_attr(sc, tl_cmd->sc_cmd_tag), 171 transfer_length, MSG_SIMPLE_TAG,
224 sc->sc_data_direction, 0, 172 sc->sc_data_direction, 0,
225 scsi_sglist(sc), scsi_sg_count(sc), 173 scsi_sglist(sc), scsi_sg_count(sc),
226 sgl_bidi, sgl_bidi_count, 174 sgl_bidi, sgl_bidi_count,
@@ -431,27 +379,13 @@ static int tcm_loop_slave_alloc(struct scsi_device *sd)
431 return 0; 379 return 0;
432} 380}
433 381
434static int tcm_loop_slave_configure(struct scsi_device *sd)
435{
436 if (sd->tagged_supported) {
437 scsi_activate_tcq(sd, sd->queue_depth);
438 scsi_adjust_queue_depth(sd, MSG_SIMPLE_TAG,
439 sd->host->cmd_per_lun);
440 } else {
441 scsi_adjust_queue_depth(sd, 0,
442 sd->host->cmd_per_lun);
443 }
444
445 return 0;
446}
447
448static struct scsi_host_template tcm_loop_driver_template = { 382static struct scsi_host_template tcm_loop_driver_template = {
449 .show_info = tcm_loop_show_info, 383 .show_info = tcm_loop_show_info,
450 .proc_name = "tcm_loopback", 384 .proc_name = "tcm_loopback",
451 .name = "TCM_Loopback", 385 .name = "TCM_Loopback",
452 .queuecommand = tcm_loop_queuecommand, 386 .queuecommand = tcm_loop_queuecommand,
453 .change_queue_depth = tcm_loop_change_queue_depth, 387 .change_queue_depth = scsi_change_queue_depth,
454 .change_queue_type = tcm_loop_change_queue_type, 388 .change_queue_type = scsi_change_queue_type,
455 .eh_abort_handler = tcm_loop_abort_task, 389 .eh_abort_handler = tcm_loop_abort_task,
456 .eh_device_reset_handler = tcm_loop_device_reset, 390 .eh_device_reset_handler = tcm_loop_device_reset,
457 .eh_target_reset_handler = tcm_loop_target_reset, 391 .eh_target_reset_handler = tcm_loop_target_reset,
@@ -462,8 +396,9 @@ static struct scsi_host_template tcm_loop_driver_template = {
462 .max_sectors = 0xFFFF, 396 .max_sectors = 0xFFFF,
463 .use_clustering = DISABLE_CLUSTERING, 397 .use_clustering = DISABLE_CLUSTERING,
464 .slave_alloc = tcm_loop_slave_alloc, 398 .slave_alloc = tcm_loop_slave_alloc,
465 .slave_configure = tcm_loop_slave_configure,
466 .module = THIS_MODULE, 399 .module = THIS_MODULE,
400 .use_blk_tags = 1,
401 .track_queue_depth = 1,
467}; 402};
468 403
469static int tcm_loop_driver_probe(struct device *dev) 404static int tcm_loop_driver_probe(struct device *dev)
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index fb87780929d2..75cbde1f7c5b 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -576,7 +576,7 @@ static inline int core_alua_state_standby(
576 case SEND_DIAGNOSTIC: 576 case SEND_DIAGNOSTIC:
577 case READ_CAPACITY: 577 case READ_CAPACITY:
578 return 0; 578 return 0;
579 case SERVICE_ACTION_IN: 579 case SERVICE_ACTION_IN_16:
580 switch (cdb[1] & 0x1f) { 580 switch (cdb[1] & 0x1f) {
581 case SAI_READ_CAPACITY_16: 581 case SAI_READ_CAPACITY_16:
582 return 0; 582 return 0;
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 9f93b8234095..4c261c33cf55 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -459,7 +459,7 @@ static int core_scsi3_pr_seq_non_holder(
459 case ACCESS_CONTROL_OUT: 459 case ACCESS_CONTROL_OUT:
460 case INQUIRY: 460 case INQUIRY:
461 case LOG_SENSE: 461 case LOG_SENSE:
462 case READ_MEDIA_SERIAL_NUMBER: 462 case SERVICE_ACTION_IN_12:
463 case REPORT_LUNS: 463 case REPORT_LUNS:
464 case REQUEST_SENSE: 464 case REQUEST_SENSE:
465 case PERSISTENT_RESERVE_IN: 465 case PERSISTENT_RESERVE_IN:
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index ebe62afb957d..8d171ff77e75 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -852,7 +852,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
852 size = READ_CAP_LEN; 852 size = READ_CAP_LEN;
853 cmd->execute_cmd = sbc_emulate_readcapacity; 853 cmd->execute_cmd = sbc_emulate_readcapacity;
854 break; 854 break;
855 case SERVICE_ACTION_IN: 855 case SERVICE_ACTION_IN_16:
856 switch (cmd->t_task_cdb[1] & 0x1f) { 856 switch (cmd->t_task_cdb[1] & 0x1f) {
857 case SAI_READ_CAPACITY_16: 857 case SAI_READ_CAPACITY_16:
858 cmd->execute_cmd = sbc_emulate_readcapacity_16; 858 cmd->execute_cmd = sbc_emulate_readcapacity_16;
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 1ab0018271c5..ad09e51ffae4 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -50,15 +50,14 @@ struct cpufreq_cooling_device {
50 unsigned int cpufreq_state; 50 unsigned int cpufreq_state;
51 unsigned int cpufreq_val; 51 unsigned int cpufreq_val;
52 struct cpumask allowed_cpus; 52 struct cpumask allowed_cpus;
53 struct list_head node;
53}; 54};
54static DEFINE_IDR(cpufreq_idr); 55static DEFINE_IDR(cpufreq_idr);
55static DEFINE_MUTEX(cooling_cpufreq_lock); 56static DEFINE_MUTEX(cooling_cpufreq_lock);
56 57
57static unsigned int cpufreq_dev_count; 58static unsigned int cpufreq_dev_count;
58 59
59/* notify_table passes value to the CPUFREQ_ADJUST callback function. */ 60static LIST_HEAD(cpufreq_dev_list);
60#define NOTIFY_INVALID NULL
61static struct cpufreq_cooling_device *notify_device;
62 61
63/** 62/**
64 * get_idr - function to get a unique id. 63 * get_idr - function to get a unique id.
@@ -287,15 +286,12 @@ static int cpufreq_apply_cooling(struct cpufreq_cooling_device *cpufreq_device,
287 286
288 cpufreq_device->cpufreq_state = cooling_state; 287 cpufreq_device->cpufreq_state = cooling_state;
289 cpufreq_device->cpufreq_val = clip_freq; 288 cpufreq_device->cpufreq_val = clip_freq;
290 notify_device = cpufreq_device;
291 289
292 for_each_cpu(cpuid, mask) { 290 for_each_cpu(cpuid, mask) {
293 if (is_cpufreq_valid(cpuid)) 291 if (is_cpufreq_valid(cpuid))
294 cpufreq_update_policy(cpuid); 292 cpufreq_update_policy(cpuid);
295 } 293 }
296 294
297 notify_device = NOTIFY_INVALID;
298
299 return 0; 295 return 0;
300} 296}
301 297
@@ -316,21 +312,28 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb,
316{ 312{
317 struct cpufreq_policy *policy = data; 313 struct cpufreq_policy *policy = data;
318 unsigned long max_freq = 0; 314 unsigned long max_freq = 0;
315 struct cpufreq_cooling_device *cpufreq_dev;
319 316
320 if (event != CPUFREQ_ADJUST || notify_device == NOTIFY_INVALID) 317 if (event != CPUFREQ_ADJUST)
321 return 0; 318 return 0;
322 319
323 if (cpumask_test_cpu(policy->cpu, &notify_device->allowed_cpus)) 320 mutex_lock(&cooling_cpufreq_lock);
324 max_freq = notify_device->cpufreq_val; 321 list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
325 else 322 if (!cpumask_test_cpu(policy->cpu,
326 return 0; 323 &cpufreq_dev->allowed_cpus))
324 continue;
325
326 if (!cpufreq_dev->cpufreq_val)
327 cpufreq_dev->cpufreq_val = get_cpu_frequency(
328 cpumask_any(&cpufreq_dev->allowed_cpus),
329 cpufreq_dev->cpufreq_state);
327 330
328 /* Never exceed user_policy.max */ 331 max_freq = cpufreq_dev->cpufreq_val;
329 if (max_freq > policy->user_policy.max)
330 max_freq = policy->user_policy.max;
331 332
332 if (policy->max != max_freq) 333 if (policy->max != max_freq)
333 cpufreq_verify_within_limits(policy, 0, max_freq); 334 cpufreq_verify_within_limits(policy, 0, max_freq);
335 }
336 mutex_unlock(&cooling_cpufreq_lock);
334 337
335 return 0; 338 return 0;
336} 339}
@@ -486,6 +489,7 @@ __cpufreq_cooling_register(struct device_node *np,
486 cpufreq_register_notifier(&thermal_cpufreq_notifier_block, 489 cpufreq_register_notifier(&thermal_cpufreq_notifier_block,
487 CPUFREQ_POLICY_NOTIFIER); 490 CPUFREQ_POLICY_NOTIFIER);
488 cpufreq_dev_count++; 491 cpufreq_dev_count++;
492 list_add(&cpufreq_dev->node, &cpufreq_dev_list);
489 493
490 mutex_unlock(&cooling_cpufreq_lock); 494 mutex_unlock(&cooling_cpufreq_lock);
491 495
@@ -549,6 +553,7 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
549 553
550 cpufreq_dev = cdev->devdata; 554 cpufreq_dev = cdev->devdata;
551 mutex_lock(&cooling_cpufreq_lock); 555 mutex_lock(&cooling_cpufreq_lock);
556 list_del(&cpufreq_dev->node);
552 cpufreq_dev_count--; 557 cpufreq_dev_count--;
553 558
554 /* Unregister the notifier for the last cpufreq cooling device */ 559 /* Unregister the notifier for the last cpufreq cooling device */
diff --git a/drivers/thermal/samsung/exynos_thermal_common.c b/drivers/thermal/samsung/exynos_thermal_common.c
index 3f5ad25ddca8..b6be572704a4 100644
--- a/drivers/thermal/samsung/exynos_thermal_common.c
+++ b/drivers/thermal/samsung/exynos_thermal_common.c
@@ -417,13 +417,10 @@ void exynos_unregister_thermal(struct thermal_sensor_conf *sensor_conf)
417 417
418 th_zone = sensor_conf->pzone_data; 418 th_zone = sensor_conf->pzone_data;
419 419
420 if (th_zone->therm_dev) 420 thermal_zone_device_unregister(th_zone->therm_dev);
421 thermal_zone_device_unregister(th_zone->therm_dev);
422 421
423 for (i = 0; i < th_zone->cool_dev_size; i++) { 422 for (i = 0; i < th_zone->cool_dev_size; ++i)
424 if (th_zone->cool_dev[i]) 423 cpufreq_cooling_unregister(th_zone->cool_dev[i]);
425 cpufreq_cooling_unregister(th_zone->cool_dev[i]);
426 }
427 424
428 dev_info(sensor_conf->dev, 425 dev_info(sensor_conf->dev,
429 "Exynos: Kernel Thermal management unregistered\n"); 426 "Exynos: Kernel Thermal management unregistered\n");
diff --git a/drivers/thermal/st/st_thermal.c b/drivers/thermal/st/st_thermal.c
index 90163b384660..d1ec5804c0bb 100644
--- a/drivers/thermal/st/st_thermal.c
+++ b/drivers/thermal/st/st_thermal.c
@@ -275,6 +275,7 @@ int st_thermal_unregister(struct platform_device *pdev)
275} 275}
276EXPORT_SYMBOL_GPL(st_thermal_unregister); 276EXPORT_SYMBOL_GPL(st_thermal_unregister);
277 277
278#ifdef CONFIG_PM_SLEEP
278static int st_thermal_suspend(struct device *dev) 279static int st_thermal_suspend(struct device *dev)
279{ 280{
280 struct platform_device *pdev = to_platform_device(dev); 281 struct platform_device *pdev = to_platform_device(dev);
@@ -305,6 +306,8 @@ static int st_thermal_resume(struct device *dev)
305 306
306 return 0; 307 return 0;
307} 308}
309#endif
310
308SIMPLE_DEV_PM_OPS(st_thermal_pm_ops, st_thermal_suspend, st_thermal_resume); 311SIMPLE_DEV_PM_OPS(st_thermal_pm_ops, st_thermal_suspend, st_thermal_resume);
309EXPORT_SYMBOL_GPL(st_thermal_pm_ops); 312EXPORT_SYMBOL_GPL(st_thermal_pm_ops);
310 313
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 2e900a98c3e3..26f097f60b10 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -2123,7 +2123,7 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
2123{ 2123{
2124 struct n_tty_data *ldata = tty->disc_data; 2124 struct n_tty_data *ldata = tty->disc_data;
2125 unsigned char __user *b = buf; 2125 unsigned char __user *b = buf;
2126 DECLARE_WAITQUEUE(wait, current); 2126 DEFINE_WAIT_FUNC(wait, woken_wake_function);
2127 int c; 2127 int c;
2128 int minimum, time; 2128 int minimum, time;
2129 ssize_t retval = 0; 2129 ssize_t retval = 0;
@@ -2186,10 +2186,6 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
2186 nr--; 2186 nr--;
2187 break; 2187 break;
2188 } 2188 }
2189 /* This statement must be first before checking for input
2190 so that any interrupt will set the state back to
2191 TASK_RUNNING. */
2192 set_current_state(TASK_INTERRUPTIBLE);
2193 2189
2194 if (((minimum - (b - buf)) < ldata->minimum_to_wake) && 2190 if (((minimum - (b - buf)) < ldata->minimum_to_wake) &&
2195 ((minimum - (b - buf)) >= 1)) 2191 ((minimum - (b - buf)) >= 1))
@@ -2220,13 +2216,13 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
2220 n_tty_set_room(tty); 2216 n_tty_set_room(tty);
2221 up_read(&tty->termios_rwsem); 2217 up_read(&tty->termios_rwsem);
2222 2218
2223 timeout = schedule_timeout(timeout); 2219 timeout = wait_woken(&wait, TASK_INTERRUPTIBLE,
2220 timeout);
2224 2221
2225 down_read(&tty->termios_rwsem); 2222 down_read(&tty->termios_rwsem);
2226 continue; 2223 continue;
2227 } 2224 }
2228 } 2225 }
2229 __set_current_state(TASK_RUNNING);
2230 2226
2231 /* Deal with packet mode. */ 2227 /* Deal with packet mode. */
2232 if (packet && b == buf) { 2228 if (packet && b == buf) {
@@ -2273,7 +2269,6 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
2273 2269
2274 mutex_unlock(&ldata->atomic_read_lock); 2270 mutex_unlock(&ldata->atomic_read_lock);
2275 2271
2276 __set_current_state(TASK_RUNNING);
2277 if (b - buf) 2272 if (b - buf)
2278 retval = b - buf; 2273 retval = b - buf;
2279 2274
@@ -2306,7 +2301,7 @@ static ssize_t n_tty_write(struct tty_struct *tty, struct file *file,
2306 const unsigned char *buf, size_t nr) 2301 const unsigned char *buf, size_t nr)
2307{ 2302{
2308 const unsigned char *b = buf; 2303 const unsigned char *b = buf;
2309 DECLARE_WAITQUEUE(wait, current); 2304 DEFINE_WAIT_FUNC(wait, woken_wake_function);
2310 int c; 2305 int c;
2311 ssize_t retval = 0; 2306 ssize_t retval = 0;
2312 2307
@@ -2324,7 +2319,6 @@ static ssize_t n_tty_write(struct tty_struct *tty, struct file *file,
2324 2319
2325 add_wait_queue(&tty->write_wait, &wait); 2320 add_wait_queue(&tty->write_wait, &wait);
2326 while (1) { 2321 while (1) {
2327 set_current_state(TASK_INTERRUPTIBLE);
2328 if (signal_pending(current)) { 2322 if (signal_pending(current)) {
2329 retval = -ERESTARTSYS; 2323 retval = -ERESTARTSYS;
2330 break; 2324 break;
@@ -2378,12 +2372,11 @@ static ssize_t n_tty_write(struct tty_struct *tty, struct file *file,
2378 } 2372 }
2379 up_read(&tty->termios_rwsem); 2373 up_read(&tty->termios_rwsem);
2380 2374
2381 schedule(); 2375 wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
2382 2376
2383 down_read(&tty->termios_rwsem); 2377 down_read(&tty->termios_rwsem);
2384 } 2378 }
2385break_out: 2379break_out:
2386 __set_current_state(TASK_RUNNING);
2387 remove_wait_queue(&tty->write_wait, &wait); 2380 remove_wait_queue(&tty->write_wait, &wait);
2388 if (b - buf != nr && tty->fasync) 2381 if (b - buf != nr && tty->fasync)
2389 set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); 2382 set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 649b784081c7..98f8bcaf3e7e 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -249,14 +249,14 @@ config SERIAL_SAMSUNG
249 249
250config SERIAL_SAMSUNG_UARTS_4 250config SERIAL_SAMSUNG_UARTS_4
251 bool 251 bool
252 depends on PLAT_SAMSUNG 252 depends on SERIAL_SAMSUNG
253 default y if !(CPU_S3C2410 || CPU_S3C2412 || CPU_S3C2440 || CPU_S3C2442) 253 default y if !(CPU_S3C2410 || CPU_S3C2412 || CPU_S3C2440 || CPU_S3C2442)
254 help 254 help
255 Internal node for the common case of 4 Samsung compatible UARTs 255 Internal node for the common case of 4 Samsung compatible UARTs
256 256
257config SERIAL_SAMSUNG_UARTS 257config SERIAL_SAMSUNG_UARTS
258 int 258 int
259 depends on PLAT_SAMSUNG 259 depends on SERIAL_SAMSUNG
260 default 4 if SERIAL_SAMSUNG_UARTS_4 || CPU_S3C2416 260 default 4 if SERIAL_SAMSUNG_UARTS_4 || CPU_S3C2416
261 default 3 261 default 3
262 help 262 help
diff --git a/drivers/tty/serial/of_serial.c b/drivers/tty/serial/of_serial.c
index 56982da4a9e9..bf355050eab6 100644
--- a/drivers/tty/serial/of_serial.c
+++ b/drivers/tty/serial/of_serial.c
@@ -240,32 +240,6 @@ static int of_platform_serial_remove(struct platform_device *ofdev)
240 return 0; 240 return 0;
241} 241}
242 242
243#ifdef CONFIG_PM_SLEEP
244static int of_serial_suspend(struct device *dev)
245{
246 struct of_serial_info *info = dev_get_drvdata(dev);
247
248 serial8250_suspend_port(info->line);
249 if (info->clk)
250 clk_disable_unprepare(info->clk);
251
252 return 0;
253}
254
255static int of_serial_resume(struct device *dev)
256{
257 struct of_serial_info *info = dev_get_drvdata(dev);
258
259 if (info->clk)
260 clk_prepare_enable(info->clk);
261
262 serial8250_resume_port(info->line);
263
264 return 0;
265}
266#endif
267static SIMPLE_DEV_PM_OPS(of_serial_pm_ops, of_serial_suspend, of_serial_resume);
268
269/* 243/*
270 * A few common types, add more as needed. 244 * A few common types, add more as needed.
271 */ 245 */
@@ -297,7 +271,6 @@ static struct platform_driver of_platform_serial_driver = {
297 .name = "of_serial", 271 .name = "of_serial",
298 .owner = THIS_MODULE, 272 .owner = THIS_MODULE,
299 .of_match_table = of_platform_serial_table, 273 .of_match_table = of_platform_serial_table,
300 .pm = &of_serial_pm_ops,
301 }, 274 },
302 .probe = of_platform_serial_probe, 275 .probe = of_platform_serial_probe,
303 .remove = of_platform_serial_remove, 276 .remove = of_platform_serial_remove,
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 9bffd26cea05..874dec31a111 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -1493,10 +1493,6 @@ int usb_resume(struct device *dev, pm_message_t msg)
1493 return status; 1493 return status;
1494} 1494}
1495 1495
1496#endif /* CONFIG_PM */
1497
1498#ifdef CONFIG_PM_RUNTIME
1499
1500/** 1496/**
1501 * usb_enable_autosuspend - allow a USB device to be autosuspended 1497 * usb_enable_autosuspend - allow a USB device to be autosuspended
1502 * @udev: the USB device which may be autosuspended 1498 * @udev: the USB device which may be autosuspended
@@ -1876,7 +1872,7 @@ int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable)
1876 return ret; 1872 return ret;
1877} 1873}
1878 1874
1879#endif /* CONFIG_PM_RUNTIME */ 1875#endif /* CONFIG_PM */
1880 1876
1881struct bus_type usb_bus_type = { 1877struct bus_type usb_bus_type = {
1882 .name = "usb", 1878 .name = "usb",
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index efc953119ce2..9eb1cff28bd4 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -429,7 +429,6 @@ static int check_root_hub_suspended(struct device *dev)
429 return 0; 429 return 0;
430} 430}
431 431
432#if defined(CONFIG_PM_SLEEP) || defined(CONFIG_PM_RUNTIME)
433static int suspend_common(struct device *dev, bool do_wakeup) 432static int suspend_common(struct device *dev, bool do_wakeup)
434{ 433{
435 struct pci_dev *pci_dev = to_pci_dev(dev); 434 struct pci_dev *pci_dev = to_pci_dev(dev);
@@ -528,7 +527,6 @@ static int resume_common(struct device *dev, int event)
528 } 527 }
529 return retval; 528 return retval;
530} 529}
531#endif /* SLEEP || RUNTIME */
532 530
533#ifdef CONFIG_PM_SLEEP 531#ifdef CONFIG_PM_SLEEP
534 532
@@ -607,8 +605,6 @@ static int hcd_pci_restore(struct device *dev)
607 605
608#endif /* CONFIG_PM_SLEEP */ 606#endif /* CONFIG_PM_SLEEP */
609 607
610#ifdef CONFIG_PM_RUNTIME
611
612static int hcd_pci_runtime_suspend(struct device *dev) 608static int hcd_pci_runtime_suspend(struct device *dev)
613{ 609{
614 int retval; 610 int retval;
@@ -630,13 +626,6 @@ static int hcd_pci_runtime_resume(struct device *dev)
630 return retval; 626 return retval;
631} 627}
632 628
633#else
634
635#define hcd_pci_runtime_suspend NULL
636#define hcd_pci_runtime_resume NULL
637
638#endif /* CONFIG_PM_RUNTIME */
639
640const struct dev_pm_ops usb_hcd_pci_pm_ops = { 629const struct dev_pm_ops usb_hcd_pci_pm_ops = {
641 .suspend = hcd_pci_suspend, 630 .suspend = hcd_pci_suspend,
642 .suspend_noirq = hcd_pci_suspend_noirq, 631 .suspend_noirq = hcd_pci_suspend_noirq,
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index a6efb4184f2b..278be0515e8e 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -2258,10 +2258,6 @@ int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg)
2258 return status; 2258 return status;
2259} 2259}
2260 2260
2261#endif /* CONFIG_PM */
2262
2263#ifdef CONFIG_PM_RUNTIME
2264
2265/* Workqueue routine for root-hub remote wakeup */ 2261/* Workqueue routine for root-hub remote wakeup */
2266static void hcd_resume_work(struct work_struct *work) 2262static void hcd_resume_work(struct work_struct *work)
2267{ 2263{
@@ -2293,7 +2289,7 @@ void usb_hcd_resume_root_hub (struct usb_hcd *hcd)
2293} 2289}
2294EXPORT_SYMBOL_GPL(usb_hcd_resume_root_hub); 2290EXPORT_SYMBOL_GPL(usb_hcd_resume_root_hub);
2295 2291
2296#endif /* CONFIG_PM_RUNTIME */ 2292#endif /* CONFIG_PM */
2297 2293
2298/*-------------------------------------------------------------------------*/ 2294/*-------------------------------------------------------------------------*/
2299 2295
@@ -2476,7 +2472,7 @@ struct usb_hcd *usb_create_shared_hcd(const struct hc_driver *driver,
2476 init_timer(&hcd->rh_timer); 2472 init_timer(&hcd->rh_timer);
2477 hcd->rh_timer.function = rh_timer_func; 2473 hcd->rh_timer.function = rh_timer_func;
2478 hcd->rh_timer.data = (unsigned long) hcd; 2474 hcd->rh_timer.data = (unsigned long) hcd;
2479#ifdef CONFIG_PM_RUNTIME 2475#ifdef CONFIG_PM
2480 INIT_WORK(&hcd->wakeup_work, hcd_resume_work); 2476 INIT_WORK(&hcd->wakeup_work, hcd_resume_work);
2481#endif 2477#endif
2482 2478
@@ -2790,7 +2786,7 @@ error_create_attr_group:
2790 hcd->rh_registered = 0; 2786 hcd->rh_registered = 0;
2791 spin_unlock_irq(&hcd_root_hub_lock); 2787 spin_unlock_irq(&hcd_root_hub_lock);
2792 2788
2793#ifdef CONFIG_PM_RUNTIME 2789#ifdef CONFIG_PM
2794 cancel_work_sync(&hcd->wakeup_work); 2790 cancel_work_sync(&hcd->wakeup_work);
2795#endif 2791#endif
2796 mutex_lock(&usb_bus_list_lock); 2792 mutex_lock(&usb_bus_list_lock);
@@ -2858,7 +2854,7 @@ void usb_remove_hcd(struct usb_hcd *hcd)
2858 hcd->rh_registered = 0; 2854 hcd->rh_registered = 0;
2859 spin_unlock_irq (&hcd_root_hub_lock); 2855 spin_unlock_irq (&hcd_root_hub_lock);
2860 2856
2861#ifdef CONFIG_PM_RUNTIME 2857#ifdef CONFIG_PM
2862 cancel_work_sync(&hcd->wakeup_work); 2858 cancel_work_sync(&hcd->wakeup_work);
2863#endif 2859#endif
2864 2860
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index b649fef2e35d..c9596525ba8c 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -1737,7 +1737,7 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id)
1737 * - If user has indicated to prevent autosuspend by passing 1737 * - If user has indicated to prevent autosuspend by passing
1738 * usbcore.autosuspend = -1 then keep autosuspend disabled. 1738 * usbcore.autosuspend = -1 then keep autosuspend disabled.
1739 */ 1739 */
1740#ifdef CONFIG_PM_RUNTIME 1740#ifdef CONFIG_PM
1741 if (hdev->dev.power.autosuspend_delay >= 0) 1741 if (hdev->dev.power.autosuspend_delay >= 0)
1742 pm_runtime_set_autosuspend_delay(&hdev->dev, 0); 1742 pm_runtime_set_autosuspend_delay(&hdev->dev, 0);
1743#endif 1743#endif
@@ -3449,7 +3449,7 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
3449 return status; 3449 return status;
3450} 3450}
3451 3451
3452#ifdef CONFIG_PM_RUNTIME 3452#ifdef CONFIG_PM
3453 3453
3454int usb_remote_wakeup(struct usb_device *udev) 3454int usb_remote_wakeup(struct usb_device *udev)
3455{ 3455{
@@ -4856,7 +4856,7 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
4856 udev->state != USB_STATE_NOTATTACHED) { 4856 udev->state != USB_STATE_NOTATTACHED) {
4857 if (portstatus & USB_PORT_STAT_ENABLE) { 4857 if (portstatus & USB_PORT_STAT_ENABLE) {
4858 status = 0; /* Nothing to do */ 4858 status = 0; /* Nothing to do */
4859#ifdef CONFIG_PM_RUNTIME 4859#ifdef CONFIG_PM
4860 } else if (udev->state == USB_STATE_SUSPENDED && 4860 } else if (udev->state == USB_STATE_SUSPENDED &&
4861 udev->persist_enabled) { 4861 udev->persist_enabled) {
4862 /* For a suspended device, treat this as a 4862 /* For a suspended device, treat this as a
diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c
index cd3f9dc24a06..210618319f10 100644
--- a/drivers/usb/core/port.c
+++ b/drivers/usb/core/port.c
@@ -72,7 +72,7 @@ static void usb_port_device_release(struct device *dev)
72 kfree(port_dev); 72 kfree(port_dev);
73} 73}
74 74
75#ifdef CONFIG_PM_RUNTIME 75#ifdef CONFIG_PM
76static int usb_port_runtime_resume(struct device *dev) 76static int usb_port_runtime_resume(struct device *dev)
77{ 77{
78 struct usb_port *port_dev = to_usb_port(dev); 78 struct usb_port *port_dev = to_usb_port(dev);
@@ -171,7 +171,7 @@ static int usb_port_runtime_suspend(struct device *dev)
171#endif 171#endif
172 172
173static const struct dev_pm_ops usb_port_pm_ops = { 173static const struct dev_pm_ops usb_port_pm_ops = {
174#ifdef CONFIG_PM_RUNTIME 174#ifdef CONFIG_PM
175 .runtime_suspend = usb_port_runtime_suspend, 175 .runtime_suspend = usb_port_runtime_suspend,
176 .runtime_resume = usb_port_runtime_resume, 176 .runtime_resume = usb_port_runtime_resume,
177#endif 177#endif
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 39b4081b632d..96fafed92b76 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -44,6 +44,9 @@ static const struct usb_device_id usb_quirk_list[] = {
44 /* Creative SB Audigy 2 NX */ 44 /* Creative SB Audigy 2 NX */
45 { USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME }, 45 { USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME },
46 46
47 /* Microsoft Wireless Laser Mouse 6000 Receiver */
48 { USB_DEVICE(0x045e, 0x00e1), .driver_info = USB_QUIRK_RESET_RESUME },
49
47 /* Microsoft LifeCam-VX700 v2.0 */ 50 /* Microsoft LifeCam-VX700 v2.0 */
48 { USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME }, 51 { USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME },
49 52
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index 1236c6011c70..d26973844a4d 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -334,14 +334,6 @@ static void remove_persist_attributes(struct device *dev)
334 &dev_attr_persist.attr, 334 &dev_attr_persist.attr,
335 power_group_name); 335 power_group_name);
336} 336}
337#else
338
339#define add_persist_attributes(dev) 0
340#define remove_persist_attributes(dev) do {} while (0)
341
342#endif /* CONFIG_PM */
343
344#ifdef CONFIG_PM_RUNTIME
345 337
346static ssize_t connected_duration_show(struct device *dev, 338static ssize_t connected_duration_show(struct device *dev,
347 struct device_attribute *attr, char *buf) 339 struct device_attribute *attr, char *buf)
@@ -585,10 +577,13 @@ static void remove_power_attributes(struct device *dev)
585 577
586#else 578#else
587 579
580#define add_persist_attributes(dev) 0
581#define remove_persist_attributes(dev) do {} while (0)
582
588#define add_power_attributes(dev) 0 583#define add_power_attributes(dev) 0
589#define remove_power_attributes(dev) do {} while (0) 584#define remove_power_attributes(dev) do {} while (0)
590 585
591#endif /* CONFIG_PM_RUNTIME */ 586#endif /* CONFIG_PM */
592 587
593 588
594/* Descriptor fields */ 589/* Descriptor fields */
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 2dd2362198d2..2a92b97f0144 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -49,7 +49,7 @@ const char *usbcore_name = "usbcore";
49 49
50static bool nousb; /* Disable USB when built into kernel image */ 50static bool nousb; /* Disable USB when built into kernel image */
51 51
52#ifdef CONFIG_PM_RUNTIME 52#ifdef CONFIG_PM
53static int usb_autosuspend_delay = 2; /* Default delay value, 53static int usb_autosuspend_delay = 2; /* Default delay value,
54 * in seconds */ 54 * in seconds */
55module_param_named(autosuspend, usb_autosuspend_delay, int, 0644); 55module_param_named(autosuspend, usb_autosuspend_delay, int, 0644);
@@ -348,11 +348,9 @@ static const struct dev_pm_ops usb_device_pm_ops = {
348 .thaw = usb_dev_thaw, 348 .thaw = usb_dev_thaw,
349 .poweroff = usb_dev_poweroff, 349 .poweroff = usb_dev_poweroff,
350 .restore = usb_dev_restore, 350 .restore = usb_dev_restore,
351#ifdef CONFIG_PM_RUNTIME
352 .runtime_suspend = usb_runtime_suspend, 351 .runtime_suspend = usb_runtime_suspend,
353 .runtime_resume = usb_runtime_resume, 352 .runtime_resume = usb_runtime_resume,
354 .runtime_idle = usb_runtime_idle, 353 .runtime_idle = usb_runtime_idle,
355#endif
356}; 354};
357 355
358#endif /* CONFIG_PM */ 356#endif /* CONFIG_PM */
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index b1b34d0557c9..7eb1e26798e5 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -75,6 +75,14 @@ extern int usb_resume_complete(struct device *dev);
75extern int usb_port_suspend(struct usb_device *dev, pm_message_t msg); 75extern int usb_port_suspend(struct usb_device *dev, pm_message_t msg);
76extern int usb_port_resume(struct usb_device *dev, pm_message_t msg); 76extern int usb_port_resume(struct usb_device *dev, pm_message_t msg);
77 77
78extern void usb_autosuspend_device(struct usb_device *udev);
79extern int usb_autoresume_device(struct usb_device *udev);
80extern int usb_remote_wakeup(struct usb_device *dev);
81extern int usb_runtime_suspend(struct device *dev);
82extern int usb_runtime_resume(struct device *dev);
83extern int usb_runtime_idle(struct device *dev);
84extern int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable);
85
78#else 86#else
79 87
80static inline int usb_port_suspend(struct usb_device *udev, pm_message_t msg) 88static inline int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
@@ -87,20 +95,6 @@ static inline int usb_port_resume(struct usb_device *udev, pm_message_t msg)
87 return 0; 95 return 0;
88} 96}
89 97
90#endif
91
92#ifdef CONFIG_PM_RUNTIME
93
94extern void usb_autosuspend_device(struct usb_device *udev);
95extern int usb_autoresume_device(struct usb_device *udev);
96extern int usb_remote_wakeup(struct usb_device *dev);
97extern int usb_runtime_suspend(struct device *dev);
98extern int usb_runtime_resume(struct device *dev);
99extern int usb_runtime_idle(struct device *dev);
100extern int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable);
101
102#else
103
104#define usb_autosuspend_device(udev) do {} while (0) 98#define usb_autosuspend_device(udev) do {} while (0)
105static inline int usb_autoresume_device(struct usb_device *udev) 99static inline int usb_autoresume_device(struct usb_device *udev)
106{ 100{
@@ -111,6 +105,7 @@ static inline int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable)
111{ 105{
112 return 0; 106 return 0;
113} 107}
108
114#endif 109#endif
115 110
116extern struct bus_type usb_bus_type; 111extern struct bus_type usb_bus_type;
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 711b23019d54..df38e7ef4976 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -791,6 +791,10 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc,
791 791
792 trb = dwc->ep0_trb; 792 trb = dwc->ep0_trb;
793 793
794 r = next_request(&ep0->request_list);
795 if (!r)
796 return;
797
794 status = DWC3_TRB_SIZE_TRBSTS(trb->size); 798 status = DWC3_TRB_SIZE_TRBSTS(trb->size);
795 if (status == DWC3_TRBSTS_SETUP_PENDING) { 799 if (status == DWC3_TRBSTS_SETUP_PENDING) {
796 dwc3_trace(trace_dwc3_ep0, "Setup Pending received"); 800 dwc3_trace(trace_dwc3_ep0, "Setup Pending received");
@@ -801,10 +805,6 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc,
801 return; 805 return;
802 } 806 }
803 807
804 r = next_request(&ep0->request_list);
805 if (!r)
806 return;
807
808 ur = &r->request; 808 ur = &r->request;
809 809
810 length = trb->size & DWC3_TRB_SIZE_MASK; 810 length = trb->size & DWC3_TRB_SIZE_MASK;
diff --git a/drivers/usb/gadget/legacy/tcm_usb_gadget.c b/drivers/usb/gadget/legacy/tcm_usb_gadget.c
index 6cdb7a534f23..024f58475a94 100644
--- a/drivers/usb/gadget/legacy/tcm_usb_gadget.c
+++ b/drivers/usb/gadget/legacy/tcm_usb_gadget.c
@@ -912,7 +912,7 @@ static int get_cmd_dir(const unsigned char *cdb)
912 case INQUIRY: 912 case INQUIRY:
913 case MODE_SENSE: 913 case MODE_SENSE:
914 case MODE_SENSE_10: 914 case MODE_SENSE_10:
915 case SERVICE_ACTION_IN: 915 case SERVICE_ACTION_IN_16:
916 case MAINTENANCE_IN: 916 case MAINTENANCE_IN:
917 case PERSISTENT_RESERVE_IN: 917 case PERSISTENT_RESERVE_IN:
918 case SECURITY_PROTOCOL_IN: 918 case SECURITY_PROTOCOL_IN:
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index ca7b964124af..851006a0d97b 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -305,7 +305,7 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
305 } 305 }
306 } 306 }
307 307
308#ifdef CONFIG_PM_RUNTIME 308#ifdef CONFIG_PM
309 if (ehci->no_selective_suspend && device_can_wakeup(&pdev->dev)) 309 if (ehci->no_selective_suspend && device_can_wakeup(&pdev->dev))
310 ehci_warn(ehci, "selective suspend/wakeup unavailable\n"); 310 ehci_warn(ehci, "selective suspend/wakeup unavailable\n");
311#endif 311#endif
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
index ad0c348e68e9..25fb1da8d3d7 100644
--- a/drivers/usb/host/sl811-hcd.c
+++ b/drivers/usb/host/sl811-hcd.c
@@ -22,7 +22,7 @@
22 * and usb-storage. 22 * and usb-storage.
23 * 23 *
24 * TODO: 24 * TODO:
25 * - usb suspend/resume triggered by sl811 (with PM_RUNTIME) 25 * - usb suspend/resume triggered by sl811
26 * - various issues noted in the code 26 * - various issues noted in the code
27 * - performance work; use both register banks; ... 27 * - performance work; use both register banks; ...
28 * - use urb->iso_frame_desc[] with ISO transfers 28 * - use urb->iso_frame_desc[] with ISO transfers
@@ -1752,8 +1752,7 @@ sl811h_probe(struct platform_device *dev)
1752#ifdef CONFIG_PM 1752#ifdef CONFIG_PM
1753 1753
1754/* for this device there's no useful distinction between the controller 1754/* for this device there's no useful distinction between the controller
1755 * and its root hub, except that the root hub only gets direct PM calls 1755 * and its root hub.
1756 * when CONFIG_PM_RUNTIME is enabled.
1757 */ 1756 */
1758 1757
1759static int 1758static int
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c
index c0671750671f..bf86630b3cea 100644
--- a/drivers/usb/host/u132-hcd.c
+++ b/drivers/usb/host/u132-hcd.c
@@ -3144,8 +3144,7 @@ static int u132_probe(struct platform_device *pdev)
3144#ifdef CONFIG_PM 3144#ifdef CONFIG_PM
3145/* 3145/*
3146 * for this device there's no useful distinction between the controller 3146 * for this device there's no useful distinction between the controller
3147 * and its root hub, except that the root hub only gets direct PM calls 3147 * and its root hub.
3148 * when CONFIG_PM_RUNTIME is enabled.
3149 */ 3148 */
3150static int u132_suspend(struct platform_device *pdev, pm_message_t state) 3149static int u132_suspend(struct platform_device *pdev, pm_message_t state)
3151{ 3150{
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 696160d48ae8..a7865c4b0498 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -22,7 +22,6 @@
22 22
23 23
24#include <linux/slab.h> 24#include <linux/slab.h>
25#include <linux/device.h>
26#include <asm/unaligned.h> 25#include <asm/unaligned.h>
27 26
28#include "xhci.h" 27#include "xhci.h"
@@ -1146,12 +1145,10 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
1146 set_bit(port_index, &bus_state->bus_suspended); 1145 set_bit(port_index, &bus_state->bus_suspended);
1147 } 1146 }
1148 /* USB core sets remote wake mask for USB 3.0 hubs, 1147 /* USB core sets remote wake mask for USB 3.0 hubs,
1149 * including the USB 3.0 roothub, but only if CONFIG_PM_RUNTIME 1148 * including the USB 3.0 roothub, but only if CONFIG_PM
1150 * is enabled, so also enable remote wake here. 1149 * is enabled, so also enable remote wake here.
1151 */ 1150 */
1152 if (hcd->self.root_hub->do_remote_wakeup 1151 if (hcd->self.root_hub->do_remote_wakeup) {
1153 && device_may_wakeup(hcd->self.controller)) {
1154
1155 if (t1 & PORT_CONNECT) { 1152 if (t1 & PORT_CONNECT) {
1156 t2 |= PORT_WKOC_E | PORT_WKDISC_E; 1153 t2 |= PORT_WKOC_E | PORT_WKDISC_E;
1157 t2 &= ~PORT_WKCONN_E; 1154 t2 &= ~PORT_WKCONN_E;
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 9a69b1f1b300..142b601f9563 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -281,7 +281,7 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
281 if (xhci->quirks & XHCI_COMP_MODE_QUIRK) 281 if (xhci->quirks & XHCI_COMP_MODE_QUIRK)
282 pdev->no_d3cold = true; 282 pdev->no_d3cold = true;
283 283
284 return xhci_suspend(xhci); 284 return xhci_suspend(xhci, do_wakeup);
285} 285}
286 286
287static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated) 287static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 3d78b0cd674b..646300cbe5f7 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -204,7 +204,15 @@ static int xhci_plat_suspend(struct device *dev)
204 struct usb_hcd *hcd = dev_get_drvdata(dev); 204 struct usb_hcd *hcd = dev_get_drvdata(dev);
205 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 205 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
206 206
207 return xhci_suspend(xhci); 207 /*
208 * xhci_suspend() needs `do_wakeup` to know whether host is allowed
209 * to do wakeup during suspend. Since xhci_plat_suspend is currently
210 * only designed for system suspend, device_may_wakeup() is enough
211 * to dertermine whether host is allowed to do wakeup. Need to
212 * reconsider this when xhci_plat_suspend enlarges its scope, e.g.,
213 * also applies to runtime suspend.
214 */
215 return xhci_suspend(xhci, device_may_wakeup(dev));
208} 216}
209 217
210static int xhci_plat_resume(struct device *dev) 218static int xhci_plat_resume(struct device *dev)
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index bc6fcbc16f61..06433aec81d7 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1067,9 +1067,8 @@ static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
1067 false); 1067 false);
1068 xhci_ring_cmd_db(xhci); 1068 xhci_ring_cmd_db(xhci);
1069 } else { 1069 } else {
1070 /* Clear our internal halted state and restart the ring(s) */ 1070 /* Clear our internal halted state */
1071 xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED; 1071 xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
1072 ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
1073 } 1072 }
1074} 1073}
1075 1074
@@ -1823,22 +1822,13 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
1823 ep->stopped_td = td; 1822 ep->stopped_td = td;
1824 return 0; 1823 return 0;
1825 } else { 1824 } else {
1826 if (trb_comp_code == COMP_STALL) { 1825 if (trb_comp_code == COMP_STALL ||
1827 /* The transfer is completed from the driver's 1826 xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
1828 * perspective, but we need to issue a set dequeue 1827 trb_comp_code)) {
1829 * command for this stalled endpoint to move the dequeue 1828 /* Issue a reset endpoint command to clear the host side
1830 * pointer past the TD. We can't do that here because 1829 * halt, followed by a set dequeue command to move the
1831 * the halt condition must be cleared first. Let the 1830 * dequeue pointer past the TD.
1832 * USB class driver clear the stall later. 1831 * The class driver clears the device side halt later.
1833 */
1834 ep->stopped_td = td;
1835 ep->stopped_stream = ep_ring->stream_id;
1836 } else if (xhci_requires_manual_halt_cleanup(xhci,
1837 ep_ctx, trb_comp_code)) {
1838 /* Other types of errors halt the endpoint, but the
1839 * class driver doesn't call usb_reset_endpoint() unless
1840 * the error is -EPIPE. Clear the halted status in the
1841 * xHCI hardware manually.
1842 */ 1832 */
1843 xhci_cleanup_halted_endpoint(xhci, 1833 xhci_cleanup_halted_endpoint(xhci,
1844 slot_id, ep_index, ep_ring->stream_id, 1834 slot_id, ep_index, ep_ring->stream_id,
@@ -1958,9 +1948,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
1958 else 1948 else
1959 td->urb->actual_length = 0; 1949 td->urb->actual_length = 0;
1960 1950
1961 xhci_cleanup_halted_endpoint(xhci, 1951 return finish_td(xhci, td, event_trb, event, ep, status, false);
1962 slot_id, ep_index, 0, td, event_trb);
1963 return finish_td(xhci, td, event_trb, event, ep, status, true);
1964 } 1952 }
1965 /* 1953 /*
1966 * Did we transfer any data, despite the errors that might have 1954 * Did we transfer any data, despite the errors that might have
@@ -2519,17 +2507,8 @@ cleanup:
2519 if (ret) { 2507 if (ret) {
2520 urb = td->urb; 2508 urb = td->urb;
2521 urb_priv = urb->hcpriv; 2509 urb_priv = urb->hcpriv;
2522 /* Leave the TD around for the reset endpoint function 2510
2523 * to use(but only if it's not a control endpoint, 2511 xhci_urb_free_priv(xhci, urb_priv);
2524 * since we already queued the Set TR dequeue pointer
2525 * command for stalled control endpoints).
2526 */
2527 if (usb_endpoint_xfer_control(&urb->ep->desc) ||
2528 (trb_comp_code != COMP_STALL &&
2529 trb_comp_code != COMP_BABBLE))
2530 xhci_urb_free_priv(xhci, urb_priv);
2531 else
2532 kfree(urb_priv);
2533 2512
2534 usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb); 2513 usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
2535 if ((urb->actual_length != urb->transfer_buffer_length && 2514 if ((urb->actual_length != urb->transfer_buffer_length &&
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 2a5d45b4cb15..cf3413116aff 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -35,6 +35,8 @@
35#define DRIVER_AUTHOR "Sarah Sharp" 35#define DRIVER_AUTHOR "Sarah Sharp"
36#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver" 36#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"
37 37
38#define PORT_WAKE_BITS (PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E)
39
38/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */ 40/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
39static int link_quirk; 41static int link_quirk;
40module_param(link_quirk, int, S_IRUGO | S_IWUSR); 42module_param(link_quirk, int, S_IRUGO | S_IWUSR);
@@ -851,13 +853,47 @@ static void xhci_clear_command_ring(struct xhci_hcd *xhci)
851 xhci_set_cmd_ring_deq(xhci); 853 xhci_set_cmd_ring_deq(xhci);
852} 854}
853 855
856static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci)
857{
858 int port_index;
859 __le32 __iomem **port_array;
860 unsigned long flags;
861 u32 t1, t2;
862
863 spin_lock_irqsave(&xhci->lock, flags);
864
865 /* disble usb3 ports Wake bits*/
866 port_index = xhci->num_usb3_ports;
867 port_array = xhci->usb3_ports;
868 while (port_index--) {
869 t1 = readl(port_array[port_index]);
870 t1 = xhci_port_state_to_neutral(t1);
871 t2 = t1 & ~PORT_WAKE_BITS;
872 if (t1 != t2)
873 writel(t2, port_array[port_index]);
874 }
875
876 /* disble usb2 ports Wake bits*/
877 port_index = xhci->num_usb2_ports;
878 port_array = xhci->usb2_ports;
879 while (port_index--) {
880 t1 = readl(port_array[port_index]);
881 t1 = xhci_port_state_to_neutral(t1);
882 t2 = t1 & ~PORT_WAKE_BITS;
883 if (t1 != t2)
884 writel(t2, port_array[port_index]);
885 }
886
887 spin_unlock_irqrestore(&xhci->lock, flags);
888}
889
854/* 890/*
855 * Stop HC (not bus-specific) 891 * Stop HC (not bus-specific)
856 * 892 *
857 * This is called when the machine transition into S3/S4 mode. 893 * This is called when the machine transition into S3/S4 mode.
858 * 894 *
859 */ 895 */
860int xhci_suspend(struct xhci_hcd *xhci) 896int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
861{ 897{
862 int rc = 0; 898 int rc = 0;
863 unsigned int delay = XHCI_MAX_HALT_USEC; 899 unsigned int delay = XHCI_MAX_HALT_USEC;
@@ -868,6 +904,10 @@ int xhci_suspend(struct xhci_hcd *xhci)
868 xhci->shared_hcd->state != HC_STATE_SUSPENDED) 904 xhci->shared_hcd->state != HC_STATE_SUSPENDED)
869 return -EINVAL; 905 return -EINVAL;
870 906
907 /* Clear root port wake on bits if wakeup not allowed. */
908 if (!do_wakeup)
909 xhci_disable_port_wake_on_bits(xhci);
910
871 /* Don't poll the roothubs on bus suspend. */ 911 /* Don't poll the roothubs on bus suspend. */
872 xhci_dbg(xhci, "%s: stopping port polling.\n", __func__); 912 xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
873 clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); 913 clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
@@ -2912,68 +2952,33 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
2912 } 2952 }
2913} 2953}
2914 2954
2915/* Deal with stalled endpoints. The core should have sent the control message 2955/* Called when clearing halted device. The core should have sent the control
2916 * to clear the halt condition. However, we need to make the xHCI hardware 2956 * message to clear the device halt condition. The host side of the halt should
2917 * reset its sequence number, since a device will expect a sequence number of 2957 * already be cleared with a reset endpoint command issued when the STALL tx
2918 * zero after the halt condition is cleared. 2958 * event was received.
2959 *
2919 * Context: in_interrupt 2960 * Context: in_interrupt
2920 */ 2961 */
2962
2921void xhci_endpoint_reset(struct usb_hcd *hcd, 2963void xhci_endpoint_reset(struct usb_hcd *hcd,
2922 struct usb_host_endpoint *ep) 2964 struct usb_host_endpoint *ep)
2923{ 2965{
2924 struct xhci_hcd *xhci; 2966 struct xhci_hcd *xhci;
2925 struct usb_device *udev;
2926 unsigned int ep_index;
2927 unsigned long flags;
2928 int ret;
2929 struct xhci_virt_ep *virt_ep;
2930 struct xhci_command *command;
2931 2967
2932 xhci = hcd_to_xhci(hcd); 2968 xhci = hcd_to_xhci(hcd);
2933 udev = (struct usb_device *) ep->hcpriv;
2934 /* Called with a root hub endpoint (or an endpoint that wasn't added
2935 * with xhci_add_endpoint()
2936 */
2937 if (!ep->hcpriv)
2938 return;
2939 ep_index = xhci_get_endpoint_index(&ep->desc);
2940 virt_ep = &xhci->devs[udev->slot_id]->eps[ep_index];
2941 if (!virt_ep->stopped_td) {
2942 xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
2943 "Endpoint 0x%x not halted, refusing to reset.",
2944 ep->desc.bEndpointAddress);
2945 return;
2946 }
2947 if (usb_endpoint_xfer_control(&ep->desc)) {
2948 xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
2949 "Control endpoint stall already handled.");
2950 return;
2951 }
2952 2969
2953 command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
2954 if (!command)
2955 return;
2956
2957 xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
2958 "Queueing reset endpoint command");
2959 spin_lock_irqsave(&xhci->lock, flags);
2960 ret = xhci_queue_reset_ep(xhci, command, udev->slot_id, ep_index);
2961 /* 2970 /*
2962 * Can't change the ring dequeue pointer until it's transitioned to the 2971 * We might need to implement the config ep cmd in xhci 4.8.1 note:
2963 * stopped state, which is only upon a successful reset endpoint 2972 * The Reset Endpoint Command may only be issued to endpoints in the
2964 * command. Better hope that last command worked! 2973 * Halted state. If software wishes reset the Data Toggle or Sequence
2974 * Number of an endpoint that isn't in the Halted state, then software
2975 * may issue a Configure Endpoint Command with the Drop and Add bits set
2976 * for the target endpoint. that is in the Stopped state.
2965 */ 2977 */
2966 if (!ret) {
2967 xhci_cleanup_stalled_ring(xhci, udev, ep_index);
2968 kfree(virt_ep->stopped_td);
2969 xhci_ring_cmd_db(xhci);
2970 }
2971 virt_ep->stopped_td = NULL;
2972 virt_ep->stopped_stream = 0;
2973 spin_unlock_irqrestore(&xhci->lock, flags);
2974 2978
2975 if (ret) 2979 /* For now just print debug to follow the situation */
2976 xhci_warn(xhci, "FIXME allocate a new ring segment\n"); 2980 xhci_dbg(xhci, "Endpoint 0x%x ep reset callback called\n",
2981 ep->desc.bEndpointAddress);
2977} 2982}
2978 2983
2979static int xhci_check_streams_endpoint(struct xhci_hcd *xhci, 2984static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
@@ -4024,7 +4029,7 @@ static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci,
4024 return ret; 4029 return ret;
4025} 4030}
4026 4031
4027#ifdef CONFIG_PM_RUNTIME 4032#ifdef CONFIG_PM
4028 4033
4029/* BESL to HIRD Encoding array for USB2 LPM */ 4034/* BESL to HIRD Encoding array for USB2 LPM */
4030static int xhci_besl_encoding[16] = {125, 150, 200, 300, 400, 500, 1000, 2000, 4035static int xhci_besl_encoding[16] = {125, 150, 200, 300, 400, 500, 1000, 2000,
@@ -4239,24 +4244,8 @@ int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
4239 return 0; 4244 return 0;
4240} 4245}
4241 4246
4242#else
4243
4244int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
4245 struct usb_device *udev, int enable)
4246{
4247 return 0;
4248}
4249
4250int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
4251{
4252 return 0;
4253}
4254
4255#endif /* CONFIG_PM_RUNTIME */
4256
4257/*---------------------- USB 3.0 Link PM functions ------------------------*/ 4247/*---------------------- USB 3.0 Link PM functions ------------------------*/
4258 4248
4259#ifdef CONFIG_PM
4260/* Service interval in nanoseconds = 2^(bInterval - 1) * 125us * 1000ns / 1us */ 4249/* Service interval in nanoseconds = 2^(bInterval - 1) * 125us * 1000ns / 1us */
4261static unsigned long long xhci_service_interval_to_ns( 4250static unsigned long long xhci_service_interval_to_ns(
4262 struct usb_endpoint_descriptor *desc) 4251 struct usb_endpoint_descriptor *desc)
@@ -4687,6 +4676,17 @@ int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
4687} 4676}
4688#else /* CONFIG_PM */ 4677#else /* CONFIG_PM */
4689 4678
4679int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
4680 struct usb_device *udev, int enable)
4681{
4682 return 0;
4683}
4684
4685int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
4686{
4687 return 0;
4688}
4689
4690int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd, 4690int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
4691 struct usb_device *udev, enum usb3_link_state state) 4691 struct usb_device *udev, enum usb3_link_state state)
4692{ 4692{
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index df76d642e719..d745715a1e2f 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1746,7 +1746,7 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks);
1746void xhci_init_driver(struct hc_driver *drv, int (*setup_fn)(struct usb_hcd *)); 1746void xhci_init_driver(struct hc_driver *drv, int (*setup_fn)(struct usb_hcd *));
1747 1747
1748#ifdef CONFIG_PM 1748#ifdef CONFIG_PM
1749int xhci_suspend(struct xhci_hcd *xhci); 1749int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup);
1750int xhci_resume(struct xhci_hcd *xhci, bool hibernated); 1750int xhci_resume(struct xhci_hcd *xhci, bool hibernated);
1751#else 1751#else
1752#define xhci_suspend NULL 1752#define xhci_suspend NULL
diff --git a/drivers/usb/phy/phy-msm-usb.c b/drivers/usb/phy/phy-msm-usb.c
index 7843ef7dd0ff..29be0e654ecc 100644
--- a/drivers/usb/phy/phy-msm-usb.c
+++ b/drivers/usb/phy/phy-msm-usb.c
@@ -1761,7 +1761,7 @@ static int msm_otg_remove(struct platform_device *pdev)
1761 return 0; 1761 return 0;
1762} 1762}
1763 1763
1764#ifdef CONFIG_PM_RUNTIME 1764#ifdef CONFIG_PM
1765static int msm_otg_runtime_idle(struct device *dev) 1765static int msm_otg_runtime_idle(struct device *dev)
1766{ 1766{
1767 struct msm_otg *motg = dev_get_drvdata(dev); 1767 struct msm_otg *motg = dev_get_drvdata(dev);
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index cfd009dc4018..6c4eb3cf5efd 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -120,6 +120,7 @@ static const struct usb_device_id id_table[] = {
120 { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */ 120 { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
121 { USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */ 121 { USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */
122 { USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */ 122 { USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */
123 { USB_DEVICE(0x10C4, 0x8875) }, /* CEL MeshConnect USB Stick */
123 { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */ 124 { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
124 { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */ 125 { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
125 { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */ 126 { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 0dad8ce5a609..1ebb351b9e9a 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -470,6 +470,39 @@ static const struct usb_device_id id_table_combined[] = {
470 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FD_PID) }, 470 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FD_PID) },
471 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FE_PID) }, 471 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FE_PID) },
472 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FF_PID) }, 472 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FF_PID) },
473 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_4701_PID) },
474 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9300_PID) },
475 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9301_PID) },
476 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9302_PID) },
477 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9303_PID) },
478 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9304_PID) },
479 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9305_PID) },
480 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9306_PID) },
481 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9307_PID) },
482 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9308_PID) },
483 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9309_PID) },
484 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930A_PID) },
485 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930B_PID) },
486 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930C_PID) },
487 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930D_PID) },
488 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930E_PID) },
489 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930F_PID) },
490 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9310_PID) },
491 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9311_PID) },
492 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9312_PID) },
493 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9313_PID) },
494 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9314_PID) },
495 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9315_PID) },
496 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9316_PID) },
497 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9317_PID) },
498 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9318_PID) },
499 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9319_PID) },
500 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931A_PID) },
501 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931B_PID) },
502 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931C_PID) },
503 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931D_PID) },
504 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931E_PID) },
505 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931F_PID) },
473 { USB_DEVICE(FTDI_VID, FTDI_PERLE_ULTRAPORT_PID) }, 506 { USB_DEVICE(FTDI_VID, FTDI_PERLE_ULTRAPORT_PID) },
474 { USB_DEVICE(FTDI_VID, FTDI_PIEGROUP_PID) }, 507 { USB_DEVICE(FTDI_VID, FTDI_PIEGROUP_PID) },
475 { USB_DEVICE(FTDI_VID, FTDI_TNC_X_PID) }, 508 { USB_DEVICE(FTDI_VID, FTDI_TNC_X_PID) },
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 6786b705ccf6..e52409c9be99 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -926,8 +926,8 @@
926#define BAYER_CONTOUR_CABLE_PID 0x6001 926#define BAYER_CONTOUR_CABLE_PID 0x6001
927 927
928/* 928/*
929 * The following are the values for the Matrix Orbital FTDI Range 929 * Matrix Orbital Intelligent USB displays.
930 * Anything in this range will use an FT232RL. 930 * http://www.matrixorbital.com
931 */ 931 */
932#define MTXORB_VID 0x1B3D 932#define MTXORB_VID 0x1B3D
933#define MTXORB_FTDI_RANGE_0100_PID 0x0100 933#define MTXORB_FTDI_RANGE_0100_PID 0x0100
@@ -1186,8 +1186,39 @@
1186#define MTXORB_FTDI_RANGE_01FD_PID 0x01FD 1186#define MTXORB_FTDI_RANGE_01FD_PID 0x01FD
1187#define MTXORB_FTDI_RANGE_01FE_PID 0x01FE 1187#define MTXORB_FTDI_RANGE_01FE_PID 0x01FE
1188#define MTXORB_FTDI_RANGE_01FF_PID 0x01FF 1188#define MTXORB_FTDI_RANGE_01FF_PID 0x01FF
1189 1189#define MTXORB_FTDI_RANGE_4701_PID 0x4701
1190 1190#define MTXORB_FTDI_RANGE_9300_PID 0x9300
1191#define MTXORB_FTDI_RANGE_9301_PID 0x9301
1192#define MTXORB_FTDI_RANGE_9302_PID 0x9302
1193#define MTXORB_FTDI_RANGE_9303_PID 0x9303
1194#define MTXORB_FTDI_RANGE_9304_PID 0x9304
1195#define MTXORB_FTDI_RANGE_9305_PID 0x9305
1196#define MTXORB_FTDI_RANGE_9306_PID 0x9306
1197#define MTXORB_FTDI_RANGE_9307_PID 0x9307
1198#define MTXORB_FTDI_RANGE_9308_PID 0x9308
1199#define MTXORB_FTDI_RANGE_9309_PID 0x9309
1200#define MTXORB_FTDI_RANGE_930A_PID 0x930A
1201#define MTXORB_FTDI_RANGE_930B_PID 0x930B
1202#define MTXORB_FTDI_RANGE_930C_PID 0x930C
1203#define MTXORB_FTDI_RANGE_930D_PID 0x930D
1204#define MTXORB_FTDI_RANGE_930E_PID 0x930E
1205#define MTXORB_FTDI_RANGE_930F_PID 0x930F
1206#define MTXORB_FTDI_RANGE_9310_PID 0x9310
1207#define MTXORB_FTDI_RANGE_9311_PID 0x9311
1208#define MTXORB_FTDI_RANGE_9312_PID 0x9312
1209#define MTXORB_FTDI_RANGE_9313_PID 0x9313
1210#define MTXORB_FTDI_RANGE_9314_PID 0x9314
1211#define MTXORB_FTDI_RANGE_9315_PID 0x9315
1212#define MTXORB_FTDI_RANGE_9316_PID 0x9316
1213#define MTXORB_FTDI_RANGE_9317_PID 0x9317
1214#define MTXORB_FTDI_RANGE_9318_PID 0x9318
1215#define MTXORB_FTDI_RANGE_9319_PID 0x9319
1216#define MTXORB_FTDI_RANGE_931A_PID 0x931A
1217#define MTXORB_FTDI_RANGE_931B_PID 0x931B
1218#define MTXORB_FTDI_RANGE_931C_PID 0x931C
1219#define MTXORB_FTDI_RANGE_931D_PID 0x931D
1220#define MTXORB_FTDI_RANGE_931E_PID 0x931E
1221#define MTXORB_FTDI_RANGE_931F_PID 0x931F
1191 1222
1192/* 1223/*
1193 * The Mobility Lab (TML) 1224 * The Mobility Lab (TML)
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
index 93cb7cebda62..077c714f1285 100644
--- a/drivers/usb/serial/keyspan.c
+++ b/drivers/usb/serial/keyspan.c
@@ -311,24 +311,30 @@ static void usa26_indat_callback(struct urb *urb)
311 if ((data[0] & 0x80) == 0) { 311 if ((data[0] & 0x80) == 0) {
312 /* no errors on individual bytes, only 312 /* no errors on individual bytes, only
313 possible overrun err */ 313 possible overrun err */
314 if (data[0] & RXERROR_OVERRUN) 314 if (data[0] & RXERROR_OVERRUN) {
315 err = TTY_OVERRUN; 315 tty_insert_flip_char(&port->port, 0,
316 else 316 TTY_OVERRUN);
317 err = 0; 317 }
318 for (i = 1; i < urb->actual_length ; ++i) 318 for (i = 1; i < urb->actual_length ; ++i)
319 tty_insert_flip_char(&port->port, data[i], err); 319 tty_insert_flip_char(&port->port, data[i],
320 TTY_NORMAL);
320 } else { 321 } else {
321 /* some bytes had errors, every byte has status */ 322 /* some bytes had errors, every byte has status */
322 dev_dbg(&port->dev, "%s - RX error!!!!\n", __func__); 323 dev_dbg(&port->dev, "%s - RX error!!!!\n", __func__);
323 for (i = 0; i + 1 < urb->actual_length; i += 2) { 324 for (i = 0; i + 1 < urb->actual_length; i += 2) {
324 int stat = data[i], flag = 0; 325 int stat = data[i];
325 if (stat & RXERROR_OVERRUN) 326 int flag = TTY_NORMAL;
326 flag |= TTY_OVERRUN; 327
327 if (stat & RXERROR_FRAMING) 328 if (stat & RXERROR_OVERRUN) {
328 flag |= TTY_FRAME; 329 tty_insert_flip_char(&port->port, 0,
329 if (stat & RXERROR_PARITY) 330 TTY_OVERRUN);
330 flag |= TTY_PARITY; 331 }
331 /* XXX should handle break (0x10) */ 332 /* XXX should handle break (0x10) */
333 if (stat & RXERROR_PARITY)
334 flag = TTY_PARITY;
335 else if (stat & RXERROR_FRAMING)
336 flag = TTY_FRAME;
337
332 tty_insert_flip_char(&port->port, data[i+1], 338 tty_insert_flip_char(&port->port, data[i+1],
333 flag); 339 flag);
334 } 340 }
@@ -649,14 +655,19 @@ static void usa49_indat_callback(struct urb *urb)
649 } else { 655 } else {
650 /* some bytes had errors, every byte has status */ 656 /* some bytes had errors, every byte has status */
651 for (i = 0; i + 1 < urb->actual_length; i += 2) { 657 for (i = 0; i + 1 < urb->actual_length; i += 2) {
652 int stat = data[i], flag = 0; 658 int stat = data[i];
653 if (stat & RXERROR_OVERRUN) 659 int flag = TTY_NORMAL;
654 flag |= TTY_OVERRUN; 660
655 if (stat & RXERROR_FRAMING) 661 if (stat & RXERROR_OVERRUN) {
656 flag |= TTY_FRAME; 662 tty_insert_flip_char(&port->port, 0,
657 if (stat & RXERROR_PARITY) 663 TTY_OVERRUN);
658 flag |= TTY_PARITY; 664 }
659 /* XXX should handle break (0x10) */ 665 /* XXX should handle break (0x10) */
666 if (stat & RXERROR_PARITY)
667 flag = TTY_PARITY;
668 else if (stat & RXERROR_FRAMING)
669 flag = TTY_FRAME;
670
660 tty_insert_flip_char(&port->port, data[i+1], 671 tty_insert_flip_char(&port->port, data[i+1],
661 flag); 672 flag);
662 } 673 }
@@ -713,15 +724,19 @@ static void usa49wg_indat_callback(struct urb *urb)
713 */ 724 */
714 for (x = 0; x + 1 < len && 725 for (x = 0; x + 1 < len &&
715 i + 1 < urb->actual_length; x += 2) { 726 i + 1 < urb->actual_length; x += 2) {
716 int stat = data[i], flag = 0; 727 int stat = data[i];
728 int flag = TTY_NORMAL;
717 729
718 if (stat & RXERROR_OVERRUN) 730 if (stat & RXERROR_OVERRUN) {
719 flag |= TTY_OVERRUN; 731 tty_insert_flip_char(&port->port, 0,
720 if (stat & RXERROR_FRAMING) 732 TTY_OVERRUN);
721 flag |= TTY_FRAME; 733 }
722 if (stat & RXERROR_PARITY)
723 flag |= TTY_PARITY;
724 /* XXX should handle break (0x10) */ 734 /* XXX should handle break (0x10) */
735 if (stat & RXERROR_PARITY)
736 flag = TTY_PARITY;
737 else if (stat & RXERROR_FRAMING)
738 flag = TTY_FRAME;
739
725 tty_insert_flip_char(&port->port, data[i+1], 740 tty_insert_flip_char(&port->port, data[i+1],
726 flag); 741 flag);
727 i += 2; 742 i += 2;
@@ -773,25 +788,31 @@ static void usa90_indat_callback(struct urb *urb)
773 if ((data[0] & 0x80) == 0) { 788 if ((data[0] & 0x80) == 0) {
774 /* no errors on individual bytes, only 789 /* no errors on individual bytes, only
775 possible overrun err*/ 790 possible overrun err*/
776 if (data[0] & RXERROR_OVERRUN) 791 if (data[0] & RXERROR_OVERRUN) {
777 err = TTY_OVERRUN; 792 tty_insert_flip_char(&port->port, 0,
778 else 793 TTY_OVERRUN);
779 err = 0; 794 }
780 for (i = 1; i < urb->actual_length ; ++i) 795 for (i = 1; i < urb->actual_length ; ++i)
781 tty_insert_flip_char(&port->port, 796 tty_insert_flip_char(&port->port,
782 data[i], err); 797 data[i], TTY_NORMAL);
783 } else { 798 } else {
784 /* some bytes had errors, every byte has status */ 799 /* some bytes had errors, every byte has status */
785 dev_dbg(&port->dev, "%s - RX error!!!!\n", __func__); 800 dev_dbg(&port->dev, "%s - RX error!!!!\n", __func__);
786 for (i = 0; i + 1 < urb->actual_length; i += 2) { 801 for (i = 0; i + 1 < urb->actual_length; i += 2) {
787 int stat = data[i], flag = 0; 802 int stat = data[i];
788 if (stat & RXERROR_OVERRUN) 803 int flag = TTY_NORMAL;
789 flag |= TTY_OVERRUN; 804
790 if (stat & RXERROR_FRAMING) 805 if (stat & RXERROR_OVERRUN) {
791 flag |= TTY_FRAME; 806 tty_insert_flip_char(
792 if (stat & RXERROR_PARITY) 807 &port->port, 0,
793 flag |= TTY_PARITY; 808 TTY_OVERRUN);
809 }
794 /* XXX should handle break (0x10) */ 810 /* XXX should handle break (0x10) */
811 if (stat & RXERROR_PARITY)
812 flag = TTY_PARITY;
813 else if (stat & RXERROR_FRAMING)
814 flag = TTY_FRAME;
815
795 tty_insert_flip_char(&port->port, 816 tty_insert_flip_char(&port->port,
796 data[i+1], flag); 817 data[i+1], flag);
797 } 818 }
diff --git a/drivers/usb/serial/ssu100.c b/drivers/usb/serial/ssu100.c
index a7fe664b6b7d..70a098de429f 100644
--- a/drivers/usb/serial/ssu100.c
+++ b/drivers/usb/serial/ssu100.c
@@ -490,10 +490,9 @@ static void ssu100_update_lsr(struct usb_serial_port *port, u8 lsr,
490 if (*tty_flag == TTY_NORMAL) 490 if (*tty_flag == TTY_NORMAL)
491 *tty_flag = TTY_FRAME; 491 *tty_flag = TTY_FRAME;
492 } 492 }
493 if (lsr & UART_LSR_OE){ 493 if (lsr & UART_LSR_OE) {
494 port->icount.overrun++; 494 port->icount.overrun++;
495 if (*tty_flag == TTY_NORMAL) 495 tty_insert_flip_char(&port->port, 0, TTY_OVERRUN);
496 *tty_flag = TTY_OVERRUN;
497 } 496 }
498 } 497 }
499 498
@@ -511,12 +510,8 @@ static void ssu100_process_read_urb(struct urb *urb)
511 if ((len >= 4) && 510 if ((len >= 4) &&
512 (packet[0] == 0x1b) && (packet[1] == 0x1b) && 511 (packet[0] == 0x1b) && (packet[1] == 0x1b) &&
513 ((packet[2] == 0x00) || (packet[2] == 0x01))) { 512 ((packet[2] == 0x00) || (packet[2] == 0x01))) {
514 if (packet[2] == 0x00) { 513 if (packet[2] == 0x00)
515 ssu100_update_lsr(port, packet[3], &flag); 514 ssu100_update_lsr(port, packet[3], &flag);
516 if (flag == TTY_OVERRUN)
517 tty_insert_flip_char(&port->port, 0,
518 TTY_OVERRUN);
519 }
520 if (packet[2] == 0x01) 515 if (packet[2] == 0x01)
521 ssu100_update_msr(port, packet[3]); 516 ssu100_update_msr(port, packet[3]);
522 517
diff --git a/drivers/usb/storage/debug.c b/drivers/usb/storage/debug.c
index e08f64780e30..2d81e1d8ee30 100644
--- a/drivers/usb/storage/debug.c
+++ b/drivers/usb/storage/debug.c
@@ -164,10 +164,10 @@ void usb_stor_show_sense(const struct us_data *us,
164 unsigned char asc, 164 unsigned char asc,
165 unsigned char ascq) 165 unsigned char ascq)
166{ 166{
167 const char *what, *keystr; 167 const char *what, *keystr, *fmt;
168 168
169 keystr = scsi_sense_key_string(key); 169 keystr = scsi_sense_key_string(key);
170 what = scsi_extd_sense_format(asc, ascq); 170 what = scsi_extd_sense_format(asc, ascq, &fmt);
171 171
172 if (keystr == NULL) 172 if (keystr == NULL)
173 keystr = "(Unknown Key)"; 173 keystr = "(Unknown Key)";
@@ -175,8 +175,10 @@ void usb_stor_show_sense(const struct us_data *us,
175 what = "(unknown ASC/ASCQ)"; 175 what = "(unknown ASC/ASCQ)";
176 176
177 usb_stor_dbg(us, "%s: ", keystr); 177 usb_stor_dbg(us, "%s: ", keystr);
178 US_DEBUGPX(what, ascq); 178 if (fmt)
179 US_DEBUGPX("\n"); 179 US_DEBUGPX("%s (%s%x)\n", what, fmt, ascq);
180 else
181 US_DEBUGPX("%s\n", what);
180} 182}
181 183
182int usb_stor_dbg(const struct us_data *us, const char *fmt, ...) 184int usb_stor_dbg(const struct us_data *us, const char *fmt, ...)
@@ -186,7 +188,7 @@ int usb_stor_dbg(const struct us_data *us, const char *fmt, ...)
186 188
187 va_start(args, fmt); 189 va_start(args, fmt);
188 190
189 r = dev_vprintk_emit(7, &us->pusb_dev->dev, fmt, args); 191 r = dev_vprintk_emit(LOGLEVEL_DEBUG, &us->pusb_dev->dev, fmt, args);
190 192
191 va_end(args); 193 va_end(args);
192 194
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 89b24349269e..4047edfb64e1 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -181,7 +181,7 @@ static int uas_get_tag(struct scsi_cmnd *cmnd)
181{ 181{
182 int tag; 182 int tag;
183 183
184 if (blk_rq_tagged(cmnd->request)) 184 if (cmnd->flags & SCMD_TAGGED)
185 tag = cmnd->request->tag + 2; 185 tag = cmnd->request->tag + 2;
186 else 186 else
187 tag = 1; 187 tag = 1;
@@ -799,8 +799,7 @@ static int uas_slave_configure(struct scsi_device *sdev)
799 if (devinfo->flags & US_FL_NO_REPORT_OPCODES) 799 if (devinfo->flags & US_FL_NO_REPORT_OPCODES)
800 sdev->no_report_opcodes = 1; 800 sdev->no_report_opcodes = 1;
801 801
802 scsi_set_tag_type(sdev, MSG_ORDERED_TAG); 802 scsi_change_queue_depth(sdev, devinfo->qdepth - 2);
803 scsi_activate_tcq(sdev, devinfo->qdepth - 2);
804 return 0; 803 return 0;
805} 804}
806 805
@@ -817,7 +816,6 @@ static struct scsi_host_template uas_host_template = {
817 .sg_tablesize = SG_NONE, 816 .sg_tablesize = SG_NONE,
818 .cmd_per_lun = 1, /* until we override it */ 817 .cmd_per_lun = 1, /* until we override it */
819 .skip_settle_delay = 1, 818 .skip_settle_delay = 1,
820 .ordered_tag = 1,
821 819
822 /* 820 /*
823 * The uas drivers expects tags not to be bigger than the maximum 821 * The uas drivers expects tags not to be bigger than the maximum
@@ -825,6 +823,7 @@ static struct scsi_host_template uas_host_template = {
825 * allocator. 823 * allocator.
826 */ 824 */
827 .disable_blk_mq = true, 825 .disable_blk_mq = true,
826 .use_blk_tags = 1,
828}; 827};
829 828
830#define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \ 829#define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index 2fefaf923e4a..18a283d6de1c 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -103,3 +103,10 @@ UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999,
103 "VL711", 103 "VL711",
104 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 104 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
105 US_FL_NO_ATA_1X), 105 US_FL_NO_ATA_1X),
106
107/* Reported-by: Hans de Goede <hdegoede@redhat.com> */
108UNUSUAL_DEV(0x4971, 0x1012, 0x0000, 0x9999,
109 "Hitachi",
110 "External HDD",
111 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
112 US_FL_IGNORE_UAS),
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
index 553212f037c3..e8d695b3f54e 100644
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -560,7 +560,7 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
560 struct msi_msg msg; 560 struct msi_msg msg;
561 561
562 get_cached_msi_msg(irq, &msg); 562 get_cached_msi_msg(irq, &msg);
563 write_msi_msg(irq, &msg); 563 pci_write_msi_msg(irq, &msg);
564 } 564 }
565 565
566 ret = request_irq(irq, vfio_msihandler, 0, 566 ret = request_irq(irq, vfio_msihandler, 0,
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 8dae2f724a35..a935c254749e 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -48,20 +48,21 @@ MODULE_PARM_DESC(experimental_zcopytx, "Enable Zero Copy TX;"
48 * status internally; used for zerocopy tx only. 48 * status internally; used for zerocopy tx only.
49 */ 49 */
50/* Lower device DMA failed */ 50/* Lower device DMA failed */
51#define VHOST_DMA_FAILED_LEN 3 51#define VHOST_DMA_FAILED_LEN ((__force __virtio32)3)
52/* Lower device DMA done */ 52/* Lower device DMA done */
53#define VHOST_DMA_DONE_LEN 2 53#define VHOST_DMA_DONE_LEN ((__force __virtio32)2)
54/* Lower device DMA in progress */ 54/* Lower device DMA in progress */
55#define VHOST_DMA_IN_PROGRESS 1 55#define VHOST_DMA_IN_PROGRESS ((__force __virtio32)1)
56/* Buffer unused */ 56/* Buffer unused */
57#define VHOST_DMA_CLEAR_LEN 0 57#define VHOST_DMA_CLEAR_LEN ((__force __virtio32)0)
58 58
59#define VHOST_DMA_IS_DONE(len) ((len) >= VHOST_DMA_DONE_LEN) 59#define VHOST_DMA_IS_DONE(len) ((__force u32)(len) >= (__force u32)VHOST_DMA_DONE_LEN)
60 60
61enum { 61enum {
62 VHOST_NET_FEATURES = VHOST_FEATURES | 62 VHOST_NET_FEATURES = VHOST_FEATURES |
63 (1ULL << VHOST_NET_F_VIRTIO_NET_HDR) | 63 (1ULL << VHOST_NET_F_VIRTIO_NET_HDR) |
64 (1ULL << VIRTIO_NET_F_MRG_RXBUF), 64 (1ULL << VIRTIO_NET_F_MRG_RXBUF) |
65 (1ULL << VIRTIO_F_VERSION_1),
65}; 66};
66 67
67enum { 68enum {
@@ -416,7 +417,7 @@ static void handle_tx(struct vhost_net *net)
416 struct ubuf_info *ubuf; 417 struct ubuf_info *ubuf;
417 ubuf = nvq->ubuf_info + nvq->upend_idx; 418 ubuf = nvq->ubuf_info + nvq->upend_idx;
418 419
419 vq->heads[nvq->upend_idx].id = head; 420 vq->heads[nvq->upend_idx].id = cpu_to_vhost32(vq, head);
420 vq->heads[nvq->upend_idx].len = VHOST_DMA_IN_PROGRESS; 421 vq->heads[nvq->upend_idx].len = VHOST_DMA_IN_PROGRESS;
421 ubuf->callback = vhost_zerocopy_callback; 422 ubuf->callback = vhost_zerocopy_callback;
422 ubuf->ctx = nvq->ubufs; 423 ubuf->ctx = nvq->ubufs;
@@ -500,6 +501,10 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
500 int headcount = 0; 501 int headcount = 0;
501 unsigned d; 502 unsigned d;
502 int r, nlogs = 0; 503 int r, nlogs = 0;
504 /* len is always initialized before use since we are always called with
505 * datalen > 0.
506 */
507 u32 uninitialized_var(len);
503 508
504 while (datalen > 0 && headcount < quota) { 509 while (datalen > 0 && headcount < quota) {
505 if (unlikely(seg >= UIO_MAXIOV)) { 510 if (unlikely(seg >= UIO_MAXIOV)) {
@@ -527,13 +532,14 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
527 nlogs += *log_num; 532 nlogs += *log_num;
528 log += *log_num; 533 log += *log_num;
529 } 534 }
530 heads[headcount].id = d; 535 heads[headcount].id = cpu_to_vhost32(vq, d);
531 heads[headcount].len = iov_length(vq->iov + seg, in); 536 len = iov_length(vq->iov + seg, in);
532 datalen -= heads[headcount].len; 537 heads[headcount].len = cpu_to_vhost32(vq, len);
538 datalen -= len;
533 ++headcount; 539 ++headcount;
534 seg += in; 540 seg += in;
535 } 541 }
536 heads[headcount - 1].len += datalen; 542 heads[headcount - 1].len = cpu_to_vhost32(vq, len - datalen);
537 *iovcount = seg; 543 *iovcount = seg;
538 if (unlikely(log)) 544 if (unlikely(log))
539 *log_num = nlogs; 545 *log_num = nlogs;
@@ -1025,7 +1031,8 @@ static int vhost_net_set_features(struct vhost_net *n, u64 features)
1025 size_t vhost_hlen, sock_hlen, hdr_len; 1031 size_t vhost_hlen, sock_hlen, hdr_len;
1026 int i; 1032 int i;
1027 1033
1028 hdr_len = (features & (1 << VIRTIO_NET_F_MRG_RXBUF)) ? 1034 hdr_len = (features & ((1ULL << VIRTIO_NET_F_MRG_RXBUF) |
1035 (1ULL << VIRTIO_F_VERSION_1))) ?
1029 sizeof(struct virtio_net_hdr_mrg_rxbuf) : 1036 sizeof(struct virtio_net_hdr_mrg_rxbuf) :
1030 sizeof(struct virtio_net_hdr); 1037 sizeof(struct virtio_net_hdr);
1031 if (features & (1 << VHOST_NET_F_VIRTIO_NET_HDR)) { 1038 if (features & (1 << VHOST_NET_F_VIRTIO_NET_HDR)) {
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index a17f11850669..01c01cb3933f 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -168,6 +168,7 @@ enum {
168 VHOST_SCSI_VQ_IO = 2, 168 VHOST_SCSI_VQ_IO = 2,
169}; 169};
170 170
171/* Note: can't set VIRTIO_F_VERSION_1 yet, since that implies ANY_LAYOUT. */
171enum { 172enum {
172 VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) | 173 VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) |
173 (1ULL << VIRTIO_SCSI_F_T10_PI) 174 (1ULL << VIRTIO_SCSI_F_T10_PI)
@@ -577,8 +578,8 @@ tcm_vhost_allocate_evt(struct vhost_scsi *vs,
577 return NULL; 578 return NULL;
578 } 579 }
579 580
580 evt->event.event = event; 581 evt->event.event = cpu_to_vhost32(vq, event);
581 evt->event.reason = reason; 582 evt->event.reason = cpu_to_vhost32(vq, reason);
582 vs->vs_events_nr++; 583 vs->vs_events_nr++;
583 584
584 return evt; 585 return evt;
@@ -636,7 +637,7 @@ again:
636 } 637 }
637 638
638 if (vs->vs_events_missed) { 639 if (vs->vs_events_missed) {
639 event->event |= VIRTIO_SCSI_T_EVENTS_MISSED; 640 event->event |= cpu_to_vhost32(vq, VIRTIO_SCSI_T_EVENTS_MISSED);
640 vs->vs_events_missed = false; 641 vs->vs_events_missed = false;
641 } 642 }
642 643
@@ -695,12 +696,13 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
695 cmd, se_cmd->residual_count, se_cmd->scsi_status); 696 cmd, se_cmd->residual_count, se_cmd->scsi_status);
696 697
697 memset(&v_rsp, 0, sizeof(v_rsp)); 698 memset(&v_rsp, 0, sizeof(v_rsp));
698 v_rsp.resid = se_cmd->residual_count; 699 v_rsp.resid = cpu_to_vhost32(cmd->tvc_vq, se_cmd->residual_count);
699 /* TODO is status_qualifier field needed? */ 700 /* TODO is status_qualifier field needed? */
700 v_rsp.status = se_cmd->scsi_status; 701 v_rsp.status = se_cmd->scsi_status;
701 v_rsp.sense_len = se_cmd->scsi_sense_length; 702 v_rsp.sense_len = cpu_to_vhost32(cmd->tvc_vq,
703 se_cmd->scsi_sense_length);
702 memcpy(v_rsp.sense, cmd->tvc_sense_buf, 704 memcpy(v_rsp.sense, cmd->tvc_sense_buf,
703 v_rsp.sense_len); 705 se_cmd->scsi_sense_length);
704 ret = copy_to_user(cmd->tvc_resp, &v_rsp, sizeof(v_rsp)); 706 ret = copy_to_user(cmd->tvc_resp, &v_rsp, sizeof(v_rsp));
705 if (likely(ret == 0)) { 707 if (likely(ret == 0)) {
706 struct vhost_scsi_virtqueue *q; 708 struct vhost_scsi_virtqueue *q;
@@ -1095,14 +1097,14 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
1095 ", but wrong data_direction\n"); 1097 ", but wrong data_direction\n");
1096 goto err_cmd; 1098 goto err_cmd;
1097 } 1099 }
1098 prot_bytes = v_req_pi.pi_bytesout; 1100 prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout);
1099 } else if (v_req_pi.pi_bytesin) { 1101 } else if (v_req_pi.pi_bytesin) {
1100 if (data_direction != DMA_FROM_DEVICE) { 1102 if (data_direction != DMA_FROM_DEVICE) {
1101 vq_err(vq, "Received non zero di_pi_niov" 1103 vq_err(vq, "Received non zero di_pi_niov"
1102 ", but wrong data_direction\n"); 1104 ", but wrong data_direction\n");
1103 goto err_cmd; 1105 goto err_cmd;
1104 } 1106 }
1105 prot_bytes = v_req_pi.pi_bytesin; 1107 prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin);
1106 } 1108 }
1107 if (prot_bytes) { 1109 if (prot_bytes) {
1108 int tmp = 0; 1110 int tmp = 0;
@@ -1117,12 +1119,12 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
1117 data_first += prot_niov; 1119 data_first += prot_niov;
1118 data_niov = data_num - prot_niov; 1120 data_niov = data_num - prot_niov;
1119 } 1121 }
1120 tag = v_req_pi.tag; 1122 tag = vhost64_to_cpu(vq, v_req_pi.tag);
1121 task_attr = v_req_pi.task_attr; 1123 task_attr = v_req_pi.task_attr;
1122 cdb = &v_req_pi.cdb[0]; 1124 cdb = &v_req_pi.cdb[0];
1123 lun = ((v_req_pi.lun[2] << 8) | v_req_pi.lun[3]) & 0x3FFF; 1125 lun = ((v_req_pi.lun[2] << 8) | v_req_pi.lun[3]) & 0x3FFF;
1124 } else { 1126 } else {
1125 tag = v_req.tag; 1127 tag = vhost64_to_cpu(vq, v_req.tag);
1126 task_attr = v_req.task_attr; 1128 task_attr = v_req.task_attr;
1127 cdb = &v_req.cdb[0]; 1129 cdb = &v_req.cdb[0];
1128 lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF; 1130 lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index c90f4374442a..ed71b5347a76 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -33,8 +33,8 @@ enum {
33 VHOST_MEMORY_F_LOG = 0x1, 33 VHOST_MEMORY_F_LOG = 0x1,
34}; 34};
35 35
36#define vhost_used_event(vq) ((u16 __user *)&vq->avail->ring[vq->num]) 36#define vhost_used_event(vq) ((__virtio16 __user *)&vq->avail->ring[vq->num])
37#define vhost_avail_event(vq) ((u16 __user *)&vq->used->ring[vq->num]) 37#define vhost_avail_event(vq) ((__virtio16 __user *)&vq->used->ring[vq->num])
38 38
39static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh, 39static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
40 poll_table *pt) 40 poll_table *pt)
@@ -1001,7 +1001,7 @@ EXPORT_SYMBOL_GPL(vhost_log_write);
1001static int vhost_update_used_flags(struct vhost_virtqueue *vq) 1001static int vhost_update_used_flags(struct vhost_virtqueue *vq)
1002{ 1002{
1003 void __user *used; 1003 void __user *used;
1004 if (__put_user(vq->used_flags, &vq->used->flags) < 0) 1004 if (__put_user(cpu_to_vhost16(vq, vq->used_flags), &vq->used->flags) < 0)
1005 return -EFAULT; 1005 return -EFAULT;
1006 if (unlikely(vq->log_used)) { 1006 if (unlikely(vq->log_used)) {
1007 /* Make sure the flag is seen before log. */ 1007 /* Make sure the flag is seen before log. */
@@ -1019,7 +1019,7 @@ static int vhost_update_used_flags(struct vhost_virtqueue *vq)
1019 1019
1020static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event) 1020static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
1021{ 1021{
1022 if (__put_user(vq->avail_idx, vhost_avail_event(vq))) 1022 if (__put_user(cpu_to_vhost16(vq, vq->avail_idx), vhost_avail_event(vq)))
1023 return -EFAULT; 1023 return -EFAULT;
1024 if (unlikely(vq->log_used)) { 1024 if (unlikely(vq->log_used)) {
1025 void __user *used; 1025 void __user *used;
@@ -1038,6 +1038,7 @@ static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
1038 1038
1039int vhost_init_used(struct vhost_virtqueue *vq) 1039int vhost_init_used(struct vhost_virtqueue *vq)
1040{ 1040{
1041 __virtio16 last_used_idx;
1041 int r; 1042 int r;
1042 if (!vq->private_data) 1043 if (!vq->private_data)
1043 return 0; 1044 return 0;
@@ -1046,7 +1047,13 @@ int vhost_init_used(struct vhost_virtqueue *vq)
1046 if (r) 1047 if (r)
1047 return r; 1048 return r;
1048 vq->signalled_used_valid = false; 1049 vq->signalled_used_valid = false;
1049 return get_user(vq->last_used_idx, &vq->used->idx); 1050 if (!access_ok(VERIFY_READ, &vq->used->idx, sizeof vq->used->idx))
1051 return -EFAULT;
1052 r = __get_user(last_used_idx, &vq->used->idx);
1053 if (r)
1054 return r;
1055 vq->last_used_idx = vhost16_to_cpu(vq, last_used_idx);
1056 return 0;
1050} 1057}
1051EXPORT_SYMBOL_GPL(vhost_init_used); 1058EXPORT_SYMBOL_GPL(vhost_init_used);
1052 1059
@@ -1087,16 +1094,16 @@ static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
1087/* Each buffer in the virtqueues is actually a chain of descriptors. This 1094/* Each buffer in the virtqueues is actually a chain of descriptors. This
1088 * function returns the next descriptor in the chain, 1095 * function returns the next descriptor in the chain,
1089 * or -1U if we're at the end. */ 1096 * or -1U if we're at the end. */
1090static unsigned next_desc(struct vring_desc *desc) 1097static unsigned next_desc(struct vhost_virtqueue *vq, struct vring_desc *desc)
1091{ 1098{
1092 unsigned int next; 1099 unsigned int next;
1093 1100
1094 /* If this descriptor says it doesn't chain, we're done. */ 1101 /* If this descriptor says it doesn't chain, we're done. */
1095 if (!(desc->flags & VRING_DESC_F_NEXT)) 1102 if (!(desc->flags & cpu_to_vhost16(vq, VRING_DESC_F_NEXT)))
1096 return -1U; 1103 return -1U;
1097 1104
1098 /* Check they're not leading us off end of descriptors. */ 1105 /* Check they're not leading us off end of descriptors. */
1099 next = desc->next; 1106 next = vhost16_to_cpu(vq, desc->next);
1100 /* Make sure compiler knows to grab that: we don't want it changing! */ 1107 /* Make sure compiler knows to grab that: we don't want it changing! */
1101 /* We will use the result as an index in an array, so most 1108 /* We will use the result as an index in an array, so most
1102 * architectures only need a compiler barrier here. */ 1109 * architectures only need a compiler barrier here. */
@@ -1113,18 +1120,19 @@ static int get_indirect(struct vhost_virtqueue *vq,
1113{ 1120{
1114 struct vring_desc desc; 1121 struct vring_desc desc;
1115 unsigned int i = 0, count, found = 0; 1122 unsigned int i = 0, count, found = 0;
1123 u32 len = vhost32_to_cpu(vq, indirect->len);
1116 int ret; 1124 int ret;
1117 1125
1118 /* Sanity check */ 1126 /* Sanity check */
1119 if (unlikely(indirect->len % sizeof desc)) { 1127 if (unlikely(len % sizeof desc)) {
1120 vq_err(vq, "Invalid length in indirect descriptor: " 1128 vq_err(vq, "Invalid length in indirect descriptor: "
1121 "len 0x%llx not multiple of 0x%zx\n", 1129 "len 0x%llx not multiple of 0x%zx\n",
1122 (unsigned long long)indirect->len, 1130 (unsigned long long)len,
1123 sizeof desc); 1131 sizeof desc);
1124 return -EINVAL; 1132 return -EINVAL;
1125 } 1133 }
1126 1134
1127 ret = translate_desc(vq, indirect->addr, indirect->len, vq->indirect, 1135 ret = translate_desc(vq, vhost64_to_cpu(vq, indirect->addr), len, vq->indirect,
1128 UIO_MAXIOV); 1136 UIO_MAXIOV);
1129 if (unlikely(ret < 0)) { 1137 if (unlikely(ret < 0)) {
1130 vq_err(vq, "Translation failure %d in indirect.\n", ret); 1138 vq_err(vq, "Translation failure %d in indirect.\n", ret);
@@ -1135,7 +1143,7 @@ static int get_indirect(struct vhost_virtqueue *vq,
1135 * architectures only need a compiler barrier here. */ 1143 * architectures only need a compiler barrier here. */
1136 read_barrier_depends(); 1144 read_barrier_depends();
1137 1145
1138 count = indirect->len / sizeof desc; 1146 count = len / sizeof desc;
1139 /* Buffers are chained via a 16 bit next field, so 1147 /* Buffers are chained via a 16 bit next field, so
1140 * we can have at most 2^16 of these. */ 1148 * we can have at most 2^16 of these. */
1141 if (unlikely(count > USHRT_MAX + 1)) { 1149 if (unlikely(count > USHRT_MAX + 1)) {
@@ -1155,16 +1163,17 @@ static int get_indirect(struct vhost_virtqueue *vq,
1155 if (unlikely(memcpy_fromiovec((unsigned char *)&desc, 1163 if (unlikely(memcpy_fromiovec((unsigned char *)&desc,
1156 vq->indirect, sizeof desc))) { 1164 vq->indirect, sizeof desc))) {
1157 vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n", 1165 vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
1158 i, (size_t)indirect->addr + i * sizeof desc); 1166 i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
1159 return -EINVAL; 1167 return -EINVAL;
1160 } 1168 }
1161 if (unlikely(desc.flags & VRING_DESC_F_INDIRECT)) { 1169 if (unlikely(desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT))) {
1162 vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n", 1170 vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n",
1163 i, (size_t)indirect->addr + i * sizeof desc); 1171 i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
1164 return -EINVAL; 1172 return -EINVAL;
1165 } 1173 }
1166 1174
1167 ret = translate_desc(vq, desc.addr, desc.len, iov + iov_count, 1175 ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
1176 vhost32_to_cpu(vq, desc.len), iov + iov_count,
1168 iov_size - iov_count); 1177 iov_size - iov_count);
1169 if (unlikely(ret < 0)) { 1178 if (unlikely(ret < 0)) {
1170 vq_err(vq, "Translation failure %d indirect idx %d\n", 1179 vq_err(vq, "Translation failure %d indirect idx %d\n",
@@ -1172,11 +1181,11 @@ static int get_indirect(struct vhost_virtqueue *vq,
1172 return ret; 1181 return ret;
1173 } 1182 }
1174 /* If this is an input descriptor, increment that count. */ 1183 /* If this is an input descriptor, increment that count. */
1175 if (desc.flags & VRING_DESC_F_WRITE) { 1184 if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE)) {
1176 *in_num += ret; 1185 *in_num += ret;
1177 if (unlikely(log)) { 1186 if (unlikely(log)) {
1178 log[*log_num].addr = desc.addr; 1187 log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
1179 log[*log_num].len = desc.len; 1188 log[*log_num].len = vhost32_to_cpu(vq, desc.len);
1180 ++*log_num; 1189 ++*log_num;
1181 } 1190 }
1182 } else { 1191 } else {
@@ -1189,7 +1198,7 @@ static int get_indirect(struct vhost_virtqueue *vq,
1189 } 1198 }
1190 *out_num += ret; 1199 *out_num += ret;
1191 } 1200 }
1192 } while ((i = next_desc(&desc)) != -1); 1201 } while ((i = next_desc(vq, &desc)) != -1);
1193 return 0; 1202 return 0;
1194} 1203}
1195 1204
@@ -1209,15 +1218,18 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
1209 struct vring_desc desc; 1218 struct vring_desc desc;
1210 unsigned int i, head, found = 0; 1219 unsigned int i, head, found = 0;
1211 u16 last_avail_idx; 1220 u16 last_avail_idx;
1221 __virtio16 avail_idx;
1222 __virtio16 ring_head;
1212 int ret; 1223 int ret;
1213 1224
1214 /* Check it isn't doing very strange things with descriptor numbers. */ 1225 /* Check it isn't doing very strange things with descriptor numbers. */
1215 last_avail_idx = vq->last_avail_idx; 1226 last_avail_idx = vq->last_avail_idx;
1216 if (unlikely(__get_user(vq->avail_idx, &vq->avail->idx))) { 1227 if (unlikely(__get_user(avail_idx, &vq->avail->idx))) {
1217 vq_err(vq, "Failed to access avail idx at %p\n", 1228 vq_err(vq, "Failed to access avail idx at %p\n",
1218 &vq->avail->idx); 1229 &vq->avail->idx);
1219 return -EFAULT; 1230 return -EFAULT;
1220 } 1231 }
1232 vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
1221 1233
1222 if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) { 1234 if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) {
1223 vq_err(vq, "Guest moved used index from %u to %u", 1235 vq_err(vq, "Guest moved used index from %u to %u",
@@ -1234,7 +1246,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
1234 1246
1235 /* Grab the next descriptor number they're advertising, and increment 1247 /* Grab the next descriptor number they're advertising, and increment
1236 * the index we've seen. */ 1248 * the index we've seen. */
1237 if (unlikely(__get_user(head, 1249 if (unlikely(__get_user(ring_head,
1238 &vq->avail->ring[last_avail_idx % vq->num]))) { 1250 &vq->avail->ring[last_avail_idx % vq->num]))) {
1239 vq_err(vq, "Failed to read head: idx %d address %p\n", 1251 vq_err(vq, "Failed to read head: idx %d address %p\n",
1240 last_avail_idx, 1252 last_avail_idx,
@@ -1242,6 +1254,8 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
1242 return -EFAULT; 1254 return -EFAULT;
1243 } 1255 }
1244 1256
1257 head = vhost16_to_cpu(vq, ring_head);
1258
1245 /* If their number is silly, that's an error. */ 1259 /* If their number is silly, that's an error. */
1246 if (unlikely(head >= vq->num)) { 1260 if (unlikely(head >= vq->num)) {
1247 vq_err(vq, "Guest says index %u > %u is available", 1261 vq_err(vq, "Guest says index %u > %u is available",
@@ -1274,7 +1288,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
1274 i, vq->desc + i); 1288 i, vq->desc + i);
1275 return -EFAULT; 1289 return -EFAULT;
1276 } 1290 }
1277 if (desc.flags & VRING_DESC_F_INDIRECT) { 1291 if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT)) {
1278 ret = get_indirect(vq, iov, iov_size, 1292 ret = get_indirect(vq, iov, iov_size,
1279 out_num, in_num, 1293 out_num, in_num,
1280 log, log_num, &desc); 1294 log, log_num, &desc);
@@ -1286,20 +1300,21 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
1286 continue; 1300 continue;
1287 } 1301 }
1288 1302
1289 ret = translate_desc(vq, desc.addr, desc.len, iov + iov_count, 1303 ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
1304 vhost32_to_cpu(vq, desc.len), iov + iov_count,
1290 iov_size - iov_count); 1305 iov_size - iov_count);
1291 if (unlikely(ret < 0)) { 1306 if (unlikely(ret < 0)) {
1292 vq_err(vq, "Translation failure %d descriptor idx %d\n", 1307 vq_err(vq, "Translation failure %d descriptor idx %d\n",
1293 ret, i); 1308 ret, i);
1294 return ret; 1309 return ret;
1295 } 1310 }
1296 if (desc.flags & VRING_DESC_F_WRITE) { 1311 if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE)) {
1297 /* If this is an input descriptor, 1312 /* If this is an input descriptor,
1298 * increment that count. */ 1313 * increment that count. */
1299 *in_num += ret; 1314 *in_num += ret;
1300 if (unlikely(log)) { 1315 if (unlikely(log)) {
1301 log[*log_num].addr = desc.addr; 1316 log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
1302 log[*log_num].len = desc.len; 1317 log[*log_num].len = vhost32_to_cpu(vq, desc.len);
1303 ++*log_num; 1318 ++*log_num;
1304 } 1319 }
1305 } else { 1320 } else {
@@ -1312,7 +1327,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
1312 } 1327 }
1313 *out_num += ret; 1328 *out_num += ret;
1314 } 1329 }
1315 } while ((i = next_desc(&desc)) != -1); 1330 } while ((i = next_desc(vq, &desc)) != -1);
1316 1331
1317 /* On success, increment avail index. */ 1332 /* On success, increment avail index. */
1318 vq->last_avail_idx++; 1333 vq->last_avail_idx++;
@@ -1335,7 +1350,10 @@ EXPORT_SYMBOL_GPL(vhost_discard_vq_desc);
1335 * want to notify the guest, using eventfd. */ 1350 * want to notify the guest, using eventfd. */
1336int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len) 1351int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
1337{ 1352{
1338 struct vring_used_elem heads = { head, len }; 1353 struct vring_used_elem heads = {
1354 cpu_to_vhost32(vq, head),
1355 cpu_to_vhost32(vq, len)
1356 };
1339 1357
1340 return vhost_add_used_n(vq, &heads, 1); 1358 return vhost_add_used_n(vq, &heads, 1);
1341} 1359}
@@ -1404,7 +1422,7 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
1404 1422
1405 /* Make sure buffer is written before we update index. */ 1423 /* Make sure buffer is written before we update index. */
1406 smp_wmb(); 1424 smp_wmb();
1407 if (put_user(vq->last_used_idx, &vq->used->idx)) { 1425 if (__put_user(cpu_to_vhost16(vq, vq->last_used_idx), &vq->used->idx)) {
1408 vq_err(vq, "Failed to increment used idx"); 1426 vq_err(vq, "Failed to increment used idx");
1409 return -EFAULT; 1427 return -EFAULT;
1410 } 1428 }
@@ -1422,7 +1440,8 @@ EXPORT_SYMBOL_GPL(vhost_add_used_n);
1422 1440
1423static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) 1441static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
1424{ 1442{
1425 __u16 old, new, event; 1443 __u16 old, new;
1444 __virtio16 event;
1426 bool v; 1445 bool v;
1427 /* Flush out used index updates. This is paired 1446 /* Flush out used index updates. This is paired
1428 * with the barrier that the Guest executes when enabling 1447 * with the barrier that the Guest executes when enabling
@@ -1434,12 +1453,12 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
1434 return true; 1453 return true;
1435 1454
1436 if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) { 1455 if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
1437 __u16 flags; 1456 __virtio16 flags;
1438 if (__get_user(flags, &vq->avail->flags)) { 1457 if (__get_user(flags, &vq->avail->flags)) {
1439 vq_err(vq, "Failed to get flags"); 1458 vq_err(vq, "Failed to get flags");
1440 return true; 1459 return true;
1441 } 1460 }
1442 return !(flags & VRING_AVAIL_F_NO_INTERRUPT); 1461 return !(flags & cpu_to_vhost16(vq, VRING_AVAIL_F_NO_INTERRUPT));
1443 } 1462 }
1444 old = vq->signalled_used; 1463 old = vq->signalled_used;
1445 v = vq->signalled_used_valid; 1464 v = vq->signalled_used_valid;
@@ -1449,11 +1468,11 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
1449 if (unlikely(!v)) 1468 if (unlikely(!v))
1450 return true; 1469 return true;
1451 1470
1452 if (get_user(event, vhost_used_event(vq))) { 1471 if (__get_user(event, vhost_used_event(vq))) {
1453 vq_err(vq, "Failed to get used event idx"); 1472 vq_err(vq, "Failed to get used event idx");
1454 return true; 1473 return true;
1455 } 1474 }
1456 return vring_need_event(event, new, old); 1475 return vring_need_event(vhost16_to_cpu(vq, event), new, old);
1457} 1476}
1458 1477
1459/* This actually signals the guest, using eventfd. */ 1478/* This actually signals the guest, using eventfd. */
@@ -1488,7 +1507,7 @@ EXPORT_SYMBOL_GPL(vhost_add_used_and_signal_n);
1488/* OK, now we need to know about added descriptors. */ 1507/* OK, now we need to know about added descriptors. */
1489bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) 1508bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
1490{ 1509{
1491 u16 avail_idx; 1510 __virtio16 avail_idx;
1492 int r; 1511 int r;
1493 1512
1494 if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY)) 1513 if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
@@ -1519,7 +1538,7 @@ bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
1519 return false; 1538 return false;
1520 } 1539 }
1521 1540
1522 return avail_idx != vq->avail_idx; 1541 return vhost16_to_cpu(vq, avail_idx) != vq->avail_idx;
1523} 1542}
1524EXPORT_SYMBOL_GPL(vhost_enable_notify); 1543EXPORT_SYMBOL_GPL(vhost_enable_notify);
1525 1544
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 3eda654b8f5a..8c1c792900ba 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -12,8 +12,6 @@
12#include <linux/virtio_ring.h> 12#include <linux/virtio_ring.h>
13#include <linux/atomic.h> 13#include <linux/atomic.h>
14 14
15struct vhost_device;
16
17struct vhost_work; 15struct vhost_work;
18typedef void (*vhost_work_fn_t)(struct vhost_work *work); 16typedef void (*vhost_work_fn_t)(struct vhost_work *work);
19 17
@@ -54,8 +52,6 @@ struct vhost_log {
54 u64 len; 52 u64 len;
55}; 53};
56 54
57struct vhost_virtqueue;
58
59/* The virtqueue structure describes a queue attached to a device. */ 55/* The virtqueue structure describes a queue attached to a device. */
60struct vhost_virtqueue { 56struct vhost_virtqueue {
61 struct vhost_dev *dev; 57 struct vhost_dev *dev;
@@ -106,7 +102,7 @@ struct vhost_virtqueue {
106 /* Protected by virtqueue mutex. */ 102 /* Protected by virtqueue mutex. */
107 struct vhost_memory *memory; 103 struct vhost_memory *memory;
108 void *private_data; 104 void *private_data;
109 unsigned acked_features; 105 u64 acked_features;
110 /* Log write descriptors */ 106 /* Log write descriptors */
111 void __user *log_base; 107 void __user *log_base;
112 struct vhost_log *log; 108 struct vhost_log *log;
@@ -172,8 +168,39 @@ enum {
172 (1ULL << VHOST_F_LOG_ALL), 168 (1ULL << VHOST_F_LOG_ALL),
173}; 169};
174 170
175static inline int vhost_has_feature(struct vhost_virtqueue *vq, int bit) 171static inline bool vhost_has_feature(struct vhost_virtqueue *vq, int bit)
172{
173 return vq->acked_features & (1ULL << bit);
174}
175
176/* Memory accessors */
177static inline u16 vhost16_to_cpu(struct vhost_virtqueue *vq, __virtio16 val)
178{
179 return __virtio16_to_cpu(vhost_has_feature(vq, VIRTIO_F_VERSION_1), val);
180}
181
182static inline __virtio16 cpu_to_vhost16(struct vhost_virtqueue *vq, u16 val)
183{
184 return __cpu_to_virtio16(vhost_has_feature(vq, VIRTIO_F_VERSION_1), val);
185}
186
187static inline u32 vhost32_to_cpu(struct vhost_virtqueue *vq, __virtio32 val)
188{
189 return __virtio32_to_cpu(vhost_has_feature(vq, VIRTIO_F_VERSION_1), val);
190}
191
192static inline __virtio32 cpu_to_vhost32(struct vhost_virtqueue *vq, u32 val)
193{
194 return __cpu_to_virtio32(vhost_has_feature(vq, VIRTIO_F_VERSION_1), val);
195}
196
197static inline u64 vhost64_to_cpu(struct vhost_virtqueue *vq, __virtio64 val)
198{
199 return __virtio64_to_cpu(vhost_has_feature(vq, VIRTIO_F_VERSION_1), val);
200}
201
202static inline __virtio64 cpu_to_vhost64(struct vhost_virtqueue *vq, u64 val)
176{ 203{
177 return vq->acked_features & (1 << bit); 204 return __cpu_to_virtio64(vhost_has_feature(vq, VIRTIO_F_VERSION_1), val);
178} 205}
179#endif 206#endif
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index 8d03924749b8..efb09046a8cf 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -168,7 +168,6 @@ if BACKLIGHT_CLASS_DEVICE
168config BACKLIGHT_ATMEL_LCDC 168config BACKLIGHT_ATMEL_LCDC
169 bool "Atmel LCDC Contrast-as-Backlight control" 169 bool "Atmel LCDC Contrast-as-Backlight control"
170 depends on FB_ATMEL 170 depends on FB_ATMEL
171 default y if MACH_AT91SAM9261EK || MACH_AT91SAM9G10EK || MACH_AT91SAM9263EK
172 help 171 help
173 This provides a backlight control internal to the Atmel LCDC 172 This provides a backlight control internal to the Atmel LCDC
174 driver. If the LCD "contrast control" on your board is wired 173 driver. If the LCD "contrast control" on your board is wired
diff --git a/drivers/video/backlight/lp855x_bl.c b/drivers/video/backlight/lp855x_bl.c
index 25fb8e3d75b1..a26d3bb25650 100644
--- a/drivers/video/backlight/lp855x_bl.c
+++ b/drivers/video/backlight/lp855x_bl.c
@@ -17,6 +17,7 @@
17#include <linux/of.h> 17#include <linux/of.h>
18#include <linux/platform_data/lp855x.h> 18#include <linux/platform_data/lp855x.h>
19#include <linux/pwm.h> 19#include <linux/pwm.h>
20#include <linux/regulator/consumer.h>
20 21
21/* LP8550/1/2/3/6 Registers */ 22/* LP8550/1/2/3/6 Registers */
22#define LP855X_BRIGHTNESS_CTRL 0x00 23#define LP855X_BRIGHTNESS_CTRL 0x00
@@ -341,8 +342,10 @@ static const struct attribute_group lp855x_attr_group = {
341}; 342};
342 343
343#ifdef CONFIG_OF 344#ifdef CONFIG_OF
344static int lp855x_parse_dt(struct device *dev, struct device_node *node) 345static int lp855x_parse_dt(struct lp855x *lp)
345{ 346{
347 struct device *dev = lp->dev;
348 struct device_node *node = dev->of_node;
346 struct lp855x_platform_data *pdata; 349 struct lp855x_platform_data *pdata;
347 int rom_length; 350 int rom_length;
348 351
@@ -381,12 +384,19 @@ static int lp855x_parse_dt(struct device *dev, struct device_node *node)
381 pdata->rom_data = &rom[0]; 384 pdata->rom_data = &rom[0];
382 } 385 }
383 386
384 dev->platform_data = pdata; 387 pdata->supply = devm_regulator_get(dev, "power");
388 if (IS_ERR(pdata->supply)) {
389 if (PTR_ERR(pdata->supply) == -EPROBE_DEFER)
390 return -EPROBE_DEFER;
391 pdata->supply = NULL;
392 }
393
394 lp->pdata = pdata;
385 395
386 return 0; 396 return 0;
387} 397}
388#else 398#else
389static int lp855x_parse_dt(struct device *dev, struct device_node *node) 399static int lp855x_parse_dt(struct lp855x *lp)
390{ 400{
391 return -EINVAL; 401 return -EINVAL;
392} 402}
@@ -395,18 +405,8 @@ static int lp855x_parse_dt(struct device *dev, struct device_node *node)
395static int lp855x_probe(struct i2c_client *cl, const struct i2c_device_id *id) 405static int lp855x_probe(struct i2c_client *cl, const struct i2c_device_id *id)
396{ 406{
397 struct lp855x *lp; 407 struct lp855x *lp;
398 struct lp855x_platform_data *pdata = dev_get_platdata(&cl->dev);
399 struct device_node *node = cl->dev.of_node;
400 int ret; 408 int ret;
401 409
402 if (!pdata) {
403 ret = lp855x_parse_dt(&cl->dev, node);
404 if (ret < 0)
405 return ret;
406
407 pdata = dev_get_platdata(&cl->dev);
408 }
409
410 if (!i2c_check_functionality(cl->adapter, I2C_FUNC_SMBUS_I2C_BLOCK)) 410 if (!i2c_check_functionality(cl->adapter, I2C_FUNC_SMBUS_I2C_BLOCK))
411 return -EIO; 411 return -EIO;
412 412
@@ -414,16 +414,31 @@ static int lp855x_probe(struct i2c_client *cl, const struct i2c_device_id *id)
414 if (!lp) 414 if (!lp)
415 return -ENOMEM; 415 return -ENOMEM;
416 416
417 if (pdata->period_ns > 0)
418 lp->mode = PWM_BASED;
419 else
420 lp->mode = REGISTER_BASED;
421
422 lp->client = cl; 417 lp->client = cl;
423 lp->dev = &cl->dev; 418 lp->dev = &cl->dev;
424 lp->pdata = pdata;
425 lp->chipname = id->name; 419 lp->chipname = id->name;
426 lp->chip_id = id->driver_data; 420 lp->chip_id = id->driver_data;
421 lp->pdata = dev_get_platdata(&cl->dev);
422
423 if (!lp->pdata) {
424 ret = lp855x_parse_dt(lp);
425 if (ret < 0)
426 return ret;
427 }
428
429 if (lp->pdata->period_ns > 0)
430 lp->mode = PWM_BASED;
431 else
432 lp->mode = REGISTER_BASED;
433
434 if (lp->pdata->supply) {
435 ret = regulator_enable(lp->pdata->supply);
436 if (ret < 0) {
437 dev_err(&cl->dev, "failed to enable supply: %d\n", ret);
438 return ret;
439 }
440 }
441
427 i2c_set_clientdata(cl, lp); 442 i2c_set_clientdata(cl, lp);
428 443
429 ret = lp855x_configure(lp); 444 ret = lp855x_configure(lp);
@@ -455,6 +470,8 @@ static int lp855x_remove(struct i2c_client *cl)
455 470
456 lp->bl->props.brightness = 0; 471 lp->bl->props.brightness = 0;
457 backlight_update_status(lp->bl); 472 backlight_update_status(lp->bl);
473 if (lp->pdata->supply)
474 regulator_disable(lp->pdata->supply);
458 sysfs_remove_group(&lp->dev->kobj, &lp855x_attr_group); 475 sysfs_remove_group(&lp->dev->kobj, &lp855x_attr_group);
459 476
460 return 0; 477 return 0;
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index cb5ae4c08469..3a145a643e0d 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -34,6 +34,7 @@ struct pwm_bl_data {
34 struct regulator *power_supply; 34 struct regulator *power_supply;
35 struct gpio_desc *enable_gpio; 35 struct gpio_desc *enable_gpio;
36 unsigned int scale; 36 unsigned int scale;
37 bool legacy;
37 int (*notify)(struct device *, 38 int (*notify)(struct device *,
38 int brightness); 39 int brightness);
39 void (*notify_after)(struct device *, 40 void (*notify_after)(struct device *,
@@ -274,7 +275,7 @@ static int pwm_backlight_probe(struct platform_device *pdev)
274 pb->pwm = devm_pwm_get(&pdev->dev, NULL); 275 pb->pwm = devm_pwm_get(&pdev->dev, NULL);
275 if (IS_ERR(pb->pwm)) { 276 if (IS_ERR(pb->pwm)) {
276 dev_err(&pdev->dev, "unable to request PWM, trying legacy API\n"); 277 dev_err(&pdev->dev, "unable to request PWM, trying legacy API\n");
277 278 pb->legacy = true;
278 pb->pwm = pwm_request(data->pwm_id, "pwm-backlight"); 279 pb->pwm = pwm_request(data->pwm_id, "pwm-backlight");
279 if (IS_ERR(pb->pwm)) { 280 if (IS_ERR(pb->pwm)) {
280 dev_err(&pdev->dev, "unable to request legacy PWM\n"); 281 dev_err(&pdev->dev, "unable to request legacy PWM\n");
@@ -339,6 +340,8 @@ static int pwm_backlight_remove(struct platform_device *pdev)
339 340
340 if (pb->exit) 341 if (pb->exit)
341 pb->exit(&pdev->dev); 342 pb->exit(&pdev->dev);
343 if (pb->legacy)
344 pwm_free(pb->pwm);
342 345
343 return 0; 346 return 0;
344} 347}
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index eb976ee3a02f..ea437245562e 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -3624,7 +3624,7 @@ static int __init fb_console_init(void)
3624 return 0; 3624 return 0;
3625} 3625}
3626 3626
3627module_init(fb_console_init); 3627fs_initcall(fb_console_init);
3628 3628
3629#ifdef MODULE 3629#ifdef MODULE
3630 3630
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index c7bf606a8706..4916c97216f8 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -999,23 +999,6 @@ config FB_ATMEL
999 help 999 help
1000 This enables support for the AT91/AT32 LCD Controller. 1000 This enables support for the AT91/AT32 LCD Controller.
1001 1001
1002config FB_INTSRAM
1003 bool "Frame Buffer in internal SRAM"
1004 depends on FB_ATMEL && ARCH_AT91SAM9261
1005 help
1006 Say Y if you want to map Frame Buffer in internal SRAM. Say N if you want
1007 to let frame buffer in external SDRAM.
1008
1009config FB_ATMEL_STN
1010 bool "Use a STN display with AT91/AT32 LCD Controller"
1011 depends on FB_ATMEL && (MACH_AT91SAM9261EK || MACH_AT91SAM9G10EK)
1012 default n
1013 help
1014 Say Y if you want to connect a STN LCD display to the AT91/AT32 LCD
1015 Controller. Say N if you want to connect a TFT.
1016
1017 If unsure, say N.
1018
1019config FB_NVIDIA 1002config FB_NVIDIA
1020 tristate "nVidia Framebuffer Support" 1003 tristate "nVidia Framebuffer Support"
1021 depends on FB && PCI 1004 depends on FB && PCI
@@ -2425,7 +2408,7 @@ config FB_JZ4740
2425 2408
2426config FB_MXS 2409config FB_MXS
2427 tristate "MXS LCD framebuffer support" 2410 tristate "MXS LCD framebuffer support"
2428 depends on FB && ARCH_MXS 2411 depends on FB && (ARCH_MXS || ARCH_MXC)
2429 select FB_CFB_FILLRECT 2412 select FB_CFB_FILLRECT
2430 select FB_CFB_COPYAREA 2413 select FB_CFB_COPYAREA
2431 select FB_CFB_IMAGEBLIT 2414 select FB_CFB_IMAGEBLIT
diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c
index 6ad23bd3523a..32c0b6b28097 100644
--- a/drivers/video/fbdev/amba-clcd.c
+++ b/drivers/video/fbdev/amba-clcd.c
@@ -27,7 +27,6 @@
27#include <linux/bitops.h> 27#include <linux/bitops.h>
28#include <linux/clk.h> 28#include <linux/clk.h>
29#include <linux/hardirq.h> 29#include <linux/hardirq.h>
30#include <linux/dma-mapping.h>
31#include <linux/of.h> 30#include <linux/of.h>
32#include <linux/of_address.h> 31#include <linux/of_address.h>
33#include <linux/of_graph.h> 32#include <linux/of_graph.h>
diff --git a/drivers/video/fbdev/arkfb.c b/drivers/video/fbdev/arkfb.c
index adc4ea2cc5a0..b305a1e7cc76 100644
--- a/drivers/video/fbdev/arkfb.c
+++ b/drivers/video/fbdev/arkfb.c
@@ -1016,7 +1016,7 @@ static int ark_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
1016 1016
1017 pcibios_bus_to_resource(dev->bus, &vga_res, &bus_reg); 1017 pcibios_bus_to_resource(dev->bus, &vga_res, &bus_reg);
1018 1018
1019 par->state.vgabase = (void __iomem *) vga_res.start; 1019 par->state.vgabase = (void __iomem *) (unsigned long) vga_res.start;
1020 1020
1021 /* FIXME get memsize */ 1021 /* FIXME get memsize */
1022 regval = vga_rseq(par->state.vgabase, 0x10); 1022 regval = vga_rseq(par->state.vgabase, 0x10);
diff --git a/drivers/video/fbdev/gbefb.c b/drivers/video/fbdev/gbefb.c
index 4aa56ba78f32..6d9ef39810c8 100644
--- a/drivers/video/fbdev/gbefb.c
+++ b/drivers/video/fbdev/gbefb.c
@@ -54,7 +54,8 @@ struct gbefb_par {
54#endif 54#endif
55#endif 55#endif
56#ifdef CONFIG_X86 56#ifdef CONFIG_X86
57#define pgprot_fb(_prot) ((_prot) | _PAGE_PCD) 57#define pgprot_fb(_prot) (((_prot) & ~_PAGE_CACHE_MASK) | \
58 cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS))
58#endif 59#endif
59 60
60/* 61/*
diff --git a/drivers/video/fbdev/mmp/core.c b/drivers/video/fbdev/mmp/core.c
index b563b920f159..a0f496049db7 100644
--- a/drivers/video/fbdev/mmp/core.c
+++ b/drivers/video/fbdev/mmp/core.c
@@ -223,10 +223,10 @@ struct mmp_path *mmp_register_path(struct mmp_path_info *info)
223EXPORT_SYMBOL_GPL(mmp_register_path); 223EXPORT_SYMBOL_GPL(mmp_register_path);
224 224
225/* 225/*
226 * mmp_unregister_path - unregister and destory path 226 * mmp_unregister_path - unregister and destroy path
227 * @p: path to be destoried. 227 * @p: path to be destroyed.
228 * 228 *
229 * this function registers path and destorys it. 229 * this function registers path and destroys it.
230 */ 230 */
231void mmp_unregister_path(struct mmp_path *path) 231void mmp_unregister_path(struct mmp_path *path)
232{ 232{
diff --git a/drivers/video/fbdev/mmp/hw/mmp_ctrl.c b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
index 8621a9f2bdcc..3c12bd83b561 100644
--- a/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
+++ b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
@@ -441,8 +441,7 @@ static void path_deinit(struct mmphw_path_plat *path_plat)
441 if (!path_plat) 441 if (!path_plat)
442 return; 442 return;
443 443
444 if (path_plat->path) 444 mmp_unregister_path(path_plat->path);
445 mmp_unregister_path(path_plat->path);
446} 445}
447 446
448static int mmphw_probe(struct platform_device *pdev) 447static int mmphw_probe(struct platform_device *pdev)
diff --git a/drivers/video/fbdev/mx3fb.c b/drivers/video/fbdev/mx3fb.c
index 23ec781e9a61..f23fca0be9d7 100644
--- a/drivers/video/fbdev/mx3fb.c
+++ b/drivers/video/fbdev/mx3fb.c
@@ -334,8 +334,7 @@ static void mx3fb_init_backlight(struct mx3fb_data *fbd)
334 334
335static void mx3fb_exit_backlight(struct mx3fb_data *fbd) 335static void mx3fb_exit_backlight(struct mx3fb_data *fbd)
336{ 336{
337 if (fbd->bl) 337 backlight_device_unregister(fbd->bl);
338 backlight_device_unregister(fbd->bl);
339} 338}
340 339
341static void mx3fb_dma_done(void *); 340static void mx3fb_dma_done(void *);
diff --git a/drivers/video/fbdev/mxsfb.c b/drivers/video/fbdev/mxsfb.c
index accf48a2cce4..f8ac4a452f26 100644
--- a/drivers/video/fbdev/mxsfb.c
+++ b/drivers/video/fbdev/mxsfb.c
@@ -172,6 +172,8 @@ struct mxsfb_info {
172 struct fb_info fb_info; 172 struct fb_info fb_info;
173 struct platform_device *pdev; 173 struct platform_device *pdev;
174 struct clk *clk; 174 struct clk *clk;
175 struct clk *clk_axi;
176 struct clk *clk_disp_axi;
175 void __iomem *base; /* registers */ 177 void __iomem *base; /* registers */
176 unsigned allocated_size; 178 unsigned allocated_size;
177 int enabled; 179 int enabled;
@@ -331,6 +333,11 @@ static void mxsfb_enable_controller(struct fb_info *fb_info)
331 } 333 }
332 } 334 }
333 335
336 if (host->clk_axi)
337 clk_prepare_enable(host->clk_axi);
338
339 if (host->clk_disp_axi)
340 clk_prepare_enable(host->clk_disp_axi);
334 clk_prepare_enable(host->clk); 341 clk_prepare_enable(host->clk);
335 clk_set_rate(host->clk, PICOS2KHZ(fb_info->var.pixclock) * 1000U); 342 clk_set_rate(host->clk, PICOS2KHZ(fb_info->var.pixclock) * 1000U);
336 343
@@ -374,6 +381,10 @@ static void mxsfb_disable_controller(struct fb_info *fb_info)
374 writel(reg & ~VDCTRL4_SYNC_SIGNALS_ON, host->base + LCDC_VDCTRL4); 381 writel(reg & ~VDCTRL4_SYNC_SIGNALS_ON, host->base + LCDC_VDCTRL4);
375 382
376 clk_disable_unprepare(host->clk); 383 clk_disable_unprepare(host->clk);
384 if (host->clk_disp_axi)
385 clk_disable_unprepare(host->clk_disp_axi);
386 if (host->clk_axi)
387 clk_disable_unprepare(host->clk_axi);
377 388
378 host->enabled = 0; 389 host->enabled = 0;
379 390
@@ -867,6 +878,14 @@ static int mxsfb_probe(struct platform_device *pdev)
867 goto fb_release; 878 goto fb_release;
868 } 879 }
869 880
881 host->clk_axi = devm_clk_get(&host->pdev->dev, "axi");
882 if (IS_ERR(host->clk_axi))
883 host->clk_axi = NULL;
884
885 host->clk_disp_axi = devm_clk_get(&host->pdev->dev, "disp_axi");
886 if (IS_ERR(host->clk_disp_axi))
887 host->clk_disp_axi = NULL;
888
870 host->reg_lcd = devm_regulator_get(&pdev->dev, "lcd"); 889 host->reg_lcd = devm_regulator_get(&pdev->dev, "lcd");
871 if (IS_ERR(host->reg_lcd)) 890 if (IS_ERR(host->reg_lcd))
872 host->reg_lcd = NULL; 891 host->reg_lcd = NULL;
diff --git a/drivers/video/fbdev/omap2/displays-new/connector-dvi.c b/drivers/video/fbdev/omap2/displays-new/connector-dvi.c
index 2dfb6e5ff0cc..3d38e478bc64 100644
--- a/drivers/video/fbdev/omap2/displays-new/connector-dvi.c
+++ b/drivers/video/fbdev/omap2/displays-new/connector-dvi.c
@@ -262,8 +262,7 @@ static int dvic_probe_pdata(struct platform_device *pdev)
262 262
263 in = omap_dss_find_output(pdata->source); 263 in = omap_dss_find_output(pdata->source);
264 if (in == NULL) { 264 if (in == NULL) {
265 if (ddata->i2c_adapter) 265 i2c_put_adapter(ddata->i2c_adapter);
266 i2c_put_adapter(ddata->i2c_adapter);
267 266
268 dev_err(&pdev->dev, "Failed to find video source\n"); 267 dev_err(&pdev->dev, "Failed to find video source\n");
269 return -EPROBE_DEFER; 268 return -EPROBE_DEFER;
@@ -352,8 +351,7 @@ static int dvic_probe(struct platform_device *pdev)
352err_reg: 351err_reg:
353 omap_dss_put_device(ddata->in); 352 omap_dss_put_device(ddata->in);
354 353
355 if (ddata->i2c_adapter) 354 i2c_put_adapter(ddata->i2c_adapter);
356 i2c_put_adapter(ddata->i2c_adapter);
357 355
358 return r; 356 return r;
359} 357}
@@ -371,8 +369,7 @@ static int __exit dvic_remove(struct platform_device *pdev)
371 369
372 omap_dss_put_device(in); 370 omap_dss_put_device(in);
373 371
374 if (ddata->i2c_adapter) 372 i2c_put_adapter(ddata->i2c_adapter);
375 i2c_put_adapter(ddata->i2c_adapter);
376 373
377 return 0; 374 return 0;
378} 375}
diff --git a/drivers/video/fbdev/omap2/displays-new/connector-hdmi.c b/drivers/video/fbdev/omap2/displays-new/connector-hdmi.c
index 7b25967a91eb..219f14f59672 100644
--- a/drivers/video/fbdev/omap2/displays-new/connector-hdmi.c
+++ b/drivers/video/fbdev/omap2/displays-new/connector-hdmi.c
@@ -170,98 +170,6 @@ static bool hdmic_detect(struct omap_dss_device *dssdev)
170 return in->ops.hdmi->detect(in); 170 return in->ops.hdmi->detect(in);
171} 171}
172 172
173static int hdmic_audio_enable(struct omap_dss_device *dssdev)
174{
175 struct panel_drv_data *ddata = to_panel_data(dssdev);
176 struct omap_dss_device *in = ddata->in;
177 int r;
178
179 /* enable audio only if the display is active */
180 if (!omapdss_device_is_enabled(dssdev))
181 return -EPERM;
182
183 r = in->ops.hdmi->audio_enable(in);
184 if (r)
185 return r;
186
187 dssdev->audio_state = OMAP_DSS_AUDIO_ENABLED;
188
189 return 0;
190}
191
192static void hdmic_audio_disable(struct omap_dss_device *dssdev)
193{
194 struct panel_drv_data *ddata = to_panel_data(dssdev);
195 struct omap_dss_device *in = ddata->in;
196
197 in->ops.hdmi->audio_disable(in);
198
199 dssdev->audio_state = OMAP_DSS_AUDIO_DISABLED;
200}
201
202static int hdmic_audio_start(struct omap_dss_device *dssdev)
203{
204 struct panel_drv_data *ddata = to_panel_data(dssdev);
205 struct omap_dss_device *in = ddata->in;
206 int r;
207
208 /*
209 * No need to check the panel state. It was checked when trasitioning
210 * to AUDIO_ENABLED.
211 */
212 if (dssdev->audio_state != OMAP_DSS_AUDIO_ENABLED)
213 return -EPERM;
214
215 r = in->ops.hdmi->audio_start(in);
216 if (r)
217 return r;
218
219 dssdev->audio_state = OMAP_DSS_AUDIO_PLAYING;
220
221 return 0;
222}
223
224static void hdmic_audio_stop(struct omap_dss_device *dssdev)
225{
226 struct panel_drv_data *ddata = to_panel_data(dssdev);
227 struct omap_dss_device *in = ddata->in;
228
229 in->ops.hdmi->audio_stop(in);
230
231 dssdev->audio_state = OMAP_DSS_AUDIO_ENABLED;
232}
233
234static bool hdmic_audio_supported(struct omap_dss_device *dssdev)
235{
236 struct panel_drv_data *ddata = to_panel_data(dssdev);
237 struct omap_dss_device *in = ddata->in;
238
239 if (!omapdss_device_is_enabled(dssdev))
240 return false;
241
242 return in->ops.hdmi->audio_supported(in);
243}
244
245static int hdmic_audio_config(struct omap_dss_device *dssdev,
246 struct omap_dss_audio *audio)
247{
248 struct panel_drv_data *ddata = to_panel_data(dssdev);
249 struct omap_dss_device *in = ddata->in;
250 int r;
251
252 /* config audio only if the display is active */
253 if (!omapdss_device_is_enabled(dssdev))
254 return -EPERM;
255
256 r = in->ops.hdmi->audio_config(in, audio);
257 if (r)
258 return r;
259
260 dssdev->audio_state = OMAP_DSS_AUDIO_CONFIGURED;
261
262 return 0;
263}
264
265static int hdmic_set_hdmi_mode(struct omap_dss_device *dssdev, bool hdmi_mode) 173static int hdmic_set_hdmi_mode(struct omap_dss_device *dssdev, bool hdmi_mode)
266{ 174{
267 struct panel_drv_data *ddata = to_panel_data(dssdev); 175 struct panel_drv_data *ddata = to_panel_data(dssdev);
@@ -296,13 +204,6 @@ static struct omap_dss_driver hdmic_driver = {
296 .detect = hdmic_detect, 204 .detect = hdmic_detect,
297 .set_hdmi_mode = hdmic_set_hdmi_mode, 205 .set_hdmi_mode = hdmic_set_hdmi_mode,
298 .set_hdmi_infoframe = hdmic_set_infoframe, 206 .set_hdmi_infoframe = hdmic_set_infoframe,
299
300 .audio_enable = hdmic_audio_enable,
301 .audio_disable = hdmic_audio_disable,
302 .audio_start = hdmic_audio_start,
303 .audio_stop = hdmic_audio_stop,
304 .audio_supported = hdmic_audio_supported,
305 .audio_config = hdmic_audio_config,
306}; 207};
307 208
308static int hdmic_probe_pdata(struct platform_device *pdev) 209static int hdmic_probe_pdata(struct platform_device *pdev)
diff --git a/drivers/video/fbdev/omap2/displays-new/encoder-tfp410.c b/drivers/video/fbdev/omap2/displays-new/encoder-tfp410.c
index 47ee7cdee1c5..e349064ed615 100644
--- a/drivers/video/fbdev/omap2/displays-new/encoder-tfp410.c
+++ b/drivers/video/fbdev/omap2/displays-new/encoder-tfp410.c
@@ -249,6 +249,7 @@ static int tfp410_probe(struct platform_device *pdev)
249 dssdev->output_type = OMAP_DISPLAY_TYPE_DVI; 249 dssdev->output_type = OMAP_DISPLAY_TYPE_DVI;
250 dssdev->owner = THIS_MODULE; 250 dssdev->owner = THIS_MODULE;
251 dssdev->phy.dpi.data_lines = ddata->data_lines; 251 dssdev->phy.dpi.data_lines = ddata->data_lines;
252 dssdev->port_num = 1;
252 253
253 r = omapdss_register_output(dssdev); 254 r = omapdss_register_output(dssdev);
254 if (r) { 255 if (r) {
diff --git a/drivers/video/fbdev/omap2/displays-new/encoder-tpd12s015.c b/drivers/video/fbdev/omap2/displays-new/encoder-tpd12s015.c
index c4abd56dd846..c7a3ce2c5120 100644
--- a/drivers/video/fbdev/omap2/displays-new/encoder-tpd12s015.c
+++ b/drivers/video/fbdev/omap2/displays-new/encoder-tpd12s015.c
@@ -193,55 +193,6 @@ static bool tpd_detect(struct omap_dss_device *dssdev)
193 return gpio_get_value_cansleep(ddata->hpd_gpio); 193 return gpio_get_value_cansleep(ddata->hpd_gpio);
194} 194}
195 195
196static int tpd_audio_enable(struct omap_dss_device *dssdev)
197{
198 struct panel_drv_data *ddata = to_panel_data(dssdev);
199 struct omap_dss_device *in = ddata->in;
200
201 return in->ops.hdmi->audio_enable(in);
202}
203
204static void tpd_audio_disable(struct omap_dss_device *dssdev)
205{
206 struct panel_drv_data *ddata = to_panel_data(dssdev);
207 struct omap_dss_device *in = ddata->in;
208
209 in->ops.hdmi->audio_disable(in);
210}
211
212static int tpd_audio_start(struct omap_dss_device *dssdev)
213{
214 struct panel_drv_data *ddata = to_panel_data(dssdev);
215 struct omap_dss_device *in = ddata->in;
216
217 return in->ops.hdmi->audio_start(in);
218}
219
220static void tpd_audio_stop(struct omap_dss_device *dssdev)
221{
222 struct panel_drv_data *ddata = to_panel_data(dssdev);
223 struct omap_dss_device *in = ddata->in;
224
225 in->ops.hdmi->audio_stop(in);
226}
227
228static bool tpd_audio_supported(struct omap_dss_device *dssdev)
229{
230 struct panel_drv_data *ddata = to_panel_data(dssdev);
231 struct omap_dss_device *in = ddata->in;
232
233 return in->ops.hdmi->audio_supported(in);
234}
235
236static int tpd_audio_config(struct omap_dss_device *dssdev,
237 struct omap_dss_audio *audio)
238{
239 struct panel_drv_data *ddata = to_panel_data(dssdev);
240 struct omap_dss_device *in = ddata->in;
241
242 return in->ops.hdmi->audio_config(in, audio);
243}
244
245static int tpd_set_infoframe(struct omap_dss_device *dssdev, 196static int tpd_set_infoframe(struct omap_dss_device *dssdev,
246 const struct hdmi_avi_infoframe *avi) 197 const struct hdmi_avi_infoframe *avi)
247{ 198{
@@ -275,13 +226,6 @@ static const struct omapdss_hdmi_ops tpd_hdmi_ops = {
275 .detect = tpd_detect, 226 .detect = tpd_detect,
276 .set_infoframe = tpd_set_infoframe, 227 .set_infoframe = tpd_set_infoframe,
277 .set_hdmi_mode = tpd_set_hdmi_mode, 228 .set_hdmi_mode = tpd_set_hdmi_mode,
278
279 .audio_enable = tpd_audio_enable,
280 .audio_disable = tpd_audio_disable,
281 .audio_start = tpd_audio_start,
282 .audio_stop = tpd_audio_stop,
283 .audio_supported = tpd_audio_supported,
284 .audio_config = tpd_audio_config,
285}; 229};
286 230
287static int tpd_probe_pdata(struct platform_device *pdev) 231static int tpd_probe_pdata(struct platform_device *pdev)
@@ -409,6 +353,7 @@ static int tpd_probe(struct platform_device *pdev)
409 dssdev->type = OMAP_DISPLAY_TYPE_HDMI; 353 dssdev->type = OMAP_DISPLAY_TYPE_HDMI;
410 dssdev->output_type = OMAP_DISPLAY_TYPE_HDMI; 354 dssdev->output_type = OMAP_DISPLAY_TYPE_HDMI;
411 dssdev->owner = THIS_MODULE; 355 dssdev->owner = THIS_MODULE;
356 dssdev->port_num = 1;
412 357
413 in = ddata->in; 358 in = ddata->in;
414 359
diff --git a/drivers/video/fbdev/omap2/dss/Kconfig b/drivers/video/fbdev/omap2/dss/Kconfig
index 3d5eb6c36c22..d1fa730c7d54 100644
--- a/drivers/video/fbdev/omap2/dss/Kconfig
+++ b/drivers/video/fbdev/omap2/dss/Kconfig
@@ -74,9 +74,6 @@ config OMAP4_DSS_HDMI
74 help 74 help
75 HDMI support for OMAP4 based SoCs. 75 HDMI support for OMAP4 based SoCs.
76 76
77config OMAP4_DSS_HDMI_AUDIO
78 bool
79
80config OMAP5_DSS_HDMI 77config OMAP5_DSS_HDMI
81 bool "HDMI support for OMAP5" 78 bool "HDMI support for OMAP5"
82 default n 79 default n
@@ -86,10 +83,6 @@ config OMAP5_DSS_HDMI
86 Definition Multimedia Interface. See http://www.hdmi.org/ for HDMI 83 Definition Multimedia Interface. See http://www.hdmi.org/ for HDMI
87 specification. 84 specification.
88 85
89config OMAP5_DSS_HDMI_AUDIO
90 depends on OMAP5_DSS_HDMI
91 bool
92
93config OMAP2_DSS_SDI 86config OMAP2_DSS_SDI
94 bool "SDI support" 87 bool "SDI support"
95 default n 88 default n
diff --git a/drivers/video/fbdev/omap2/dss/Makefile b/drivers/video/fbdev/omap2/dss/Makefile
index 245f933060ee..2ea9d382354c 100644
--- a/drivers/video/fbdev/omap2/dss/Makefile
+++ b/drivers/video/fbdev/omap2/dss/Makefile
@@ -2,7 +2,7 @@ obj-$(CONFIG_OMAP2_DSS_INIT) += omapdss-boot-init.o
2obj-$(CONFIG_OMAP2_DSS) += omapdss.o 2obj-$(CONFIG_OMAP2_DSS) += omapdss.o
3# Core DSS files 3# Core DSS files
4omapdss-y := core.o dss.o dss_features.o dispc.o dispc_coefs.o display.o \ 4omapdss-y := core.o dss.o dss_features.o dispc.o dispc_coefs.o display.o \
5 output.o dss-of.o 5 output.o dss-of.o pll.o
6# DSS compat layer files 6# DSS compat layer files
7omapdss-y += manager.o manager-sysfs.o overlay.o overlay-sysfs.o apply.o \ 7omapdss-y += manager.o manager-sysfs.o overlay.o overlay-sysfs.o apply.o \
8 dispc-compat.o display-sysfs.o 8 dispc-compat.o display-sysfs.o
diff --git a/drivers/video/fbdev/omap2/dss/dispc.c b/drivers/video/fbdev/omap2/dss/dispc.c
index 0e9a74bb9fc2..0729c08ac75a 100644
--- a/drivers/video/fbdev/omap2/dss/dispc.c
+++ b/drivers/video/fbdev/omap2/dss/dispc.c
@@ -3028,7 +3028,7 @@ static void dispc_mgr_get_lcd_divisor(enum omap_channel channel, int *lck_div,
3028 3028
3029unsigned long dispc_fclk_rate(void) 3029unsigned long dispc_fclk_rate(void)
3030{ 3030{
3031 struct platform_device *dsidev; 3031 struct dss_pll *pll;
3032 unsigned long r = 0; 3032 unsigned long r = 0;
3033 3033
3034 switch (dss_get_dispc_clk_source()) { 3034 switch (dss_get_dispc_clk_source()) {
@@ -3036,12 +3036,12 @@ unsigned long dispc_fclk_rate(void)
3036 r = dss_get_dispc_clk_rate(); 3036 r = dss_get_dispc_clk_rate();
3037 break; 3037 break;
3038 case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC: 3038 case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC:
3039 dsidev = dsi_get_dsidev_from_id(0); 3039 pll = dss_pll_find("dsi0");
3040 r = dsi_get_pll_hsdiv_dispc_rate(dsidev); 3040 r = pll->cinfo.clkout[0];
3041 break; 3041 break;
3042 case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC: 3042 case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC:
3043 dsidev = dsi_get_dsidev_from_id(1); 3043 pll = dss_pll_find("dsi1");
3044 r = dsi_get_pll_hsdiv_dispc_rate(dsidev); 3044 r = pll->cinfo.clkout[0];
3045 break; 3045 break;
3046 default: 3046 default:
3047 BUG(); 3047 BUG();
@@ -3053,7 +3053,7 @@ unsigned long dispc_fclk_rate(void)
3053 3053
3054unsigned long dispc_mgr_lclk_rate(enum omap_channel channel) 3054unsigned long dispc_mgr_lclk_rate(enum omap_channel channel)
3055{ 3055{
3056 struct platform_device *dsidev; 3056 struct dss_pll *pll;
3057 int lcd; 3057 int lcd;
3058 unsigned long r; 3058 unsigned long r;
3059 u32 l; 3059 u32 l;
@@ -3068,12 +3068,12 @@ unsigned long dispc_mgr_lclk_rate(enum omap_channel channel)
3068 r = dss_get_dispc_clk_rate(); 3068 r = dss_get_dispc_clk_rate();
3069 break; 3069 break;
3070 case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC: 3070 case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC:
3071 dsidev = dsi_get_dsidev_from_id(0); 3071 pll = dss_pll_find("dsi0");
3072 r = dsi_get_pll_hsdiv_dispc_rate(dsidev); 3072 r = pll->cinfo.clkout[0];
3073 break; 3073 break;
3074 case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC: 3074 case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC:
3075 dsidev = dsi_get_dsidev_from_id(1); 3075 pll = dss_pll_find("dsi1");
3076 r = dsi_get_pll_hsdiv_dispc_rate(dsidev); 3076 r = pll->cinfo.clkout[0];
3077 break; 3077 break;
3078 default: 3078 default:
3079 BUG(); 3079 BUG();
diff --git a/drivers/video/fbdev/omap2/dss/dpi.c b/drivers/video/fbdev/omap2/dss/dpi.c
index 4a3363dae74a..2edf5caa002f 100644
--- a/drivers/video/fbdev/omap2/dss/dpi.c
+++ b/drivers/video/fbdev/omap2/dss/dpi.c
@@ -31,17 +31,20 @@
31#include <linux/regulator/consumer.h> 31#include <linux/regulator/consumer.h>
32#include <linux/string.h> 32#include <linux/string.h>
33#include <linux/of.h> 33#include <linux/of.h>
34#include <linux/clk.h>
34 35
35#include <video/omapdss.h> 36#include <video/omapdss.h>
36 37
37#include "dss.h" 38#include "dss.h"
38#include "dss_features.h" 39#include "dss_features.h"
39 40
40static struct { 41#define HSDIV_DISPC 0
42
43struct dpi_data {
41 struct platform_device *pdev; 44 struct platform_device *pdev;
42 45
43 struct regulator *vdds_dsi_reg; 46 struct regulator *vdds_dsi_reg;
44 struct platform_device *dsidev; 47 struct dss_pll *pll;
45 48
46 struct mutex lock; 49 struct mutex lock;
47 50
@@ -52,9 +55,20 @@ static struct {
52 struct omap_dss_device output; 55 struct omap_dss_device output;
53 56
54 bool port_initialized; 57 bool port_initialized;
55} dpi; 58};
59
60static struct dpi_data *dpi_get_data_from_dssdev(struct omap_dss_device *dssdev)
61{
62 return container_of(dssdev, struct dpi_data, output);
63}
64
65/* only used in non-DT mode */
66static struct dpi_data *dpi_get_data_from_pdev(struct platform_device *pdev)
67{
68 return dev_get_drvdata(&pdev->dev);
69}
56 70
57static struct platform_device *dpi_get_dsidev(enum omap_channel channel) 71static struct dss_pll *dpi_get_pll(enum omap_channel channel)
58{ 72{
59 /* 73 /*
60 * XXX we can't currently use DSI PLL for DPI with OMAP3, as the DSI PLL 74 * XXX we can't currently use DSI PLL for DPI with OMAP3, as the DSI PLL
@@ -75,9 +89,9 @@ static struct platform_device *dpi_get_dsidev(enum omap_channel channel)
75 case OMAPDSS_VER_OMAP4: 89 case OMAPDSS_VER_OMAP4:
76 switch (channel) { 90 switch (channel) {
77 case OMAP_DSS_CHANNEL_LCD: 91 case OMAP_DSS_CHANNEL_LCD:
78 return dsi_get_dsidev_from_id(0); 92 return dss_pll_find("dsi0");
79 case OMAP_DSS_CHANNEL_LCD2: 93 case OMAP_DSS_CHANNEL_LCD2:
80 return dsi_get_dsidev_from_id(1); 94 return dss_pll_find("dsi1");
81 default: 95 default:
82 return NULL; 96 return NULL;
83 } 97 }
@@ -85,9 +99,9 @@ static struct platform_device *dpi_get_dsidev(enum omap_channel channel)
85 case OMAPDSS_VER_OMAP5: 99 case OMAPDSS_VER_OMAP5:
86 switch (channel) { 100 switch (channel) {
87 case OMAP_DSS_CHANNEL_LCD: 101 case OMAP_DSS_CHANNEL_LCD:
88 return dsi_get_dsidev_from_id(0); 102 return dss_pll_find("dsi0");
89 case OMAP_DSS_CHANNEL_LCD3: 103 case OMAP_DSS_CHANNEL_LCD3:
90 return dsi_get_dsidev_from_id(1); 104 return dss_pll_find("dsi1");
91 default: 105 default:
92 return NULL; 106 return NULL;
93 } 107 }
@@ -114,7 +128,7 @@ static enum omap_dss_clk_source dpi_get_alt_clk_src(enum omap_channel channel)
114} 128}
115 129
116struct dpi_clk_calc_ctx { 130struct dpi_clk_calc_ctx {
117 struct platform_device *dsidev; 131 struct dss_pll *pll;
118 132
119 /* inputs */ 133 /* inputs */
120 134
@@ -122,7 +136,7 @@ struct dpi_clk_calc_ctx {
122 136
123 /* outputs */ 137 /* outputs */
124 138
125 struct dsi_clock_info dsi_cinfo; 139 struct dss_pll_clock_info dsi_cinfo;
126 unsigned long fck; 140 unsigned long fck;
127 struct dispc_clock_info dispc_cinfo; 141 struct dispc_clock_info dispc_cinfo;
128}; 142};
@@ -154,7 +168,7 @@ static bool dpi_calc_dispc_cb(int lckd, int pckd, unsigned long lck,
154} 168}
155 169
156 170
157static bool dpi_calc_hsdiv_cb(int regm_dispc, unsigned long dispc, 171static bool dpi_calc_hsdiv_cb(int m_dispc, unsigned long dispc,
158 void *data) 172 void *data)
159{ 173{
160 struct dpi_clk_calc_ctx *ctx = data; 174 struct dpi_clk_calc_ctx *ctx = data;
@@ -164,30 +178,31 @@ static bool dpi_calc_hsdiv_cb(int regm_dispc, unsigned long dispc,
164 * shifted. So skip all odd dividers when the pixel clock is on the 178 * shifted. So skip all odd dividers when the pixel clock is on the
165 * higher side. 179 * higher side.
166 */ 180 */
167 if (regm_dispc > 1 && regm_dispc % 2 != 0 && ctx->pck_min >= 100000000) 181 if (m_dispc > 1 && m_dispc % 2 != 0 && ctx->pck_min >= 100000000)
168 return false; 182 return false;
169 183
170 ctx->dsi_cinfo.regm_dispc = regm_dispc; 184 ctx->dsi_cinfo.mX[HSDIV_DISPC] = m_dispc;
171 ctx->dsi_cinfo.dsi_pll_hsdiv_dispc_clk = dispc; 185 ctx->dsi_cinfo.clkout[HSDIV_DISPC] = dispc;
172 186
173 return dispc_div_calc(dispc, ctx->pck_min, ctx->pck_max, 187 return dispc_div_calc(dispc, ctx->pck_min, ctx->pck_max,
174 dpi_calc_dispc_cb, ctx); 188 dpi_calc_dispc_cb, ctx);
175} 189}
176 190
177 191
178static bool dpi_calc_pll_cb(int regn, int regm, unsigned long fint, 192static bool dpi_calc_pll_cb(int n, int m, unsigned long fint,
179 unsigned long pll, 193 unsigned long clkdco,
180 void *data) 194 void *data)
181{ 195{
182 struct dpi_clk_calc_ctx *ctx = data; 196 struct dpi_clk_calc_ctx *ctx = data;
183 197
184 ctx->dsi_cinfo.regn = regn; 198 ctx->dsi_cinfo.n = n;
185 ctx->dsi_cinfo.regm = regm; 199 ctx->dsi_cinfo.m = m;
186 ctx->dsi_cinfo.fint = fint; 200 ctx->dsi_cinfo.fint = fint;
187 ctx->dsi_cinfo.clkin4ddr = pll; 201 ctx->dsi_cinfo.clkdco = clkdco;
188 202
189 return dsi_hsdiv_calc(ctx->dsidev, pll, ctx->pck_min, 203 return dss_pll_hsdiv_calc(ctx->pll, clkdco,
190 dpi_calc_hsdiv_cb, ctx); 204 ctx->pck_min, dss_feat_get_param_max(FEAT_PARAM_DSS_FCK),
205 dpi_calc_hsdiv_cb, ctx);
191} 206}
192 207
193static bool dpi_calc_dss_cb(unsigned long fck, void *data) 208static bool dpi_calc_dss_cb(unsigned long fck, void *data)
@@ -200,23 +215,23 @@ static bool dpi_calc_dss_cb(unsigned long fck, void *data)
200 dpi_calc_dispc_cb, ctx); 215 dpi_calc_dispc_cb, ctx);
201} 216}
202 217
203static bool dpi_dsi_clk_calc(unsigned long pck, struct dpi_clk_calc_ctx *ctx) 218static bool dpi_dsi_clk_calc(struct dpi_data *dpi, unsigned long pck,
219 struct dpi_clk_calc_ctx *ctx)
204{ 220{
205 unsigned long clkin; 221 unsigned long clkin;
206 unsigned long pll_min, pll_max; 222 unsigned long pll_min, pll_max;
207 223
208 clkin = dsi_get_pll_clkin(dpi.dsidev);
209
210 memset(ctx, 0, sizeof(*ctx)); 224 memset(ctx, 0, sizeof(*ctx));
211 ctx->dsidev = dpi.dsidev; 225 ctx->pll = dpi->pll;
212 ctx->pck_min = pck - 1000; 226 ctx->pck_min = pck - 1000;
213 ctx->pck_max = pck + 1000; 227 ctx->pck_max = pck + 1000;
214 ctx->dsi_cinfo.clkin = clkin;
215 228
216 pll_min = 0; 229 pll_min = 0;
217 pll_max = 0; 230 pll_max = 0;
218 231
219 return dsi_pll_calc(dpi.dsidev, clkin, 232 clkin = clk_get_rate(ctx->pll->clkin);
233
234 return dss_pll_calc(ctx->pll, clkin,
220 pll_min, pll_max, 235 pll_min, pll_max,
221 dpi_calc_pll_cb, ctx); 236 dpi_calc_pll_cb, ctx);
222} 237}
@@ -252,7 +267,7 @@ static bool dpi_dss_clk_calc(unsigned long pck, struct dpi_clk_calc_ctx *ctx)
252 267
253 268
254 269
255static int dpi_set_dsi_clk(enum omap_channel channel, 270static int dpi_set_dsi_clk(struct dpi_data *dpi, enum omap_channel channel,
256 unsigned long pck_req, unsigned long *fck, int *lck_div, 271 unsigned long pck_req, unsigned long *fck, int *lck_div,
257 int *pck_div) 272 int *pck_div)
258{ 273{
@@ -260,28 +275,28 @@ static int dpi_set_dsi_clk(enum omap_channel channel,
260 int r; 275 int r;
261 bool ok; 276 bool ok;
262 277
263 ok = dpi_dsi_clk_calc(pck_req, &ctx); 278 ok = dpi_dsi_clk_calc(dpi, pck_req, &ctx);
264 if (!ok) 279 if (!ok)
265 return -EINVAL; 280 return -EINVAL;
266 281
267 r = dsi_pll_set_clock_div(dpi.dsidev, &ctx.dsi_cinfo); 282 r = dss_pll_set_config(dpi->pll, &ctx.dsi_cinfo);
268 if (r) 283 if (r)
269 return r; 284 return r;
270 285
271 dss_select_lcd_clk_source(channel, 286 dss_select_lcd_clk_source(channel,
272 dpi_get_alt_clk_src(channel)); 287 dpi_get_alt_clk_src(channel));
273 288
274 dpi.mgr_config.clock_info = ctx.dispc_cinfo; 289 dpi->mgr_config.clock_info = ctx.dispc_cinfo;
275 290
276 *fck = ctx.dsi_cinfo.dsi_pll_hsdiv_dispc_clk; 291 *fck = ctx.dsi_cinfo.clkout[HSDIV_DISPC];
277 *lck_div = ctx.dispc_cinfo.lck_div; 292 *lck_div = ctx.dispc_cinfo.lck_div;
278 *pck_div = ctx.dispc_cinfo.pck_div; 293 *pck_div = ctx.dispc_cinfo.pck_div;
279 294
280 return 0; 295 return 0;
281} 296}
282 297
283static int dpi_set_dispc_clk(unsigned long pck_req, unsigned long *fck, 298static int dpi_set_dispc_clk(struct dpi_data *dpi, unsigned long pck_req,
284 int *lck_div, int *pck_div) 299 unsigned long *fck, int *lck_div, int *pck_div)
285{ 300{
286 struct dpi_clk_calc_ctx ctx; 301 struct dpi_clk_calc_ctx ctx;
287 int r; 302 int r;
@@ -295,7 +310,7 @@ static int dpi_set_dispc_clk(unsigned long pck_req, unsigned long *fck,
295 if (r) 310 if (r)
296 return r; 311 return r;
297 312
298 dpi.mgr_config.clock_info = ctx.dispc_cinfo; 313 dpi->mgr_config.clock_info = ctx.dispc_cinfo;
299 314
300 *fck = ctx.fck; 315 *fck = ctx.fck;
301 *lck_div = ctx.dispc_cinfo.lck_div; 316 *lck_div = ctx.dispc_cinfo.lck_div;
@@ -304,19 +319,21 @@ static int dpi_set_dispc_clk(unsigned long pck_req, unsigned long *fck,
304 return 0; 319 return 0;
305} 320}
306 321
307static int dpi_set_mode(struct omap_overlay_manager *mgr) 322static int dpi_set_mode(struct dpi_data *dpi)
308{ 323{
309 struct omap_video_timings *t = &dpi.timings; 324 struct omap_dss_device *out = &dpi->output;
325 struct omap_overlay_manager *mgr = out->manager;
326 struct omap_video_timings *t = &dpi->timings;
310 int lck_div = 0, pck_div = 0; 327 int lck_div = 0, pck_div = 0;
311 unsigned long fck = 0; 328 unsigned long fck = 0;
312 unsigned long pck; 329 unsigned long pck;
313 int r = 0; 330 int r = 0;
314 331
315 if (dpi.dsidev) 332 if (dpi->pll)
316 r = dpi_set_dsi_clk(mgr->id, t->pixelclock, &fck, 333 r = dpi_set_dsi_clk(dpi, mgr->id, t->pixelclock, &fck,
317 &lck_div, &pck_div); 334 &lck_div, &pck_div);
318 else 335 else
319 r = dpi_set_dispc_clk(t->pixelclock, &fck, 336 r = dpi_set_dispc_clk(dpi, t->pixelclock, &fck,
320 &lck_div, &pck_div); 337 &lck_div, &pck_div);
321 if (r) 338 if (r)
322 return r; 339 return r;
@@ -335,28 +352,32 @@ static int dpi_set_mode(struct omap_overlay_manager *mgr)
335 return 0; 352 return 0;
336} 353}
337 354
338static void dpi_config_lcd_manager(struct omap_overlay_manager *mgr) 355static void dpi_config_lcd_manager(struct dpi_data *dpi)
339{ 356{
340 dpi.mgr_config.io_pad_mode = DSS_IO_PAD_MODE_BYPASS; 357 struct omap_dss_device *out = &dpi->output;
358 struct omap_overlay_manager *mgr = out->manager;
359
360 dpi->mgr_config.io_pad_mode = DSS_IO_PAD_MODE_BYPASS;
341 361
342 dpi.mgr_config.stallmode = false; 362 dpi->mgr_config.stallmode = false;
343 dpi.mgr_config.fifohandcheck = false; 363 dpi->mgr_config.fifohandcheck = false;
344 364
345 dpi.mgr_config.video_port_width = dpi.data_lines; 365 dpi->mgr_config.video_port_width = dpi->data_lines;
346 366
347 dpi.mgr_config.lcden_sig_polarity = 0; 367 dpi->mgr_config.lcden_sig_polarity = 0;
348 368
349 dss_mgr_set_lcd_config(mgr, &dpi.mgr_config); 369 dss_mgr_set_lcd_config(mgr, &dpi->mgr_config);
350} 370}
351 371
352static int dpi_display_enable(struct omap_dss_device *dssdev) 372static int dpi_display_enable(struct omap_dss_device *dssdev)
353{ 373{
354 struct omap_dss_device *out = &dpi.output; 374 struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
375 struct omap_dss_device *out = &dpi->output;
355 int r; 376 int r;
356 377
357 mutex_lock(&dpi.lock); 378 mutex_lock(&dpi->lock);
358 379
359 if (dss_has_feature(FEAT_DPI_USES_VDDS_DSI) && !dpi.vdds_dsi_reg) { 380 if (dss_has_feature(FEAT_DPI_USES_VDDS_DSI) && !dpi->vdds_dsi_reg) {
360 DSSERR("no VDSS_DSI regulator\n"); 381 DSSERR("no VDSS_DSI regulator\n");
361 r = -ENODEV; 382 r = -ENODEV;
362 goto err_no_reg; 383 goto err_no_reg;
@@ -369,7 +390,7 @@ static int dpi_display_enable(struct omap_dss_device *dssdev)
369 } 390 }
370 391
371 if (dss_has_feature(FEAT_DPI_USES_VDDS_DSI)) { 392 if (dss_has_feature(FEAT_DPI_USES_VDDS_DSI)) {
372 r = regulator_enable(dpi.vdds_dsi_reg); 393 r = regulator_enable(dpi->vdds_dsi_reg);
373 if (r) 394 if (r)
374 goto err_reg_enable; 395 goto err_reg_enable;
375 } 396 }
@@ -378,25 +399,21 @@ static int dpi_display_enable(struct omap_dss_device *dssdev)
378 if (r) 399 if (r)
379 goto err_get_dispc; 400 goto err_get_dispc;
380 401
381 r = dss_dpi_select_source(out->manager->id); 402 r = dss_dpi_select_source(out->port_num, out->manager->id);
382 if (r) 403 if (r)
383 goto err_src_sel; 404 goto err_src_sel;
384 405
385 if (dpi.dsidev) { 406 if (dpi->pll) {
386 r = dsi_runtime_get(dpi.dsidev); 407 r = dss_pll_enable(dpi->pll);
387 if (r)
388 goto err_get_dsi;
389
390 r = dsi_pll_init(dpi.dsidev, 0, 1);
391 if (r) 408 if (r)
392 goto err_dsi_pll_init; 409 goto err_dsi_pll_init;
393 } 410 }
394 411
395 r = dpi_set_mode(out->manager); 412 r = dpi_set_mode(dpi);
396 if (r) 413 if (r)
397 goto err_set_mode; 414 goto err_set_mode;
398 415
399 dpi_config_lcd_manager(out->manager); 416 dpi_config_lcd_manager(dpi);
400 417
401 mdelay(2); 418 mdelay(2);
402 419
@@ -404,78 +421,80 @@ static int dpi_display_enable(struct omap_dss_device *dssdev)
404 if (r) 421 if (r)
405 goto err_mgr_enable; 422 goto err_mgr_enable;
406 423
407 mutex_unlock(&dpi.lock); 424 mutex_unlock(&dpi->lock);
408 425
409 return 0; 426 return 0;
410 427
411err_mgr_enable: 428err_mgr_enable:
412err_set_mode: 429err_set_mode:
413 if (dpi.dsidev) 430 if (dpi->pll)
414 dsi_pll_uninit(dpi.dsidev, true); 431 dss_pll_disable(dpi->pll);
415err_dsi_pll_init: 432err_dsi_pll_init:
416 if (dpi.dsidev)
417 dsi_runtime_put(dpi.dsidev);
418err_get_dsi:
419err_src_sel: 433err_src_sel:
420 dispc_runtime_put(); 434 dispc_runtime_put();
421err_get_dispc: 435err_get_dispc:
422 if (dss_has_feature(FEAT_DPI_USES_VDDS_DSI)) 436 if (dss_has_feature(FEAT_DPI_USES_VDDS_DSI))
423 regulator_disable(dpi.vdds_dsi_reg); 437 regulator_disable(dpi->vdds_dsi_reg);
424err_reg_enable: 438err_reg_enable:
425err_no_out_mgr: 439err_no_out_mgr:
426err_no_reg: 440err_no_reg:
427 mutex_unlock(&dpi.lock); 441 mutex_unlock(&dpi->lock);
428 return r; 442 return r;
429} 443}
430 444
431static void dpi_display_disable(struct omap_dss_device *dssdev) 445static void dpi_display_disable(struct omap_dss_device *dssdev)
432{ 446{
433 struct omap_overlay_manager *mgr = dpi.output.manager; 447 struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
448 struct omap_overlay_manager *mgr = dpi->output.manager;
434 449
435 mutex_lock(&dpi.lock); 450 mutex_lock(&dpi->lock);
436 451
437 dss_mgr_disable(mgr); 452 dss_mgr_disable(mgr);
438 453
439 if (dpi.dsidev) { 454 if (dpi->pll) {
440 dss_select_lcd_clk_source(mgr->id, OMAP_DSS_CLK_SRC_FCK); 455 dss_select_lcd_clk_source(mgr->id, OMAP_DSS_CLK_SRC_FCK);
441 dsi_pll_uninit(dpi.dsidev, true); 456 dss_pll_disable(dpi->pll);
442 dsi_runtime_put(dpi.dsidev);
443 } 457 }
444 458
445 dispc_runtime_put(); 459 dispc_runtime_put();
446 460
447 if (dss_has_feature(FEAT_DPI_USES_VDDS_DSI)) 461 if (dss_has_feature(FEAT_DPI_USES_VDDS_DSI))
448 regulator_disable(dpi.vdds_dsi_reg); 462 regulator_disable(dpi->vdds_dsi_reg);
449 463
450 mutex_unlock(&dpi.lock); 464 mutex_unlock(&dpi->lock);
451} 465}
452 466
453static void dpi_set_timings(struct omap_dss_device *dssdev, 467static void dpi_set_timings(struct omap_dss_device *dssdev,
454 struct omap_video_timings *timings) 468 struct omap_video_timings *timings)
455{ 469{
470 struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
471
456 DSSDBG("dpi_set_timings\n"); 472 DSSDBG("dpi_set_timings\n");
457 473
458 mutex_lock(&dpi.lock); 474 mutex_lock(&dpi->lock);
459 475
460 dpi.timings = *timings; 476 dpi->timings = *timings;
461 477
462 mutex_unlock(&dpi.lock); 478 mutex_unlock(&dpi->lock);
463} 479}
464 480
465static void dpi_get_timings(struct omap_dss_device *dssdev, 481static void dpi_get_timings(struct omap_dss_device *dssdev,
466 struct omap_video_timings *timings) 482 struct omap_video_timings *timings)
467{ 483{
468 mutex_lock(&dpi.lock); 484 struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
485
486 mutex_lock(&dpi->lock);
469 487
470 *timings = dpi.timings; 488 *timings = dpi->timings;
471 489
472 mutex_unlock(&dpi.lock); 490 mutex_unlock(&dpi->lock);
473} 491}
474 492
475static int dpi_check_timings(struct omap_dss_device *dssdev, 493static int dpi_check_timings(struct omap_dss_device *dssdev,
476 struct omap_video_timings *timings) 494 struct omap_video_timings *timings)
477{ 495{
478 struct omap_overlay_manager *mgr = dpi.output.manager; 496 struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
497 struct omap_overlay_manager *mgr = dpi->output.manager;
479 int lck_div, pck_div; 498 int lck_div, pck_div;
480 unsigned long fck; 499 unsigned long fck;
481 unsigned long pck; 500 unsigned long pck;
@@ -488,12 +507,12 @@ static int dpi_check_timings(struct omap_dss_device *dssdev,
488 if (timings->pixelclock == 0) 507 if (timings->pixelclock == 0)
489 return -EINVAL; 508 return -EINVAL;
490 509
491 if (dpi.dsidev) { 510 if (dpi->pll) {
492 ok = dpi_dsi_clk_calc(timings->pixelclock, &ctx); 511 ok = dpi_dsi_clk_calc(dpi, timings->pixelclock, &ctx);
493 if (!ok) 512 if (!ok)
494 return -EINVAL; 513 return -EINVAL;
495 514
496 fck = ctx.dsi_cinfo.dsi_pll_hsdiv_dispc_clk; 515 fck = ctx.dsi_cinfo.clkout[HSDIV_DISPC];
497 } else { 516 } else {
498 ok = dpi_dss_clk_calc(timings->pixelclock, &ctx); 517 ok = dpi_dss_clk_calc(timings->pixelclock, &ctx);
499 if (!ok) 518 if (!ok)
@@ -514,74 +533,69 @@ static int dpi_check_timings(struct omap_dss_device *dssdev,
514 533
515static void dpi_set_data_lines(struct omap_dss_device *dssdev, int data_lines) 534static void dpi_set_data_lines(struct omap_dss_device *dssdev, int data_lines)
516{ 535{
517 mutex_lock(&dpi.lock); 536 struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
518 537
519 dpi.data_lines = data_lines; 538 mutex_lock(&dpi->lock);
520 539
521 mutex_unlock(&dpi.lock); 540 dpi->data_lines = data_lines;
541
542 mutex_unlock(&dpi->lock);
522} 543}
523 544
524static int dpi_verify_dsi_pll(struct platform_device *dsidev) 545static int dpi_verify_dsi_pll(struct dss_pll *pll)
525{ 546{
526 int r; 547 int r;
527 548
528 /* do initial setup with the PLL to see if it is operational */ 549 /* do initial setup with the PLL to see if it is operational */
529 550
530 r = dsi_runtime_get(dsidev); 551 r = dss_pll_enable(pll);
531 if (r) 552 if (r)
532 return r; 553 return r;
533 554
534 r = dsi_pll_init(dsidev, 0, 1); 555 dss_pll_disable(pll);
535 if (r) {
536 dsi_runtime_put(dsidev);
537 return r;
538 }
539
540 dsi_pll_uninit(dsidev, true);
541 dsi_runtime_put(dsidev);
542 556
543 return 0; 557 return 0;
544} 558}
545 559
546static int dpi_init_regulator(void) 560static int dpi_init_regulator(struct dpi_data *dpi)
547{ 561{
548 struct regulator *vdds_dsi; 562 struct regulator *vdds_dsi;
549 563
550 if (!dss_has_feature(FEAT_DPI_USES_VDDS_DSI)) 564 if (!dss_has_feature(FEAT_DPI_USES_VDDS_DSI))
551 return 0; 565 return 0;
552 566
553 if (dpi.vdds_dsi_reg) 567 if (dpi->vdds_dsi_reg)
554 return 0; 568 return 0;
555 569
556 vdds_dsi = devm_regulator_get(&dpi.pdev->dev, "vdds_dsi"); 570 vdds_dsi = devm_regulator_get(&dpi->pdev->dev, "vdds_dsi");
557 if (IS_ERR(vdds_dsi)) { 571 if (IS_ERR(vdds_dsi)) {
558 if (PTR_ERR(vdds_dsi) != -EPROBE_DEFER) 572 if (PTR_ERR(vdds_dsi) != -EPROBE_DEFER)
559 DSSERR("can't get VDDS_DSI regulator\n"); 573 DSSERR("can't get VDDS_DSI regulator\n");
560 return PTR_ERR(vdds_dsi); 574 return PTR_ERR(vdds_dsi);
561 } 575 }
562 576
563 dpi.vdds_dsi_reg = vdds_dsi; 577 dpi->vdds_dsi_reg = vdds_dsi;
564 578
565 return 0; 579 return 0;
566} 580}
567 581
568static void dpi_init_pll(void) 582static void dpi_init_pll(struct dpi_data *dpi)
569{ 583{
570 struct platform_device *dsidev; 584 struct dss_pll *pll;
571 585
572 if (dpi.dsidev) 586 if (dpi->pll)
573 return; 587 return;
574 588
575 dsidev = dpi_get_dsidev(dpi.output.dispc_channel); 589 pll = dpi_get_pll(dpi->output.dispc_channel);
576 if (!dsidev) 590 if (!pll)
577 return; 591 return;
578 592
579 if (dpi_verify_dsi_pll(dsidev)) { 593 if (dpi_verify_dsi_pll(pll)) {
580 DSSWARN("DSI PLL not operational\n"); 594 DSSWARN("DSI PLL not operational\n");
581 return; 595 return;
582 } 596 }
583 597
584 dpi.dsidev = dsidev; 598 dpi->pll = pll;
585} 599}
586 600
587/* 601/*
@@ -590,7 +604,7 @@ static void dpi_init_pll(void)
590 * the channel in some more dynamic manner, or get the channel as a user 604 * the channel in some more dynamic manner, or get the channel as a user
591 * parameter. 605 * parameter.
592 */ 606 */
593static enum omap_channel dpi_get_channel(void) 607static enum omap_channel dpi_get_channel(int port_num)
594{ 608{
595 switch (omapdss_get_version()) { 609 switch (omapdss_get_version()) {
596 case OMAPDSS_VER_OMAP24xx: 610 case OMAPDSS_VER_OMAP24xx:
@@ -618,14 +632,15 @@ static enum omap_channel dpi_get_channel(void)
618static int dpi_connect(struct omap_dss_device *dssdev, 632static int dpi_connect(struct omap_dss_device *dssdev,
619 struct omap_dss_device *dst) 633 struct omap_dss_device *dst)
620{ 634{
635 struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
621 struct omap_overlay_manager *mgr; 636 struct omap_overlay_manager *mgr;
622 int r; 637 int r;
623 638
624 r = dpi_init_regulator(); 639 r = dpi_init_regulator(dpi);
625 if (r) 640 if (r)
626 return r; 641 return r;
627 642
628 dpi_init_pll(); 643 dpi_init_pll(dpi);
629 644
630 mgr = omap_dss_get_overlay_manager(dssdev->dispc_channel); 645 mgr = omap_dss_get_overlay_manager(dssdev->dispc_channel);
631 if (!mgr) 646 if (!mgr)
@@ -676,13 +691,14 @@ static const struct omapdss_dpi_ops dpi_ops = {
676 691
677static void dpi_init_output(struct platform_device *pdev) 692static void dpi_init_output(struct platform_device *pdev)
678{ 693{
679 struct omap_dss_device *out = &dpi.output; 694 struct dpi_data *dpi = dpi_get_data_from_pdev(pdev);
695 struct omap_dss_device *out = &dpi->output;
680 696
681 out->dev = &pdev->dev; 697 out->dev = &pdev->dev;
682 out->id = OMAP_DSS_OUTPUT_DPI; 698 out->id = OMAP_DSS_OUTPUT_DPI;
683 out->output_type = OMAP_DISPLAY_TYPE_DPI; 699 out->output_type = OMAP_DISPLAY_TYPE_DPI;
684 out->name = "dpi.0"; 700 out->name = "dpi.0";
685 out->dispc_channel = dpi_get_channel(); 701 out->dispc_channel = dpi_get_channel(0);
686 out->ops.dpi = &dpi_ops; 702 out->ops.dpi = &dpi_ops;
687 out->owner = THIS_MODULE; 703 out->owner = THIS_MODULE;
688 704
@@ -691,16 +707,69 @@ static void dpi_init_output(struct platform_device *pdev)
691 707
692static void __exit dpi_uninit_output(struct platform_device *pdev) 708static void __exit dpi_uninit_output(struct platform_device *pdev)
693{ 709{
694 struct omap_dss_device *out = &dpi.output; 710 struct dpi_data *dpi = dpi_get_data_from_pdev(pdev);
711 struct omap_dss_device *out = &dpi->output;
712
713 omapdss_unregister_output(out);
714}
715
716static void dpi_init_output_port(struct platform_device *pdev,
717 struct device_node *port)
718{
719 struct dpi_data *dpi = port->data;
720 struct omap_dss_device *out = &dpi->output;
721 int r;
722 u32 port_num;
723
724 r = of_property_read_u32(port, "reg", &port_num);
725 if (r)
726 port_num = 0;
727
728 switch (port_num) {
729 case 2:
730 out->name = "dpi.2";
731 break;
732 case 1:
733 out->name = "dpi.1";
734 break;
735 case 0:
736 default:
737 out->name = "dpi.0";
738 break;
739 }
740
741 out->dev = &pdev->dev;
742 out->id = OMAP_DSS_OUTPUT_DPI;
743 out->output_type = OMAP_DISPLAY_TYPE_DPI;
744 out->dispc_channel = dpi_get_channel(port_num);
745 out->port_num = port_num;
746 out->ops.dpi = &dpi_ops;
747 out->owner = THIS_MODULE;
748
749 omapdss_register_output(out);
750}
751
752static void __exit dpi_uninit_output_port(struct device_node *port)
753{
754 struct dpi_data *dpi = port->data;
755 struct omap_dss_device *out = &dpi->output;
695 756
696 omapdss_unregister_output(out); 757 omapdss_unregister_output(out);
697} 758}
698 759
699static int omap_dpi_probe(struct platform_device *pdev) 760static int omap_dpi_probe(struct platform_device *pdev)
700{ 761{
701 dpi.pdev = pdev; 762 struct dpi_data *dpi;
763
764 dpi = devm_kzalloc(&pdev->dev, sizeof(*dpi), GFP_KERNEL);
765 if (!dpi)
766 return -ENOMEM;
702 767
703 mutex_init(&dpi.lock); 768 dpi->pdev = pdev;
769
770 dev_set_drvdata(&pdev->dev, dpi);
771
772 mutex_init(&dpi->lock);
704 773
705 dpi_init_output(pdev); 774 dpi_init_output(pdev);
706 775
@@ -736,10 +805,15 @@ void __exit dpi_uninit_platform_driver(void)
736 805
737int __init dpi_init_port(struct platform_device *pdev, struct device_node *port) 806int __init dpi_init_port(struct platform_device *pdev, struct device_node *port)
738{ 807{
808 struct dpi_data *dpi;
739 struct device_node *ep; 809 struct device_node *ep;
740 u32 datalines; 810 u32 datalines;
741 int r; 811 int r;
742 812
813 dpi = devm_kzalloc(&pdev->dev, sizeof(*dpi), GFP_KERNEL);
814 if (!dpi)
815 return -ENOMEM;
816
743 ep = omapdss_of_get_next_endpoint(port, NULL); 817 ep = omapdss_of_get_next_endpoint(port, NULL);
744 if (!ep) 818 if (!ep)
745 return 0; 819 return 0;
@@ -750,17 +824,18 @@ int __init dpi_init_port(struct platform_device *pdev, struct device_node *port)
750 goto err_datalines; 824 goto err_datalines;
751 } 825 }
752 826
753 dpi.data_lines = datalines; 827 dpi->data_lines = datalines;
754 828
755 of_node_put(ep); 829 of_node_put(ep);
756 830
757 dpi.pdev = pdev; 831 dpi->pdev = pdev;
832 port->data = dpi;
758 833
759 mutex_init(&dpi.lock); 834 mutex_init(&dpi->lock);
760 835
761 dpi_init_output(pdev); 836 dpi_init_output_port(pdev, port);
762 837
763 dpi.port_initialized = true; 838 dpi->port_initialized = true;
764 839
765 return 0; 840 return 0;
766 841
@@ -770,10 +845,12 @@ err_datalines:
770 return r; 845 return r;
771} 846}
772 847
773void __exit dpi_uninit_port(void) 848void __exit dpi_uninit_port(struct device_node *port)
774{ 849{
775 if (!dpi.port_initialized) 850 struct dpi_data *dpi = port->data;
851
852 if (!dpi->port_initialized)
776 return; 853 return;
777 854
778 dpi_uninit_output(dpi.pdev); 855 dpi_uninit_output_port(port);
779} 856}
diff --git a/drivers/video/fbdev/omap2/dss/dsi.c b/drivers/video/fbdev/omap2/dss/dsi.c
index 0793bc67a275..73af35159468 100644
--- a/drivers/video/fbdev/omap2/dss/dsi.c
+++ b/drivers/video/fbdev/omap2/dss/dsi.c
@@ -219,6 +219,10 @@ static void dsi_display_uninit_dispc(struct platform_device *dsidev,
219 219
220static int dsi_vc_send_null(struct omap_dss_device *dssdev, int channel); 220static int dsi_vc_send_null(struct omap_dss_device *dssdev, int channel);
221 221
222/* DSI PLL HSDIV indices */
223#define HSDIV_DISPC 0
224#define HSDIV_DSI 1
225
222#define DSI_MAX_NR_ISRS 2 226#define DSI_MAX_NR_ISRS 2
223#define DSI_MAX_NR_LANES 5 227#define DSI_MAX_NR_LANES 5
224 228
@@ -271,6 +275,7 @@ struct dsi_isr_tables {
271 275
272struct dsi_clk_calc_ctx { 276struct dsi_clk_calc_ctx {
273 struct platform_device *dsidev; 277 struct platform_device *dsidev;
278 struct dss_pll *pll;
274 279
275 /* inputs */ 280 /* inputs */
276 281
@@ -280,13 +285,18 @@ struct dsi_clk_calc_ctx {
280 285
281 /* outputs */ 286 /* outputs */
282 287
283 struct dsi_clock_info dsi_cinfo; 288 struct dss_pll_clock_info dsi_cinfo;
284 struct dispc_clock_info dispc_cinfo; 289 struct dispc_clock_info dispc_cinfo;
285 290
286 struct omap_video_timings dispc_vm; 291 struct omap_video_timings dispc_vm;
287 struct omap_dss_dsi_videomode_timings dsi_vm; 292 struct omap_dss_dsi_videomode_timings dsi_vm;
288}; 293};
289 294
295struct dsi_lp_clock_info {
296 unsigned long lp_clk;
297 u16 lp_clk_div;
298};
299
290struct dsi_data { 300struct dsi_data {
291 struct platform_device *pdev; 301 struct platform_device *pdev;
292 void __iomem *proto_base; 302 void __iomem *proto_base;
@@ -300,12 +310,14 @@ struct dsi_data {
300 bool is_enabled; 310 bool is_enabled;
301 311
302 struct clk *dss_clk; 312 struct clk *dss_clk;
303 struct clk *sys_clk;
304 313
305 struct dispc_clock_info user_dispc_cinfo; 314 struct dispc_clock_info user_dispc_cinfo;
306 struct dsi_clock_info user_dsi_cinfo; 315 struct dss_pll_clock_info user_dsi_cinfo;
316
317 struct dsi_lp_clock_info user_lp_cinfo;
318 struct dsi_lp_clock_info current_lp_cinfo;
307 319
308 struct dsi_clock_info current_cinfo; 320 struct dss_pll pll;
309 321
310 bool vdds_dsi_enabled; 322 bool vdds_dsi_enabled;
311 struct regulator *vdds_dsi_reg; 323 struct regulator *vdds_dsi_reg;
@@ -321,8 +333,6 @@ struct dsi_data {
321 struct mutex lock; 333 struct mutex lock;
322 struct semaphore bus_lock; 334 struct semaphore bus_lock;
323 335
324 unsigned pll_locked;
325
326 spinlock_t irq_lock; 336 spinlock_t irq_lock;
327 struct dsi_isr_tables isr_tables; 337 struct dsi_isr_tables isr_tables;
328 /* space for a copy used by the interrupt handler */ 338 /* space for a copy used by the interrupt handler */
@@ -347,7 +357,7 @@ struct dsi_data {
347 357
348 unsigned long cache_req_pck; 358 unsigned long cache_req_pck;
349 unsigned long cache_clk_freq; 359 unsigned long cache_clk_freq;
350 struct dsi_clock_info cache_cinfo; 360 struct dss_pll_clock_info cache_cinfo;
351 361
352 u32 errors; 362 u32 errors;
353 spinlock_t errors_lock; 363 spinlock_t errors_lock;
@@ -362,11 +372,6 @@ struct dsi_data {
362 spinlock_t irq_stats_lock; 372 spinlock_t irq_stats_lock;
363 struct dsi_irq_stats irq_stats; 373 struct dsi_irq_stats irq_stats;
364#endif 374#endif
365 /* DSI PLL Parameter Ranges */
366 unsigned long regm_max, regn_max;
367 unsigned long regm_dispc_max, regm_dsi_max;
368 unsigned long fint_min, fint_max;
369 unsigned long lpdiv_max;
370 375
371 unsigned num_lanes_supported; 376 unsigned num_lanes_supported;
372 unsigned line_buffer_size; 377 unsigned line_buffer_size;
@@ -412,7 +417,7 @@ static inline struct platform_device *dsi_get_dsidev_from_dssdev(struct omap_dss
412 return to_platform_device(dssdev->dev); 417 return to_platform_device(dssdev->dev);
413} 418}
414 419
415struct platform_device *dsi_get_dsidev_from_id(int module) 420static struct platform_device *dsi_get_dsidev_from_id(int module)
416{ 421{
417 struct omap_dss_device *out; 422 struct omap_dss_device *out;
418 enum omap_dss_output_id id; 423 enum omap_dss_output_id id;
@@ -1134,7 +1139,7 @@ static u32 dsi_get_errors(struct platform_device *dsidev)
1134 return e; 1139 return e;
1135} 1140}
1136 1141
1137int dsi_runtime_get(struct platform_device *dsidev) 1142static int dsi_runtime_get(struct platform_device *dsidev)
1138{ 1143{
1139 int r; 1144 int r;
1140 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); 1145 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
@@ -1146,7 +1151,7 @@ int dsi_runtime_get(struct platform_device *dsidev)
1146 return r < 0 ? r : 0; 1151 return r < 0 ? r : 0;
1147} 1152}
1148 1153
1149void dsi_runtime_put(struct platform_device *dsidev) 1154static void dsi_runtime_put(struct platform_device *dsidev)
1150{ 1155{
1151 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); 1156 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1152 int r; 1157 int r;
@@ -1188,23 +1193,6 @@ static int dsi_regulator_init(struct platform_device *dsidev)
1188 return 0; 1193 return 0;
1189} 1194}
1190 1195
1191/* source clock for DSI PLL. this could also be PCLKFREE */
1192static inline void dsi_enable_pll_clock(struct platform_device *dsidev,
1193 bool enable)
1194{
1195 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1196
1197 if (enable)
1198 clk_prepare_enable(dsi->sys_clk);
1199 else
1200 clk_disable_unprepare(dsi->sys_clk);
1201
1202 if (enable && dsi->pll_locked) {
1203 if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 1, 1) != 1)
1204 DSSERR("cannot lock PLL when enabling clocks\n");
1205 }
1206}
1207
1208static void _dsi_print_reset_status(struct platform_device *dsidev) 1196static void _dsi_print_reset_status(struct platform_device *dsidev)
1209{ 1197{
1210 u32 l; 1198 u32 l;
@@ -1256,25 +1244,25 @@ static inline int dsi_if_enable(struct platform_device *dsidev, bool enable)
1256 return 0; 1244 return 0;
1257} 1245}
1258 1246
1259unsigned long dsi_get_pll_hsdiv_dispc_rate(struct platform_device *dsidev) 1247static unsigned long dsi_get_pll_hsdiv_dispc_rate(struct platform_device *dsidev)
1260{ 1248{
1261 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); 1249 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1262 1250
1263 return dsi->current_cinfo.dsi_pll_hsdiv_dispc_clk; 1251 return dsi->pll.cinfo.clkout[HSDIV_DISPC];
1264} 1252}
1265 1253
1266static unsigned long dsi_get_pll_hsdiv_dsi_rate(struct platform_device *dsidev) 1254static unsigned long dsi_get_pll_hsdiv_dsi_rate(struct platform_device *dsidev)
1267{ 1255{
1268 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); 1256 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1269 1257
1270 return dsi->current_cinfo.dsi_pll_hsdiv_dsi_clk; 1258 return dsi->pll.cinfo.clkout[HSDIV_DSI];
1271} 1259}
1272 1260
1273static unsigned long dsi_get_txbyteclkhs(struct platform_device *dsidev) 1261static unsigned long dsi_get_txbyteclkhs(struct platform_device *dsidev)
1274{ 1262{
1275 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); 1263 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1276 1264
1277 return dsi->current_cinfo.clkin4ddr / 16; 1265 return dsi->pll.cinfo.clkdco / 16;
1278} 1266}
1279 1267
1280static unsigned long dsi_fclk_rate(struct platform_device *dsidev) 1268static unsigned long dsi_fclk_rate(struct platform_device *dsidev)
@@ -1293,10 +1281,10 @@ static unsigned long dsi_fclk_rate(struct platform_device *dsidev)
1293 return r; 1281 return r;
1294} 1282}
1295 1283
1296static int dsi_lp_clock_calc(struct dsi_clock_info *cinfo, 1284static int dsi_lp_clock_calc(unsigned long dsi_fclk,
1297 unsigned long lp_clk_min, unsigned long lp_clk_max) 1285 unsigned long lp_clk_min, unsigned long lp_clk_max,
1286 struct dsi_lp_clock_info *lp_cinfo)
1298{ 1287{
1299 unsigned long dsi_fclk = cinfo->dsi_pll_hsdiv_dsi_clk;
1300 unsigned lp_clk_div; 1288 unsigned lp_clk_div;
1301 unsigned long lp_clk; 1289 unsigned long lp_clk;
1302 1290
@@ -1306,8 +1294,8 @@ static int dsi_lp_clock_calc(struct dsi_clock_info *cinfo,
1306 if (lp_clk < lp_clk_min || lp_clk > lp_clk_max) 1294 if (lp_clk < lp_clk_min || lp_clk > lp_clk_max)
1307 return -EINVAL; 1295 return -EINVAL;
1308 1296
1309 cinfo->lp_clk_div = lp_clk_div; 1297 lp_cinfo->lp_clk_div = lp_clk_div;
1310 cinfo->lp_clk = lp_clk; 1298 lp_cinfo->lp_clk = lp_clk;
1311 1299
1312 return 0; 1300 return 0;
1313} 1301}
@@ -1318,10 +1306,12 @@ static int dsi_set_lp_clk_divisor(struct platform_device *dsidev)
1318 unsigned long dsi_fclk; 1306 unsigned long dsi_fclk;
1319 unsigned lp_clk_div; 1307 unsigned lp_clk_div;
1320 unsigned long lp_clk; 1308 unsigned long lp_clk;
1309 unsigned lpdiv_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_LPDIV);
1310
1321 1311
1322 lp_clk_div = dsi->user_dsi_cinfo.lp_clk_div; 1312 lp_clk_div = dsi->user_lp_cinfo.lp_clk_div;
1323 1313
1324 if (lp_clk_div == 0 || lp_clk_div > dsi->lpdiv_max) 1314 if (lp_clk_div == 0 || lp_clk_div > lpdiv_max)
1325 return -EINVAL; 1315 return -EINVAL;
1326 1316
1327 dsi_fclk = dsi_fclk_rate(dsidev); 1317 dsi_fclk = dsi_fclk_rate(dsidev);
@@ -1329,8 +1319,8 @@ static int dsi_set_lp_clk_divisor(struct platform_device *dsidev)
1329 lp_clk = dsi_fclk / 2 / lp_clk_div; 1319 lp_clk = dsi_fclk / 2 / lp_clk_div;
1330 1320
1331 DSSDBG("LP_CLK_DIV %u, LP_CLK %lu\n", lp_clk_div, lp_clk); 1321 DSSDBG("LP_CLK_DIV %u, LP_CLK %lu\n", lp_clk_div, lp_clk);
1332 dsi->current_cinfo.lp_clk = lp_clk; 1322 dsi->current_lp_cinfo.lp_clk = lp_clk;
1333 dsi->current_cinfo.lp_clk_div = lp_clk_div; 1323 dsi->current_lp_cinfo.lp_clk_div = lp_clk_div;
1334 1324
1335 /* LP_CLK_DIVISOR */ 1325 /* LP_CLK_DIVISOR */
1336 REG_FLD_MOD(dsidev, DSI_CLK_CTRL, lp_clk_div, 12, 0); 1326 REG_FLD_MOD(dsidev, DSI_CLK_CTRL, lp_clk_div, 12, 0);
@@ -1391,286 +1381,33 @@ static int dsi_pll_power(struct platform_device *dsidev,
1391 return 0; 1381 return 0;
1392} 1382}
1393 1383
1394unsigned long dsi_get_pll_clkin(struct platform_device *dsidev)
1395{
1396 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1397 return clk_get_rate(dsi->sys_clk);
1398}
1399
1400bool dsi_hsdiv_calc(struct platform_device *dsidev, unsigned long pll,
1401 unsigned long out_min, dsi_hsdiv_calc_func func, void *data)
1402{
1403 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1404 int regm, regm_start, regm_stop;
1405 unsigned long out_max;
1406 unsigned long out;
1407
1408 out_min = out_min ? out_min : 1;
1409 out_max = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK);
1410
1411 regm_start = max(DIV_ROUND_UP(pll, out_max), 1ul);
1412 regm_stop = min(pll / out_min, dsi->regm_dispc_max);
1413
1414 for (regm = regm_start; regm <= regm_stop; ++regm) {
1415 out = pll / regm;
1416
1417 if (func(regm, out, data))
1418 return true;
1419 }
1420
1421 return false;
1422}
1423
1424bool dsi_pll_calc(struct platform_device *dsidev, unsigned long clkin,
1425 unsigned long pll_min, unsigned long pll_max,
1426 dsi_pll_calc_func func, void *data)
1427{
1428 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1429 int regn, regn_start, regn_stop;
1430 int regm, regm_start, regm_stop;
1431 unsigned long fint, pll;
1432 const unsigned long pll_hw_max = 1800000000;
1433 unsigned long fint_hw_min, fint_hw_max;
1434
1435 fint_hw_min = dsi->fint_min;
1436 fint_hw_max = dsi->fint_max;
1437 1384
1438 regn_start = max(DIV_ROUND_UP(clkin, fint_hw_max), 1ul); 1385static void dsi_pll_calc_dsi_fck(struct dss_pll_clock_info *cinfo)
1439 regn_stop = min(clkin / fint_hw_min, dsi->regn_max);
1440
1441 pll_max = pll_max ? pll_max : ULONG_MAX;
1442
1443 for (regn = regn_start; regn <= regn_stop; ++regn) {
1444 fint = clkin / regn;
1445
1446 regm_start = max(DIV_ROUND_UP(DIV_ROUND_UP(pll_min, fint), 2),
1447 1ul);
1448 regm_stop = min3(pll_max / fint / 2,
1449 pll_hw_max / fint / 2,
1450 dsi->regm_max);
1451
1452 for (regm = regm_start; regm <= regm_stop; ++regm) {
1453 pll = 2 * regm * fint;
1454
1455 if (func(regn, regm, fint, pll, data))
1456 return true;
1457 }
1458 }
1459
1460 return false;
1461}
1462
1463/* calculate clock rates using dividers in cinfo */
1464static int dsi_calc_clock_rates(struct platform_device *dsidev,
1465 struct dsi_clock_info *cinfo)
1466{
1467 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1468
1469 if (cinfo->regn == 0 || cinfo->regn > dsi->regn_max)
1470 return -EINVAL;
1471
1472 if (cinfo->regm == 0 || cinfo->regm > dsi->regm_max)
1473 return -EINVAL;
1474
1475 if (cinfo->regm_dispc > dsi->regm_dispc_max)
1476 return -EINVAL;
1477
1478 if (cinfo->regm_dsi > dsi->regm_dsi_max)
1479 return -EINVAL;
1480
1481 cinfo->clkin = clk_get_rate(dsi->sys_clk);
1482 cinfo->fint = cinfo->clkin / cinfo->regn;
1483
1484 if (cinfo->fint > dsi->fint_max || cinfo->fint < dsi->fint_min)
1485 return -EINVAL;
1486
1487 cinfo->clkin4ddr = 2 * cinfo->regm * cinfo->fint;
1488
1489 if (cinfo->clkin4ddr > 1800 * 1000 * 1000)
1490 return -EINVAL;
1491
1492 if (cinfo->regm_dispc > 0)
1493 cinfo->dsi_pll_hsdiv_dispc_clk =
1494 cinfo->clkin4ddr / cinfo->regm_dispc;
1495 else
1496 cinfo->dsi_pll_hsdiv_dispc_clk = 0;
1497
1498 if (cinfo->regm_dsi > 0)
1499 cinfo->dsi_pll_hsdiv_dsi_clk =
1500 cinfo->clkin4ddr / cinfo->regm_dsi;
1501 else
1502 cinfo->dsi_pll_hsdiv_dsi_clk = 0;
1503
1504 return 0;
1505}
1506
1507static void dsi_pll_calc_dsi_fck(struct dsi_clock_info *cinfo)
1508{ 1386{
1509 unsigned long max_dsi_fck; 1387 unsigned long max_dsi_fck;
1510 1388
1511 max_dsi_fck = dss_feat_get_param_max(FEAT_PARAM_DSI_FCK); 1389 max_dsi_fck = dss_feat_get_param_max(FEAT_PARAM_DSI_FCK);
1512 1390
1513 cinfo->regm_dsi = DIV_ROUND_UP(cinfo->clkin4ddr, max_dsi_fck); 1391 cinfo->mX[HSDIV_DSI] = DIV_ROUND_UP(cinfo->clkdco, max_dsi_fck);
1514 cinfo->dsi_pll_hsdiv_dsi_clk = cinfo->clkin4ddr / cinfo->regm_dsi; 1392 cinfo->clkout[HSDIV_DSI] = cinfo->clkdco / cinfo->mX[HSDIV_DSI];
1515} 1393}
1516 1394
1517int dsi_pll_set_clock_div(struct platform_device *dsidev, 1395static int dsi_pll_enable(struct dss_pll *pll)
1518 struct dsi_clock_info *cinfo)
1519{ 1396{
1520 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); 1397 struct dsi_data *dsi = container_of(pll, struct dsi_data, pll);
1398 struct platform_device *dsidev = dsi->pdev;
1521 int r = 0; 1399 int r = 0;
1522 u32 l;
1523 int f = 0;
1524 u8 regn_start, regn_end, regm_start, regm_end;
1525 u8 regm_dispc_start, regm_dispc_end, regm_dsi_start, regm_dsi_end;
1526
1527 DSSDBG("DSI PLL clock config starts");
1528
1529 dsi->current_cinfo.clkin = cinfo->clkin;
1530 dsi->current_cinfo.fint = cinfo->fint;
1531 dsi->current_cinfo.clkin4ddr = cinfo->clkin4ddr;
1532 dsi->current_cinfo.dsi_pll_hsdiv_dispc_clk =
1533 cinfo->dsi_pll_hsdiv_dispc_clk;
1534 dsi->current_cinfo.dsi_pll_hsdiv_dsi_clk =
1535 cinfo->dsi_pll_hsdiv_dsi_clk;
1536
1537 dsi->current_cinfo.regn = cinfo->regn;
1538 dsi->current_cinfo.regm = cinfo->regm;
1539 dsi->current_cinfo.regm_dispc = cinfo->regm_dispc;
1540 dsi->current_cinfo.regm_dsi = cinfo->regm_dsi;
1541
1542 DSSDBG("DSI Fint %ld\n", cinfo->fint);
1543
1544 DSSDBG("clkin rate %ld\n", cinfo->clkin);
1545
1546 /* DSIPHY == CLKIN4DDR */
1547 DSSDBG("CLKIN4DDR = 2 * %d / %d * %lu = %lu\n",
1548 cinfo->regm,
1549 cinfo->regn,
1550 cinfo->clkin,
1551 cinfo->clkin4ddr);
1552
1553 DSSDBG("Data rate on 1 DSI lane %ld Mbps\n",
1554 cinfo->clkin4ddr / 1000 / 1000 / 2);
1555
1556 DSSDBG("Clock lane freq %ld Hz\n", cinfo->clkin4ddr / 4);
1557
1558 DSSDBG("regm_dispc = %d, %s (%s) = %lu\n", cinfo->regm_dispc,
1559 dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC),
1560 dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC),
1561 cinfo->dsi_pll_hsdiv_dispc_clk);
1562 DSSDBG("regm_dsi = %d, %s (%s) = %lu\n", cinfo->regm_dsi,
1563 dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI),
1564 dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI),
1565 cinfo->dsi_pll_hsdiv_dsi_clk);
1566
1567 dss_feat_get_reg_field(FEAT_REG_DSIPLL_REGN, &regn_start, &regn_end);
1568 dss_feat_get_reg_field(FEAT_REG_DSIPLL_REGM, &regm_start, &regm_end);
1569 dss_feat_get_reg_field(FEAT_REG_DSIPLL_REGM_DISPC, &regm_dispc_start,
1570 &regm_dispc_end);
1571 dss_feat_get_reg_field(FEAT_REG_DSIPLL_REGM_DSI, &regm_dsi_start,
1572 &regm_dsi_end);
1573
1574 /* DSI_PLL_AUTOMODE = manual */
1575 REG_FLD_MOD(dsidev, DSI_PLL_CONTROL, 0, 0, 0);
1576
1577 l = dsi_read_reg(dsidev, DSI_PLL_CONFIGURATION1);
1578 l = FLD_MOD(l, 1, 0, 0); /* DSI_PLL_STOPMODE */
1579 /* DSI_PLL_REGN */
1580 l = FLD_MOD(l, cinfo->regn - 1, regn_start, regn_end);
1581 /* DSI_PLL_REGM */
1582 l = FLD_MOD(l, cinfo->regm, regm_start, regm_end);
1583 /* DSI_CLOCK_DIV */
1584 l = FLD_MOD(l, cinfo->regm_dispc > 0 ? cinfo->regm_dispc - 1 : 0,
1585 regm_dispc_start, regm_dispc_end);
1586 /* DSIPROTO_CLOCK_DIV */
1587 l = FLD_MOD(l, cinfo->regm_dsi > 0 ? cinfo->regm_dsi - 1 : 0,
1588 regm_dsi_start, regm_dsi_end);
1589 dsi_write_reg(dsidev, DSI_PLL_CONFIGURATION1, l);
1590
1591 BUG_ON(cinfo->fint < dsi->fint_min || cinfo->fint > dsi->fint_max);
1592
1593 l = dsi_read_reg(dsidev, DSI_PLL_CONFIGURATION2);
1594
1595 if (dss_has_feature(FEAT_DSI_PLL_FREQSEL)) {
1596 f = cinfo->fint < 1000000 ? 0x3 :
1597 cinfo->fint < 1250000 ? 0x4 :
1598 cinfo->fint < 1500000 ? 0x5 :
1599 cinfo->fint < 1750000 ? 0x6 :
1600 0x7;
1601
1602 l = FLD_MOD(l, f, 4, 1); /* DSI_PLL_FREQSEL */
1603 } else if (dss_has_feature(FEAT_DSI_PLL_SELFREQDCO)) {
1604 f = cinfo->clkin4ddr < 1000000000 ? 0x2 : 0x4;
1605
1606 l = FLD_MOD(l, f, 3, 1); /* PLL_SELFREQDCO */
1607 }
1608
1609 l = FLD_MOD(l, 1, 13, 13); /* DSI_PLL_REFEN */
1610 l = FLD_MOD(l, 0, 14, 14); /* DSIPHY_CLKINEN */
1611 l = FLD_MOD(l, 1, 20, 20); /* DSI_HSDIVBYPASS */
1612 if (dss_has_feature(FEAT_DSI_PLL_REFSEL))
1613 l = FLD_MOD(l, 3, 22, 21); /* REF_SYSCLK = sysclk */
1614 dsi_write_reg(dsidev, DSI_PLL_CONFIGURATION2, l);
1615
1616 REG_FLD_MOD(dsidev, DSI_PLL_GO, 1, 0, 0); /* DSI_PLL_GO */
1617
1618 if (wait_for_bit_change(dsidev, DSI_PLL_GO, 0, 0) != 0) {
1619 DSSERR("dsi pll go bit not going down.\n");
1620 r = -EIO;
1621 goto err;
1622 }
1623
1624 if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 1, 1) != 1) {
1625 DSSERR("cannot lock PLL\n");
1626 r = -EIO;
1627 goto err;
1628 }
1629
1630 dsi->pll_locked = 1;
1631
1632 l = dsi_read_reg(dsidev, DSI_PLL_CONFIGURATION2);
1633 l = FLD_MOD(l, 0, 0, 0); /* DSI_PLL_IDLE */
1634 l = FLD_MOD(l, 0, 5, 5); /* DSI_PLL_PLLLPMODE */
1635 l = FLD_MOD(l, 0, 6, 6); /* DSI_PLL_LOWCURRSTBY */
1636 l = FLD_MOD(l, 0, 7, 7); /* DSI_PLL_TIGHTPHASELOCK */
1637 l = FLD_MOD(l, 0, 8, 8); /* DSI_PLL_DRIFTGUARDEN */
1638 l = FLD_MOD(l, 0, 10, 9); /* DSI_PLL_LOCKSEL */
1639 l = FLD_MOD(l, 1, 13, 13); /* DSI_PLL_REFEN */
1640 l = FLD_MOD(l, 1, 14, 14); /* DSIPHY_CLKINEN */
1641 l = FLD_MOD(l, 0, 15, 15); /* DSI_BYPASSEN */
1642 l = FLD_MOD(l, 1, 16, 16); /* DSS_CLOCK_EN */
1643 l = FLD_MOD(l, 0, 17, 17); /* DSS_CLOCK_PWDN */
1644 l = FLD_MOD(l, 1, 18, 18); /* DSI_PROTO_CLOCK_EN */
1645 l = FLD_MOD(l, 0, 19, 19); /* DSI_PROTO_CLOCK_PWDN */
1646 l = FLD_MOD(l, 0, 20, 20); /* DSI_HSDIVBYPASS */
1647 dsi_write_reg(dsidev, DSI_PLL_CONFIGURATION2, l);
1648
1649 DSSDBG("PLL config done\n");
1650err:
1651 return r;
1652}
1653
1654int dsi_pll_init(struct platform_device *dsidev, bool enable_hsclk,
1655 bool enable_hsdiv)
1656{
1657 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1658 int r = 0;
1659 enum dsi_pll_power_state pwstate;
1660 1400
1661 DSSDBG("PLL init\n"); 1401 DSSDBG("PLL init\n");
1662 1402
1663 /*
1664 * It seems that on many OMAPs we need to enable both to have a
1665 * functional HSDivider.
1666 */
1667 enable_hsclk = enable_hsdiv = true;
1668
1669 r = dsi_regulator_init(dsidev); 1403 r = dsi_regulator_init(dsidev);
1670 if (r) 1404 if (r)
1671 return r; 1405 return r;
1672 1406
1673 dsi_enable_pll_clock(dsidev, 1); 1407 r = dsi_runtime_get(dsidev);
1408 if (r)
1409 return r;
1410
1674 /* 1411 /*
1675 * Note: SCP CLK is not required on OMAP3, but it is required on OMAP4. 1412 * Note: SCP CLK is not required on OMAP3, but it is required on OMAP4.
1676 */ 1413 */
@@ -1697,16 +1434,7 @@ int dsi_pll_init(struct platform_device *dsidev, bool enable_hsclk,
1697 * fill the whole display. No idea about this */ 1434 * fill the whole display. No idea about this */
1698 dispc_pck_free_enable(0); 1435 dispc_pck_free_enable(0);
1699 1436
1700 if (enable_hsclk && enable_hsdiv) 1437 r = dsi_pll_power(dsidev, DSI_PLL_POWER_ON_ALL);
1701 pwstate = DSI_PLL_POWER_ON_ALL;
1702 else if (enable_hsclk)
1703 pwstate = DSI_PLL_POWER_ON_HSCLK;
1704 else if (enable_hsdiv)
1705 pwstate = DSI_PLL_POWER_ON_DIV;
1706 else
1707 pwstate = DSI_PLL_POWER_OFF;
1708
1709 r = dsi_pll_power(dsidev, pwstate);
1710 1438
1711 if (r) 1439 if (r)
1712 goto err1; 1440 goto err1;
@@ -1721,15 +1449,14 @@ err1:
1721 } 1449 }
1722err0: 1450err0:
1723 dsi_disable_scp_clk(dsidev); 1451 dsi_disable_scp_clk(dsidev);
1724 dsi_enable_pll_clock(dsidev, 0); 1452 dsi_runtime_put(dsidev);
1725 return r; 1453 return r;
1726} 1454}
1727 1455
1728void dsi_pll_uninit(struct platform_device *dsidev, bool disconnect_lanes) 1456static void dsi_pll_uninit(struct platform_device *dsidev, bool disconnect_lanes)
1729{ 1457{
1730 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); 1458 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1731 1459
1732 dsi->pll_locked = 0;
1733 dsi_pll_power(dsidev, DSI_PLL_POWER_OFF); 1460 dsi_pll_power(dsidev, DSI_PLL_POWER_OFF);
1734 if (disconnect_lanes) { 1461 if (disconnect_lanes) {
1735 WARN_ON(!dsi->vdds_dsi_enabled); 1462 WARN_ON(!dsi->vdds_dsi_enabled);
@@ -1738,18 +1465,27 @@ void dsi_pll_uninit(struct platform_device *dsidev, bool disconnect_lanes)
1738 } 1465 }
1739 1466
1740 dsi_disable_scp_clk(dsidev); 1467 dsi_disable_scp_clk(dsidev);
1741 dsi_enable_pll_clock(dsidev, 0); 1468 dsi_runtime_put(dsidev);
1742 1469
1743 DSSDBG("PLL uninit done\n"); 1470 DSSDBG("PLL uninit done\n");
1744} 1471}
1745 1472
1473static void dsi_pll_disable(struct dss_pll *pll)
1474{
1475 struct dsi_data *dsi = container_of(pll, struct dsi_data, pll);
1476 struct platform_device *dsidev = dsi->pdev;
1477
1478 dsi_pll_uninit(dsidev, true);
1479}
1480
1746static void dsi_dump_dsidev_clocks(struct platform_device *dsidev, 1481static void dsi_dump_dsidev_clocks(struct platform_device *dsidev,
1747 struct seq_file *s) 1482 struct seq_file *s)
1748{ 1483{
1749 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); 1484 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1750 struct dsi_clock_info *cinfo = &dsi->current_cinfo; 1485 struct dss_pll_clock_info *cinfo = &dsi->pll.cinfo;
1751 enum omap_dss_clk_source dispc_clk_src, dsi_clk_src; 1486 enum omap_dss_clk_source dispc_clk_src, dsi_clk_src;
1752 int dsi_module = dsi->module_id; 1487 int dsi_module = dsi->module_id;
1488 struct dss_pll *pll = &dsi->pll;
1753 1489
1754 dispc_clk_src = dss_get_dispc_clk_source(); 1490 dispc_clk_src = dss_get_dispc_clk_source();
1755 dsi_clk_src = dss_get_dsi_clk_source(dsi_module); 1491 dsi_clk_src = dss_get_dsi_clk_source(dsi_module);
@@ -1759,28 +1495,28 @@ static void dsi_dump_dsidev_clocks(struct platform_device *dsidev,
1759 1495
1760 seq_printf(s, "- DSI%d PLL -\n", dsi_module + 1); 1496 seq_printf(s, "- DSI%d PLL -\n", dsi_module + 1);
1761 1497
1762 seq_printf(s, "dsi pll clkin\t%lu\n", cinfo->clkin); 1498 seq_printf(s, "dsi pll clkin\t%lu\n", clk_get_rate(pll->clkin));
1763 1499
1764 seq_printf(s, "Fint\t\t%-16luregn %u\n", cinfo->fint, cinfo->regn); 1500 seq_printf(s, "Fint\t\t%-16lun %u\n", cinfo->fint, cinfo->n);
1765 1501
1766 seq_printf(s, "CLKIN4DDR\t%-16luregm %u\n", 1502 seq_printf(s, "CLKIN4DDR\t%-16lum %u\n",
1767 cinfo->clkin4ddr, cinfo->regm); 1503 cinfo->clkdco, cinfo->m);
1768 1504
1769 seq_printf(s, "DSI_PLL_HSDIV_DISPC (%s)\t%-16luregm_dispc %u\t(%s)\n", 1505 seq_printf(s, "DSI_PLL_HSDIV_DISPC (%s)\t%-16lum_dispc %u\t(%s)\n",
1770 dss_feat_get_clk_source_name(dsi_module == 0 ? 1506 dss_feat_get_clk_source_name(dsi_module == 0 ?
1771 OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC : 1507 OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC :
1772 OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC), 1508 OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC),
1773 cinfo->dsi_pll_hsdiv_dispc_clk, 1509 cinfo->clkout[HSDIV_DISPC],
1774 cinfo->regm_dispc, 1510 cinfo->mX[HSDIV_DISPC],
1775 dispc_clk_src == OMAP_DSS_CLK_SRC_FCK ? 1511 dispc_clk_src == OMAP_DSS_CLK_SRC_FCK ?
1776 "off" : "on"); 1512 "off" : "on");
1777 1513
1778 seq_printf(s, "DSI_PLL_HSDIV_DSI (%s)\t%-16luregm_dsi %u\t(%s)\n", 1514 seq_printf(s, "DSI_PLL_HSDIV_DSI (%s)\t%-16lum_dsi %u\t(%s)\n",
1779 dss_feat_get_clk_source_name(dsi_module == 0 ? 1515 dss_feat_get_clk_source_name(dsi_module == 0 ?
1780 OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI : 1516 OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI :
1781 OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI), 1517 OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI),
1782 cinfo->dsi_pll_hsdiv_dsi_clk, 1518 cinfo->clkout[HSDIV_DSI],
1783 cinfo->regm_dsi, 1519 cinfo->mX[HSDIV_DSI],
1784 dsi_clk_src == OMAP_DSS_CLK_SRC_FCK ? 1520 dsi_clk_src == OMAP_DSS_CLK_SRC_FCK ?
1785 "off" : "on"); 1521 "off" : "on");
1786 1522
@@ -1793,11 +1529,11 @@ static void dsi_dump_dsidev_clocks(struct platform_device *dsidev,
1793 seq_printf(s, "DSI_FCLK\t%lu\n", dsi_fclk_rate(dsidev)); 1529 seq_printf(s, "DSI_FCLK\t%lu\n", dsi_fclk_rate(dsidev));
1794 1530
1795 seq_printf(s, "DDR_CLK\t\t%lu\n", 1531 seq_printf(s, "DDR_CLK\t\t%lu\n",
1796 cinfo->clkin4ddr / 4); 1532 cinfo->clkdco / 4);
1797 1533
1798 seq_printf(s, "TxByteClkHS\t%lu\n", dsi_get_txbyteclkhs(dsidev)); 1534 seq_printf(s, "TxByteClkHS\t%lu\n", dsi_get_txbyteclkhs(dsidev));
1799 1535
1800 seq_printf(s, "LP_CLK\t\t%lu\n", cinfo->lp_clk); 1536 seq_printf(s, "LP_CLK\t\t%lu\n", dsi->current_lp_cinfo.lp_clk);
1801 1537
1802 dsi_runtime_put(dsidev); 1538 dsi_runtime_put(dsidev);
1803} 1539}
@@ -2132,7 +1868,7 @@ static inline unsigned ns2ddr(struct platform_device *dsidev, unsigned ns)
2132 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); 1868 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2133 1869
2134 /* convert time in ns to ddr ticks, rounding up */ 1870 /* convert time in ns to ddr ticks, rounding up */
2135 unsigned long ddr_clk = dsi->current_cinfo.clkin4ddr / 4; 1871 unsigned long ddr_clk = dsi->pll.cinfo.clkdco / 4;
2136 return (ns * (ddr_clk / 1000 / 1000) + 999) / 1000; 1872 return (ns * (ddr_clk / 1000 / 1000) + 999) / 1000;
2137} 1873}
2138 1874
@@ -2140,7 +1876,7 @@ static inline unsigned ddr2ns(struct platform_device *dsidev, unsigned ddr)
2140{ 1876{
2141 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); 1877 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2142 1878
2143 unsigned long ddr_clk = dsi->current_cinfo.clkin4ddr / 4; 1879 unsigned long ddr_clk = dsi->pll.cinfo.clkdco / 4;
2144 return ddr * 1000 * 1000 / (ddr_clk / 1000); 1880 return ddr * 1000 * 1000 / (ddr_clk / 1000);
2145} 1881}
2146 1882
@@ -3730,7 +3466,7 @@ static void dsi_config_cmd_mode_interleaving(struct platform_device *dsidev)
3730 struct omap_video_timings *timings = &dsi->timings; 3466 struct omap_video_timings *timings = &dsi->timings;
3731 int bpp = dsi_get_pixel_size(dsi->pix_fmt); 3467 int bpp = dsi_get_pixel_size(dsi->pix_fmt);
3732 int ndl = dsi->num_lanes_used - 1; 3468 int ndl = dsi->num_lanes_used - 1;
3733 int dsi_fclk_hsdiv = dsi->user_dsi_cinfo.regm_dsi + 1; 3469 int dsi_fclk_hsdiv = dsi->user_dsi_cinfo.mX[HSDIV_DSI] + 1;
3734 int hsa_interleave_hs = 0, hsa_interleave_lp = 0; 3470 int hsa_interleave_hs = 0, hsa_interleave_lp = 0;
3735 int hfp_interleave_hs = 0, hfp_interleave_lp = 0; 3471 int hfp_interleave_hs = 0, hfp_interleave_lp = 0;
3736 int hbp_interleave_hs = 0, hbp_interleave_lp = 0; 3472 int hbp_interleave_hs = 0, hbp_interleave_lp = 0;
@@ -4441,18 +4177,12 @@ static void dsi_display_uninit_dispc(struct platform_device *dsidev,
4441static int dsi_configure_dsi_clocks(struct platform_device *dsidev) 4177static int dsi_configure_dsi_clocks(struct platform_device *dsidev)
4442{ 4178{
4443 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); 4179 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4444 struct dsi_clock_info cinfo; 4180 struct dss_pll_clock_info cinfo;
4445 int r; 4181 int r;
4446 4182
4447 cinfo = dsi->user_dsi_cinfo; 4183 cinfo = dsi->user_dsi_cinfo;
4448 4184
4449 r = dsi_calc_clock_rates(dsidev, &cinfo); 4185 r = dss_pll_set_config(&dsi->pll, &cinfo);
4450 if (r) {
4451 DSSERR("Failed to calc dsi clocks\n");
4452 return r;
4453 }
4454
4455 r = dsi_pll_set_clock_div(dsidev, &cinfo);
4456 if (r) { 4186 if (r) {
4457 DSSERR("Failed to set dsi clocks\n"); 4187 DSSERR("Failed to set dsi clocks\n");
4458 return r; 4188 return r;
@@ -4466,7 +4196,7 @@ static int dsi_display_init_dsi(struct platform_device *dsidev)
4466 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); 4196 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4467 int r; 4197 int r;
4468 4198
4469 r = dsi_pll_init(dsidev, true, true); 4199 r = dss_pll_enable(&dsi->pll);
4470 if (r) 4200 if (r)
4471 goto err0; 4201 goto err0;
4472 4202
@@ -4510,7 +4240,7 @@ err3:
4510err2: 4240err2:
4511 dss_select_dsi_clk_source(dsi->module_id, OMAP_DSS_CLK_SRC_FCK); 4241 dss_select_dsi_clk_source(dsi->module_id, OMAP_DSS_CLK_SRC_FCK);
4512err1: 4242err1:
4513 dsi_pll_uninit(dsidev, true); 4243 dss_pll_disable(&dsi->pll);
4514err0: 4244err0:
4515 return r; 4245 return r;
4516} 4246}
@@ -4551,8 +4281,6 @@ static int dsi_display_enable(struct omap_dss_device *dssdev)
4551 if (r) 4281 if (r)
4552 goto err_get_dsi; 4282 goto err_get_dsi;
4553 4283
4554 dsi_enable_pll_clock(dsidev, 1);
4555
4556 _dsi_initialize_irq(dsidev); 4284 _dsi_initialize_irq(dsidev);
4557 4285
4558 r = dsi_display_init_dsi(dsidev); 4286 r = dsi_display_init_dsi(dsidev);
@@ -4564,7 +4292,6 @@ static int dsi_display_enable(struct omap_dss_device *dssdev)
4564 return 0; 4292 return 0;
4565 4293
4566err_init_dsi: 4294err_init_dsi:
4567 dsi_enable_pll_clock(dsidev, 0);
4568 dsi_runtime_put(dsidev); 4295 dsi_runtime_put(dsidev);
4569err_get_dsi: 4296err_get_dsi:
4570 mutex_unlock(&dsi->lock); 4297 mutex_unlock(&dsi->lock);
@@ -4592,7 +4319,6 @@ static void dsi_display_disable(struct omap_dss_device *dssdev,
4592 dsi_display_uninit_dsi(dsidev, disconnect_lanes, enter_ulps); 4319 dsi_display_uninit_dsi(dsidev, disconnect_lanes, enter_ulps);
4593 4320
4594 dsi_runtime_put(dsidev); 4321 dsi_runtime_put(dsidev);
4595 dsi_enable_pll_clock(dsidev, 0);
4596 4322
4597 mutex_unlock(&dsi->lock); 4323 mutex_unlock(&dsi->lock);
4598} 4324}
@@ -4713,29 +4439,30 @@ static bool dsi_cm_calc_dispc_cb(int lckd, int pckd, unsigned long lck,
4713 return true; 4439 return true;
4714} 4440}
4715 4441
4716static bool dsi_cm_calc_hsdiv_cb(int regm_dispc, unsigned long dispc, 4442static bool dsi_cm_calc_hsdiv_cb(int m_dispc, unsigned long dispc,
4717 void *data) 4443 void *data)
4718{ 4444{
4719 struct dsi_clk_calc_ctx *ctx = data; 4445 struct dsi_clk_calc_ctx *ctx = data;
4720 4446
4721 ctx->dsi_cinfo.regm_dispc = regm_dispc; 4447 ctx->dsi_cinfo.mX[HSDIV_DISPC] = m_dispc;
4722 ctx->dsi_cinfo.dsi_pll_hsdiv_dispc_clk = dispc; 4448 ctx->dsi_cinfo.clkout[HSDIV_DISPC] = dispc;
4723 4449
4724 return dispc_div_calc(dispc, ctx->req_pck_min, ctx->req_pck_max, 4450 return dispc_div_calc(dispc, ctx->req_pck_min, ctx->req_pck_max,
4725 dsi_cm_calc_dispc_cb, ctx); 4451 dsi_cm_calc_dispc_cb, ctx);
4726} 4452}
4727 4453
4728static bool dsi_cm_calc_pll_cb(int regn, int regm, unsigned long fint, 4454static bool dsi_cm_calc_pll_cb(int n, int m, unsigned long fint,
4729 unsigned long pll, void *data) 4455 unsigned long clkdco, void *data)
4730{ 4456{
4731 struct dsi_clk_calc_ctx *ctx = data; 4457 struct dsi_clk_calc_ctx *ctx = data;
4732 4458
4733 ctx->dsi_cinfo.regn = regn; 4459 ctx->dsi_cinfo.n = n;
4734 ctx->dsi_cinfo.regm = regm; 4460 ctx->dsi_cinfo.m = m;
4735 ctx->dsi_cinfo.fint = fint; 4461 ctx->dsi_cinfo.fint = fint;
4736 ctx->dsi_cinfo.clkin4ddr = pll; 4462 ctx->dsi_cinfo.clkdco = clkdco;
4737 4463
4738 return dsi_hsdiv_calc(ctx->dsidev, pll, ctx->req_pck_min, 4464 return dss_pll_hsdiv_calc(ctx->pll, clkdco, ctx->req_pck_min,
4465 dss_feat_get_param_max(FEAT_PARAM_DSS_FCK),
4739 dsi_cm_calc_hsdiv_cb, ctx); 4466 dsi_cm_calc_hsdiv_cb, ctx);
4740} 4467}
4741 4468
@@ -4748,7 +4475,7 @@ static bool dsi_cm_calc(struct dsi_data *dsi,
4748 unsigned long pll_min, pll_max; 4475 unsigned long pll_min, pll_max;
4749 unsigned long pck, txbyteclk; 4476 unsigned long pck, txbyteclk;
4750 4477
4751 clkin = clk_get_rate(dsi->sys_clk); 4478 clkin = clk_get_rate(dsi->pll.clkin);
4752 bitspp = dsi_get_pixel_size(cfg->pixel_format); 4479 bitspp = dsi_get_pixel_size(cfg->pixel_format);
4753 ndl = dsi->num_lanes_used - 1; 4480 ndl = dsi->num_lanes_used - 1;
4754 4481
@@ -4764,16 +4491,16 @@ static bool dsi_cm_calc(struct dsi_data *dsi,
4764 4491
4765 memset(ctx, 0, sizeof(*ctx)); 4492 memset(ctx, 0, sizeof(*ctx));
4766 ctx->dsidev = dsi->pdev; 4493 ctx->dsidev = dsi->pdev;
4494 ctx->pll = &dsi->pll;
4767 ctx->config = cfg; 4495 ctx->config = cfg;
4768 ctx->req_pck_min = pck; 4496 ctx->req_pck_min = pck;
4769 ctx->req_pck_nom = pck; 4497 ctx->req_pck_nom = pck;
4770 ctx->req_pck_max = pck * 3 / 2; 4498 ctx->req_pck_max = pck * 3 / 2;
4771 ctx->dsi_cinfo.clkin = clkin;
4772 4499
4773 pll_min = max(cfg->hs_clk_min * 4, txbyteclk * 4 * 4); 4500 pll_min = max(cfg->hs_clk_min * 4, txbyteclk * 4 * 4);
4774 pll_max = cfg->hs_clk_max * 4; 4501 pll_max = cfg->hs_clk_max * 4;
4775 4502
4776 return dsi_pll_calc(dsi->pdev, clkin, 4503 return dss_pll_calc(ctx->pll, clkin,
4777 pll_min, pll_max, 4504 pll_min, pll_max,
4778 dsi_cm_calc_pll_cb, ctx); 4505 dsi_cm_calc_pll_cb, ctx);
4779} 4506}
@@ -4784,7 +4511,7 @@ static bool dsi_vm_calc_blanking(struct dsi_clk_calc_ctx *ctx)
4784 const struct omap_dss_dsi_config *cfg = ctx->config; 4511 const struct omap_dss_dsi_config *cfg = ctx->config;
4785 int bitspp = dsi_get_pixel_size(cfg->pixel_format); 4512 int bitspp = dsi_get_pixel_size(cfg->pixel_format);
4786 int ndl = dsi->num_lanes_used - 1; 4513 int ndl = dsi->num_lanes_used - 1;
4787 unsigned long hsclk = ctx->dsi_cinfo.clkin4ddr / 4; 4514 unsigned long hsclk = ctx->dsi_cinfo.clkdco / 4;
4788 unsigned long byteclk = hsclk / 4; 4515 unsigned long byteclk = hsclk / 4;
4789 4516
4790 unsigned long dispc_pck, req_pck_min, req_pck_nom, req_pck_max; 4517 unsigned long dispc_pck, req_pck_min, req_pck_nom, req_pck_max;
@@ -4999,14 +4726,14 @@ static bool dsi_vm_calc_dispc_cb(int lckd, int pckd, unsigned long lck,
4999 return true; 4726 return true;
5000} 4727}
5001 4728
5002static bool dsi_vm_calc_hsdiv_cb(int regm_dispc, unsigned long dispc, 4729static bool dsi_vm_calc_hsdiv_cb(int m_dispc, unsigned long dispc,
5003 void *data) 4730 void *data)
5004{ 4731{
5005 struct dsi_clk_calc_ctx *ctx = data; 4732 struct dsi_clk_calc_ctx *ctx = data;
5006 unsigned long pck_max; 4733 unsigned long pck_max;
5007 4734
5008 ctx->dsi_cinfo.regm_dispc = regm_dispc; 4735 ctx->dsi_cinfo.mX[HSDIV_DISPC] = m_dispc;
5009 ctx->dsi_cinfo.dsi_pll_hsdiv_dispc_clk = dispc; 4736 ctx->dsi_cinfo.clkout[HSDIV_DISPC] = dispc;
5010 4737
5011 /* 4738 /*
5012 * In burst mode we can let the dispc pck be arbitrarily high, but it 4739 * In burst mode we can let the dispc pck be arbitrarily high, but it
@@ -5022,17 +4749,18 @@ static bool dsi_vm_calc_hsdiv_cb(int regm_dispc, unsigned long dispc,
5022 dsi_vm_calc_dispc_cb, ctx); 4749 dsi_vm_calc_dispc_cb, ctx);
5023} 4750}
5024 4751
5025static bool dsi_vm_calc_pll_cb(int regn, int regm, unsigned long fint, 4752static bool dsi_vm_calc_pll_cb(int n, int m, unsigned long fint,
5026 unsigned long pll, void *data) 4753 unsigned long clkdco, void *data)
5027{ 4754{
5028 struct dsi_clk_calc_ctx *ctx = data; 4755 struct dsi_clk_calc_ctx *ctx = data;
5029 4756
5030 ctx->dsi_cinfo.regn = regn; 4757 ctx->dsi_cinfo.n = n;
5031 ctx->dsi_cinfo.regm = regm; 4758 ctx->dsi_cinfo.m = m;
5032 ctx->dsi_cinfo.fint = fint; 4759 ctx->dsi_cinfo.fint = fint;
5033 ctx->dsi_cinfo.clkin4ddr = pll; 4760 ctx->dsi_cinfo.clkdco = clkdco;
5034 4761
5035 return dsi_hsdiv_calc(ctx->dsidev, pll, ctx->req_pck_min, 4762 return dss_pll_hsdiv_calc(ctx->pll, clkdco, ctx->req_pck_min,
4763 dss_feat_get_param_max(FEAT_PARAM_DSS_FCK),
5036 dsi_vm_calc_hsdiv_cb, ctx); 4764 dsi_vm_calc_hsdiv_cb, ctx);
5037} 4765}
5038 4766
@@ -5048,14 +4776,13 @@ static bool dsi_vm_calc(struct dsi_data *dsi,
5048 int bitspp = dsi_get_pixel_size(cfg->pixel_format); 4776 int bitspp = dsi_get_pixel_size(cfg->pixel_format);
5049 unsigned long byteclk_min; 4777 unsigned long byteclk_min;
5050 4778
5051 clkin = clk_get_rate(dsi->sys_clk); 4779 clkin = clk_get_rate(dsi->pll.clkin);
5052 4780
5053 memset(ctx, 0, sizeof(*ctx)); 4781 memset(ctx, 0, sizeof(*ctx));
5054 ctx->dsidev = dsi->pdev; 4782 ctx->dsidev = dsi->pdev;
4783 ctx->pll = &dsi->pll;
5055 ctx->config = cfg; 4784 ctx->config = cfg;
5056 4785
5057 ctx->dsi_cinfo.clkin = clkin;
5058
5059 /* these limits should come from the panel driver */ 4786 /* these limits should come from the panel driver */
5060 ctx->req_pck_min = t->pixelclock - 1000; 4787 ctx->req_pck_min = t->pixelclock - 1000;
5061 ctx->req_pck_nom = t->pixelclock; 4788 ctx->req_pck_nom = t->pixelclock;
@@ -5074,7 +4801,7 @@ static bool dsi_vm_calc(struct dsi_data *dsi,
5074 pll_max = byteclk_max * 4 * 4; 4801 pll_max = byteclk_max * 4 * 4;
5075 } 4802 }
5076 4803
5077 return dsi_pll_calc(dsi->pdev, clkin, 4804 return dss_pll_calc(ctx->pll, clkin,
5078 pll_min, pll_max, 4805 pll_min, pll_max,
5079 dsi_vm_calc_pll_cb, ctx); 4806 dsi_vm_calc_pll_cb, ctx);
5080} 4807}
@@ -5106,8 +4833,8 @@ static int dsi_set_config(struct omap_dss_device *dssdev,
5106 4833
5107 dsi_pll_calc_dsi_fck(&ctx.dsi_cinfo); 4834 dsi_pll_calc_dsi_fck(&ctx.dsi_cinfo);
5108 4835
5109 r = dsi_lp_clock_calc(&ctx.dsi_cinfo, config->lp_clk_min, 4836 r = dsi_lp_clock_calc(ctx.dsi_cinfo.clkout[HSDIV_DSI],
5110 config->lp_clk_max); 4837 config->lp_clk_min, config->lp_clk_max, &dsi->user_lp_cinfo);
5111 if (r) { 4838 if (r) {
5112 DSSERR("failed to find suitable DSI LP clock settings\n"); 4839 DSSERR("failed to find suitable DSI LP clock settings\n");
5113 goto err; 4840 goto err;
@@ -5234,35 +4961,6 @@ static void dsi_release_vc(struct omap_dss_device *dssdev, int channel)
5234 } 4961 }
5235} 4962}
5236 4963
5237void dsi_wait_pll_hsdiv_dispc_active(struct platform_device *dsidev)
5238{
5239 if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 7, 1) != 1)
5240 DSSERR("%s (%s) not active\n",
5241 dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC),
5242 dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC));
5243}
5244
5245void dsi_wait_pll_hsdiv_dsi_active(struct platform_device *dsidev)
5246{
5247 if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 8, 1) != 1)
5248 DSSERR("%s (%s) not active\n",
5249 dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI),
5250 dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI));
5251}
5252
5253static void dsi_calc_clock_param_ranges(struct platform_device *dsidev)
5254{
5255 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
5256
5257 dsi->regn_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGN);
5258 dsi->regm_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGM);
5259 dsi->regm_dispc_max =
5260 dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGM_DISPC);
5261 dsi->regm_dsi_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGM_DSI);
5262 dsi->fint_min = dss_feat_get_param_min(FEAT_PARAM_DSIPLL_FINT);
5263 dsi->fint_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_FINT);
5264 dsi->lpdiv_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_LPDIV);
5265}
5266 4964
5267static int dsi_get_clocks(struct platform_device *dsidev) 4965static int dsi_get_clocks(struct platform_device *dsidev)
5268{ 4966{
@@ -5277,14 +4975,6 @@ static int dsi_get_clocks(struct platform_device *dsidev)
5277 4975
5278 dsi->dss_clk = clk; 4976 dsi->dss_clk = clk;
5279 4977
5280 clk = devm_clk_get(&dsidev->dev, "sys_clk");
5281 if (IS_ERR(clk)) {
5282 DSSERR("can't get sys_clk\n");
5283 return PTR_ERR(clk);
5284 }
5285
5286 dsi->sys_clk = clk;
5287
5288 return 0; 4978 return 0;
5289} 4979}
5290 4980
@@ -5453,6 +5143,135 @@ err:
5453 return r; 5143 return r;
5454} 5144}
5455 5145
5146static const struct dss_pll_ops dsi_pll_ops = {
5147 .enable = dsi_pll_enable,
5148 .disable = dsi_pll_disable,
5149 .set_config = dss_pll_write_config_type_a,
5150};
5151
5152static const struct dss_pll_hw dss_omap3_dsi_pll_hw = {
5153 .n_max = (1 << 7) - 1,
5154 .m_max = (1 << 11) - 1,
5155 .mX_max = (1 << 4) - 1,
5156 .fint_min = 750000,
5157 .fint_max = 2100000,
5158 .clkdco_low = 1000000000,
5159 .clkdco_max = 1800000000,
5160
5161 .n_msb = 7,
5162 .n_lsb = 1,
5163 .m_msb = 18,
5164 .m_lsb = 8,
5165
5166 .mX_msb[0] = 22,
5167 .mX_lsb[0] = 19,
5168 .mX_msb[1] = 26,
5169 .mX_lsb[1] = 23,
5170
5171 .has_stopmode = true,
5172 .has_freqsel = true,
5173 .has_selfreqdco = false,
5174 .has_refsel = false,
5175};
5176
5177static const struct dss_pll_hw dss_omap4_dsi_pll_hw = {
5178 .n_max = (1 << 8) - 1,
5179 .m_max = (1 << 12) - 1,
5180 .mX_max = (1 << 5) - 1,
5181 .fint_min = 500000,
5182 .fint_max = 2500000,
5183 .clkdco_low = 1000000000,
5184 .clkdco_max = 1800000000,
5185
5186 .n_msb = 8,
5187 .n_lsb = 1,
5188 .m_msb = 20,
5189 .m_lsb = 9,
5190
5191 .mX_msb[0] = 25,
5192 .mX_lsb[0] = 21,
5193 .mX_msb[1] = 30,
5194 .mX_lsb[1] = 26,
5195
5196 .has_stopmode = true,
5197 .has_freqsel = false,
5198 .has_selfreqdco = false,
5199 .has_refsel = false,
5200};
5201
5202static const struct dss_pll_hw dss_omap5_dsi_pll_hw = {
5203 .n_max = (1 << 8) - 1,
5204 .m_max = (1 << 12) - 1,
5205 .mX_max = (1 << 5) - 1,
5206 .fint_min = 150000,
5207 .fint_max = 52000000,
5208 .clkdco_low = 1000000000,
5209 .clkdco_max = 1800000000,
5210
5211 .n_msb = 8,
5212 .n_lsb = 1,
5213 .m_msb = 20,
5214 .m_lsb = 9,
5215
5216 .mX_msb[0] = 25,
5217 .mX_lsb[0] = 21,
5218 .mX_msb[1] = 30,
5219 .mX_lsb[1] = 26,
5220
5221 .has_stopmode = true,
5222 .has_freqsel = false,
5223 .has_selfreqdco = true,
5224 .has_refsel = true,
5225};
5226
5227static int dsi_init_pll_data(struct platform_device *dsidev)
5228{
5229 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
5230 struct dss_pll *pll = &dsi->pll;
5231 struct clk *clk;
5232 int r;
5233
5234 clk = devm_clk_get(&dsidev->dev, "sys_clk");
5235 if (IS_ERR(clk)) {
5236 DSSERR("can't get sys_clk\n");
5237 return PTR_ERR(clk);
5238 }
5239
5240 pll->name = dsi->module_id == 0 ? "dsi0" : "dsi1";
5241 pll->clkin = clk;
5242 pll->base = dsi->pll_base;
5243
5244 switch (omapdss_get_version()) {
5245 case OMAPDSS_VER_OMAP34xx_ES1:
5246 case OMAPDSS_VER_OMAP34xx_ES3:
5247 case OMAPDSS_VER_OMAP3630:
5248 case OMAPDSS_VER_AM35xx:
5249 pll->hw = &dss_omap3_dsi_pll_hw;
5250 break;
5251
5252 case OMAPDSS_VER_OMAP4430_ES1:
5253 case OMAPDSS_VER_OMAP4430_ES2:
5254 case OMAPDSS_VER_OMAP4:
5255 pll->hw = &dss_omap4_dsi_pll_hw;
5256 break;
5257
5258 case OMAPDSS_VER_OMAP5:
5259 pll->hw = &dss_omap5_dsi_pll_hw;
5260 break;
5261
5262 default:
5263 return -ENODEV;
5264 }
5265
5266 pll->ops = &dsi_pll_ops;
5267
5268 r = dss_pll_register(pll);
5269 if (r)
5270 return r;
5271
5272 return 0;
5273}
5274
5456/* DSI1 HW IP initialisation */ 5275/* DSI1 HW IP initialisation */
5457static int omap_dsihw_probe(struct platform_device *dsidev) 5276static int omap_dsihw_probe(struct platform_device *dsidev)
5458{ 5277{
@@ -5598,12 +5417,12 @@ static int omap_dsihw_probe(struct platform_device *dsidev)
5598 dsi->vc[i].vc_id = 0; 5417 dsi->vc[i].vc_id = 0;
5599 } 5418 }
5600 5419
5601 dsi_calc_clock_param_ranges(dsidev);
5602
5603 r = dsi_get_clocks(dsidev); 5420 r = dsi_get_clocks(dsidev);
5604 if (r) 5421 if (r)
5605 return r; 5422 return r;
5606 5423
5424 dsi_init_pll_data(dsidev);
5425
5607 pm_runtime_enable(&dsidev->dev); 5426 pm_runtime_enable(&dsidev->dev);
5608 5427
5609 r = dsi_runtime_get(dsidev); 5428 r = dsi_runtime_get(dsidev);
@@ -5672,6 +5491,8 @@ static int __exit omap_dsihw_remove(struct platform_device *dsidev)
5672 5491
5673 WARN_ON(dsi->scp_clk_refcount > 0); 5492 WARN_ON(dsi->scp_clk_refcount > 0);
5674 5493
5494 dss_pll_unregister(&dsi->pll);
5495
5675 dsi_uninit_output(dsidev); 5496 dsi_uninit_output(dsidev);
5676 5497
5677 pm_runtime_disable(&dsidev->dev); 5498 pm_runtime_disable(&dsidev->dev);
diff --git a/drivers/video/fbdev/omap2/dss/dss-of.c b/drivers/video/fbdev/omap2/dss/dss-of.c
index a4b20aaf6142..928ee639c0c1 100644
--- a/drivers/video/fbdev/omap2/dss/dss-of.c
+++ b/drivers/video/fbdev/omap2/dss/dss-of.c
@@ -20,6 +20,8 @@
20 20
21#include <video/omapdss.h> 21#include <video/omapdss.h>
22 22
23#include "dss.h"
24
23struct device_node * 25struct device_node *
24omapdss_of_get_next_port(const struct device_node *parent, 26omapdss_of_get_next_port(const struct device_node *parent,
25 struct device_node *prev) 27 struct device_node *prev)
@@ -84,20 +86,17 @@ omapdss_of_get_next_endpoint(const struct device_node *parent,
84} 86}
85EXPORT_SYMBOL_GPL(omapdss_of_get_next_endpoint); 87EXPORT_SYMBOL_GPL(omapdss_of_get_next_endpoint);
86 88
87static struct device_node * 89struct device_node *dss_of_port_get_parent_device(struct device_node *port)
88omapdss_of_get_remote_device_node(const struct device_node *node)
89{ 90{
90 struct device_node *np; 91 struct device_node *np;
91 int i; 92 int i;
92 93
93 np = of_parse_phandle(node, "remote-endpoint", 0); 94 if (!port)
94
95 if (!np)
96 return NULL; 95 return NULL;
97 96
98 np = of_get_next_parent(np); 97 np = of_get_next_parent(port);
99 98
100 for (i = 0; i < 3 && np; ++i) { 99 for (i = 0; i < 2 && np; ++i) {
101 struct property *prop; 100 struct property *prop;
102 101
103 prop = of_find_property(np, "compatible", NULL); 102 prop = of_find_property(np, "compatible", NULL);
@@ -111,6 +110,31 @@ omapdss_of_get_remote_device_node(const struct device_node *node)
111 return NULL; 110 return NULL;
112} 111}
113 112
113u32 dss_of_port_get_port_number(struct device_node *port)
114{
115 int r;
116 u32 reg;
117
118 r = of_property_read_u32(port, "reg", &reg);
119 if (r)
120 reg = 0;
121
122 return reg;
123}
124
125static struct device_node *omapdss_of_get_remote_port(const struct device_node *node)
126{
127 struct device_node *np;
128
129 np = of_parse_phandle(node, "remote-endpoint", 0);
130 if (!np)
131 return NULL;
132
133 np = of_get_next_parent(np);
134
135 return np;
136}
137
114struct device_node * 138struct device_node *
115omapdss_of_get_first_endpoint(const struct device_node *parent) 139omapdss_of_get_first_endpoint(const struct device_node *parent)
116{ 140{
@@ -133,27 +157,25 @@ struct omap_dss_device *
133omapdss_of_find_source_for_first_ep(struct device_node *node) 157omapdss_of_find_source_for_first_ep(struct device_node *node)
134{ 158{
135 struct device_node *ep; 159 struct device_node *ep;
136 struct device_node *src_node; 160 struct device_node *src_port;
137 struct omap_dss_device *src; 161 struct omap_dss_device *src;
138 162
139 ep = omapdss_of_get_first_endpoint(node); 163 ep = omapdss_of_get_first_endpoint(node);
140 if (!ep) 164 if (!ep)
141 return ERR_PTR(-EINVAL); 165 return ERR_PTR(-EINVAL);
142 166
143 src_node = omapdss_of_get_remote_device_node(ep); 167 src_port = omapdss_of_get_remote_port(ep);
144 168 if (!src_port) {
145 of_node_put(ep); 169 of_node_put(ep);
146
147 if (!src_node)
148 return ERR_PTR(-EINVAL); 170 return ERR_PTR(-EINVAL);
171 }
149 172
150 src = omap_dss_find_output_by_node(src_node); 173 of_node_put(ep);
151 174
152 of_node_put(src_node); 175 src = omap_dss_find_output_by_port_node(src_port);
153 176
154 if (!src) 177 of_node_put(src_port);
155 return ERR_PTR(-EPROBE_DEFER);
156 178
157 return src; 179 return src ? src : ERR_PTR(-EPROBE_DEFER);
158} 180}
159EXPORT_SYMBOL_GPL(omapdss_of_find_source_for_first_ep); 181EXPORT_SYMBOL_GPL(omapdss_of_find_source_for_first_ep);
diff --git a/drivers/video/fbdev/omap2/dss/dss.c b/drivers/video/fbdev/omap2/dss/dss.c
index 14bcd6c43f72..702c495083ed 100644
--- a/drivers/video/fbdev/omap2/dss/dss.c
+++ b/drivers/video/fbdev/omap2/dss/dss.c
@@ -70,7 +70,9 @@ struct dss_features {
70 u8 fck_div_max; 70 u8 fck_div_max;
71 u8 dss_fck_multiplier; 71 u8 dss_fck_multiplier;
72 const char *parent_clk_name; 72 const char *parent_clk_name;
73 int (*dpi_select_source)(enum omap_channel channel); 73 enum omap_display_type *ports;
74 int num_ports;
75 int (*dpi_select_source)(int port, enum omap_channel channel);
74}; 76};
75 77
76static struct { 78static struct {
@@ -294,7 +296,6 @@ static void dss_dump_regs(struct seq_file *s)
294 296
295static void dss_select_dispc_clk_source(enum omap_dss_clk_source clk_src) 297static void dss_select_dispc_clk_source(enum omap_dss_clk_source clk_src)
296{ 298{
297 struct platform_device *dsidev;
298 int b; 299 int b;
299 u8 start, end; 300 u8 start, end;
300 301
@@ -304,13 +305,9 @@ static void dss_select_dispc_clk_source(enum omap_dss_clk_source clk_src)
304 break; 305 break;
305 case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC: 306 case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC:
306 b = 1; 307 b = 1;
307 dsidev = dsi_get_dsidev_from_id(0);
308 dsi_wait_pll_hsdiv_dispc_active(dsidev);
309 break; 308 break;
310 case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC: 309 case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC:
311 b = 2; 310 b = 2;
312 dsidev = dsi_get_dsidev_from_id(1);
313 dsi_wait_pll_hsdiv_dispc_active(dsidev);
314 break; 311 break;
315 default: 312 default:
316 BUG(); 313 BUG();
@@ -327,7 +324,6 @@ static void dss_select_dispc_clk_source(enum omap_dss_clk_source clk_src)
327void dss_select_dsi_clk_source(int dsi_module, 324void dss_select_dsi_clk_source(int dsi_module,
328 enum omap_dss_clk_source clk_src) 325 enum omap_dss_clk_source clk_src)
329{ 326{
330 struct platform_device *dsidev;
331 int b, pos; 327 int b, pos;
332 328
333 switch (clk_src) { 329 switch (clk_src) {
@@ -337,14 +333,10 @@ void dss_select_dsi_clk_source(int dsi_module,
337 case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI: 333 case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI:
338 BUG_ON(dsi_module != 0); 334 BUG_ON(dsi_module != 0);
339 b = 1; 335 b = 1;
340 dsidev = dsi_get_dsidev_from_id(0);
341 dsi_wait_pll_hsdiv_dsi_active(dsidev);
342 break; 336 break;
343 case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI: 337 case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI:
344 BUG_ON(dsi_module != 1); 338 BUG_ON(dsi_module != 1);
345 b = 1; 339 b = 1;
346 dsidev = dsi_get_dsidev_from_id(1);
347 dsi_wait_pll_hsdiv_dsi_active(dsidev);
348 break; 340 break;
349 default: 341 default:
350 BUG(); 342 BUG();
@@ -360,7 +352,6 @@ void dss_select_dsi_clk_source(int dsi_module,
360void dss_select_lcd_clk_source(enum omap_channel channel, 352void dss_select_lcd_clk_source(enum omap_channel channel,
361 enum omap_dss_clk_source clk_src) 353 enum omap_dss_clk_source clk_src)
362{ 354{
363 struct platform_device *dsidev;
364 int b, ix, pos; 355 int b, ix, pos;
365 356
366 if (!dss_has_feature(FEAT_LCD_CLK_SRC)) { 357 if (!dss_has_feature(FEAT_LCD_CLK_SRC)) {
@@ -375,15 +366,11 @@ void dss_select_lcd_clk_source(enum omap_channel channel,
375 case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC: 366 case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC:
376 BUG_ON(channel != OMAP_DSS_CHANNEL_LCD); 367 BUG_ON(channel != OMAP_DSS_CHANNEL_LCD);
377 b = 1; 368 b = 1;
378 dsidev = dsi_get_dsidev_from_id(0);
379 dsi_wait_pll_hsdiv_dispc_active(dsidev);
380 break; 369 break;
381 case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC: 370 case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC:
382 BUG_ON(channel != OMAP_DSS_CHANNEL_LCD2 && 371 BUG_ON(channel != OMAP_DSS_CHANNEL_LCD2 &&
383 channel != OMAP_DSS_CHANNEL_LCD3); 372 channel != OMAP_DSS_CHANNEL_LCD3);
384 b = 1; 373 b = 1;
385 dsidev = dsi_get_dsidev_from_id(1);
386 dsi_wait_pll_hsdiv_dispc_active(dsidev);
387 break; 374 break;
388 default: 375 default:
389 BUG(); 376 BUG();
@@ -564,7 +551,7 @@ enum dss_hdmi_venc_clk_source_select dss_get_hdmi_venc_clk_source(void)
564 return REG_GET(DSS_CONTROL, 15, 15); 551 return REG_GET(DSS_CONTROL, 15, 15);
565} 552}
566 553
567static int dss_dpi_select_source_omap2_omap3(enum omap_channel channel) 554static int dss_dpi_select_source_omap2_omap3(int port, enum omap_channel channel)
568{ 555{
569 if (channel != OMAP_DSS_CHANNEL_LCD) 556 if (channel != OMAP_DSS_CHANNEL_LCD)
570 return -EINVAL; 557 return -EINVAL;
@@ -572,7 +559,7 @@ static int dss_dpi_select_source_omap2_omap3(enum omap_channel channel)
572 return 0; 559 return 0;
573} 560}
574 561
575static int dss_dpi_select_source_omap4(enum omap_channel channel) 562static int dss_dpi_select_source_omap4(int port, enum omap_channel channel)
576{ 563{
577 int val; 564 int val;
578 565
@@ -592,7 +579,7 @@ static int dss_dpi_select_source_omap4(enum omap_channel channel)
592 return 0; 579 return 0;
593} 580}
594 581
595static int dss_dpi_select_source_omap5(enum omap_channel channel) 582static int dss_dpi_select_source_omap5(int port, enum omap_channel channel)
596{ 583{
597 int val; 584 int val;
598 585
@@ -618,9 +605,9 @@ static int dss_dpi_select_source_omap5(enum omap_channel channel)
618 return 0; 605 return 0;
619} 606}
620 607
621int dss_dpi_select_source(enum omap_channel channel) 608int dss_dpi_select_source(int port, enum omap_channel channel)
622{ 609{
623 return dss.feat->dpi_select_source(channel); 610 return dss.feat->dpi_select_source(port, channel);
624} 611}
625 612
626static int dss_get_clocks(void) 613static int dss_get_clocks(void)
@@ -689,6 +676,16 @@ void dss_debug_dump_clocks(struct seq_file *s)
689} 676}
690#endif 677#endif
691 678
679
680static enum omap_display_type omap2plus_ports[] = {
681 OMAP_DISPLAY_TYPE_DPI,
682};
683
684static enum omap_display_type omap34xx_ports[] = {
685 OMAP_DISPLAY_TYPE_DPI,
686 OMAP_DISPLAY_TYPE_SDI,
687};
688
692static const struct dss_features omap24xx_dss_feats __initconst = { 689static const struct dss_features omap24xx_dss_feats __initconst = {
693 /* 690 /*
694 * fck div max is really 16, but the divider range has gaps. The range 691 * fck div max is really 16, but the divider range has gaps. The range
@@ -698,6 +695,8 @@ static const struct dss_features omap24xx_dss_feats __initconst = {
698 .dss_fck_multiplier = 2, 695 .dss_fck_multiplier = 2,
699 .parent_clk_name = "core_ck", 696 .parent_clk_name = "core_ck",
700 .dpi_select_source = &dss_dpi_select_source_omap2_omap3, 697 .dpi_select_source = &dss_dpi_select_source_omap2_omap3,
698 .ports = omap2plus_ports,
699 .num_ports = ARRAY_SIZE(omap2plus_ports),
701}; 700};
702 701
703static const struct dss_features omap34xx_dss_feats __initconst = { 702static const struct dss_features omap34xx_dss_feats __initconst = {
@@ -705,6 +704,8 @@ static const struct dss_features omap34xx_dss_feats __initconst = {
705 .dss_fck_multiplier = 2, 704 .dss_fck_multiplier = 2,
706 .parent_clk_name = "dpll4_ck", 705 .parent_clk_name = "dpll4_ck",
707 .dpi_select_source = &dss_dpi_select_source_omap2_omap3, 706 .dpi_select_source = &dss_dpi_select_source_omap2_omap3,
707 .ports = omap34xx_ports,
708 .num_ports = ARRAY_SIZE(omap34xx_ports),
708}; 709};
709 710
710static const struct dss_features omap3630_dss_feats __initconst = { 711static const struct dss_features omap3630_dss_feats __initconst = {
@@ -712,6 +713,8 @@ static const struct dss_features omap3630_dss_feats __initconst = {
712 .dss_fck_multiplier = 1, 713 .dss_fck_multiplier = 1,
713 .parent_clk_name = "dpll4_ck", 714 .parent_clk_name = "dpll4_ck",
714 .dpi_select_source = &dss_dpi_select_source_omap2_omap3, 715 .dpi_select_source = &dss_dpi_select_source_omap2_omap3,
716 .ports = omap2plus_ports,
717 .num_ports = ARRAY_SIZE(omap2plus_ports),
715}; 718};
716 719
717static const struct dss_features omap44xx_dss_feats __initconst = { 720static const struct dss_features omap44xx_dss_feats __initconst = {
@@ -719,6 +722,8 @@ static const struct dss_features omap44xx_dss_feats __initconst = {
719 .dss_fck_multiplier = 1, 722 .dss_fck_multiplier = 1,
720 .parent_clk_name = "dpll_per_x2_ck", 723 .parent_clk_name = "dpll_per_x2_ck",
721 .dpi_select_source = &dss_dpi_select_source_omap4, 724 .dpi_select_source = &dss_dpi_select_source_omap4,
725 .ports = omap2plus_ports,
726 .num_ports = ARRAY_SIZE(omap2plus_ports),
722}; 727};
723 728
724static const struct dss_features omap54xx_dss_feats __initconst = { 729static const struct dss_features omap54xx_dss_feats __initconst = {
@@ -726,6 +731,8 @@ static const struct dss_features omap54xx_dss_feats __initconst = {
726 .dss_fck_multiplier = 1, 731 .dss_fck_multiplier = 1,
727 .parent_clk_name = "dpll_per_x2_ck", 732 .parent_clk_name = "dpll_per_x2_ck",
728 .dpi_select_source = &dss_dpi_select_source_omap5, 733 .dpi_select_source = &dss_dpi_select_source_omap5,
734 .ports = omap2plus_ports,
735 .num_ports = ARRAY_SIZE(omap2plus_ports),
729}; 736};
730 737
731static const struct dss_features am43xx_dss_feats __initconst = { 738static const struct dss_features am43xx_dss_feats __initconst = {
@@ -733,6 +740,8 @@ static const struct dss_features am43xx_dss_feats __initconst = {
733 .dss_fck_multiplier = 0, 740 .dss_fck_multiplier = 0,
734 .parent_clk_name = NULL, 741 .parent_clk_name = NULL,
735 .dpi_select_source = &dss_dpi_select_source_omap2_omap3, 742 .dpi_select_source = &dss_dpi_select_source_omap2_omap3,
743 .ports = omap2plus_ports,
744 .num_ports = ARRAY_SIZE(omap2plus_ports),
736}; 745};
737 746
738static int __init dss_init_features(struct platform_device *pdev) 747static int __init dss_init_features(struct platform_device *pdev)
@@ -798,37 +807,77 @@ static int __init dss_init_ports(struct platform_device *pdev)
798 if (!port) 807 if (!port)
799 return 0; 808 return 0;
800 809
810 if (dss.feat->num_ports == 0)
811 return 0;
812
801 do { 813 do {
814 enum omap_display_type port_type;
802 u32 reg; 815 u32 reg;
803 816
804 r = of_property_read_u32(port, "reg", &reg); 817 r = of_property_read_u32(port, "reg", &reg);
805 if (r) 818 if (r)
806 reg = 0; 819 reg = 0;
807 820
808#ifdef CONFIG_OMAP2_DSS_DPI 821 if (reg >= dss.feat->num_ports)
809 if (reg == 0) 822 continue;
810 dpi_init_port(pdev, port);
811#endif
812 823
813#ifdef CONFIG_OMAP2_DSS_SDI 824 port_type = dss.feat->ports[reg];
814 if (reg == 1)
815 sdi_init_port(pdev, port);
816#endif
817 825
826 switch (port_type) {
827 case OMAP_DISPLAY_TYPE_DPI:
828 dpi_init_port(pdev, port);
829 break;
830 case OMAP_DISPLAY_TYPE_SDI:
831 sdi_init_port(pdev, port);
832 break;
833 default:
834 break;
835 }
818 } while ((port = omapdss_of_get_next_port(parent, port)) != NULL); 836 } while ((port = omapdss_of_get_next_port(parent, port)) != NULL);
819 837
820 return 0; 838 return 0;
821} 839}
822 840
823static void __exit dss_uninit_ports(void) 841static void __exit dss_uninit_ports(struct platform_device *pdev)
824{ 842{
825#ifdef CONFIG_OMAP2_DSS_DPI 843 struct device_node *parent = pdev->dev.of_node;
826 dpi_uninit_port(); 844 struct device_node *port;
827#endif
828 845
829#ifdef CONFIG_OMAP2_DSS_SDI 846 if (parent == NULL)
830 sdi_uninit_port(); 847 return;
831#endif 848
849 port = omapdss_of_get_next_port(parent, NULL);
850 if (!port)
851 return;
852
853 if (dss.feat->num_ports == 0)
854 return;
855
856 do {
857 enum omap_display_type port_type;
858 u32 reg;
859 int r;
860
861 r = of_property_read_u32(port, "reg", &reg);
862 if (r)
863 reg = 0;
864
865 if (reg >= dss.feat->num_ports)
866 continue;
867
868 port_type = dss.feat->ports[reg];
869
870 switch (port_type) {
871 case OMAP_DISPLAY_TYPE_DPI:
872 dpi_uninit_port(port);
873 break;
874 case OMAP_DISPLAY_TYPE_SDI:
875 sdi_uninit_port(port);
876 break;
877 default:
878 break;
879 }
880 } while ((port = omapdss_of_get_next_port(parent, port)) != NULL);
832} 881}
833 882
834/* DSS HW IP initialisation */ 883/* DSS HW IP initialisation */
@@ -910,7 +959,7 @@ err_setup_clocks:
910 959
911static int __exit omap_dsshw_remove(struct platform_device *pdev) 960static int __exit omap_dsshw_remove(struct platform_device *pdev)
912{ 961{
913 dss_uninit_ports(); 962 dss_uninit_ports(pdev);
914 963
915 pm_runtime_disable(&pdev->dev); 964 pm_runtime_disable(&pdev->dev);
916 965
diff --git a/drivers/video/fbdev/omap2/dss/dss.h b/drivers/video/fbdev/omap2/dss/dss.h
index 8ff22c134c62..14fb0c23f4a2 100644
--- a/drivers/video/fbdev/omap2/dss/dss.h
+++ b/drivers/video/fbdev/omap2/dss/dss.h
@@ -100,35 +100,77 @@ enum dss_writeback_channel {
100 DSS_WB_LCD3_MGR = 7, 100 DSS_WB_LCD3_MGR = 7,
101}; 101};
102 102
103struct dispc_clock_info { 103struct dss_pll;
104
105#define DSS_PLL_MAX_HSDIVS 4
106
107/*
108 * Type-A PLLs: clkout[]/mX[] refer to hsdiv outputs m4, m5, m6, m7.
109 * Type-B PLLs: clkout[0] refers to m2.
110 */
111struct dss_pll_clock_info {
104 /* rates that we get with dividers below */ 112 /* rates that we get with dividers below */
105 unsigned long lck; 113 unsigned long fint;
106 unsigned long pck; 114 unsigned long clkdco;
115 unsigned long clkout[DSS_PLL_MAX_HSDIVS];
107 116
108 /* dividers */ 117 /* dividers */
109 u16 lck_div; 118 u16 n;
110 u16 pck_div; 119 u16 m;
120 u32 mf;
121 u16 mX[DSS_PLL_MAX_HSDIVS];
122 u16 sd;
123};
124
125struct dss_pll_ops {
126 int (*enable)(struct dss_pll *pll);
127 void (*disable)(struct dss_pll *pll);
128 int (*set_config)(struct dss_pll *pll,
129 const struct dss_pll_clock_info *cinfo);
130};
131
132struct dss_pll_hw {
133 unsigned n_max;
134 unsigned m_min;
135 unsigned m_max;
136 unsigned mX_max;
137
138 unsigned long fint_min, fint_max;
139 unsigned long clkdco_min, clkdco_low, clkdco_max;
140
141 u8 n_msb, n_lsb;
142 u8 m_msb, m_lsb;
143 u8 mX_msb[DSS_PLL_MAX_HSDIVS], mX_lsb[DSS_PLL_MAX_HSDIVS];
144
145 bool has_stopmode;
146 bool has_freqsel;
147 bool has_selfreqdco;
148 bool has_refsel;
111}; 149};
112 150
113struct dsi_clock_info { 151struct dss_pll {
152 const char *name;
153
154 struct clk *clkin;
155 struct regulator *regulator;
156
157 void __iomem *base;
158
159 const struct dss_pll_hw *hw;
160
161 const struct dss_pll_ops *ops;
162
163 struct dss_pll_clock_info cinfo;
164};
165
166struct dispc_clock_info {
114 /* rates that we get with dividers below */ 167 /* rates that we get with dividers below */
115 unsigned long fint; 168 unsigned long lck;
116 unsigned long clkin4ddr; 169 unsigned long pck;
117 unsigned long clkin;
118 unsigned long dsi_pll_hsdiv_dispc_clk; /* OMAP3: DSI1_PLL_CLK
119 * OMAP4: PLLx_CLK1 */
120 unsigned long dsi_pll_hsdiv_dsi_clk; /* OMAP3: DSI2_PLL_CLK
121 * OMAP4: PLLx_CLK2 */
122 unsigned long lp_clk;
123 170
124 /* dividers */ 171 /* dividers */
125 u16 regn; 172 u16 lck_div;
126 u16 regm; 173 u16 pck_div;
127 u16 regm_dispc; /* OMAP3: REGM3
128 * OMAP4: REGM4 */
129 u16 regm_dsi; /* OMAP3: REGM4
130 * OMAP4: REGM5 */
131 u16 lp_clk_div;
132}; 174};
133 175
134struct dss_lcd_mgr_config { 176struct dss_lcd_mgr_config {
@@ -209,12 +251,16 @@ int dss_init_platform_driver(void) __init;
209void dss_uninit_platform_driver(void); 251void dss_uninit_platform_driver(void);
210 252
211unsigned long dss_get_dispc_clk_rate(void); 253unsigned long dss_get_dispc_clk_rate(void);
212int dss_dpi_select_source(enum omap_channel channel); 254int dss_dpi_select_source(int port, enum omap_channel channel);
213void dss_select_hdmi_venc_clk_source(enum dss_hdmi_venc_clk_source_select); 255void dss_select_hdmi_venc_clk_source(enum dss_hdmi_venc_clk_source_select);
214enum dss_hdmi_venc_clk_source_select dss_get_hdmi_venc_clk_source(void); 256enum dss_hdmi_venc_clk_source_select dss_get_hdmi_venc_clk_source(void);
215const char *dss_get_generic_clk_source_name(enum omap_dss_clk_source clk_src); 257const char *dss_get_generic_clk_source_name(enum omap_dss_clk_source clk_src);
216void dss_dump_clocks(struct seq_file *s); 258void dss_dump_clocks(struct seq_file *s);
217 259
260/* dss-of */
261struct device_node *dss_of_port_get_parent_device(struct device_node *port);
262u32 dss_of_port_get_port_number(struct device_node *port);
263
218#if defined(CONFIG_OMAP2_DSS_DEBUGFS) 264#if defined(CONFIG_OMAP2_DSS_DEBUGFS)
219void dss_debug_dump_clocks(struct seq_file *s); 265void dss_debug_dump_clocks(struct seq_file *s);
220#endif 266#endif
@@ -244,16 +290,22 @@ bool dss_div_calc(unsigned long pck, unsigned long fck_min,
244int sdi_init_platform_driver(void) __init; 290int sdi_init_platform_driver(void) __init;
245void sdi_uninit_platform_driver(void) __exit; 291void sdi_uninit_platform_driver(void) __exit;
246 292
293#ifdef CONFIG_OMAP2_DSS_SDI
247int sdi_init_port(struct platform_device *pdev, struct device_node *port) __init; 294int sdi_init_port(struct platform_device *pdev, struct device_node *port) __init;
248void sdi_uninit_port(void) __exit; 295void sdi_uninit_port(struct device_node *port) __exit;
296#else
297static inline int __init sdi_init_port(struct platform_device *pdev,
298 struct device_node *port)
299{
300 return 0;
301}
302static inline void __exit sdi_uninit_port(struct device_node *port)
303{
304}
305#endif
249 306
250/* DSI */ 307/* DSI */
251 308
252typedef bool (*dsi_pll_calc_func)(int regn, int regm, unsigned long fint,
253 unsigned long pll, void *data);
254typedef bool (*dsi_hsdiv_calc_func)(int regm_dispc, unsigned long dispc,
255 void *data);
256
257#ifdef CONFIG_OMAP2_DSS_DSI 309#ifdef CONFIG_OMAP2_DSS_DSI
258 310
259struct dentry; 311struct dentry;
@@ -262,104 +314,36 @@ struct file_operations;
262int dsi_init_platform_driver(void) __init; 314int dsi_init_platform_driver(void) __init;
263void dsi_uninit_platform_driver(void) __exit; 315void dsi_uninit_platform_driver(void) __exit;
264 316
265int dsi_runtime_get(struct platform_device *dsidev);
266void dsi_runtime_put(struct platform_device *dsidev);
267
268void dsi_dump_clocks(struct seq_file *s); 317void dsi_dump_clocks(struct seq_file *s);
269 318
270void dsi_irq_handler(void); 319void dsi_irq_handler(void);
271u8 dsi_get_pixel_size(enum omap_dss_dsi_pixel_format fmt); 320u8 dsi_get_pixel_size(enum omap_dss_dsi_pixel_format fmt);
272 321
273unsigned long dsi_get_pll_clkin(struct platform_device *dsidev);
274
275bool dsi_hsdiv_calc(struct platform_device *dsidev, unsigned long pll,
276 unsigned long out_min, dsi_hsdiv_calc_func func, void *data);
277bool dsi_pll_calc(struct platform_device *dsidev, unsigned long clkin,
278 unsigned long pll_min, unsigned long pll_max,
279 dsi_pll_calc_func func, void *data);
280
281unsigned long dsi_get_pll_hsdiv_dispc_rate(struct platform_device *dsidev);
282int dsi_pll_set_clock_div(struct platform_device *dsidev,
283 struct dsi_clock_info *cinfo);
284int dsi_pll_init(struct platform_device *dsidev, bool enable_hsclk,
285 bool enable_hsdiv);
286void dsi_pll_uninit(struct platform_device *dsidev, bool disconnect_lanes);
287void dsi_wait_pll_hsdiv_dispc_active(struct platform_device *dsidev);
288void dsi_wait_pll_hsdiv_dsi_active(struct platform_device *dsidev);
289struct platform_device *dsi_get_dsidev_from_id(int module);
290#else 322#else
291static inline int dsi_runtime_get(struct platform_device *dsidev)
292{
293 return 0;
294}
295static inline void dsi_runtime_put(struct platform_device *dsidev)
296{
297}
298static inline u8 dsi_get_pixel_size(enum omap_dss_dsi_pixel_format fmt) 323static inline u8 dsi_get_pixel_size(enum omap_dss_dsi_pixel_format fmt)
299{ 324{
300 WARN("%s: DSI not compiled in, returning pixel_size as 0\n", __func__); 325 WARN("%s: DSI not compiled in, returning pixel_size as 0\n", __func__);
301 return 0; 326 return 0;
302} 327}
303static inline unsigned long dsi_get_pll_hsdiv_dispc_rate(struct platform_device *dsidev)
304{
305 WARN("%s: DSI not compiled in, returning rate as 0\n", __func__);
306 return 0;
307}
308static inline int dsi_pll_set_clock_div(struct platform_device *dsidev,
309 struct dsi_clock_info *cinfo)
310{
311 WARN("%s: DSI not compiled in\n", __func__);
312 return -ENODEV;
313}
314static inline int dsi_pll_init(struct platform_device *dsidev,
315 bool enable_hsclk, bool enable_hsdiv)
316{
317 WARN("%s: DSI not compiled in\n", __func__);
318 return -ENODEV;
319}
320static inline void dsi_pll_uninit(struct platform_device *dsidev,
321 bool disconnect_lanes)
322{
323}
324static inline void dsi_wait_pll_hsdiv_dispc_active(struct platform_device *dsidev)
325{
326}
327static inline void dsi_wait_pll_hsdiv_dsi_active(struct platform_device *dsidev)
328{
329}
330static inline struct platform_device *dsi_get_dsidev_from_id(int module)
331{
332 return NULL;
333}
334
335static inline unsigned long dsi_get_pll_clkin(struct platform_device *dsidev)
336{
337 return 0;
338}
339
340static inline bool dsi_hsdiv_calc(struct platform_device *dsidev,
341 unsigned long pll, unsigned long out_min,
342 dsi_hsdiv_calc_func func, void *data)
343{
344 return false;
345}
346
347static inline bool dsi_pll_calc(struct platform_device *dsidev,
348 unsigned long clkin,
349 unsigned long pll_min, unsigned long pll_max,
350 dsi_pll_calc_func func, void *data)
351{
352 return false;
353}
354
355#endif 328#endif
356 329
357/* DPI */ 330/* DPI */
358int dpi_init_platform_driver(void) __init; 331int dpi_init_platform_driver(void) __init;
359void dpi_uninit_platform_driver(void) __exit; 332void dpi_uninit_platform_driver(void) __exit;
360 333
334#ifdef CONFIG_OMAP2_DSS_DPI
361int dpi_init_port(struct platform_device *pdev, struct device_node *port) __init; 335int dpi_init_port(struct platform_device *pdev, struct device_node *port) __init;
362void dpi_uninit_port(void) __exit; 336void dpi_uninit_port(struct device_node *port) __exit;
337#else
338static inline int __init dpi_init_port(struct platform_device *pdev,
339 struct device_node *port)
340{
341 return 0;
342}
343static inline void __exit dpi_uninit_port(struct device_node *port)
344{
345}
346#endif
363 347
364/* DISPC */ 348/* DISPC */
365int dispc_init_platform_driver(void) __init; 349int dispc_init_platform_driver(void) __init;
@@ -438,4 +422,29 @@ static inline void dss_collect_irq_stats(u32 irqstatus, unsigned *irq_arr)
438} 422}
439#endif 423#endif
440 424
425/* PLL */
426typedef bool (*dss_pll_calc_func)(int n, int m, unsigned long fint,
427 unsigned long clkdco, void *data);
428typedef bool (*dss_hsdiv_calc_func)(int m_dispc, unsigned long dispc,
429 void *data);
430
431int dss_pll_register(struct dss_pll *pll);
432void dss_pll_unregister(struct dss_pll *pll);
433struct dss_pll *dss_pll_find(const char *name);
434int dss_pll_enable(struct dss_pll *pll);
435void dss_pll_disable(struct dss_pll *pll);
436int dss_pll_set_config(struct dss_pll *pll,
437 const struct dss_pll_clock_info *cinfo);
438
439bool dss_pll_hsdiv_calc(const struct dss_pll *pll, unsigned long clkdco,
440 unsigned long out_min, unsigned long out_max,
441 dss_hsdiv_calc_func func, void *data);
442bool dss_pll_calc(const struct dss_pll *pll, unsigned long clkin,
443 unsigned long pll_min, unsigned long pll_max,
444 dss_pll_calc_func func, void *data);
445int dss_pll_write_config_type_a(struct dss_pll *pll,
446 const struct dss_pll_clock_info *cinfo);
447int dss_pll_write_config_type_b(struct dss_pll *pll,
448 const struct dss_pll_clock_info *cinfo);
449
441#endif 450#endif
diff --git a/drivers/video/fbdev/omap2/dss/dss_features.c b/drivers/video/fbdev/omap2/dss/dss_features.c
index 15088df7bd16..0e3da809473c 100644
--- a/drivers/video/fbdev/omap2/dss/dss_features.c
+++ b/drivers/video/fbdev/omap2/dss/dss_features.c
@@ -72,10 +72,6 @@ static const struct dss_reg_field omap2_dss_reg_fields[] = {
72 [FEAT_REG_HORIZONTALACCU] = { 9, 0 }, 72 [FEAT_REG_HORIZONTALACCU] = { 9, 0 },
73 [FEAT_REG_VERTICALACCU] = { 25, 16 }, 73 [FEAT_REG_VERTICALACCU] = { 25, 16 },
74 [FEAT_REG_DISPC_CLK_SWITCH] = { 0, 0 }, 74 [FEAT_REG_DISPC_CLK_SWITCH] = { 0, 0 },
75 [FEAT_REG_DSIPLL_REGN] = { 0, 0 },
76 [FEAT_REG_DSIPLL_REGM] = { 0, 0 },
77 [FEAT_REG_DSIPLL_REGM_DISPC] = { 0, 0 },
78 [FEAT_REG_DSIPLL_REGM_DSI] = { 0, 0 },
79}; 75};
80 76
81static const struct dss_reg_field omap3_dss_reg_fields[] = { 77static const struct dss_reg_field omap3_dss_reg_fields[] = {
@@ -87,10 +83,6 @@ static const struct dss_reg_field omap3_dss_reg_fields[] = {
87 [FEAT_REG_HORIZONTALACCU] = { 9, 0 }, 83 [FEAT_REG_HORIZONTALACCU] = { 9, 0 },
88 [FEAT_REG_VERTICALACCU] = { 25, 16 }, 84 [FEAT_REG_VERTICALACCU] = { 25, 16 },
89 [FEAT_REG_DISPC_CLK_SWITCH] = { 0, 0 }, 85 [FEAT_REG_DISPC_CLK_SWITCH] = { 0, 0 },
90 [FEAT_REG_DSIPLL_REGN] = { 7, 1 },
91 [FEAT_REG_DSIPLL_REGM] = { 18, 8 },
92 [FEAT_REG_DSIPLL_REGM_DISPC] = { 22, 19 },
93 [FEAT_REG_DSIPLL_REGM_DSI] = { 26, 23 },
94}; 86};
95 87
96static const struct dss_reg_field am43xx_dss_reg_fields[] = { 88static const struct dss_reg_field am43xx_dss_reg_fields[] = {
@@ -113,10 +105,6 @@ static const struct dss_reg_field omap4_dss_reg_fields[] = {
113 [FEAT_REG_HORIZONTALACCU] = { 10, 0 }, 105 [FEAT_REG_HORIZONTALACCU] = { 10, 0 },
114 [FEAT_REG_VERTICALACCU] = { 26, 16 }, 106 [FEAT_REG_VERTICALACCU] = { 26, 16 },
115 [FEAT_REG_DISPC_CLK_SWITCH] = { 9, 8 }, 107 [FEAT_REG_DISPC_CLK_SWITCH] = { 9, 8 },
116 [FEAT_REG_DSIPLL_REGN] = { 8, 1 },
117 [FEAT_REG_DSIPLL_REGM] = { 20, 9 },
118 [FEAT_REG_DSIPLL_REGM_DISPC] = { 25, 21 },
119 [FEAT_REG_DSIPLL_REGM_DSI] = { 30, 26 },
120}; 108};
121 109
122static const struct dss_reg_field omap5_dss_reg_fields[] = { 110static const struct dss_reg_field omap5_dss_reg_fields[] = {
@@ -128,10 +116,6 @@ static const struct dss_reg_field omap5_dss_reg_fields[] = {
128 [FEAT_REG_HORIZONTALACCU] = { 10, 0 }, 116 [FEAT_REG_HORIZONTALACCU] = { 10, 0 },
129 [FEAT_REG_VERTICALACCU] = { 26, 16 }, 117 [FEAT_REG_VERTICALACCU] = { 26, 16 },
130 [FEAT_REG_DISPC_CLK_SWITCH] = { 9, 7 }, 118 [FEAT_REG_DISPC_CLK_SWITCH] = { 9, 7 },
131 [FEAT_REG_DSIPLL_REGN] = { 8, 1 },
132 [FEAT_REG_DSIPLL_REGM] = { 20, 9 },
133 [FEAT_REG_DSIPLL_REGM_DISPC] = { 25, 21 },
134 [FEAT_REG_DSIPLL_REGM_DSI] = { 30, 26 },
135}; 119};
136 120
137static const enum omap_display_type omap2_dss_supported_displays[] = { 121static const enum omap_display_type omap2_dss_supported_displays[] = {
@@ -437,12 +421,6 @@ static const char * const omap5_dss_clk_source_names[] = {
437static const struct dss_param_range omap2_dss_param_range[] = { 421static const struct dss_param_range omap2_dss_param_range[] = {
438 [FEAT_PARAM_DSS_FCK] = { 0, 133000000 }, 422 [FEAT_PARAM_DSS_FCK] = { 0, 133000000 },
439 [FEAT_PARAM_DSS_PCD] = { 2, 255 }, 423 [FEAT_PARAM_DSS_PCD] = { 2, 255 },
440 [FEAT_PARAM_DSIPLL_REGN] = { 0, 0 },
441 [FEAT_PARAM_DSIPLL_REGM] = { 0, 0 },
442 [FEAT_PARAM_DSIPLL_REGM_DISPC] = { 0, 0 },
443 [FEAT_PARAM_DSIPLL_REGM_DSI] = { 0, 0 },
444 [FEAT_PARAM_DSIPLL_FINT] = { 0, 0 },
445 [FEAT_PARAM_DSIPLL_LPDIV] = { 0, 0 },
446 [FEAT_PARAM_DOWNSCALE] = { 1, 2 }, 424 [FEAT_PARAM_DOWNSCALE] = { 1, 2 },
447 /* 425 /*
448 * Assuming the line width buffer to be 768 pixels as OMAP2 DISPC 426 * Assuming the line width buffer to be 768 pixels as OMAP2 DISPC
@@ -454,11 +432,6 @@ static const struct dss_param_range omap2_dss_param_range[] = {
454static const struct dss_param_range omap3_dss_param_range[] = { 432static const struct dss_param_range omap3_dss_param_range[] = {
455 [FEAT_PARAM_DSS_FCK] = { 0, 173000000 }, 433 [FEAT_PARAM_DSS_FCK] = { 0, 173000000 },
456 [FEAT_PARAM_DSS_PCD] = { 1, 255 }, 434 [FEAT_PARAM_DSS_PCD] = { 1, 255 },
457 [FEAT_PARAM_DSIPLL_REGN] = { 0, (1 << 7) - 1 },
458 [FEAT_PARAM_DSIPLL_REGM] = { 0, (1 << 11) - 1 },
459 [FEAT_PARAM_DSIPLL_REGM_DISPC] = { 0, (1 << 4) - 1 },
460 [FEAT_PARAM_DSIPLL_REGM_DSI] = { 0, (1 << 4) - 1 },
461 [FEAT_PARAM_DSIPLL_FINT] = { 750000, 2100000 },
462 [FEAT_PARAM_DSIPLL_LPDIV] = { 1, (1 << 13) - 1}, 435 [FEAT_PARAM_DSIPLL_LPDIV] = { 1, (1 << 13) - 1},
463 [FEAT_PARAM_DSI_FCK] = { 0, 173000000 }, 436 [FEAT_PARAM_DSI_FCK] = { 0, 173000000 },
464 [FEAT_PARAM_DOWNSCALE] = { 1, 4 }, 437 [FEAT_PARAM_DOWNSCALE] = { 1, 4 },
@@ -475,11 +448,6 @@ static const struct dss_param_range am43xx_dss_param_range[] = {
475static const struct dss_param_range omap4_dss_param_range[] = { 448static const struct dss_param_range omap4_dss_param_range[] = {
476 [FEAT_PARAM_DSS_FCK] = { 0, 186000000 }, 449 [FEAT_PARAM_DSS_FCK] = { 0, 186000000 },
477 [FEAT_PARAM_DSS_PCD] = { 1, 255 }, 450 [FEAT_PARAM_DSS_PCD] = { 1, 255 },
478 [FEAT_PARAM_DSIPLL_REGN] = { 0, (1 << 8) - 1 },
479 [FEAT_PARAM_DSIPLL_REGM] = { 0, (1 << 12) - 1 },
480 [FEAT_PARAM_DSIPLL_REGM_DISPC] = { 0, (1 << 5) - 1 },
481 [FEAT_PARAM_DSIPLL_REGM_DSI] = { 0, (1 << 5) - 1 },
482 [FEAT_PARAM_DSIPLL_FINT] = { 500000, 2500000 },
483 [FEAT_PARAM_DSIPLL_LPDIV] = { 0, (1 << 13) - 1 }, 451 [FEAT_PARAM_DSIPLL_LPDIV] = { 0, (1 << 13) - 1 },
484 [FEAT_PARAM_DSI_FCK] = { 0, 170000000 }, 452 [FEAT_PARAM_DSI_FCK] = { 0, 170000000 },
485 [FEAT_PARAM_DOWNSCALE] = { 1, 4 }, 453 [FEAT_PARAM_DOWNSCALE] = { 1, 4 },
@@ -489,11 +457,6 @@ static const struct dss_param_range omap4_dss_param_range[] = {
489static const struct dss_param_range omap5_dss_param_range[] = { 457static const struct dss_param_range omap5_dss_param_range[] = {
490 [FEAT_PARAM_DSS_FCK] = { 0, 209250000 }, 458 [FEAT_PARAM_DSS_FCK] = { 0, 209250000 },
491 [FEAT_PARAM_DSS_PCD] = { 1, 255 }, 459 [FEAT_PARAM_DSS_PCD] = { 1, 255 },
492 [FEAT_PARAM_DSIPLL_REGN] = { 0, (1 << 8) - 1 },
493 [FEAT_PARAM_DSIPLL_REGM] = { 0, (1 << 12) - 1 },
494 [FEAT_PARAM_DSIPLL_REGM_DISPC] = { 0, (1 << 5) - 1 },
495 [FEAT_PARAM_DSIPLL_REGM_DSI] = { 0, (1 << 5) - 1 },
496 [FEAT_PARAM_DSIPLL_FINT] = { 150000, 52000000 },
497 [FEAT_PARAM_DSIPLL_LPDIV] = { 0, (1 << 13) - 1 }, 460 [FEAT_PARAM_DSIPLL_LPDIV] = { 0, (1 << 13) - 1 },
498 [FEAT_PARAM_DSI_FCK] = { 0, 209250000 }, 461 [FEAT_PARAM_DSI_FCK] = { 0, 209250000 },
499 [FEAT_PARAM_DOWNSCALE] = { 1, 4 }, 462 [FEAT_PARAM_DOWNSCALE] = { 1, 4 },
@@ -517,7 +480,6 @@ static const enum dss_feat_id omap3430_dss_feat_list[] = {
517 FEAT_LINEBUFFERSPLIT, 480 FEAT_LINEBUFFERSPLIT,
518 FEAT_ROWREPEATENABLE, 481 FEAT_ROWREPEATENABLE,
519 FEAT_RESIZECONF, 482 FEAT_RESIZECONF,
520 FEAT_DSI_PLL_FREQSEL,
521 FEAT_DSI_REVERSE_TXCLKESC, 483 FEAT_DSI_REVERSE_TXCLKESC,
522 FEAT_VENC_REQUIRES_TV_DAC_CLK, 484 FEAT_VENC_REQUIRES_TV_DAC_CLK,
523 FEAT_CPR, 485 FEAT_CPR,
@@ -537,7 +499,6 @@ static const enum dss_feat_id am35xx_dss_feat_list[] = {
537 FEAT_LINEBUFFERSPLIT, 499 FEAT_LINEBUFFERSPLIT,
538 FEAT_ROWREPEATENABLE, 500 FEAT_ROWREPEATENABLE,
539 FEAT_RESIZECONF, 501 FEAT_RESIZECONF,
540 FEAT_DSI_PLL_FREQSEL,
541 FEAT_DSI_REVERSE_TXCLKESC, 502 FEAT_DSI_REVERSE_TXCLKESC,
542 FEAT_VENC_REQUIRES_TV_DAC_CLK, 503 FEAT_VENC_REQUIRES_TV_DAC_CLK,
543 FEAT_CPR, 504 FEAT_CPR,
@@ -572,7 +533,6 @@ static const enum dss_feat_id omap3630_dss_feat_list[] = {
572 FEAT_ROWREPEATENABLE, 533 FEAT_ROWREPEATENABLE,
573 FEAT_RESIZECONF, 534 FEAT_RESIZECONF,
574 FEAT_DSI_PLL_PWR_BUG, 535 FEAT_DSI_PLL_PWR_BUG,
575 FEAT_DSI_PLL_FREQSEL,
576 FEAT_CPR, 536 FEAT_CPR,
577 FEAT_PRELOAD, 537 FEAT_PRELOAD,
578 FEAT_FIR_COEF_V, 538 FEAT_FIR_COEF_V,
@@ -654,8 +614,6 @@ static const enum dss_feat_id omap5_dss_feat_list[] = {
654 FEAT_ALPHA_FREE_ZORDER, 614 FEAT_ALPHA_FREE_ZORDER,
655 FEAT_FIFO_MERGE, 615 FEAT_FIFO_MERGE,
656 FEAT_BURST_2D, 616 FEAT_BURST_2D,
657 FEAT_DSI_PLL_SELFREQDCO,
658 FEAT_DSI_PLL_REFSEL,
659 FEAT_DSI_PHY_DCC, 617 FEAT_DSI_PHY_DCC,
660 FEAT_MFLAG, 618 FEAT_MFLAG,
661}; 619};
diff --git a/drivers/video/fbdev/omap2/dss/dss_features.h b/drivers/video/fbdev/omap2/dss/dss_features.h
index e3ef3b714896..100f7a2d0638 100644
--- a/drivers/video/fbdev/omap2/dss/dss_features.h
+++ b/drivers/video/fbdev/omap2/dss/dss_features.h
@@ -41,7 +41,6 @@ enum dss_feat_id {
41 FEAT_LCD_CLK_SRC, 41 FEAT_LCD_CLK_SRC,
42 /* DSI-PLL power command 0x3 is not working */ 42 /* DSI-PLL power command 0x3 is not working */
43 FEAT_DSI_PLL_PWR_BUG, 43 FEAT_DSI_PLL_PWR_BUG,
44 FEAT_DSI_PLL_FREQSEL,
45 FEAT_DSI_DCS_CMD_CONFIG_VC, 44 FEAT_DSI_DCS_CMD_CONFIG_VC,
46 FEAT_DSI_VC_OCP_WIDTH, 45 FEAT_DSI_VC_OCP_WIDTH,
47 FEAT_DSI_REVERSE_TXCLKESC, 46 FEAT_DSI_REVERSE_TXCLKESC,
@@ -61,8 +60,6 @@ enum dss_feat_id {
61 /* An unknown HW bug causing the normal FIFO thresholds not to work */ 60 /* An unknown HW bug causing the normal FIFO thresholds not to work */
62 FEAT_OMAP3_DSI_FIFO_BUG, 61 FEAT_OMAP3_DSI_FIFO_BUG,
63 FEAT_BURST_2D, 62 FEAT_BURST_2D,
64 FEAT_DSI_PLL_SELFREQDCO,
65 FEAT_DSI_PLL_REFSEL,
66 FEAT_DSI_PHY_DCC, 63 FEAT_DSI_PHY_DCC,
67 FEAT_MFLAG, 64 FEAT_MFLAG,
68}; 65};
@@ -77,20 +74,11 @@ enum dss_feat_reg_field {
77 FEAT_REG_HORIZONTALACCU, 74 FEAT_REG_HORIZONTALACCU,
78 FEAT_REG_VERTICALACCU, 75 FEAT_REG_VERTICALACCU,
79 FEAT_REG_DISPC_CLK_SWITCH, 76 FEAT_REG_DISPC_CLK_SWITCH,
80 FEAT_REG_DSIPLL_REGN,
81 FEAT_REG_DSIPLL_REGM,
82 FEAT_REG_DSIPLL_REGM_DISPC,
83 FEAT_REG_DSIPLL_REGM_DSI,
84}; 77};
85 78
86enum dss_range_param { 79enum dss_range_param {
87 FEAT_PARAM_DSS_FCK, 80 FEAT_PARAM_DSS_FCK,
88 FEAT_PARAM_DSS_PCD, 81 FEAT_PARAM_DSS_PCD,
89 FEAT_PARAM_DSIPLL_REGN,
90 FEAT_PARAM_DSIPLL_REGM,
91 FEAT_PARAM_DSIPLL_REGM_DISPC,
92 FEAT_PARAM_DSIPLL_REGM_DSI,
93 FEAT_PARAM_DSIPLL_FINT,
94 FEAT_PARAM_DSIPLL_LPDIV, 82 FEAT_PARAM_DSIPLL_LPDIV,
95 FEAT_PARAM_DSI_FCK, 83 FEAT_PARAM_DSI_FCK,
96 FEAT_PARAM_DOWNSCALE, 84 FEAT_PARAM_DOWNSCALE,
diff --git a/drivers/video/fbdev/omap2/dss/hdmi.h b/drivers/video/fbdev/omap2/dss/hdmi.h
index 262771b9b76b..e4a32fe77b02 100644
--- a/drivers/video/fbdev/omap2/dss/hdmi.h
+++ b/drivers/video/fbdev/omap2/dss/hdmi.h
@@ -101,13 +101,6 @@ enum hdmi_core_hdmi_dvi {
101 HDMI_HDMI = 1 101 HDMI_HDMI = 1
102}; 102};
103 103
104enum hdmi_clk_refsel {
105 HDMI_REFSEL_PCLK = 0,
106 HDMI_REFSEL_REF1 = 1,
107 HDMI_REFSEL_REF2 = 2,
108 HDMI_REFSEL_SYSCLK = 3
109};
110
111enum hdmi_packing_mode { 104enum hdmi_packing_mode {
112 HDMI_PACK_10b_RGB_YUV444 = 0, 105 HDMI_PACK_10b_RGB_YUV444 = 0,
113 HDMI_PACK_24b_RGB_YUV444_YUV422 = 1, 106 HDMI_PACK_24b_RGB_YUV444_YUV422 = 1,
@@ -160,7 +153,8 @@ enum hdmi_audio_blk_strt_end_sig {
160 153
161enum hdmi_core_audio_layout { 154enum hdmi_core_audio_layout {
162 HDMI_AUDIO_LAYOUT_2CH = 0, 155 HDMI_AUDIO_LAYOUT_2CH = 0,
163 HDMI_AUDIO_LAYOUT_8CH = 1 156 HDMI_AUDIO_LAYOUT_8CH = 1,
157 HDMI_AUDIO_LAYOUT_6CH = 2
164}; 158};
165 159
166enum hdmi_core_cts_mode { 160enum hdmi_core_cts_mode {
@@ -191,17 +185,6 @@ struct hdmi_config {
191 enum hdmi_core_hdmi_dvi hdmi_dvi_mode; 185 enum hdmi_core_hdmi_dvi hdmi_dvi_mode;
192}; 186};
193 187
194/* HDMI PLL structure */
195struct hdmi_pll_info {
196 u16 regn;
197 u16 regm;
198 u32 regmf;
199 u16 regm2;
200 u16 regsd;
201 u16 dcofreq;
202 enum hdmi_clk_refsel refsel;
203};
204
205struct hdmi_audio_format { 188struct hdmi_audio_format {
206 enum hdmi_stereo_channels stereo_channels; 189 enum hdmi_stereo_channels stereo_channels;
207 u8 active_chnnls_msk; 190 u8 active_chnnls_msk;
@@ -249,12 +232,15 @@ struct hdmi_core_audio_config {
249 232
250struct hdmi_wp_data { 233struct hdmi_wp_data {
251 void __iomem *base; 234 void __iomem *base;
235 phys_addr_t phys_base;
252}; 236};
253 237
254struct hdmi_pll_data { 238struct hdmi_pll_data {
239 struct dss_pll pll;
240
255 void __iomem *base; 241 void __iomem *base;
256 242
257 struct hdmi_pll_info info; 243 struct hdmi_wp_data *wp;
258}; 244};
259 245
260struct hdmi_phy_data { 246struct hdmi_phy_data {
@@ -316,16 +302,19 @@ void hdmi_wp_video_config_timing(struct hdmi_wp_data *wp,
316void hdmi_wp_init_vid_fmt_timings(struct hdmi_video_format *video_fmt, 302void hdmi_wp_init_vid_fmt_timings(struct hdmi_video_format *video_fmt,
317 struct omap_video_timings *timings, struct hdmi_config *param); 303 struct omap_video_timings *timings, struct hdmi_config *param);
318int hdmi_wp_init(struct platform_device *pdev, struct hdmi_wp_data *wp); 304int hdmi_wp_init(struct platform_device *pdev, struct hdmi_wp_data *wp);
305phys_addr_t hdmi_wp_get_audio_dma_addr(struct hdmi_wp_data *wp);
319 306
320/* HDMI PLL funcs */ 307/* HDMI PLL funcs */
321int hdmi_pll_enable(struct hdmi_pll_data *pll, struct hdmi_wp_data *wp);
322void hdmi_pll_disable(struct hdmi_pll_data *pll, struct hdmi_wp_data *wp);
323void hdmi_pll_dump(struct hdmi_pll_data *pll, struct seq_file *s); 308void hdmi_pll_dump(struct hdmi_pll_data *pll, struct seq_file *s);
324void hdmi_pll_compute(struct hdmi_pll_data *pll, unsigned long clkin, int phy); 309void hdmi_pll_compute(struct hdmi_pll_data *pll,
325int hdmi_pll_init(struct platform_device *pdev, struct hdmi_pll_data *pll); 310 unsigned long target_tmds, struct dss_pll_clock_info *pi);
311int hdmi_pll_init(struct platform_device *pdev, struct hdmi_pll_data *pll,
312 struct hdmi_wp_data *wp);
313void hdmi_pll_uninit(struct hdmi_pll_data *hpll);
326 314
327/* HDMI PHY funcs */ 315/* HDMI PHY funcs */
328int hdmi_phy_configure(struct hdmi_phy_data *phy, struct hdmi_config *cfg); 316int hdmi_phy_configure(struct hdmi_phy_data *phy, unsigned long hfbitclk,
317 unsigned long lfbitclk);
329void hdmi_phy_dump(struct hdmi_phy_data *phy, struct seq_file *s); 318void hdmi_phy_dump(struct hdmi_phy_data *phy, struct seq_file *s);
330int hdmi_phy_init(struct platform_device *pdev, struct hdmi_phy_data *phy); 319int hdmi_phy_init(struct platform_device *pdev, struct hdmi_phy_data *phy);
331int hdmi_phy_parse_lanes(struct hdmi_phy_data *phy, const u32 *lanes); 320int hdmi_phy_parse_lanes(struct hdmi_phy_data *phy, const u32 *lanes);
@@ -334,7 +323,7 @@ int hdmi_phy_parse_lanes(struct hdmi_phy_data *phy, const u32 *lanes);
334int hdmi_parse_lanes_of(struct platform_device *pdev, struct device_node *ep, 323int hdmi_parse_lanes_of(struct platform_device *pdev, struct device_node *ep,
335 struct hdmi_phy_data *phy); 324 struct hdmi_phy_data *phy);
336 325
337#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO) || defined(CONFIG_OMAP5_DSS_HDMI_AUDIO) 326/* Audio funcs */
338int hdmi_compute_acr(u32 pclk, u32 sample_freq, u32 *n, u32 *cts); 327int hdmi_compute_acr(u32 pclk, u32 sample_freq, u32 *n, u32 *cts);
339int hdmi_wp_audio_enable(struct hdmi_wp_data *wp, bool enable); 328int hdmi_wp_audio_enable(struct hdmi_wp_data *wp, bool enable);
340int hdmi_wp_audio_core_req_enable(struct hdmi_wp_data *wp, bool enable); 329int hdmi_wp_audio_core_req_enable(struct hdmi_wp_data *wp, bool enable);
@@ -342,9 +331,33 @@ void hdmi_wp_audio_config_format(struct hdmi_wp_data *wp,
342 struct hdmi_audio_format *aud_fmt); 331 struct hdmi_audio_format *aud_fmt);
343void hdmi_wp_audio_config_dma(struct hdmi_wp_data *wp, 332void hdmi_wp_audio_config_dma(struct hdmi_wp_data *wp,
344 struct hdmi_audio_dma *aud_dma); 333 struct hdmi_audio_dma *aud_dma);
345static inline bool hdmi_mode_has_audio(int mode) 334static inline bool hdmi_mode_has_audio(struct hdmi_config *cfg)
346{ 335{
347 return mode == HDMI_HDMI ? true : false; 336 return cfg->hdmi_dvi_mode == HDMI_HDMI ? true : false;
348} 337}
349#endif 338
339/* HDMI DRV data */
340struct omap_hdmi {
341 struct mutex lock;
342 struct platform_device *pdev;
343
344 struct hdmi_wp_data wp;
345 struct hdmi_pll_data pll;
346 struct hdmi_phy_data phy;
347 struct hdmi_core_data core;
348
349 struct hdmi_config cfg;
350
351 struct regulator *vdda_reg;
352
353 bool core_enabled;
354 bool display_enabled;
355
356 struct omap_dss_device output;
357
358 struct platform_device *audio_pdev;
359 void (*audio_abort_cb)(struct device *dev);
360 int wp_idlemode;
361};
362
350#endif 363#endif
diff --git a/drivers/video/fbdev/omap2/dss/hdmi4.c b/drivers/video/fbdev/omap2/dss/hdmi4.c
index 9a8713ca090c..f1a02bf938ee 100644
--- a/drivers/video/fbdev/omap2/dss/hdmi4.c
+++ b/drivers/video/fbdev/omap2/dss/hdmi4.c
@@ -33,29 +33,14 @@
33#include <linux/gpio.h> 33#include <linux/gpio.h>
34#include <linux/regulator/consumer.h> 34#include <linux/regulator/consumer.h>
35#include <video/omapdss.h> 35#include <video/omapdss.h>
36#include <sound/omap-hdmi-audio.h>
36 37
37#include "hdmi4_core.h" 38#include "hdmi4_core.h"
38#include "dss.h" 39#include "dss.h"
39#include "dss_features.h" 40#include "dss_features.h"
41#include "hdmi.h"
40 42
41static struct { 43static struct omap_hdmi hdmi;
42 struct mutex lock;
43 struct platform_device *pdev;
44
45 struct hdmi_wp_data wp;
46 struct hdmi_pll_data pll;
47 struct hdmi_phy_data phy;
48 struct hdmi_core_data core;
49
50 struct hdmi_config cfg;
51
52 struct clk *sys_clk;
53 struct regulator *vdda_hdmi_dac_reg;
54
55 bool core_enabled;
56
57 struct omap_dss_device output;
58} hdmi;
59 44
60static int hdmi_runtime_get(void) 45static int hdmi_runtime_get(void)
61{ 46{
@@ -117,7 +102,7 @@ static int hdmi_init_regulator(void)
117 int r; 102 int r;
118 struct regulator *reg; 103 struct regulator *reg;
119 104
120 if (hdmi.vdda_hdmi_dac_reg != NULL) 105 if (hdmi.vdda_reg != NULL)
121 return 0; 106 return 0;
122 107
123 reg = devm_regulator_get(&hdmi.pdev->dev, "vdda"); 108 reg = devm_regulator_get(&hdmi.pdev->dev, "vdda");
@@ -137,7 +122,7 @@ static int hdmi_init_regulator(void)
137 } 122 }
138 } 123 }
139 124
140 hdmi.vdda_hdmi_dac_reg = reg; 125 hdmi.vdda_reg = reg;
141 126
142 return 0; 127 return 0;
143} 128}
@@ -146,7 +131,7 @@ static int hdmi_power_on_core(struct omap_dss_device *dssdev)
146{ 131{
147 int r; 132 int r;
148 133
149 r = regulator_enable(hdmi.vdda_hdmi_dac_reg); 134 r = regulator_enable(hdmi.vdda_reg);
150 if (r) 135 if (r)
151 return r; 136 return r;
152 137
@@ -162,7 +147,7 @@ static int hdmi_power_on_core(struct omap_dss_device *dssdev)
162 return 0; 147 return 0;
163 148
164err_runtime_get: 149err_runtime_get:
165 regulator_disable(hdmi.vdda_hdmi_dac_reg); 150 regulator_disable(hdmi.vdda_reg);
166 151
167 return r; 152 return r;
168} 153}
@@ -172,7 +157,7 @@ static void hdmi_power_off_core(struct omap_dss_device *dssdev)
172 hdmi.core_enabled = false; 157 hdmi.core_enabled = false;
173 158
174 hdmi_runtime_put(); 159 hdmi_runtime_put();
175 regulator_disable(hdmi.vdda_hdmi_dac_reg); 160 regulator_disable(hdmi.vdda_reg);
176} 161}
177 162
178static int hdmi_power_on_full(struct omap_dss_device *dssdev) 163static int hdmi_power_on_full(struct omap_dss_device *dssdev)
@@ -180,8 +165,8 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
180 int r; 165 int r;
181 struct omap_video_timings *p; 166 struct omap_video_timings *p;
182 struct omap_overlay_manager *mgr = hdmi.output.manager; 167 struct omap_overlay_manager *mgr = hdmi.output.manager;
183 unsigned long phy;
184 struct hdmi_wp_data *wp = &hdmi.wp; 168 struct hdmi_wp_data *wp = &hdmi.wp;
169 struct dss_pll_clock_info hdmi_cinfo = { 0 };
185 170
186 r = hdmi_power_on_core(dssdev); 171 r = hdmi_power_on_core(dssdev);
187 if (r) 172 if (r)
@@ -195,19 +180,22 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
195 180
196 DSSDBG("hdmi_power_on x_res= %d y_res = %d\n", p->x_res, p->y_res); 181 DSSDBG("hdmi_power_on x_res= %d y_res = %d\n", p->x_res, p->y_res);
197 182
198 /* the functions below use kHz pixel clock. TODO: change to Hz */ 183 hdmi_pll_compute(&hdmi.pll, p->pixelclock, &hdmi_cinfo);
199 phy = p->pixelclock / 1000;
200
201 hdmi_pll_compute(&hdmi.pll, clk_get_rate(hdmi.sys_clk), phy);
202 184
203 /* config the PLL and PHY hdmi_set_pll_pwrfirst */ 185 r = dss_pll_enable(&hdmi.pll.pll);
204 r = hdmi_pll_enable(&hdmi.pll, &hdmi.wp);
205 if (r) { 186 if (r) {
206 DSSDBG("Failed to lock PLL\n"); 187 DSSERR("Failed to enable PLL\n");
207 goto err_pll_enable; 188 goto err_pll_enable;
208 } 189 }
209 190
210 r = hdmi_phy_configure(&hdmi.phy, &hdmi.cfg); 191 r = dss_pll_set_config(&hdmi.pll.pll, &hdmi_cinfo);
192 if (r) {
193 DSSERR("Failed to configure PLL\n");
194 goto err_pll_cfg;
195 }
196
197 r = hdmi_phy_configure(&hdmi.phy, hdmi_cinfo.clkdco,
198 hdmi_cinfo.clkout[0]);
211 if (r) { 199 if (r) {
212 DSSDBG("Failed to configure PHY\n"); 200 DSSDBG("Failed to configure PHY\n");
213 goto err_phy_cfg; 201 goto err_phy_cfg;
@@ -244,7 +232,8 @@ err_vid_enable:
244err_phy_cfg: 232err_phy_cfg:
245 hdmi_wp_set_phy_pwr(&hdmi.wp, HDMI_PHYPWRCMD_OFF); 233 hdmi_wp_set_phy_pwr(&hdmi.wp, HDMI_PHYPWRCMD_OFF);
246err_phy_pwr: 234err_phy_pwr:
247 hdmi_pll_disable(&hdmi.pll, &hdmi.wp); 235err_pll_cfg:
236 dss_pll_disable(&hdmi.pll.pll);
248err_pll_enable: 237err_pll_enable:
249 hdmi_power_off_core(dssdev); 238 hdmi_power_off_core(dssdev);
250 return -EIO; 239 return -EIO;
@@ -262,7 +251,7 @@ static void hdmi_power_off_full(struct omap_dss_device *dssdev)
262 251
263 hdmi_wp_set_phy_pwr(&hdmi.wp, HDMI_PHYPWRCMD_OFF); 252 hdmi_wp_set_phy_pwr(&hdmi.wp, HDMI_PHYPWRCMD_OFF);
264 253
265 hdmi_pll_disable(&hdmi.pll, &hdmi.wp); 254 dss_pll_disable(&hdmi.pll.pll);
266 255
267 hdmi_power_off_core(dssdev); 256 hdmi_power_off_core(dssdev);
268} 257}
@@ -352,6 +341,8 @@ static int hdmi_display_enable(struct omap_dss_device *dssdev)
352 goto err0; 341 goto err0;
353 } 342 }
354 343
344 hdmi.display_enabled = true;
345
355 mutex_unlock(&hdmi.lock); 346 mutex_unlock(&hdmi.lock);
356 return 0; 347 return 0;
357 348
@@ -366,8 +357,13 @@ static void hdmi_display_disable(struct omap_dss_device *dssdev)
366 357
367 mutex_lock(&hdmi.lock); 358 mutex_lock(&hdmi.lock);
368 359
360 if (hdmi.audio_pdev && hdmi.audio_abort_cb)
361 hdmi.audio_abort_cb(&hdmi.audio_pdev->dev);
362
369 hdmi_power_off_full(dssdev); 363 hdmi_power_off_full(dssdev);
370 364
365 hdmi.display_enabled = false;
366
371 mutex_unlock(&hdmi.lock); 367 mutex_unlock(&hdmi.lock);
372} 368}
373 369
@@ -404,21 +400,6 @@ static void hdmi_core_disable(struct omap_dss_device *dssdev)
404 mutex_unlock(&hdmi.lock); 400 mutex_unlock(&hdmi.lock);
405} 401}
406 402
407static int hdmi_get_clocks(struct platform_device *pdev)
408{
409 struct clk *clk;
410
411 clk = devm_clk_get(&pdev->dev, "sys_clk");
412 if (IS_ERR(clk)) {
413 DSSERR("can't get sys_clk\n");
414 return PTR_ERR(clk);
415 }
416
417 hdmi.sys_clk = clk;
418
419 return 0;
420}
421
422static int hdmi_connect(struct omap_dss_device *dssdev, 403static int hdmi_connect(struct omap_dss_device *dssdev,
423 struct omap_dss_device *dst) 404 struct omap_dss_device *dst)
424{ 405{
@@ -484,112 +465,6 @@ static int hdmi_read_edid(struct omap_dss_device *dssdev,
484 return r; 465 return r;
485} 466}
486 467
487#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
488static int hdmi_audio_enable(struct omap_dss_device *dssdev)
489{
490 int r;
491
492 mutex_lock(&hdmi.lock);
493
494 if (!hdmi_mode_has_audio(hdmi.cfg.hdmi_dvi_mode)) {
495 r = -EPERM;
496 goto err;
497 }
498
499 r = hdmi_wp_audio_enable(&hdmi.wp, true);
500 if (r)
501 goto err;
502
503 mutex_unlock(&hdmi.lock);
504 return 0;
505
506err:
507 mutex_unlock(&hdmi.lock);
508 return r;
509}
510
511static void hdmi_audio_disable(struct omap_dss_device *dssdev)
512{
513 hdmi_wp_audio_enable(&hdmi.wp, false);
514}
515
516static int hdmi_audio_start(struct omap_dss_device *dssdev)
517{
518 return hdmi4_audio_start(&hdmi.core, &hdmi.wp);
519}
520
521static void hdmi_audio_stop(struct omap_dss_device *dssdev)
522{
523 hdmi4_audio_stop(&hdmi.core, &hdmi.wp);
524}
525
526static bool hdmi_audio_supported(struct omap_dss_device *dssdev)
527{
528 bool r;
529
530 mutex_lock(&hdmi.lock);
531
532 r = hdmi_mode_has_audio(hdmi.cfg.hdmi_dvi_mode);
533
534 mutex_unlock(&hdmi.lock);
535 return r;
536}
537
538static int hdmi_audio_config(struct omap_dss_device *dssdev,
539 struct omap_dss_audio *audio)
540{
541 int r;
542 u32 pclk = hdmi.cfg.timings.pixelclock;
543
544 mutex_lock(&hdmi.lock);
545
546 if (!hdmi_mode_has_audio(hdmi.cfg.hdmi_dvi_mode)) {
547 r = -EPERM;
548 goto err;
549 }
550
551 r = hdmi4_audio_config(&hdmi.core, &hdmi.wp, audio, pclk);
552 if (r)
553 goto err;
554
555 mutex_unlock(&hdmi.lock);
556 return 0;
557
558err:
559 mutex_unlock(&hdmi.lock);
560 return r;
561}
562#else
563static int hdmi_audio_enable(struct omap_dss_device *dssdev)
564{
565 return -EPERM;
566}
567
568static void hdmi_audio_disable(struct omap_dss_device *dssdev)
569{
570}
571
572static int hdmi_audio_start(struct omap_dss_device *dssdev)
573{
574 return -EPERM;
575}
576
577static void hdmi_audio_stop(struct omap_dss_device *dssdev)
578{
579}
580
581static bool hdmi_audio_supported(struct omap_dss_device *dssdev)
582{
583 return false;
584}
585
586static int hdmi_audio_config(struct omap_dss_device *dssdev,
587 struct omap_dss_audio *audio)
588{
589 return -EPERM;
590}
591#endif
592
593static int hdmi_set_infoframe(struct omap_dss_device *dssdev, 468static int hdmi_set_infoframe(struct omap_dss_device *dssdev,
594 const struct hdmi_avi_infoframe *avi) 469 const struct hdmi_avi_infoframe *avi)
595{ 470{
@@ -618,13 +493,6 @@ static const struct omapdss_hdmi_ops hdmi_ops = {
618 .read_edid = hdmi_read_edid, 493 .read_edid = hdmi_read_edid,
619 .set_infoframe = hdmi_set_infoframe, 494 .set_infoframe = hdmi_set_infoframe,
620 .set_hdmi_mode = hdmi_set_hdmi_mode, 495 .set_hdmi_mode = hdmi_set_hdmi_mode,
621
622 .audio_enable = hdmi_audio_enable,
623 .audio_disable = hdmi_audio_disable,
624 .audio_start = hdmi_audio_start,
625 .audio_stop = hdmi_audio_stop,
626 .audio_supported = hdmi_audio_supported,
627 .audio_config = hdmi_audio_config,
628}; 496};
629 497
630static void hdmi_init_output(struct platform_device *pdev) 498static void hdmi_init_output(struct platform_device *pdev)
@@ -642,7 +510,7 @@ static void hdmi_init_output(struct platform_device *pdev)
642 omapdss_register_output(out); 510 omapdss_register_output(out);
643} 511}
644 512
645static void __exit hdmi_uninit_output(struct platform_device *pdev) 513static void hdmi_uninit_output(struct platform_device *pdev)
646{ 514{
647 struct omap_dss_device *out = &hdmi.output; 515 struct omap_dss_device *out = &hdmi.output;
648 516
@@ -671,6 +539,112 @@ err:
671 return r; 539 return r;
672} 540}
673 541
542/* Audio callbacks */
543static int hdmi_audio_startup(struct device *dev,
544 void (*abort_cb)(struct device *dev))
545{
546 struct omap_hdmi *hd = dev_get_drvdata(dev);
547 int ret = 0;
548
549 mutex_lock(&hd->lock);
550
551 if (!hdmi_mode_has_audio(&hd->cfg) || !hd->display_enabled) {
552 ret = -EPERM;
553 goto out;
554 }
555
556 hd->audio_abort_cb = abort_cb;
557
558out:
559 mutex_unlock(&hd->lock);
560
561 return ret;
562}
563
564static int hdmi_audio_shutdown(struct device *dev)
565{
566 struct omap_hdmi *hd = dev_get_drvdata(dev);
567
568 mutex_lock(&hd->lock);
569 hd->audio_abort_cb = NULL;
570 mutex_unlock(&hd->lock);
571
572 return 0;
573}
574
575static int hdmi_audio_start(struct device *dev)
576{
577 struct omap_hdmi *hd = dev_get_drvdata(dev);
578
579 WARN_ON(!hdmi_mode_has_audio(&hd->cfg));
580 WARN_ON(!hd->display_enabled);
581
582 hdmi_wp_audio_enable(&hd->wp, true);
583 hdmi4_audio_start(&hd->core, &hd->wp);
584
585 return 0;
586}
587
588static void hdmi_audio_stop(struct device *dev)
589{
590 struct omap_hdmi *hd = dev_get_drvdata(dev);
591
592 WARN_ON(!hdmi_mode_has_audio(&hd->cfg));
593 WARN_ON(!hd->display_enabled);
594
595 hdmi4_audio_stop(&hd->core, &hd->wp);
596 hdmi_wp_audio_enable(&hd->wp, false);
597}
598
599static int hdmi_audio_config(struct device *dev,
600 struct omap_dss_audio *dss_audio)
601{
602 struct omap_hdmi *hd = dev_get_drvdata(dev);
603 int ret;
604
605 mutex_lock(&hd->lock);
606
607 if (!hdmi_mode_has_audio(&hd->cfg) || !hd->display_enabled) {
608 ret = -EPERM;
609 goto out;
610 }
611
612 ret = hdmi4_audio_config(&hd->core, &hd->wp, dss_audio,
613 hd->cfg.timings.pixelclock);
614
615out:
616 mutex_unlock(&hd->lock);
617
618 return ret;
619}
620
621static const struct omap_hdmi_audio_ops hdmi_audio_ops = {
622 .audio_startup = hdmi_audio_startup,
623 .audio_shutdown = hdmi_audio_shutdown,
624 .audio_start = hdmi_audio_start,
625 .audio_stop = hdmi_audio_stop,
626 .audio_config = hdmi_audio_config,
627};
628
629static int hdmi_audio_register(struct device *dev)
630{
631 struct omap_hdmi_audio_pdata pdata = {
632 .dev = dev,
633 .dss_version = omapdss_get_version(),
634 .audio_dma_addr = hdmi_wp_get_audio_dma_addr(&hdmi.wp),
635 .ops = &hdmi_audio_ops,
636 };
637
638 hdmi.audio_pdev = platform_device_register_data(
639 dev, "omap-hdmi-audio", PLATFORM_DEVID_AUTO,
640 &pdata, sizeof(pdata));
641
642 if (IS_ERR(hdmi.audio_pdev))
643 return PTR_ERR(hdmi.audio_pdev);
644
645 return 0;
646}
647
674/* HDMI HW IP initialisation */ 648/* HDMI HW IP initialisation */
675static int omapdss_hdmihw_probe(struct platform_device *pdev) 649static int omapdss_hdmihw_probe(struct platform_device *pdev)
676{ 650{
@@ -678,6 +652,7 @@ static int omapdss_hdmihw_probe(struct platform_device *pdev)
678 int irq; 652 int irq;
679 653
680 hdmi.pdev = pdev; 654 hdmi.pdev = pdev;
655 dev_set_drvdata(&pdev->dev, &hdmi);
681 656
682 mutex_init(&hdmi.lock); 657 mutex_init(&hdmi.lock);
683 658
@@ -691,28 +666,23 @@ static int omapdss_hdmihw_probe(struct platform_device *pdev)
691 if (r) 666 if (r)
692 return r; 667 return r;
693 668
694 r = hdmi_pll_init(pdev, &hdmi.pll); 669 r = hdmi_pll_init(pdev, &hdmi.pll, &hdmi.wp);
695 if (r) 670 if (r)
696 return r; 671 return r;
697 672
698 r = hdmi_phy_init(pdev, &hdmi.phy); 673 r = hdmi_phy_init(pdev, &hdmi.phy);
699 if (r) 674 if (r)
700 return r; 675 goto err;
701 676
702 r = hdmi4_core_init(pdev, &hdmi.core); 677 r = hdmi4_core_init(pdev, &hdmi.core);
703 if (r) 678 if (r)
704 return r; 679 goto err;
705
706 r = hdmi_get_clocks(pdev);
707 if (r) {
708 DSSERR("can't get clocks\n");
709 return r;
710 }
711 680
712 irq = platform_get_irq(pdev, 0); 681 irq = platform_get_irq(pdev, 0);
713 if (irq < 0) { 682 if (irq < 0) {
714 DSSERR("platform_get_irq failed\n"); 683 DSSERR("platform_get_irq failed\n");
715 return -ENODEV; 684 r = -ENODEV;
685 goto err;
716 } 686 }
717 687
718 r = devm_request_threaded_irq(&pdev->dev, irq, 688 r = devm_request_threaded_irq(&pdev->dev, irq,
@@ -720,22 +690,38 @@ static int omapdss_hdmihw_probe(struct platform_device *pdev)
720 IRQF_ONESHOT, "OMAP HDMI", &hdmi.wp); 690 IRQF_ONESHOT, "OMAP HDMI", &hdmi.wp);
721 if (r) { 691 if (r) {
722 DSSERR("HDMI IRQ request failed\n"); 692 DSSERR("HDMI IRQ request failed\n");
723 return r; 693 goto err;
724 } 694 }
725 695
726 pm_runtime_enable(&pdev->dev); 696 pm_runtime_enable(&pdev->dev);
727 697
728 hdmi_init_output(pdev); 698 hdmi_init_output(pdev);
729 699
700 r = hdmi_audio_register(&pdev->dev);
701 if (r) {
702 DSSERR("Registering HDMI audio failed\n");
703 hdmi_uninit_output(pdev);
704 pm_runtime_disable(&pdev->dev);
705 return r;
706 }
707
730 dss_debugfs_create_file("hdmi", hdmi_dump_regs); 708 dss_debugfs_create_file("hdmi", hdmi_dump_regs);
731 709
732 return 0; 710 return 0;
711err:
712 hdmi_pll_uninit(&hdmi.pll);
713 return r;
733} 714}
734 715
735static int __exit omapdss_hdmihw_remove(struct platform_device *pdev) 716static int __exit omapdss_hdmihw_remove(struct platform_device *pdev)
736{ 717{
718 if (hdmi.audio_pdev)
719 platform_device_unregister(hdmi.audio_pdev);
720
737 hdmi_uninit_output(pdev); 721 hdmi_uninit_output(pdev);
738 722
723 hdmi_pll_uninit(&hdmi.pll);
724
739 pm_runtime_disable(&pdev->dev); 725 pm_runtime_disable(&pdev->dev);
740 726
741 return 0; 727 return 0;
@@ -743,8 +729,6 @@ static int __exit omapdss_hdmihw_remove(struct platform_device *pdev)
743 729
744static int hdmi_runtime_suspend(struct device *dev) 730static int hdmi_runtime_suspend(struct device *dev)
745{ 731{
746 clk_disable_unprepare(hdmi.sys_clk);
747
748 dispc_runtime_put(); 732 dispc_runtime_put();
749 733
750 return 0; 734 return 0;
@@ -758,8 +742,6 @@ static int hdmi_runtime_resume(struct device *dev)
758 if (r < 0) 742 if (r < 0)
759 return r; 743 return r;
760 744
761 clk_prepare_enable(hdmi.sys_clk);
762
763 return 0; 745 return 0;
764} 746}
765 747
diff --git a/drivers/video/fbdev/omap2/dss/hdmi4_core.c b/drivers/video/fbdev/omap2/dss/hdmi4_core.c
index 4ad39cfce254..7eafea5b8e19 100644
--- a/drivers/video/fbdev/omap2/dss/hdmi4_core.c
+++ b/drivers/video/fbdev/omap2/dss/hdmi4_core.c
@@ -31,10 +31,8 @@
31#include <linux/platform_device.h> 31#include <linux/platform_device.h>
32#include <linux/string.h> 32#include <linux/string.h>
33#include <linux/seq_file.h> 33#include <linux/seq_file.h>
34#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
35#include <sound/asound.h> 34#include <sound/asound.h>
36#include <sound/asoundef.h> 35#include <sound/asoundef.h>
37#endif
38 36
39#include "hdmi4_core.h" 37#include "hdmi4_core.h"
40#include "dss_features.h" 38#include "dss_features.h"
@@ -530,7 +528,6 @@ void hdmi4_core_dump(struct hdmi_core_data *core, struct seq_file *s)
530 DUMPCOREAV(HDMI_CORE_AV_CEC_ADDR_ID); 528 DUMPCOREAV(HDMI_CORE_AV_CEC_ADDR_ID);
531} 529}
532 530
533#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
534static void hdmi_core_audio_config(struct hdmi_core_data *core, 531static void hdmi_core_audio_config(struct hdmi_core_data *core,
535 struct hdmi_core_audio_config *cfg) 532 struct hdmi_core_audio_config *cfg)
536{ 533{
@@ -877,17 +874,6 @@ void hdmi4_audio_stop(struct hdmi_core_data *core, struct hdmi_wp_data *wp)
877 hdmi_wp_audio_core_req_enable(wp, false); 874 hdmi_wp_audio_core_req_enable(wp, false);
878} 875}
879 876
880int hdmi4_audio_get_dma_port(u32 *offset, u32 *size)
881{
882 if (!offset || !size)
883 return -EINVAL;
884 *offset = HDMI_WP_AUDIO_DATA;
885 *size = 4;
886 return 0;
887}
888
889#endif
890
891int hdmi4_core_init(struct platform_device *pdev, struct hdmi_core_data *core) 877int hdmi4_core_init(struct platform_device *pdev, struct hdmi_core_data *core)
892{ 878{
893 struct resource *res; 879 struct resource *res;
diff --git a/drivers/video/fbdev/omap2/dss/hdmi4_core.h b/drivers/video/fbdev/omap2/dss/hdmi4_core.h
index 827909eb6c50..a069f96ec6f6 100644
--- a/drivers/video/fbdev/omap2/dss/hdmi4_core.h
+++ b/drivers/video/fbdev/omap2/dss/hdmi4_core.h
@@ -266,12 +266,8 @@ void hdmi4_configure(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
266void hdmi4_core_dump(struct hdmi_core_data *core, struct seq_file *s); 266void hdmi4_core_dump(struct hdmi_core_data *core, struct seq_file *s);
267int hdmi4_core_init(struct platform_device *pdev, struct hdmi_core_data *core); 267int hdmi4_core_init(struct platform_device *pdev, struct hdmi_core_data *core);
268 268
269#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
270int hdmi4_audio_start(struct hdmi_core_data *core, struct hdmi_wp_data *wp); 269int hdmi4_audio_start(struct hdmi_core_data *core, struct hdmi_wp_data *wp);
271void hdmi4_audio_stop(struct hdmi_core_data *core, struct hdmi_wp_data *wp); 270void hdmi4_audio_stop(struct hdmi_core_data *core, struct hdmi_wp_data *wp);
272int hdmi4_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp, 271int hdmi4_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
273 struct omap_dss_audio *audio, u32 pclk); 272 struct omap_dss_audio *audio, u32 pclk);
274int hdmi4_audio_get_dma_port(u32 *offset, u32 *size);
275#endif
276
277#endif 273#endif
diff --git a/drivers/video/fbdev/omap2/dss/hdmi5.c b/drivers/video/fbdev/omap2/dss/hdmi5.c
index 169b764bb9d4..d9d0d781625a 100644
--- a/drivers/video/fbdev/omap2/dss/hdmi5.c
+++ b/drivers/video/fbdev/omap2/dss/hdmi5.c
@@ -38,29 +38,13 @@
38#include <linux/gpio.h> 38#include <linux/gpio.h>
39#include <linux/regulator/consumer.h> 39#include <linux/regulator/consumer.h>
40#include <video/omapdss.h> 40#include <video/omapdss.h>
41#include <sound/omap-hdmi-audio.h>
41 42
42#include "hdmi5_core.h" 43#include "hdmi5_core.h"
43#include "dss.h" 44#include "dss.h"
44#include "dss_features.h" 45#include "dss_features.h"
45 46
46static struct { 47static struct omap_hdmi hdmi;
47 struct mutex lock;
48 struct platform_device *pdev;
49
50 struct hdmi_wp_data wp;
51 struct hdmi_pll_data pll;
52 struct hdmi_phy_data phy;
53 struct hdmi_core_data core;
54
55 struct hdmi_config cfg;
56
57 struct clk *sys_clk;
58 struct regulator *vdda_reg;
59
60 bool core_enabled;
61
62 struct omap_dss_device output;
63} hdmi;
64 48
65static int hdmi_runtime_get(void) 49static int hdmi_runtime_get(void)
66{ 50{
@@ -198,7 +182,7 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
198 int r; 182 int r;
199 struct omap_video_timings *p; 183 struct omap_video_timings *p;
200 struct omap_overlay_manager *mgr = hdmi.output.manager; 184 struct omap_overlay_manager *mgr = hdmi.output.manager;
201 unsigned long phy; 185 struct dss_pll_clock_info hdmi_cinfo = { 0 };
202 186
203 r = hdmi_power_on_core(dssdev); 187 r = hdmi_power_on_core(dssdev);
204 if (r) 188 if (r)
@@ -208,24 +192,27 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
208 192
209 DSSDBG("hdmi_power_on x_res= %d y_res = %d\n", p->x_res, p->y_res); 193 DSSDBG("hdmi_power_on x_res= %d y_res = %d\n", p->x_res, p->y_res);
210 194
211 /* the functions below use kHz pixel clock. TODO: change to Hz */ 195 hdmi_pll_compute(&hdmi.pll, p->pixelclock, &hdmi_cinfo);
212 phy = p->pixelclock / 1000;
213
214 hdmi_pll_compute(&hdmi.pll, clk_get_rate(hdmi.sys_clk), phy);
215 196
216 /* disable and clear irqs */ 197 /* disable and clear irqs */
217 hdmi_wp_clear_irqenable(&hdmi.wp, 0xffffffff); 198 hdmi_wp_clear_irqenable(&hdmi.wp, 0xffffffff);
218 hdmi_wp_set_irqstatus(&hdmi.wp, 199 hdmi_wp_set_irqstatus(&hdmi.wp,
219 hdmi_wp_get_irqstatus(&hdmi.wp)); 200 hdmi_wp_get_irqstatus(&hdmi.wp));
220 201
221 /* config the PLL and PHY hdmi_set_pll_pwrfirst */ 202 r = dss_pll_enable(&hdmi.pll.pll);
222 r = hdmi_pll_enable(&hdmi.pll, &hdmi.wp);
223 if (r) { 203 if (r) {
224 DSSDBG("Failed to lock PLL\n"); 204 DSSERR("Failed to enable PLL\n");
225 goto err_pll_enable; 205 goto err_pll_enable;
226 } 206 }
227 207
228 r = hdmi_phy_configure(&hdmi.phy, &hdmi.cfg); 208 r = dss_pll_set_config(&hdmi.pll.pll, &hdmi_cinfo);
209 if (r) {
210 DSSERR("Failed to configure PLL\n");
211 goto err_pll_cfg;
212 }
213
214 r = hdmi_phy_configure(&hdmi.phy, hdmi_cinfo.clkdco,
215 hdmi_cinfo.clkout[0]);
229 if (r) { 216 if (r) {
230 DSSDBG("Failed to start PHY\n"); 217 DSSDBG("Failed to start PHY\n");
231 goto err_phy_cfg; 218 goto err_phy_cfg;
@@ -262,7 +249,8 @@ err_vid_enable:
262 hdmi_wp_set_phy_pwr(&hdmi.wp, HDMI_PHYPWRCMD_OFF); 249 hdmi_wp_set_phy_pwr(&hdmi.wp, HDMI_PHYPWRCMD_OFF);
263err_phy_pwr: 250err_phy_pwr:
264err_phy_cfg: 251err_phy_cfg:
265 hdmi_pll_disable(&hdmi.pll, &hdmi.wp); 252err_pll_cfg:
253 dss_pll_disable(&hdmi.pll.pll);
266err_pll_enable: 254err_pll_enable:
267 hdmi_power_off_core(dssdev); 255 hdmi_power_off_core(dssdev);
268 return -EIO; 256 return -EIO;
@@ -280,7 +268,7 @@ static void hdmi_power_off_full(struct omap_dss_device *dssdev)
280 268
281 hdmi_wp_set_phy_pwr(&hdmi.wp, HDMI_PHYPWRCMD_OFF); 269 hdmi_wp_set_phy_pwr(&hdmi.wp, HDMI_PHYPWRCMD_OFF);
282 270
283 hdmi_pll_disable(&hdmi.pll, &hdmi.wp); 271 dss_pll_disable(&hdmi.pll.pll);
284 272
285 hdmi_power_off_core(dssdev); 273 hdmi_power_off_core(dssdev);
286} 274}
@@ -290,6 +278,10 @@ static int hdmi_display_check_timing(struct omap_dss_device *dssdev,
290{ 278{
291 struct omap_dss_device *out = &hdmi.output; 279 struct omap_dss_device *out = &hdmi.output;
292 280
281 /* TODO: proper interlace support */
282 if (timings->interlace)
283 return -EINVAL;
284
293 if (!dispc_mgr_timings_ok(out->dispc_channel, timings)) 285 if (!dispc_mgr_timings_ok(out->dispc_channel, timings))
294 return -EINVAL; 286 return -EINVAL;
295 287
@@ -377,6 +369,8 @@ static int hdmi_display_enable(struct omap_dss_device *dssdev)
377 goto err0; 369 goto err0;
378 } 370 }
379 371
372 hdmi.display_enabled = true;
373
380 mutex_unlock(&hdmi.lock); 374 mutex_unlock(&hdmi.lock);
381 return 0; 375 return 0;
382 376
@@ -391,8 +385,13 @@ static void hdmi_display_disable(struct omap_dss_device *dssdev)
391 385
392 mutex_lock(&hdmi.lock); 386 mutex_lock(&hdmi.lock);
393 387
388 if (hdmi.audio_pdev && hdmi.audio_abort_cb)
389 hdmi.audio_abort_cb(&hdmi.audio_pdev->dev);
390
394 hdmi_power_off_full(dssdev); 391 hdmi_power_off_full(dssdev);
395 392
393 hdmi.display_enabled = false;
394
396 mutex_unlock(&hdmi.lock); 395 mutex_unlock(&hdmi.lock);
397} 396}
398 397
@@ -429,21 +428,6 @@ static void hdmi_core_disable(struct omap_dss_device *dssdev)
429 mutex_unlock(&hdmi.lock); 428 mutex_unlock(&hdmi.lock);
430} 429}
431 430
432static int hdmi_get_clocks(struct platform_device *pdev)
433{
434 struct clk *clk;
435
436 clk = devm_clk_get(&pdev->dev, "sys_clk");
437 if (IS_ERR(clk)) {
438 DSSERR("can't get sys_clk\n");
439 return PTR_ERR(clk);
440 }
441
442 hdmi.sys_clk = clk;
443
444 return 0;
445}
446
447static int hdmi_connect(struct omap_dss_device *dssdev, 431static int hdmi_connect(struct omap_dss_device *dssdev,
448 struct omap_dss_device *dst) 432 struct omap_dss_device *dst)
449{ 433{
@@ -509,112 +493,6 @@ static int hdmi_read_edid(struct omap_dss_device *dssdev,
509 return r; 493 return r;
510} 494}
511 495
512#if defined(CONFIG_OMAP5_DSS_HDMI_AUDIO)
513static int hdmi_audio_enable(struct omap_dss_device *dssdev)
514{
515 int r;
516
517 mutex_lock(&hdmi.lock);
518
519 if (!hdmi_mode_has_audio(hdmi.cfg.hdmi_dvi_mode)) {
520 r = -EPERM;
521 goto err;
522 }
523
524 r = hdmi_wp_audio_enable(&hdmi.wp, true);
525 if (r)
526 goto err;
527
528 mutex_unlock(&hdmi.lock);
529 return 0;
530
531err:
532 mutex_unlock(&hdmi.lock);
533 return r;
534}
535
536static void hdmi_audio_disable(struct omap_dss_device *dssdev)
537{
538 hdmi_wp_audio_enable(&hdmi.wp, false);
539}
540
541static int hdmi_audio_start(struct omap_dss_device *dssdev)
542{
543 return hdmi_wp_audio_core_req_enable(&hdmi.wp, true);
544}
545
546static void hdmi_audio_stop(struct omap_dss_device *dssdev)
547{
548 hdmi_wp_audio_core_req_enable(&hdmi.wp, false);
549}
550
551static bool hdmi_audio_supported(struct omap_dss_device *dssdev)
552{
553 bool r;
554
555 mutex_lock(&hdmi.lock);
556
557 r = hdmi_mode_has_audio(hdmi.cfg.hdmi_dvi_mode);
558
559 mutex_unlock(&hdmi.lock);
560 return r;
561}
562
563static int hdmi_audio_config(struct omap_dss_device *dssdev,
564 struct omap_dss_audio *audio)
565{
566 int r;
567 u32 pclk = hdmi.cfg.timings.pixelclock;
568
569 mutex_lock(&hdmi.lock);
570
571 if (!hdmi_mode_has_audio(hdmi.cfg.hdmi_dvi_mode)) {
572 r = -EPERM;
573 goto err;
574 }
575
576 r = hdmi5_audio_config(&hdmi.core, &hdmi.wp, audio, pclk);
577 if (r)
578 goto err;
579
580 mutex_unlock(&hdmi.lock);
581 return 0;
582
583err:
584 mutex_unlock(&hdmi.lock);
585 return r;
586}
587#else
588static int hdmi_audio_enable(struct omap_dss_device *dssdev)
589{
590 return -EPERM;
591}
592
593static void hdmi_audio_disable(struct omap_dss_device *dssdev)
594{
595}
596
597static int hdmi_audio_start(struct omap_dss_device *dssdev)
598{
599 return -EPERM;
600}
601
602static void hdmi_audio_stop(struct omap_dss_device *dssdev)
603{
604}
605
606static bool hdmi_audio_supported(struct omap_dss_device *dssdev)
607{
608 return false;
609}
610
611static int hdmi_audio_config(struct omap_dss_device *dssdev,
612 struct omap_dss_audio *audio)
613{
614 return -EPERM;
615}
616#endif
617
618static int hdmi_set_infoframe(struct omap_dss_device *dssdev, 496static int hdmi_set_infoframe(struct omap_dss_device *dssdev,
619 const struct hdmi_avi_infoframe *avi) 497 const struct hdmi_avi_infoframe *avi)
620{ 498{
@@ -643,13 +521,6 @@ static const struct omapdss_hdmi_ops hdmi_ops = {
643 .read_edid = hdmi_read_edid, 521 .read_edid = hdmi_read_edid,
644 .set_infoframe = hdmi_set_infoframe, 522 .set_infoframe = hdmi_set_infoframe,
645 .set_hdmi_mode = hdmi_set_hdmi_mode, 523 .set_hdmi_mode = hdmi_set_hdmi_mode,
646
647 .audio_enable = hdmi_audio_enable,
648 .audio_disable = hdmi_audio_disable,
649 .audio_start = hdmi_audio_start,
650 .audio_stop = hdmi_audio_stop,
651 .audio_supported = hdmi_audio_supported,
652 .audio_config = hdmi_audio_config,
653}; 524};
654 525
655static void hdmi_init_output(struct platform_device *pdev) 526static void hdmi_init_output(struct platform_device *pdev)
@@ -667,7 +538,7 @@ static void hdmi_init_output(struct platform_device *pdev)
667 omapdss_register_output(out); 538 omapdss_register_output(out);
668} 539}
669 540
670static void __exit hdmi_uninit_output(struct platform_device *pdev) 541static void hdmi_uninit_output(struct platform_device *pdev)
671{ 542{
672 struct omap_dss_device *out = &hdmi.output; 543 struct omap_dss_device *out = &hdmi.output;
673 544
@@ -696,6 +567,119 @@ err:
696 return r; 567 return r;
697} 568}
698 569
570/* Audio callbacks */
571static int hdmi_audio_startup(struct device *dev,
572 void (*abort_cb)(struct device *dev))
573{
574 struct omap_hdmi *hd = dev_get_drvdata(dev);
575 int ret = 0;
576
577 mutex_lock(&hd->lock);
578
579 if (!hdmi_mode_has_audio(&hd->cfg) || !hd->display_enabled) {
580 ret = -EPERM;
581 goto out;
582 }
583
584 hd->audio_abort_cb = abort_cb;
585
586out:
587 mutex_unlock(&hd->lock);
588
589 return ret;
590}
591
592static int hdmi_audio_shutdown(struct device *dev)
593{
594 struct omap_hdmi *hd = dev_get_drvdata(dev);
595
596 mutex_lock(&hd->lock);
597 hd->audio_abort_cb = NULL;
598 mutex_unlock(&hd->lock);
599
600 return 0;
601}
602
603static int hdmi_audio_start(struct device *dev)
604{
605 struct omap_hdmi *hd = dev_get_drvdata(dev);
606
607 WARN_ON(!hdmi_mode_has_audio(&hd->cfg));
608 WARN_ON(!hd->display_enabled);
609
610 /* No-idle while playing audio, store the old value */
611 hd->wp_idlemode = REG_GET(hdmi.wp.base, HDMI_WP_SYSCONFIG, 3, 2);
612 REG_FLD_MOD(hdmi.wp.base, HDMI_WP_SYSCONFIG, 1, 3, 2);
613
614 hdmi_wp_audio_enable(&hd->wp, true);
615 hdmi_wp_audio_core_req_enable(&hd->wp, true);
616
617 return 0;
618}
619
620static void hdmi_audio_stop(struct device *dev)
621{
622 struct omap_hdmi *hd = dev_get_drvdata(dev);
623
624 WARN_ON(!hdmi_mode_has_audio(&hd->cfg));
625 WARN_ON(!hd->display_enabled);
626
627 hdmi_wp_audio_core_req_enable(&hd->wp, false);
628 hdmi_wp_audio_enable(&hd->wp, false);
629
630 /* Playback stopped, restore original idlemode */
631 REG_FLD_MOD(hdmi.wp.base, HDMI_WP_SYSCONFIG, hd->wp_idlemode, 3, 2);
632}
633
634static int hdmi_audio_config(struct device *dev,
635 struct omap_dss_audio *dss_audio)
636{
637 struct omap_hdmi *hd = dev_get_drvdata(dev);
638 int ret;
639
640 mutex_lock(&hd->lock);
641
642 if (!hdmi_mode_has_audio(&hd->cfg) || !hd->display_enabled) {
643 ret = -EPERM;
644 goto out;
645 }
646
647 ret = hdmi5_audio_config(&hd->core, &hd->wp, dss_audio,
648 hd->cfg.timings.pixelclock);
649
650out:
651 mutex_unlock(&hd->lock);
652
653 return ret;
654}
655
656static const struct omap_hdmi_audio_ops hdmi_audio_ops = {
657 .audio_startup = hdmi_audio_startup,
658 .audio_shutdown = hdmi_audio_shutdown,
659 .audio_start = hdmi_audio_start,
660 .audio_stop = hdmi_audio_stop,
661 .audio_config = hdmi_audio_config,
662};
663
664static int hdmi_audio_register(struct device *dev)
665{
666 struct omap_hdmi_audio_pdata pdata = {
667 .dev = dev,
668 .dss_version = omapdss_get_version(),
669 .audio_dma_addr = hdmi_wp_get_audio_dma_addr(&hdmi.wp),
670 .ops = &hdmi_audio_ops,
671 };
672
673 hdmi.audio_pdev = platform_device_register_data(
674 dev, "omap-hdmi-audio", PLATFORM_DEVID_AUTO,
675 &pdata, sizeof(pdata));
676
677 if (IS_ERR(hdmi.audio_pdev))
678 return PTR_ERR(hdmi.audio_pdev);
679
680 return 0;
681}
682
699/* HDMI HW IP initialisation */ 683/* HDMI HW IP initialisation */
700static int omapdss_hdmihw_probe(struct platform_device *pdev) 684static int omapdss_hdmihw_probe(struct platform_device *pdev)
701{ 685{
@@ -703,6 +687,7 @@ static int omapdss_hdmihw_probe(struct platform_device *pdev)
703 int irq; 687 int irq;
704 688
705 hdmi.pdev = pdev; 689 hdmi.pdev = pdev;
690 dev_set_drvdata(&pdev->dev, &hdmi);
706 691
707 mutex_init(&hdmi.lock); 692 mutex_init(&hdmi.lock);
708 693
@@ -716,28 +701,23 @@ static int omapdss_hdmihw_probe(struct platform_device *pdev)
716 if (r) 701 if (r)
717 return r; 702 return r;
718 703
719 r = hdmi_pll_init(pdev, &hdmi.pll); 704 r = hdmi_pll_init(pdev, &hdmi.pll, &hdmi.wp);
720 if (r) 705 if (r)
721 return r; 706 return r;
722 707
723 r = hdmi_phy_init(pdev, &hdmi.phy); 708 r = hdmi_phy_init(pdev, &hdmi.phy);
724 if (r) 709 if (r)
725 return r; 710 goto err;
726 711
727 r = hdmi5_core_init(pdev, &hdmi.core); 712 r = hdmi5_core_init(pdev, &hdmi.core);
728 if (r) 713 if (r)
729 return r; 714 goto err;
730
731 r = hdmi_get_clocks(pdev);
732 if (r) {
733 DSSERR("can't get clocks\n");
734 return r;
735 }
736 715
737 irq = platform_get_irq(pdev, 0); 716 irq = platform_get_irq(pdev, 0);
738 if (irq < 0) { 717 if (irq < 0) {
739 DSSERR("platform_get_irq failed\n"); 718 DSSERR("platform_get_irq failed\n");
740 return -ENODEV; 719 r = -ENODEV;
720 goto err;
741 } 721 }
742 722
743 r = devm_request_threaded_irq(&pdev->dev, irq, 723 r = devm_request_threaded_irq(&pdev->dev, irq,
@@ -745,22 +725,38 @@ static int omapdss_hdmihw_probe(struct platform_device *pdev)
745 IRQF_ONESHOT, "OMAP HDMI", &hdmi.wp); 725 IRQF_ONESHOT, "OMAP HDMI", &hdmi.wp);
746 if (r) { 726 if (r) {
747 DSSERR("HDMI IRQ request failed\n"); 727 DSSERR("HDMI IRQ request failed\n");
748 return r; 728 goto err;
749 } 729 }
750 730
751 pm_runtime_enable(&pdev->dev); 731 pm_runtime_enable(&pdev->dev);
752 732
753 hdmi_init_output(pdev); 733 hdmi_init_output(pdev);
754 734
735 r = hdmi_audio_register(&pdev->dev);
736 if (r) {
737 DSSERR("Registering HDMI audio failed %d\n", r);
738 hdmi_uninit_output(pdev);
739 pm_runtime_disable(&pdev->dev);
740 return r;
741 }
742
755 dss_debugfs_create_file("hdmi", hdmi_dump_regs); 743 dss_debugfs_create_file("hdmi", hdmi_dump_regs);
756 744
757 return 0; 745 return 0;
746err:
747 hdmi_pll_uninit(&hdmi.pll);
748 return r;
758} 749}
759 750
760static int __exit omapdss_hdmihw_remove(struct platform_device *pdev) 751static int __exit omapdss_hdmihw_remove(struct platform_device *pdev)
761{ 752{
753 if (hdmi.audio_pdev)
754 platform_device_unregister(hdmi.audio_pdev);
755
762 hdmi_uninit_output(pdev); 756 hdmi_uninit_output(pdev);
763 757
758 hdmi_pll_uninit(&hdmi.pll);
759
764 pm_runtime_disable(&pdev->dev); 760 pm_runtime_disable(&pdev->dev);
765 761
766 return 0; 762 return 0;
@@ -768,8 +764,6 @@ static int __exit omapdss_hdmihw_remove(struct platform_device *pdev)
768 764
769static int hdmi_runtime_suspend(struct device *dev) 765static int hdmi_runtime_suspend(struct device *dev)
770{ 766{
771 clk_disable_unprepare(hdmi.sys_clk);
772
773 dispc_runtime_put(); 767 dispc_runtime_put();
774 768
775 return 0; 769 return 0;
@@ -783,8 +777,6 @@ static int hdmi_runtime_resume(struct device *dev)
783 if (r < 0) 777 if (r < 0)
784 return r; 778 return r;
785 779
786 clk_prepare_enable(hdmi.sys_clk);
787
788 return 0; 780 return 0;
789} 781}
790 782
diff --git a/drivers/video/fbdev/omap2/dss/hdmi5_core.c b/drivers/video/fbdev/omap2/dss/hdmi5_core.c
index 83acbf7a8c89..a3cfe3d708f7 100644
--- a/drivers/video/fbdev/omap2/dss/hdmi5_core.c
+++ b/drivers/video/fbdev/omap2/dss/hdmi5_core.c
@@ -30,10 +30,8 @@
30#include <linux/string.h> 30#include <linux/string.h>
31#include <linux/seq_file.h> 31#include <linux/seq_file.h>
32#include <drm/drm_edid.h> 32#include <drm/drm_edid.h>
33#if defined(CONFIG_OMAP5_DSS_HDMI_AUDIO)
34#include <sound/asound.h> 33#include <sound/asound.h>
35#include <sound/asoundef.h> 34#include <sound/asoundef.h>
36#endif
37 35
38#include "hdmi5_core.h" 36#include "hdmi5_core.h"
39 37
@@ -644,9 +642,6 @@ void hdmi5_configure(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
644 hdmi_core_enable_interrupts(core); 642 hdmi_core_enable_interrupts(core);
645} 643}
646 644
647
648#if defined(CONFIG_OMAP5_DSS_HDMI_AUDIO)
649
650static void hdmi5_core_audio_config(struct hdmi_core_data *core, 645static void hdmi5_core_audio_config(struct hdmi_core_data *core,
651 struct hdmi_core_audio_config *cfg) 646 struct hdmi_core_audio_config *cfg)
652{ 647{
@@ -721,7 +716,7 @@ static void hdmi5_core_audio_config(struct hdmi_core_data *core,
721 716
722 /* Source number */ 717 /* Source number */
723 val = cfg->iec60958_cfg->status[2] & IEC958_AES2_CON_SOURCE; 718 val = cfg->iec60958_cfg->status[2] & IEC958_AES2_CON_SOURCE;
724 REG_FLD_MOD(base, HDMI_CORE_FC_AUDSCHNLS(2), val, 3, 4); 719 REG_FLD_MOD(base, HDMI_CORE_FC_AUDSCHNLS(2), val, 3, 0);
725 720
726 /* Channel number right 0 */ 721 /* Channel number right 0 */
727 REG_FLD_MOD(base, HDMI_CORE_FC_AUDSCHNLS(3), 2, 3, 0); 722 REG_FLD_MOD(base, HDMI_CORE_FC_AUDSCHNLS(3), 2, 3, 0);
@@ -879,6 +874,9 @@ int hdmi5_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
879 /* only LPCM atm */ 874 /* only LPCM atm */
880 audio_format.type = HDMI_AUDIO_TYPE_LPCM; 875 audio_format.type = HDMI_AUDIO_TYPE_LPCM;
881 876
877 /* only allowed option */
878 audio_format.sample_order = HDMI_AUDIO_SAMPLE_LEFT_FIRST;
879
882 /* disable start/stop signals of IEC 60958 blocks */ 880 /* disable start/stop signals of IEC 60958 blocks */
883 audio_format.en_sig_blk_strt_end = HDMI_AUDIO_BLOCK_SIG_STARTEND_ON; 881 audio_format.en_sig_blk_strt_end = HDMI_AUDIO_BLOCK_SIG_STARTEND_ON;
884 882
@@ -894,7 +892,6 @@ int hdmi5_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
894 892
895 return 0; 893 return 0;
896} 894}
897#endif
898 895
899int hdmi5_core_init(struct platform_device *pdev, struct hdmi_core_data *core) 896int hdmi5_core_init(struct platform_device *pdev, struct hdmi_core_data *core)
900{ 897{
diff --git a/drivers/video/fbdev/omap2/dss/hdmi5_core.h b/drivers/video/fbdev/omap2/dss/hdmi5_core.h
index ce7e9f376f04..f2f1022c5516 100644
--- a/drivers/video/fbdev/omap2/dss/hdmi5_core.h
+++ b/drivers/video/fbdev/omap2/dss/hdmi5_core.h
@@ -299,8 +299,6 @@ void hdmi5_configure(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
299 struct hdmi_config *cfg); 299 struct hdmi_config *cfg);
300int hdmi5_core_init(struct platform_device *pdev, struct hdmi_core_data *core); 300int hdmi5_core_init(struct platform_device *pdev, struct hdmi_core_data *core);
301 301
302#if defined(CONFIG_OMAP5_DSS_HDMI_AUDIO)
303int hdmi5_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp, 302int hdmi5_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
304 struct omap_dss_audio *audio, u32 pclk); 303 struct omap_dss_audio *audio, u32 pclk);
305#endif 304#endif
306#endif
diff --git a/drivers/video/fbdev/omap2/dss/hdmi_common.c b/drivers/video/fbdev/omap2/dss/hdmi_common.c
index 7d5f1039de9f..1b8fcc6c4ba1 100644
--- a/drivers/video/fbdev/omap2/dss/hdmi_common.c
+++ b/drivers/video/fbdev/omap2/dss/hdmi_common.c
@@ -48,7 +48,6 @@ int hdmi_parse_lanes_of(struct platform_device *pdev, struct device_node *ep,
48 return 0; 48 return 0;
49} 49}
50 50
51#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
52int hdmi_compute_acr(u32 pclk, u32 sample_freq, u32 *n, u32 *cts) 51int hdmi_compute_acr(u32 pclk, u32 sample_freq, u32 *n, u32 *cts)
53{ 52{
54 u32 deep_color; 53 u32 deep_color;
@@ -147,4 +146,3 @@ int hdmi_compute_acr(u32 pclk, u32 sample_freq, u32 *n, u32 *cts)
147 146
148 return 0; 147 return 0;
149} 148}
150#endif
diff --git a/drivers/video/fbdev/omap2/dss/hdmi_phy.c b/drivers/video/fbdev/omap2/dss/hdmi_phy.c
index e007ac892d79..bc9e07d2afbe 100644
--- a/drivers/video/fbdev/omap2/dss/hdmi_phy.c
+++ b/drivers/video/fbdev/omap2/dss/hdmi_phy.c
@@ -20,9 +20,7 @@
20 20
21struct hdmi_phy_features { 21struct hdmi_phy_features {
22 bool bist_ctrl; 22 bool bist_ctrl;
23 bool calc_freqout;
24 bool ldo_voltage; 23 bool ldo_voltage;
25 unsigned long dcofreq_min;
26 unsigned long max_phy; 24 unsigned long max_phy;
27}; 25};
28 26
@@ -132,7 +130,8 @@ static void hdmi_phy_configure_lanes(struct hdmi_phy_data *phy)
132 REG_FLD_MOD(phy->base, HDMI_TXPHY_PAD_CFG_CTRL, pol_val, 30, 27); 130 REG_FLD_MOD(phy->base, HDMI_TXPHY_PAD_CFG_CTRL, pol_val, 30, 27);
133} 131}
134 132
135int hdmi_phy_configure(struct hdmi_phy_data *phy, struct hdmi_config *cfg) 133int hdmi_phy_configure(struct hdmi_phy_data *phy, unsigned long hfbitclk,
134 unsigned long lfbitclk)
136{ 135{
137 u8 freqout; 136 u8 freqout;
138 137
@@ -149,20 +148,16 @@ int hdmi_phy_configure(struct hdmi_phy_data *phy, struct hdmi_config *cfg)
149 if (phy_feat->bist_ctrl) 148 if (phy_feat->bist_ctrl)
150 REG_FLD_MOD(phy->base, HDMI_TXPHY_BIST_CONTROL, 1, 11, 11); 149 REG_FLD_MOD(phy->base, HDMI_TXPHY_BIST_CONTROL, 1, 11, 11);
151 150
152 if (phy_feat->calc_freqout) { 151 /*
153 /* DCOCLK/10 is pixel clock, compare pclk with DCOCLK_MIN/10 */ 152 * If the hfbitclk != lfbitclk, it means the lfbitclk was configured
154 u32 dco_min = phy_feat->dcofreq_min / 10; 153 * to be used for TMDS.
155 u32 pclk = cfg->timings.pixelclock; 154 */
156 155 if (hfbitclk != lfbitclk)
157 if (pclk < dco_min) 156 freqout = 0;
158 freqout = 0; 157 else if (hfbitclk / 10 < phy_feat->max_phy)
159 else if ((pclk >= dco_min) && (pclk < phy_feat->max_phy))
160 freqout = 1;
161 else
162 freqout = 2;
163 } else {
164 freqout = 1; 158 freqout = 1;
165 } 159 else
160 freqout = 2;
166 161
167 /* 162 /*
168 * Write to phy address 0 to configure the clock 163 * Write to phy address 0 to configure the clock
@@ -184,17 +179,13 @@ int hdmi_phy_configure(struct hdmi_phy_data *phy, struct hdmi_config *cfg)
184 179
185static const struct hdmi_phy_features omap44xx_phy_feats = { 180static const struct hdmi_phy_features omap44xx_phy_feats = {
186 .bist_ctrl = false, 181 .bist_ctrl = false,
187 .calc_freqout = false,
188 .ldo_voltage = true, 182 .ldo_voltage = true,
189 .dcofreq_min = 500000000,
190 .max_phy = 185675000, 183 .max_phy = 185675000,
191}; 184};
192 185
193static const struct hdmi_phy_features omap54xx_phy_feats = { 186static const struct hdmi_phy_features omap54xx_phy_feats = {
194 .bist_ctrl = true, 187 .bist_ctrl = true,
195 .calc_freqout = true,
196 .ldo_voltage = false, 188 .ldo_voltage = false,
197 .dcofreq_min = 750000000,
198 .max_phy = 186000000, 189 .max_phy = 186000000,
199}; 190};
200 191
diff --git a/drivers/video/fbdev/omap2/dss/hdmi_pll.c b/drivers/video/fbdev/omap2/dss/hdmi_pll.c
index 6d92bb32fe51..87accdb59c81 100644
--- a/drivers/video/fbdev/omap2/dss/hdmi_pll.c
+++ b/drivers/video/fbdev/omap2/dss/hdmi_pll.c
@@ -15,26 +15,13 @@
15#include <linux/err.h> 15#include <linux/err.h>
16#include <linux/io.h> 16#include <linux/io.h>
17#include <linux/platform_device.h> 17#include <linux/platform_device.h>
18#include <linux/clk.h>
19
18#include <video/omapdss.h> 20#include <video/omapdss.h>
19 21
20#include "dss.h" 22#include "dss.h"
21#include "hdmi.h" 23#include "hdmi.h"
22 24
23#define HDMI_DEFAULT_REGN 16
24#define HDMI_DEFAULT_REGM2 1
25
26struct hdmi_pll_features {
27 bool sys_reset;
28 /* this is a hack, need to replace it with a better computation of M2 */
29 bool bound_dcofreq;
30 unsigned long fint_min, fint_max;
31 u16 regm_max;
32 unsigned long dcofreq_low_min, dcofreq_low_max;
33 unsigned long dcofreq_high_min, dcofreq_high_max;
34};
35
36static const struct hdmi_pll_features *pll_feat;
37
38void hdmi_pll_dump(struct hdmi_pll_data *pll, struct seq_file *s) 25void hdmi_pll_dump(struct hdmi_pll_data *pll, struct seq_file *s)
39{ 26{
40#define DUMPPLL(r) seq_printf(s, "%-35s %08x\n", #r,\ 27#define DUMPPLL(r) seq_printf(s, "%-35s %08x\n", #r,\
@@ -51,228 +38,189 @@ void hdmi_pll_dump(struct hdmi_pll_data *pll, struct seq_file *s)
51 DUMPPLL(PLLCTRL_CFG4); 38 DUMPPLL(PLLCTRL_CFG4);
52} 39}
53 40
54void hdmi_pll_compute(struct hdmi_pll_data *pll, unsigned long clkin, int phy) 41void hdmi_pll_compute(struct hdmi_pll_data *pll,
42 unsigned long target_tmds, struct dss_pll_clock_info *pi)
55{ 43{
56 struct hdmi_pll_info *pi = &pll->info; 44 unsigned long fint, clkdco, clkout;
57 unsigned long refclk; 45 unsigned long target_bitclk, target_clkdco;
58 u32 mf; 46 unsigned long min_dco;
47 unsigned n, m, mf, m2, sd;
48 unsigned long clkin;
49 const struct dss_pll_hw *hw = pll->pll.hw;
59 50
60 /* use our funky units */ 51 clkin = clk_get_rate(pll->pll.clkin);
61 clkin /= 10000;
62 52
63 /* 53 DSSDBG("clkin %lu, target tmds %lu\n", clkin, target_tmds);
64 * Input clock is predivided by N + 1
65 * out put of which is reference clk
66 */
67 54
68 pi->regn = HDMI_DEFAULT_REGN; 55 target_bitclk = target_tmds * 10;
69 56
70 refclk = clkin / pi->regn; 57 /* Fint */
58 n = DIV_ROUND_UP(clkin, hw->fint_max);
59 fint = clkin / n;
71 60
72 /* temorary hack to make sure DCO freq isn't calculated too low */ 61 /* adjust m2 so that the clkdco will be high enough */
73 if (pll_feat->bound_dcofreq && phy <= 65000) 62 min_dco = roundup(hw->clkdco_min, fint);
74 pi->regm2 = 3; 63 m2 = DIV_ROUND_UP(min_dco, target_bitclk);
75 else 64 if (m2 == 0)
76 pi->regm2 = HDMI_DEFAULT_REGM2; 65 m2 = 1;
77
78 /*
79 * multiplier is pixel_clk/ref_clk
80 * Multiplying by 100 to avoid fractional part removal
81 */
82 pi->regm = phy * pi->regm2 / refclk;
83
84 /*
85 * fractional multiplier is remainder of the difference between
86 * multiplier and actual phy(required pixel clock thus should be
87 * multiplied by 2^18(262144) divided by the reference clock
88 */
89 mf = (phy - pi->regm / pi->regm2 * refclk) * 262144;
90 pi->regmf = pi->regm2 * mf / refclk;
91
92 /*
93 * Dcofreq should be set to 1 if required pixel clock
94 * is greater than 1000MHz
95 */
96 pi->dcofreq = phy > 1000 * 100;
97 pi->regsd = ((pi->regm * clkin / 10) / (pi->regn * 250) + 5) / 10;
98
99 /* Set the reference clock to sysclk reference */
100 pi->refsel = HDMI_REFSEL_SYSCLK;
101
102 DSSDBG("M = %d Mf = %d\n", pi->regm, pi->regmf);
103 DSSDBG("range = %d sd = %d\n", pi->dcofreq, pi->regsd);
104}
105 66
67 target_clkdco = target_bitclk * m2;
68 m = target_clkdco / fint;
106 69
107static int hdmi_pll_config(struct hdmi_pll_data *pll) 70 clkdco = fint * m;
108{
109 u32 r;
110 struct hdmi_pll_info *fmt = &pll->info;
111 71
112 /* PLL start always use manual mode */ 72 /* adjust clkdco with fractional mf */
113 REG_FLD_MOD(pll->base, PLLCTRL_PLL_CONTROL, 0x0, 0, 0); 73 if (WARN_ON(target_clkdco - clkdco > fint))
114 74 mf = 0;
115 r = hdmi_read_reg(pll->base, PLLCTRL_CFG1);
116 r = FLD_MOD(r, fmt->regm, 20, 9); /* CFG1_PLL_REGM */
117 r = FLD_MOD(r, fmt->regn - 1, 8, 1); /* CFG1_PLL_REGN */
118 hdmi_write_reg(pll->base, PLLCTRL_CFG1, r);
119
120 r = hdmi_read_reg(pll->base, PLLCTRL_CFG2);
121
122 r = FLD_MOD(r, 0x0, 12, 12); /* PLL_HIGHFREQ divide by 2 */
123 r = FLD_MOD(r, 0x1, 13, 13); /* PLL_REFEN */
124 r = FLD_MOD(r, 0x0, 14, 14); /* PHY_CLKINEN de-assert during locking */
125 r = FLD_MOD(r, fmt->refsel, 22, 21); /* REFSEL */
126
127 if (fmt->dcofreq)
128 r = FLD_MOD(r, 0x4, 3, 1); /* 1000MHz and 2000MHz */
129 else 75 else
130 r = FLD_MOD(r, 0x2, 3, 1); /* 500MHz and 1000MHz */ 76 mf = (u32)div_u64(262144ull * (target_clkdco - clkdco), fint);
131
132 hdmi_write_reg(pll->base, PLLCTRL_CFG2, r);
133
134 REG_FLD_MOD(pll->base, PLLCTRL_CFG3, fmt->regsd, 17, 10);
135 77
136 r = hdmi_read_reg(pll->base, PLLCTRL_CFG4); 78 if (mf > 0)
137 r = FLD_MOD(r, fmt->regm2, 24, 18); 79 clkdco += (u32)div_u64((u64)mf * fint, 262144);
138 r = FLD_MOD(r, fmt->regmf, 17, 0);
139 hdmi_write_reg(pll->base, PLLCTRL_CFG4, r);
140 80
141 /* go now */ 81 clkout = clkdco / m2;
142 REG_FLD_MOD(pll->base, PLLCTRL_PLL_GO, 0x1, 0, 0);
143 82
144 /* wait for bit change */ 83 /* sigma-delta */
145 if (hdmi_wait_for_bit_change(pll->base, PLLCTRL_PLL_GO, 84 sd = DIV_ROUND_UP(fint * m, 250000000);
146 0, 0, 0) != 0) {
147 DSSERR("PLL GO bit not clearing\n");
148 return -ETIMEDOUT;
149 }
150 85
151 /* Wait till the lock bit is set in PLL status */ 86 DSSDBG("N = %u, M = %u, M.f = %u, M2 = %u, SD = %u\n",
152 if (hdmi_wait_for_bit_change(pll->base, 87 n, m, mf, m2, sd);
153 PLLCTRL_PLL_STATUS, 1, 1, 1) != 1) { 88 DSSDBG("Fint %lu, clkdco %lu, clkout %lu\n", fint, clkdco, clkout);
154 DSSERR("cannot lock PLL\n");
155 DSSERR("CFG1 0x%x\n",
156 hdmi_read_reg(pll->base, PLLCTRL_CFG1));
157 DSSERR("CFG2 0x%x\n",
158 hdmi_read_reg(pll->base, PLLCTRL_CFG2));
159 DSSERR("CFG4 0x%x\n",
160 hdmi_read_reg(pll->base, PLLCTRL_CFG4));
161 return -ETIMEDOUT;
162 }
163 89
164 DSSDBG("PLL locked!\n"); 90 pi->n = n;
91 pi->m = m;
92 pi->mf = mf;
93 pi->mX[0] = m2;
94 pi->sd = sd;
165 95
166 return 0; 96 pi->fint = fint;
97 pi->clkdco = clkdco;
98 pi->clkout[0] = clkout;
167} 99}
168 100
169static int hdmi_pll_reset(struct hdmi_pll_data *pll) 101static int hdmi_pll_enable(struct dss_pll *dsspll)
170{
171 /* SYSRESET controlled by power FSM */
172 REG_FLD_MOD(pll->base, PLLCTRL_PLL_CONTROL, pll_feat->sys_reset, 3, 3);
173
174 /* READ 0x0 reset is in progress */
175 if (hdmi_wait_for_bit_change(pll->base, PLLCTRL_PLL_STATUS, 0, 0, 1)
176 != 1) {
177 DSSERR("Failed to sysreset PLL\n");
178 return -ETIMEDOUT;
179 }
180
181 return 0;
182}
183
184int hdmi_pll_enable(struct hdmi_pll_data *pll, struct hdmi_wp_data *wp)
185{ 102{
103 struct hdmi_pll_data *pll = container_of(dsspll, struct hdmi_pll_data, pll);
104 struct hdmi_wp_data *wp = pll->wp;
186 u16 r = 0; 105 u16 r = 0;
187 106
188 r = hdmi_wp_set_pll_pwr(wp, HDMI_PLLPWRCMD_ALLOFF);
189 if (r)
190 return r;
191
192 r = hdmi_wp_set_pll_pwr(wp, HDMI_PLLPWRCMD_BOTHON_ALLCLKS); 107 r = hdmi_wp_set_pll_pwr(wp, HDMI_PLLPWRCMD_BOTHON_ALLCLKS);
193 if (r) 108 if (r)
194 return r; 109 return r;
195 110
196 r = hdmi_pll_reset(pll);
197 if (r)
198 return r;
199
200 r = hdmi_pll_config(pll);
201 if (r)
202 return r;
203
204 return 0; 111 return 0;
205} 112}
206 113
207void hdmi_pll_disable(struct hdmi_pll_data *pll, struct hdmi_wp_data *wp) 114static void hdmi_pll_disable(struct dss_pll *dsspll)
208{ 115{
116 struct hdmi_pll_data *pll = container_of(dsspll, struct hdmi_pll_data, pll);
117 struct hdmi_wp_data *wp = pll->wp;
118
209 hdmi_wp_set_pll_pwr(wp, HDMI_PLLPWRCMD_ALLOFF); 119 hdmi_wp_set_pll_pwr(wp, HDMI_PLLPWRCMD_ALLOFF);
210} 120}
211 121
212static const struct hdmi_pll_features omap44xx_pll_feats = { 122static const struct dss_pll_ops dsi_pll_ops = {
213 .sys_reset = false, 123 .enable = hdmi_pll_enable,
214 .bound_dcofreq = false, 124 .disable = hdmi_pll_disable,
215 .fint_min = 500000, 125 .set_config = dss_pll_write_config_type_b,
216 .fint_max = 2500000,
217 .regm_max = 4095,
218 .dcofreq_low_min = 500000000,
219 .dcofreq_low_max = 1000000000,
220 .dcofreq_high_min = 1000000000,
221 .dcofreq_high_max = 2000000000,
222}; 126};
223 127
224static const struct hdmi_pll_features omap54xx_pll_feats = { 128static const struct dss_pll_hw dss_omap4_hdmi_pll_hw = {
225 .sys_reset = true, 129 .n_max = 255,
226 .bound_dcofreq = true, 130 .m_min = 20,
227 .fint_min = 620000, 131 .m_max = 4095,
228 .fint_max = 2500000, 132 .mX_max = 127,
229 .regm_max = 2046, 133 .fint_min = 500000,
230 .dcofreq_low_min = 750000000, 134 .fint_max = 2500000,
231 .dcofreq_low_max = 1500000000, 135 .clkdco_max = 1800000000,
232 .dcofreq_high_min = 1250000000, 136
233 .dcofreq_high_max = 2500000000UL, 137 .clkdco_min = 500000000,
138 .clkdco_low = 1000000000,
139 .clkdco_max = 2000000000,
140
141 .n_msb = 8,
142 .n_lsb = 1,
143 .m_msb = 20,
144 .m_lsb = 9,
145
146 .mX_msb[0] = 24,
147 .mX_lsb[0] = 18,
148
149 .has_selfreqdco = true,
234}; 150};
235 151
236static int hdmi_pll_init_features(struct platform_device *pdev) 152static const struct dss_pll_hw dss_omap5_hdmi_pll_hw = {
153 .n_max = 255,
154 .m_min = 20,
155 .m_max = 2045,
156 .mX_max = 127,
157 .fint_min = 620000,
158 .fint_max = 2500000,
159 .clkdco_max = 1800000000,
160
161 .clkdco_min = 750000000,
162 .clkdco_low = 1500000000,
163 .clkdco_max = 2500000000UL,
164
165 .n_msb = 8,
166 .n_lsb = 1,
167 .m_msb = 20,
168 .m_lsb = 9,
169
170 .mX_msb[0] = 24,
171 .mX_lsb[0] = 18,
172
173 .has_selfreqdco = true,
174 .has_refsel = true,
175};
176
177static int dsi_init_pll_data(struct platform_device *pdev, struct hdmi_pll_data *hpll)
237{ 178{
238 struct hdmi_pll_features *dst; 179 struct dss_pll *pll = &hpll->pll;
239 const struct hdmi_pll_features *src; 180 struct clk *clk;
181 int r;
240 182
241 dst = devm_kzalloc(&pdev->dev, sizeof(*dst), GFP_KERNEL); 183 clk = devm_clk_get(&pdev->dev, "sys_clk");
242 if (!dst) { 184 if (IS_ERR(clk)) {
243 dev_err(&pdev->dev, "Failed to allocate HDMI PHY Features\n"); 185 DSSERR("can't get sys_clk\n");
244 return -ENOMEM; 186 return PTR_ERR(clk);
245 } 187 }
246 188
189 pll->name = "hdmi";
190 pll->base = hpll->base;
191 pll->clkin = clk;
192
247 switch (omapdss_get_version()) { 193 switch (omapdss_get_version()) {
248 case OMAPDSS_VER_OMAP4430_ES1: 194 case OMAPDSS_VER_OMAP4430_ES1:
249 case OMAPDSS_VER_OMAP4430_ES2: 195 case OMAPDSS_VER_OMAP4430_ES2:
250 case OMAPDSS_VER_OMAP4: 196 case OMAPDSS_VER_OMAP4:
251 src = &omap44xx_pll_feats; 197 pll->hw = &dss_omap4_hdmi_pll_hw;
252 break; 198 break;
253 199
254 case OMAPDSS_VER_OMAP5: 200 case OMAPDSS_VER_OMAP5:
255 src = &omap54xx_pll_feats; 201 pll->hw = &dss_omap5_hdmi_pll_hw;
256 break; 202 break;
257 203
258 default: 204 default:
259 return -ENODEV; 205 return -ENODEV;
260 } 206 }
261 207
262 memcpy(dst, src, sizeof(*dst)); 208 pll->ops = &dsi_pll_ops;
263 pll_feat = dst; 209
210 r = dss_pll_register(pll);
211 if (r)
212 return r;
264 213
265 return 0; 214 return 0;
266} 215}
267 216
268int hdmi_pll_init(struct platform_device *pdev, struct hdmi_pll_data *pll) 217int hdmi_pll_init(struct platform_device *pdev, struct hdmi_pll_data *pll,
218 struct hdmi_wp_data *wp)
269{ 219{
270 int r; 220 int r;
271 struct resource *res; 221 struct resource *res;
272 222
273 r = hdmi_pll_init_features(pdev); 223 pll->wp = wp;
274 if (r)
275 return r;
276 224
277 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pll"); 225 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pll");
278 if (!res) { 226 if (!res) {
@@ -286,5 +234,18 @@ int hdmi_pll_init(struct platform_device *pdev, struct hdmi_pll_data *pll)
286 return PTR_ERR(pll->base); 234 return PTR_ERR(pll->base);
287 } 235 }
288 236
237 r = dsi_init_pll_data(pdev, pll);
238 if (r) {
239 DSSERR("failed to init HDMI PLL\n");
240 return r;
241 }
242
289 return 0; 243 return 0;
290} 244}
245
246void hdmi_pll_uninit(struct hdmi_pll_data *hpll)
247{
248 struct dss_pll *pll = &hpll->pll;
249
250 dss_pll_unregister(pll);
251}
diff --git a/drivers/video/fbdev/omap2/dss/hdmi_wp.c b/drivers/video/fbdev/omap2/dss/hdmi_wp.c
index 496327e2b21b..c15377e242cc 100644
--- a/drivers/video/fbdev/omap2/dss/hdmi_wp.c
+++ b/drivers/video/fbdev/omap2/dss/hdmi_wp.c
@@ -185,7 +185,6 @@ void hdmi_wp_init_vid_fmt_timings(struct hdmi_video_format *video_fmt,
185 timings->interlace = param->timings.interlace; 185 timings->interlace = param->timings.interlace;
186} 186}
187 187
188#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO) || defined(CONFIG_OMAP5_DSS_HDMI_AUDIO)
189void hdmi_wp_audio_config_format(struct hdmi_wp_data *wp, 188void hdmi_wp_audio_config_format(struct hdmi_wp_data *wp,
190 struct hdmi_audio_format *aud_fmt) 189 struct hdmi_audio_format *aud_fmt)
191{ 190{
@@ -194,8 +193,12 @@ void hdmi_wp_audio_config_format(struct hdmi_wp_data *wp,
194 DSSDBG("Enter hdmi_wp_audio_config_format\n"); 193 DSSDBG("Enter hdmi_wp_audio_config_format\n");
195 194
196 r = hdmi_read_reg(wp->base, HDMI_WP_AUDIO_CFG); 195 r = hdmi_read_reg(wp->base, HDMI_WP_AUDIO_CFG);
197 r = FLD_MOD(r, aud_fmt->stereo_channels, 26, 24); 196 if (omapdss_get_version() == OMAPDSS_VER_OMAP4430_ES1 ||
198 r = FLD_MOD(r, aud_fmt->active_chnnls_msk, 23, 16); 197 omapdss_get_version() == OMAPDSS_VER_OMAP4430_ES2 ||
198 omapdss_get_version() == OMAPDSS_VER_OMAP4) {
199 r = FLD_MOD(r, aud_fmt->stereo_channels, 26, 24);
200 r = FLD_MOD(r, aud_fmt->active_chnnls_msk, 23, 16);
201 }
199 r = FLD_MOD(r, aud_fmt->en_sig_blk_strt_end, 5, 5); 202 r = FLD_MOD(r, aud_fmt->en_sig_blk_strt_end, 5, 5);
200 r = FLD_MOD(r, aud_fmt->type, 4, 4); 203 r = FLD_MOD(r, aud_fmt->type, 4, 4);
201 r = FLD_MOD(r, aud_fmt->justification, 3, 3); 204 r = FLD_MOD(r, aud_fmt->justification, 3, 3);
@@ -236,7 +239,6 @@ int hdmi_wp_audio_core_req_enable(struct hdmi_wp_data *wp, bool enable)
236 239
237 return 0; 240 return 0;
238} 241}
239#endif
240 242
241int hdmi_wp_init(struct platform_device *pdev, struct hdmi_wp_data *wp) 243int hdmi_wp_init(struct platform_device *pdev, struct hdmi_wp_data *wp)
242{ 244{
@@ -247,6 +249,7 @@ int hdmi_wp_init(struct platform_device *pdev, struct hdmi_wp_data *wp)
247 DSSERR("can't get WP mem resource\n"); 249 DSSERR("can't get WP mem resource\n");
248 return -EINVAL; 250 return -EINVAL;
249 } 251 }
252 wp->phys_base = res->start;
250 253
251 wp->base = devm_ioremap_resource(&pdev->dev, res); 254 wp->base = devm_ioremap_resource(&pdev->dev, res);
252 if (IS_ERR(wp->base)) { 255 if (IS_ERR(wp->base)) {
@@ -256,3 +259,8 @@ int hdmi_wp_init(struct platform_device *pdev, struct hdmi_wp_data *wp)
256 259
257 return 0; 260 return 0;
258} 261}
262
263phys_addr_t hdmi_wp_get_audio_dma_addr(struct hdmi_wp_data *wp)
264{
265 return wp->phys_base + HDMI_WP_AUDIO_DATA;
266}
diff --git a/drivers/video/fbdev/omap2/dss/output.c b/drivers/video/fbdev/omap2/dss/output.c
index 2ab3afa615e8..16072159bd24 100644
--- a/drivers/video/fbdev/omap2/dss/output.c
+++ b/drivers/video/fbdev/omap2/dss/output.c
@@ -19,6 +19,7 @@
19#include <linux/module.h> 19#include <linux/module.h>
20#include <linux/platform_device.h> 20#include <linux/platform_device.h>
21#include <linux/slab.h> 21#include <linux/slab.h>
22#include <linux/of.h>
22 23
23#include <video/omapdss.h> 24#include <video/omapdss.h>
24 25
@@ -131,18 +132,30 @@ struct omap_dss_device *omap_dss_find_output(const char *name)
131} 132}
132EXPORT_SYMBOL(omap_dss_find_output); 133EXPORT_SYMBOL(omap_dss_find_output);
133 134
134struct omap_dss_device *omap_dss_find_output_by_node(struct device_node *node) 135struct omap_dss_device *omap_dss_find_output_by_port_node(struct device_node *port)
135{ 136{
137 struct device_node *src_node;
136 struct omap_dss_device *out; 138 struct omap_dss_device *out;
139 u32 reg;
140
141 src_node = dss_of_port_get_parent_device(port);
142 if (!src_node)
143 return NULL;
144
145 reg = dss_of_port_get_port_number(port);
137 146
138 list_for_each_entry(out, &output_list, list) { 147 list_for_each_entry(out, &output_list, list) {
139 if (out->dev->of_node == node) 148 if (out->dev->of_node == src_node && out->port_num == reg) {
149 of_node_put(src_node);
140 return omap_dss_get_device(out); 150 return omap_dss_get_device(out);
151 }
141 } 152 }
142 153
154 of_node_put(src_node);
155
143 return NULL; 156 return NULL;
144} 157}
145EXPORT_SYMBOL(omap_dss_find_output_by_node); 158EXPORT_SYMBOL(omap_dss_find_output_by_port_node);
146 159
147struct omap_dss_device *omapdss_find_output_from_display(struct omap_dss_device *dssdev) 160struct omap_dss_device *omapdss_find_output_from_display(struct omap_dss_device *dssdev)
148{ 161{
diff --git a/drivers/video/fbdev/omap2/dss/pll.c b/drivers/video/fbdev/omap2/dss/pll.c
new file mode 100644
index 000000000000..50bc62c5d367
--- /dev/null
+++ b/drivers/video/fbdev/omap2/dss/pll.c
@@ -0,0 +1,378 @@
1/*
2 * Copyright (C) 2014 Texas Instruments Incorporated
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License version 2 as published by
6 * the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#define DSS_SUBSYS_NAME "PLL"
18
19#include <linux/clk.h>
20#include <linux/io.h>
21#include <linux/kernel.h>
22#include <linux/regulator/consumer.h>
23#include <linux/sched.h>
24
25#include <video/omapdss.h>
26
27#include "dss.h"
28
29#define PLL_CONTROL 0x0000
30#define PLL_STATUS 0x0004
31#define PLL_GO 0x0008
32#define PLL_CONFIGURATION1 0x000C
33#define PLL_CONFIGURATION2 0x0010
34#define PLL_CONFIGURATION3 0x0014
35#define PLL_SSC_CONFIGURATION1 0x0018
36#define PLL_SSC_CONFIGURATION2 0x001C
37#define PLL_CONFIGURATION4 0x0020
38
39static struct dss_pll *dss_plls[4];
40
41int dss_pll_register(struct dss_pll *pll)
42{
43 int i;
44
45 for (i = 0; i < ARRAY_SIZE(dss_plls); ++i) {
46 if (!dss_plls[i]) {
47 dss_plls[i] = pll;
48 return 0;
49 }
50 }
51
52 return -EBUSY;
53}
54
55void dss_pll_unregister(struct dss_pll *pll)
56{
57 int i;
58
59 for (i = 0; i < ARRAY_SIZE(dss_plls); ++i) {
60 if (dss_plls[i] == pll) {
61 dss_plls[i] = NULL;
62 return;
63 }
64 }
65}
66
67struct dss_pll *dss_pll_find(const char *name)
68{
69 int i;
70
71 for (i = 0; i < ARRAY_SIZE(dss_plls); ++i) {
72 if (dss_plls[i] && strcmp(dss_plls[i]->name, name) == 0)
73 return dss_plls[i];
74 }
75
76 return NULL;
77}
78
79int dss_pll_enable(struct dss_pll *pll)
80{
81 int r;
82
83 r = clk_prepare_enable(pll->clkin);
84 if (r)
85 return r;
86
87 if (pll->regulator) {
88 r = regulator_enable(pll->regulator);
89 if (r)
90 goto err_reg;
91 }
92
93 r = pll->ops->enable(pll);
94 if (r)
95 goto err_enable;
96
97 return 0;
98
99err_enable:
100 regulator_disable(pll->regulator);
101err_reg:
102 clk_disable_unprepare(pll->clkin);
103 return r;
104}
105
106void dss_pll_disable(struct dss_pll *pll)
107{
108 pll->ops->disable(pll);
109
110 if (pll->regulator)
111 regulator_disable(pll->regulator);
112
113 clk_disable_unprepare(pll->clkin);
114
115 memset(&pll->cinfo, 0, sizeof(pll->cinfo));
116}
117
118int dss_pll_set_config(struct dss_pll *pll, const struct dss_pll_clock_info *cinfo)
119{
120 int r;
121
122 r = pll->ops->set_config(pll, cinfo);
123 if (r)
124 return r;
125
126 pll->cinfo = *cinfo;
127
128 return 0;
129}
130
131bool dss_pll_hsdiv_calc(const struct dss_pll *pll, unsigned long clkdco,
132 unsigned long out_min, unsigned long out_max,
133 dss_hsdiv_calc_func func, void *data)
134{
135 const struct dss_pll_hw *hw = pll->hw;
136 int m, m_start, m_stop;
137 unsigned long out;
138
139 out_min = out_min ? out_min : 1;
140 out_max = out_max ? out_max : ULONG_MAX;
141
142 m_start = max(DIV_ROUND_UP(clkdco, out_max), 1ul);
143
144 m_stop = min((unsigned)(clkdco / out_min), hw->mX_max);
145
146 for (m = m_start; m <= m_stop; ++m) {
147 out = clkdco / m;
148
149 if (func(m, out, data))
150 return true;
151 }
152
153 return false;
154}
155
156bool dss_pll_calc(const struct dss_pll *pll, unsigned long clkin,
157 unsigned long pll_min, unsigned long pll_max,
158 dss_pll_calc_func func, void *data)
159{
160 const struct dss_pll_hw *hw = pll->hw;
161 int n, n_start, n_stop;
162 int m, m_start, m_stop;
163 unsigned long fint, clkdco;
164 unsigned long pll_hw_max;
165 unsigned long fint_hw_min, fint_hw_max;
166
167 pll_hw_max = hw->clkdco_max;
168
169 fint_hw_min = hw->fint_min;
170 fint_hw_max = hw->fint_max;
171
172 n_start = max(DIV_ROUND_UP(clkin, fint_hw_max), 1ul);
173 n_stop = min((unsigned)(clkin / fint_hw_min), hw->n_max);
174
175 pll_max = pll_max ? pll_max : ULONG_MAX;
176
177 for (n = n_start; n <= n_stop; ++n) {
178 fint = clkin / n;
179
180 m_start = max(DIV_ROUND_UP(DIV_ROUND_UP(pll_min, fint), 2),
181 1ul);
182 m_stop = min3((unsigned)(pll_max / fint / 2),
183 (unsigned)(pll_hw_max / fint / 2),
184 hw->m_max);
185
186 for (m = m_start; m <= m_stop; ++m) {
187 clkdco = 2 * m * fint;
188
189 if (func(n, m, fint, clkdco, data))
190 return true;
191 }
192 }
193
194 return false;
195}
196
197static int wait_for_bit_change(void __iomem *reg, int bitnum, int value)
198{
199 unsigned long timeout;
200 ktime_t wait;
201 int t;
202
203 /* first busyloop to see if the bit changes right away */
204 t = 100;
205 while (t-- > 0) {
206 if (FLD_GET(readl_relaxed(reg), bitnum, bitnum) == value)
207 return value;
208 }
209
210 /* then loop for 500ms, sleeping for 1ms in between */
211 timeout = jiffies + msecs_to_jiffies(500);
212 while (time_before(jiffies, timeout)) {
213 if (FLD_GET(readl_relaxed(reg), bitnum, bitnum) == value)
214 return value;
215
216 wait = ns_to_ktime(1000 * 1000);
217 set_current_state(TASK_UNINTERRUPTIBLE);
218 schedule_hrtimeout(&wait, HRTIMER_MODE_REL);
219 }
220
221 return !value;
222}
223
224static int dss_wait_hsdiv_ack(struct dss_pll *pll, u32 hsdiv_ack_mask)
225{
226 int t = 100;
227
228 while (t-- > 0) {
229 u32 v = readl_relaxed(pll->base + PLL_STATUS);
230 v &= hsdiv_ack_mask;
231 if (v == hsdiv_ack_mask)
232 return 0;
233 }
234
235 return -ETIMEDOUT;
236}
237
238int dss_pll_write_config_type_a(struct dss_pll *pll,
239 const struct dss_pll_clock_info *cinfo)
240{
241 const struct dss_pll_hw *hw = pll->hw;
242 void __iomem *base = pll->base;
243 int r = 0;
244 u32 l;
245
246 l = 0;
247 if (hw->has_stopmode)
248 l = FLD_MOD(l, 1, 0, 0); /* PLL_STOPMODE */
249 l = FLD_MOD(l, cinfo->n - 1, hw->n_msb, hw->n_lsb); /* PLL_REGN */
250 l = FLD_MOD(l, cinfo->m, hw->m_msb, hw->m_lsb); /* PLL_REGM */
251 /* M4 */
252 l = FLD_MOD(l, cinfo->mX[0] ? cinfo->mX[0] - 1 : 0,
253 hw->mX_msb[0], hw->mX_lsb[0]);
254 /* M5 */
255 l = FLD_MOD(l, cinfo->mX[1] ? cinfo->mX[1] - 1 : 0,
256 hw->mX_msb[1], hw->mX_lsb[1]);
257 writel_relaxed(l, base + PLL_CONFIGURATION1);
258
259 l = 0;
260 /* M6 */
261 l = FLD_MOD(l, cinfo->mX[2] ? cinfo->mX[2] - 1 : 0,
262 hw->mX_msb[2], hw->mX_lsb[2]);
263 /* M7 */
264 l = FLD_MOD(l, cinfo->mX[3] ? cinfo->mX[3] - 1 : 0,
265 hw->mX_msb[3], hw->mX_lsb[3]);
266 writel_relaxed(l, base + PLL_CONFIGURATION3);
267
268 l = readl_relaxed(base + PLL_CONFIGURATION2);
269 if (hw->has_freqsel) {
270 u32 f = cinfo->fint < 1000000 ? 0x3 :
271 cinfo->fint < 1250000 ? 0x4 :
272 cinfo->fint < 1500000 ? 0x5 :
273 cinfo->fint < 1750000 ? 0x6 :
274 0x7;
275
276 l = FLD_MOD(l, f, 4, 1); /* PLL_FREQSEL */
277 } else if (hw->has_selfreqdco) {
278 u32 f = cinfo->clkdco < hw->clkdco_low ? 0x2 : 0x4;
279
280 l = FLD_MOD(l, f, 3, 1); /* PLL_SELFREQDCO */
281 }
282 l = FLD_MOD(l, 1, 13, 13); /* PLL_REFEN */
283 l = FLD_MOD(l, 0, 14, 14); /* PHY_CLKINEN */
284 l = FLD_MOD(l, 0, 16, 16); /* M4_CLOCK_EN */
285 l = FLD_MOD(l, 0, 18, 18); /* M5_CLOCK_EN */
286 l = FLD_MOD(l, 1, 20, 20); /* HSDIVBYPASS */
287 if (hw->has_refsel)
288 l = FLD_MOD(l, 3, 22, 21); /* REFSEL = sysclk */
289 l = FLD_MOD(l, 0, 23, 23); /* M6_CLOCK_EN */
290 l = FLD_MOD(l, 0, 25, 25); /* M7_CLOCK_EN */
291 writel_relaxed(l, base + PLL_CONFIGURATION2);
292
293 writel_relaxed(1, base + PLL_GO); /* PLL_GO */
294
295 if (wait_for_bit_change(base + PLL_GO, 0, 0) != 0) {
296 DSSERR("DSS DPLL GO bit not going down.\n");
297 r = -EIO;
298 goto err;
299 }
300
301 if (wait_for_bit_change(base + PLL_STATUS, 1, 1) != 1) {
302 DSSERR("cannot lock DSS DPLL\n");
303 r = -EIO;
304 goto err;
305 }
306
307 l = readl_relaxed(base + PLL_CONFIGURATION2);
308 l = FLD_MOD(l, 1, 14, 14); /* PHY_CLKINEN */
309 l = FLD_MOD(l, cinfo->mX[0] ? 1 : 0, 16, 16); /* M4_CLOCK_EN */
310 l = FLD_MOD(l, cinfo->mX[1] ? 1 : 0, 18, 18); /* M5_CLOCK_EN */
311 l = FLD_MOD(l, 0, 20, 20); /* HSDIVBYPASS */
312 l = FLD_MOD(l, cinfo->mX[2] ? 1 : 0, 23, 23); /* M6_CLOCK_EN */
313 l = FLD_MOD(l, cinfo->mX[3] ? 1 : 0, 25, 25); /* M7_CLOCK_EN */
314 writel_relaxed(l, base + PLL_CONFIGURATION2);
315
316 r = dss_wait_hsdiv_ack(pll,
317 (cinfo->mX[0] ? BIT(7) : 0) |
318 (cinfo->mX[1] ? BIT(8) : 0) |
319 (cinfo->mX[2] ? BIT(10) : 0) |
320 (cinfo->mX[3] ? BIT(11) : 0));
321 if (r) {
322 DSSERR("failed to enable HSDIV clocks\n");
323 goto err;
324 }
325
326err:
327 return r;
328}
329
330int dss_pll_write_config_type_b(struct dss_pll *pll,
331 const struct dss_pll_clock_info *cinfo)
332{
333 const struct dss_pll_hw *hw = pll->hw;
334 void __iomem *base = pll->base;
335 u32 l;
336
337 l = 0;
338 l = FLD_MOD(l, cinfo->m, 20, 9); /* PLL_REGM */
339 l = FLD_MOD(l, cinfo->n - 1, 8, 1); /* PLL_REGN */
340 writel_relaxed(l, base + PLL_CONFIGURATION1);
341
342 l = readl_relaxed(base + PLL_CONFIGURATION2);
343 l = FLD_MOD(l, 0x0, 12, 12); /* PLL_HIGHFREQ divide by 2 */
344 l = FLD_MOD(l, 0x1, 13, 13); /* PLL_REFEN */
345 l = FLD_MOD(l, 0x0, 14, 14); /* PHY_CLKINEN */
346 if (hw->has_refsel)
347 l = FLD_MOD(l, 0x3, 22, 21); /* REFSEL = SYSCLK */
348
349 /* PLL_SELFREQDCO */
350 if (cinfo->clkdco > hw->clkdco_low)
351 l = FLD_MOD(l, 0x4, 3, 1);
352 else
353 l = FLD_MOD(l, 0x2, 3, 1);
354 writel_relaxed(l, base + PLL_CONFIGURATION2);
355
356 l = readl_relaxed(base + PLL_CONFIGURATION3);
357 l = FLD_MOD(l, cinfo->sd, 17, 10); /* PLL_REGSD */
358 writel_relaxed(l, base + PLL_CONFIGURATION3);
359
360 l = readl_relaxed(base + PLL_CONFIGURATION4);
361 l = FLD_MOD(l, cinfo->mX[0], 24, 18); /* PLL_REGM2 */
362 l = FLD_MOD(l, cinfo->mf, 17, 0); /* PLL_REGM_F */
363 writel_relaxed(l, base + PLL_CONFIGURATION4);
364
365 writel_relaxed(1, base + PLL_GO); /* PLL_GO */
366
367 if (wait_for_bit_change(base + PLL_GO, 0, 0) != 0) {
368 DSSERR("DSS DPLL GO bit not going down.\n");
369 return -EIO;
370 }
371
372 if (wait_for_bit_change(base + PLL_STATUS, 1, 1) != 1) {
373 DSSERR("cannot lock DSS DPLL\n");
374 return -ETIMEDOUT;
375 }
376
377 return 0;
378}
diff --git a/drivers/video/fbdev/omap2/dss/sdi.c b/drivers/video/fbdev/omap2/dss/sdi.c
index 4c9c46d4ea60..d9b10f27be20 100644
--- a/drivers/video/fbdev/omap2/dss/sdi.c
+++ b/drivers/video/fbdev/omap2/dss/sdi.c
@@ -425,7 +425,7 @@ err_datapairs:
425 return r; 425 return r;
426} 426}
427 427
428void __exit sdi_uninit_port(void) 428void __exit sdi_uninit_port(struct device_node *port)
429{ 429{
430 if (!sdi.port_initialized) 430 if (!sdi.port_initialized)
431 return; 431 return;
diff --git a/drivers/video/fbdev/s3fb.c b/drivers/video/fbdev/s3fb.c
index c43b969e1e23..f0ae61a37f04 100644
--- a/drivers/video/fbdev/s3fb.c
+++ b/drivers/video/fbdev/s3fb.c
@@ -1182,7 +1182,7 @@ static int s3_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
1182 1182
1183 pcibios_bus_to_resource(dev->bus, &vga_res, &bus_reg); 1183 pcibios_bus_to_resource(dev->bus, &vga_res, &bus_reg);
1184 1184
1185 par->state.vgabase = (void __iomem *) vga_res.start; 1185 par->state.vgabase = (void __iomem *) (unsigned long) vga_res.start;
1186 1186
1187 /* Unlock regs */ 1187 /* Unlock regs */
1188 cr38 = vga_rcrt(par->state.vgabase, 0x38); 1188 cr38 = vga_rcrt(par->state.vgabase, 0x38);
diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c
index 2bcc84ac18c7..cfde21d81c15 100644
--- a/drivers/video/fbdev/sh_mobile_lcdcfb.c
+++ b/drivers/video/fbdev/sh_mobile_lcdcfb.c
@@ -2181,8 +2181,7 @@ sh_mobile_lcdc_channel_fb_cleanup(struct sh_mobile_lcdc_chan *ch)
2181 if (!info || !info->device) 2181 if (!info || !info->device)
2182 return; 2182 return;
2183 2183
2184 if (ch->sglist) 2184 vfree(ch->sglist);
2185 vfree(ch->sglist);
2186 2185
2187 fb_dealloc_cmap(&info->cmap); 2186 fb_dealloc_cmap(&info->cmap);
2188 framebuffer_release(info); 2187 framebuffer_release(info);
diff --git a/drivers/video/fbdev/simplefb.c b/drivers/video/fbdev/simplefb.c
index 210f3a02121a..b2ae9254fd75 100644
--- a/drivers/video/fbdev/simplefb.c
+++ b/drivers/video/fbdev/simplefb.c
@@ -26,6 +26,8 @@
26#include <linux/module.h> 26#include <linux/module.h>
27#include <linux/platform_data/simplefb.h> 27#include <linux/platform_data/simplefb.h>
28#include <linux/platform_device.h> 28#include <linux/platform_device.h>
29#include <linux/clk-provider.h>
30#include <linux/of_platform.h>
29 31
30static struct fb_fix_screeninfo simplefb_fix = { 32static struct fb_fix_screeninfo simplefb_fix = {
31 .id = "simple", 33 .id = "simple",
@@ -41,6 +43,8 @@ static struct fb_var_screeninfo simplefb_var = {
41 .vmode = FB_VMODE_NONINTERLACED, 43 .vmode = FB_VMODE_NONINTERLACED,
42}; 44};
43 45
46#define PSEUDO_PALETTE_SIZE 16
47
44static int simplefb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, 48static int simplefb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
45 u_int transp, struct fb_info *info) 49 u_int transp, struct fb_info *info)
46{ 50{
@@ -50,7 +54,7 @@ static int simplefb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
50 u32 cb = blue >> (16 - info->var.blue.length); 54 u32 cb = blue >> (16 - info->var.blue.length);
51 u32 value; 55 u32 value;
52 56
53 if (regno >= 16) 57 if (regno >= PSEUDO_PALETTE_SIZE)
54 return -EINVAL; 58 return -EINVAL;
55 59
56 value = (cr << info->var.red.offset) | 60 value = (cr << info->var.red.offset) |
@@ -163,11 +167,113 @@ static int simplefb_parse_pd(struct platform_device *pdev,
163 return 0; 167 return 0;
164} 168}
165 169
170struct simplefb_par {
171 u32 palette[PSEUDO_PALETTE_SIZE];
172#if defined CONFIG_OF && defined CONFIG_COMMON_CLK
173 int clk_count;
174 struct clk **clks;
175#endif
176};
177
178#if defined CONFIG_OF && defined CONFIG_COMMON_CLK
179/*
180 * Clock handling code.
181 *
182 * Here we handle the clocks property of our "simple-framebuffer" dt node.
183 * This is necessary so that we can make sure that any clocks needed by
184 * the display engine that the bootloader set up for us (and for which it
185 * provided a simplefb dt node), stay up, for the life of the simplefb
186 * driver.
187 *
188 * When the driver unloads, we cleanly disable, and then release the clocks.
189 *
190 * We only complain about errors here, no action is taken as the most likely
191 * error can only happen due to a mismatch between the bootloader which set
192 * up simplefb, and the clock definitions in the device tree. Chances are
193 * that there are no adverse effects, and if there are, a clean teardown of
194 * the fb probe will not help us much either. So just complain and carry on,
195 * and hope that the user actually gets a working fb at the end of things.
196 */
197static int simplefb_clocks_init(struct simplefb_par *par,
198 struct platform_device *pdev)
199{
200 struct device_node *np = pdev->dev.of_node;
201 struct clk *clock;
202 int i, ret;
203
204 if (dev_get_platdata(&pdev->dev) || !np)
205 return 0;
206
207 par->clk_count = of_clk_get_parent_count(np);
208 if (par->clk_count <= 0)
209 return 0;
210
211 par->clks = kcalloc(par->clk_count, sizeof(struct clk *), GFP_KERNEL);
212 if (!par->clks)
213 return -ENOMEM;
214
215 for (i = 0; i < par->clk_count; i++) {
216 clock = of_clk_get(np, i);
217 if (IS_ERR(clock)) {
218 if (PTR_ERR(clock) == -EPROBE_DEFER) {
219 while (--i >= 0) {
220 if (par->clks[i])
221 clk_put(par->clks[i]);
222 }
223 kfree(par->clks);
224 return -EPROBE_DEFER;
225 }
226 dev_err(&pdev->dev, "%s: clock %d not found: %ld\n",
227 __func__, i, PTR_ERR(clock));
228 continue;
229 }
230 par->clks[i] = clock;
231 }
232
233 for (i = 0; i < par->clk_count; i++) {
234 if (par->clks[i]) {
235 ret = clk_prepare_enable(par->clks[i]);
236 if (ret) {
237 dev_err(&pdev->dev,
238 "%s: failed to enable clock %d: %d\n",
239 __func__, i, ret);
240 clk_put(par->clks[i]);
241 par->clks[i] = NULL;
242 }
243 }
244 }
245
246 return 0;
247}
248
249static void simplefb_clocks_destroy(struct simplefb_par *par)
250{
251 int i;
252
253 if (!par->clks)
254 return;
255
256 for (i = 0; i < par->clk_count; i++) {
257 if (par->clks[i]) {
258 clk_disable_unprepare(par->clks[i]);
259 clk_put(par->clks[i]);
260 }
261 }
262
263 kfree(par->clks);
264}
265#else
266static int simplefb_clocks_init(struct simplefb_par *par,
267 struct platform_device *pdev) { return 0; }
268static void simplefb_clocks_destroy(struct simplefb_par *par) { }
269#endif
270
166static int simplefb_probe(struct platform_device *pdev) 271static int simplefb_probe(struct platform_device *pdev)
167{ 272{
168 int ret; 273 int ret;
169 struct simplefb_params params; 274 struct simplefb_params params;
170 struct fb_info *info; 275 struct fb_info *info;
276 struct simplefb_par *par;
171 struct resource *mem; 277 struct resource *mem;
172 278
173 if (fb_get_options("simplefb", NULL)) 279 if (fb_get_options("simplefb", NULL))
@@ -188,11 +294,13 @@ static int simplefb_probe(struct platform_device *pdev)
188 return -EINVAL; 294 return -EINVAL;
189 } 295 }
190 296
191 info = framebuffer_alloc(sizeof(u32) * 16, &pdev->dev); 297 info = framebuffer_alloc(sizeof(struct simplefb_par), &pdev->dev);
192 if (!info) 298 if (!info)
193 return -ENOMEM; 299 return -ENOMEM;
194 platform_set_drvdata(pdev, info); 300 platform_set_drvdata(pdev, info);
195 301
302 par = info->par;
303
196 info->fix = simplefb_fix; 304 info->fix = simplefb_fix;
197 info->fix.smem_start = mem->start; 305 info->fix.smem_start = mem->start;
198 info->fix.smem_len = resource_size(mem); 306 info->fix.smem_len = resource_size(mem);
@@ -211,8 +319,8 @@ static int simplefb_probe(struct platform_device *pdev)
211 319
212 info->apertures = alloc_apertures(1); 320 info->apertures = alloc_apertures(1);
213 if (!info->apertures) { 321 if (!info->apertures) {
214 framebuffer_release(info); 322 ret = -ENOMEM;
215 return -ENOMEM; 323 goto error_fb_release;
216 } 324 }
217 info->apertures->ranges[0].base = info->fix.smem_start; 325 info->apertures->ranges[0].base = info->fix.smem_start;
218 info->apertures->ranges[0].size = info->fix.smem_len; 326 info->apertures->ranges[0].size = info->fix.smem_len;
@@ -222,10 +330,14 @@ static int simplefb_probe(struct platform_device *pdev)
222 info->screen_base = ioremap_wc(info->fix.smem_start, 330 info->screen_base = ioremap_wc(info->fix.smem_start,
223 info->fix.smem_len); 331 info->fix.smem_len);
224 if (!info->screen_base) { 332 if (!info->screen_base) {
225 framebuffer_release(info); 333 ret = -ENOMEM;
226 return -ENODEV; 334 goto error_fb_release;
227 } 335 }
228 info->pseudo_palette = (void *)(info + 1); 336 info->pseudo_palette = par->palette;
337
338 ret = simplefb_clocks_init(par, pdev);
339 if (ret < 0)
340 goto error_unmap;
229 341
230 dev_info(&pdev->dev, "framebuffer at 0x%lx, 0x%x bytes, mapped to 0x%p\n", 342 dev_info(&pdev->dev, "framebuffer at 0x%lx, 0x%x bytes, mapped to 0x%p\n",
231 info->fix.smem_start, info->fix.smem_len, 343 info->fix.smem_start, info->fix.smem_len,
@@ -238,21 +350,29 @@ static int simplefb_probe(struct platform_device *pdev)
238 ret = register_framebuffer(info); 350 ret = register_framebuffer(info);
239 if (ret < 0) { 351 if (ret < 0) {
240 dev_err(&pdev->dev, "Unable to register simplefb: %d\n", ret); 352 dev_err(&pdev->dev, "Unable to register simplefb: %d\n", ret);
241 iounmap(info->screen_base); 353 goto error_clocks;
242 framebuffer_release(info);
243 return ret;
244 } 354 }
245 355
246 dev_info(&pdev->dev, "fb%d: simplefb registered!\n", info->node); 356 dev_info(&pdev->dev, "fb%d: simplefb registered!\n", info->node);
247 357
248 return 0; 358 return 0;
359
360error_clocks:
361 simplefb_clocks_destroy(par);
362error_unmap:
363 iounmap(info->screen_base);
364error_fb_release:
365 framebuffer_release(info);
366 return ret;
249} 367}
250 368
251static int simplefb_remove(struct platform_device *pdev) 369static int simplefb_remove(struct platform_device *pdev)
252{ 370{
253 struct fb_info *info = platform_get_drvdata(pdev); 371 struct fb_info *info = platform_get_drvdata(pdev);
372 struct simplefb_par *par = info->par;
254 373
255 unregister_framebuffer(info); 374 unregister_framebuffer(info);
375 simplefb_clocks_destroy(par);
256 framebuffer_release(info); 376 framebuffer_release(info);
257 377
258 return 0; 378 return 0;
@@ -273,7 +393,27 @@ static struct platform_driver simplefb_driver = {
273 .probe = simplefb_probe, 393 .probe = simplefb_probe,
274 .remove = simplefb_remove, 394 .remove = simplefb_remove,
275}; 395};
276module_platform_driver(simplefb_driver); 396
397static int __init simplefb_init(void)
398{
399 int ret;
400 struct device_node *np;
401
402 ret = platform_driver_register(&simplefb_driver);
403 if (ret)
404 return ret;
405
406 if (IS_ENABLED(CONFIG_OF) && of_chosen) {
407 for_each_child_of_node(of_chosen, np) {
408 if (of_device_is_compatible(np, "simple-framebuffer"))
409 of_platform_device_create(np, NULL, NULL);
410 }
411 }
412
413 return 0;
414}
415
416fs_initcall(simplefb_init);
277 417
278MODULE_AUTHOR("Stephen Warren <swarren@wwwdotorg.org>"); 418MODULE_AUTHOR("Stephen Warren <swarren@wwwdotorg.org>");
279MODULE_DESCRIPTION("Simple framebuffer driver"); 419MODULE_DESCRIPTION("Simple framebuffer driver");
diff --git a/drivers/video/fbdev/sis/sis_main.c b/drivers/video/fbdev/sis/sis_main.c
index e5d11b1892e8..fcf610edf217 100644
--- a/drivers/video/fbdev/sis/sis_main.c
+++ b/drivers/video/fbdev/sis/sis_main.c
@@ -5989,7 +5989,7 @@ static int sisfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
5989 5989
5990 if(!ivideo->sisvga_enabled) { 5990 if(!ivideo->sisvga_enabled) {
5991 if(pci_enable_device(pdev)) { 5991 if(pci_enable_device(pdev)) {
5992 if(ivideo->nbridge) pci_dev_put(ivideo->nbridge); 5992 pci_dev_put(ivideo->nbridge);
5993 framebuffer_release(sis_fb_info); 5993 framebuffer_release(sis_fb_info);
5994 return -EIO; 5994 return -EIO;
5995 } 5995 }
@@ -6202,10 +6202,8 @@ error_0: iounmap(ivideo->video_vbase);
6202error_1: release_mem_region(ivideo->video_base, ivideo->video_size); 6202error_1: release_mem_region(ivideo->video_base, ivideo->video_size);
6203error_2: release_mem_region(ivideo->mmio_base, ivideo->mmio_size); 6203error_2: release_mem_region(ivideo->mmio_base, ivideo->mmio_size);
6204error_3: vfree(ivideo->bios_abase); 6204error_3: vfree(ivideo->bios_abase);
6205 if(ivideo->lpcdev) 6205 pci_dev_put(ivideo->lpcdev);
6206 pci_dev_put(ivideo->lpcdev); 6206 pci_dev_put(ivideo->nbridge);
6207 if(ivideo->nbridge)
6208 pci_dev_put(ivideo->nbridge);
6209 if(!ivideo->sisvga_enabled) 6207 if(!ivideo->sisvga_enabled)
6210 pci_disable_device(pdev); 6208 pci_disable_device(pdev);
6211 framebuffer_release(sis_fb_info); 6209 framebuffer_release(sis_fb_info);
@@ -6505,11 +6503,9 @@ static void sisfb_remove(struct pci_dev *pdev)
6505 6503
6506 vfree(ivideo->bios_abase); 6504 vfree(ivideo->bios_abase);
6507 6505
6508 if(ivideo->lpcdev) 6506 pci_dev_put(ivideo->lpcdev);
6509 pci_dev_put(ivideo->lpcdev);
6510 6507
6511 if(ivideo->nbridge) 6508 pci_dev_put(ivideo->nbridge);
6512 pci_dev_put(ivideo->nbridge);
6513 6509
6514#ifdef CONFIG_MTRR 6510#ifdef CONFIG_MTRR
6515 /* Release MTRR region */ 6511 /* Release MTRR region */
diff --git a/drivers/video/fbdev/sm501fb.c b/drivers/video/fbdev/sm501fb.c
index 9e74e8fbe074..8b98b011fc04 100644
--- a/drivers/video/fbdev/sm501fb.c
+++ b/drivers/video/fbdev/sm501fb.c
@@ -1988,6 +1988,7 @@ static int sm501fb_probe(struct platform_device *pdev)
1988 if (info->fb[HEAD_PANEL] == NULL && 1988 if (info->fb[HEAD_PANEL] == NULL &&
1989 info->fb[HEAD_CRT] == NULL) { 1989 info->fb[HEAD_CRT] == NULL) {
1990 dev_err(dev, "no framebuffers found\n"); 1990 dev_err(dev, "no framebuffers found\n");
1991 ret = -ENODEV;
1991 goto err_alloc; 1992 goto err_alloc;
1992 } 1993 }
1993 1994
diff --git a/drivers/video/fbdev/smscufx.c b/drivers/video/fbdev/smscufx.c
index d513ed6a49f2..9279e5f6696e 100644
--- a/drivers/video/fbdev/smscufx.c
+++ b/drivers/video/fbdev/smscufx.c
@@ -1142,8 +1142,7 @@ static void ufx_free_framebuffer_work(struct work_struct *work)
1142 fb_dealloc_cmap(&info->cmap); 1142 fb_dealloc_cmap(&info->cmap);
1143 if (info->monspecs.modedb) 1143 if (info->monspecs.modedb)
1144 fb_destroy_modedb(info->monspecs.modedb); 1144 fb_destroy_modedb(info->monspecs.modedb);
1145 if (info->screen_base) 1145 vfree(info->screen_base);
1146 vfree(info->screen_base);
1147 1146
1148 fb_destroy_modelist(&info->modelist); 1147 fb_destroy_modelist(&info->modelist);
1149 1148
@@ -1743,8 +1742,7 @@ error:
1743 fb_dealloc_cmap(&info->cmap); 1742 fb_dealloc_cmap(&info->cmap);
1744 if (info->monspecs.modedb) 1743 if (info->monspecs.modedb)
1745 fb_destroy_modedb(info->monspecs.modedb); 1744 fb_destroy_modedb(info->monspecs.modedb);
1746 if (info->screen_base) 1745 vfree(info->screen_base);
1747 vfree(info->screen_base);
1748 1746
1749 fb_destroy_modelist(&info->modelist); 1747 fb_destroy_modelist(&info->modelist);
1750 1748
diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
index 046d51d83d74..ff2b8731a2dc 100644
--- a/drivers/video/fbdev/udlfb.c
+++ b/drivers/video/fbdev/udlfb.c
@@ -922,8 +922,7 @@ static void dlfb_free(struct kref *kref)
922{ 922{
923 struct dlfb_data *dev = container_of(kref, struct dlfb_data, kref); 923 struct dlfb_data *dev = container_of(kref, struct dlfb_data, kref);
924 924
925 if (dev->backing_buffer) 925 vfree(dev->backing_buffer);
926 vfree(dev->backing_buffer);
927 926
928 kfree(dev->edid); 927 kfree(dev->edid);
929 928
@@ -953,8 +952,7 @@ static void dlfb_free_framebuffer(struct dlfb_data *dev)
953 fb_dealloc_cmap(&info->cmap); 952 fb_dealloc_cmap(&info->cmap);
954 if (info->monspecs.modedb) 953 if (info->monspecs.modedb)
955 fb_destroy_modedb(info->monspecs.modedb); 954 fb_destroy_modedb(info->monspecs.modedb);
956 if (info->screen_base) 955 vfree(info->screen_base);
957 vfree(info->screen_base);
958 956
959 fb_destroy_modelist(&info->modelist); 957 fb_destroy_modelist(&info->modelist);
960 958
@@ -1203,8 +1201,7 @@ static int dlfb_realloc_framebuffer(struct dlfb_data *dev, struct fb_info *info)
1203 if (!new_back) 1201 if (!new_back)
1204 pr_info("No shadow/backing buffer allocated\n"); 1202 pr_info("No shadow/backing buffer allocated\n");
1205 else { 1203 else {
1206 if (dev->backing_buffer) 1204 vfree(dev->backing_buffer);
1207 vfree(dev->backing_buffer);
1208 dev->backing_buffer = new_back; 1205 dev->backing_buffer = new_back;
1209 } 1206 }
1210 } 1207 }
diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c
index 509d452e8f91..d32d1c4d1b99 100644
--- a/drivers/video/fbdev/uvesafb.c
+++ b/drivers/video/fbdev/uvesafb.c
@@ -1219,8 +1219,7 @@ static int uvesafb_release(struct fb_info *info, int user)
1219 uvesafb_vbe_state_restore(par, par->vbe_state_orig); 1219 uvesafb_vbe_state_restore(par, par->vbe_state_orig);
1220out: 1220out:
1221 atomic_dec(&par->ref_count); 1221 atomic_dec(&par->ref_count);
1222 if (task) 1222 uvesafb_free(task);
1223 uvesafb_free(task);
1224 return 0; 1223 return 0;
1225} 1224}
1226 1225
@@ -1923,8 +1922,7 @@ static int uvesafb_init(void)
1923 err = -ENOMEM; 1922 err = -ENOMEM;
1924 1923
1925 if (err) { 1924 if (err) {
1926 if (uvesafb_device) 1925 platform_device_put(uvesafb_device);
1927 platform_device_put(uvesafb_device);
1928 platform_driver_unregister(&uvesafb_driver); 1926 platform_driver_unregister(&uvesafb_driver);
1929 cn_del_callback(&uvesafb_cn_id); 1927 cn_del_callback(&uvesafb_cn_id);
1930 return err; 1928 return err;
diff --git a/drivers/video/fbdev/vermilion/vermilion.c b/drivers/video/fbdev/vermilion/vermilion.c
index 5f930aeccf1f..6b70d7f62b2f 100644
--- a/drivers/video/fbdev/vermilion/vermilion.c
+++ b/drivers/video/fbdev/vermilion/vermilion.c
@@ -1003,13 +1003,15 @@ static int vmlfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
1003 struct vml_info *vinfo = container_of(info, struct vml_info, info); 1003 struct vml_info *vinfo = container_of(info, struct vml_info, info);
1004 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; 1004 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
1005 int ret; 1005 int ret;
1006 unsigned long prot;
1006 1007
1007 ret = vmlfb_vram_offset(vinfo, offset); 1008 ret = vmlfb_vram_offset(vinfo, offset);
1008 if (ret) 1009 if (ret)
1009 return -EINVAL; 1010 return -EINVAL;
1010 1011
1011 pgprot_val(vma->vm_page_prot) |= _PAGE_PCD; 1012 prot = pgprot_val(vma->vm_page_prot) & ~_PAGE_CACHE_MASK;
1012 pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT; 1013 pgprot_val(vma->vm_page_prot) =
1014 prot | cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS);
1013 1015
1014 return vm_iomap_memory(vma, vinfo->vram_start, 1016 return vm_iomap_memory(vma, vinfo->vram_start,
1015 vinfo->vram_contig_size); 1017 vinfo->vram_contig_size);
diff --git a/drivers/video/fbdev/via/viafbdev.c b/drivers/video/fbdev/via/viafbdev.c
index 325c43c6ff97..f9718f012aae 100644
--- a/drivers/video/fbdev/via/viafbdev.c
+++ b/drivers/video/fbdev/via/viafbdev.c
@@ -1937,8 +1937,7 @@ out_fb1_unreg_lcd_cle266:
1937out_dealloc_cmap: 1937out_dealloc_cmap:
1938 fb_dealloc_cmap(&viafbinfo->cmap); 1938 fb_dealloc_cmap(&viafbinfo->cmap);
1939out_fb1_release: 1939out_fb1_release:
1940 if (viafbinfo1) 1940 framebuffer_release(viafbinfo1);
1941 framebuffer_release(viafbinfo1);
1942out_fb_release: 1941out_fb_release:
1943 i2c_bus_free(viaparinfo->shared); 1942 i2c_bus_free(viaparinfo->shared);
1944 framebuffer_release(viafbinfo); 1943 framebuffer_release(viafbinfo);
diff --git a/drivers/video/fbdev/vt8623fb.c b/drivers/video/fbdev/vt8623fb.c
index 5c7cbc6c6236..ea7f056ed5fe 100644
--- a/drivers/video/fbdev/vt8623fb.c
+++ b/drivers/video/fbdev/vt8623fb.c
@@ -731,7 +731,7 @@ static int vt8623_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
731 731
732 pcibios_bus_to_resource(dev->bus, &vga_res, &bus_reg); 732 pcibios_bus_to_resource(dev->bus, &vga_res, &bus_reg);
733 733
734 par->state.vgabase = (void __iomem *) vga_res.start; 734 par->state.vgabase = (void __iomem *) (unsigned long) vga_res.start;
735 735
736 /* Find how many physical memory there is on card */ 736 /* Find how many physical memory there is on card */
737 memsize1 = (vga_rseq(par->state.vgabase, 0x34) + 1) >> 1; 737 memsize1 = (vga_rseq(par->state.vgabase, 0x34) + 1) >> 1;
diff --git a/drivers/virtio/Makefile b/drivers/virtio/Makefile
index 9076635697bb..bf5104b56894 100644
--- a/drivers/virtio/Makefile
+++ b/drivers/virtio/Makefile
@@ -1,4 +1,5 @@
1obj-$(CONFIG_VIRTIO) += virtio.o virtio_ring.o 1obj-$(CONFIG_VIRTIO) += virtio.o virtio_ring.o
2obj-$(CONFIG_VIRTIO_MMIO) += virtio_mmio.o 2obj-$(CONFIG_VIRTIO_MMIO) += virtio_mmio.o
3obj-$(CONFIG_VIRTIO_PCI) += virtio_pci.o 3obj-$(CONFIG_VIRTIO_PCI) += virtio_pci.o
4virtio_pci-y := virtio_pci_legacy.o virtio_pci_common.o
4obj-$(CONFIG_VIRTIO_BALLOON) += virtio_balloon.o 5obj-$(CONFIG_VIRTIO_BALLOON) += virtio_balloon.o
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index df598dd8c5c8..f22665868781 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -3,6 +3,7 @@
3#include <linux/virtio_config.h> 3#include <linux/virtio_config.h>
4#include <linux/module.h> 4#include <linux/module.h>
5#include <linux/idr.h> 5#include <linux/idr.h>
6#include <uapi/linux/virtio_ids.h>
6 7
7/* Unique numbering for virtio devices. */ 8/* Unique numbering for virtio devices. */
8static DEFINE_IDA(virtio_index_ida); 9static DEFINE_IDA(virtio_index_ida);
@@ -49,9 +50,9 @@ static ssize_t features_show(struct device *_d,
49 50
50 /* We actually represent this as a bitstring, as it could be 51 /* We actually represent this as a bitstring, as it could be
51 * arbitrary length in future. */ 52 * arbitrary length in future. */
52 for (i = 0; i < ARRAY_SIZE(dev->features)*BITS_PER_LONG; i++) 53 for (i = 0; i < sizeof(dev->features)*8; i++)
53 len += sprintf(buf+len, "%c", 54 len += sprintf(buf+len, "%c",
54 test_bit(i, dev->features) ? '1' : '0'); 55 __virtio_test_bit(dev, i) ? '1' : '0');
55 len += sprintf(buf+len, "\n"); 56 len += sprintf(buf+len, "\n");
56 return len; 57 return len;
57} 58}
@@ -113,6 +114,13 @@ void virtio_check_driver_offered_feature(const struct virtio_device *vdev,
113 for (i = 0; i < drv->feature_table_size; i++) 114 for (i = 0; i < drv->feature_table_size; i++)
114 if (drv->feature_table[i] == fbit) 115 if (drv->feature_table[i] == fbit)
115 return; 116 return;
117
118 if (drv->feature_table_legacy) {
119 for (i = 0; i < drv->feature_table_size_legacy; i++)
120 if (drv->feature_table_legacy[i] == fbit)
121 return;
122 }
123
116 BUG(); 124 BUG();
117} 125}
118EXPORT_SYMBOL_GPL(virtio_check_driver_offered_feature); 126EXPORT_SYMBOL_GPL(virtio_check_driver_offered_feature);
@@ -159,7 +167,10 @@ static int virtio_dev_probe(struct device *_d)
159 int err, i; 167 int err, i;
160 struct virtio_device *dev = dev_to_virtio(_d); 168 struct virtio_device *dev = dev_to_virtio(_d);
161 struct virtio_driver *drv = drv_to_virtio(dev->dev.driver); 169 struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
162 u32 device_features; 170 u64 device_features;
171 u64 driver_features;
172 u64 driver_features_legacy;
173 unsigned status;
163 174
164 /* We have a driver! */ 175 /* We have a driver! */
165 add_status(dev, VIRTIO_CONFIG_S_DRIVER); 176 add_status(dev, VIRTIO_CONFIG_S_DRIVER);
@@ -167,34 +178,66 @@ static int virtio_dev_probe(struct device *_d)
167 /* Figure out what features the device supports. */ 178 /* Figure out what features the device supports. */
168 device_features = dev->config->get_features(dev); 179 device_features = dev->config->get_features(dev);
169 180
170 /* Features supported by both device and driver into dev->features. */ 181 /* Figure out what features the driver supports. */
171 memset(dev->features, 0, sizeof(dev->features)); 182 driver_features = 0;
172 for (i = 0; i < drv->feature_table_size; i++) { 183 for (i = 0; i < drv->feature_table_size; i++) {
173 unsigned int f = drv->feature_table[i]; 184 unsigned int f = drv->feature_table[i];
174 BUG_ON(f >= 32); 185 BUG_ON(f >= 64);
175 if (device_features & (1 << f)) 186 driver_features |= (1ULL << f);
176 set_bit(f, dev->features); 187 }
188
189 /* Some drivers have a separate feature table for virtio v1.0 */
190 if (drv->feature_table_legacy) {
191 driver_features_legacy = 0;
192 for (i = 0; i < drv->feature_table_size_legacy; i++) {
193 unsigned int f = drv->feature_table_legacy[i];
194 BUG_ON(f >= 64);
195 driver_features_legacy |= (1ULL << f);
196 }
197 } else {
198 driver_features_legacy = driver_features;
177 } 199 }
178 200
201 if (device_features & (1ULL << VIRTIO_F_VERSION_1))
202 dev->features = driver_features & device_features;
203 else
204 dev->features = driver_features_legacy & device_features;
205
179 /* Transport features always preserved to pass to finalize_features. */ 206 /* Transport features always preserved to pass to finalize_features. */
180 for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) 207 for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++)
181 if (device_features & (1 << i)) 208 if (device_features & (1ULL << i))
182 set_bit(i, dev->features); 209 __virtio_set_bit(dev, i);
183 210
184 dev->config->finalize_features(dev); 211 err = dev->config->finalize_features(dev);
212 if (err)
213 goto err;
214
215 if (virtio_has_feature(dev, VIRTIO_F_VERSION_1)) {
216 add_status(dev, VIRTIO_CONFIG_S_FEATURES_OK);
217 status = dev->config->get_status(dev);
218 if (!(status & VIRTIO_CONFIG_S_FEATURES_OK)) {
219 dev_err(_d, "virtio: device refuses features: %x\n",
220 status);
221 err = -ENODEV;
222 goto err;
223 }
224 }
185 225
186 err = drv->probe(dev); 226 err = drv->probe(dev);
187 if (err) 227 if (err)
188 add_status(dev, VIRTIO_CONFIG_S_FAILED); 228 goto err;
189 else {
190 add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
191 if (drv->scan)
192 drv->scan(dev);
193 229
194 virtio_config_enable(dev); 230 add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
195 } 231 if (drv->scan)
232 drv->scan(dev);
233
234 virtio_config_enable(dev);
196 235
236 return 0;
237err:
238 add_status(dev, VIRTIO_CONFIG_S_FAILED);
197 return err; 239 return err;
240
198} 241}
199 242
200static int virtio_dev_remove(struct device *_d) 243static int virtio_dev_remove(struct device *_d)
@@ -223,6 +266,12 @@ static struct bus_type virtio_bus = {
223 .remove = virtio_dev_remove, 266 .remove = virtio_dev_remove,
224}; 267};
225 268
269bool virtio_device_is_legacy_only(struct virtio_device_id id)
270{
271 return id.device == VIRTIO_ID_BALLOON;
272}
273EXPORT_SYMBOL_GPL(virtio_device_is_legacy_only);
274
226int register_virtio_driver(struct virtio_driver *driver) 275int register_virtio_driver(struct virtio_driver *driver)
227{ 276{
228 /* Catch this early. */ 277 /* Catch this early. */
@@ -303,6 +352,7 @@ EXPORT_SYMBOL_GPL(virtio_device_freeze);
303int virtio_device_restore(struct virtio_device *dev) 352int virtio_device_restore(struct virtio_device *dev)
304{ 353{
305 struct virtio_driver *drv = drv_to_virtio(dev->dev.driver); 354 struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
355 int ret;
306 356
307 /* We always start by resetting the device, in case a previous 357 /* We always start by resetting the device, in case a previous
308 * driver messed it up. */ 358 * driver messed it up. */
@@ -322,14 +372,14 @@ int virtio_device_restore(struct virtio_device *dev)
322 /* We have a driver! */ 372 /* We have a driver! */
323 add_status(dev, VIRTIO_CONFIG_S_DRIVER); 373 add_status(dev, VIRTIO_CONFIG_S_DRIVER);
324 374
325 dev->config->finalize_features(dev); 375 ret = dev->config->finalize_features(dev);
376 if (ret)
377 goto err;
326 378
327 if (drv->restore) { 379 if (drv->restore) {
328 int ret = drv->restore(dev); 380 ret = drv->restore(dev);
329 if (ret) { 381 if (ret)
330 add_status(dev, VIRTIO_CONFIG_S_FAILED); 382 goto err;
331 return ret;
332 }
333 } 383 }
334 384
335 /* Finally, tell the device we're all set */ 385 /* Finally, tell the device we're all set */
@@ -338,6 +388,10 @@ int virtio_device_restore(struct virtio_device *dev)
338 virtio_config_enable(dev); 388 virtio_config_enable(dev);
339 389
340 return 0; 390 return 0;
391
392err:
393 add_status(dev, VIRTIO_CONFIG_S_FAILED);
394 return ret;
341} 395}
342EXPORT_SYMBOL_GPL(virtio_device_restore); 396EXPORT_SYMBOL_GPL(virtio_device_restore);
343#endif 397#endif
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index ef9a1650bb80..5219210d31ce 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -142,7 +142,7 @@ struct virtio_mmio_vq_info {
142 142
143/* Configuration interface */ 143/* Configuration interface */
144 144
145static u32 vm_get_features(struct virtio_device *vdev) 145static u64 vm_get_features(struct virtio_device *vdev)
146{ 146{
147 struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); 147 struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
148 148
@@ -152,19 +152,20 @@ static u32 vm_get_features(struct virtio_device *vdev)
152 return readl(vm_dev->base + VIRTIO_MMIO_HOST_FEATURES); 152 return readl(vm_dev->base + VIRTIO_MMIO_HOST_FEATURES);
153} 153}
154 154
155static void vm_finalize_features(struct virtio_device *vdev) 155static int vm_finalize_features(struct virtio_device *vdev)
156{ 156{
157 struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); 157 struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
158 int i;
159 158
160 /* Give virtio_ring a chance to accept features. */ 159 /* Give virtio_ring a chance to accept features. */
161 vring_transport_features(vdev); 160 vring_transport_features(vdev);
162 161
163 for (i = 0; i < ARRAY_SIZE(vdev->features); i++) { 162 /* Make sure we don't have any features > 32 bits! */
164 writel(i, vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES_SEL); 163 BUG_ON((u32)vdev->features != vdev->features);
165 writel(vdev->features[i], 164
166 vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES); 165 writel(0, vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES_SEL);
167 } 166 writel(vdev->features, vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES);
167
168 return 0;
168} 169}
169 170
170static void vm_get(struct virtio_device *vdev, unsigned offset, 171static void vm_get(struct virtio_device *vdev, unsigned offset,
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
deleted file mode 100644
index d34ebfa604f3..000000000000
--- a/drivers/virtio/virtio_pci.c
+++ /dev/null
@@ -1,802 +0,0 @@
1/*
2 * Virtio PCI driver
3 *
4 * This module allows virtio devices to be used over a virtual PCI device.
5 * This can be used with QEMU based VMMs like KVM or Xen.
6 *
7 * Copyright IBM Corp. 2007
8 *
9 * Authors:
10 * Anthony Liguori <aliguori@us.ibm.com>
11 *
12 * This work is licensed under the terms of the GNU GPL, version 2 or later.
13 * See the COPYING file in the top-level directory.
14 *
15 */
16
17#include <linux/module.h>
18#include <linux/list.h>
19#include <linux/pci.h>
20#include <linux/slab.h>
21#include <linux/interrupt.h>
22#include <linux/virtio.h>
23#include <linux/virtio_config.h>
24#include <linux/virtio_ring.h>
25#include <linux/virtio_pci.h>
26#include <linux/highmem.h>
27#include <linux/spinlock.h>
28
29MODULE_AUTHOR("Anthony Liguori <aliguori@us.ibm.com>");
30MODULE_DESCRIPTION("virtio-pci");
31MODULE_LICENSE("GPL");
32MODULE_VERSION("1");
33
34/* Our device structure */
35struct virtio_pci_device
36{
37 struct virtio_device vdev;
38 struct pci_dev *pci_dev;
39
40 /* the IO mapping for the PCI config space */
41 void __iomem *ioaddr;
42
43 /* a list of queues so we can dispatch IRQs */
44 spinlock_t lock;
45 struct list_head virtqueues;
46
47 /* MSI-X support */
48 int msix_enabled;
49 int intx_enabled;
50 struct msix_entry *msix_entries;
51 cpumask_var_t *msix_affinity_masks;
52 /* Name strings for interrupts. This size should be enough,
53 * and I'm too lazy to allocate each name separately. */
54 char (*msix_names)[256];
55 /* Number of available vectors */
56 unsigned msix_vectors;
57 /* Vectors allocated, excluding per-vq vectors if any */
58 unsigned msix_used_vectors;
59
60 /* Whether we have vector per vq */
61 bool per_vq_vectors;
62};
63
64/* Constants for MSI-X */
65/* Use first vector for configuration changes, second and the rest for
66 * virtqueues Thus, we need at least 2 vectors for MSI. */
67enum {
68 VP_MSIX_CONFIG_VECTOR = 0,
69 VP_MSIX_VQ_VECTOR = 1,
70};
71
72struct virtio_pci_vq_info
73{
74 /* the actual virtqueue */
75 struct virtqueue *vq;
76
77 /* the number of entries in the queue */
78 int num;
79
80 /* the virtual address of the ring queue */
81 void *queue;
82
83 /* the list node for the virtqueues list */
84 struct list_head node;
85
86 /* MSI-X vector (or none) */
87 unsigned msix_vector;
88};
89
90/* Qumranet donated their vendor ID for devices 0x1000 thru 0x10FF. */
91static const struct pci_device_id virtio_pci_id_table[] = {
92 { PCI_DEVICE(0x1af4, PCI_ANY_ID) },
93 { 0 }
94};
95
96MODULE_DEVICE_TABLE(pci, virtio_pci_id_table);
97
98/* Convert a generic virtio device to our structure */
99static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev)
100{
101 return container_of(vdev, struct virtio_pci_device, vdev);
102}
103
104/* virtio config->get_features() implementation */
105static u32 vp_get_features(struct virtio_device *vdev)
106{
107 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
108
109 /* When someone needs more than 32 feature bits, we'll need to
110 * steal a bit to indicate that the rest are somewhere else. */
111 return ioread32(vp_dev->ioaddr + VIRTIO_PCI_HOST_FEATURES);
112}
113
114/* virtio config->finalize_features() implementation */
115static void vp_finalize_features(struct virtio_device *vdev)
116{
117 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
118
119 /* Give virtio_ring a chance to accept features. */
120 vring_transport_features(vdev);
121
122 /* We only support 32 feature bits. */
123 BUILD_BUG_ON(ARRAY_SIZE(vdev->features) != 1);
124 iowrite32(vdev->features[0], vp_dev->ioaddr+VIRTIO_PCI_GUEST_FEATURES);
125}
126
127/* virtio config->get() implementation */
128static void vp_get(struct virtio_device *vdev, unsigned offset,
129 void *buf, unsigned len)
130{
131 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
132 void __iomem *ioaddr = vp_dev->ioaddr +
133 VIRTIO_PCI_CONFIG(vp_dev) + offset;
134 u8 *ptr = buf;
135 int i;
136
137 for (i = 0; i < len; i++)
138 ptr[i] = ioread8(ioaddr + i);
139}
140
141/* the config->set() implementation. it's symmetric to the config->get()
142 * implementation */
143static void vp_set(struct virtio_device *vdev, unsigned offset,
144 const void *buf, unsigned len)
145{
146 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
147 void __iomem *ioaddr = vp_dev->ioaddr +
148 VIRTIO_PCI_CONFIG(vp_dev) + offset;
149 const u8 *ptr = buf;
150 int i;
151
152 for (i = 0; i < len; i++)
153 iowrite8(ptr[i], ioaddr + i);
154}
155
156/* config->{get,set}_status() implementations */
157static u8 vp_get_status(struct virtio_device *vdev)
158{
159 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
160 return ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS);
161}
162
163static void vp_set_status(struct virtio_device *vdev, u8 status)
164{
165 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
166 /* We should never be setting status to 0. */
167 BUG_ON(status == 0);
168 iowrite8(status, vp_dev->ioaddr + VIRTIO_PCI_STATUS);
169}
170
171/* wait for pending irq handlers */
172static void vp_synchronize_vectors(struct virtio_device *vdev)
173{
174 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
175 int i;
176
177 if (vp_dev->intx_enabled)
178 synchronize_irq(vp_dev->pci_dev->irq);
179
180 for (i = 0; i < vp_dev->msix_vectors; ++i)
181 synchronize_irq(vp_dev->msix_entries[i].vector);
182}
183
184static void vp_reset(struct virtio_device *vdev)
185{
186 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
187 /* 0 status means a reset. */
188 iowrite8(0, vp_dev->ioaddr + VIRTIO_PCI_STATUS);
189 /* Flush out the status write, and flush in device writes,
190 * including MSi-X interrupts, if any. */
191 ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS);
192 /* Flush pending VQ/configuration callbacks. */
193 vp_synchronize_vectors(vdev);
194}
195
196/* the notify function used when creating a virt queue */
197static bool vp_notify(struct virtqueue *vq)
198{
199 struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
200
201 /* we write the queue's selector into the notification register to
202 * signal the other end */
203 iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY);
204 return true;
205}
206
207/* Handle a configuration change: Tell driver if it wants to know. */
208static irqreturn_t vp_config_changed(int irq, void *opaque)
209{
210 struct virtio_pci_device *vp_dev = opaque;
211
212 virtio_config_changed(&vp_dev->vdev);
213 return IRQ_HANDLED;
214}
215
216/* Notify all virtqueues on an interrupt. */
217static irqreturn_t vp_vring_interrupt(int irq, void *opaque)
218{
219 struct virtio_pci_device *vp_dev = opaque;
220 struct virtio_pci_vq_info *info;
221 irqreturn_t ret = IRQ_NONE;
222 unsigned long flags;
223
224 spin_lock_irqsave(&vp_dev->lock, flags);
225 list_for_each_entry(info, &vp_dev->virtqueues, node) {
226 if (vring_interrupt(irq, info->vq) == IRQ_HANDLED)
227 ret = IRQ_HANDLED;
228 }
229 spin_unlock_irqrestore(&vp_dev->lock, flags);
230
231 return ret;
232}
233
234/* A small wrapper to also acknowledge the interrupt when it's handled.
235 * I really need an EIO hook for the vring so I can ack the interrupt once we
236 * know that we'll be handling the IRQ but before we invoke the callback since
237 * the callback may notify the host which results in the host attempting to
238 * raise an interrupt that we would then mask once we acknowledged the
239 * interrupt. */
240static irqreturn_t vp_interrupt(int irq, void *opaque)
241{
242 struct virtio_pci_device *vp_dev = opaque;
243 u8 isr;
244
245 /* reading the ISR has the effect of also clearing it so it's very
246 * important to save off the value. */
247 isr = ioread8(vp_dev->ioaddr + VIRTIO_PCI_ISR);
248
249 /* It's definitely not us if the ISR was not high */
250 if (!isr)
251 return IRQ_NONE;
252
253 /* Configuration change? Tell driver if it wants to know. */
254 if (isr & VIRTIO_PCI_ISR_CONFIG)
255 vp_config_changed(irq, opaque);
256
257 return vp_vring_interrupt(irq, opaque);
258}
259
260static void vp_free_vectors(struct virtio_device *vdev)
261{
262 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
263 int i;
264
265 if (vp_dev->intx_enabled) {
266 free_irq(vp_dev->pci_dev->irq, vp_dev);
267 vp_dev->intx_enabled = 0;
268 }
269
270 for (i = 0; i < vp_dev->msix_used_vectors; ++i)
271 free_irq(vp_dev->msix_entries[i].vector, vp_dev);
272
273 for (i = 0; i < vp_dev->msix_vectors; i++)
274 if (vp_dev->msix_affinity_masks[i])
275 free_cpumask_var(vp_dev->msix_affinity_masks[i]);
276
277 if (vp_dev->msix_enabled) {
278 /* Disable the vector used for configuration */
279 iowrite16(VIRTIO_MSI_NO_VECTOR,
280 vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
281 /* Flush the write out to device */
282 ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
283
284 pci_disable_msix(vp_dev->pci_dev);
285 vp_dev->msix_enabled = 0;
286 }
287
288 vp_dev->msix_vectors = 0;
289 vp_dev->msix_used_vectors = 0;
290 kfree(vp_dev->msix_names);
291 vp_dev->msix_names = NULL;
292 kfree(vp_dev->msix_entries);
293 vp_dev->msix_entries = NULL;
294 kfree(vp_dev->msix_affinity_masks);
295 vp_dev->msix_affinity_masks = NULL;
296}
297
298static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
299 bool per_vq_vectors)
300{
301 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
302 const char *name = dev_name(&vp_dev->vdev.dev);
303 unsigned i, v;
304 int err = -ENOMEM;
305
306 vp_dev->msix_vectors = nvectors;
307
308 vp_dev->msix_entries = kmalloc(nvectors * sizeof *vp_dev->msix_entries,
309 GFP_KERNEL);
310 if (!vp_dev->msix_entries)
311 goto error;
312 vp_dev->msix_names = kmalloc(nvectors * sizeof *vp_dev->msix_names,
313 GFP_KERNEL);
314 if (!vp_dev->msix_names)
315 goto error;
316 vp_dev->msix_affinity_masks
317 = kzalloc(nvectors * sizeof *vp_dev->msix_affinity_masks,
318 GFP_KERNEL);
319 if (!vp_dev->msix_affinity_masks)
320 goto error;
321 for (i = 0; i < nvectors; ++i)
322 if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i],
323 GFP_KERNEL))
324 goto error;
325
326 for (i = 0; i < nvectors; ++i)
327 vp_dev->msix_entries[i].entry = i;
328
329 err = pci_enable_msix_exact(vp_dev->pci_dev,
330 vp_dev->msix_entries, nvectors);
331 if (err)
332 goto error;
333 vp_dev->msix_enabled = 1;
334
335 /* Set the vector used for configuration */
336 v = vp_dev->msix_used_vectors;
337 snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
338 "%s-config", name);
339 err = request_irq(vp_dev->msix_entries[v].vector,
340 vp_config_changed, 0, vp_dev->msix_names[v],
341 vp_dev);
342 if (err)
343 goto error;
344 ++vp_dev->msix_used_vectors;
345
346 iowrite16(v, vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
347 /* Verify we had enough resources to assign the vector */
348 v = ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
349 if (v == VIRTIO_MSI_NO_VECTOR) {
350 err = -EBUSY;
351 goto error;
352 }
353
354 if (!per_vq_vectors) {
355 /* Shared vector for all VQs */
356 v = vp_dev->msix_used_vectors;
357 snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
358 "%s-virtqueues", name);
359 err = request_irq(vp_dev->msix_entries[v].vector,
360 vp_vring_interrupt, 0, vp_dev->msix_names[v],
361 vp_dev);
362 if (err)
363 goto error;
364 ++vp_dev->msix_used_vectors;
365 }
366 return 0;
367error:
368 vp_free_vectors(vdev);
369 return err;
370}
371
372static int vp_request_intx(struct virtio_device *vdev)
373{
374 int err;
375 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
376
377 err = request_irq(vp_dev->pci_dev->irq, vp_interrupt,
378 IRQF_SHARED, dev_name(&vdev->dev), vp_dev);
379 if (!err)
380 vp_dev->intx_enabled = 1;
381 return err;
382}
383
384static struct virtqueue *setup_vq(struct virtio_device *vdev, unsigned index,
385 void (*callback)(struct virtqueue *vq),
386 const char *name,
387 u16 msix_vec)
388{
389 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
390 struct virtio_pci_vq_info *info;
391 struct virtqueue *vq;
392 unsigned long flags, size;
393 u16 num;
394 int err;
395
396 /* Select the queue we're interested in */
397 iowrite16(index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
398
399 /* Check if queue is either not available or already active. */
400 num = ioread16(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NUM);
401 if (!num || ioread32(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN))
402 return ERR_PTR(-ENOENT);
403
404 /* allocate and fill out our structure the represents an active
405 * queue */
406 info = kmalloc(sizeof(struct virtio_pci_vq_info), GFP_KERNEL);
407 if (!info)
408 return ERR_PTR(-ENOMEM);
409
410 info->num = num;
411 info->msix_vector = msix_vec;
412
413 size = PAGE_ALIGN(vring_size(num, VIRTIO_PCI_VRING_ALIGN));
414 info->queue = alloc_pages_exact(size, GFP_KERNEL|__GFP_ZERO);
415 if (info->queue == NULL) {
416 err = -ENOMEM;
417 goto out_info;
418 }
419
420 /* activate the queue */
421 iowrite32(virt_to_phys(info->queue) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT,
422 vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
423
424 /* create the vring */
425 vq = vring_new_virtqueue(index, info->num, VIRTIO_PCI_VRING_ALIGN, vdev,
426 true, info->queue, vp_notify, callback, name);
427 if (!vq) {
428 err = -ENOMEM;
429 goto out_activate_queue;
430 }
431
432 vq->priv = info;
433 info->vq = vq;
434
435 if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
436 iowrite16(msix_vec, vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
437 msix_vec = ioread16(vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
438 if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
439 err = -EBUSY;
440 goto out_assign;
441 }
442 }
443
444 if (callback) {
445 spin_lock_irqsave(&vp_dev->lock, flags);
446 list_add(&info->node, &vp_dev->virtqueues);
447 spin_unlock_irqrestore(&vp_dev->lock, flags);
448 } else {
449 INIT_LIST_HEAD(&info->node);
450 }
451
452 return vq;
453
454out_assign:
455 vring_del_virtqueue(vq);
456out_activate_queue:
457 iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
458 free_pages_exact(info->queue, size);
459out_info:
460 kfree(info);
461 return ERR_PTR(err);
462}
463
464static void vp_del_vq(struct virtqueue *vq)
465{
466 struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
467 struct virtio_pci_vq_info *info = vq->priv;
468 unsigned long flags, size;
469
470 spin_lock_irqsave(&vp_dev->lock, flags);
471 list_del(&info->node);
472 spin_unlock_irqrestore(&vp_dev->lock, flags);
473
474 iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
475
476 if (vp_dev->msix_enabled) {
477 iowrite16(VIRTIO_MSI_NO_VECTOR,
478 vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
479 /* Flush the write out to device */
480 ioread8(vp_dev->ioaddr + VIRTIO_PCI_ISR);
481 }
482
483 vring_del_virtqueue(vq);
484
485 /* Select and deactivate the queue */
486 iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
487
488 size = PAGE_ALIGN(vring_size(info->num, VIRTIO_PCI_VRING_ALIGN));
489 free_pages_exact(info->queue, size);
490 kfree(info);
491}
492
493/* the config->del_vqs() implementation */
494static void vp_del_vqs(struct virtio_device *vdev)
495{
496 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
497 struct virtqueue *vq, *n;
498 struct virtio_pci_vq_info *info;
499
500 list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
501 info = vq->priv;
502 if (vp_dev->per_vq_vectors &&
503 info->msix_vector != VIRTIO_MSI_NO_VECTOR)
504 free_irq(vp_dev->msix_entries[info->msix_vector].vector,
505 vq);
506 vp_del_vq(vq);
507 }
508 vp_dev->per_vq_vectors = false;
509
510 vp_free_vectors(vdev);
511}
512
513static int vp_try_to_find_vqs(struct virtio_device *vdev, unsigned nvqs,
514 struct virtqueue *vqs[],
515 vq_callback_t *callbacks[],
516 const char *names[],
517 bool use_msix,
518 bool per_vq_vectors)
519{
520 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
521 u16 msix_vec;
522 int i, err, nvectors, allocated_vectors;
523
524 if (!use_msix) {
525 /* Old style: one normal interrupt for change and all vqs. */
526 err = vp_request_intx(vdev);
527 if (err)
528 goto error_request;
529 } else {
530 if (per_vq_vectors) {
531 /* Best option: one for change interrupt, one per vq. */
532 nvectors = 1;
533 for (i = 0; i < nvqs; ++i)
534 if (callbacks[i])
535 ++nvectors;
536 } else {
537 /* Second best: one for change, shared for all vqs. */
538 nvectors = 2;
539 }
540
541 err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors);
542 if (err)
543 goto error_request;
544 }
545
546 vp_dev->per_vq_vectors = per_vq_vectors;
547 allocated_vectors = vp_dev->msix_used_vectors;
548 for (i = 0; i < nvqs; ++i) {
549 if (!names[i]) {
550 vqs[i] = NULL;
551 continue;
552 } else if (!callbacks[i] || !vp_dev->msix_enabled)
553 msix_vec = VIRTIO_MSI_NO_VECTOR;
554 else if (vp_dev->per_vq_vectors)
555 msix_vec = allocated_vectors++;
556 else
557 msix_vec = VP_MSIX_VQ_VECTOR;
558 vqs[i] = setup_vq(vdev, i, callbacks[i], names[i], msix_vec);
559 if (IS_ERR(vqs[i])) {
560 err = PTR_ERR(vqs[i]);
561 goto error_find;
562 }
563
564 if (!vp_dev->per_vq_vectors || msix_vec == VIRTIO_MSI_NO_VECTOR)
565 continue;
566
567 /* allocate per-vq irq if available and necessary */
568 snprintf(vp_dev->msix_names[msix_vec],
569 sizeof *vp_dev->msix_names,
570 "%s-%s",
571 dev_name(&vp_dev->vdev.dev), names[i]);
572 err = request_irq(vp_dev->msix_entries[msix_vec].vector,
573 vring_interrupt, 0,
574 vp_dev->msix_names[msix_vec],
575 vqs[i]);
576 if (err) {
577 vp_del_vq(vqs[i]);
578 goto error_find;
579 }
580 }
581 return 0;
582
583error_find:
584 vp_del_vqs(vdev);
585
586error_request:
587 return err;
588}
589
590/* the config->find_vqs() implementation */
591static int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
592 struct virtqueue *vqs[],
593 vq_callback_t *callbacks[],
594 const char *names[])
595{
596 int err;
597
598 /* Try MSI-X with one vector per queue. */
599 err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names, true, true);
600 if (!err)
601 return 0;
602 /* Fallback: MSI-X with one vector for config, one shared for queues. */
603 err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names,
604 true, false);
605 if (!err)
606 return 0;
607 /* Finally fall back to regular interrupts. */
608 return vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names,
609 false, false);
610}
611
612static const char *vp_bus_name(struct virtio_device *vdev)
613{
614 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
615
616 return pci_name(vp_dev->pci_dev);
617}
618
619/* Setup the affinity for a virtqueue:
620 * - force the affinity for per vq vector
621 * - OR over all affinities for shared MSI
622 * - ignore the affinity request if we're using INTX
623 */
624static int vp_set_vq_affinity(struct virtqueue *vq, int cpu)
625{
626 struct virtio_device *vdev = vq->vdev;
627 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
628 struct virtio_pci_vq_info *info = vq->priv;
629 struct cpumask *mask;
630 unsigned int irq;
631
632 if (!vq->callback)
633 return -EINVAL;
634
635 if (vp_dev->msix_enabled) {
636 mask = vp_dev->msix_affinity_masks[info->msix_vector];
637 irq = vp_dev->msix_entries[info->msix_vector].vector;
638 if (cpu == -1)
639 irq_set_affinity_hint(irq, NULL);
640 else {
641 cpumask_set_cpu(cpu, mask);
642 irq_set_affinity_hint(irq, mask);
643 }
644 }
645 return 0;
646}
647
648static const struct virtio_config_ops virtio_pci_config_ops = {
649 .get = vp_get,
650 .set = vp_set,
651 .get_status = vp_get_status,
652 .set_status = vp_set_status,
653 .reset = vp_reset,
654 .find_vqs = vp_find_vqs,
655 .del_vqs = vp_del_vqs,
656 .get_features = vp_get_features,
657 .finalize_features = vp_finalize_features,
658 .bus_name = vp_bus_name,
659 .set_vq_affinity = vp_set_vq_affinity,
660};
661
662static void virtio_pci_release_dev(struct device *_d)
663{
664 /*
665 * No need for a release method as we allocate/free
666 * all devices together with the pci devices.
667 * Provide an empty one to avoid getting a warning from core.
668 */
669}
670
671/* the PCI probing function */
672static int virtio_pci_probe(struct pci_dev *pci_dev,
673 const struct pci_device_id *id)
674{
675 struct virtio_pci_device *vp_dev;
676 int err;
677
678 /* We only own devices >= 0x1000 and <= 0x103f: leave the rest. */
679 if (pci_dev->device < 0x1000 || pci_dev->device > 0x103f)
680 return -ENODEV;
681
682 if (pci_dev->revision != VIRTIO_PCI_ABI_VERSION) {
683 printk(KERN_ERR "virtio_pci: expected ABI version %d, got %d\n",
684 VIRTIO_PCI_ABI_VERSION, pci_dev->revision);
685 return -ENODEV;
686 }
687
688 /* allocate our structure and fill it out */
689 vp_dev = kzalloc(sizeof(struct virtio_pci_device), GFP_KERNEL);
690 if (vp_dev == NULL)
691 return -ENOMEM;
692
693 vp_dev->vdev.dev.parent = &pci_dev->dev;
694 vp_dev->vdev.dev.release = virtio_pci_release_dev;
695 vp_dev->vdev.config = &virtio_pci_config_ops;
696 vp_dev->pci_dev = pci_dev;
697 INIT_LIST_HEAD(&vp_dev->virtqueues);
698 spin_lock_init(&vp_dev->lock);
699
700 /* Disable MSI/MSIX to bring device to a known good state. */
701 pci_msi_off(pci_dev);
702
703 /* enable the device */
704 err = pci_enable_device(pci_dev);
705 if (err)
706 goto out;
707
708 err = pci_request_regions(pci_dev, "virtio-pci");
709 if (err)
710 goto out_enable_device;
711
712 vp_dev->ioaddr = pci_iomap(pci_dev, 0, 0);
713 if (vp_dev->ioaddr == NULL) {
714 err = -ENOMEM;
715 goto out_req_regions;
716 }
717
718 pci_set_drvdata(pci_dev, vp_dev);
719 pci_set_master(pci_dev);
720
721 /* we use the subsystem vendor/device id as the virtio vendor/device
722 * id. this allows us to use the same PCI vendor/device id for all
723 * virtio devices and to identify the particular virtio driver by
724 * the subsystem ids */
725 vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor;
726 vp_dev->vdev.id.device = pci_dev->subsystem_device;
727
728 /* finally register the virtio device */
729 err = register_virtio_device(&vp_dev->vdev);
730 if (err)
731 goto out_set_drvdata;
732
733 return 0;
734
735out_set_drvdata:
736 pci_iounmap(pci_dev, vp_dev->ioaddr);
737out_req_regions:
738 pci_release_regions(pci_dev);
739out_enable_device:
740 pci_disable_device(pci_dev);
741out:
742 kfree(vp_dev);
743 return err;
744}
745
746static void virtio_pci_remove(struct pci_dev *pci_dev)
747{
748 struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
749
750 unregister_virtio_device(&vp_dev->vdev);
751
752 vp_del_vqs(&vp_dev->vdev);
753 pci_iounmap(pci_dev, vp_dev->ioaddr);
754 pci_release_regions(pci_dev);
755 pci_disable_device(pci_dev);
756 kfree(vp_dev);
757}
758
759#ifdef CONFIG_PM_SLEEP
760static int virtio_pci_freeze(struct device *dev)
761{
762 struct pci_dev *pci_dev = to_pci_dev(dev);
763 struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
764 int ret;
765
766 ret = virtio_device_freeze(&vp_dev->vdev);
767
768 if (!ret)
769 pci_disable_device(pci_dev);
770 return ret;
771}
772
773static int virtio_pci_restore(struct device *dev)
774{
775 struct pci_dev *pci_dev = to_pci_dev(dev);
776 struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
777 int ret;
778
779 ret = pci_enable_device(pci_dev);
780 if (ret)
781 return ret;
782
783 pci_set_master(pci_dev);
784 return virtio_device_restore(&vp_dev->vdev);
785}
786
787static const struct dev_pm_ops virtio_pci_pm_ops = {
788 SET_SYSTEM_SLEEP_PM_OPS(virtio_pci_freeze, virtio_pci_restore)
789};
790#endif
791
792static struct pci_driver virtio_pci_driver = {
793 .name = "virtio-pci",
794 .id_table = virtio_pci_id_table,
795 .probe = virtio_pci_probe,
796 .remove = virtio_pci_remove,
797#ifdef CONFIG_PM_SLEEP
798 .driver.pm = &virtio_pci_pm_ops,
799#endif
800};
801
802module_pci_driver(virtio_pci_driver);
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
new file mode 100644
index 000000000000..953057d84185
--- /dev/null
+++ b/drivers/virtio/virtio_pci_common.c
@@ -0,0 +1,464 @@
1/*
2 * Virtio PCI driver - common functionality for all device versions
3 *
4 * This module allows virtio devices to be used over a virtual PCI device.
5 * This can be used with QEMU based VMMs like KVM or Xen.
6 *
7 * Copyright IBM Corp. 2007
8 * Copyright Red Hat, Inc. 2014
9 *
10 * Authors:
11 * Anthony Liguori <aliguori@us.ibm.com>
12 * Rusty Russell <rusty@rustcorp.com.au>
13 * Michael S. Tsirkin <mst@redhat.com>
14 *
15 * This work is licensed under the terms of the GNU GPL, version 2 or later.
16 * See the COPYING file in the top-level directory.
17 *
18 */
19
20#include "virtio_pci_common.h"
21
22/* wait for pending irq handlers */
23void vp_synchronize_vectors(struct virtio_device *vdev)
24{
25 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
26 int i;
27
28 if (vp_dev->intx_enabled)
29 synchronize_irq(vp_dev->pci_dev->irq);
30
31 for (i = 0; i < vp_dev->msix_vectors; ++i)
32 synchronize_irq(vp_dev->msix_entries[i].vector);
33}
34
35/* the notify function used when creating a virt queue */
36bool vp_notify(struct virtqueue *vq)
37{
38 /* we write the queue's selector into the notification register to
39 * signal the other end */
40 iowrite16(vq->index, (void __iomem *)vq->priv);
41 return true;
42}
43
44/* Handle a configuration change: Tell driver if it wants to know. */
45static irqreturn_t vp_config_changed(int irq, void *opaque)
46{
47 struct virtio_pci_device *vp_dev = opaque;
48
49 virtio_config_changed(&vp_dev->vdev);
50 return IRQ_HANDLED;
51}
52
53/* Notify all virtqueues on an interrupt. */
54static irqreturn_t vp_vring_interrupt(int irq, void *opaque)
55{
56 struct virtio_pci_device *vp_dev = opaque;
57 struct virtio_pci_vq_info *info;
58 irqreturn_t ret = IRQ_NONE;
59 unsigned long flags;
60
61 spin_lock_irqsave(&vp_dev->lock, flags);
62 list_for_each_entry(info, &vp_dev->virtqueues, node) {
63 if (vring_interrupt(irq, info->vq) == IRQ_HANDLED)
64 ret = IRQ_HANDLED;
65 }
66 spin_unlock_irqrestore(&vp_dev->lock, flags);
67
68 return ret;
69}
70
71/* A small wrapper to also acknowledge the interrupt when it's handled.
72 * I really need an EIO hook for the vring so I can ack the interrupt once we
73 * know that we'll be handling the IRQ but before we invoke the callback since
74 * the callback may notify the host which results in the host attempting to
75 * raise an interrupt that we would then mask once we acknowledged the
76 * interrupt. */
77static irqreturn_t vp_interrupt(int irq, void *opaque)
78{
79 struct virtio_pci_device *vp_dev = opaque;
80 u8 isr;
81
82 /* reading the ISR has the effect of also clearing it so it's very
83 * important to save off the value. */
84 isr = ioread8(vp_dev->isr);
85
86 /* It's definitely not us if the ISR was not high */
87 if (!isr)
88 return IRQ_NONE;
89
90 /* Configuration change? Tell driver if it wants to know. */
91 if (isr & VIRTIO_PCI_ISR_CONFIG)
92 vp_config_changed(irq, opaque);
93
94 return vp_vring_interrupt(irq, opaque);
95}
96
97static void vp_free_vectors(struct virtio_device *vdev)
98{
99 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
100 int i;
101
102 if (vp_dev->intx_enabled) {
103 free_irq(vp_dev->pci_dev->irq, vp_dev);
104 vp_dev->intx_enabled = 0;
105 }
106
107 for (i = 0; i < vp_dev->msix_used_vectors; ++i)
108 free_irq(vp_dev->msix_entries[i].vector, vp_dev);
109
110 for (i = 0; i < vp_dev->msix_vectors; i++)
111 if (vp_dev->msix_affinity_masks[i])
112 free_cpumask_var(vp_dev->msix_affinity_masks[i]);
113
114 if (vp_dev->msix_enabled) {
115 /* Disable the vector used for configuration */
116 vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR);
117
118 pci_disable_msix(vp_dev->pci_dev);
119 vp_dev->msix_enabled = 0;
120 }
121
122 vp_dev->msix_vectors = 0;
123 vp_dev->msix_used_vectors = 0;
124 kfree(vp_dev->msix_names);
125 vp_dev->msix_names = NULL;
126 kfree(vp_dev->msix_entries);
127 vp_dev->msix_entries = NULL;
128 kfree(vp_dev->msix_affinity_masks);
129 vp_dev->msix_affinity_masks = NULL;
130}
131
132static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
133 bool per_vq_vectors)
134{
135 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
136 const char *name = dev_name(&vp_dev->vdev.dev);
137 unsigned i, v;
138 int err = -ENOMEM;
139
140 vp_dev->msix_vectors = nvectors;
141
142 vp_dev->msix_entries = kmalloc(nvectors * sizeof *vp_dev->msix_entries,
143 GFP_KERNEL);
144 if (!vp_dev->msix_entries)
145 goto error;
146 vp_dev->msix_names = kmalloc(nvectors * sizeof *vp_dev->msix_names,
147 GFP_KERNEL);
148 if (!vp_dev->msix_names)
149 goto error;
150 vp_dev->msix_affinity_masks
151 = kzalloc(nvectors * sizeof *vp_dev->msix_affinity_masks,
152 GFP_KERNEL);
153 if (!vp_dev->msix_affinity_masks)
154 goto error;
155 for (i = 0; i < nvectors; ++i)
156 if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i],
157 GFP_KERNEL))
158 goto error;
159
160 for (i = 0; i < nvectors; ++i)
161 vp_dev->msix_entries[i].entry = i;
162
163 err = pci_enable_msix_exact(vp_dev->pci_dev,
164 vp_dev->msix_entries, nvectors);
165 if (err)
166 goto error;
167 vp_dev->msix_enabled = 1;
168
169 /* Set the vector used for configuration */
170 v = vp_dev->msix_used_vectors;
171 snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
172 "%s-config", name);
173 err = request_irq(vp_dev->msix_entries[v].vector,
174 vp_config_changed, 0, vp_dev->msix_names[v],
175 vp_dev);
176 if (err)
177 goto error;
178 ++vp_dev->msix_used_vectors;
179
180 v = vp_dev->config_vector(vp_dev, v);
181 /* Verify we had enough resources to assign the vector */
182 if (v == VIRTIO_MSI_NO_VECTOR) {
183 err = -EBUSY;
184 goto error;
185 }
186
187 if (!per_vq_vectors) {
188 /* Shared vector for all VQs */
189 v = vp_dev->msix_used_vectors;
190 snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
191 "%s-virtqueues", name);
192 err = request_irq(vp_dev->msix_entries[v].vector,
193 vp_vring_interrupt, 0, vp_dev->msix_names[v],
194 vp_dev);
195 if (err)
196 goto error;
197 ++vp_dev->msix_used_vectors;
198 }
199 return 0;
200error:
201 vp_free_vectors(vdev);
202 return err;
203}
204
205static int vp_request_intx(struct virtio_device *vdev)
206{
207 int err;
208 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
209
210 err = request_irq(vp_dev->pci_dev->irq, vp_interrupt,
211 IRQF_SHARED, dev_name(&vdev->dev), vp_dev);
212 if (!err)
213 vp_dev->intx_enabled = 1;
214 return err;
215}
216
217static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned index,
218 void (*callback)(struct virtqueue *vq),
219 const char *name,
220 u16 msix_vec)
221{
222 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
223 struct virtio_pci_vq_info *info = kmalloc(sizeof *info, GFP_KERNEL);
224 struct virtqueue *vq;
225 unsigned long flags;
226
227 /* fill out our structure that represents an active queue */
228 if (!info)
229 return ERR_PTR(-ENOMEM);
230
231 vq = vp_dev->setup_vq(vp_dev, info, index, callback, name, msix_vec);
232 if (IS_ERR(vq))
233 goto out_info;
234
235 info->vq = vq;
236 if (callback) {
237 spin_lock_irqsave(&vp_dev->lock, flags);
238 list_add(&info->node, &vp_dev->virtqueues);
239 spin_unlock_irqrestore(&vp_dev->lock, flags);
240 } else {
241 INIT_LIST_HEAD(&info->node);
242 }
243
244 vp_dev->vqs[index] = info;
245 return vq;
246
247out_info:
248 kfree(info);
249 return vq;
250}
251
252static void vp_del_vq(struct virtqueue *vq)
253{
254 struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
255 struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
256 unsigned long flags;
257
258 spin_lock_irqsave(&vp_dev->lock, flags);
259 list_del(&info->node);
260 spin_unlock_irqrestore(&vp_dev->lock, flags);
261
262 vp_dev->del_vq(info);
263 kfree(info);
264}
265
266/* the config->del_vqs() implementation */
267void vp_del_vqs(struct virtio_device *vdev)
268{
269 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
270 struct virtqueue *vq, *n;
271 struct virtio_pci_vq_info *info;
272
273 list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
274 info = vp_dev->vqs[vq->index];
275 if (vp_dev->per_vq_vectors &&
276 info->msix_vector != VIRTIO_MSI_NO_VECTOR)
277 free_irq(vp_dev->msix_entries[info->msix_vector].vector,
278 vq);
279 vp_del_vq(vq);
280 }
281 vp_dev->per_vq_vectors = false;
282
283 vp_free_vectors(vdev);
284 kfree(vp_dev->vqs);
285}
286
287static int vp_try_to_find_vqs(struct virtio_device *vdev, unsigned nvqs,
288 struct virtqueue *vqs[],
289 vq_callback_t *callbacks[],
290 const char *names[],
291 bool use_msix,
292 bool per_vq_vectors)
293{
294 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
295 u16 msix_vec;
296 int i, err, nvectors, allocated_vectors;
297
298 vp_dev->vqs = kmalloc(nvqs * sizeof *vp_dev->vqs, GFP_KERNEL);
299 if (!vp_dev->vqs)
300 return -ENOMEM;
301
302 if (!use_msix) {
303 /* Old style: one normal interrupt for change and all vqs. */
304 err = vp_request_intx(vdev);
305 if (err)
306 goto error_find;
307 } else {
308 if (per_vq_vectors) {
309 /* Best option: one for change interrupt, one per vq. */
310 nvectors = 1;
311 for (i = 0; i < nvqs; ++i)
312 if (callbacks[i])
313 ++nvectors;
314 } else {
315 /* Second best: one for change, shared for all vqs. */
316 nvectors = 2;
317 }
318
319 err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors);
320 if (err)
321 goto error_find;
322 }
323
324 vp_dev->per_vq_vectors = per_vq_vectors;
325 allocated_vectors = vp_dev->msix_used_vectors;
326 for (i = 0; i < nvqs; ++i) {
327 if (!names[i]) {
328 vqs[i] = NULL;
329 continue;
330 } else if (!callbacks[i] || !vp_dev->msix_enabled)
331 msix_vec = VIRTIO_MSI_NO_VECTOR;
332 else if (vp_dev->per_vq_vectors)
333 msix_vec = allocated_vectors++;
334 else
335 msix_vec = VP_MSIX_VQ_VECTOR;
336 vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i], msix_vec);
337 if (IS_ERR(vqs[i])) {
338 err = PTR_ERR(vqs[i]);
339 goto error_find;
340 }
341
342 if (!vp_dev->per_vq_vectors || msix_vec == VIRTIO_MSI_NO_VECTOR)
343 continue;
344
345 /* allocate per-vq irq if available and necessary */
346 snprintf(vp_dev->msix_names[msix_vec],
347 sizeof *vp_dev->msix_names,
348 "%s-%s",
349 dev_name(&vp_dev->vdev.dev), names[i]);
350 err = request_irq(vp_dev->msix_entries[msix_vec].vector,
351 vring_interrupt, 0,
352 vp_dev->msix_names[msix_vec],
353 vqs[i]);
354 if (err) {
355 vp_del_vq(vqs[i]);
356 goto error_find;
357 }
358 }
359 return 0;
360
361error_find:
362 vp_del_vqs(vdev);
363 return err;
364}
365
366/* the config->find_vqs() implementation */
367int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
368 struct virtqueue *vqs[],
369 vq_callback_t *callbacks[],
370 const char *names[])
371{
372 int err;
373
374 /* Try MSI-X with one vector per queue. */
375 err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names, true, true);
376 if (!err)
377 return 0;
378 /* Fallback: MSI-X with one vector for config, one shared for queues. */
379 err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names,
380 true, false);
381 if (!err)
382 return 0;
383 /* Finally fall back to regular interrupts. */
384 return vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names,
385 false, false);
386}
387
388const char *vp_bus_name(struct virtio_device *vdev)
389{
390 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
391
392 return pci_name(vp_dev->pci_dev);
393}
394
395/* Setup the affinity for a virtqueue:
396 * - force the affinity for per vq vector
397 * - OR over all affinities for shared MSI
398 * - ignore the affinity request if we're using INTX
399 */
400int vp_set_vq_affinity(struct virtqueue *vq, int cpu)
401{
402 struct virtio_device *vdev = vq->vdev;
403 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
404 struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
405 struct cpumask *mask;
406 unsigned int irq;
407
408 if (!vq->callback)
409 return -EINVAL;
410
411 if (vp_dev->msix_enabled) {
412 mask = vp_dev->msix_affinity_masks[info->msix_vector];
413 irq = vp_dev->msix_entries[info->msix_vector].vector;
414 if (cpu == -1)
415 irq_set_affinity_hint(irq, NULL);
416 else {
417 cpumask_set_cpu(cpu, mask);
418 irq_set_affinity_hint(irq, mask);
419 }
420 }
421 return 0;
422}
423
void virtio_pci_release_dev(struct device *_d)
{
	/* Intentionally empty: the virtio_pci_device is allocated and
	 * freed together with the PCI device in probe()/remove(), so
	 * there is nothing left to release here.  An empty callback is
	 * still provided to silence the driver-core warning about a
	 * missing release method. */
}
432
433#ifdef CONFIG_PM_SLEEP
434static int virtio_pci_freeze(struct device *dev)
435{
436 struct pci_dev *pci_dev = to_pci_dev(dev);
437 struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
438 int ret;
439
440 ret = virtio_device_freeze(&vp_dev->vdev);
441
442 if (!ret)
443 pci_disable_device(pci_dev);
444 return ret;
445}
446
447static int virtio_pci_restore(struct device *dev)
448{
449 struct pci_dev *pci_dev = to_pci_dev(dev);
450 struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
451 int ret;
452
453 ret = pci_enable_device(pci_dev);
454 if (ret)
455 return ret;
456
457 pci_set_master(pci_dev);
458 return virtio_device_restore(&vp_dev->vdev);
459}
460
/* Only system-sleep transitions are handled; runtime PM is not used. */
const struct dev_pm_ops virtio_pci_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(virtio_pci_freeze, virtio_pci_restore)
};
464#endif
diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
new file mode 100644
index 000000000000..d840dad4149d
--- /dev/null
+++ b/drivers/virtio/virtio_pci_common.h
@@ -0,0 +1,136 @@
1#ifndef _DRIVERS_VIRTIO_VIRTIO_PCI_COMMON_H
2#define _DRIVERS_VIRTIO_VIRTIO_PCI_COMMON_H
3/*
4 * Virtio PCI driver - APIs for common functionality for all device versions
5 *
6 * This module allows virtio devices to be used over a virtual PCI device.
7 * This can be used with QEMU based VMMs like KVM or Xen.
8 *
9 * Copyright IBM Corp. 2007
10 * Copyright Red Hat, Inc. 2014
11 *
12 * Authors:
13 * Anthony Liguori <aliguori@us.ibm.com>
14 * Rusty Russell <rusty@rustcorp.com.au>
15 * Michael S. Tsirkin <mst@redhat.com>
16 *
17 * This work is licensed under the terms of the GNU GPL, version 2 or later.
18 * See the COPYING file in the top-level directory.
19 *
20 */
21
22#include <linux/module.h>
23#include <linux/list.h>
24#include <linux/pci.h>
25#include <linux/slab.h>
26#include <linux/interrupt.h>
27#include <linux/virtio.h>
28#include <linux/virtio_config.h>
29#include <linux/virtio_ring.h>
30#define VIRTIO_PCI_NO_LEGACY
31#include <linux/virtio_pci.h>
32#include <linux/highmem.h>
33#include <linux/spinlock.h>
34
/* Per-virtqueue bookkeeping kept by the transport (one per created vq). */
struct virtio_pci_vq_info {
	/* the actual virtqueue */
	struct virtqueue *vq;

	/* the number of entries in the queue */
	int num;

	/* the virtual address of the ring queue */
	void *queue;

	/* the list node for the virtqueues list (empty-initialized when
	 * the vq has no callback and thus never receives interrupts) */
	struct list_head node;

	/* MSI-X vector (or none) */
	unsigned msix_vector;
};
51
/* Our device structure: common state shared by the legacy and modern
 * virtio-pci backends, which differ only via the function pointers at
 * the bottom. */
struct virtio_pci_device {
	struct virtio_device vdev;
	struct pci_dev *pci_dev;

	/* the IO mapping for the PCI config space */
	void __iomem *ioaddr;

	/* the IO mapping for ISR operation */
	void __iomem *isr;

	/* a list of queues so we can dispatch IRQs */
	spinlock_t lock;
	struct list_head virtqueues;

	/* array of all queues for house-keeping */
	struct virtio_pci_vq_info **vqs;

	/* MSI-X support */
	int msix_enabled;
	int intx_enabled;
	struct msix_entry *msix_entries;
	cpumask_var_t *msix_affinity_masks;
	/* Name strings for interrupts. This size should be enough,
	 * and I'm too lazy to allocate each name separately. */
	char (*msix_names)[256];
	/* Number of available vectors */
	unsigned msix_vectors;
	/* Vectors allocated, excluding per-vq vectors if any */
	unsigned msix_used_vectors;

	/* Whether we have vector per vq */
	bool per_vq_vectors;

	/* Version-specific hooks: create a queue on the device */
	struct virtqueue *(*setup_vq)(struct virtio_pci_device *vp_dev,
				      struct virtio_pci_vq_info *info,
				      unsigned idx,
				      void (*callback)(struct virtqueue *vq),
				      const char *name,
				      u16 msix_vec);
	/* ... and tear one down again */
	void (*del_vq)(struct virtio_pci_vq_info *info);

	/* Program the config-change vector; returns the value the device
	 * reads back (VIRTIO_MSI_NO_VECTOR if it was rejected). */
	u16 (*config_vector)(struct virtio_pci_device *vp_dev, u16 vector);
};
96
/* Constants for MSI-X */
/* Use the first vector for configuration changes, and the second (and
 * any following) for virtqueues.  Thus, we need at least 2 vectors
 * for MSI. */
enum {
	VP_MSIX_CONFIG_VECTOR = 0,
	VP_MSIX_VQ_VECTOR = 1,
};
104
105/* Convert a generic virtio device to our structure */
106static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev)
107{
108 return container_of(vdev, struct virtio_pci_device, vdev);
109}
110
111/* wait for pending irq handlers */
112void vp_synchronize_vectors(struct virtio_device *vdev);
113/* the notify function used when creating a virt queue */
114bool vp_notify(struct virtqueue *vq);
115/* the config->del_vqs() implementation */
116void vp_del_vqs(struct virtio_device *vdev);
117/* the config->find_vqs() implementation */
118int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
119 struct virtqueue *vqs[],
120 vq_callback_t *callbacks[],
121 const char *names[]);
122const char *vp_bus_name(struct virtio_device *vdev);
123
124/* Setup the affinity for a virtqueue:
125 * - force the affinity for per vq vector
126 * - OR over all affinities for shared MSI
127 * - ignore the affinity request if we're using INTX
128 */
129int vp_set_vq_affinity(struct virtqueue *vq, int cpu);
130void virtio_pci_release_dev(struct device *);
131
132#ifdef CONFIG_PM_SLEEP
133extern const struct dev_pm_ops virtio_pci_pm_ops;
134#endif
135
136#endif
diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c
new file mode 100644
index 000000000000..2588252e5c1c
--- /dev/null
+++ b/drivers/virtio/virtio_pci_legacy.c
@@ -0,0 +1,326 @@
1/*
2 * Virtio PCI driver - legacy device support
3 *
4 * This module allows virtio devices to be used over a virtual PCI device.
5 * This can be used with QEMU based VMMs like KVM or Xen.
6 *
7 * Copyright IBM Corp. 2007
8 * Copyright Red Hat, Inc. 2014
9 *
10 * Authors:
11 * Anthony Liguori <aliguori@us.ibm.com>
12 * Rusty Russell <rusty@rustcorp.com.au>
13 * Michael S. Tsirkin <mst@redhat.com>
14 *
15 * This work is licensed under the terms of the GNU GPL, version 2 or later.
16 * See the COPYING file in the top-level directory.
17 *
18 */
19
20#include "virtio_pci_common.h"
21
/* Qumranet donated their vendor ID for devices 0x1000 thru 0x10FF. */
/* Match any device ID; probe() narrows this to the legacy range
 * 0x1000..0x103f and the expected ABI revision. */
static const struct pci_device_id virtio_pci_id_table[] = {
	{ PCI_DEVICE(0x1af4, PCI_ANY_ID) },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, virtio_pci_id_table);
29
30/* virtio config->get_features() implementation */
31static u64 vp_get_features(struct virtio_device *vdev)
32{
33 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
34
35 /* When someone needs more than 32 feature bits, we'll need to
36 * steal a bit to indicate that the rest are somewhere else. */
37 return ioread32(vp_dev->ioaddr + VIRTIO_PCI_HOST_FEATURES);
38}
39
40/* virtio config->finalize_features() implementation */
41static int vp_finalize_features(struct virtio_device *vdev)
42{
43 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
44
45 /* Give virtio_ring a chance to accept features. */
46 vring_transport_features(vdev);
47
48 /* Make sure we don't have any features > 32 bits! */
49 BUG_ON((u32)vdev->features != vdev->features);
50
51 /* We only support 32 feature bits. */
52 iowrite32(vdev->features, vp_dev->ioaddr + VIRTIO_PCI_GUEST_FEATURES);
53
54 return 0;
55}
56
57/* virtio config->get() implementation */
58static void vp_get(struct virtio_device *vdev, unsigned offset,
59 void *buf, unsigned len)
60{
61 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
62 void __iomem *ioaddr = vp_dev->ioaddr +
63 VIRTIO_PCI_CONFIG(vp_dev) + offset;
64 u8 *ptr = buf;
65 int i;
66
67 for (i = 0; i < len; i++)
68 ptr[i] = ioread8(ioaddr + i);
69}
70
71/* the config->set() implementation. it's symmetric to the config->get()
72 * implementation */
73static void vp_set(struct virtio_device *vdev, unsigned offset,
74 const void *buf, unsigned len)
75{
76 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
77 void __iomem *ioaddr = vp_dev->ioaddr +
78 VIRTIO_PCI_CONFIG(vp_dev) + offset;
79 const u8 *ptr = buf;
80 int i;
81
82 for (i = 0; i < len; i++)
83 iowrite8(ptr[i], ioaddr + i);
84}
85
86/* config->{get,set}_status() implementations */
87static u8 vp_get_status(struct virtio_device *vdev)
88{
89 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
90 return ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS);
91}
92
93static void vp_set_status(struct virtio_device *vdev, u8 status)
94{
95 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
96 /* We should never be setting status to 0. */
97 BUG_ON(status == 0);
98 iowrite8(status, vp_dev->ioaddr + VIRTIO_PCI_STATUS);
99}
100
/* config->reset() implementation.  The statement order here matters:
 * write 0, flush, then drain callbacks. */
static void vp_reset(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	/* 0 status means a reset. */
	iowrite8(0, vp_dev->ioaddr + VIRTIO_PCI_STATUS);
	/* Flush out the status write, and flush in device writes,
	 * including MSI-X interrupts, if any. */
	ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS);
	/* Flush pending VQ/configuration callbacks. */
	vp_synchronize_vectors(vdev);
}
112
/* config_vector hook for legacy devices.  Returns the value the device
 * reads back: VIRTIO_MSI_NO_VECTOR means the device rejected it. */
static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
{
	/* Setup the vector used for configuration events */
	iowrite16(vector, vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
	/* Verify we had enough resources to assign the vector */
	/* Will also flush the write out to device */
	return ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
}
121
/* Legacy setup_vq hook: allocate a physically contiguous ring, program
 * its page frame number into the device, build the vring, and (when
 * requested) attach an MSI-X vector.
 *
 * Returns the new virtqueue, or ERR_PTR:
 *   -ENOENT  queue does not exist on the device, or is already active
 *   -ENOMEM  ring pages or vring allocation failed
 *   -EBUSY   device rejected the requested MSI-X vector
 */
static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
				  struct virtio_pci_vq_info *info,
				  unsigned index,
				  void (*callback)(struct virtqueue *vq),
				  const char *name,
				  u16 msix_vec)
{
	struct virtqueue *vq;
	unsigned long size;
	u16 num;
	int err;

	/* Select the queue we're interested in */
	iowrite16(index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);

	/* Check if queue is either not available or already active. */
	num = ioread16(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NUM);
	if (!num || ioread32(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN))
		return ERR_PTR(-ENOENT);

	info->num = num;
	info->msix_vector = msix_vec;

	/* The ring must be page aligned: the device addresses it below by
	 * page frame number. */
	size = PAGE_ALIGN(vring_size(num, VIRTIO_PCI_VRING_ALIGN));
	info->queue = alloc_pages_exact(size, GFP_KERNEL|__GFP_ZERO);
	if (info->queue == NULL)
		return ERR_PTR(-ENOMEM);

	/* activate the queue */
	iowrite32(virt_to_phys(info->queue) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT,
		  vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);

	/* create the vring */
	vq = vring_new_virtqueue(index, info->num,
				 VIRTIO_PCI_VRING_ALIGN, &vp_dev->vdev,
				 true, info->queue, vp_notify, callback, name);
	if (!vq) {
		err = -ENOMEM;
		goto out_activate_queue;
	}

	/* Kicks for this vq are a write to the legacy notify register. */
	vq->priv = (void __force *)vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY;

	if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
		/* Read back to learn whether the device accepted the
		 * vector; NO_VECTOR on read-back means it did not. */
		iowrite16(msix_vec, vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
		msix_vec = ioread16(vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
		if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
			err = -EBUSY;
			goto out_assign;
		}
	}

	return vq;

out_assign:
	vring_del_virtqueue(vq);
out_activate_queue:
	/* Writing PFN 0 deactivates the queue again. */
	iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
	free_pages_exact(info->queue, size);
	return ERR_PTR(err);
}
183
/* Legacy del_vq hook: detach the vq's MSI-X vector, destroy the vring,
 * deactivate the queue on the device and free the ring pages. */
static void del_vq(struct virtio_pci_vq_info *info)
{
	struct virtqueue *vq = info->vq;
	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
	unsigned long size;

	/* Select this queue so the register writes below target it. */
	iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);

	if (vp_dev->msix_enabled) {
		iowrite16(VIRTIO_MSI_NO_VECTOR,
			  vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
		/* Flush the write out to device */
		ioread8(vp_dev->ioaddr + VIRTIO_PCI_ISR);
	}

	vring_del_virtqueue(vq);

	/* Select and deactivate the queue */
	iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);

	size = PAGE_ALIGN(vring_size(info->num, VIRTIO_PCI_VRING_ALIGN));
	free_pages_exact(info->queue, size);
}
207
/* Legacy virtio_config_ops: shared vp_* helpers from virtio_pci_common.c
 * plus the legacy register accessors defined above. */
static const struct virtio_config_ops virtio_pci_config_ops = {
	.get = vp_get,
	.set = vp_set,
	.get_status = vp_get_status,
	.set_status = vp_set_status,
	.reset = vp_reset,
	.find_vqs = vp_find_vqs,
	.del_vqs = vp_del_vqs,
	.get_features = vp_get_features,
	.finalize_features = vp_finalize_features,
	.bus_name = vp_bus_name,
	.set_vq_affinity = vp_set_vq_affinity,
};
221
/* the PCI probing function
 *
 * Filters to legacy virtio devices (0x1000..0x103f, matching ABI
 * revision), maps BAR 0, fills in the legacy version-specific hooks and
 * registers the virtio device.  The error labels unwind in exact
 * reverse order of acquisition. */
static int virtio_pci_probe(struct pci_dev *pci_dev,
			    const struct pci_device_id *id)
{
	struct virtio_pci_device *vp_dev;
	int err;

	/* We only own devices >= 0x1000 and <= 0x103f: leave the rest. */
	if (pci_dev->device < 0x1000 || pci_dev->device > 0x103f)
		return -ENODEV;

	if (pci_dev->revision != VIRTIO_PCI_ABI_VERSION) {
		printk(KERN_ERR "virtio_pci: expected ABI version %d, got %d\n",
		       VIRTIO_PCI_ABI_VERSION, pci_dev->revision);
		return -ENODEV;
	}

	/* allocate our structure and fill it out */
	vp_dev = kzalloc(sizeof(struct virtio_pci_device), GFP_KERNEL);
	if (vp_dev == NULL)
		return -ENOMEM;

	vp_dev->vdev.dev.parent = &pci_dev->dev;
	vp_dev->vdev.dev.release = virtio_pci_release_dev;
	vp_dev->vdev.config = &virtio_pci_config_ops;
	vp_dev->pci_dev = pci_dev;
	INIT_LIST_HEAD(&vp_dev->virtqueues);
	spin_lock_init(&vp_dev->lock);

	/* Disable MSI/MSIX to bring device to a known good state. */
	pci_msi_off(pci_dev);

	/* enable the device */
	err = pci_enable_device(pci_dev);
	if (err)
		goto out;

	err = pci_request_regions(pci_dev, "virtio-pci");
	if (err)
		goto out_enable_device;

	/* Map all of BAR 0 (len 0 == whole BAR): the legacy header. */
	vp_dev->ioaddr = pci_iomap(pci_dev, 0, 0);
	if (vp_dev->ioaddr == NULL) {
		err = -ENOMEM;
		goto out_req_regions;
	}

	vp_dev->isr = vp_dev->ioaddr + VIRTIO_PCI_ISR;

	pci_set_drvdata(pci_dev, vp_dev);
	pci_set_master(pci_dev);

	/* we use the subsystem vendor/device id as the virtio vendor/device
	 * id. this allows us to use the same PCI vendor/device id for all
	 * virtio devices and to identify the particular virtio driver by
	 * the subsystem ids */
	vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor;
	vp_dev->vdev.id.device = pci_dev->subsystem_device;

	/* Hook up the legacy version-specific queue/vector operations. */
	vp_dev->config_vector = vp_config_vector;
	vp_dev->setup_vq = setup_vq;
	vp_dev->del_vq = del_vq;

	/* finally register the virtio device */
	err = register_virtio_device(&vp_dev->vdev);
	if (err)
		goto out_set_drvdata;

	return 0;

out_set_drvdata:
	pci_iounmap(pci_dev, vp_dev->ioaddr);
out_req_regions:
	pci_release_regions(pci_dev);
out_enable_device:
	pci_disable_device(pci_dev);
out:
	kfree(vp_dev);
	return err;
}
302
/* Exact unwind of probe(): unregister from the virtio core first so
 * attached drivers detach, then release queues, mapping, regions, the
 * PCI function and finally the structure itself. */
static void virtio_pci_remove(struct pci_dev *pci_dev)
{
	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);

	unregister_virtio_device(&vp_dev->vdev);

	vp_del_vqs(&vp_dev->vdev);
	pci_iounmap(pci_dev, vp_dev->ioaddr);
	pci_release_regions(pci_dev);
	pci_disable_device(pci_dev);
	kfree(vp_dev);
}
315
/* PCI driver glue; module_pci_driver() generates module init/exit. */
static struct pci_driver virtio_pci_driver = {
	.name = "virtio-pci",
	.id_table = virtio_pci_id_table,
	.probe = virtio_pci_probe,
	.remove = virtio_pci_remove,
#ifdef CONFIG_PM_SLEEP
	.driver.pm = &virtio_pci_pm_ops,
#endif
};

module_pci_driver(virtio_pci_driver);
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 3b1f89b6e743..00ec6b3f96b2 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -99,7 +99,8 @@ struct vring_virtqueue
99 99
100#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) 100#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
101 101
102static struct vring_desc *alloc_indirect(unsigned int total_sg, gfp_t gfp) 102static struct vring_desc *alloc_indirect(struct virtqueue *_vq,
103 unsigned int total_sg, gfp_t gfp)
103{ 104{
104 struct vring_desc *desc; 105 struct vring_desc *desc;
105 unsigned int i; 106 unsigned int i;
@@ -116,7 +117,7 @@ static struct vring_desc *alloc_indirect(unsigned int total_sg, gfp_t gfp)
116 return NULL; 117 return NULL;
117 118
118 for (i = 0; i < total_sg; i++) 119 for (i = 0; i < total_sg; i++)
119 desc[i].next = i+1; 120 desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
120 return desc; 121 return desc;
121} 122}
122 123
@@ -165,17 +166,17 @@ static inline int virtqueue_add(struct virtqueue *_vq,
165 /* If the host supports indirect descriptor tables, and we have multiple 166 /* If the host supports indirect descriptor tables, and we have multiple
166 * buffers, then go indirect. FIXME: tune this threshold */ 167 * buffers, then go indirect. FIXME: tune this threshold */
167 if (vq->indirect && total_sg > 1 && vq->vq.num_free) 168 if (vq->indirect && total_sg > 1 && vq->vq.num_free)
168 desc = alloc_indirect(total_sg, gfp); 169 desc = alloc_indirect(_vq, total_sg, gfp);
169 else 170 else
170 desc = NULL; 171 desc = NULL;
171 172
172 if (desc) { 173 if (desc) {
173 /* Use a single buffer which doesn't continue */ 174 /* Use a single buffer which doesn't continue */
174 vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT; 175 vq->vring.desc[head].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_INDIRECT);
175 vq->vring.desc[head].addr = virt_to_phys(desc); 176 vq->vring.desc[head].addr = cpu_to_virtio64(_vq->vdev, virt_to_phys(desc));
176 /* avoid kmemleak false positive (hidden by virt_to_phys) */ 177 /* avoid kmemleak false positive (hidden by virt_to_phys) */
177 kmemleak_ignore(desc); 178 kmemleak_ignore(desc);
178 vq->vring.desc[head].len = total_sg * sizeof(struct vring_desc); 179 vq->vring.desc[head].len = cpu_to_virtio32(_vq->vdev, total_sg * sizeof(struct vring_desc));
179 180
180 /* Set up rest to use this indirect table. */ 181 /* Set up rest to use this indirect table. */
181 i = 0; 182 i = 0;
@@ -205,28 +206,28 @@ static inline int virtqueue_add(struct virtqueue *_vq,
205 206
206 for (n = 0; n < out_sgs; n++) { 207 for (n = 0; n < out_sgs; n++) {
207 for (sg = sgs[n]; sg; sg = sg_next(sg)) { 208 for (sg = sgs[n]; sg; sg = sg_next(sg)) {
208 desc[i].flags = VRING_DESC_F_NEXT; 209 desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT);
209 desc[i].addr = sg_phys(sg); 210 desc[i].addr = cpu_to_virtio64(_vq->vdev, sg_phys(sg));
210 desc[i].len = sg->length; 211 desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
211 prev = i; 212 prev = i;
212 i = desc[i].next; 213 i = virtio16_to_cpu(_vq->vdev, desc[i].next);
213 } 214 }
214 } 215 }
215 for (; n < (out_sgs + in_sgs); n++) { 216 for (; n < (out_sgs + in_sgs); n++) {
216 for (sg = sgs[n]; sg; sg = sg_next(sg)) { 217 for (sg = sgs[n]; sg; sg = sg_next(sg)) {
217 desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE; 218 desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE);
218 desc[i].addr = sg_phys(sg); 219 desc[i].addr = cpu_to_virtio64(_vq->vdev, sg_phys(sg));
219 desc[i].len = sg->length; 220 desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
220 prev = i; 221 prev = i;
221 i = desc[i].next; 222 i = virtio16_to_cpu(_vq->vdev, desc[i].next);
222 } 223 }
223 } 224 }
224 /* Last one doesn't continue. */ 225 /* Last one doesn't continue. */
225 desc[prev].flags &= ~VRING_DESC_F_NEXT; 226 desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);
226 227
227 /* Update free pointer */ 228 /* Update free pointer */
228 if (indirect) 229 if (indirect)
229 vq->free_head = vq->vring.desc[head].next; 230 vq->free_head = virtio16_to_cpu(_vq->vdev, vq->vring.desc[head].next);
230 else 231 else
231 vq->free_head = i; 232 vq->free_head = i;
232 233
@@ -235,13 +236,13 @@ static inline int virtqueue_add(struct virtqueue *_vq,
235 236
236 /* Put entry in available array (but don't update avail->idx until they 237 /* Put entry in available array (but don't update avail->idx until they
237 * do sync). */ 238 * do sync). */
238 avail = (vq->vring.avail->idx & (vq->vring.num-1)); 239 avail = virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) & (vq->vring.num - 1);
239 vq->vring.avail->ring[avail] = head; 240 vq->vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);
240 241
241 /* Descriptors and available array need to be set before we expose the 242 /* Descriptors and available array need to be set before we expose the
242 * new available array entries. */ 243 * new available array entries. */
243 virtio_wmb(vq->weak_barriers); 244 virtio_wmb(vq->weak_barriers);
244 vq->vring.avail->idx++; 245 vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) + 1);
245 vq->num_added++; 246 vq->num_added++;
246 247
247 /* This is very unlikely, but theoretically possible. Kick 248 /* This is very unlikely, but theoretically possible. Kick
@@ -354,8 +355,8 @@ bool virtqueue_kick_prepare(struct virtqueue *_vq)
354 * event. */ 355 * event. */
355 virtio_mb(vq->weak_barriers); 356 virtio_mb(vq->weak_barriers);
356 357
357 old = vq->vring.avail->idx - vq->num_added; 358 old = virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) - vq->num_added;
358 new = vq->vring.avail->idx; 359 new = virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx);
359 vq->num_added = 0; 360 vq->num_added = 0;
360 361
361#ifdef DEBUG 362#ifdef DEBUG
@@ -367,10 +368,10 @@ bool virtqueue_kick_prepare(struct virtqueue *_vq)
367#endif 368#endif
368 369
369 if (vq->event) { 370 if (vq->event) {
370 needs_kick = vring_need_event(vring_avail_event(&vq->vring), 371 needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev, vring_avail_event(&vq->vring)),
371 new, old); 372 new, old);
372 } else { 373 } else {
373 needs_kick = !(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY); 374 needs_kick = !(vq->vring.used->flags & cpu_to_virtio16(_vq->vdev, VRING_USED_F_NO_NOTIFY));
374 } 375 }
375 END_USE(vq); 376 END_USE(vq);
376 return needs_kick; 377 return needs_kick;
@@ -432,15 +433,15 @@ static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
432 i = head; 433 i = head;
433 434
434 /* Free the indirect table */ 435 /* Free the indirect table */
435 if (vq->vring.desc[i].flags & VRING_DESC_F_INDIRECT) 436 if (vq->vring.desc[i].flags & cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT))
436 kfree(phys_to_virt(vq->vring.desc[i].addr)); 437 kfree(phys_to_virt(virtio64_to_cpu(vq->vq.vdev, vq->vring.desc[i].addr)));
437 438
438 while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) { 439 while (vq->vring.desc[i].flags & cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT)) {
439 i = vq->vring.desc[i].next; 440 i = virtio16_to_cpu(vq->vq.vdev, vq->vring.desc[i].next);
440 vq->vq.num_free++; 441 vq->vq.num_free++;
441 } 442 }
442 443
443 vq->vring.desc[i].next = vq->free_head; 444 vq->vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev, vq->free_head);
444 vq->free_head = head; 445 vq->free_head = head;
445 /* Plus final descriptor */ 446 /* Plus final descriptor */
446 vq->vq.num_free++; 447 vq->vq.num_free++;
@@ -448,7 +449,7 @@ static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
448 449
449static inline bool more_used(const struct vring_virtqueue *vq) 450static inline bool more_used(const struct vring_virtqueue *vq)
450{ 451{
451 return vq->last_used_idx != vq->vring.used->idx; 452 return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev, vq->vring.used->idx);
452} 453}
453 454
454/** 455/**
@@ -491,8 +492,8 @@ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
491 virtio_rmb(vq->weak_barriers); 492 virtio_rmb(vq->weak_barriers);
492 493
493 last_used = (vq->last_used_idx & (vq->vring.num - 1)); 494 last_used = (vq->last_used_idx & (vq->vring.num - 1));
494 i = vq->vring.used->ring[last_used].id; 495 i = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].id);
495 *len = vq->vring.used->ring[last_used].len; 496 *len = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].len);
496 497
497 if (unlikely(i >= vq->vring.num)) { 498 if (unlikely(i >= vq->vring.num)) {
498 BAD_RING(vq, "id %u out of range\n", i); 499 BAD_RING(vq, "id %u out of range\n", i);
@@ -510,8 +511,8 @@ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
510 /* If we expect an interrupt for the next entry, tell host 511 /* If we expect an interrupt for the next entry, tell host
511 * by writing event index and flush out the write before 512 * by writing event index and flush out the write before
512 * the read in the next get_buf call. */ 513 * the read in the next get_buf call. */
513 if (!(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) { 514 if (!(vq->vring.avail->flags & cpu_to_virtio16(_vq->vdev, VRING_AVAIL_F_NO_INTERRUPT))) {
514 vring_used_event(&vq->vring) = vq->last_used_idx; 515 vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, vq->last_used_idx);
515 virtio_mb(vq->weak_barriers); 516 virtio_mb(vq->weak_barriers);
516 } 517 }
517 518
@@ -537,7 +538,7 @@ void virtqueue_disable_cb(struct virtqueue *_vq)
537{ 538{
538 struct vring_virtqueue *vq = to_vvq(_vq); 539 struct vring_virtqueue *vq = to_vvq(_vq);
539 540
540 vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT; 541 vq->vring.avail->flags |= cpu_to_virtio16(_vq->vdev, VRING_AVAIL_F_NO_INTERRUPT);
541} 542}
542EXPORT_SYMBOL_GPL(virtqueue_disable_cb); 543EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
543 544
@@ -565,8 +566,8 @@ unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
565 /* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to 566 /* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
566 * either clear the flags bit or point the event index at the next 567 * either clear the flags bit or point the event index at the next
567 * entry. Always do both to keep code simple. */ 568 * entry. Always do both to keep code simple. */
568 vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT; 569 vq->vring.avail->flags &= cpu_to_virtio16(_vq->vdev, ~VRING_AVAIL_F_NO_INTERRUPT);
569 vring_used_event(&vq->vring) = last_used_idx = vq->last_used_idx; 570 vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, last_used_idx = vq->last_used_idx);
570 END_USE(vq); 571 END_USE(vq);
571 return last_used_idx; 572 return last_used_idx;
572} 573}
@@ -586,7 +587,7 @@ bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
586 struct vring_virtqueue *vq = to_vvq(_vq); 587 struct vring_virtqueue *vq = to_vvq(_vq);
587 588
588 virtio_mb(vq->weak_barriers); 589 virtio_mb(vq->weak_barriers);
589 return (u16)last_used_idx != vq->vring.used->idx; 590 return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev, vq->vring.used->idx);
590} 591}
591EXPORT_SYMBOL_GPL(virtqueue_poll); 592EXPORT_SYMBOL_GPL(virtqueue_poll);
592 593
@@ -633,12 +634,12 @@ bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
633 /* Depending on the VIRTIO_RING_F_USED_EVENT_IDX feature, we need to 634 /* Depending on the VIRTIO_RING_F_USED_EVENT_IDX feature, we need to
634 * either clear the flags bit or point the event index at the next 635 * either clear the flags bit or point the event index at the next
635 * entry. Always do both to keep code simple. */ 636 * entry. Always do both to keep code simple. */
636 vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT; 637 vq->vring.avail->flags &= cpu_to_virtio16(_vq->vdev, ~VRING_AVAIL_F_NO_INTERRUPT);
637 /* TODO: tune this threshold */ 638 /* TODO: tune this threshold */
638 bufs = (u16)(vq->vring.avail->idx - vq->last_used_idx) * 3 / 4; 639 bufs = (u16)(virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) - vq->last_used_idx) * 3 / 4;
639 vring_used_event(&vq->vring) = vq->last_used_idx + bufs; 640 vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs);
640 virtio_mb(vq->weak_barriers); 641 virtio_mb(vq->weak_barriers);
641 if (unlikely((u16)(vq->vring.used->idx - vq->last_used_idx) > bufs)) { 642 if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->vring.used->idx) - vq->last_used_idx) > bufs)) {
642 END_USE(vq); 643 END_USE(vq);
643 return false; 644 return false;
644 } 645 }
@@ -670,7 +671,7 @@ void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
670 /* detach_buf clears data, so grab it now. */ 671 /* detach_buf clears data, so grab it now. */
671 buf = vq->data[i]; 672 buf = vq->data[i];
672 detach_buf(vq, i); 673 detach_buf(vq, i);
673 vq->vring.avail->idx--; 674 vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) - 1);
674 END_USE(vq); 675 END_USE(vq);
675 return buf; 676 return buf;
676 } 677 }
@@ -747,12 +748,12 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
747 748
748 /* No callback? Tell other side not to bother us. */ 749 /* No callback? Tell other side not to bother us. */
749 if (!callback) 750 if (!callback)
750 vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT; 751 vq->vring.avail->flags |= cpu_to_virtio16(vdev, VRING_AVAIL_F_NO_INTERRUPT);
751 752
752 /* Put everything in free lists. */ 753 /* Put everything in free lists. */
753 vq->free_head = 0; 754 vq->free_head = 0;
754 for (i = 0; i < num-1; i++) { 755 for (i = 0; i < num-1; i++) {
755 vq->vring.desc[i].next = i+1; 756 vq->vring.desc[i].next = cpu_to_virtio16(vdev, i + 1);
756 vq->data[i] = NULL; 757 vq->data[i] = NULL;
757 } 758 }
758 vq->data[i] = NULL; 759 vq->data[i] = NULL;
@@ -779,9 +780,11 @@ void vring_transport_features(struct virtio_device *vdev)
779 break; 780 break;
780 case VIRTIO_RING_F_EVENT_IDX: 781 case VIRTIO_RING_F_EVENT_IDX:
781 break; 782 break;
783 case VIRTIO_F_VERSION_1:
784 break;
782 default: 785 default:
783 /* We don't understand this bit. */ 786 /* We don't understand this bit. */
784 clear_bit(i, vdev->features); 787 __virtio_clear_bit(vdev, i);
785 } 788 }
786 } 789 }
787} 790}
@@ -826,4 +829,20 @@ void virtio_break_device(struct virtio_device *dev)
826} 829}
827EXPORT_SYMBOL_GPL(virtio_break_device); 830EXPORT_SYMBOL_GPL(virtio_break_device);
828 831
832void *virtqueue_get_avail(struct virtqueue *_vq)
833{
834 struct vring_virtqueue *vq = to_vvq(_vq);
835
836 return vq->vring.avail;
837}
838EXPORT_SYMBOL_GPL(virtqueue_get_avail);
839
840void *virtqueue_get_used(struct virtqueue *_vq)
841{
842 struct vring_virtqueue *vq = to_vvq(_vq);
843
844 return vq->vring.used;
845}
846EXPORT_SYMBOL_GPL(virtqueue_get_used);
847
829MODULE_LICENSE("GPL"); 848MODULE_LICENSE("GPL");
diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c
index 9900e8ec7393..03321d6a2684 100644
--- a/drivers/w1/masters/omap_hdq.c
+++ b/drivers/w1/masters/omap_hdq.c
@@ -72,11 +72,18 @@ struct hdq_data {
72static int omap_hdq_probe(struct platform_device *pdev); 72static int omap_hdq_probe(struct platform_device *pdev);
73static int omap_hdq_remove(struct platform_device *pdev); 73static int omap_hdq_remove(struct platform_device *pdev);
74 74
75static struct of_device_id omap_hdq_dt_ids[] = {
76 { .compatible = "ti,omap3-1w" },
77 {}
78};
79MODULE_DEVICE_TABLE(of, omap_hdq_dt_ids);
80
75static struct platform_driver omap_hdq_driver = { 81static struct platform_driver omap_hdq_driver = {
76 .probe = omap_hdq_probe, 82 .probe = omap_hdq_probe,
77 .remove = omap_hdq_remove, 83 .remove = omap_hdq_remove,
78 .driver = { 84 .driver = {
79 .name = "omap_hdq", 85 .name = "omap_hdq",
86 .of_match_table = omap_hdq_dt_ids,
80 }, 87 },
81}; 88};
82 89
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index d0107d424ee4..08f41add1461 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -154,14 +154,14 @@ config ARM_SP805_WATCHDOG
154 154
155config AT91RM9200_WATCHDOG 155config AT91RM9200_WATCHDOG
156 tristate "AT91RM9200 watchdog" 156 tristate "AT91RM9200 watchdog"
157 depends on ARCH_AT91RM9200 157 depends on SOC_AT91RM9200
158 help 158 help
159 Watchdog timer embedded into AT91RM9200 chips. This will reboot your 159 Watchdog timer embedded into AT91RM9200 chips. This will reboot your
160 system when the timeout is reached. 160 system when the timeout is reached.
161 161
162config AT91SAM9X_WATCHDOG 162config AT91SAM9X_WATCHDOG
163 tristate "AT91SAM9X / AT91CAP9 watchdog" 163 tristate "AT91SAM9X / AT91CAP9 watchdog"
164 depends on ARCH_AT91 && !ARCH_AT91RM9200 164 depends on ARCH_AT91
165 select WATCHDOG_CORE 165 select WATCHDOG_CORE
166 help 166 help
167 Watchdog timer embedded into AT91SAM9X and AT91CAP9 chips. This will 167 Watchdog timer embedded into AT91SAM9X and AT91CAP9 chips. This will
diff --git a/drivers/watchdog/orion_wdt.c b/drivers/watchdog/orion_wdt.c
index 00d0741228fc..8cb1ff3bcd90 100644
--- a/drivers/watchdog/orion_wdt.c
+++ b/drivers/watchdog/orion_wdt.c
@@ -114,6 +114,46 @@ static int armada370_wdt_clock_init(struct platform_device *pdev,
114 return 0; 114 return 0;
115} 115}
116 116
117static int armada375_wdt_clock_init(struct platform_device *pdev,
118 struct orion_watchdog *dev)
119{
120 int ret;
121
122 dev->clk = of_clk_get_by_name(pdev->dev.of_node, "fixed");
123 if (!IS_ERR(dev->clk)) {
124 ret = clk_prepare_enable(dev->clk);
125 if (ret) {
126 clk_put(dev->clk);
127 return ret;
128 }
129
130 atomic_io_modify(dev->reg + TIMER_CTRL,
131 WDT_AXP_FIXED_ENABLE_BIT,
132 WDT_AXP_FIXED_ENABLE_BIT);
133 dev->clk_rate = clk_get_rate(dev->clk);
134
135 return 0;
136 }
137
138 /* Mandatory fallback for proper devicetree backward compatibility */
139 dev->clk = clk_get(&pdev->dev, NULL);
140 if (IS_ERR(dev->clk))
141 return PTR_ERR(dev->clk);
142
143 ret = clk_prepare_enable(dev->clk);
144 if (ret) {
145 clk_put(dev->clk);
146 return ret;
147 }
148
149 atomic_io_modify(dev->reg + TIMER_CTRL,
150 WDT_A370_RATIO_MASK(WDT_A370_RATIO_SHIFT),
151 WDT_A370_RATIO_MASK(WDT_A370_RATIO_SHIFT));
152 dev->clk_rate = clk_get_rate(dev->clk) / WDT_A370_RATIO;
153
154 return 0;
155}
156
117static int armadaxp_wdt_clock_init(struct platform_device *pdev, 157static int armadaxp_wdt_clock_init(struct platform_device *pdev,
118 struct orion_watchdog *dev) 158 struct orion_watchdog *dev)
119{ 159{
@@ -394,7 +434,7 @@ static const struct orion_watchdog_data armada375_data = {
394 .rstout_mask_bit = BIT(10), 434 .rstout_mask_bit = BIT(10),
395 .wdt_enable_bit = BIT(8), 435 .wdt_enable_bit = BIT(8),
396 .wdt_counter_offset = 0x34, 436 .wdt_counter_offset = 0x34,
397 .clock_init = armada370_wdt_clock_init, 437 .clock_init = armada375_wdt_clock_init,
398 .enabled = armada375_enabled, 438 .enabled = armada375_enabled,
399 .start = armada375_start, 439 .start = armada375_start,
400 .stop = armada375_stop, 440 .stop = armada375_stop,
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index 8532c3e2aea7..1626dc66e763 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -161,7 +161,7 @@ static const struct s3c2410_wdt_variant drv_data_exynos5420 = {
161static const struct s3c2410_wdt_variant drv_data_exynos7 = { 161static const struct s3c2410_wdt_variant drv_data_exynos7 = {
162 .disable_reg = EXYNOS5_WDT_DISABLE_REG_OFFSET, 162 .disable_reg = EXYNOS5_WDT_DISABLE_REG_OFFSET,
163 .mask_reset_reg = EXYNOS5_WDT_MASK_RESET_REG_OFFSET, 163 .mask_reset_reg = EXYNOS5_WDT_MASK_RESET_REG_OFFSET,
164 .mask_bit = 0, 164 .mask_bit = 23,
165 .rst_stat_reg = EXYNOS5_RST_STAT_REG_OFFSET, 165 .rst_stat_reg = EXYNOS5_RST_STAT_REG_OFFSET,
166 .rst_stat_bit = 23, /* A57 WDTRESET */ 166 .rst_stat_bit = 23, /* A57 WDTRESET */
167 .quirks = QUIRK_HAS_PMU_CONFIG | QUIRK_HAS_RST_STAT, 167 .quirks = QUIRK_HAS_PMU_CONFIG | QUIRK_HAS_RST_STAT,
diff --git a/drivers/xen/efi.c b/drivers/xen/efi.c
index 1f850c97482f..f745db270171 100644
--- a/drivers/xen/efi.c
+++ b/drivers/xen/efi.c
@@ -294,6 +294,7 @@ static const struct efi efi_xen __initconst = {
294 .acpi = EFI_INVALID_TABLE_ADDR, 294 .acpi = EFI_INVALID_TABLE_ADDR,
295 .acpi20 = EFI_INVALID_TABLE_ADDR, 295 .acpi20 = EFI_INVALID_TABLE_ADDR,
296 .smbios = EFI_INVALID_TABLE_ADDR, 296 .smbios = EFI_INVALID_TABLE_ADDR,
297 .smbios3 = EFI_INVALID_TABLE_ADDR,
297 .sal_systab = EFI_INVALID_TABLE_ADDR, 298 .sal_systab = EFI_INVALID_TABLE_ADDR,
298 .boot_info = EFI_INVALID_TABLE_ADDR, 299 .boot_info = EFI_INVALID_TABLE_ADDR,
299 .hcdp = EFI_INVALID_TABLE_ADDR, 300 .hcdp = EFI_INVALID_TABLE_ADDR,
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index 3e32146472a5..50610a6acf3d 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -274,10 +274,6 @@ static void scsiback_print_status(char *sense_buffer, int errors,
274 tpg->tport->tport_name, pending_req->v2p->lun, 274 tpg->tport->tport_name, pending_req->v2p->lun,
275 pending_req->cmnd[0], status_byte(errors), msg_byte(errors), 275 pending_req->cmnd[0], status_byte(errors), msg_byte(errors),
276 host_byte(errors), driver_byte(errors)); 276 host_byte(errors), driver_byte(errors));
277
278 if (CHECK_CONDITION & status_byte(errors))
279 __scsi_print_sense("xen-pvscsi", sense_buffer,
280 SCSI_SENSE_BUFFERSIZE);
281} 277}
282 278
283static void scsiback_fast_flush_area(struct vscsibk_pend *req) 279static void scsiback_fast_flush_area(struct vscsibk_pend *req)